1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* Copyright 2010 QLogic Corporation */ 23 24 /* 25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 26 */ 27 28 /* 29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 30 */ 31 32 #pragma ident "Copyright 2010 QLogic Corporation; ql_xioctl.c" 33 34 /* 35 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file. 
36 * 37 * *********************************************************************** 38 * * ** 39 * * NOTICE ** 40 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION ** 41 * * ALL RIGHTS RESERVED ** 42 * * ** 43 * *********************************************************************** 44 * 45 */ 46 47 #include <ql_apps.h> 48 #include <ql_api.h> 49 #include <ql_debug.h> 50 #include <ql_init.h> 51 #include <ql_iocb.h> 52 #include <ql_ioctl.h> 53 #include <ql_mbx.h> 54 #include <ql_xioctl.h> 55 56 /* 57 * Local data 58 */ 59 60 /* 61 * Local prototypes 62 */ 63 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int); 64 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int, 65 boolean_t (*)(EXT_IOCTL *)); 66 static boolean_t ql_validate_signature(EXT_IOCTL *); 67 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int); 68 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int); 69 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int); 70 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int); 71 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int); 72 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int); 73 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int); 74 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int); 75 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int); 76 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int); 77 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int); 78 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int); 79 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int); 80 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int); 81 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int); 82 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int); 83 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int); 84 static void 
ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int); 85 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int); 86 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int); 87 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int); 88 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int); 89 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int); 90 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int); 91 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int); 92 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int); 93 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int); 94 95 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *); 96 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *); 97 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int); 98 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *, 99 uint8_t); 100 static uint32_t ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int); 101 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int); 102 static int ql_24xx_flash_desc(ql_adapter_state_t *); 103 static int ql_setup_flash(ql_adapter_state_t *); 104 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t); 105 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int); 106 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t, 107 uint32_t, int); 108 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, 109 uint8_t); 110 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int); 111 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int); 112 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *); 113 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int); 114 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int); 115 static void 
ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int); 116 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int); 117 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int); 118 static void ql_drive_led(ql_adapter_state_t *, uint32_t); 119 static uint32_t ql_setup_led(ql_adapter_state_t *); 120 static uint32_t ql_wrapup_led(ql_adapter_state_t *); 121 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int); 122 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int); 123 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int); 124 static int ql_dump_sfp(ql_adapter_state_t *, void *, int); 125 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *); 126 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int); 127 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int); 128 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t); 129 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *); 130 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t); 131 static void ql_process_flt(ql_adapter_state_t *, uint32_t); 132 static void ql_flash_nvram_defaults(ql_adapter_state_t *); 133 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int); 134 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *); 135 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int); 136 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int); 137 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int); 138 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int); 139 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int); 140 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int); 141 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int); 142 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t); 143 static void 
ql_restart_hba(ql_adapter_state_t *); 144 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int); 145 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int); 146 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int); 147 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int); 148 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *); 149 static void ql_update_flash_caches(ql_adapter_state_t *); 150 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int); 151 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int); 152 static void ql_get_fcf_list(ql_adapter_state_t *, EXT_IOCTL *, int); 153 static void ql_get_resource_counts(ql_adapter_state_t *, EXT_IOCTL *, int); 154 static void ql_qry_adapter_versions(ql_adapter_state_t *, EXT_IOCTL *, int); 155 static int ql_set_loop_point(ql_adapter_state_t *, uint16_t); 156 157 /* ******************************************************************** */ 158 /* External IOCTL support. */ 159 /* ******************************************************************** */ 160 161 /* 162 * ql_alloc_xioctl_resource 163 * Allocates resources needed by module code. 164 * 165 * Input: 166 * ha: adapter state pointer. 167 * 168 * Returns: 169 * SYS_ERRNO 170 * 171 * Context: 172 * Kernel context. 
173 */ 174 int 175 ql_alloc_xioctl_resource(ql_adapter_state_t *ha) 176 { 177 ql_xioctl_t *xp; 178 179 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 180 181 if (ha->xioctl != NULL) { 182 QL_PRINT_9(CE_CONT, "(%d): already allocated done\n", 183 ha->instance); 184 return (0); 185 } 186 187 xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP); 188 ha->xioctl = xp; 189 190 /* Allocate AEN tracking buffer */ 191 xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE * 192 sizeof (EXT_ASYNC_EVENT), KM_SLEEP); 193 194 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 195 196 return (0); 197 } 198 199 /* 200 * ql_free_xioctl_resource 201 * Frees resources used by module code. 202 * 203 * Input: 204 * ha: adapter state pointer. 205 * 206 * Context: 207 * Kernel context. 208 */ 209 void 210 ql_free_xioctl_resource(ql_adapter_state_t *ha) 211 { 212 ql_xioctl_t *xp = ha->xioctl; 213 214 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 215 216 if (xp == NULL) { 217 QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance); 218 return; 219 } 220 221 if (xp->aen_tracking_queue != NULL) { 222 kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE * 223 sizeof (EXT_ASYNC_EVENT)); 224 xp->aen_tracking_queue = NULL; 225 } 226 227 kmem_free(xp, sizeof (ql_xioctl_t)); 228 ha->xioctl = NULL; 229 230 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 231 } 232 233 /* 234 * ql_xioctl 235 * External IOCTL processing. 236 * 237 * Input: 238 * ha: adapter state pointer. 239 * cmd: function to perform 240 * arg: data type varies with request 241 * mode: flags 242 * cred_p: credentials pointer 243 * rval_p: pointer to result value 244 * 245 * Returns: 246 * 0: success 247 * ENXIO: No such device or address 248 * ENOPROTOOPT: Protocol not available 249 * 250 * Context: 251 * Kernel context. 
252 */ 253 /* ARGSUSED */ 254 int 255 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode, 256 cred_t *cred_p, int *rval_p) 257 { 258 int rval; 259 260 QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd); 261 262 if (ha->xioctl == NULL) { 263 QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance); 264 return (ENXIO); 265 } 266 267 switch (cmd) { 268 case EXT_CC_QUERY: 269 case EXT_CC_SEND_FCCT_PASSTHRU: 270 case EXT_CC_REG_AEN: 271 case EXT_CC_GET_AEN: 272 case EXT_CC_SEND_SCSI_PASSTHRU: 273 case EXT_CC_WWPN_TO_SCSIADDR: 274 case EXT_CC_SEND_ELS_RNID: 275 case EXT_CC_SET_DATA: 276 case EXT_CC_GET_DATA: 277 case EXT_CC_HOST_IDX: 278 case EXT_CC_READ_NVRAM: 279 case EXT_CC_UPDATE_NVRAM: 280 case EXT_CC_READ_OPTION_ROM: 281 case EXT_CC_READ_OPTION_ROM_EX: 282 case EXT_CC_UPDATE_OPTION_ROM: 283 case EXT_CC_UPDATE_OPTION_ROM_EX: 284 case EXT_CC_GET_VPD: 285 case EXT_CC_SET_VPD: 286 case EXT_CC_LOOPBACK: 287 case EXT_CC_GET_FCACHE: 288 case EXT_CC_GET_FCACHE_EX: 289 case EXT_CC_HOST_DRVNAME: 290 case EXT_CC_GET_SFP_DATA: 291 case EXT_CC_PORT_PARAM: 292 case EXT_CC_GET_PCI_DATA: 293 case EXT_CC_GET_FWEXTTRACE: 294 case EXT_CC_GET_FWFCETRACE: 295 case EXT_CC_GET_VP_CNT_ID: 296 case EXT_CC_VPORT_CMD: 297 case EXT_CC_ACCESS_FLASH: 298 case EXT_CC_RESET_FW: 299 case EXT_CC_MENLO_MANAGE_INFO: 300 rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode); 301 break; 302 default: 303 /* function not supported. */ 304 EL(ha, "function=%d not supported\n", cmd); 305 rval = ENOPROTOOPT; 306 } 307 308 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 309 310 return (rval); 311 } 312 313 /* 314 * ql_sdm_ioctl 315 * Provides ioctl functions for SAN/Device Management functions 316 * AKA External Ioctl functions. 317 * 318 * Input: 319 * ha: adapter state pointer. 320 * ioctl_code: ioctl function to perform 321 * arg: Pointer to EXT_IOCTL cmd data in application land. 
322 * mode: flags 323 * 324 * Returns: 325 * 0: success 326 * ENOMEM: Alloc of local EXT_IOCTL struct failed. 327 * EFAULT: Copyin of caller's EXT_IOCTL struct failed or 328 * copyout of EXT_IOCTL status info failed. 329 * EINVAL: Signature or version of caller's EXT_IOCTL invalid. 330 * EBUSY: Device busy 331 * 332 * Context: 333 * Kernel context. 334 */ 335 static int 336 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode) 337 { 338 EXT_IOCTL *cmd; 339 int rval; 340 ql_adapter_state_t *vha; 341 342 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 343 344 /* Copy argument structure (EXT_IOCTL) from application land. */ 345 if ((rval = ql_sdm_setup(ha, &cmd, arg, mode, 346 ql_validate_signature)) != 0) { 347 /* 348 * a non-zero value at this time means a problem getting 349 * the requested information from application land, just 350 * return the error code and hope for the best. 351 */ 352 EL(ha, "failed, sdm_setup\n"); 353 return (rval); 354 } 355 356 /* 357 * Map the physical ha ptr (which the ioctl is called with) 358 * to the virtual ha that the caller is addressing. 359 */ 360 if (ha->flags & VP_ENABLED) { 361 /* Check that it is within range. */ 362 if (cmd->HbaSelect > (CFG_IST(ha, CFG_CTRL_2422) ? 363 MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) { 364 EL(ha, "Invalid HbaSelect vp index: %xh\n", 365 cmd->HbaSelect); 366 cmd->Status = EXT_STATUS_INVALID_VPINDEX; 367 cmd->ResponseLen = 0; 368 return (EFAULT); 369 } 370 /* 371 * Special case: HbaSelect == 0 is physical ha 372 */ 373 if (cmd->HbaSelect != 0) { 374 vha = ha->vp_next; 375 while (vha != NULL) { 376 if (vha->vp_index == cmd->HbaSelect) { 377 ha = vha; 378 break; 379 } 380 vha = vha->vp_next; 381 } 382 /* 383 * The specified vp index may be valid(within range) 384 * but it's not in the list. Currently this is all 385 * we can say. 
386 */ 387 if (vha == NULL) { 388 cmd->Status = EXT_STATUS_INVALID_VPINDEX; 389 cmd->ResponseLen = 0; 390 return (EFAULT); 391 } 392 } 393 } 394 395 /* 396 * If driver is suspended, stalled, or powered down rtn BUSY 397 */ 398 if (ha->flags & ADAPTER_SUSPENDED || 399 ha->task_daemon_flags & DRIVER_STALL || 400 ha->power_level != PM_LEVEL_D0) { 401 EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ? 402 "driver suspended" : 403 (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" : 404 "FCA powered down")); 405 cmd->Status = EXT_STATUS_BUSY; 406 cmd->ResponseLen = 0; 407 rval = EBUSY; 408 409 /* Return results to caller */ 410 if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) { 411 EL(ha, "failed, sdm_return\n"); 412 rval = EFAULT; 413 } 414 return (rval); 415 } 416 417 switch (ioctl_code) { 418 case EXT_CC_QUERY_OS: 419 ql_query(ha, cmd, mode); 420 break; 421 case EXT_CC_SEND_FCCT_PASSTHRU_OS: 422 ql_fcct(ha, cmd, mode); 423 break; 424 case EXT_CC_REG_AEN_OS: 425 ql_aen_reg(ha, cmd, mode); 426 break; 427 case EXT_CC_GET_AEN_OS: 428 ql_aen_get(ha, cmd, mode); 429 break; 430 case EXT_CC_GET_DATA_OS: 431 ql_get_host_data(ha, cmd, mode); 432 break; 433 case EXT_CC_SET_DATA_OS: 434 ql_set_host_data(ha, cmd, mode); 435 break; 436 case EXT_CC_SEND_ELS_RNID_OS: 437 ql_send_els_rnid(ha, cmd, mode); 438 break; 439 case EXT_CC_SCSI_PASSTHRU_OS: 440 ql_scsi_passthru(ha, cmd, mode); 441 break; 442 case EXT_CC_WWPN_TO_SCSIADDR_OS: 443 ql_wwpn_to_scsiaddr(ha, cmd, mode); 444 break; 445 case EXT_CC_HOST_IDX_OS: 446 ql_host_idx(ha, cmd, mode); 447 break; 448 case EXT_CC_HOST_DRVNAME_OS: 449 ql_host_drvname(ha, cmd, mode); 450 break; 451 case EXT_CC_READ_NVRAM_OS: 452 ql_read_nvram(ha, cmd, mode); 453 break; 454 case EXT_CC_UPDATE_NVRAM_OS: 455 ql_write_nvram(ha, cmd, mode); 456 break; 457 case EXT_CC_READ_OPTION_ROM_OS: 458 case EXT_CC_READ_OPTION_ROM_EX_OS: 459 ql_read_flash(ha, cmd, mode); 460 break; 461 case EXT_CC_UPDATE_OPTION_ROM_OS: 462 case EXT_CC_UPDATE_OPTION_ROM_EX_OS: 463 
ql_write_flash(ha, cmd, mode); 464 break; 465 case EXT_CC_LOOPBACK_OS: 466 ql_diagnostic_loopback(ha, cmd, mode); 467 break; 468 case EXT_CC_GET_VPD_OS: 469 ql_read_vpd(ha, cmd, mode); 470 break; 471 case EXT_CC_SET_VPD_OS: 472 ql_write_vpd(ha, cmd, mode); 473 break; 474 case EXT_CC_GET_FCACHE_OS: 475 ql_get_fcache(ha, cmd, mode); 476 break; 477 case EXT_CC_GET_FCACHE_EX_OS: 478 ql_get_fcache_ex(ha, cmd, mode); 479 break; 480 case EXT_CC_GET_SFP_DATA_OS: 481 ql_get_sfp(ha, cmd, mode); 482 break; 483 case EXT_CC_PORT_PARAM_OS: 484 ql_port_param(ha, cmd, mode); 485 break; 486 case EXT_CC_GET_PCI_DATA_OS: 487 ql_get_pci_data(ha, cmd, mode); 488 break; 489 case EXT_CC_GET_FWEXTTRACE_OS: 490 ql_get_fwexttrace(ha, cmd, mode); 491 break; 492 case EXT_CC_GET_FWFCETRACE_OS: 493 ql_get_fwfcetrace(ha, cmd, mode); 494 break; 495 case EXT_CC_MENLO_RESET: 496 ql_menlo_reset(ha, cmd, mode); 497 break; 498 case EXT_CC_MENLO_GET_FW_VERSION: 499 ql_menlo_get_fw_version(ha, cmd, mode); 500 break; 501 case EXT_CC_MENLO_UPDATE_FW: 502 ql_menlo_update_fw(ha, cmd, mode); 503 break; 504 case EXT_CC_MENLO_MANAGE_INFO: 505 ql_menlo_manage_info(ha, cmd, mode); 506 break; 507 case EXT_CC_GET_VP_CNT_ID_OS: 508 ql_get_vp_cnt_id(ha, cmd, mode); 509 break; 510 case EXT_CC_VPORT_CMD_OS: 511 ql_vp_ioctl(ha, cmd, mode); 512 break; 513 case EXT_CC_ACCESS_FLASH_OS: 514 ql_access_flash(ha, cmd, mode); 515 break; 516 case EXT_CC_RESET_FW_OS: 517 ql_reset_cmd(ha, cmd); 518 break; 519 default: 520 /* function not supported. 
*/ 521 EL(ha, "failed, function not supported=%d\n", ioctl_code); 522 523 cmd->Status = EXT_STATUS_INVALID_REQUEST; 524 cmd->ResponseLen = 0; 525 break; 526 } 527 528 /* Return results to caller */ 529 if (ql_sdm_return(ha, cmd, arg, mode) == -1) { 530 EL(ha, "failed, sdm_return\n"); 531 return (EFAULT); 532 } 533 534 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 535 536 return (0); 537 } 538 539 /* 540 * ql_sdm_setup 541 * Make a local copy of the EXT_IOCTL struct and validate it. 542 * 543 * Input: 544 * ha: adapter state pointer. 545 * cmd_struct: Pointer to location to store local adrs of EXT_IOCTL. 546 * arg: Address of application EXT_IOCTL cmd data 547 * mode: flags 548 * val_sig: Pointer to a function to validate the ioctl signature. 549 * 550 * Returns: 551 * 0: success 552 * EFAULT: Copy in error of application EXT_IOCTL struct. 553 * EINVAL: Invalid version, signature. 554 * ENOMEM: Local allocation of EXT_IOCTL failed. 555 * 556 * Context: 557 * Kernel context. 558 */ 559 static int 560 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg, 561 int mode, boolean_t (*val_sig)(EXT_IOCTL *)) 562 { 563 int rval; 564 EXT_IOCTL *cmd; 565 566 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 567 568 /* Allocate local memory for EXT_IOCTL. */ 569 *cmd_struct = NULL; 570 cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP); 571 if (cmd == NULL) { 572 EL(ha, "failed, kmem_zalloc\n"); 573 return (ENOMEM); 574 } 575 /* Get argument structure. */ 576 rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode); 577 if (rval != 0) { 578 EL(ha, "failed, ddi_copyin\n"); 579 rval = EFAULT; 580 } else { 581 /* 582 * Check signature and the version. 583 * If either are not valid then neither is the 584 * structure so don't attempt to return any error status 585 * because we can't trust what caller's arg points to. 586 * Just return the errno. 
587 */ 588 if (val_sig(cmd) == 0) { 589 EL(ha, "failed, signature\n"); 590 rval = EINVAL; 591 } else if (cmd->Version > EXT_VERSION) { 592 EL(ha, "failed, version\n"); 593 rval = EINVAL; 594 } 595 } 596 597 if (rval == 0) { 598 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 599 *cmd_struct = cmd; 600 cmd->Status = EXT_STATUS_OK; 601 cmd->DetailStatus = 0; 602 } else { 603 kmem_free((void *)cmd, sizeof (EXT_IOCTL)); 604 } 605 606 return (rval); 607 } 608 609 /* 610 * ql_validate_signature 611 * Validate the signature string for an external ioctl call. 612 * 613 * Input: 614 * sg: Pointer to EXT_IOCTL signature to validate. 615 * 616 * Returns: 617 * B_TRUE: Signature is valid. 618 * B_FALSE: Signature is NOT valid. 619 * 620 * Context: 621 * Kernel context. 622 */ 623 static boolean_t 624 ql_validate_signature(EXT_IOCTL *cmd_struct) 625 { 626 /* 627 * Check signature. 628 * 629 * If signature is not valid then neither is the rest of 630 * the structure (e.g., can't trust it), so don't attempt 631 * to return any error status other than the errno. 632 */ 633 if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) { 634 QL_PRINT_2(CE_CONT, "failed,\n"); 635 return (B_FALSE); 636 } 637 638 return (B_TRUE); 639 } 640 641 /* 642 * ql_sdm_return 643 * Copies return data/status to application land for 644 * ioctl call using the SAN/Device Management EXT_IOCTL call interface. 645 * 646 * Input: 647 * ha: adapter state pointer. 648 * cmd: Pointer to kernel copy of requestor's EXT_IOCTL struct. 649 * ioctl_code: ioctl function to perform 650 * arg: EXT_IOCTL cmd data in application land. 651 * mode: flags 652 * 653 * Returns: 654 * 0: success 655 * EFAULT: Copy out error. 656 * 657 * Context: 658 * Kernel context. 
659 */ 660 /* ARGSUSED */ 661 static int 662 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode) 663 { 664 int rval = 0; 665 666 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 667 668 rval |= ddi_copyout((void *)&cmd->ResponseLen, 669 (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t), 670 mode); 671 672 rval |= ddi_copyout((void *)&cmd->Status, 673 (void *)&(((EXT_IOCTL*)arg)->Status), 674 sizeof (cmd->Status), mode); 675 rval |= ddi_copyout((void *)&cmd->DetailStatus, 676 (void *)&(((EXT_IOCTL*)arg)->DetailStatus), 677 sizeof (cmd->DetailStatus), mode); 678 679 kmem_free((void *)cmd, sizeof (EXT_IOCTL)); 680 681 if (rval != 0) { 682 /* Some copyout operation failed */ 683 EL(ha, "failed, ddi_copyout\n"); 684 return (EFAULT); 685 } 686 687 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 688 689 return (0); 690 } 691 692 /* 693 * ql_query 694 * Performs all EXT_CC_QUERY functions. 695 * 696 * Input: 697 * ha: adapter state pointer. 698 * cmd: Local EXT_IOCTL cmd struct pointer. 699 * mode: flags. 700 * 701 * Returns: 702 * None, request status indicated in cmd->Status. 703 * 704 * Context: 705 * Kernel context. 
706 */ 707 static void 708 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 709 { 710 QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, 711 cmd->SubCode); 712 713 /* case off on command subcode */ 714 switch (cmd->SubCode) { 715 case EXT_SC_QUERY_HBA_NODE: 716 ql_qry_hba_node(ha, cmd, mode); 717 break; 718 case EXT_SC_QUERY_HBA_PORT: 719 ql_qry_hba_port(ha, cmd, mode); 720 break; 721 case EXT_SC_QUERY_DISC_PORT: 722 ql_qry_disc_port(ha, cmd, mode); 723 break; 724 case EXT_SC_QUERY_DISC_TGT: 725 ql_qry_disc_tgt(ha, cmd, mode); 726 break; 727 case EXT_SC_QUERY_DRIVER: 728 ql_qry_driver(ha, cmd, mode); 729 break; 730 case EXT_SC_QUERY_FW: 731 ql_qry_fw(ha, cmd, mode); 732 break; 733 case EXT_SC_QUERY_CHIP: 734 ql_qry_chip(ha, cmd, mode); 735 break; 736 case EXT_SC_QUERY_CNA_PORT: 737 ql_qry_cna_port(ha, cmd, mode); 738 break; 739 case EXT_SC_QUERY_ADAPTER_VERSIONS: 740 ql_qry_adapter_versions(ha, cmd, mode); 741 break; 742 case EXT_SC_QUERY_DISC_LUN: 743 default: 744 /* function not supported. */ 745 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 746 EL(ha, "failed, Unsupported Subcode=%xh\n", 747 cmd->SubCode); 748 break; 749 } 750 751 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 752 } 753 754 /* 755 * ql_qry_hba_node 756 * Performs EXT_SC_QUERY_HBA_NODE subfunction. 757 * 758 * Input: 759 * ha: adapter state pointer. 760 * cmd: EXT_IOCTL cmd struct pointer. 761 * mode: flags. 762 * 763 * Returns: 764 * None, request status indicated in cmd->Status. 765 * 766 * Context: 767 * Kernel context. 
768 */ 769 static void 770 ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 771 { 772 EXT_HBA_NODE tmp_node = {0}; 773 uint_t len; 774 caddr_t bufp; 775 776 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 777 778 if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) { 779 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 780 cmd->DetailStatus = sizeof (EXT_HBA_NODE); 781 EL(ha, "failed, ResponseLen < EXT_HBA_NODE, " 782 "Len=%xh\n", cmd->ResponseLen); 783 cmd->ResponseLen = 0; 784 return; 785 } 786 787 /* fill in the values */ 788 789 bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN, 790 EXT_DEF_WWN_NAME_SIZE); 791 792 (void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation"); 793 794 (void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id); 795 796 bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3); 797 798 (void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION); 799 800 if (CFG_IST(ha, CFG_SBUS_CARD)) { 801 size_t verlen; 802 uint16_t w; 803 char *tmpptr; 804 805 verlen = strlen((char *)(tmp_node.DriverVersion)); 806 if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) { 807 EL(ha, "failed, No room for fpga version string\n"); 808 } else { 809 w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle, 810 (uint16_t *) 811 (ha->sbus_fpga_iobase + FPGA_REVISION)); 812 813 tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]); 814 if (tmpptr == NULL) { 815 EL(ha, "Unable to insert fpga version str\n"); 816 } else { 817 (void) sprintf(tmpptr, "%d.%d", 818 ((w & 0xf0) >> 4), (w & 0x0f)); 819 tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS; 820 } 821 } 822 } 823 824 (void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d", 825 ha->fw_major_version, ha->fw_minor_version, 826 ha->fw_subminor_version); 827 828 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) { 829 switch (ha->fw_attributes) { 830 case FWATTRIB_EF: 831 (void) strcat((char *)(tmp_node.FWVersion), " EF"); 832 break; 833 case FWATTRIB_TP: 834 (void) strcat((char *)(tmp_node.FWVersion), " TP"); 835 
break; 836 case FWATTRIB_IP: 837 (void) strcat((char *)(tmp_node.FWVersion), " IP"); 838 break; 839 case FWATTRIB_IPX: 840 (void) strcat((char *)(tmp_node.FWVersion), " IPX"); 841 break; 842 case FWATTRIB_FL: 843 (void) strcat((char *)(tmp_node.FWVersion), " FL"); 844 break; 845 case FWATTRIB_FPX: 846 (void) strcat((char *)(tmp_node.FWVersion), " FLX"); 847 break; 848 default: 849 break; 850 } 851 } 852 853 /* FCode version. */ 854 /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/ 855 if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC | 856 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp, 857 (int *)&len) == DDI_PROP_SUCCESS) { 858 if (len < EXT_DEF_MAX_STR_SIZE) { 859 bcopy(bufp, tmp_node.OptRomVersion, len); 860 } else { 861 bcopy(bufp, tmp_node.OptRomVersion, 862 EXT_DEF_MAX_STR_SIZE - 1); 863 tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] = 864 '\0'; 865 } 866 kmem_free(bufp, len); 867 } else { 868 (void) sprintf((char *)tmp_node.OptRomVersion, "0"); 869 } 870 tmp_node.PortCount = 1; 871 tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE; 872 873 if (ddi_copyout((void *)&tmp_node, 874 (void *)(uintptr_t)(cmd->ResponseAdr), 875 sizeof (EXT_HBA_NODE), mode) != 0) { 876 cmd->Status = EXT_STATUS_COPY_ERR; 877 cmd->ResponseLen = 0; 878 EL(ha, "failed, ddi_copyout\n"); 879 } else { 880 cmd->ResponseLen = sizeof (EXT_HBA_NODE); 881 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 882 } 883 } 884 885 /* 886 * ql_qry_hba_port 887 * Performs EXT_SC_QUERY_HBA_PORT subfunction. 888 * 889 * Input: 890 * ha: adapter state pointer. 891 * cmd: EXT_IOCTL cmd struct pointer. 892 * mode: flags. 893 * 894 * Returns: 895 * None, request status indicated in cmd->Status. 896 * 897 * Context: 898 * Kernel context. 
899 */ 900 static void 901 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 902 { 903 ql_link_t *link; 904 ql_tgt_t *tq; 905 ql_mbx_data_t mr; 906 EXT_HBA_PORT tmp_port = {0}; 907 int rval; 908 uint16_t port_cnt, tgt_cnt, index; 909 910 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 911 912 if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) { 913 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 914 cmd->DetailStatus = sizeof (EXT_HBA_PORT); 915 EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n", 916 cmd->ResponseLen); 917 cmd->ResponseLen = 0; 918 return; 919 } 920 921 /* fill in the values */ 922 923 bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN, 924 EXT_DEF_WWN_NAME_SIZE); 925 tmp_port.Id[0] = 0; 926 tmp_port.Id[1] = ha->d_id.b.domain; 927 tmp_port.Id[2] = ha->d_id.b.area; 928 tmp_port.Id[3] = ha->d_id.b.al_pa; 929 930 /* For now we are initiator only driver */ 931 tmp_port.Type = EXT_DEF_INITIATOR_DEV; 932 933 if (ha->task_daemon_flags & LOOP_DOWN) { 934 tmp_port.State = EXT_DEF_HBA_LOOP_DOWN; 935 } else if (DRIVER_SUSPENDED(ha)) { 936 tmp_port.State = EXT_DEF_HBA_SUSPENDED; 937 } else { 938 tmp_port.State = EXT_DEF_HBA_OK; 939 } 940 941 if (ha->flags & POINT_TO_POINT) { 942 tmp_port.Mode = EXT_DEF_P2P_MODE; 943 } else { 944 tmp_port.Mode = EXT_DEF_LOOP_MODE; 945 } 946 /* 947 * fill in the portspeed values. 
948 * 949 * default to not yet negotiated state 950 */ 951 tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED; 952 953 if (tmp_port.State == EXT_DEF_HBA_OK) { 954 switch (ha->iidma_rate) { 955 case IIDMA_RATE_1GB: 956 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT; 957 break; 958 case IIDMA_RATE_2GB: 959 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_2GBIT; 960 break; 961 case IIDMA_RATE_4GB: 962 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_4GBIT; 963 break; 964 case IIDMA_RATE_8GB: 965 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_8GBIT; 966 break; 967 case IIDMA_RATE_10GB: 968 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_10GBIT; 969 break; 970 default: 971 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_UNKNOWN; 972 EL(ha, "failed, data rate=%xh\n", mr.mb[1]); 973 break; 974 } 975 } 976 977 /* Report all supported port speeds */ 978 if (CFG_IST(ha, CFG_CTRL_25XX)) { 979 tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT | 980 EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT | 981 EXT_DEF_PORTSPEED_1GBIT); 982 /* 983 * Correct supported speeds based on type of 984 * sfp that is present 985 */ 986 switch (ha->sfp_stat) { 987 case 1: 988 /* no sfp detected */ 989 break; 990 case 2: 991 case 4: 992 /* 4GB sfp */ 993 tmp_port.PortSupportedSpeed &= 994 ~EXT_DEF_PORTSPEED_8GBIT; 995 break; 996 case 3: 997 case 5: 998 /* 8GB sfp */ 999 tmp_port.PortSupportedSpeed &= 1000 ~EXT_DEF_PORTSPEED_1GBIT; 1001 break; 1002 default: 1003 EL(ha, "sfp_stat: %xh\n", ha->sfp_stat); 1004 break; 1005 1006 } 1007 } else if (CFG_IST(ha, CFG_CTRL_8081)) { 1008 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT; 1009 } else if (CFG_IST(ha, CFG_CTRL_2422)) { 1010 tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT | 1011 EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT); 1012 } else if (CFG_IST(ha, CFG_CTRL_2300)) { 1013 tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT | 1014 EXT_DEF_PORTSPEED_1GBIT); 1015 } else if (CFG_IST(ha, CFG_CTRL_6322)) { 1016 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT; 1017 } 
else if (CFG_IST(ha, CFG_CTRL_2200)) { 1018 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT; 1019 } else { 1020 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN; 1021 EL(ha, "unknown HBA type: %xh\n", ha->device_id); 1022 } 1023 tmp_port.LinkState2 = LSB(ha->sfp_stat); 1024 port_cnt = 0; 1025 tgt_cnt = 0; 1026 1027 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) { 1028 for (link = ha->dev[index].first; link != NULL; 1029 link = link->next) { 1030 tq = link->base_address; 1031 1032 if (!VALID_TARGET_ID(ha, tq->loop_id)) { 1033 continue; 1034 } 1035 1036 port_cnt++; 1037 if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) { 1038 tgt_cnt++; 1039 } 1040 } 1041 } 1042 1043 tmp_port.DiscPortCount = port_cnt; 1044 tmp_port.DiscTargetCount = tgt_cnt; 1045 1046 tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME; 1047 1048 rval = ddi_copyout((void *)&tmp_port, 1049 (void *)(uintptr_t)(cmd->ResponseAdr), 1050 sizeof (EXT_HBA_PORT), mode); 1051 if (rval != 0) { 1052 cmd->Status = EXT_STATUS_COPY_ERR; 1053 cmd->ResponseLen = 0; 1054 EL(ha, "failed, ddi_copyout\n"); 1055 } else { 1056 cmd->ResponseLen = sizeof (EXT_HBA_PORT); 1057 QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n", 1058 ha->instance, port_cnt, tgt_cnt); 1059 } 1060 } 1061 1062 /* 1063 * ql_qry_disc_port 1064 * Performs EXT_SC_QUERY_DISC_PORT subfunction. 1065 * 1066 * Input: 1067 * ha: adapter state pointer. 1068 * cmd: EXT_IOCTL cmd struct pointer. 1069 * mode: flags. 1070 * 1071 * cmd->Instance = Port instance in fcport chain. 1072 * 1073 * Returns: 1074 * None, request status indicated in cmd->Status. 1075 * 1076 * Context: 1077 * Kernel context. 
 */
static void
ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_PORT	tmp_port = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* The user buffer must hold at least one EXT_DISC_PORT. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Walk the device queues until the cmd->Instance'th valid port
	 * is found; "inst" is incremented only for entries that pass
	 * VALID_TARGET_ID, so the caller's instance number indexes
	 * valid ports only.  On exit, link != NULL iff a match was hit.
	 */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;
			}
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_port.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_port.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
		return;
	}

	/* 24-bit FC port ID, split as domain/area/al_pa. */
	tmp_port.Id[0] = 0;
	tmp_port.Id[1] = tq->d_id.b.domain;
	tmp_port.Id[2] = tq->d_id.b.area;
	tmp_port.Id[3] = tq->d_id.b.al_pa;

	tmp_port.Type = 0;
	if (tq->flags & TQF_INITIATOR_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type |
		    EXT_DEF_INITIATOR_DEV);
	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
		/*
		 * Not yet known to be a tape device; issue an inquiry
		 * scan, which may set TQF_TAPE_DEVICE as a side effect.
		 * NOTE(review): the following "else if" arm can never be
		 * reached in the same pass (this branch already covers
		 * the tape-flag-clear case) - confirm this is intended.
		 */
		(void) ql_inq_scan(ha, tq, 1);
	} else if (tq->flags & TQF_TAPE_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
	}

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_port.Status = 0;
	tmp_port.Bus = 0;	/* Hard-coded for Solaris */

	bcopy(tq->port_name, &tmp_port.TargetId, 8);

	if (ddi_copyout((void *)&tmp_port,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_PORT), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}

/*
 * ql_qry_disc_tgt
 *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	cmd:		EXT_IOCTL cmd struct pointer.
 *	mode:		flags.
 *
 *	cmd->Instance = Port instance in fcport chain.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_TARGET	tmp_tgt = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
	    cmd->Instance);

	/* The user buffer must hold at least one EXT_DISC_TARGET. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* Scan port list for requested target and fill in the values */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Initiator-only ports are not targets; skip. */
			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
			    tq->flags & TQF_INITIATOR_DEVICE) {
				continue;
			}
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_tgt.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_tgt.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->DetailStatus = EXT_DSTATUS_TARGET;
		EL(ha, "failed, not found target=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
		return;
	}
	/* 24-bit FC port ID, split as domain/area/al_pa. */
	tmp_tgt.Id[0] = 0;
	tmp_tgt.Id[1] = tq->d_id.b.domain;
	tmp_tgt.Id[2] = tq->d_id.b.area;
	tmp_tgt.Id[3] = tq->d_id.b.al_pa;

	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);

	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
		/* Inquiry scan may discover and flag a tape device. */
		(void) ql_inq_scan(ha, tq, 1);
	}

	tmp_tgt.Type = 0;
	if (tq->flags & TQF_TAPE_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
	}

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_tgt.Status = 0;

	tmp_tgt.Bus = 0;	/* Hard-coded for Solaris. */

	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);

	if (ddi_copyout((void *)&tmp_tgt,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_TARGET), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}

/*
 * ql_qry_fw
 *	Performs EXT_SC_QUERY_FW subfunction.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
1295 */ 1296 static void 1297 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 1298 { 1299 EXT_FW fw_info = {0}; 1300 1301 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1302 1303 if (cmd->ResponseLen < sizeof (EXT_FW)) { 1304 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 1305 cmd->DetailStatus = sizeof (EXT_FW); 1306 EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n", 1307 cmd->ResponseLen); 1308 cmd->ResponseLen = 0; 1309 return; 1310 } 1311 1312 (void) sprintf((char *)(fw_info.Version), "%d.%02d.%02d", 1313 ha->fw_major_version, ha->fw_minor_version, 1314 ha->fw_subminor_version); 1315 1316 fw_info.Attrib = ha->fw_attributes; 1317 1318 if (ddi_copyout((void *)&fw_info, 1319 (void *)(uintptr_t)(cmd->ResponseAdr), 1320 sizeof (EXT_FW), mode) != 0) { 1321 cmd->Status = EXT_STATUS_COPY_ERR; 1322 cmd->ResponseLen = 0; 1323 EL(ha, "failed, ddi_copyout\n"); 1324 return; 1325 } else { 1326 cmd->ResponseLen = sizeof (EXT_FW); 1327 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1328 } 1329 } 1330 1331 /* 1332 * ql_qry_chip 1333 * Performs EXT_SC_QUERY_CHIP subfunction. 1334 * 1335 * Input: 1336 * ha: adapter state pointer. 1337 * cmd: EXT_IOCTL cmd struct pointer. 1338 * mode: flags. 1339 * 1340 * Returns: 1341 * None, request status indicated in cmd->Status. 1342 * 1343 * Context: 1344 * Kernel context. 
1345 */ 1346 static void 1347 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 1348 { 1349 EXT_CHIP chip = {0}; 1350 1351 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1352 1353 if (cmd->ResponseLen < sizeof (EXT_CHIP)) { 1354 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 1355 cmd->DetailStatus = sizeof (EXT_CHIP); 1356 EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n", 1357 cmd->ResponseLen); 1358 cmd->ResponseLen = 0; 1359 return; 1360 } 1361 1362 chip.VendorId = ha->ven_id; 1363 chip.DeviceId = ha->device_id; 1364 chip.SubVendorId = ha->subven_id; 1365 chip.SubSystemId = ha->subsys_id; 1366 chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0); 1367 chip.IoAddrLen = 0x100; 1368 chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1); 1369 chip.MemAddrLen = 0x100; 1370 chip.ChipRevID = ha->rev_id; 1371 if (ha->flags & FUNCTION_1) { 1372 chip.FuncNo = 1; 1373 } 1374 1375 if (ddi_copyout((void *)&chip, 1376 (void *)(uintptr_t)(cmd->ResponseAdr), 1377 sizeof (EXT_CHIP), mode) != 0) { 1378 cmd->Status = EXT_STATUS_COPY_ERR; 1379 cmd->ResponseLen = 0; 1380 EL(ha, "failed, ddi_copyout\n"); 1381 } else { 1382 cmd->ResponseLen = sizeof (EXT_CHIP); 1383 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1384 } 1385 } 1386 1387 /* 1388 * ql_qry_driver 1389 * Performs EXT_SC_QUERY_DRIVER subfunction. 1390 * 1391 * Input: 1392 * ha: adapter state pointer. 1393 * cmd: EXT_IOCTL cmd struct pointer. 1394 * mode: flags. 1395 * 1396 * Returns: 1397 * None, request status indicated in cmd->Status. 1398 * 1399 * Context: 1400 * Kernel context. 
1401 */ 1402 static void 1403 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 1404 { 1405 EXT_DRIVER qd = {0}; 1406 1407 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1408 1409 if (cmd->ResponseLen < sizeof (EXT_DRIVER)) { 1410 cmd->Status = EXT_STATUS_DATA_OVERRUN; 1411 cmd->DetailStatus = sizeof (EXT_DRIVER); 1412 EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n", 1413 cmd->ResponseLen); 1414 cmd->ResponseLen = 0; 1415 return; 1416 } 1417 1418 (void) strcpy((void *)&qd.Version[0], QL_VERSION); 1419 qd.NumOfBus = 1; /* Fixed for Solaris */ 1420 qd.TargetsPerBus = (uint16_t) 1421 (CFG_IST(ha, (CFG_CTRL_24258081 | CFG_EXT_FW_INTERFACE)) ? 1422 MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES); 1423 qd.LunsPerTarget = 2030; 1424 qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE; 1425 qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH; 1426 1427 if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr, 1428 sizeof (EXT_DRIVER), mode) != 0) { 1429 cmd->Status = EXT_STATUS_COPY_ERR; 1430 cmd->ResponseLen = 0; 1431 EL(ha, "failed, ddi_copyout\n"); 1432 } else { 1433 cmd->ResponseLen = sizeof (EXT_DRIVER); 1434 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1435 } 1436 } 1437 1438 /* 1439 * ql_fcct 1440 * IOCTL management server FC-CT passthrough. 1441 * 1442 * Input: 1443 * ha: adapter state pointer. 1444 * cmd: User space CT arguments pointer. 1445 * mode: flags. 1446 * 1447 * Returns: 1448 * None, request status indicated in cmd->Status. 1449 * 1450 * Context: 1451 * Kernel context. 1452 */ 1453 static void 1454 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 1455 { 1456 ql_mbx_iocb_t *pkt; 1457 ql_mbx_data_t mr; 1458 dma_mem_t *dma_mem; 1459 caddr_t pld; 1460 uint32_t pkt_size, pld_byte_cnt, *long_ptr; 1461 int rval; 1462 ql_ct_iu_preamble_t *ct; 1463 ql_xioctl_t *xp = ha->xioctl; 1464 ql_tgt_t tq; 1465 uint16_t comp_status, loop_id; 1466 1467 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1468 1469 /* Get CT argument structure. 
*/ 1470 if ((ha->topology & QL_SNS_CONNECTION) == 0) { 1471 EL(ha, "failed, No switch\n"); 1472 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 1473 cmd->ResponseLen = 0; 1474 return; 1475 } 1476 1477 if (DRIVER_SUSPENDED(ha)) { 1478 EL(ha, "failed, LOOP_NOT_READY\n"); 1479 cmd->Status = EXT_STATUS_BUSY; 1480 cmd->ResponseLen = 0; 1481 return; 1482 } 1483 1484 /* Login management server device. */ 1485 if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) { 1486 tq.d_id.b.al_pa = 0xfa; 1487 tq.d_id.b.area = 0xff; 1488 tq.d_id.b.domain = 0xff; 1489 tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ? 1490 MANAGEMENT_SERVER_24XX_LOOP_ID : 1491 MANAGEMENT_SERVER_LOOP_ID); 1492 rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr); 1493 if (rval != QL_SUCCESS) { 1494 EL(ha, "failed, server login\n"); 1495 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 1496 cmd->ResponseLen = 0; 1497 return; 1498 } else { 1499 xp->flags |= QL_MGMT_SERVER_LOGIN; 1500 } 1501 } 1502 1503 QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance); 1504 QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL)); 1505 1506 /* Allocate a DMA Memory Descriptor */ 1507 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP); 1508 if (dma_mem == NULL) { 1509 EL(ha, "failed, kmem_zalloc\n"); 1510 cmd->Status = EXT_STATUS_NO_MEMORY; 1511 cmd->ResponseLen = 0; 1512 return; 1513 } 1514 /* Determine maximum buffer size. */ 1515 if (cmd->RequestLen < cmd->ResponseLen) { 1516 pld_byte_cnt = cmd->ResponseLen; 1517 } else { 1518 pld_byte_cnt = cmd->RequestLen; 1519 } 1520 1521 /* Allocate command block. */ 1522 pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt); 1523 pkt = kmem_zalloc(pkt_size, KM_SLEEP); 1524 pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t); 1525 1526 /* Get command payload data. 
*/ 1527 if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld, 1528 cmd->RequestLen, mode) != cmd->RequestLen) { 1529 EL(ha, "failed, get_buffer_data\n"); 1530 kmem_free(pkt, pkt_size); 1531 cmd->Status = EXT_STATUS_COPY_ERR; 1532 cmd->ResponseLen = 0; 1533 return; 1534 } 1535 1536 /* Get DMA memory for the IOCB */ 1537 if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA, 1538 QL_DMA_RING_ALIGN) != QL_SUCCESS) { 1539 cmn_err(CE_WARN, "%s(%d): DMA memory " 1540 "alloc failed", QL_NAME, ha->instance); 1541 kmem_free(pkt, pkt_size); 1542 kmem_free(dma_mem, sizeof (dma_mem_t)); 1543 cmd->Status = EXT_STATUS_MS_NO_RESPONSE; 1544 cmd->ResponseLen = 0; 1545 return; 1546 } 1547 1548 /* Copy out going payload data to IOCB DMA buffer. */ 1549 ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld, 1550 (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR); 1551 1552 /* Sync IOCB DMA buffer. */ 1553 (void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt, 1554 DDI_DMA_SYNC_FORDEV); 1555 1556 /* 1557 * Setup IOCB 1558 */ 1559 ct = (ql_ct_iu_preamble_t *)pld; 1560 if (CFG_IST(ha, CFG_CTRL_24258081)) { 1561 pkt->ms24.entry_type = CT_PASSTHRU_TYPE; 1562 pkt->ms24.entry_count = 1; 1563 1564 pkt->ms24.vp_index = ha->vp_index; 1565 1566 /* Set loop ID */ 1567 pkt->ms24.n_port_hdl = (uint16_t) 1568 (ct->gs_type == GS_TYPE_DIR_SERVER ? 1569 LE_16(SNS_24XX_HDL) : 1570 LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID)); 1571 1572 /* Set ISP command timeout. */ 1573 pkt->ms24.timeout = LE_16(120); 1574 1575 /* Set cmd/response data segment counts. */ 1576 pkt->ms24.cmd_dseg_count = LE_16(1); 1577 pkt->ms24.resp_dseg_count = LE_16(1); 1578 1579 /* Load ct cmd byte count. */ 1580 pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen); 1581 1582 /* Load ct rsp byte count. */ 1583 pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen); 1584 1585 long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address; 1586 1587 /* Load MS command entry data segments. 
*/ 1588 *long_ptr++ = (uint32_t) 1589 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 1590 *long_ptr++ = (uint32_t) 1591 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 1592 *long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen)); 1593 1594 /* Load MS response entry data segments. */ 1595 *long_ptr++ = (uint32_t) 1596 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 1597 *long_ptr++ = (uint32_t) 1598 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 1599 *long_ptr = (uint32_t)LE_32(cmd->ResponseLen); 1600 1601 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, 1602 sizeof (ql_mbx_iocb_t)); 1603 1604 comp_status = (uint16_t)LE_16(pkt->sts24.comp_status); 1605 if (comp_status == CS_DATA_UNDERRUN) { 1606 if ((BE_16(ct->max_residual_size)) == 0) { 1607 comp_status = CS_COMPLETE; 1608 } 1609 } 1610 1611 if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) != 1612 0) { 1613 EL(ha, "failed, I/O timeout or " 1614 "es=%xh, ss_l=%xh, rval=%xh\n", 1615 pkt->sts24.entry_status, 1616 pkt->sts24.scsi_status_l, rval); 1617 kmem_free(pkt, pkt_size); 1618 ql_free_dma_resource(ha, dma_mem); 1619 kmem_free(dma_mem, sizeof (dma_mem_t)); 1620 cmd->Status = EXT_STATUS_MS_NO_RESPONSE; 1621 cmd->ResponseLen = 0; 1622 return; 1623 } 1624 } else { 1625 pkt->ms.entry_type = MS_TYPE; 1626 pkt->ms.entry_count = 1; 1627 1628 /* Set loop ID */ 1629 loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ? 1630 SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID); 1631 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) { 1632 pkt->ms.loop_id_l = LSB(loop_id); 1633 pkt->ms.loop_id_h = MSB(loop_id); 1634 } else { 1635 pkt->ms.loop_id_h = LSB(loop_id); 1636 } 1637 1638 /* Set ISP command timeout. */ 1639 pkt->ms.timeout = LE_16(120); 1640 1641 /* Set data segment counts. */ 1642 pkt->ms.cmd_dseg_count_l = 1; 1643 pkt->ms.total_dseg_count = LE_16(2); 1644 1645 /* Response total byte count. */ 1646 pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen); 1647 pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen); 1648 1649 /* Command total byte count. 
*/ 1650 pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen); 1651 pkt->ms.dseg_0_length = LE_32(cmd->RequestLen); 1652 1653 /* Load command/response data segments. */ 1654 pkt->ms.dseg_0_address[0] = (uint32_t) 1655 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 1656 pkt->ms.dseg_0_address[1] = (uint32_t) 1657 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 1658 pkt->ms.dseg_1_address[0] = (uint32_t) 1659 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 1660 pkt->ms.dseg_1_address[1] = (uint32_t) 1661 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 1662 1663 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, 1664 sizeof (ql_mbx_iocb_t)); 1665 1666 comp_status = (uint16_t)LE_16(pkt->sts.comp_status); 1667 if (comp_status == CS_DATA_UNDERRUN) { 1668 if ((BE_16(ct->max_residual_size)) == 0) { 1669 comp_status = CS_COMPLETE; 1670 } 1671 } 1672 if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) { 1673 EL(ha, "failed, I/O timeout or " 1674 "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval); 1675 kmem_free(pkt, pkt_size); 1676 ql_free_dma_resource(ha, dma_mem); 1677 kmem_free(dma_mem, sizeof (dma_mem_t)); 1678 cmd->Status = EXT_STATUS_MS_NO_RESPONSE; 1679 cmd->ResponseLen = 0; 1680 return; 1681 } 1682 } 1683 1684 /* Sync in coming DMA buffer. */ 1685 (void) ddi_dma_sync(dma_mem->dma_handle, 0, 1686 pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL); 1687 /* Copy in coming DMA data. */ 1688 ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld, 1689 (uint8_t *)dma_mem->bp, pld_byte_cnt, 1690 DDI_DEV_AUTOINCR); 1691 1692 /* Copy response payload from DMA buffer to application. */ 1693 if (cmd->ResponseLen != 0) { 1694 QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance, 1695 cmd->ResponseLen); 1696 QL_DUMP_9(pld, 8, cmd->ResponseLen); 1697 1698 /* Send response payload. 
*/ 1699 if (ql_send_buffer_data(pld, 1700 (caddr_t)(uintptr_t)cmd->ResponseAdr, 1701 cmd->ResponseLen, mode) != cmd->ResponseLen) { 1702 EL(ha, "failed, send_buffer_data\n"); 1703 cmd->Status = EXT_STATUS_COPY_ERR; 1704 cmd->ResponseLen = 0; 1705 } 1706 } 1707 1708 kmem_free(pkt, pkt_size); 1709 ql_free_dma_resource(ha, dma_mem); 1710 kmem_free(dma_mem, sizeof (dma_mem_t)); 1711 1712 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1713 } 1714 1715 /* 1716 * ql_aen_reg 1717 * IOCTL management server Asynchronous Event Tracking Enable/Disable. 1718 * 1719 * Input: 1720 * ha: adapter state pointer. 1721 * cmd: EXT_IOCTL cmd struct pointer. 1722 * mode: flags. 1723 * 1724 * Returns: 1725 * None, request status indicated in cmd->Status. 1726 * 1727 * Context: 1728 * Kernel context. 1729 */ 1730 static void 1731 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 1732 { 1733 EXT_REG_AEN reg_struct; 1734 int rval = 0; 1735 ql_xioctl_t *xp = ha->xioctl; 1736 1737 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1738 1739 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, ®_struct, 1740 cmd->RequestLen, mode); 1741 1742 if (rval == 0) { 1743 if (reg_struct.Enable) { 1744 xp->flags |= QL_AEN_TRACKING_ENABLE; 1745 } else { 1746 xp->flags &= ~QL_AEN_TRACKING_ENABLE; 1747 /* Empty the queue. */ 1748 INTR_LOCK(ha); 1749 xp->aen_q_head = 0; 1750 xp->aen_q_tail = 0; 1751 INTR_UNLOCK(ha); 1752 } 1753 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1754 } else { 1755 cmd->Status = EXT_STATUS_COPY_ERR; 1756 EL(ha, "failed, ddi_copyin\n"); 1757 } 1758 } 1759 1760 /* 1761 * ql_aen_get 1762 * IOCTL management server Asynchronous Event Record Transfer. 1763 * 1764 * Input: 1765 * ha: adapter state pointer. 1766 * cmd: EXT_IOCTL cmd struct pointer. 1767 * mode: flags. 1768 * 1769 * Returns: 1770 * None, request status indicated in cmd->Status. 1771 * 1772 * Context: 1773 * Kernel context. 
 */
static void
ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	out_size;
	EXT_ASYNC_EVENT	*tmp_q;
	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
	uint8_t		i;
	uint8_t		queue_cnt;
	uint8_t		request_cnt;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Compute the number of events that can be returned */
	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));

	/* The caller must be able to accept the entire queue. */
	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
		    "Len=%xh\n", request_cnt);
		cmd->ResponseLen = 0;
		return;
	}

	/* 1st: Make a local copy of the entire queue content. */
	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
	queue_cnt = 0;

	INTR_LOCK(ha);
	i = xp->aen_q_head;

	/*
	 * Drain the ring from head to tail under INTR_LOCK; a zero
	 * AsyncEventCode marks an empty slot.  Each copied slot is
	 * cleared so every event is delivered at most once.
	 */
	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
		if (tmp_q[i].AsyncEventCode != 0) {
			bcopy(&tmp_q[i], &aen[queue_cnt],
			    sizeof (EXT_ASYNC_EVENT));
			queue_cnt++;
			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
		}
		if (i == xp->aen_q_tail) {
			/* done. */
			break;
		}
		i++;
		if (i == EXT_DEF_MAX_AEN_QUEUE) {
			/* wrap around to the start of the ring */
			i = 0;
		}
	}

	/* Empty the queue. */
	xp->aen_q_head = 0;
	xp->aen_q_tail = 0;

	INTR_UNLOCK(ha);

	/* 2nd: Now transfer the queue content to user buffer */
	/* Copy the entire queue to user's buffer. */
	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
	if (queue_cnt == 0) {
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&aen[0],
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    out_size, mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = out_size;
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}

/*
 * ql_enqueue_aen
 *	Adds an asynchronous event record to the AEN tracking ring.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	event_code:	async event code of the event to add to queue.
 *	payload:	event payload for the queue.
 *	INTR_LOCK must be already obtained.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
{
	uint8_t			new_entry;	/* index to current entry */
	uint16_t		*mbx;
	EXT_ASYNC_EVENT		*aen_queue;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
	    event_code);

	/* No xioctl context yet (e.g. early in attach); drop the event. */
	if (xp == NULL) {
		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
		return;
	}
	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;

	/* A nonzero event code means the tail slot is still occupied. */
	if (aen_queue[xp->aen_q_tail].AsyncEventCode != NULL) {
		/* Need to change queue pointers to make room. */

		/* Increment tail for adding new entry. */
		xp->aen_q_tail++;
		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
			xp->aen_q_tail = 0;
		}
		if (xp->aen_q_head == xp->aen_q_tail) {
			/*
			 * We're overwriting the oldest entry, so need to
			 * update the head pointer.
			 */
			xp->aen_q_head++;
			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
				xp->aen_q_head = 0;
			}
		}
	}

	new_entry = xp->aen_q_tail;
	aen_queue[new_entry].AsyncEventCode = event_code;

	/* Update payload */
	if (payload != NULL) {
		switch (event_code) {
		case MBA_LIP_OCCURRED:
		case MBA_LOOP_UP:
		case MBA_LOOP_DOWN:
		case MBA_LIP_F8:
		case MBA_LIP_RESET:
		case MBA_PORT_UPDATE:
			/* These events carry no payload data. */
			break;
		case MBA_RSCN_UPDATE:
			/* Payload is the mailbox register array. */
			mbx = (uint16_t *)payload;
			/* al_pa */
			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
			    LSB(mbx[2]);
			/* area */
			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
			    MSB(mbx[2]);
			/* domain */
			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
			    LSB(mbx[1]);
			/* save in big endian */
			BIG_ENDIAN_24(&aen_queue[new_entry].
			    Payload.RSCN.RSCNInfo[0]);

			aen_queue[new_entry].Payload.RSCN.AddrFormat =
			    MSB(mbx[1]);

			break;
		default:
			/* Not supported */
			EL(ha, "failed, event code not supported=%xh\n",
			    event_code);
			aen_queue[new_entry].AsyncEventCode = 0;
			break;
		}
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_scsi_passthru
 *	IOCTL SCSI passthrough.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	User space SCSI command pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
1954 */ 1955 static void 1956 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 1957 { 1958 ql_mbx_iocb_t *pkt; 1959 ql_mbx_data_t mr; 1960 dma_mem_t *dma_mem; 1961 caddr_t pld; 1962 uint32_t pkt_size, pld_size; 1963 uint16_t qlnt, retries, cnt, cnt2; 1964 uint8_t *name; 1965 EXT_FC_SCSI_PASSTHRU *ufc_req; 1966 EXT_SCSI_PASSTHRU *usp_req; 1967 int rval; 1968 union _passthru { 1969 EXT_SCSI_PASSTHRU sp_cmd; 1970 EXT_FC_SCSI_PASSTHRU fc_cmd; 1971 } pt_req; /* Passthru request */ 1972 uint32_t status, sense_sz = 0; 1973 ql_tgt_t *tq = NULL; 1974 EXT_SCSI_PASSTHRU *sp_req = &pt_req.sp_cmd; 1975 EXT_FC_SCSI_PASSTHRU *fc_req = &pt_req.fc_cmd; 1976 1977 /* SCSI request struct for SCSI passthrough IOs. */ 1978 struct { 1979 uint16_t lun; 1980 uint16_t sense_length; /* Sense buffer size */ 1981 size_t resid; /* Residual */ 1982 uint8_t *cdbp; /* Requestor's CDB */ 1983 uint8_t *u_sense; /* Requestor's sense buffer */ 1984 uint8_t cdb_len; /* Requestor's CDB length */ 1985 uint8_t direction; 1986 } scsi_req; 1987 1988 struct { 1989 uint8_t *rsp_info; 1990 uint8_t *req_sense_data; 1991 uint32_t residual_length; 1992 uint32_t rsp_info_length; 1993 uint32_t req_sense_length; 1994 uint16_t comp_status; 1995 uint8_t state_flags_l; 1996 uint8_t state_flags_h; 1997 uint8_t scsi_status_l; 1998 uint8_t scsi_status_h; 1999 } sts; 2000 2001 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2002 2003 /* Verify Sub Code and set cnt to needed request size. 
*/ 2004 if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) { 2005 pld_size = sizeof (EXT_SCSI_PASSTHRU); 2006 } else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) { 2007 pld_size = sizeof (EXT_FC_SCSI_PASSTHRU); 2008 } else { 2009 EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode); 2010 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 2011 cmd->ResponseLen = 0; 2012 return; 2013 } 2014 2015 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP); 2016 if (dma_mem == NULL) { 2017 EL(ha, "failed, kmem_zalloc\n"); 2018 cmd->Status = EXT_STATUS_NO_MEMORY; 2019 cmd->ResponseLen = 0; 2020 return; 2021 } 2022 /* Verify the size of and copy in the passthru request structure. */ 2023 if (cmd->RequestLen != pld_size) { 2024 /* Return error */ 2025 EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n", 2026 cmd->RequestLen, pld_size); 2027 cmd->Status = EXT_STATUS_INVALID_PARAM; 2028 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN; 2029 cmd->ResponseLen = 0; 2030 return; 2031 } 2032 2033 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req, 2034 pld_size, mode) != 0) { 2035 EL(ha, "failed, ddi_copyin\n"); 2036 cmd->Status = EXT_STATUS_COPY_ERR; 2037 cmd->ResponseLen = 0; 2038 return; 2039 } 2040 2041 /* 2042 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req 2043 * request data structure. 
2044 */ 2045 if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) { 2046 scsi_req.lun = sp_req->TargetAddr.Lun; 2047 scsi_req.sense_length = sizeof (sp_req->SenseData); 2048 scsi_req.cdbp = &sp_req->Cdb[0]; 2049 scsi_req.cdb_len = sp_req->CdbLength; 2050 scsi_req.direction = sp_req->Direction; 2051 usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr; 2052 scsi_req.u_sense = &usp_req->SenseData[0]; 2053 cmd->DetailStatus = EXT_DSTATUS_TARGET; 2054 2055 qlnt = QLNT_PORT; 2056 name = (uint8_t *)&sp_req->TargetAddr.Target; 2057 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n", 2058 ha->instance, cmd->SubCode, sp_req->TargetAddr.Target); 2059 tq = ql_find_port(ha, name, qlnt); 2060 } else { 2061 /* 2062 * Must be FC PASSTHRU, verified above. 2063 */ 2064 if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) { 2065 qlnt = QLNT_PORT; 2066 name = &fc_req->FCScsiAddr.DestAddr.WWPN[0]; 2067 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, " 2068 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n", 2069 ha->instance, cmd->SubCode, name[0], name[1], 2070 name[2], name[3], name[4], name[5], name[6], 2071 name[7]); 2072 tq = ql_find_port(ha, name, qlnt); 2073 } else if (fc_req->FCScsiAddr.DestType == 2074 EXT_DEF_DESTTYPE_WWNN) { 2075 qlnt = QLNT_NODE; 2076 name = &fc_req->FCScsiAddr.DestAddr.WWNN[0]; 2077 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, " 2078 "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n", 2079 ha->instance, cmd->SubCode, name[0], name[1], 2080 name[2], name[3], name[4], name[5], name[6], 2081 name[7]); 2082 tq = ql_find_port(ha, name, qlnt); 2083 } else if (fc_req->FCScsiAddr.DestType == 2084 EXT_DEF_DESTTYPE_PORTID) { 2085 qlnt = QLNT_PID; 2086 name = &fc_req->FCScsiAddr.DestAddr.Id[0]; 2087 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID=" 2088 "%02x%02x%02x\n", ha->instance, cmd->SubCode, 2089 name[0], name[1], name[2]); 2090 tq = ql_find_port(ha, name, qlnt); 2091 } else { 2092 EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n", 2093 cmd->SubCode, fc_req->FCScsiAddr.DestType); 
2094 cmd->Status = EXT_STATUS_INVALID_PARAM; 2095 cmd->ResponseLen = 0; 2096 return; 2097 } 2098 scsi_req.lun = fc_req->FCScsiAddr.Lun; 2099 scsi_req.sense_length = sizeof (fc_req->SenseData); 2100 scsi_req.cdbp = &sp_req->Cdb[0]; 2101 scsi_req.cdb_len = sp_req->CdbLength; 2102 ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr; 2103 scsi_req.u_sense = &ufc_req->SenseData[0]; 2104 scsi_req.direction = fc_req->Direction; 2105 } 2106 2107 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) { 2108 EL(ha, "failed, fc_port not found\n"); 2109 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 2110 cmd->ResponseLen = 0; 2111 return; 2112 } 2113 2114 if (tq->flags & TQF_NEED_AUTHENTICATION) { 2115 EL(ha, "target not available; loopid=%xh\n", tq->loop_id); 2116 cmd->Status = EXT_STATUS_DEVICE_OFFLINE; 2117 cmd->ResponseLen = 0; 2118 return; 2119 } 2120 2121 /* Allocate command block. */ 2122 if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN || 2123 scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) && 2124 cmd->ResponseLen) { 2125 pld_size = cmd->ResponseLen; 2126 pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size); 2127 pkt = kmem_zalloc(pkt_size, KM_SLEEP); 2128 pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t); 2129 2130 /* Get DMA memory for the IOCB */ 2131 if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA, 2132 QL_DMA_DATA_ALIGN) != QL_SUCCESS) { 2133 cmn_err(CE_WARN, "%s(%d): request queue DMA memory " 2134 "alloc failed", QL_NAME, ha->instance); 2135 kmem_free(pkt, pkt_size); 2136 cmd->Status = EXT_STATUS_MS_NO_RESPONSE; 2137 cmd->ResponseLen = 0; 2138 return; 2139 } 2140 2141 if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) { 2142 scsi_req.direction = (uint8_t) 2143 (CFG_IST(ha, CFG_CTRL_24258081) ? 2144 CF_RD : CF_DATA_IN | CF_STAG); 2145 } else { 2146 scsi_req.direction = (uint8_t) 2147 (CFG_IST(ha, CFG_CTRL_24258081) ? 2148 CF_WR : CF_DATA_OUT | CF_STAG); 2149 cmd->ResponseLen = 0; 2150 2151 /* Get command payload. 
*/ 2152 if (ql_get_buffer_data( 2153 (caddr_t)(uintptr_t)cmd->ResponseAdr, 2154 pld, pld_size, mode) != pld_size) { 2155 EL(ha, "failed, get_buffer_data\n"); 2156 cmd->Status = EXT_STATUS_COPY_ERR; 2157 2158 kmem_free(pkt, pkt_size); 2159 ql_free_dma_resource(ha, dma_mem); 2160 kmem_free(dma_mem, sizeof (dma_mem_t)); 2161 return; 2162 } 2163 2164 /* Copy out going data to DMA buffer. */ 2165 ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld, 2166 (uint8_t *)dma_mem->bp, pld_size, 2167 DDI_DEV_AUTOINCR); 2168 2169 /* Sync DMA buffer. */ 2170 (void) ddi_dma_sync(dma_mem->dma_handle, 0, 2171 dma_mem->size, DDI_DMA_SYNC_FORDEV); 2172 } 2173 } else { 2174 scsi_req.direction = (uint8_t) 2175 (CFG_IST(ha, CFG_CTRL_24258081) ? 0 : CF_STAG); 2176 cmd->ResponseLen = 0; 2177 2178 pkt_size = sizeof (ql_mbx_iocb_t); 2179 pkt = kmem_zalloc(pkt_size, KM_SLEEP); 2180 pld = NULL; 2181 pld_size = 0; 2182 } 2183 2184 /* retries = ha->port_down_retry_count; */ 2185 retries = 1; 2186 cmd->Status = EXT_STATUS_OK; 2187 cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO; 2188 2189 QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance); 2190 QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len); 2191 2192 do { 2193 if (DRIVER_SUSPENDED(ha)) { 2194 sts.comp_status = CS_LOOP_DOWN_ABORT; 2195 break; 2196 } 2197 2198 if (CFG_IST(ha, CFG_CTRL_24258081)) { 2199 pkt->cmd24.entry_type = IOCB_CMD_TYPE_7; 2200 pkt->cmd24.entry_count = 1; 2201 2202 /* Set LUN number */ 2203 pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun); 2204 pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun); 2205 2206 /* Set N_port handle */ 2207 pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id); 2208 2209 /* Set VP Index */ 2210 pkt->cmd24.vp_index = ha->vp_index; 2211 2212 /* Set target ID */ 2213 pkt->cmd24.target_id[0] = tq->d_id.b.al_pa; 2214 pkt->cmd24.target_id[1] = tq->d_id.b.area; 2215 pkt->cmd24.target_id[2] = tq->d_id.b.domain; 2216 2217 /* Set ISP command timeout. 
*/ 2218 pkt->cmd24.timeout = (uint16_t)LE_16(15); 2219 2220 /* Load SCSI CDB */ 2221 ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp, 2222 pkt->cmd24.scsi_cdb, scsi_req.cdb_len, 2223 DDI_DEV_AUTOINCR); 2224 for (cnt = 0; cnt < MAX_CMDSZ; 2225 cnt = (uint16_t)(cnt + 4)) { 2226 ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb 2227 + cnt, 4); 2228 } 2229 2230 /* Set tag queue control flags */ 2231 pkt->cmd24.task = TA_STAG; 2232 2233 if (pld_size) { 2234 /* Set transfer direction. */ 2235 pkt->cmd24.control_flags = scsi_req.direction; 2236 2237 /* Set data segment count. */ 2238 pkt->cmd24.dseg_count = LE_16(1); 2239 2240 /* Load total byte count. */ 2241 pkt->cmd24.total_byte_count = LE_32(pld_size); 2242 2243 /* Load data descriptor. */ 2244 pkt->cmd24.dseg_0_address[0] = (uint32_t) 2245 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 2246 pkt->cmd24.dseg_0_address[1] = (uint32_t) 2247 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 2248 pkt->cmd24.dseg_0_length = LE_32(pld_size); 2249 } 2250 } else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) { 2251 pkt->cmd3.entry_type = IOCB_CMD_TYPE_3; 2252 pkt->cmd3.entry_count = 1; 2253 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) { 2254 pkt->cmd3.target_l = LSB(tq->loop_id); 2255 pkt->cmd3.target_h = MSB(tq->loop_id); 2256 } else { 2257 pkt->cmd3.target_h = LSB(tq->loop_id); 2258 } 2259 pkt->cmd3.lun_l = LSB(scsi_req.lun); 2260 pkt->cmd3.lun_h = MSB(scsi_req.lun); 2261 pkt->cmd3.control_flags_l = scsi_req.direction; 2262 pkt->cmd3.timeout = LE_16(15); 2263 for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) { 2264 pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt]; 2265 } 2266 if (pld_size) { 2267 pkt->cmd3.dseg_count = LE_16(1); 2268 pkt->cmd3.byte_count = LE_32(pld_size); 2269 pkt->cmd3.dseg_0_address[0] = (uint32_t) 2270 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 2271 pkt->cmd3.dseg_0_address[1] = (uint32_t) 2272 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 2273 pkt->cmd3.dseg_0_length = LE_32(pld_size); 2274 } 2275 } else { 2276 pkt->cmd.entry_type = 
IOCB_CMD_TYPE_2; 2277 pkt->cmd.entry_count = 1; 2278 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) { 2279 pkt->cmd.target_l = LSB(tq->loop_id); 2280 pkt->cmd.target_h = MSB(tq->loop_id); 2281 } else { 2282 pkt->cmd.target_h = LSB(tq->loop_id); 2283 } 2284 pkt->cmd.lun_l = LSB(scsi_req.lun); 2285 pkt->cmd.lun_h = MSB(scsi_req.lun); 2286 pkt->cmd.control_flags_l = scsi_req.direction; 2287 pkt->cmd.timeout = LE_16(15); 2288 for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) { 2289 pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt]; 2290 } 2291 if (pld_size) { 2292 pkt->cmd.dseg_count = LE_16(1); 2293 pkt->cmd.byte_count = LE_32(pld_size); 2294 pkt->cmd.dseg_0_address = (uint32_t) 2295 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 2296 pkt->cmd.dseg_0_length = LE_32(pld_size); 2297 } 2298 } 2299 /* Go issue command and wait for completion. */ 2300 QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance); 2301 QL_DUMP_9(pkt, 8, pkt_size); 2302 2303 status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); 2304 2305 if (pld_size) { 2306 /* Sync in coming DMA buffer. */ 2307 (void) ddi_dma_sync(dma_mem->dma_handle, 0, 2308 dma_mem->size, DDI_DMA_SYNC_FORKERNEL); 2309 /* Copy in coming DMA data. */ 2310 ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld, 2311 (uint8_t *)dma_mem->bp, pld_size, 2312 DDI_DEV_AUTOINCR); 2313 } 2314 2315 if (CFG_IST(ha, CFG_CTRL_24258081)) { 2316 pkt->sts24.entry_status = (uint8_t) 2317 (pkt->sts24.entry_status & 0x3c); 2318 } else { 2319 pkt->sts.entry_status = (uint8_t) 2320 (pkt->sts.entry_status & 0x7e); 2321 } 2322 2323 if (status == QL_SUCCESS && pkt->sts.entry_status != 0) { 2324 EL(ha, "failed, entry_status=%xh, d_id=%xh\n", 2325 pkt->sts.entry_status, tq->d_id.b24); 2326 status = QL_FUNCTION_PARAMETER_ERROR; 2327 } 2328 2329 sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ? 2330 LE_16(pkt->sts24.comp_status) : 2331 LE_16(pkt->sts.comp_status)); 2332 2333 /* 2334 * We have verified about all the request that can be so far. 
2335 * Now we need to start verification of our ability to 2336 * actually issue the CDB. 2337 */ 2338 if (DRIVER_SUSPENDED(ha)) { 2339 sts.comp_status = CS_LOOP_DOWN_ABORT; 2340 break; 2341 } else if (status == QL_SUCCESS && 2342 (sts.comp_status == CS_PORT_LOGGED_OUT || 2343 sts.comp_status == CS_PORT_UNAVAILABLE)) { 2344 EL(ha, "login retry d_id=%xh\n", tq->d_id.b24); 2345 if (tq->flags & TQF_FABRIC_DEVICE) { 2346 rval = ql_login_fport(ha, tq, tq->loop_id, 2347 LFF_NO_PLOGI, &mr); 2348 if (rval != QL_SUCCESS) { 2349 EL(ha, "failed, login_fport=%xh, " 2350 "d_id=%xh\n", rval, tq->d_id.b24); 2351 } 2352 } else { 2353 rval = ql_login_lport(ha, tq, tq->loop_id, 2354 LLF_NONE); 2355 if (rval != QL_SUCCESS) { 2356 EL(ha, "failed, login_lport=%xh, " 2357 "d_id=%xh\n", rval, tq->d_id.b24); 2358 } 2359 } 2360 } else { 2361 break; 2362 } 2363 2364 bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t)); 2365 2366 } while (retries--); 2367 2368 if (sts.comp_status == CS_LOOP_DOWN_ABORT) { 2369 /* Cannot issue command now, maybe later */ 2370 EL(ha, "failed, suspended\n"); 2371 kmem_free(pkt, pkt_size); 2372 ql_free_dma_resource(ha, dma_mem); 2373 kmem_free(dma_mem, sizeof (dma_mem_t)); 2374 cmd->Status = EXT_STATUS_SUSPENDED; 2375 cmd->ResponseLen = 0; 2376 return; 2377 } 2378 2379 if (status != QL_SUCCESS) { 2380 /* Command error */ 2381 EL(ha, "failed, I/O\n"); 2382 kmem_free(pkt, pkt_size); 2383 ql_free_dma_resource(ha, dma_mem); 2384 kmem_free(dma_mem, sizeof (dma_mem_t)); 2385 cmd->Status = EXT_STATUS_ERR; 2386 cmd->DetailStatus = status; 2387 cmd->ResponseLen = 0; 2388 return; 2389 } 2390 2391 /* Setup status. */ 2392 if (CFG_IST(ha, CFG_CTRL_24258081)) { 2393 sts.scsi_status_l = pkt->sts24.scsi_status_l; 2394 sts.scsi_status_h = pkt->sts24.scsi_status_h; 2395 2396 /* Setup residuals. */ 2397 sts.residual_length = LE_32(pkt->sts24.residual_length); 2398 2399 /* Setup state flags. 
*/ 2400 sts.state_flags_l = pkt->sts24.state_flags_l; 2401 sts.state_flags_h = pkt->sts24.state_flags_h; 2402 if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) { 2403 sts.state_flags_h = (uint8_t)(sts.state_flags_h | 2404 SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD | 2405 SF_XFERRED_DATA | SF_GOT_STATUS); 2406 } else { 2407 sts.state_flags_h = (uint8_t)(sts.state_flags_h | 2408 SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD | 2409 SF_GOT_STATUS); 2410 } 2411 if (scsi_req.direction & CF_WR) { 2412 sts.state_flags_l = (uint8_t)(sts.state_flags_l | 2413 SF_DATA_OUT); 2414 } else if (scsi_req.direction & CF_RD) { 2415 sts.state_flags_l = (uint8_t)(sts.state_flags_l | 2416 SF_DATA_IN); 2417 } 2418 sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q); 2419 2420 /* Setup FCP response info. */ 2421 sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ? 2422 LE_32(pkt->sts24.fcp_rsp_data_length) : 0; 2423 sts.rsp_info = &pkt->sts24.rsp_sense_data[0]; 2424 for (cnt = 0; cnt < sts.rsp_info_length; 2425 cnt = (uint16_t)(cnt + 4)) { 2426 ql_chg_endian(sts.rsp_info + cnt, 4); 2427 } 2428 2429 /* Setup sense data. */ 2430 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) { 2431 sts.req_sense_length = 2432 LE_32(pkt->sts24.fcp_sense_length); 2433 sts.state_flags_h = (uint8_t)(sts.state_flags_h | 2434 SF_ARQ_DONE); 2435 } else { 2436 sts.req_sense_length = 0; 2437 } 2438 sts.req_sense_data = 2439 &pkt->sts24.rsp_sense_data[sts.rsp_info_length]; 2440 cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) - 2441 (uintptr_t)sts.req_sense_data); 2442 for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) { 2443 ql_chg_endian(sts.req_sense_data + cnt, 4); 2444 } 2445 } else { 2446 sts.scsi_status_l = pkt->sts.scsi_status_l; 2447 sts.scsi_status_h = pkt->sts.scsi_status_h; 2448 2449 /* Setup residuals. */ 2450 sts.residual_length = LE_32(pkt->sts.residual_length); 2451 2452 /* Setup state flags. 
*/ 2453 sts.state_flags_l = pkt->sts.state_flags_l; 2454 sts.state_flags_h = pkt->sts.state_flags_h; 2455 2456 /* Setup FCP response info. */ 2457 sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ? 2458 LE_16(pkt->sts.rsp_info_length) : 0; 2459 sts.rsp_info = &pkt->sts.rsp_info[0]; 2460 2461 /* Setup sense data. */ 2462 sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ? 2463 LE_16(pkt->sts.req_sense_length) : 0; 2464 sts.req_sense_data = &pkt->sts.req_sense_data[0]; 2465 } 2466 2467 QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance); 2468 QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t)); 2469 2470 switch (sts.comp_status) { 2471 case CS_INCOMPLETE: 2472 case CS_ABORTED: 2473 case CS_DEVICE_UNAVAILABLE: 2474 case CS_PORT_UNAVAILABLE: 2475 case CS_PORT_LOGGED_OUT: 2476 case CS_PORT_CONFIG_CHG: 2477 case CS_PORT_BUSY: 2478 case CS_LOOP_DOWN_ABORT: 2479 cmd->Status = EXT_STATUS_BUSY; 2480 break; 2481 case CS_RESET: 2482 case CS_QUEUE_FULL: 2483 cmd->Status = EXT_STATUS_ERR; 2484 break; 2485 case CS_TIMEOUT: 2486 cmd->Status = EXT_STATUS_ERR; 2487 break; 2488 case CS_DATA_OVERRUN: 2489 cmd->Status = EXT_STATUS_DATA_OVERRUN; 2490 break; 2491 case CS_DATA_UNDERRUN: 2492 cmd->Status = EXT_STATUS_DATA_UNDERRUN; 2493 break; 2494 } 2495 2496 /* 2497 * If non data transfer commands fix tranfer counts. 
2498 */ 2499 if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY || 2500 scsi_req.cdbp[0] == SCMD_REZERO_UNIT || 2501 scsi_req.cdbp[0] == SCMD_SEEK || 2502 scsi_req.cdbp[0] == SCMD_SEEK_G1 || 2503 scsi_req.cdbp[0] == SCMD_RESERVE || 2504 scsi_req.cdbp[0] == SCMD_RELEASE || 2505 scsi_req.cdbp[0] == SCMD_START_STOP || 2506 scsi_req.cdbp[0] == SCMD_DOORLOCK || 2507 scsi_req.cdbp[0] == SCMD_VERIFY || 2508 scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK || 2509 scsi_req.cdbp[0] == SCMD_VERIFY_G0 || 2510 scsi_req.cdbp[0] == SCMD_SPACE || 2511 scsi_req.cdbp[0] == SCMD_ERASE || 2512 (scsi_req.cdbp[0] == SCMD_FORMAT && 2513 (scsi_req.cdbp[1] & FPB_DATA) == 0)) { 2514 /* 2515 * Non data transfer command, clear sts_entry residual 2516 * length. 2517 */ 2518 sts.residual_length = 0; 2519 cmd->ResponseLen = 0; 2520 if (sts.comp_status == CS_DATA_UNDERRUN) { 2521 sts.comp_status = CS_COMPLETE; 2522 cmd->Status = EXT_STATUS_OK; 2523 } 2524 } else { 2525 cmd->ResponseLen = pld_size; 2526 } 2527 2528 /* Correct ISP completion status */ 2529 if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 && 2530 (sts.scsi_status_h & FCP_RSP_MASK) == 0) { 2531 QL_PRINT_9(CE_CONT, "(%d): Correct completion\n", 2532 ha->instance); 2533 scsi_req.resid = 0; 2534 } else if (sts.comp_status == CS_DATA_UNDERRUN) { 2535 QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n", 2536 ha->instance); 2537 scsi_req.resid = sts.residual_length; 2538 if (sts.scsi_status_h & FCP_RESID_UNDER) { 2539 cmd->Status = (uint32_t)EXT_STATUS_OK; 2540 2541 cmd->ResponseLen = (uint32_t) 2542 (pld_size - scsi_req.resid); 2543 } else { 2544 EL(ha, "failed, Transfer ERROR\n"); 2545 cmd->Status = EXT_STATUS_ERR; 2546 cmd->ResponseLen = 0; 2547 } 2548 } else { 2549 QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, " 2550 "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance, 2551 tq->d_id.b24, sts.comp_status, sts.scsi_status_h, 2552 sts.scsi_status_l); 2553 2554 scsi_req.resid = pld_size; 2555 /* 2556 * Handle residual count on 
SCSI check 2557 * condition. 2558 * 2559 * - If Residual Under / Over is set, use the 2560 * Residual Transfer Length field in IOCB. 2561 * - If Residual Under / Over is not set, and 2562 * Transferred Data bit is set in State Flags 2563 * field of IOCB, report residual value of 0 2564 * (you may want to do this for tape 2565 * Write-type commands only). This takes care 2566 * of logical end of tape problem and does 2567 * not break Unit Attention. 2568 * - If Residual Under / Over is not set, and 2569 * Transferred Data bit is not set in State 2570 * Flags, report residual value equal to 2571 * original data transfer length. 2572 */ 2573 if (sts.scsi_status_l & STATUS_CHECK) { 2574 cmd->Status = EXT_STATUS_SCSI_STATUS; 2575 cmd->DetailStatus = sts.scsi_status_l; 2576 if (sts.scsi_status_h & 2577 (FCP_RESID_OVER | FCP_RESID_UNDER)) { 2578 scsi_req.resid = sts.residual_length; 2579 } else if (sts.state_flags_h & 2580 STATE_XFERRED_DATA) { 2581 scsi_req.resid = 0; 2582 } 2583 } 2584 } 2585 2586 if (sts.scsi_status_l & STATUS_CHECK && 2587 sts.scsi_status_h & FCP_SNS_LEN_VALID && 2588 sts.req_sense_length) { 2589 /* 2590 * Check condition with vaild sense data flag set and sense 2591 * length != 0 2592 */ 2593 if (sts.req_sense_length > scsi_req.sense_length) { 2594 sense_sz = scsi_req.sense_length; 2595 } else { 2596 sense_sz = sts.req_sense_length; 2597 } 2598 2599 EL(ha, "failed, Check Condition Status, d_id=%xh\n", 2600 tq->d_id.b24); 2601 QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length); 2602 2603 if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense, 2604 (size_t)sense_sz, mode) != 0) { 2605 EL(ha, "failed, request sense ddi_copyout\n"); 2606 } 2607 2608 cmd->Status = EXT_STATUS_SCSI_STATUS; 2609 cmd->DetailStatus = sts.scsi_status_l; 2610 } 2611 2612 /* Copy response payload from DMA buffer to application. 
*/ 2613 if (scsi_req.direction & (CF_RD | CF_DATA_IN) && 2614 cmd->ResponseLen != 0) { 2615 QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, " 2616 "byte_count=%u, ResponseLen=%xh\n", ha->instance, 2617 scsi_req.resid, pld_size, cmd->ResponseLen); 2618 QL_DUMP_9(pld, 8, cmd->ResponseLen); 2619 2620 /* Send response payload. */ 2621 if (ql_send_buffer_data(pld, 2622 (caddr_t)(uintptr_t)cmd->ResponseAdr, 2623 cmd->ResponseLen, mode) != cmd->ResponseLen) { 2624 EL(ha, "failed, send_buffer_data\n"); 2625 cmd->Status = EXT_STATUS_COPY_ERR; 2626 cmd->ResponseLen = 0; 2627 } 2628 } 2629 2630 if (cmd->Status != EXT_STATUS_OK) { 2631 EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, " 2632 "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24); 2633 } else { 2634 /*EMPTY*/ 2635 QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n", 2636 ha->instance, cmd->ResponseLen); 2637 } 2638 2639 kmem_free(pkt, pkt_size); 2640 ql_free_dma_resource(ha, dma_mem); 2641 kmem_free(dma_mem, sizeof (dma_mem_t)); 2642 } 2643 2644 /* 2645 * ql_wwpn_to_scsiaddr 2646 * 2647 * Input: 2648 * ha: adapter state pointer. 2649 * cmd: EXT_IOCTL cmd struct pointer. 2650 * mode: flags. 2651 * 2652 * Context: 2653 * Kernel context. 
2654 */ 2655 static void 2656 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 2657 { 2658 int status; 2659 uint8_t wwpn[EXT_DEF_WWN_NAME_SIZE]; 2660 EXT_SCSI_ADDR *tmp_addr; 2661 ql_tgt_t *tq; 2662 2663 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2664 2665 if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) { 2666 /* Return error */ 2667 EL(ha, "incorrect RequestLen\n"); 2668 cmd->Status = EXT_STATUS_INVALID_PARAM; 2669 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN; 2670 return; 2671 } 2672 2673 status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn, 2674 cmd->RequestLen, mode); 2675 2676 if (status != 0) { 2677 cmd->Status = EXT_STATUS_COPY_ERR; 2678 EL(ha, "failed, ddi_copyin\n"); 2679 return; 2680 } 2681 2682 tq = ql_find_port(ha, wwpn, QLNT_PORT); 2683 2684 if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) { 2685 /* no matching device */ 2686 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 2687 EL(ha, "failed, device not found\n"); 2688 return; 2689 } 2690 2691 /* Copy out the IDs found. For now we can only return target ID. */ 2692 tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr; 2693 2694 status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode); 2695 2696 if (status != 0) { 2697 cmd->Status = EXT_STATUS_COPY_ERR; 2698 EL(ha, "failed, ddi_copyout\n"); 2699 } else { 2700 cmd->Status = EXT_STATUS_OK; 2701 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2702 } 2703 } 2704 2705 /* 2706 * ql_host_idx 2707 * Gets host order index. 2708 * 2709 * Input: 2710 * ha: adapter state pointer. 2711 * cmd: EXT_IOCTL cmd struct pointer. 2712 * mode: flags. 2713 * 2714 * Returns: 2715 * None, request status indicated in cmd->Status. 2716 * 2717 * Context: 2718 * Kernel context. 
2719 */ 2720 static void 2721 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 2722 { 2723 uint16_t idx; 2724 2725 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2726 2727 if (cmd->ResponseLen < sizeof (uint16_t)) { 2728 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 2729 cmd->DetailStatus = sizeof (uint16_t); 2730 EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen); 2731 cmd->ResponseLen = 0; 2732 return; 2733 } 2734 2735 idx = (uint16_t)ha->instance; 2736 2737 if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr), 2738 sizeof (uint16_t), mode) != 0) { 2739 cmd->Status = EXT_STATUS_COPY_ERR; 2740 cmd->ResponseLen = 0; 2741 EL(ha, "failed, ddi_copyout\n"); 2742 } else { 2743 cmd->ResponseLen = sizeof (uint16_t); 2744 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2745 } 2746 } 2747 2748 /* 2749 * ql_host_drvname 2750 * Gets host driver name 2751 * 2752 * Input: 2753 * ha: adapter state pointer. 2754 * cmd: EXT_IOCTL cmd struct pointer. 2755 * mode: flags. 2756 * 2757 * Returns: 2758 * None, request status indicated in cmd->Status. 2759 * 2760 * Context: 2761 * Kernel context. 
2762 */ 2763 static void 2764 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 2765 { 2766 2767 char drvname[] = QL_NAME; 2768 uint32_t qlnamelen; 2769 2770 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2771 2772 qlnamelen = (uint32_t)(strlen(QL_NAME)+1); 2773 2774 if (cmd->ResponseLen < qlnamelen) { 2775 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 2776 cmd->DetailStatus = qlnamelen; 2777 EL(ha, "failed, ResponseLen: %xh, needed: %xh\n", 2778 cmd->ResponseLen, qlnamelen); 2779 cmd->ResponseLen = 0; 2780 return; 2781 } 2782 2783 if (ddi_copyout((void *)&drvname, 2784 (void *)(uintptr_t)(cmd->ResponseAdr), 2785 qlnamelen, mode) != 0) { 2786 cmd->Status = EXT_STATUS_COPY_ERR; 2787 cmd->ResponseLen = 0; 2788 EL(ha, "failed, ddi_copyout\n"); 2789 } else { 2790 cmd->ResponseLen = qlnamelen-1; 2791 } 2792 2793 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2794 } 2795 2796 /* 2797 * ql_read_nvram 2798 * Get NVRAM contents. 2799 * 2800 * Input: 2801 * ha: adapter state pointer. 2802 * cmd: EXT_IOCTL cmd struct pointer. 2803 * mode: flags. 2804 * 2805 * Returns: 2806 * None, request status indicated in cmd->Status. 2807 * 2808 * Context: 2809 * Kernel context. 2810 */ 2811 static void 2812 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 2813 { 2814 2815 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2816 2817 if (cmd->ResponseLen < ha->nvram_cache->size) { 2818 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 2819 cmd->DetailStatus = ha->nvram_cache->size; 2820 EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n", 2821 cmd->ResponseLen); 2822 cmd->ResponseLen = 0; 2823 return; 2824 } 2825 2826 /* Get NVRAM data. 
*/ 2827 if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr), 2828 mode) != 0) { 2829 cmd->Status = EXT_STATUS_COPY_ERR; 2830 cmd->ResponseLen = 0; 2831 EL(ha, "failed, copy error\n"); 2832 } else { 2833 cmd->ResponseLen = ha->nvram_cache->size; 2834 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2835 } 2836 } 2837 2838 /* 2839 * ql_write_nvram 2840 * Loads NVRAM contents. 2841 * 2842 * Input: 2843 * ha: adapter state pointer. 2844 * cmd: EXT_IOCTL cmd struct pointer. 2845 * mode: flags. 2846 * 2847 * Returns: 2848 * None, request status indicated in cmd->Status. 2849 * 2850 * Context: 2851 * Kernel context. 2852 */ 2853 static void 2854 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 2855 { 2856 2857 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2858 2859 if (cmd->RequestLen < ha->nvram_cache->size) { 2860 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 2861 cmd->DetailStatus = ha->nvram_cache->size; 2862 EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n", 2863 cmd->RequestLen); 2864 return; 2865 } 2866 2867 /* Load NVRAM data. */ 2868 if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr), 2869 mode) != 0) { 2870 cmd->Status = EXT_STATUS_COPY_ERR; 2871 EL(ha, "failed, copy error\n"); 2872 } else { 2873 /*EMPTY*/ 2874 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2875 } 2876 } 2877 2878 /* 2879 * ql_write_vpd 2880 * Loads VPD contents. 2881 * 2882 * Input: 2883 * ha: adapter state pointer. 2884 * cmd: EXT_IOCTL cmd struct pointer. 2885 * mode: flags. 2886 * 2887 * Returns: 2888 * None, request status indicated in cmd->Status. 2889 * 2890 * Context: 2891 * Kernel context. 
 */
static void
ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
        QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

        /* NOTE(review): declaration after a statement requires C99. */
        int32_t rval = 0;

        /* VPD is only present on 24xx/25xx/81xx class adapters. */
        if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
                cmd->Status = EXT_STATUS_INVALID_REQUEST;
                EL(ha, "failed, invalid request for HBA\n");
                return;
        }

        /* Caller must supply a complete VPD image. */
        if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
                cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
                cmd->DetailStatus = QL_24XX_VPD_SIZE;
                EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
                    cmd->RequestLen);
                return;
        }

        /* Load VPD data. */
        if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
            mode)) != 0) {
                cmd->Status = EXT_STATUS_COPY_ERR;
                cmd->DetailStatus = rval;
                EL(ha, "failed, errno=%x\n", rval);
        } else {
                /*EMPTY*/
                QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
        }
}

/*
 * ql_read_vpd
 *      Dumps VPD contents.
 *
 * Input:
 *      ha:     adapter state pointer.
 *      cmd:    EXT_IOCTL cmd struct pointer.
 *      mode:   flags.
 *
 * Returns:
 *      None, request status indicated in cmd->Status.
 *
 * Context:
 *      Kernel context.
 */
static void
ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
        QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

        /* VPD is only present on 24xx/25xx/81xx class adapters. */
        if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
                cmd->Status = EXT_STATUS_INVALID_REQUEST;
                EL(ha, "failed, invalid request for HBA\n");
                return;
        }

        /* The whole VPD image is returned; no partial reads. */
        if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
                cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
                cmd->DetailStatus = QL_24XX_VPD_SIZE;
                EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
                    cmd->ResponseLen);
                return;
        }

        /* Dump VPD data. */
        if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
            mode)) != 0) {
                cmd->Status = EXT_STATUS_COPY_ERR;
                EL(ha, "failed,\n");
        } else {
                /*EMPTY*/
                QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
        }
}

/*
 * ql_get_fcache
 *      Dumps flash cache contents.
 *
 * Input:
 *      ha:     adapter state pointer.
 *      cmd:    EXT_IOCTL cmd struct pointer.
 *      mode:   flags.
 *
 * Returns:
 *      None, request status indicated in cmd->Status.
 *
 * Context:
 *      Kernel context.
 */
static void
ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
        uint32_t        bsize, boff, types, cpsize, hsize;
        ql_fcache_t     *fptr;

        QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

        CACHE_LOCK(ha);

        if (ha->fcache == NULL) {
                CACHE_UNLOCK(ha);
                cmd->Status = EXT_STATUS_ERR;
                EL(ha, "failed, adapter fcache not setup\n");
                return;
        }

        /*
         * Minimum response size: the response layout is fixed at 100
         * bytes per legacy image slot; 400 when the firmware slot at
         * offset 300 is also present (24xx/25xx/81xx adapters).
         */
        if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
                bsize = 100;
        } else {
                bsize = 400;
        }

        if (cmd->ResponseLen < bsize) {
                CACHE_UNLOCK(ha);
                cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
                cmd->DetailStatus = bsize;
                EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
                    bsize, cmd->ResponseLen);
                return;
        }

        boff = 0;
        bsize = 0;      /* reused below as the count of bytes returned */
        fptr = ha->fcache;

        /*
         * For backwards compatibility, get one of each image type
         */
        types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
        while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
                /* Get the next image */
                if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {

                        /* Each legacy slot is capped at 100 bytes. */
                        cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);

                        if (ddi_copyout(fptr->buf,
                            (void *)(uintptr_t)(cmd->ResponseAdr + boff),
                            cpsize, mode) != 0) {
                                CACHE_UNLOCK(ha);
                                EL(ha, "ddicopy failed, done\n");
                                cmd->Status = EXT_STATUS_COPY_ERR;
                                cmd->DetailStatus = 0;
                                return;
                        }
                        /* Slots advance by 100 regardless of cpsize. */
                        boff += 100;
                        bsize += cpsize;
                        /* Take only one image of each type. */
                        types &= ~(fptr->type);
                }
        }

        /*
         * Get the firmware image -- it needs to be last in the
         * buffer at offset 300 for backwards compatibility. Also for
         * backwards compatibility, the pci header is stripped off.
         */
        if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {

                hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
                if (hsize > fptr->buflen) {
                        CACHE_UNLOCK(ha);
                        EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
                            hsize, fptr->buflen);
                        cmd->Status = EXT_STATUS_COPY_ERR;
                        cmd->DetailStatus = 0;
                        return;
                }

                cpsize = ((fptr->buflen - hsize) < 100 ?
                    fptr->buflen - hsize : 100);

                if (ddi_copyout(fptr->buf+hsize,
                    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
                    cpsize, mode) != 0) {
                        CACHE_UNLOCK(ha);
                        EL(ha, "fw ddicopy failed, done\n");
                        cmd->Status = EXT_STATUS_COPY_ERR;
                        cmd->DetailStatus = 0;
                        return;
                }
                /* NOTE(review): counts 100 even when cpsize < 100. */
                bsize += 100;
        }

        CACHE_UNLOCK(ha);
        cmd->Status = EXT_STATUS_OK;
        cmd->DetailStatus = bsize;

        QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_get_fcache_ex
 *      Dumps flash cache contents.
 *
 * Input:
 *      ha:     adapter state pointer.
 *      cmd:    EXT_IOCTL cmd struct pointer.
 *      mode:   flags.
 *
 * Returns:
 *      None, request status indicated in cmd->Status.
 *
 * Context:
 *      Kernel context.
3100 */ 3101 static void 3102 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3103 { 3104 uint32_t bsize = 0; 3105 uint32_t boff = 0; 3106 ql_fcache_t *fptr; 3107 3108 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 3109 3110 CACHE_LOCK(ha); 3111 if (ha->fcache == NULL) { 3112 CACHE_UNLOCK(ha); 3113 cmd->Status = EXT_STATUS_ERR; 3114 EL(ha, "failed, adapter fcache not setup\n"); 3115 return; 3116 } 3117 3118 /* Make sure user passed enough buffer space */ 3119 for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) { 3120 bsize += FBUFSIZE; 3121 } 3122 3123 if (cmd->ResponseLen < bsize) { 3124 CACHE_UNLOCK(ha); 3125 if (cmd->ResponseLen != 0) { 3126 EL(ha, "failed, ResponseLen < %d, len passed=%xh\n", 3127 bsize, cmd->ResponseLen); 3128 } 3129 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 3130 cmd->DetailStatus = bsize; 3131 return; 3132 } 3133 3134 boff = 0; 3135 fptr = ha->fcache; 3136 while ((fptr != NULL) && (fptr->buf != NULL)) { 3137 /* Get the next image */ 3138 if (ddi_copyout(fptr->buf, 3139 (void *)(uintptr_t)(cmd->ResponseAdr + boff), 3140 (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE), 3141 mode) != 0) { 3142 CACHE_UNLOCK(ha); 3143 EL(ha, "failed, ddicopy at %xh, done\n", boff); 3144 cmd->Status = EXT_STATUS_COPY_ERR; 3145 cmd->DetailStatus = 0; 3146 return; 3147 } 3148 boff += FBUFSIZE; 3149 fptr = fptr->next; 3150 } 3151 3152 CACHE_UNLOCK(ha); 3153 cmd->Status = EXT_STATUS_OK; 3154 cmd->DetailStatus = bsize; 3155 3156 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3157 } 3158 3159 /* 3160 * ql_read_flash 3161 * Get flash contents. 3162 * 3163 * Input: 3164 * ha: adapter state pointer. 3165 * cmd: EXT_IOCTL cmd struct pointer. 3166 * mode: flags. 3167 * 3168 * Returns: 3169 * None, request status indicated in cmd->Status. 3170 * 3171 * Context: 3172 * Kernel context. 
3173 */ 3174 static void 3175 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3176 { 3177 ql_xioctl_t *xp = ha->xioctl; 3178 3179 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 3180 3181 if (ql_stall_driver(ha, 0) != QL_SUCCESS) { 3182 EL(ha, "ql_stall_driver failed\n"); 3183 cmd->Status = EXT_STATUS_BUSY; 3184 cmd->DetailStatus = xp->fdesc.flash_size; 3185 cmd->ResponseLen = 0; 3186 return; 3187 } 3188 3189 if (ql_setup_fcache(ha) != QL_SUCCESS) { 3190 cmd->Status = EXT_STATUS_ERR; 3191 cmd->DetailStatus = xp->fdesc.flash_size; 3192 EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n", 3193 cmd->ResponseLen, xp->fdesc.flash_size); 3194 cmd->ResponseLen = 0; 3195 } else { 3196 /* adjust read size to flash size */ 3197 if (cmd->ResponseLen > xp->fdesc.flash_size) { 3198 EL(ha, "adjusting req=%xh, max=%xh\n", 3199 cmd->ResponseLen, xp->fdesc.flash_size); 3200 cmd->ResponseLen = xp->fdesc.flash_size; 3201 } 3202 3203 /* Get flash data. */ 3204 if (ql_flash_fcode_dump(ha, 3205 (void *)(uintptr_t)(cmd->ResponseAdr), 3206 (size_t)(cmd->ResponseLen), 0, mode) != 0) { 3207 cmd->Status = EXT_STATUS_COPY_ERR; 3208 cmd->ResponseLen = 0; 3209 EL(ha, "failed,\n"); 3210 } 3211 } 3212 3213 /* Resume I/O */ 3214 if (CFG_IST(ha, CFG_CTRL_24258081)) { 3215 ql_restart_driver(ha); 3216 } else { 3217 EL(ha, "isp_abort_needed for restart\n"); 3218 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 3219 DRIVER_STALL); 3220 } 3221 3222 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3223 } 3224 3225 /* 3226 * ql_write_flash 3227 * Loads flash contents. 3228 * 3229 * Input: 3230 * ha: adapter state pointer. 3231 * cmd: EXT_IOCTL cmd struct pointer. 3232 * mode: flags. 3233 * 3234 * Returns: 3235 * None, request status indicated in cmd->Status. 3236 * 3237 * Context: 3238 * Kernel context. 
3239 */ 3240 static void 3241 ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3242 { 3243 ql_xioctl_t *xp = ha->xioctl; 3244 3245 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 3246 3247 if (ql_stall_driver(ha, 0) != QL_SUCCESS) { 3248 EL(ha, "ql_stall_driver failed\n"); 3249 cmd->Status = EXT_STATUS_BUSY; 3250 cmd->DetailStatus = xp->fdesc.flash_size; 3251 cmd->ResponseLen = 0; 3252 return; 3253 } 3254 3255 if (ql_setup_fcache(ha) != QL_SUCCESS) { 3256 cmd->Status = EXT_STATUS_ERR; 3257 cmd->DetailStatus = xp->fdesc.flash_size; 3258 EL(ha, "failed, RequestLen=%xh, size=%xh\n", 3259 cmd->RequestLen, xp->fdesc.flash_size); 3260 cmd->ResponseLen = 0; 3261 } else { 3262 /* Load flash data. */ 3263 if (cmd->RequestLen > xp->fdesc.flash_size) { 3264 cmd->Status = EXT_STATUS_ERR; 3265 cmd->DetailStatus = xp->fdesc.flash_size; 3266 EL(ha, "failed, RequestLen=%xh, flash size=%xh\n", 3267 cmd->RequestLen, xp->fdesc.flash_size); 3268 } else if (ql_flash_fcode_load(ha, 3269 (void *)(uintptr_t)(cmd->RequestAdr), 3270 (size_t)(cmd->RequestLen), mode) != 0) { 3271 cmd->Status = EXT_STATUS_COPY_ERR; 3272 EL(ha, "failed,\n"); 3273 } 3274 } 3275 3276 /* Resume I/O */ 3277 if (CFG_IST(ha, CFG_CTRL_24258081)) { 3278 ql_restart_driver(ha); 3279 } else { 3280 EL(ha, "isp_abort_needed for restart\n"); 3281 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 3282 DRIVER_STALL); 3283 } 3284 3285 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3286 } 3287 3288 /* 3289 * ql_diagnostic_loopback 3290 * Performs EXT_CC_LOOPBACK Command 3291 * 3292 * Input: 3293 * ha: adapter state pointer. 3294 * cmd: Local EXT_IOCTL cmd struct pointer. 3295 * mode: flags. 3296 * 3297 * Returns: 3298 * None, request status indicated in cmd->Status. 3299 * 3300 * Context: 3301 * Kernel context. 
 */
static void
ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_LOOPBACK_REQ	plbreq;
	/*
	 * NOTE(review): plbrsp is never zeroed; any reserved/pad fields
	 * are copied to user space as stack garbage -- confirm whether
	 * a bzero() is wanted here.
	 */
	EXT_LOOPBACK_RSP	plbrsp;
	ql_mbx_data_t		mr;
	uint32_t		rval;
	caddr_t			bp;
	uint16_t		opt;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Get loop back request. */
	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Extract the requested loopback point from the options. */
	opt = (uint16_t)(plbreq.Options & MBC_LOOPBACK_POINT_MASK);

	/*
	 * Check transfer length fits in buffer.
	 * NOTE(review): the '&&' means the request is rejected only when
	 * the buffer is too small AND the count is under
	 * MAILBOX_BUFFER_SIZE -- looks like it may have been intended as
	 * two independent checks; confirm against the original intent.
	 */
	if (plbreq.BufferLength < plbreq.TransferCount &&
	    plbreq.TransferCount < MAILBOX_BUFFER_SIZE) {
		EL(ha, "failed, BufferLength=%d, xfercnt=%d, "
		    "mailbox_buffer_size=%d\n", plbreq.BufferLength,
		    plbreq.TransferCount, MAILBOX_BUFFER_SIZE);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate command memory. */
	bp = kmem_zalloc(plbreq.TransferCount, KM_SLEEP);

	/* Get loopback data. */
	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
	    bp, plbreq.TransferCount, mode) != plbreq.TransferCount) {
		EL(ha, "failed, ddi_copyin-2\n");
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Loopback requires a quiesced, stable link. */
	if ((ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) ||
	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* determine topology so we can send the loopback or the echo */
	/* Echo is supported on 2300's only and above */

	if (CFG_IST(ha, CFG_CTRL_8081)) {
		/* 8xxx/81xx parts: ELS echo when external and loop is up. */
		if (!(ha->task_daemon_flags & LOOP_DOWN) && opt ==
		    MBC_LOOPBACK_POINT_EXTERNAL) {
			/* ELS echo payload is limited to 252 bytes. */
			if (plbreq.TransferCount > 252) {
				EL(ha, "transfer count (%d) > 252\n",
				    plbreq.TransferCount);
				kmem_free(bp, plbreq.TransferCount);
				cmd->Status = EXT_STATUS_INVALID_PARAM;
				cmd->ResponseLen = 0;
				return;
			}
			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
			rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
			    MBC_ECHO_ELS, &mr);
		} else {
			/* 81xx needs the loop point set up front. */
			if (CFG_IST(ha, CFG_CTRL_81XX)) {
				(void) ql_set_loop_point(ha, opt);
			}
			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
			rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
			    opt, plbreq.IterationCount, &mr);
			/* Restore the loop point afterwards. */
			if (CFG_IST(ha, CFG_CTRL_81XX)) {
				(void) ql_set_loop_point(ha, 0);
			}
		}
	} else {
		/* Fabric-attached 2300+ parts use echo instead. */
		if (!(ha->task_daemon_flags & LOOP_DOWN) &&
		    (ha->topology & QL_F_PORT) &&
		    ha->device_id >= 0x2300) {
			QL_PRINT_9(CE_CONT, "(%d): F_PORT topology -- using "
			    "echo\n", ha->instance);
			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
			rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
			    (uint16_t)(CFG_IST(ha, CFG_CTRL_8081) ?
			    MBC_ECHO_ELS : MBC_ECHO_64BIT), &mr);
		} else {
			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
			rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
			    opt, plbreq.IterationCount, &mr);
		}
	}

	ql_restart_driver(ha);

	/* Restart IP if it was shutdown. */
	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
		(void) ql_initialize_ip(ha);
		ql_isp_rcvbuf(ha);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
		return;
	}

	/* Return loopback data. */
	if (ql_send_buffer_data(bp, (caddr_t)(uintptr_t)plbreq.BufferAddress,
	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
		EL(ha, "failed, ddi_copyout\n");
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	kmem_free(bp, plbreq.TransferCount);

	/* Return loopback results. */
	plbrsp.BufferAddress = plbreq.BufferAddress;
	plbrsp.BufferLength = plbreq.TransferCount;
	plbrsp.CompletionStatus = mr.mb[0];

	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
		/* Echo has no per-frame error counters. */
		plbrsp.CrcErrorCount = 0;
		plbrsp.DisparityErrorCount = 0;
		plbrsp.FrameLengthErrorCount = 0;
		plbrsp.IterationCountLastError = 0;
	} else {
		/*
		 * Error counters from the mailbox registers.
		 * NOTE(review): (mr.mb[19] >> 16) is always 0 if mb[] is
		 * 16-bit wide -- verify the intended field combination.
		 */
		plbrsp.CrcErrorCount = mr.mb[1];
		plbrsp.DisparityErrorCount = mr.mb[2];
		plbrsp.FrameLengthErrorCount = mr.mb[3];
		plbrsp.IterationCountLastError = (mr.mb[19] >> 16) | mr.mb[18];
	}

	rval = ddi_copyout((void *)&plbrsp,
	    (void *)(uintptr_t)cmd->ResponseAdr,
	    sizeof (EXT_LOOPBACK_RSP), mode);
	if (rval != 0) {
		EL(ha, "failed, ddi_copyout-2\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_set_loop_point
 *	Setup loop point for port configuration.
 *
 * Input:
 *	ha:	adapter state structure.
 *	opt:	loop point option.
3475 * 3476 * Returns: 3477 * ql local function return status code. 3478 * 3479 * Context: 3480 * Kernel context. 3481 */ 3482 static int 3483 ql_set_loop_point(ql_adapter_state_t *ha, uint16_t opt) 3484 { 3485 ql_mbx_data_t mr; 3486 int rval; 3487 uint32_t timer; 3488 3489 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 3490 3491 /* 3492 * We get the current port config, modify the loopback field and 3493 * write it back out. 3494 */ 3495 if ((rval = ql_get_port_config(ha, &mr)) != QL_SUCCESS) { 3496 EL(ha, "get_port_config status=%xh\n", rval); 3497 return (rval); 3498 } 3499 /* 3500 * Set the loopback mode field while maintaining the others. 3501 * Currently only internal or none are supported. 3502 */ 3503 mr.mb[1] = (uint16_t)(mr.mb[1] &~LOOPBACK_MODE_FIELD_MASK); 3504 if (opt == MBC_LOOPBACK_POINT_INTERNAL) { 3505 mr.mb[1] = (uint16_t)(mr.mb[1] | 3506 LOOPBACK_MODE(LOOPBACK_MODE_INTERNAL)); 3507 } 3508 /* 3509 * Changing the port configuration will cause the port state to cycle 3510 * down and back up. The indication that this has happened is that 3511 * the point to point flag gets set. 3512 */ 3513 ADAPTER_STATE_LOCK(ha); 3514 ha->flags &= ~POINT_TO_POINT; 3515 ADAPTER_STATE_UNLOCK(ha); 3516 if ((rval = ql_set_port_config(ha, &mr)) != QL_SUCCESS) { 3517 EL(ha, "set_port_config status=%xh\n", rval); 3518 } 3519 3520 /* wait for a while */ 3521 for (timer = opt ? 10 : 0; timer; timer--) { 3522 if (ha->flags & POINT_TO_POINT) { 3523 break; 3524 } 3525 /* Delay for 1000000 usec (1 second). */ 3526 ql_delay(ha, 1000000); 3527 } 3528 3529 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3530 3531 return (rval); 3532 } 3533 3534 /* 3535 * ql_send_els_rnid 3536 * IOCTL for extended link service RNID command. 3537 * 3538 * Input: 3539 * ha: adapter state pointer. 3540 * cmd: User space CT arguments pointer. 3541 * mode: flags. 3542 * 3543 * Returns: 3544 * None, request status indicated in cmd->Status. 3545 * 3546 * Context: 3547 * Kernel context. 
3548 */ 3549 static void 3550 ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3551 { 3552 EXT_RNID_REQ tmp_rnid; 3553 port_id_t tmp_fcid; 3554 caddr_t tmp_buf, bptr; 3555 uint32_t copy_len; 3556 ql_tgt_t *tq; 3557 EXT_RNID_DATA rnid_data; 3558 uint32_t loop_ready_wait = 10 * 60 * 10; 3559 int rval = 0; 3560 uint32_t local_hba = 0; 3561 3562 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 3563 3564 if (DRIVER_SUSPENDED(ha)) { 3565 EL(ha, "failed, LOOP_NOT_READY\n"); 3566 cmd->Status = EXT_STATUS_BUSY; 3567 cmd->ResponseLen = 0; 3568 return; 3569 } 3570 3571 if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) { 3572 /* parameter error */ 3573 EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n", 3574 cmd->RequestLen); 3575 cmd->Status = EXT_STATUS_INVALID_PARAM; 3576 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN; 3577 cmd->ResponseLen = 0; 3578 return; 3579 } 3580 3581 if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, 3582 &tmp_rnid, cmd->RequestLen, mode) != 0) { 3583 EL(ha, "failed, ddi_copyin\n"); 3584 cmd->Status = EXT_STATUS_COPY_ERR; 3585 cmd->ResponseLen = 0; 3586 return; 3587 } 3588 3589 /* Find loop ID of the device */ 3590 if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) { 3591 bptr = CFG_IST(ha, CFG_CTRL_24258081) ? 3592 (caddr_t)&ha->init_ctrl_blk.cb24.node_name : 3593 (caddr_t)&ha->init_ctrl_blk.cb.node_name; 3594 if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN, 3595 EXT_DEF_WWN_NAME_SIZE) == 0) { 3596 local_hba = 1; 3597 } else { 3598 tq = ql_find_port(ha, 3599 (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE); 3600 } 3601 } else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) { 3602 bptr = CFG_IST(ha, CFG_CTRL_24258081) ? 
3603 (caddr_t)&ha->init_ctrl_blk.cb24.port_name : 3604 (caddr_t)&ha->init_ctrl_blk.cb.port_name; 3605 if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN, 3606 EXT_DEF_WWN_NAME_SIZE) == 0) { 3607 local_hba = 1; 3608 } else { 3609 tq = ql_find_port(ha, 3610 (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT); 3611 } 3612 } else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) { 3613 /* 3614 * Copy caller's d_id to tmp space. 3615 */ 3616 bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id, 3617 EXT_DEF_PORTID_SIZE_ACTUAL); 3618 BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]); 3619 3620 if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id, 3621 EXT_DEF_PORTID_SIZE_ACTUAL) == 0) { 3622 local_hba = 1; 3623 } else { 3624 tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id, 3625 QLNT_PID); 3626 } 3627 } 3628 3629 /* Allocate memory for command. */ 3630 tmp_buf = kmem_zalloc(SEND_RNID_RSP_SIZE, KM_SLEEP); 3631 3632 if (local_hba) { 3633 rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf); 3634 if (rval != QL_SUCCESS) { 3635 EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval); 3636 kmem_free(tmp_buf, SEND_RNID_RSP_SIZE); 3637 cmd->Status = EXT_STATUS_ERR; 3638 cmd->ResponseLen = 0; 3639 return; 3640 } 3641 3642 /* Save gotten RNID data. 
*/ 3643 bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA)); 3644 3645 /* Now build the Send RNID response */ 3646 tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC); 3647 tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE); 3648 tmp_buf[2] = 0; 3649 tmp_buf[3] = sizeof (EXT_RNID_DATA); 3650 3651 if (CFG_IST(ha, CFG_CTRL_24258081)) { 3652 bcopy(ha->init_ctrl_blk.cb24.port_name, &tmp_buf[4], 3653 EXT_DEF_WWN_NAME_SIZE); 3654 bcopy(ha->init_ctrl_blk.cb24.node_name, 3655 &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE], 3656 EXT_DEF_WWN_NAME_SIZE); 3657 } else { 3658 bcopy(ha->init_ctrl_blk.cb.port_name, &tmp_buf[4], 3659 EXT_DEF_WWN_NAME_SIZE); 3660 bcopy(ha->init_ctrl_blk.cb.node_name, 3661 &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE], 3662 EXT_DEF_WWN_NAME_SIZE); 3663 } 3664 3665 bcopy((uint8_t *)&rnid_data, 3666 &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE], 3667 sizeof (EXT_RNID_DATA)); 3668 } else { 3669 if (tq == NULL) { 3670 /* no matching device */ 3671 EL(ha, "failed, device not found\n"); 3672 kmem_free(tmp_buf, SEND_RNID_RSP_SIZE); 3673 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 3674 cmd->DetailStatus = EXT_DSTATUS_TARGET; 3675 cmd->ResponseLen = 0; 3676 return; 3677 } 3678 3679 /* Send command */ 3680 rval = ql_send_rnid_els(ha, tq->loop_id, 3681 (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf); 3682 if (rval != QL_SUCCESS) { 3683 EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n", 3684 rval, tq->loop_id); 3685 while (LOOP_NOT_READY(ha)) { 3686 ql_delay(ha, 100000); 3687 if (loop_ready_wait-- == 0) { 3688 EL(ha, "failed, loop not ready\n"); 3689 cmd->Status = EXT_STATUS_ERR; 3690 cmd->ResponseLen = 0; 3691 } 3692 } 3693 rval = ql_send_rnid_els(ha, tq->loop_id, 3694 (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, 3695 tmp_buf); 3696 if (rval != QL_SUCCESS) { 3697 /* error */ 3698 EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n", 3699 rval, tq->loop_id); 3700 kmem_free(tmp_buf, SEND_RNID_RSP_SIZE); 3701 cmd->Status = EXT_STATUS_ERR; 3702 cmd->ResponseLen = 0; 3703 return; 3704 } 3705 } 3706 } 
3707 3708 /* Copy the response */ 3709 copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ? 3710 SEND_RNID_RSP_SIZE : cmd->ResponseLen; 3711 3712 if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr, 3713 copy_len, mode) != copy_len) { 3714 cmd->Status = EXT_STATUS_COPY_ERR; 3715 EL(ha, "failed, ddi_copyout\n"); 3716 } else { 3717 cmd->ResponseLen = copy_len; 3718 if (copy_len < SEND_RNID_RSP_SIZE) { 3719 cmd->Status = EXT_STATUS_DATA_OVERRUN; 3720 EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n"); 3721 3722 } else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) { 3723 cmd->Status = EXT_STATUS_DATA_UNDERRUN; 3724 EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n"); 3725 } else { 3726 cmd->Status = EXT_STATUS_OK; 3727 QL_PRINT_9(CE_CONT, "(%d): done\n", 3728 ha->instance); 3729 } 3730 } 3731 3732 kmem_free(tmp_buf, SEND_RNID_RSP_SIZE); 3733 } 3734 3735 /* 3736 * ql_set_host_data 3737 * Process IOCTL subcommand to set host/adapter related data. 3738 * 3739 * Input: 3740 * ha: adapter state pointer. 3741 * cmd: User space CT arguments pointer. 3742 * mode: flags. 3743 * 3744 * Returns: 3745 * None, request status indicated in cmd->Status. 3746 * 3747 * Context: 3748 * Kernel context. 
3749 */ 3750 static void 3751 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3752 { 3753 QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance, 3754 cmd->SubCode); 3755 3756 /* 3757 * case off on command subcode 3758 */ 3759 switch (cmd->SubCode) { 3760 case EXT_SC_SET_RNID: 3761 ql_set_rnid_parameters(ha, cmd, mode); 3762 break; 3763 case EXT_SC_RST_STATISTICS: 3764 (void) ql_reset_statistics(ha, cmd); 3765 break; 3766 case EXT_SC_SET_BEACON_STATE: 3767 ql_set_led_state(ha, cmd, mode); 3768 break; 3769 case EXT_SC_SET_PARMS: 3770 case EXT_SC_SET_BUS_MODE: 3771 case EXT_SC_SET_DR_DUMP_BUF: 3772 case EXT_SC_SET_RISC_CODE: 3773 case EXT_SC_SET_FLASH_RAM: 3774 case EXT_SC_SET_LUN_BITMASK: 3775 case EXT_SC_SET_RETRY_CNT: 3776 case EXT_SC_SET_RTIN: 3777 case EXT_SC_SET_FC_LUN_BITMASK: 3778 case EXT_SC_ADD_TARGET_DEVICE: 3779 case EXT_SC_SWAP_TARGET_DEVICE: 3780 case EXT_SC_SET_SEL_TIMEOUT: 3781 default: 3782 /* function not supported. */ 3783 EL(ha, "failed, function not supported=%d\n", cmd->SubCode); 3784 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 3785 break; 3786 } 3787 3788 if (cmd->Status != EXT_STATUS_OK) { 3789 EL(ha, "failed, Status=%d\n", cmd->Status); 3790 } else { 3791 /*EMPTY*/ 3792 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3793 } 3794 } 3795 3796 /* 3797 * ql_get_host_data 3798 * Performs EXT_CC_GET_DATA subcommands. 3799 * 3800 * Input: 3801 * ha: adapter state pointer. 3802 * cmd: Local EXT_IOCTL cmd struct pointer. 3803 * mode: flags. 3804 * 3805 * Returns: 3806 * None, request status indicated in cmd->Status. 3807 * 3808 * Context: 3809 * Kernel context. 
3810 */ 3811 static void 3812 ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3813 { 3814 int out_size = 0; 3815 3816 QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance, 3817 cmd->SubCode); 3818 3819 /* case off on command subcode */ 3820 switch (cmd->SubCode) { 3821 case EXT_SC_GET_STATISTICS: 3822 out_size = sizeof (EXT_HBA_PORT_STAT); 3823 break; 3824 case EXT_SC_GET_FC_STATISTICS: 3825 out_size = sizeof (EXT_HBA_PORT_STAT); 3826 break; 3827 case EXT_SC_GET_PORT_SUMMARY: 3828 out_size = sizeof (EXT_DEVICEDATA); 3829 break; 3830 case EXT_SC_GET_RNID: 3831 out_size = sizeof (EXT_RNID_DATA); 3832 break; 3833 case EXT_SC_GET_TARGET_ID: 3834 out_size = sizeof (EXT_DEST_ADDR); 3835 break; 3836 case EXT_SC_GET_BEACON_STATE: 3837 out_size = sizeof (EXT_BEACON_CONTROL); 3838 break; 3839 case EXT_SC_GET_FC4_STATISTICS: 3840 out_size = sizeof (EXT_HBA_FC4STATISTICS); 3841 break; 3842 case EXT_SC_GET_DCBX_PARAM: 3843 out_size = EXT_DEF_DCBX_PARAM_BUF_SIZE; 3844 break; 3845 case EXT_SC_GET_RESOURCE_CNTS: 3846 out_size = sizeof (EXT_RESOURCE_CNTS); 3847 break; 3848 case EXT_SC_GET_FCF_LIST: 3849 out_size = sizeof (EXT_FCF_LIST); 3850 break; 3851 case EXT_SC_GET_SCSI_ADDR: 3852 case EXT_SC_GET_ERR_DETECTIONS: 3853 case EXT_SC_GET_BUS_MODE: 3854 case EXT_SC_GET_DR_DUMP_BUF: 3855 case EXT_SC_GET_RISC_CODE: 3856 case EXT_SC_GET_FLASH_RAM: 3857 case EXT_SC_GET_LINK_STATUS: 3858 case EXT_SC_GET_LOOP_ID: 3859 case EXT_SC_GET_LUN_BITMASK: 3860 case EXT_SC_GET_PORT_DATABASE: 3861 case EXT_SC_GET_PORT_DATABASE_MEM: 3862 case EXT_SC_GET_POSITION_MAP: 3863 case EXT_SC_GET_RETRY_CNT: 3864 case EXT_SC_GET_RTIN: 3865 case EXT_SC_GET_FC_LUN_BITMASK: 3866 case EXT_SC_GET_SEL_TIMEOUT: 3867 default: 3868 /* function not supported. 
*/ 3869 EL(ha, "failed, function not supported=%d\n", cmd->SubCode); 3870 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 3871 cmd->ResponseLen = 0; 3872 return; 3873 } 3874 3875 if (cmd->ResponseLen < out_size) { 3876 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 3877 cmd->DetailStatus = out_size; 3878 EL(ha, "failed, ResponseLen=%xh, size=%xh\n", 3879 cmd->ResponseLen, out_size); 3880 cmd->ResponseLen = 0; 3881 return; 3882 } 3883 3884 switch (cmd->SubCode) { 3885 case EXT_SC_GET_RNID: 3886 ql_get_rnid_parameters(ha, cmd, mode); 3887 break; 3888 case EXT_SC_GET_STATISTICS: 3889 ql_get_statistics(ha, cmd, mode); 3890 break; 3891 case EXT_SC_GET_FC_STATISTICS: 3892 ql_get_statistics_fc(ha, cmd, mode); 3893 break; 3894 case EXT_SC_GET_FC4_STATISTICS: 3895 ql_get_statistics_fc4(ha, cmd, mode); 3896 break; 3897 case EXT_SC_GET_PORT_SUMMARY: 3898 ql_get_port_summary(ha, cmd, mode); 3899 break; 3900 case EXT_SC_GET_TARGET_ID: 3901 ql_get_target_id(ha, cmd, mode); 3902 break; 3903 case EXT_SC_GET_BEACON_STATE: 3904 ql_get_led_state(ha, cmd, mode); 3905 break; 3906 case EXT_SC_GET_DCBX_PARAM: 3907 ql_get_dcbx_parameters(ha, cmd, mode); 3908 break; 3909 case EXT_SC_GET_FCF_LIST: 3910 ql_get_fcf_list(ha, cmd, mode); 3911 break; 3912 case EXT_SC_GET_RESOURCE_CNTS: 3913 ql_get_resource_counts(ha, cmd, mode); 3914 break; 3915 } 3916 3917 if (cmd->Status != EXT_STATUS_OK) { 3918 EL(ha, "failed, Status=%d\n", cmd->Status); 3919 } else { 3920 /*EMPTY*/ 3921 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3922 } 3923 } 3924 3925 /* ******************************************************************** */ 3926 /* Helper Functions */ 3927 /* ******************************************************************** */ 3928 3929 /* 3930 * ql_lun_count 3931 * Get numbers of LUNS on target. 3932 * 3933 * Input: 3934 * ha: adapter state pointer. 3935 * q: device queue pointer. 3936 * 3937 * Returns: 3938 * Number of LUNs. 3939 * 3940 * Context: 3941 * Kernel context. 
 */
static int
ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int	cnt;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Bypass LUNs that failed. */
	/* Prefer REPORT LUNS; fall back to an INQUIRY scan if it fails. */
	cnt = ql_report_lun(ha, tq);
	if (cnt == 0) {
		cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (cnt);
}

/*
 * ql_report_lun
 *	Get numbers of LUNS using report LUN command.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	q:	target queue pointer.
 *
 * Returns:
 *	Number of LUNs.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int			rval;
	uint8_t			retries;
	ql_mbx_iocb_t		*pkt;
	ql_rpt_lun_lst_t	*rpt;
	dma_mem_t		dma_mem;
	uint32_t		pkt_size, cnt;
	uint16_t		comp_status;
	uint8_t			scsi_status_h, scsi_status_l, *reqs;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		return (0);
	}

	/* One allocation: IOCB followed by the REPORT LUNS response list. */
	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		kmem_free(pkt, pkt_size);
		return (0);
	}

	/* Build and issue the command, retrying up to 4 times. */
	for (retries = 0; retries < 4; retries++) {
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* 24xx-style command IOCB (type 7). */
			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set Virtual Port ID */
			pkt->cmd24.vp_index = ha->vp_index;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = LE_16(15);

			/* Load SCSI CDB: REPORT LUNS, allocation length. */
			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
			pkt->cmd24.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			/* CDB is carried in 4-byte big-endian words. */
			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			/* Set transfer direction. */
			pkt->cmd24.control_flags = CF_RD;

			/* Set data segment count. */
			pkt->cmd24.dseg_count = LE_16(1);

			/* Load total byte count. */
			/* Load data descriptor. */
			pkt->cmd24.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.total_byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd24.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* Legacy 64-bit addressing IOCB (type 3). */
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			pkt->cmd3.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd3.timeout = LE_16(15);
			pkt->cmd3.dseg_count = LE_16(1);
			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
			pkt->cmd3.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd3.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		} else {
			/* Legacy 32-bit addressing IOCB (type 2). */
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			pkt->cmd.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd.timeout = LE_16(15);
			pkt->cmd.dseg_count = LE_16(1);
			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
			pkt->cmd.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd.dseg_0_address = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		}

		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* Sync in coming DMA buffer. */
		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy in coming DMA data. */
		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);

		/* Extract completion/SCSI status per IOCB format. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
			scsi_status_h = pkt->sts24.scsi_status_h;
			scsi_status_l = pkt->sts24.scsi_status_l;
			/* Sense data follows the FCP response data, if any. */
			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
			reqs = &pkt->sts24.rsp_sense_data[cnt];
		} else {
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
			scsi_status_h = pkt->sts.scsi_status_h;
			scsi_status_l = pkt->sts.scsi_status_l;
			reqs = &pkt->sts.req_sense_data[0];
		}
		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			rval = QL_FUNCTION_PARAMETER_ERROR;
		}

		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
		    scsi_status_l & STATUS_CHECK) {
			/* Device underrun, treat as OK. */
			if (rval == QL_SUCCESS &&
			    comp_status == CS_DATA_UNDERRUN &&
			    scsi_status_h & FCP_RESID_UNDER) {
				break;
			}

			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
			    comp_status, scsi_status_h, scsi_status_l);

			if (rval == QL_SUCCESS) {
				/* Port-level failures are not retried. */
				if ((comp_status == CS_TIMEOUT) ||
				    (comp_status == CS_PORT_UNAVAILABLE) ||
				    (comp_status == CS_PORT_LOGGED_OUT)) {
					rval = QL_FUNCTION_TIMEOUT;
					break;
				}
				rval = QL_FUNCTION_FAILED;
			} else if (rval == QL_ABORTED) {
				break;
			}

			if (scsi_status_l & STATUS_CHECK) {
				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
				    reqs[1], reqs[2], reqs[3], reqs[4],
				    reqs[5], reqs[6], reqs[7], reqs[8],
				    reqs[9], reqs[10], reqs[11], reqs[12],
				    reqs[13], reqs[14], reqs[15], reqs[16],
				    reqs[17]);
			}
		} else {
			break;
		}
		/* Clear the packet before rebuilding it for the retry. */
		bzero((caddr_t)pkt, pkt_size);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
		rval = 0;
	} else {
		QL_PRINT_9(CE_CONT, "(%d): LUN list\n", ha->instance);
		/*
		 * NOTE(review): the dump length uses rpt->hdr.len without
		 * BE_32() while the count below byte-swaps it -- confirm
		 * which byte order hdr.len is in at this point.
		 */
		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
		/* Each REPORT LUNS list entry is 8 bytes. */
		rval = (int)(BE_32(rpt->hdr.len) / 8);
	}

	kmem_free(pkt, pkt_size);
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_inq_scan
 *	Get numbers of LUNS using inquiry command.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *	count:	scan for the number of existing LUNs.
 *
 * Returns:
 *	Number of LUNs.
 *
 * Context:
 *	Kernel context.
4223 */ 4224 static int 4225 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count) 4226 { 4227 int lun, cnt, rval; 4228 ql_mbx_iocb_t *pkt; 4229 uint8_t *inq; 4230 uint32_t pkt_size; 4231 4232 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 4233 4234 pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE; 4235 pkt = kmem_zalloc(pkt_size, KM_SLEEP); 4236 inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t)); 4237 4238 cnt = 0; 4239 for (lun = 0; lun < MAX_LUNS; lun++) { 4240 4241 if (DRIVER_SUSPENDED(ha)) { 4242 rval = QL_LOOP_DOWN; 4243 cnt = 0; 4244 break; 4245 } 4246 4247 rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE); 4248 if (rval == QL_SUCCESS) { 4249 switch (*inq) { 4250 case DTYPE_DIRECT: 4251 case DTYPE_PROCESSOR: /* Appliance. */ 4252 case DTYPE_WORM: 4253 case DTYPE_RODIRECT: 4254 case DTYPE_SCANNER: 4255 case DTYPE_OPTICAL: 4256 case DTYPE_CHANGER: 4257 case DTYPE_ESI: 4258 cnt++; 4259 break; 4260 case DTYPE_SEQUENTIAL: 4261 cnt++; 4262 tq->flags |= TQF_TAPE_DEVICE; 4263 break; 4264 default: 4265 QL_PRINT_9(CE_CONT, "(%d): failed, " 4266 "unsupported device id=%xh, lun=%d, " 4267 "type=%xh\n", ha->instance, tq->loop_id, 4268 lun, *inq); 4269 break; 4270 } 4271 4272 if (*inq == DTYPE_ESI || cnt >= count) { 4273 break; 4274 } 4275 } else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) { 4276 cnt = 0; 4277 break; 4278 } 4279 } 4280 4281 kmem_free(pkt, pkt_size); 4282 4283 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 4284 4285 return (cnt); 4286 } 4287 4288 /* 4289 * ql_inq 4290 * Issue inquiry command. 4291 * 4292 * Input: 4293 * ha: adapter state pointer. 4294 * tq: target queue pointer. 4295 * lun: LUN number. 4296 * pkt: command and buffer pointer. 4297 * inq_len: amount of inquiry data. 4298 * 4299 * Returns: 4300 * ql local function return status code. 4301 * 4302 * Context: 4303 * Kernel context. 
4304 */ 4305 static int 4306 ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt, 4307 uint8_t inq_len) 4308 { 4309 dma_mem_t dma_mem; 4310 int rval, retries; 4311 uint32_t pkt_size, cnt; 4312 uint16_t comp_status; 4313 uint8_t scsi_status_h, scsi_status_l, *reqs; 4314 caddr_t inq_data; 4315 4316 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 4317 4318 if (DRIVER_SUSPENDED(ha)) { 4319 EL(ha, "failed, loop down\n"); 4320 return (QL_FUNCTION_TIMEOUT); 4321 } 4322 4323 pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len); 4324 bzero((caddr_t)pkt, pkt_size); 4325 4326 inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t); 4327 4328 /* Get DMA memory for the IOCB */ 4329 if (ql_get_dma_mem(ha, &dma_mem, inq_len, 4330 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) { 4331 cmn_err(CE_WARN, "%s(%d): DMA memory " 4332 "alloc failed", QL_NAME, ha->instance); 4333 return (0); 4334 } 4335 4336 for (retries = 0; retries < 4; retries++) { 4337 if (CFG_IST(ha, CFG_CTRL_24258081)) { 4338 pkt->cmd24.entry_type = IOCB_CMD_TYPE_7; 4339 pkt->cmd24.entry_count = 1; 4340 4341 /* Set LUN number */ 4342 pkt->cmd24.fcp_lun[2] = LSB(lun); 4343 pkt->cmd24.fcp_lun[3] = MSB(lun); 4344 4345 /* Set N_port handle */ 4346 pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id); 4347 4348 /* Set target ID */ 4349 pkt->cmd24.target_id[0] = tq->d_id.b.al_pa; 4350 pkt->cmd24.target_id[1] = tq->d_id.b.area; 4351 pkt->cmd24.target_id[2] = tq->d_id.b.domain; 4352 4353 /* Set Virtual Port ID */ 4354 pkt->cmd24.vp_index = ha->vp_index; 4355 4356 /* Set ISP command timeout. */ 4357 pkt->cmd24.timeout = LE_16(15); 4358 4359 /* Load SCSI CDB */ 4360 pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY; 4361 pkt->cmd24.scsi_cdb[4] = inq_len; 4362 for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) { 4363 ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb 4364 + cnt, 4); 4365 } 4366 4367 /* Set tag queue control flags */ 4368 pkt->cmd24.task = TA_STAG; 4369 4370 /* Set transfer direction. 
*/ 4371 pkt->cmd24.control_flags = CF_RD; 4372 4373 /* Set data segment count. */ 4374 pkt->cmd24.dseg_count = LE_16(1); 4375 4376 /* Load total byte count. */ 4377 pkt->cmd24.total_byte_count = LE_32(inq_len); 4378 4379 /* Load data descriptor. */ 4380 pkt->cmd24.dseg_0_address[0] = (uint32_t) 4381 LE_32(LSD(dma_mem.cookie.dmac_laddress)); 4382 pkt->cmd24.dseg_0_address[1] = (uint32_t) 4383 LE_32(MSD(dma_mem.cookie.dmac_laddress)); 4384 pkt->cmd24.dseg_0_length = LE_32(inq_len); 4385 } else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) { 4386 pkt->cmd3.entry_type = IOCB_CMD_TYPE_3; 4387 cnt = CMD_TYPE_3_DATA_SEGMENTS; 4388 4389 pkt->cmd3.entry_count = 1; 4390 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) { 4391 pkt->cmd3.target_l = LSB(tq->loop_id); 4392 pkt->cmd3.target_h = MSB(tq->loop_id); 4393 } else { 4394 pkt->cmd3.target_h = LSB(tq->loop_id); 4395 } 4396 pkt->cmd3.lun_l = LSB(lun); 4397 pkt->cmd3.lun_h = MSB(lun); 4398 pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG; 4399 pkt->cmd3.timeout = LE_16(15); 4400 pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY; 4401 pkt->cmd3.scsi_cdb[4] = inq_len; 4402 pkt->cmd3.dseg_count = LE_16(1); 4403 pkt->cmd3.byte_count = LE_32(inq_len); 4404 pkt->cmd3.dseg_0_address[0] = (uint32_t) 4405 LE_32(LSD(dma_mem.cookie.dmac_laddress)); 4406 pkt->cmd3.dseg_0_address[1] = (uint32_t) 4407 LE_32(MSD(dma_mem.cookie.dmac_laddress)); 4408 pkt->cmd3.dseg_0_length = LE_32(inq_len); 4409 } else { 4410 pkt->cmd.entry_type = IOCB_CMD_TYPE_2; 4411 cnt = CMD_TYPE_2_DATA_SEGMENTS; 4412 4413 pkt->cmd.entry_count = 1; 4414 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) { 4415 pkt->cmd.target_l = LSB(tq->loop_id); 4416 pkt->cmd.target_h = MSB(tq->loop_id); 4417 } else { 4418 pkt->cmd.target_h = LSB(tq->loop_id); 4419 } 4420 pkt->cmd.lun_l = LSB(lun); 4421 pkt->cmd.lun_h = MSB(lun); 4422 pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG; 4423 pkt->cmd.timeout = LE_16(15); 4424 pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY; 4425 pkt->cmd.scsi_cdb[4] = inq_len; 4426 pkt->cmd.dseg_count 
= LE_16(1); 4427 pkt->cmd.byte_count = LE_32(inq_len); 4428 pkt->cmd.dseg_0_address = (uint32_t) 4429 LE_32(LSD(dma_mem.cookie.dmac_laddress)); 4430 pkt->cmd.dseg_0_length = LE_32(inq_len); 4431 } 4432 4433 /* rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */ 4434 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, 4435 sizeof (ql_mbx_iocb_t)); 4436 4437 /* Sync in coming IOCB DMA buffer. */ 4438 (void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size, 4439 DDI_DMA_SYNC_FORKERNEL); 4440 /* Copy in coming DMA data. */ 4441 ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data, 4442 (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR); 4443 4444 if (CFG_IST(ha, CFG_CTRL_24258081)) { 4445 pkt->sts24.entry_status = (uint8_t) 4446 (pkt->sts24.entry_status & 0x3c); 4447 comp_status = (uint16_t)LE_16(pkt->sts24.comp_status); 4448 scsi_status_h = pkt->sts24.scsi_status_h; 4449 scsi_status_l = pkt->sts24.scsi_status_l; 4450 cnt = scsi_status_h & FCP_RSP_LEN_VALID ? 4451 LE_32(pkt->sts24.fcp_rsp_data_length) : 0; 4452 reqs = &pkt->sts24.rsp_sense_data[cnt]; 4453 } else { 4454 pkt->sts.entry_status = (uint8_t) 4455 (pkt->sts.entry_status & 0x7e); 4456 comp_status = (uint16_t)LE_16(pkt->sts.comp_status); 4457 scsi_status_h = pkt->sts.scsi_status_h; 4458 scsi_status_l = pkt->sts.scsi_status_l; 4459 reqs = &pkt->sts.req_sense_data[0]; 4460 } 4461 if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) { 4462 EL(ha, "failed, entry_status=%xh, d_id=%xh\n", 4463 pkt->sts.entry_status, tq->d_id.b24); 4464 rval = QL_FUNCTION_PARAMETER_ERROR; 4465 } 4466 4467 if (rval != QL_SUCCESS || comp_status != CS_COMPLETE || 4468 scsi_status_l & STATUS_CHECK) { 4469 EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, " 4470 "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24, 4471 comp_status, scsi_status_h, scsi_status_l); 4472 4473 if (rval == QL_SUCCESS) { 4474 if ((comp_status == CS_TIMEOUT) || 4475 (comp_status == CS_PORT_UNAVAILABLE) || 4476 (comp_status == CS_PORT_LOGGED_OUT)) { 4477 rval = 
QL_FUNCTION_TIMEOUT;
					break;
				}
				rval = QL_FUNCTION_FAILED;
			}

			if (scsi_status_l & STATUS_CHECK) {
				/* Log the 18 bytes of request sense data. */
				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
				    reqs[1], reqs[2], reqs[3], reqs[4],
				    reqs[5], reqs[6], reqs[7], reqs[8],
				    reqs[9], reqs[10], reqs[11], reqs[12],
				    reqs[13], reqs[14], reqs[15], reqs[16],
				    reqs[17]);
			}
		} else {
			/* Command completed cleanly - leave the retry loop. */
			break;
		}
	}
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_get_buffer_data
 *	Copies data from user space to a kernel buffer, one byte at a
 *	time, so that on failure the returned count is the exact number
 *	of bytes successfully transferred.
 *
 * Input:
 *	src:	User source buffer address.
 *	dst:	Kernel destination buffer address.
 *	size:	Amount of data.
 *	mode:	flags (passed through to ddi_copyin).
 *
 * Returns:
 *	Returns number of bytes transferred (== size on full success).
 *
 * Context:
 *	Kernel context.
 */
static uint32_t
ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
{
	uint32_t	cnt;

	for (cnt = 0; cnt < size; cnt++) {
		if (ddi_copyin(src++, dst++, 1, mode) != 0) {
			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
			break;
		}
	}

	return (cnt);
}

/*
 * ql_send_buffer_data
 *	Copies data from a kernel buffer to user space, one byte at a
 *	time, so the returned count is exact on partial failure.
 *
 * Input:
 *	src:	Kernel source buffer address.
 *	dst:	User destination buffer address.
 *	size:	Amount of data.
 *	mode:	flags (passed through to ddi_copyout).
 *
 * Returns:
 *	Returns number of bytes transferred.
 *
 * Context:
 *	Kernel context.
4550 */ 4551 static uint32_t 4552 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode) 4553 { 4554 uint32_t cnt; 4555 4556 for (cnt = 0; cnt < size; cnt++) { 4557 if (ddi_copyout(src++, dst++, 1, mode) != 0) { 4558 QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n"); 4559 break; 4560 } 4561 } 4562 4563 return (cnt); 4564 } 4565 4566 /* 4567 * ql_find_port 4568 * Locates device queue. 4569 * 4570 * Input: 4571 * ha: adapter state pointer. 4572 * name: device port name. 4573 * 4574 * Returns: 4575 * Returns target queue pointer. 4576 * 4577 * Context: 4578 * Kernel context. 4579 */ 4580 static ql_tgt_t * 4581 ql_find_port(ql_adapter_state_t *ha, uint8_t *name, uint16_t type) 4582 { 4583 ql_link_t *link; 4584 ql_tgt_t *tq; 4585 uint16_t index; 4586 4587 /* Scan port list for requested target */ 4588 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) { 4589 for (link = ha->dev[index].first; link != NULL; 4590 link = link->next) { 4591 tq = link->base_address; 4592 4593 switch (type) { 4594 case QLNT_LOOP_ID: 4595 if (bcmp(name, &tq->loop_id, 4596 sizeof (uint16_t)) == 0) { 4597 return (tq); 4598 } 4599 break; 4600 case QLNT_PORT: 4601 if (bcmp(name, tq->port_name, 8) == 0) { 4602 return (tq); 4603 } 4604 break; 4605 case QLNT_NODE: 4606 if (bcmp(name, tq->node_name, 8) == 0) { 4607 return (tq); 4608 } 4609 break; 4610 case QLNT_PID: 4611 if (bcmp(name, tq->d_id.r.d_id, 4612 sizeof (tq->d_id.r.d_id)) == 0) { 4613 return (tq); 4614 } 4615 break; 4616 default: 4617 EL(ha, "failed, invalid type=%d\n", type); 4618 return (NULL); 4619 } 4620 } 4621 } 4622 4623 return (NULL); 4624 } 4625 4626 /* 4627 * ql_24xx_flash_desc 4628 * Get flash descriptor table. 4629 * 4630 * Input: 4631 * ha: adapter state pointer. 4632 * 4633 * Returns: 4634 * ql local function return status code. 4635 * 4636 * Context: 4637 * Kernel context. 
 */
static int
ql_24xx_flash_desc(ql_adapter_state_t *ha)
{
	uint32_t	cnt;
	uint16_t	chksum, *bp, data;
	int		rval;
	flash_desc_t	*fdesc;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* No descriptor table location known for this adapter. */
	if (ha->flash_desc_addr == 0) {
		QL_PRINT_9(CE_CONT, "(%d): desc ptr=0\n", ha->instance);
		return (QL_FUNCTION_FAILED);
	}

	/* Read the raw table from flash (word address << 2 = byte addr). */
	fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP);
	rval = ql_dump_fcode(ha, (uint8_t *)fdesc, sizeof (flash_desc_t),
	    ha->flash_desc_addr << 2);
	if (rval != QL_SUCCESS) {
		EL(ha, "read status=%xh\n", rval);
		kmem_free(fdesc, sizeof (flash_desc_t));
		return (rval);
	}

	/*
	 * 16-bit additive checksum over the whole table; a valid table
	 * sums to zero (little-endian words).
	 */
	chksum = 0;
	bp = (uint16_t *)fdesc;
	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
		data = *bp++;
		LITTLE_ENDIAN_16(&data);
		chksum += data;
	}

	/* Convert the on-flash little-endian fields to host order. */
	LITTLE_ENDIAN_32(&fdesc->flash_valid);
	LITTLE_ENDIAN_16(&fdesc->flash_version);
	LITTLE_ENDIAN_16(&fdesc->flash_len);
	LITTLE_ENDIAN_16(&fdesc->flash_checksum);
	LITTLE_ENDIAN_16(&fdesc->flash_manuf);
	LITTLE_ENDIAN_16(&fdesc->flash_id);
	LITTLE_ENDIAN_32(&fdesc->block_size);
	LITTLE_ENDIAN_32(&fdesc->alt_block_size);
	LITTLE_ENDIAN_32(&fdesc->flash_size);
	LITTLE_ENDIAN_32(&fdesc->write_enable_data);
	LITTLE_ENDIAN_32(&fdesc->read_timeout);

	/* flash size in desc table is in 1024 bytes */
	fdesc->flash_size = fdesc->flash_size * 0x400;

	if (chksum != 0 || fdesc->flash_valid != FLASH_DESC_VAILD ||
	    fdesc->flash_version != FLASH_DESC_VERSION) {
		EL(ha, "invalid descriptor table\n");
		kmem_free(fdesc, sizeof (flash_desc_t));
		return (QL_FUNCTION_FAILED);
	}

	/* Publish the validated table into the per-adapter xioctl state. */
	bcopy(fdesc, &xp->fdesc, sizeof (flash_desc_t));
	kmem_free(fdesc, sizeof (flash_desc_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}

/*
 * ql_setup_flash
 *	Gets the manufacturer and id number of the flash chip, and
 *	sets up the size parameter.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	int:	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_setup_flash(ql_adapter_state_t *ha)
{
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = QL_SUCCESS;

	/* Already probed - flash geometry is cached in xp->fdesc. */
	if (xp->fdesc.flash_size != 0) {
		return (rval);
	}

	if (CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id) {
		return (QL_FUNCTION_FAILED);
	}

	if (CFG_IST(ha, CFG_CTRL_258081)) {
		/*
		 * Temporarily set the ha->xioctl->fdesc.flash_size to
		 * 25xx flash size to avoid failing of ql_dump_focde.
		 */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ha->xioctl->fdesc.flash_size = 0x800000;
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->xioctl->fdesc.flash_size = 0x200000;
		} else {
			ha->xioctl->fdesc.flash_size = 0x400000;
		}

		/* Prefer the on-flash descriptor table when present. */
		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
			EL(ha, "flash desc table ok, exit\n");
			return (rval);
		}
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			/* 8021 has a fixed, known flash part. */
			xp->fdesc.flash_manuf = WINBOND_FLASH;
			xp->fdesc.flash_id = WINBOND_FLASHID;
			xp->fdesc.flash_len = 0x17;
		} else {
			(void) ql_24xx_flash_id(ha);
		}

	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
		(void) ql_24xx_flash_id(ha);
	} else {
		/*
		 * Legacy parts: issue the JEDEC software-ID command
		 * sequence directly to read manufacturer and device id.
		 */
		ql_flash_enable(ha);

		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x90);
		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);

		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			/* SBUS cards use a different unlock address pair. */
			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
			ql_write_flash_byte(ha, 0x5555, 0x55);
			ql_write_flash_byte(ha, 0xaaaa, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0002);
		} else {
			ql_write_flash_byte(ha, 0x5555, 0xaa);
			ql_write_flash_byte(ha, 0x2aaa, 0x55);
			ql_write_flash_byte(ha, 0x5555, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0001);
		}

		/* Exit software-ID mode (JEDEC reset command). */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);

		ql_flash_disable(ha);
	}

	/* Default flash descriptor table. */
	xp->fdesc.write_statusreg_cmd = 1;
	xp->fdesc.write_enable_bits = 0;
	xp->fdesc.unprotect_sector_cmd = 0;
	xp->fdesc.protect_sector_cmd = 0;
	xp->fdesc.write_disable_bits = 0x9c;
	xp->fdesc.block_size = 0x10000;
	xp->fdesc.erase_cmd = 0xd8;

	/*
	 * Map manufacturer/device id to flash size and, where needed,
	 * part-specific command overrides.
	 */
	switch (xp->fdesc.flash_manuf) {
	case AMD_FLASH:
		switch (xp->fdesc.flash_id) {
		case SPAN_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		case AMD_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case AMD_FLASHID_512K:
		case AMD_FLASHID_512Kt:
		case AMD_FLASHID_512Kb:
			if (CFG_IST(ha, CFG_SBUS_CARD)) {
				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
			} else {
				xp->fdesc.flash_size = 0x80000;
			}
			break;
		case AMD_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ST_FLASH:
		switch (xp->fdesc.flash_id) {
		case ST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case ST_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case ST_FLASHID_M25PXX:
			/* flash_len encodes log2(size in bytes). */
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case SST_FLASH:
		switch (xp->fdesc.flash_id) {
		case SST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case SST_FLASHID_1024K_A:
			xp->fdesc.flash_size = 0x100000;
			/* This part uses 32k blocks and a 0x52 erase. */
			xp->fdesc.block_size = 0x8000;
			xp->fdesc.erase_cmd = 0x52;
			break;
		case SST_FLASHID_1024K:
		case SST_FLASHID_1024K_B:
			xp->fdesc.flash_size = 0x100000;
			break;
		case SST_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case MXIC_FLASH:
		switch (xp->fdesc.flash_id) {
		case MXIC_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case MXIC_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case MXIC_FLASHID_25LXX:
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ATMEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case ATMEL_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			/* Atmel parts need sector protect/unprotect cmds. */
			xp->fdesc.write_disable_bits = 0xbc;
			xp->fdesc.unprotect_sector_cmd = 0x39;
			xp->fdesc.protect_sector_cmd = 0x36;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case WINBOND_FLASH:
		switch (xp->fdesc.flash_id) {
		case WINBOND_FLASHID:
			if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x16) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x17) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case INTEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case INTEL_FLASHID:
			if (xp->fdesc.flash_len == 0x11) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x12) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x13) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	default:
		rval = QL_FUNCTION_FAILED;
		break;
	}

	/* Try flash table later. */
	if (rval != QL_SUCCESS && CFG_IST(ha, CFG_CTRL_24258081)) {
		EL(ha, "no default id\n");
		return (QL_SUCCESS);
	}

	/*
	 * hack for non std 2312 and 6312 boards. hardware people need to
	 * use either the 128k flash chip (original), or something larger.
	 * For driver purposes, we'll treat it as a 128k flash chip.
	 */
	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
	    ha->device_id == 0x2322 || ha->device_id == 0x6322) &&
	    (xp->fdesc.flash_size > 0x20000) &&
	    (CFG_IST(ha, CFG_SBUS_CARD) == 0)) {
		EL(ha, "chip exceeds max size: %xh, using 128k\n",
		    xp->fdesc.flash_size);
		xp->fdesc.flash_size = 0x20000;
	}

	if (rval == QL_SUCCESS) {
		EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id,
		    xp->fdesc.flash_size);
	} else {
		EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id);
	}

	return (rval);
}

/*
 * ql_flash_fcode_load
 *	Loads fcode data into flash from application.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	bp:	user buffer address.
 *	size:	user buffer size.
 *	mode:	flags
 *
 * Returns:
 *	errno value: 0 on success, ENOMEM or EFAULT on failure.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
    int mode)
{
	uint8_t		*bfp;
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Image must fit within the probed flash part. */
	if (bsize > xp->fdesc.flash_size) {
		EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
		    xp->fdesc.flash_size);
		return (ENOMEM);
	}

	/* NOTE(review): kmem_zalloc with KM_SLEEP should not return NULL. */
	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		rval = ENOMEM;
	} else {
		if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
			EL(ha, "failed, ddi_copyin\n");
			rval = EFAULT;
		} else if (ql_load_fcode(ha, bfp, bsize, 0) != QL_SUCCESS) {
			EL(ha, "failed, load_fcode\n");
			rval = EFAULT;
		} else {
			/* Reset caches on all adapter instances. */
			ql_update_flash_caches(ha);
			rval = 0;
		}
		kmem_free(bfp, bsize);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_load_fcode
 *	Loads fcode in to flash.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dp:	data pointer.
 *	size:	data length.
 *	addr:	flash byte address.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size, uint32_t addr)
{
	uint32_t	cnt;
	int		rval;

	/* 24xx-and-later parts have their own flash programming path. */
	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		return (ql_24xx_load_flash(ha, dp, size, addr));
	}

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * sbus has an additional check to make
		 * sure they don't brick the HBA.
		 */
		if (dp[0] != 0xf1) {
			EL(ha, "failed, incorrect fcode for sbus\n");
			return (QL_FUNCTION_PARAMETER_ERROR);
		}
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, 0);

	if (rval == QL_SUCCESS) {
		/* Write fcode data to flash. */
		for (cnt = 0; cnt < (uint32_t)size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				drv_usecwait(1);
			}
			rval = ql_program_flash_address(ha, addr++, *dp++);
			if (rval != QL_SUCCESS)
				break;
		}
	}

	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_flash_fcode_dump
 *	Dumps FLASH to application.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	bp:	user buffer address.
 *	bsize:	user buffer size
 *	faddr:	flash byte address
 *	mode:	flags
 *
 * Returns:
 *	errno value: 0 on success, ENOMEM or EFAULT on failure.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
    uint32_t faddr, int mode)
{
	uint8_t		*bfp;
	int		rval;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* adjust max read size to flash size */
	if (bsize > xp->fdesc.flash_size) {
		EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
		    xp->fdesc.flash_size);
		bsize = xp->fdesc.flash_size;
	}

	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		rval = ENOMEM;
	} else {
		/* Dump Flash fcode.
		 */
		rval = ql_dump_fcode(ha, bfp, bsize, faddr);

		if (rval != QL_SUCCESS) {
			EL(ha, "failed, dump_fcode = %x\n", rval);
			rval = EFAULT;
		} else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
			EL(ha, "failed, ddi_copyout\n");
			rval = EFAULT;
		} else {
			rval = 0;
		}
		kmem_free(bfp, bsize);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_dump_fcode
 *	Dumps fcode from flash.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	dp:		data pointer.
 *	size:		data length in bytes.
 *	startpos:	starting position in flash (byte address).
 *
 * Returns:
 *	ql local function return status code.
 *	NOTE(review): the DMA setup failure path returns ENOMEM (an
 *	errno) rather than a QL_* status - verify callers tolerate this.
 *
 * Context:
 *	Kernel context.
 *
 */
int
ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
    uint32_t startpos)
{
	uint32_t	cnt, data, addr;
	uint8_t		bp[4], *src;
	int		fp_rval, rval = QL_SUCCESS;
	dma_mem_t	mem;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* make sure startpos+size doesn't exceed flash */
	if (size + startpos > ha->xioctl->fdesc.flash_size) {
		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
		    size, startpos, ha->xioctl->fdesc.flash_size);
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		/* check start addr is 32 bit aligned for 24xx */
		if ((startpos & 0x3) != 0) {
			/*
			 * Read the word containing startpos and copy out
			 * the leading unaligned bytes one at a time.
			 */
			rval = ql_24xx_read_flash(ha,
			    ha->flash_data_addr | startpos >> 2, &data);
			if (rval != QL_SUCCESS) {
				EL(ha, "failed2, rval = %xh\n", rval);
				return (rval);
			}
			bp[0] = LSB(LSW(data));
			bp[1] = MSB(LSW(data));
			bp[2] = LSB(MSW(data));
			bp[3] = MSB(MSW(data));
			while (size && startpos & 0x3) {
				*dp++ = bp[startpos & 0x3];
				startpos++;
				size--;
			}
			if (size == 0) {
				QL_PRINT_9(CE_CONT, "(%d): done2\n",
				    ha->instance);
				return (rval);
			}
		}

		/* adjust 24xx start addr for 32 bit words */
		addr = startpos / 4 | ha->flash_data_addr;
	}

	bzero(&mem, sizeof (dma_mem_t));
	/* Check for Fast page is supported */
	if ((ha->pha->task_daemon_flags & FIRMWARE_UP) &&
	    (CFG_IST(ha, CFG_CTRL_2581))) {
		fp_rval = QL_SUCCESS;
		/* Setup DMA buffer. */
		rval = ql_get_dma_mem(ha, &mem, size,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, ql_get_dma_mem=%xh\n",
			    rval);
			return (ENOMEM);
		}
	} else {
		fp_rval = QL_NOT_SUPPORTED;
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
		ql_flash_enable(ha);
	}

	/* Read fcode data from flash. */
	while (size) {
		/* Allow other system activity. */
		if (size % 0x1000 == 0) {
			ql_delay(ha, 100000);
		}
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/*
			 * Fast-page path: bulk read via RISC RAM DMA;
			 * on its failure fall through to word reads for
			 * the remainder.
			 */
			if (fp_rval == QL_SUCCESS && (addr & 0x3f) == 0) {
				cnt = (size + 3) >> 2;
				fp_rval = ql_rd_risc_ram(ha, addr,
				    mem.cookie.dmac_laddress, cnt);
				if (fp_rval == QL_SUCCESS) {
					for (src = mem.bp; size; size--) {
						*dp++ = *src++;
					}
					addr += cnt;
					continue;
				}
			}
			rval = ql_24xx_read_flash(ha, addr++,
			    &data);
			if (rval != QL_SUCCESS) {
				break;
			}
			bp[0] = LSB(LSW(data));
			bp[1] = MSB(LSW(data));
			bp[2] = LSB(MSW(data));
			bp[3] = MSB(MSW(data));
			for (cnt = 0; size && cnt < 4; size--) {
				*dp++ = bp[cnt++];
			}
		} else {
			/* Legacy parts: simple byte reads. */
			*dp++ = (uint8_t)ql_read_flash_byte(ha, startpos++);
			size--;
		}
	}

	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
		ql_flash_disable(ha);
	}

	GLOBAL_HW_UNLOCK();

	if (mem.dma_handle != NULL) {
		ql_free_dma_resource(ha, &mem);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_program_flash_address
 *	Program flash address.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	addr:	flash byte address.
 *	data:	data to be written to flash.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr,
    uint8_t data)
{
	int	rval;

	/* Write Program Command Sequence */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	} else {
		/* Standard JEDEC byte-program unlock sequence. */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	}

	/* Wait for write to complete. */
	rval = ql_poll_flash(ha, addr, data);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval=%xh\n", rval);
	}
	return (rval);
}

/*
 * ql_set_rnid_parameters
 *	Set RNID parameters.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	User space CT arguments pointer.
 *	mode:	flags.
 */
static void
ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_SET_RNID_REQ	tmp_set;
	EXT_RNID_DATA		*tmp_buf;
	int			rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	cmd->ResponseLen = 0; /* NO response to caller.
 */
	if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
		/* parameter error */
		EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
		    cmd->RequestLen);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
	    cmd->RequestLen, mode);
	if (rval != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate memory for command. */
	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);

	/* Read current RNID data so unspecified fields are preserved. */
	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
	    (caddr_t)tmp_buf);
	if (rval != QL_SUCCESS) {
		/* error */
		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Now set the requested params. */
	bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
	bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
	bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);

	rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
	    (caddr_t)tmp_buf);
	if (rval != QL_SUCCESS) {
		/* error */
		EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
	}

	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_get_rnid_parameters
 *	Get RNID parameters.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	User space CT arguments pointer.
 *	mode:	flags.
 */
static void
ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_RNID_DATA	*tmp_buf;
	uint32_t	rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate memory for command. */
	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);

	/* Send command */
	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
	    (caddr_t)tmp_buf);
	if (rval != QL_SUCCESS) {
		/* error */
		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Copy the response */
	if (ql_send_buffer_data((caddr_t)tmp_buf,
	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
	    sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
		EL(ha, "failed, ddi_copyout\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
		cmd->ResponseLen = sizeof (EXT_RNID_DATA);
	}

	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
}

/*
 * ql_reset_statistics
 *	Performs EXT_SC_RST_STATISTICS subcommand of EXT_CC_SET_DATA.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *
 * Returns:
 *	ql local function return status code; request status is also
 *	indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
{
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return (QL_FUNCTION_SUSPENDED);
	}

	/*
	 * Even if the firmware link-status reset fails, the driver's own
	 * counters below are still cleared; only cmd->Status reflects it.
	 */
	rval = ql_reset_link_status(ha);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	}

	/* I/O counters are updated under the task daemon lock. */
	TASK_DAEMON_LOCK(ha);
	xp->IosRequested = 0;
	xp->BytesRequested = 0;
	xp->IOInputRequests = 0;
	xp->IOOutputRequests = 0;
	xp->IOControlRequests = 0;
	xp->IOInputMByteCnt = 0;
	xp->IOOutputMByteCnt = 0;
	xp->IOOutputByteCnt = 0;
	xp->IOInputByteCnt = 0;
	TASK_DAEMON_UNLOCK(ha);

	/* Error/interrupt counters are updated under the interrupt lock. */
	INTR_LOCK(ha);
	xp->ControllerErrorCount = 0;
	xp->DeviceErrorCount = 0;
	xp->TotalLipResets = 0;
	xp->TotalInterrupts = 0;
	INTR_UNLOCK(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_get_statistics
 *	Performs EXT_SC_GET_STATISTICS subcommand of EXT_CC_GET_DATA.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_HBA_PORT_STAT	ps = {0};
	ql_link_stats_t		*ls;
	int			rval;
	ql_xioctl_t		*xp = ha->xioctl;
	int			retry = 10;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait (bounded) for the adapter to leave transitional states. */
	while (ha->task_daemon_flags &
	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
		ql_delay(ha, 10000000);	/* 10 second delay */

		retry--;

		if (retry == 0) {	/* effectively 100 seconds */
			EL(ha, "failed, LOOP_NOT_READY\n");
			cmd->Status = EXT_STATUS_BUSY;
			cmd->ResponseLen = 0;
			return;
		}
	}

	/* Allocate memory for command. */
	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);

	/*
	 * I think these are supposed to be port statistics
	 * the loop ID or port ID should be in cmd->Instance.
	 */
	rval = ql_get_status_counts(ha, (uint16_t)
	    (ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id),
	    sizeof (ql_link_stats_t), (caddr_t)ls, 0);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
		    ha->loop_id);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	} else {
		/* Driver-maintained counters... */
		ps.ControllerErrorCount = xp->ControllerErrorCount;
		ps.DeviceErrorCount = xp->DeviceErrorCount;
		ps.IoCount = (uint32_t)(xp->IOInputRequests +
		    xp->IOOutputRequests + xp->IOControlRequests);
		ps.MBytesCount = (uint32_t)(xp->IOInputMByteCnt +
		    xp->IOOutputMByteCnt);
		ps.LipResetCount = xp->TotalLipResets;
		ps.InterruptCount = xp->TotalInterrupts;
		/* ...plus firmware link statistics (little-endian). */
		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);

		rval = ddi_copyout((void *)&ps,
		    (void *)(uintptr_t)cmd->ResponseAdr,
		    sizeof (EXT_HBA_PORT_STAT), mode);
		if (rval != 0) {
			EL(ha, "failed, ddi_copyout\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->ResponseLen = 0;
		} else {
			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
		}
	}

	kmem_free(ls, sizeof (ql_link_stats_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_get_statistics_fc
 *	Performs EXT_SC_GET_FC_STATISTICS subcommand of EXT_CC_GET_DATA
 *	for a specific remote port selected by WWPN.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_HBA_PORT_STAT	ps = {0};
	ql_link_stats_t		*ls;
	int			rval;
	uint16_t		qlnt;
	EXT_DEST_ADDR		pextdestaddr;
	uint8_t			*name;
	ql_tgt_t		*tq = NULL;
	int			retry = 10;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Target port is selected by world-wide port name. */
	qlnt = QLNT_PORT;
	name = pextdestaddr.DestAddr.WWPN;

	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
	    ha->instance, name[0], name[1], name[2], name[3], name[4],
	    name[5], name[6], name[7]);

	tq = ql_find_port(ha, name, qlnt);

	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
		EL(ha, "failed, fc_port not found\n");
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->ResponseLen = 0;
		return;
	}

	/* Wait (bounded) for the adapter to leave transitional states. */
	while (ha->task_daemon_flags &
	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
		ql_delay(ha, 10000000);	/* 10 second delay */

		retry--;

		if (retry == 0) {	/* effectively 100 seconds */
			EL(ha, "failed, LOOP_NOT_READY\n");
			cmd->Status = EXT_STATUS_BUSY;
			cmd->ResponseLen = 0;
			return;
		}
	}

	/* Allocate memory for command. */
	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);

	rval = ql_get_link_status(ha, tq->loop_id, sizeof (ql_link_stats_t),
	    (caddr_t)ls, 0);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, get_link_status=%xh, d_id=%xh\n", rval,
		    tq->d_id.b24);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	} else {
		/* Firmware link statistics are little-endian. */
		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);

		rval = ddi_copyout((void *)&ps,
		    (void *)(uintptr_t)cmd->ResponseAdr,
		    sizeof (EXT_HBA_PORT_STAT), mode);

		if (rval != 0) {
			EL(ha, "failed, ddi_copyout\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->ResponseLen = 0;
		} else {
			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
		}
	}

	kmem_free(ls, sizeof (ql_link_stats_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_get_statistics_fc4
 *	Performs the FC4 statistics subcommand of EXT_CC_GET_DATA.
 *	NOTE(review): original header said EXT_SC_GET_FC_STATISTICS;
 *	presumably EXT_SC_GET_FC4_STATISTICS - verify against dispatcher.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
5742 */ 5743 static void 5744 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 5745 { 5746 uint32_t rval; 5747 EXT_HBA_FC4STATISTICS fc4stats = {0}; 5748 ql_xioctl_t *xp = ha->xioctl; 5749 5750 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 5751 5752 fc4stats.InputRequests = xp->IOInputRequests; 5753 fc4stats.OutputRequests = xp->IOOutputRequests; 5754 fc4stats.ControlRequests = xp->IOControlRequests; 5755 fc4stats.InputMegabytes = xp->IOInputMByteCnt; 5756 fc4stats.OutputMegabytes = xp->IOOutputMByteCnt; 5757 5758 rval = ddi_copyout((void *)&fc4stats, 5759 (void *)(uintptr_t)cmd->ResponseAdr, 5760 sizeof (EXT_HBA_FC4STATISTICS), mode); 5761 5762 if (rval != 0) { 5763 EL(ha, "failed, ddi_copyout\n"); 5764 cmd->Status = EXT_STATUS_COPY_ERR; 5765 cmd->ResponseLen = 0; 5766 } else { 5767 cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS); 5768 } 5769 5770 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 5771 } 5772 5773 /* 5774 * ql_set_led_state 5775 * Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA. 5776 * 5777 * Input: 5778 * ha: adapter state pointer. 5779 * cmd: Local EXT_IOCTL cmd struct pointer. 5780 * mode: flags. 5781 * 5782 * Returns: 5783 * None, request status indicated in cmd->Status. 5784 * 5785 * Context: 5786 * Kernel context. 
5787 */ 5788 static void 5789 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 5790 { 5791 EXT_BEACON_CONTROL bstate; 5792 uint32_t rval; 5793 ql_xioctl_t *xp = ha->xioctl; 5794 5795 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 5796 5797 if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) { 5798 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 5799 cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL); 5800 EL(ha, "done - failed, RequestLen < EXT_BEACON_CONTROL," 5801 " Len=%xh\n", cmd->RequestLen); 5802 cmd->ResponseLen = 0; 5803 return; 5804 } 5805 5806 if (ha->device_id < 0x2300) { 5807 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 5808 cmd->DetailStatus = 0; 5809 EL(ha, "done - failed, Invalid function for HBA model\n"); 5810 cmd->ResponseLen = 0; 5811 return; 5812 } 5813 5814 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate, 5815 cmd->RequestLen, mode); 5816 5817 if (rval != 0) { 5818 cmd->Status = EXT_STATUS_COPY_ERR; 5819 EL(ha, "done - failed, ddi_copyin\n"); 5820 return; 5821 } 5822 5823 switch (bstate.State) { 5824 case EXT_DEF_GRN_BLINK_OFF: /* turn beacon off */ 5825 if (xp->ledstate.BeaconState == BEACON_OFF) { 5826 /* not quite an error -- LED state is already off */ 5827 cmd->Status = EXT_STATUS_OK; 5828 EL(ha, "LED off request -- LED is already off\n"); 5829 break; 5830 } 5831 5832 xp->ledstate.BeaconState = BEACON_OFF; 5833 xp->ledstate.LEDflags = LED_ALL_OFF; 5834 5835 if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) { 5836 cmd->Status = EXT_STATUS_MAILBOX; 5837 } else { 5838 cmd->Status = EXT_STATUS_OK; 5839 } 5840 break; 5841 5842 case EXT_DEF_GRN_BLINK_ON: /* turn beacon on */ 5843 if (xp->ledstate.BeaconState == BEACON_ON) { 5844 /* not quite an error -- LED state is already on */ 5845 cmd->Status = EXT_STATUS_OK; 5846 EL(ha, "LED on request - LED is already on\n"); 5847 break; 5848 } 5849 5850 if ((rval = ql_setup_led(ha)) != QL_SUCCESS) { 5851 cmd->Status = EXT_STATUS_MAILBOX; 5852 break; 5853 } 5854 5855 if 
(CFG_IST(ha, CFG_CTRL_24258081)) { 5856 xp->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24; 5857 } else { 5858 xp->ledstate.LEDflags = LED_GREEN; 5859 } 5860 xp->ledstate.BeaconState = BEACON_ON; 5861 5862 cmd->Status = EXT_STATUS_OK; 5863 break; 5864 default: 5865 cmd->Status = EXT_STATUS_ERR; 5866 EL(ha, "failed, unknown state request %xh\n", bstate.State); 5867 break; 5868 } 5869 5870 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 5871 } 5872 5873 /* 5874 * ql_get_led_state 5875 * Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA. 5876 * 5877 * Input: 5878 * ha: adapter state pointer. 5879 * cmd: Local EXT_IOCTL cmd struct pointer. 5880 * mode: flags. 5881 * 5882 * Returns: 5883 * None, request status indicated in cmd->Status. 5884 * 5885 * Context: 5886 * Kernel context. 5887 */ 5888 static void 5889 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 5890 { 5891 EXT_BEACON_CONTROL bstate = {0}; 5892 uint32_t rval; 5893 ql_xioctl_t *xp = ha->xioctl; 5894 5895 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 5896 5897 if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) { 5898 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 5899 cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL); 5900 EL(ha, "done - failed, ResponseLen < EXT_BEACON_CONTROL," 5901 "Len=%xh\n", cmd->ResponseLen); 5902 cmd->ResponseLen = 0; 5903 return; 5904 } 5905 5906 if (ha->device_id < 0x2300) { 5907 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 5908 cmd->DetailStatus = 0; 5909 EL(ha, "done - failed, Invalid function for HBA model\n"); 5910 cmd->ResponseLen = 0; 5911 return; 5912 } 5913 5914 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) { 5915 cmd->Status = EXT_STATUS_BUSY; 5916 EL(ha, "done - failed, isp abort active\n"); 5917 cmd->ResponseLen = 0; 5918 return; 5919 } 5920 5921 /* inform the user of the current beacon state (off or on) */ 5922 bstate.State = xp->ledstate.BeaconState; 5923 5924 rval = ddi_copyout((void *)&bstate, 5925 (void 
*)(uintptr_t)cmd->ResponseAdr, 5926 sizeof (EXT_BEACON_CONTROL), mode); 5927 5928 if (rval != 0) { 5929 EL(ha, "failed, ddi_copyout\n"); 5930 cmd->Status = EXT_STATUS_COPY_ERR; 5931 cmd->ResponseLen = 0; 5932 } else { 5933 cmd->Status = EXT_STATUS_OK; 5934 cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL); 5935 } 5936 5937 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 5938 } 5939 5940 /* 5941 * ql_blink_led 5942 * Determine the next state of the LED and drive it 5943 * 5944 * Input: 5945 * ha: adapter state pointer. 5946 * 5947 * Context: 5948 * Interrupt context. 5949 */ 5950 void 5951 ql_blink_led(ql_adapter_state_t *ha) 5952 { 5953 uint32_t nextstate; 5954 ql_xioctl_t *xp = ha->xioctl; 5955 5956 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 5957 5958 if (xp->ledstate.BeaconState == BEACON_ON) { 5959 /* determine the next led state */ 5960 if (CFG_IST(ha, CFG_CTRL_24258081)) { 5961 nextstate = (xp->ledstate.LEDflags) & 5962 (~(RD32_IO_REG(ha, gpiod))); 5963 } else { 5964 nextstate = (xp->ledstate.LEDflags) & 5965 (~(RD16_IO_REG(ha, gpiod))); 5966 } 5967 5968 /* turn the led on or off */ 5969 ql_drive_led(ha, nextstate); 5970 } 5971 5972 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 5973 } 5974 5975 /* 5976 * ql_drive_led 5977 * drive the led's as determined by LEDflags 5978 * 5979 * Input: 5980 * ha: adapter state pointer. 5981 * LEDflags: LED flags 5982 * 5983 * Context: 5984 * Kernel/Interrupt context. 
5985 */ 5986 static void 5987 ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags) 5988 { 5989 5990 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 5991 5992 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) { 5993 5994 uint16_t gpio_enable, gpio_data; 5995 5996 /* setup to send new data */ 5997 gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe); 5998 gpio_enable = (uint16_t)(gpio_enable | LED_MASK); 5999 WRT16_IO_REG(ha, gpioe, gpio_enable); 6000 6001 /* read current data and clear out old led data */ 6002 gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod); 6003 gpio_data = (uint16_t)(gpio_data & ~LED_MASK); 6004 6005 /* set in the new led data. */ 6006 gpio_data = (uint16_t)(gpio_data | LEDflags); 6007 6008 /* write out the new led data */ 6009 WRT16_IO_REG(ha, gpiod, gpio_data); 6010 6011 } else if (CFG_IST(ha, CFG_CTRL_24258081)) { 6012 6013 uint32_t gpio_data; 6014 6015 /* setup to send new data */ 6016 gpio_data = RD32_IO_REG(ha, gpiod); 6017 gpio_data |= LED_MASK_UPDATE_24; 6018 WRT32_IO_REG(ha, gpiod, gpio_data); 6019 6020 /* read current data and clear out old led data */ 6021 gpio_data = RD32_IO_REG(ha, gpiod); 6022 gpio_data &= ~LED_MASK_COLORS_24; 6023 6024 /* set in the new led data */ 6025 gpio_data |= LEDflags; 6026 6027 /* write out the new led data */ 6028 WRT32_IO_REG(ha, gpiod, gpio_data); 6029 6030 } else { 6031 EL(ha, "unsupported HBA: %xh", ha->device_id); 6032 } 6033 6034 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 6035 } 6036 6037 /* 6038 * ql_setup_led 6039 * Setup LED for driver control 6040 * 6041 * Input: 6042 * ha: adapter state pointer. 6043 * 6044 * Context: 6045 * Kernel/Interrupt context. 
6046 */ 6047 static uint32_t 6048 ql_setup_led(ql_adapter_state_t *ha) 6049 { 6050 uint32_t rval; 6051 ql_mbx_data_t mr; 6052 6053 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 6054 6055 /* decouple the LED control from the fw */ 6056 rval = ql_get_firmware_option(ha, &mr); 6057 if (rval != QL_SUCCESS) { 6058 EL(ha, "failed, get_firmware_option=%xh\n", rval); 6059 return (rval); 6060 } 6061 6062 /* set the appropriate options */ 6063 mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO); 6064 6065 /* send it back to the firmware */ 6066 rval = ql_set_firmware_option(ha, &mr); 6067 if (rval != QL_SUCCESS) { 6068 EL(ha, "failed, set_firmware_option=%xh\n", rval); 6069 return (rval); 6070 } 6071 6072 /* initally, turn the LED's off */ 6073 ql_drive_led(ha, LED_ALL_OFF); 6074 6075 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 6076 6077 return (rval); 6078 } 6079 6080 /* 6081 * ql_wrapup_led 6082 * Return LED control to the firmware 6083 * 6084 * Input: 6085 * ha: adapter state pointer. 6086 * 6087 * Context: 6088 * Kernel/Interrupt context. 
6089 */ 6090 static uint32_t 6091 ql_wrapup_led(ql_adapter_state_t *ha) 6092 { 6093 uint32_t rval; 6094 ql_mbx_data_t mr; 6095 6096 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 6097 6098 /* Turn all LED's off */ 6099 ql_drive_led(ha, LED_ALL_OFF); 6100 6101 if (CFG_IST(ha, CFG_CTRL_24258081)) { 6102 6103 uint32_t gpio_data; 6104 6105 /* disable the LED update mask */ 6106 gpio_data = RD32_IO_REG(ha, gpiod); 6107 gpio_data &= ~LED_MASK_UPDATE_24; 6108 6109 /* write out the data */ 6110 WRT32_IO_REG(ha, gpiod, gpio_data); 6111 } 6112 6113 /* give LED control back to the f/w */ 6114 rval = ql_get_firmware_option(ha, &mr); 6115 if (rval != QL_SUCCESS) { 6116 EL(ha, "failed, get_firmware_option=%xh\n", rval); 6117 return (rval); 6118 } 6119 6120 mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO); 6121 6122 rval = ql_set_firmware_option(ha, &mr); 6123 if (rval != QL_SUCCESS) { 6124 EL(ha, "failed, set_firmware_option=%xh\n", rval); 6125 return (rval); 6126 } 6127 6128 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 6129 6130 return (rval); 6131 } 6132 6133 /* 6134 * ql_get_port_summary 6135 * Performs EXT_SC_GET_PORT_SUMMARY subcommand. of EXT_CC_GET_DATA. 6136 * 6137 * The EXT_IOCTL->RequestAdr points to a single 6138 * UINT32 which identifies the device type. 6139 * 6140 * Input: 6141 * ha: adapter state pointer. 6142 * cmd: Local EXT_IOCTL cmd struct pointer. 6143 * mode: flags. 6144 * 6145 * Returns: 6146 * None, request status indicated in cmd->Status. 6147 * 6148 * Context: 6149 * Kernel context. 
 */
static void
ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DEVICEDATA		dd = {0};
	EXT_DEVICEDATA		*uddp;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint32_t		rlen, dev_type, index;
	int			rval = 0;
	EXT_DEVICEDATAENTRY	*uddep, *ddep;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* ddep is the kernel-side staging entry; copied out one at a time */
	ddep = &dd.EntryList[0];

	/*
	 * Get the type of device the requestor is looking for.
	 *
	 * We ignore this for now.
	 */
	rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&dev_type, sizeof (dev_type), mode);
	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyin\n");
		return;
	}
	/*
	 * Count the number of entries to be returned. Count devices
	 * that are offline, but have been persistently bound.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			if (tq->flags & TQF_INITIATOR_DEVICE ||
			    !VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;	/* Skip this one */
			}
			dd.TotalDevices++;
		}
	}
	/*
	 * Compute the number of entries that can be returned
	 * based upon the size of caller's response buffer.
	 */
	dd.ReturnListEntryCount = 0;
	if (dd.TotalDevices == 0) {
		/* header only: EXT_DEVICEDATA embeds one EntryList slot */
		rlen = sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY);
	} else {
		rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
		    (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
	}
	if (rlen > cmd->ResponseLen) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = rlen;
		EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
		    rlen, cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}
	cmd->ResponseLen = 0;
	uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
	uddep = &uddp->EntryList[0];
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			if (tq->flags & TQF_INITIATOR_DEVICE ||
			    !VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;	/* Skip this one */
			}

			/* Stage one entry in kernel space, then copy out. */
			bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));

			bcopy(tq->node_name, ddep->NodeWWN, 8);
			bcopy(tq->port_name, ddep->PortWWN, 8);

			ddep->PortID[0] = tq->d_id.b.domain;
			ddep->PortID[1] = tq->d_id.b.area;
			ddep->PortID[2] = tq->d_id.b.al_pa;

			bcopy(tq->port_name,
			    (caddr_t)&ddep->TargetAddress.Target, 8);

			ddep->DeviceFlags = tq->flags;
			ddep->LoopID = tq->loop_id;
			QL_PRINT_9(CE_CONT, "(%d): Tgt=%lld, loop=%xh, "
			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
			    ha->instance, ddep->TargetAddress.Target,
			    ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
			    ddep->NodeWWN[2], ddep->NodeWWN[3],
			    ddep->NodeWWN[4], ddep->NodeWWN[5],
			    ddep->NodeWWN[6], ddep->NodeWWN[7],
			    ddep->PortWWN[0], ddep->PortWWN[1],
			    ddep->PortWWN[2], ddep->PortWWN[3],
			    ddep->PortWWN[4], ddep->PortWWN[5],
			    ddep->PortWWN[6], ddep->PortWWN[7]);
			rval = ddi_copyout((void *)ddep, (void *)uddep,
			    sizeof (EXT_DEVICEDATAENTRY), mode);

			if (rval != 0) {
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->ResponseLen = 0;
				EL(ha, "failed, ddi_copyout\n");
				break;
			}
			dd.ReturnListEntryCount++;
			uddep++;
			cmd->ResponseLen += (uint32_t)
			    sizeof (EXT_DEVICEDATAENTRY);
		}
	}
	/* Copy out the header (counts) now that the list is complete. */
	rval = ddi_copyout((void *)&dd, (void *)uddp,
	    sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);

	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout-2\n");
	} else {
		/*
		 * NOTE(review): the header copied out above is
		 * sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY)
		 * bytes, but a full entry size is added here; looks like it
		 * intentionally re-counts the embedded EntryList[0] slot --
		 * confirm against the EXT_DEVICEDATA layout before changing.
		 */
		cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}

/*
 * ql_get_target_id
 *	Performs EXT_SC_GET_TARGET_ID subcommand of EXT_CC_GET_DATA.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
6293 */ 6294 static void 6295 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 6296 { 6297 uint32_t rval; 6298 uint16_t qlnt; 6299 EXT_DEST_ADDR extdestaddr = {0}; 6300 uint8_t *name; 6301 uint8_t wwpn[EXT_DEF_WWN_NAME_SIZE]; 6302 ql_tgt_t *tq; 6303 6304 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 6305 6306 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, 6307 (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) { 6308 EL(ha, "failed, ddi_copyin\n"); 6309 cmd->Status = EXT_STATUS_COPY_ERR; 6310 cmd->ResponseLen = 0; 6311 return; 6312 } 6313 6314 qlnt = QLNT_PORT; 6315 name = wwpn; 6316 QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n", 6317 ha->instance, name[0], name[1], name[2], name[3], name[4], 6318 name[5], name[6], name[7]); 6319 6320 tq = ql_find_port(ha, name, qlnt); 6321 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) { 6322 EL(ha, "failed, fc_port not found\n"); 6323 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 6324 cmd->ResponseLen = 0; 6325 return; 6326 } 6327 6328 bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8); 6329 6330 rval = ddi_copyout((void *)&extdestaddr, 6331 (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode); 6332 if (rval != 0) { 6333 EL(ha, "failed, ddi_copyout\n"); 6334 cmd->Status = EXT_STATUS_COPY_ERR; 6335 cmd->ResponseLen = 0; 6336 } 6337 6338 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 6339 } 6340 6341 /* 6342 * ql_setup_fcache 6343 * Populates selected flash sections into the cache 6344 * 6345 * Input: 6346 * ha = adapter state pointer. 6347 * 6348 * Returns: 6349 * ql local function return status code. 6350 * 6351 * Context: 6352 * Kernel context. 
 *
 * Note:
 *	Driver must be in stalled state prior to entering or
 *	add code to this function prior to calling ql_setup_flash()
 */
int
ql_setup_fcache(ql_adapter_state_t *ha)
{
	int		rval;
	uint32_t	freadpos = 0;
	uint32_t	fw_done = 0;
	ql_fcache_t	*head = NULL;
	ql_fcache_t	*tail = NULL;
	ql_fcache_t	*ftmp;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	CACHE_LOCK(ha);

	/* If we already have populated it, rtn */
	if (ha->fcache != NULL) {
		CACHE_UNLOCK(ha);
		EL(ha, "buffer already populated\n");
		return (QL_SUCCESS);
	}

	ql_flash_nvram_defaults(ha);

	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
		CACHE_UNLOCK(ha);
		EL(ha, "unable to setup flash; rval=%xh\n", rval);
		return (rval);
	}

	/*
	 * Walk the flash image by image; ql_check_pci() advances freadpos
	 * to the next image and sets it to 0xffffffff after the last one.
	 */
	while (freadpos != 0xffffffff) {
		/* Allocate & populate this node */
		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
			EL(ha, "node alloc failed\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* link in the new node */
		if (head == NULL) {
			head = tail = ftmp;
		} else {
			tail->next = ftmp;
			tail = ftmp;
		}

		/* Do the firmware node first for 24xx/25xx's */
		if (fw_done == 0) {
			if (CFG_IST(ha, CFG_CTRL_24258081)) {
				freadpos = ha->flash_fw_addr << 2;
			}
			fw_done = 1;
		}

		if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
		    freadpos)) != QL_SUCCESS) {
			EL(ha, "failed, 24xx dump_fcode"
			    " pos=%xh rval=%xh\n", freadpos, rval);
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* checkout the pci data / format */
		if (ql_check_pci(ha, ftmp, &freadpos)) {
			EL(ha, "flash header incorrect\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}
	}

	if (rval != QL_SUCCESS) {
		/* release all resources we have */
		ftmp = head;
		while (ftmp != NULL) {
			tail = ftmp->next;
			kmem_free(ftmp->buf, FBUFSIZE);
			kmem_free(ftmp, sizeof (ql_fcache_t));
			ftmp = tail;
		}

		EL(ha, "failed, done\n");
	} else {
		ha->fcache = head;
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	CACHE_UNLOCK(ha);

	return (rval);
}

/*
 * ql_update_fcache
 *	re-populates updated flash into the fcache. If
 *	fcache does not exist (e.g., flash was empty/invalid on
 *	boot), this routine will create and the populate it.
 *
 * Input:
 *	ha = adapter state pointer.
 *	*bpf = Pointer to flash buffer.
 *	bsize = Size of flash buffer.
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
void
ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
{
	int		rval = QL_SUCCESS;
	uint32_t	freadpos = 0;
	uint32_t	fw_done = 0;
	ql_fcache_t	*head = NULL;
	ql_fcache_t	*tail = NULL;
	ql_fcache_t	*ftmp;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Same image walk as ql_setup_fcache(), but sourced from bfp. */
	while (freadpos != 0xffffffff) {

		/* Allocate & populate this node */

		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
			EL(ha, "node alloc failed\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* link in the new node */
		if (head == NULL) {
			head = tail = ftmp;
		} else {
			tail->next = ftmp;
			tail = ftmp;
		}

		/* Do the firmware node first for 24xx's */
		if (fw_done == 0) {
			if (CFG_IST(ha, CFG_CTRL_24258081)) {
				freadpos = ha->flash_fw_addr << 2;
			}
			fw_done = 1;
		}

		/* read in first FBUFSIZE bytes of this flash section */
		if (freadpos+FBUFSIZE > bsize) {
			EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
			    freadpos, bsize);
			rval = QL_FUNCTION_FAILED;
			break;
		}
		bcopy(bfp+freadpos, ftmp->buf, FBUFSIZE);

		/* checkout the pci data / format */
		if (ql_check_pci(ha, ftmp, &freadpos)) {
EL(ha, "flash header incorrect\n"); 6513 rval = QL_FUNCTION_FAILED; 6514 break; 6515 } 6516 } 6517 6518 if (rval != QL_SUCCESS) { 6519 /* 6520 * release all resources we have 6521 */ 6522 ql_fcache_rel(head); 6523 EL(ha, "failed, done\n"); 6524 } else { 6525 /* 6526 * Release previous fcache resources and update with new 6527 */ 6528 CACHE_LOCK(ha); 6529 ql_fcache_rel(ha->fcache); 6530 ha->fcache = head; 6531 CACHE_UNLOCK(ha); 6532 6533 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 6534 } 6535 } 6536 6537 /* 6538 * ql_setup_fnode 6539 * Allocates fcache node 6540 * 6541 * Input: 6542 * ha = adapter state pointer. 6543 * node = point to allocated fcache node (NULL = failed) 6544 * 6545 * Returns: 6546 * 6547 * Context: 6548 * Kernel context. 6549 * 6550 * Note: 6551 * Driver must be in stalled state prior to entering or 6552 * add code to this function prior to calling ql_setup_flash() 6553 */ 6554 static ql_fcache_t * 6555 ql_setup_fnode(ql_adapter_state_t *ha) 6556 { 6557 ql_fcache_t *fnode = NULL; 6558 6559 if ((fnode = (ql_fcache_t *)(kmem_zalloc(sizeof (ql_fcache_t), 6560 KM_SLEEP))) == NULL) { 6561 EL(ha, "fnode alloc failed\n"); 6562 fnode = NULL; 6563 } else if ((fnode->buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, 6564 KM_SLEEP))) == NULL) { 6565 EL(ha, "buf alloc failed\n"); 6566 kmem_free(fnode, sizeof (ql_fcache_t)); 6567 fnode = NULL; 6568 } else { 6569 fnode->buflen = FBUFSIZE; 6570 } 6571 6572 return (fnode); 6573 } 6574 6575 /* 6576 * ql_fcache_rel 6577 * Releases the fcache resources 6578 * 6579 * Input: 6580 * ha = adapter state pointer. 6581 * head = Pointer to fcache linked list 6582 * 6583 * Returns: 6584 * 6585 * Context: 6586 * Kernel context. 
6587 * 6588 */ 6589 void 6590 ql_fcache_rel(ql_fcache_t *head) 6591 { 6592 ql_fcache_t *ftmp = head; 6593 ql_fcache_t *tail; 6594 6595 /* release all resources we have */ 6596 while (ftmp != NULL) { 6597 tail = ftmp->next; 6598 kmem_free(ftmp->buf, FBUFSIZE); 6599 kmem_free(ftmp, sizeof (ql_fcache_t)); 6600 ftmp = tail; 6601 } 6602 } 6603 6604 /* 6605 * ql_update_flash_caches 6606 * Updates driver flash caches 6607 * 6608 * Input: 6609 * ha: adapter state pointer. 6610 * 6611 * Context: 6612 * Kernel context. 6613 */ 6614 static void 6615 ql_update_flash_caches(ql_adapter_state_t *ha) 6616 { 6617 uint32_t len; 6618 ql_link_t *link; 6619 ql_adapter_state_t *ha2; 6620 6621 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 6622 6623 /* Get base path length. */ 6624 for (len = (uint32_t)strlen(ha->devpath); len; len--) { 6625 if (ha->devpath[len] == ',' || 6626 ha->devpath[len] == '@') { 6627 break; 6628 } 6629 } 6630 6631 /* Reset fcache on all adapter instances. */ 6632 for (link = ql_hba.first; link != NULL; link = link->next) { 6633 ha2 = link->base_address; 6634 6635 if (strncmp(ha->devpath, ha2->devpath, len) != 0) { 6636 continue; 6637 } 6638 6639 CACHE_LOCK(ha2); 6640 ql_fcache_rel(ha2->fcache); 6641 ha2->fcache = NULL; 6642 6643 if (CFG_IST(ha, CFG_CTRL_24258081)) { 6644 if (ha2->vcache != NULL) { 6645 kmem_free(ha2->vcache, QL_24XX_VPD_SIZE); 6646 ha2->vcache = NULL; 6647 } 6648 } 6649 CACHE_UNLOCK(ha2); 6650 6651 (void) ql_setup_fcache(ha2); 6652 } 6653 6654 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 6655 } 6656 6657 /* 6658 * ql_get_fbuf 6659 * Search the fcache list for the type specified 6660 * 6661 * Input: 6662 * fptr = Pointer to fcache linked list 6663 * ftype = Type of image to be returned. 6664 * 6665 * Returns: 6666 * Pointer to ql_fcache_t. 6667 * NULL means not found. 6668 * 6669 * Context: 6670 * Kernel context. 
 *
 *
 */
ql_fcache_t *
ql_get_fbuf(ql_fcache_t *fptr, uint32_t ftype)
{
	while (fptr != NULL) {
		/* does this image meet criteria? */
		if (ftype & fptr->type) {
			break;
		}
		fptr = fptr->next;
	}
	return (fptr);
}

/*
 * ql_check_pci
 *
 *	checks the passed buffer for a valid pci signature and
 *	expected (and in range) pci length values.
 *
 *	For firmware type, a pci header is added since the image in
 *	the flash does not have one (!!!).
 *
 *	On successful pci check, nextpos adjusted to next pci header.
 *
 * Returns:
 *	-1 --> last pci image
 *	0 --> pci header valid
 *	1 --> pci header invalid.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
{
	pci_header_t	*pcih;
	pci_data_t	*pcid;
	uint32_t	doff;
	uint8_t		*pciinfo;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (fcache != NULL) {
		pciinfo = fcache->buf;
	} else {
		EL(ha, "failed, null fcache ptr passed\n");
		return (1);
	}

	if (pciinfo == NULL) {
		EL(ha, "failed, null pciinfo ptr passed\n");
		return (1);
	}

	/* SBUS cards carry fcode, not a PCI ROM; version from a property. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		caddr_t	bufp;
		uint_t	len;

		if (pciinfo[0] != SBUS_CODE_FCODE) {
			EL(ha, "failed, unable to detect sbus fcode\n");
			return (1);
		}
		fcache->type = FTYPE_FCODE;

		/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
		if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
		    PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
		    DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
		    (int *)&len) == DDI_PROP_SUCCESS) {

			(void) snprintf(fcache->verstr,
			    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
			kmem_free(bufp, len);
		}

		*nextpos = 0xffffffff;

		QL_PRINT_9(CE_CONT, "(%d): CFG_SBUS_CARD, done\n",
		    ha->instance);

		return (0);
	}

	/* The firmware image in flash has no PCI header; fabricate one. */
	if (*nextpos == ha->flash_fw_addr << 2) {

		pci_header_t	fwh = {0};
		pci_data_t	fwd = {0};
		uint8_t		*buf, *bufp;

		/*
		 * Build a pci header for the firmware module
		 */
		if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
		    NULL) {
			EL(ha, "failed, unable to allocate buffer\n");
			return (1);
		}

		fwh.signature[0] = PCI_HEADER0;
		fwh.signature[1] = PCI_HEADER1;
		fwh.dataoffset[0] = LSB(sizeof (pci_header_t));
		fwh.dataoffset[1] = MSB(sizeof (pci_header_t));

		fwd.signature[0] = 'P';
		fwd.signature[1] = 'C';
		fwd.signature[2] = 'I';
		fwd.signature[3] = 'R';
		fwd.codetype = PCI_CODE_FW;
		fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
		fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));

		/* Assemble header + data struct + image into scratch buf. */
		bufp = buf;
		bcopy(&fwh, bufp, sizeof (pci_header_t));
		bufp += sizeof (pci_header_t);
		bcopy(&fwd, bufp, sizeof (pci_data_t));
		bufp += sizeof (pci_data_t);

		bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
		    sizeof (pci_data_t)));
		bcopy(buf, fcache->buf, FBUFSIZE);

		fcache->type = FTYPE_FW;

		/*
		 * NOTE(review): offsets 19/23/27 are the firmware
		 * major/minor/subminor bytes after the fabricated header
		 * shifts the image -- confirm against the fw image layout.
		 */
		(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
		    "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
		    fcache->buf[27]);

		if (CFG_IST(ha, CFG_CTRL_81XX)) {
			*nextpos = 0x200000;
		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
			*nextpos = 0x80000;
		} else {
			*nextpos = 0;
		}
		kmem_free(buf, FBUFSIZE);

		QL_PRINT_9(CE_CONT, "(%d): FTYPE_FW, done\n", ha->instance);

		return (0);
	}

	/* get to the pci header image length */
	pcih = (pci_header_t *)pciinfo;

	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);

	/* some header section sanity check */
	if (pcih->signature[0] != PCI_HEADER0 ||
	    pcih->signature[1] != PCI_HEADER1 || doff > 50) {
		EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
		    pcih->signature[0], pcih->signature[1], doff);
		return (1);
	}

	pcid = (pci_data_t *)(pciinfo + doff);

	/* a slight sanity data section check */
	if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
	    pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
		EL(ha, "failed, data sig mismatch!\n");
		return (1);
	}

	if (pcid->indicator == PCI_IND_LAST_IMAGE) {
		QL_PRINT_9(CE_CONT, "(%d): last image\n", ha->instance);
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* The flash layout table follows the last image. */
			ql_flash_layout_table(ha, *nextpos +
			    (pcid->imagelength[0] | (pcid->imagelength[1] <<
			    8)) * PCI_SECTOR_SIZE);
			(void) ql_24xx_flash_desc(ha);
		}
		*nextpos = 0xffffffff;
	} else {
		/* adjust the next flash read start position */
		*nextpos += (pcid->imagelength[0] |
		    (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
	}

	switch (pcid->codetype) {
	case PCI_CODE_X86PC:
		fcache->type = FTYPE_BIOS;
		break;
	case PCI_CODE_FCODE:
		fcache->type = FTYPE_FCODE;
		break;
	case PCI_CODE_EFI:
		fcache->type = FTYPE_EFI;
		break;
	case PCI_CODE_HPPA:
		fcache->type = FTYPE_HPPA;
		break;
	default:
		fcache->type = FTYPE_UNKNOWN;
		break;
	}

	(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
	    "%d.%02d", pcid->revisionlevel[1], pcid->revisionlevel[0]);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}

/*
 * ql_flash_layout_table
 *	Obtains flash addresses from table
 *
 * Input:
 *	ha:		adapter state pointer.
 *	flt_paddr:	flash layout pointer address.
 *
 * Context:
 *	Kernel context.
6888 */ 6889 static void 6890 ql_flash_layout_table(ql_adapter_state_t *ha, uint32_t flt_paddr) 6891 { 6892 ql_flt_ptr_t *fptr; 6893 uint8_t *bp; 6894 int rval; 6895 uint32_t len, faddr, cnt; 6896 uint16_t chksum, w16; 6897 6898 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 6899 6900 /* Process flash layout table header */ 6901 len = sizeof (ql_flt_ptr_t); 6902 bp = kmem_zalloc(len, KM_SLEEP); 6903 6904 /* Process pointer to flash layout table */ 6905 if ((rval = ql_dump_fcode(ha, bp, len, flt_paddr)) != QL_SUCCESS) { 6906 EL(ha, "fptr dump_flash pos=%xh, status=%xh\n", flt_paddr, 6907 rval); 6908 kmem_free(bp, len); 6909 return; 6910 } 6911 fptr = (ql_flt_ptr_t *)bp; 6912 6913 /* Verify pointer to flash layout table. */ 6914 for (chksum = 0, cnt = 0; cnt < len; cnt += 2) { 6915 w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]); 6916 chksum += w16; 6917 } 6918 if (chksum != 0 || fptr->sig[0] != 'Q' || fptr->sig[1] != 'F' || 6919 fptr->sig[2] != 'L' || fptr->sig[3] != 'T') { 6920 EL(ha, "ptr chksum=%xh, sig=%c%c%c%c\n", chksum, fptr->sig[0], 6921 fptr->sig[1], fptr->sig[2], fptr->sig[3]); 6922 kmem_free(bp, len); 6923 return; 6924 } 6925 faddr = CHAR_TO_LONG(fptr->addr[0], fptr->addr[1], fptr->addr[2], 6926 fptr->addr[3]); 6927 6928 kmem_free(bp, len); 6929 6930 ql_process_flt(ha, faddr); 6931 6932 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 6933 } 6934 6935 /* 6936 * ql_process_flt 6937 * Obtains flash addresses from flash layout table 6938 * 6939 * Input: 6940 * ha: adapter state pointer. 6941 * faddr: flash layout table byte address. 6942 * 6943 * Context: 6944 * Kernel context. 
 */
static void
ql_process_flt(ql_adapter_state_t *ha, uint32_t faddr)
{
	ql_flt_hdr_t	*fhdr;
	ql_flt_region_t	*frgn;
	uint8_t		*bp, *eaddr, nv_rg, vpd_rg;
	int		rval;
	uint32_t	len, cnt, fe_addr;
	uint16_t	chksum, w16;

	QL_PRINT_9(CE_CONT, "(%d): started faddr=%xh\n", ha->instance, faddr);

	/* Allocate a buffer big enough for the whole layout table. */
	bp = kmem_zalloc(FLASH_LAYOUT_TABLE_SIZE, KM_SLEEP);
	fhdr = (ql_flt_hdr_t *)bp;

	/* Read the flash layout table from flash. */
	if ((rval = ql_dump_fcode(ha, bp, FLASH_LAYOUT_TABLE_SIZE, faddr)) !=
	    QL_SUCCESS) {
		EL(ha, "fhdr dump_flash pos=%xh, status=%xh\n", faddr, rval);
		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
		return;
	}

	/*
	 * Verify flash layout table: compute the 16-bit word sum over the
	 * header-reported length (forced to fail with 0xffff if the length
	 * exceeds the buffer), and require version 1.
	 */
	len = (uint32_t)(CHAR_TO_SHORT(fhdr->len[0], fhdr->len[1]) +
	    sizeof (ql_flt_hdr_t) + sizeof (ql_flt_region_t));
	if (len > FLASH_LAYOUT_TABLE_SIZE) {
		chksum = 0xffff;
	} else {
		for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
			w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
			chksum += w16;
		}
	}
	w16 = CHAR_TO_SHORT(fhdr->version[0], fhdr->version[1]);
	if (chksum != 0 || w16 != 1) {
		EL(ha, "table chksum=%xh, version=%d\n", chksum, w16);
		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
		return;
	}
	/* eaddr marks the first byte past the region entries. */
	eaddr = bp + len;

	/*
	 * Process Function/Port Configuration Map (8021 only): selects
	 * which NVRAM/VPD regions belong to this PCI function.
	 */
	nv_rg = vpd_rg = 0;
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		uint16_t	i;
		uint8_t		*mbp = eaddr;
		ql_fp_cfg_map_t	*cmp = (ql_fp_cfg_map_t *)mbp;

		/* Same word-sum/version/signature validation as above. */
		len = (uint32_t)(CHAR_TO_SHORT(cmp->hdr.len[0],
		    cmp->hdr.len[1]));
		if (len > FLASH_LAYOUT_TABLE_SIZE) {
			chksum = 0xffff;
		} else {
			for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
				w16 = (uint16_t)CHAR_TO_SHORT(mbp[cnt],
				    mbp[cnt + 1]);
				chksum += w16;
			}
		}
		w16 = CHAR_TO_SHORT(cmp->hdr.version[0], cmp->hdr.version[1]);
		if (chksum != 0 || w16 != 1 ||
		    cmp->hdr.Signature[0] != 'F' ||
		    cmp->hdr.Signature[1] != 'P' ||
		    cmp->hdr.Signature[2] != 'C' ||
		    cmp->hdr.Signature[3] != 'M') {
			EL(ha, "cfg_map chksum=%xh, version=%d, "
			    "sig=%c%c%c%c\n", chksum, w16,
			    cmp->hdr.Signature[0], cmp->hdr.Signature[1],
			    cmp->hdr.Signature[2], cmp->hdr.Signature[3]);
		} else {
			cnt = (uint16_t)
			    (CHAR_TO_SHORT(cmp->hdr.NumberEntries[0],
			    cmp->hdr.NumberEntries[1]));
			/* Locate the map entry for this FC function. */
			for (i = 0; i < cnt; i++) {
				if (cmp->cfg[i].FunctionType == FT_FC &&
				    cmp->cfg[i].FunctionNumber[0] ==
				    ha->function_number &&
				    cmp->cfg[i].FunctionNumber[1] == 0) {
					nv_rg = cmp->cfg[i].ConfigRegion;
					vpd_rg = cmp->cfg[i].VpdRegion;
					break;
				}
			}

			/* Both regions must be assigned; else ignore map. */
			if (nv_rg == 0 || vpd_rg == 0) {
				EL(ha, "cfg_map nv_rg=%d, vpd_rg=%d\n", nv_rg,
				    vpd_rg);
				nv_rg = vpd_rg = 0;
			}
		}
	}

	/*
	 * Process flash layout table regions: record each region's start
	 * address (converted from bytes to 4-byte units via >> 2) in the
	 * adapter state.
	 */
	for (frgn = (ql_flt_region_t *)(bp + sizeof (ql_flt_hdr_t));
	    (uint8_t *)frgn < eaddr; frgn++) {
		faddr = CHAR_TO_LONG(frgn->beg_addr[0], frgn->beg_addr[1],
		    frgn->beg_addr[2], frgn->beg_addr[3]);
		faddr >>= 2;
		fe_addr = CHAR_TO_LONG(frgn->end_addr[0], frgn->end_addr[1],
		    frgn->end_addr[2], frgn->end_addr[3]);
		fe_addr >>= 2;

		switch (frgn->region) {
		case FLASH_8021_BOOTLOADER_REGION:
			ha->bootloader_addr = faddr;
			ha->bootloader_size = (fe_addr - faddr) + 1;
			QL_PRINT_9(CE_CONT, "(%d): bootloader_addr=%xh, "
			    "size=%xh\n", ha->instance, faddr,
			    ha->bootloader_size);
			break;
		case FLASH_FW_REGION:
		case FLASH_8021_FW_REGION:
			ha->flash_fw_addr = faddr;
			ha->flash_fw_size = (fe_addr - faddr) + 1;
			QL_PRINT_9(CE_CONT, "(%d): flash_fw_addr=%xh, "
			    "size=%xh\n", ha->instance, faddr,
			    ha->flash_fw_size);
			break;
		case FLASH_GOLDEN_FW_REGION:
		case FLASH_8021_GOLDEN_FW_REGION:
			ha->flash_golden_fw_addr = faddr;
			QL_PRINT_9(CE_CONT, "(%d): flash_golden_fw_addr=%xh\n",
			    ha->instance, faddr);
			break;
		case FLASH_8021_VPD_REGION:
			/* Accept when no map assignment or map names it. */
			if (!vpd_rg || vpd_rg == FLASH_8021_VPD_REGION) {
				ha->flash_vpd_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): 8021_flash_vpd_"
				    "addr=%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_VPD_0_REGION:
			if (vpd_rg) {
				if (vpd_rg == FLASH_VPD_0_REGION) {
					ha->flash_vpd_addr = faddr;
					QL_PRINT_9(CE_CONT, "(%d): vpd_rg "
					    "flash_vpd_addr=%xh\n",
					    ha->instance, faddr);
				}
			} else if (!(ha->flags & FUNCTION_1) &&
			    !(CFG_IST(ha, CFG_CTRL_8021))) {
				ha->flash_vpd_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
				    "\n", ha->instance, faddr);
			}
			break;
		case FLASH_NVRAM_0_REGION:
			if (nv_rg) {
				if (nv_rg == FLASH_NVRAM_0_REGION) {
					/* Map says region 0: this is fn 0. */
					ADAPTER_STATE_LOCK(ha);
					ha->flags &= ~FUNCTION_1;
					ADAPTER_STATE_UNLOCK(ha);
					ha->flash_nvram_addr = faddr;
					QL_PRINT_9(CE_CONT, "(%d): nv_rg "
					    "flash_nvram_addr=%xh\n",
					    ha->instance, faddr);
				}
			} else if (!(ha->flags & FUNCTION_1)) {
				ha->flash_nvram_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_VPD_1_REGION:
			if (vpd_rg) {
				if (vpd_rg == FLASH_VPD_1_REGION) {
					ha->flash_vpd_addr = faddr;
					QL_PRINT_9(CE_CONT, "(%d): vpd_rg "
					    "flash_vpd_addr=%xh\n",
					    ha->instance, faddr);
				}
			} else if (ha->flags & FUNCTION_1 &&
			    !(CFG_IST(ha, CFG_CTRL_8021))) {
				ha->flash_vpd_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
				    "\n", ha->instance, faddr);
			}
			break;
		case FLASH_NVRAM_1_REGION:
			if (nv_rg) {
				if (nv_rg == FLASH_NVRAM_1_REGION) {
					/* Map says region 1: this is fn 1. */
					ADAPTER_STATE_LOCK(ha);
					ha->flags |= FUNCTION_1;
					ADAPTER_STATE_UNLOCK(ha);
					ha->flash_nvram_addr = faddr;
					QL_PRINT_9(CE_CONT, "(%d): nv_rg "
					    "flash_nvram_addr=%xh\n",
					    ha->instance, faddr);
				}
			} else if (ha->flags & FUNCTION_1) {
				ha->flash_nvram_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_DESC_TABLE_REGION:
			if (!(CFG_IST(ha, CFG_CTRL_8021))) {
				ha->flash_desc_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_desc_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_ERROR_LOG_0_REGION:
			if (!(ha->flags & FUNCTION_1)) {
				ha->flash_errlog_start = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_ERROR_LOG_1_REGION:
			if (ha->flags & FUNCTION_1) {
				ha->flash_errlog_start = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		default:
			/* Unrecognized regions are ignored. */
			break;
		}
	}
	kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_flash_nvram_defaults
 *	Flash default addresses.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_flash_nvram_defaults(ql_adapter_state_t *ha)
{
	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Per-chip default flash addresses, selected by PCI function. */
	if (ha->flags & FUNCTION_1) {
		if (CFG_IST(ha, CFG_CTRL_2300)) {
			ha->flash_nvram_addr = NVRAM_2300_FUNC1_ADDR;
			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_2400_FUNC1_ADDR;
			ha->flash_vpd_addr = VPD_2400_FUNC1_ADDR;
			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_1;
			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_2500_FUNC1_ADDR;
			ha->flash_vpd_addr = VPD_2500_FUNC1_ADDR;
			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_1;
			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
			ha->flash_nvram_addr =
			    NVRAM_8100_FUNC1_ADDR;
			ha->flash_vpd_addr = VPD_8100_FUNC1_ADDR;
			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_1;
			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
			/* 8021 also carries firmware/bootloader sizes. */
			ha->flash_data_addr = 0;
			ha->flash_nvram_addr = NVRAM_8021_FUNC1_ADDR;
			ha->flash_vpd_addr = VPD_8021_FUNC1_ADDR;
			ha->flash_errlog_start = 0;
			ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
			ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
			ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
			ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
		}
	} else {
		/* Function 0 defaults. */
		if (CFG_IST(ha, CFG_CTRL_2200)) {
			ha->flash_nvram_addr = NVRAM_2200_FUNC0_ADDR;
			ha->flash_fw_addr = FLASH_2200_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_2300) ||
		    (CFG_IST(ha, CFG_CTRL_6322))) {
			ha->flash_nvram_addr = NVRAM_2300_FUNC0_ADDR;
			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_2400_FUNC0_ADDR;
			ha->flash_vpd_addr = VPD_2400_FUNC0_ADDR;
			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_0;
			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_2500_FUNC0_ADDR;
			ha->flash_vpd_addr = VPD_2500_FUNC0_ADDR;
			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_0;
			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_8100_FUNC0_ADDR;
			ha->flash_vpd_addr = VPD_8100_FUNC0_ADDR;
			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_0;
			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
			ha->flash_data_addr = 0;
			ha->flash_nvram_addr = NVRAM_8021_FUNC0_ADDR;
			ha->flash_vpd_addr = VPD_8021_FUNC0_ADDR;
			ha->flash_errlog_start = 0;
			ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
			ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
			ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
			ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
		} else {
			/* Unknown device: leave addresses unset and log. */
			EL(ha, "unassigned flash fn0 addr: %x\n",
			    ha->device_id);
		}
	}
	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_get_sfp
 *	Returns sfp data to sdmapi caller
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* SFP read is only supported on 24xx/25xx/81xx class adapters. */
	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		EL(ha, "failed, invalid request for HBA\n");
		return;
	}

	/* Caller's buffer must hold the full SFP image. */
	if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = QL_24XX_SFP_SIZE;
		EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
		    cmd->ResponseLen);
		return;
	}

	/* Dump SFP data in user buffer */
	if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
	    mode)) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		EL(ha, "failed, copy error\n");
	} else {
		cmd->Status = EXT_STATUS_OK;
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_dump_sfp
 *	Dumps SFP.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	bp:	buffer address.
 *	mode:	flags
 *
 * Returns:
 *	0 on success, ENOMEM/EFAULT on failure.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
{
	dma_mem_t	mem;
	uint32_t	cnt;
	int		rval2, rval = 0;
	uint32_t	dxfer;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Get a 64-byte DMA bounce buffer for SFP reads. */

	if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
	    QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
		EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
		return (ENOMEM);
	}

	/*
	 * Read the SFP one DMA-buffer-sized chunk at a time; the first
	 * 256 bytes come from device address 0xA0, the rest from 0xA2.
	 */
	for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
		rval2 = ql_read_sfp(ha, &mem,
		    (uint16_t)(cnt < 256 ?
0xA0 : 0xA2), 7356 (uint16_t)(cnt & 0xff)); 7357 if (rval2 != QL_SUCCESS) { 7358 EL(ha, "failed, read_sfp=%xh\n", rval2); 7359 rval = EFAULT; 7360 break; 7361 } 7362 7363 /* copy the data back */ 7364 if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size, 7365 mode)) != mem.size) { 7366 /* ddi copy error */ 7367 EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer); 7368 rval = EFAULT; 7369 break; 7370 } 7371 7372 /* adjust the buffer pointer */ 7373 bp = (caddr_t)bp + mem.size; 7374 } 7375 7376 ql_free_phys(ha, &mem); 7377 7378 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7379 7380 return (rval); 7381 } 7382 7383 /* 7384 * ql_port_param 7385 * Retrieves or sets the firmware port speed settings 7386 * 7387 * Input: 7388 * ha: adapter state pointer. 7389 * cmd: Local EXT_IOCTL cmd struct pointer. 7390 * mode: flags. 7391 * 7392 * Returns: 7393 * None, request status indicated in cmd->Status. 7394 * 7395 * Context: 7396 * Kernel context. 7397 * 7398 */ 7399 static void 7400 ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 7401 { 7402 uint8_t *name; 7403 ql_tgt_t *tq; 7404 EXT_PORT_PARAM port_param = {0}; 7405 uint32_t rval = QL_SUCCESS; 7406 uint32_t idma_rate; 7407 7408 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 7409 7410 if (CFG_IST(ha, CFG_CTRL_242581) == 0) { 7411 EL(ha, "invalid request for this HBA\n"); 7412 cmd->Status = EXT_STATUS_INVALID_REQUEST; 7413 cmd->ResponseLen = 0; 7414 return; 7415 } 7416 7417 if (LOOP_NOT_READY(ha)) { 7418 EL(ha, "failed, loop not ready\n"); 7419 cmd->Status = EXT_STATUS_DEVICE_OFFLINE; 7420 cmd->ResponseLen = 0; 7421 return; 7422 } 7423 7424 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, 7425 (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) { 7426 EL(ha, "failed, ddi_copyin\n"); 7427 cmd->Status = EXT_STATUS_COPY_ERR; 7428 cmd->ResponseLen = 0; 7429 return; 7430 } 7431 7432 if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) { 7433 EL(ha, "Unsupported dest lookup type: %xh\n", 7434 
port_param.FCScsiAddr.DestType); 7435 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 7436 cmd->ResponseLen = 0; 7437 return; 7438 } 7439 7440 name = port_param.FCScsiAddr.DestAddr.WWPN; 7441 7442 QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n", 7443 ha->instance, name[0], name[1], name[2], name[3], name[4], 7444 name[5], name[6], name[7]); 7445 7446 tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT); 7447 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) { 7448 EL(ha, "failed, fc_port not found\n"); 7449 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 7450 cmd->ResponseLen = 0; 7451 return; 7452 } 7453 7454 cmd->Status = EXT_STATUS_OK; 7455 cmd->DetailStatus = EXT_STATUS_OK; 7456 7457 switch (port_param.Mode) { 7458 case EXT_IIDMA_MODE_GET: 7459 /* 7460 * Report the firmware's port rate for the wwpn 7461 */ 7462 rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate, 7463 port_param.Mode); 7464 7465 if (rval != QL_SUCCESS) { 7466 EL(ha, "iidma get failed: %xh\n", rval); 7467 cmd->Status = EXT_STATUS_MAILBOX; 7468 cmd->DetailStatus = rval; 7469 cmd->ResponseLen = 0; 7470 } else { 7471 switch (idma_rate) { 7472 case IIDMA_RATE_1GB: 7473 port_param.Speed = 7474 EXT_DEF_PORTSPEED_1GBIT; 7475 break; 7476 case IIDMA_RATE_2GB: 7477 port_param.Speed = 7478 EXT_DEF_PORTSPEED_2GBIT; 7479 break; 7480 case IIDMA_RATE_4GB: 7481 port_param.Speed = 7482 EXT_DEF_PORTSPEED_4GBIT; 7483 break; 7484 case IIDMA_RATE_8GB: 7485 port_param.Speed = 7486 EXT_DEF_PORTSPEED_8GBIT; 7487 break; 7488 case IIDMA_RATE_10GB: 7489 port_param.Speed = 7490 EXT_DEF_PORTSPEED_10GBIT; 7491 break; 7492 default: 7493 port_param.Speed = 7494 EXT_DEF_PORTSPEED_UNKNOWN; 7495 EL(ha, "failed, Port speed rate=%xh\n", 7496 idma_rate); 7497 break; 7498 } 7499 7500 /* Copy back the data */ 7501 rval = ddi_copyout((void *)&port_param, 7502 (void *)(uintptr_t)cmd->ResponseAdr, 7503 sizeof (EXT_PORT_PARAM), mode); 7504 7505 if (rval != 0) { 7506 cmd->Status = EXT_STATUS_COPY_ERR; 7507 cmd->ResponseLen = 0; 7508 EL(ha, 
"failed, ddi_copyout\n"); 7509 } else { 7510 cmd->ResponseLen = (uint32_t) 7511 sizeof (EXT_PORT_PARAM); 7512 } 7513 } 7514 break; 7515 7516 case EXT_IIDMA_MODE_SET: 7517 /* 7518 * Set the firmware's port rate for the wwpn 7519 */ 7520 switch (port_param.Speed) { 7521 case EXT_DEF_PORTSPEED_1GBIT: 7522 idma_rate = IIDMA_RATE_1GB; 7523 break; 7524 case EXT_DEF_PORTSPEED_2GBIT: 7525 idma_rate = IIDMA_RATE_2GB; 7526 break; 7527 case EXT_DEF_PORTSPEED_4GBIT: 7528 idma_rate = IIDMA_RATE_4GB; 7529 break; 7530 case EXT_DEF_PORTSPEED_8GBIT: 7531 idma_rate = IIDMA_RATE_8GB; 7532 break; 7533 case EXT_DEF_PORTSPEED_10GBIT: 7534 port_param.Speed = IIDMA_RATE_10GB; 7535 break; 7536 default: 7537 EL(ha, "invalid set iidma rate: %x\n", 7538 port_param.Speed); 7539 cmd->Status = EXT_STATUS_INVALID_PARAM; 7540 cmd->ResponseLen = 0; 7541 rval = QL_PARAMETER_ERROR; 7542 break; 7543 } 7544 7545 if (rval == QL_SUCCESS) { 7546 rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate, 7547 port_param.Mode); 7548 if (rval != QL_SUCCESS) { 7549 EL(ha, "iidma set failed: %xh\n", rval); 7550 cmd->Status = EXT_STATUS_MAILBOX; 7551 cmd->DetailStatus = rval; 7552 cmd->ResponseLen = 0; 7553 } 7554 } 7555 break; 7556 default: 7557 EL(ha, "invalid mode specified: %x\n", port_param.Mode); 7558 cmd->Status = EXT_STATUS_INVALID_PARAM; 7559 cmd->ResponseLen = 0; 7560 cmd->DetailStatus = 0; 7561 break; 7562 } 7563 7564 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7565 } 7566 7567 /* 7568 * ql_get_fwexttrace 7569 * Dumps f/w extended trace buffer 7570 * 7571 * Input: 7572 * ha: adapter state pointer. 7573 * bp: buffer address. 7574 * mode: flags 7575 * 7576 * Returns: 7577 * 7578 * Context: 7579 * Kernel context. 
7580 */ 7581 /* ARGSUSED */ 7582 static void 7583 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 7584 { 7585 int rval; 7586 caddr_t payload; 7587 7588 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 7589 7590 if (CFG_IST(ha, CFG_CTRL_24258081) == 0) { 7591 EL(ha, "invalid request for this HBA\n"); 7592 cmd->Status = EXT_STATUS_INVALID_REQUEST; 7593 cmd->ResponseLen = 0; 7594 return; 7595 } 7596 7597 if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) || 7598 (ha->fwexttracebuf.bp == NULL)) { 7599 EL(ha, "f/w extended trace is not enabled\n"); 7600 cmd->Status = EXT_STATUS_INVALID_REQUEST; 7601 cmd->ResponseLen = 0; 7602 return; 7603 } 7604 7605 if (cmd->ResponseLen < FWEXTSIZE) { 7606 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 7607 cmd->DetailStatus = FWEXTSIZE; 7608 EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n", 7609 cmd->ResponseLen, FWEXTSIZE); 7610 cmd->ResponseLen = 0; 7611 return; 7612 } 7613 7614 /* Time Stamp */ 7615 rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP); 7616 if (rval != QL_SUCCESS) { 7617 EL(ha, "f/w extended trace insert" 7618 "time stamp failed: %xh\n", rval); 7619 cmd->Status = EXT_STATUS_ERR; 7620 cmd->ResponseLen = 0; 7621 return; 7622 } 7623 7624 /* Disable Tracing */ 7625 rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE); 7626 if (rval != QL_SUCCESS) { 7627 EL(ha, "f/w extended trace disable failed: %xh\n", rval); 7628 cmd->Status = EXT_STATUS_ERR; 7629 cmd->ResponseLen = 0; 7630 return; 7631 } 7632 7633 /* Allocate payload buffer */ 7634 payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP); 7635 7636 /* Sync DMA buffer. */ 7637 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0, 7638 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL); 7639 7640 /* Copy trace buffer data. */ 7641 ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload, 7642 (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE, 7643 DDI_DEV_AUTOINCR); 7644 7645 /* Send payload to application. 
*/ 7646 if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr, 7647 cmd->ResponseLen, mode) != cmd->ResponseLen) { 7648 EL(ha, "failed, send_buffer_data\n"); 7649 cmd->Status = EXT_STATUS_COPY_ERR; 7650 cmd->ResponseLen = 0; 7651 } else { 7652 cmd->Status = EXT_STATUS_OK; 7653 } 7654 7655 kmem_free(payload, FWEXTSIZE); 7656 7657 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7658 } 7659 7660 /* 7661 * ql_get_fwfcetrace 7662 * Dumps f/w fibre channel event trace buffer 7663 * 7664 * Input: 7665 * ha: adapter state pointer. 7666 * bp: buffer address. 7667 * mode: flags 7668 * 7669 * Returns: 7670 * 7671 * Context: 7672 * Kernel context. 7673 */ 7674 /* ARGSUSED */ 7675 static void 7676 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 7677 { 7678 int rval; 7679 caddr_t payload; 7680 7681 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 7682 7683 if (CFG_IST(ha, CFG_CTRL_24258081) == 0) { 7684 EL(ha, "invalid request for this HBA\n"); 7685 cmd->Status = EXT_STATUS_INVALID_REQUEST; 7686 cmd->ResponseLen = 0; 7687 return; 7688 } 7689 7690 if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) || 7691 (ha->fwfcetracebuf.bp == NULL)) { 7692 EL(ha, "f/w FCE trace is not enabled\n"); 7693 cmd->Status = EXT_STATUS_INVALID_REQUEST; 7694 cmd->ResponseLen = 0; 7695 return; 7696 } 7697 7698 if (cmd->ResponseLen < FWFCESIZE) { 7699 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 7700 cmd->DetailStatus = FWFCESIZE; 7701 EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n", 7702 cmd->ResponseLen, FWFCESIZE); 7703 cmd->ResponseLen = 0; 7704 return; 7705 } 7706 7707 /* Disable Tracing */ 7708 rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE); 7709 if (rval != QL_SUCCESS) { 7710 EL(ha, "f/w FCE trace disable failed: %xh\n", rval); 7711 cmd->Status = EXT_STATUS_ERR; 7712 cmd->ResponseLen = 0; 7713 return; 7714 } 7715 7716 /* Allocate payload buffer */ 7717 payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP); 7718 7719 /* Sync DMA buffer. 
 */
	(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
	    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

	/* Copy trace buffer data. */
	ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)payload,
	    (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE,
	    DDI_DEV_AUTOINCR);

	/*
	 * Send payload to application.
	 * NOTE(review): this copies cmd->ResponseLen bytes from a payload
	 * buffer that only holds FWFCESIZE bytes of trace data; the earlier
	 * check only guarantees ResponseLen >= FWFCESIZE, so a larger
	 * ResponseLen over-reads the payload buffer — verify and cap.
	 */
	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
		EL(ha, "failed, send_buffer_data\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		cmd->Status = EXT_STATUS_OK;
	}

	kmem_free(payload, FWFCESIZE);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_get_pci_data
 *	Retrieves pci config space data
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 *
 */
static void
ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint8_t		cap_ptr;
	uint8_t		cap_id;
	uint32_t	buf_size = 256;	/* legacy PCI config space size */

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First check the "Capabilities List" bit of the status register.
	 */
	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
		/*
		 * Walk the capability list looking for PCI Express; a
		 * PCIe device has 4096 bytes of config space.
		 */
		cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
		while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
			/* Check for the pcie capability. */
			cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
			if (cap_id == PCI_CAP_ID_PCI_E) {
				buf_size = 4096;
				break;
			}
			cap_ptr = (uint8_t)ql_pci_config_get8(ha,
			    (cap_ptr + PCI_CAP_NEXT_PTR));
		}
	}

	if (cmd->ResponseLen < buf_size) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = buf_size;
		EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
		    cmd->ResponseLen);
		return;
	}

	/* Dump PCI config data. */
	if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
	    buf_size, mode)) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->DetailStatus = 0;
		EL(ha, "failed, copy err pci_dump\n");
	} else {
		cmd->Status = EXT_STATUS_OK;
		cmd->DetailStatus = buf_size;
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_pci_dump
 *	Dumps PCI config data to application buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	bp = user buffer address.
 *
 * Returns:
 *	0 on success, EFAULT on copyout failure.
 *
 * Context:
 *	Kernel context.
 */
int
ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
{
	uint32_t	pci_os;
	uint32_t	*ptr32, *org_ptr32;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	ptr32 = kmem_zalloc(pci_size, KM_SLEEP);

	/* store the initial value of ptr32 */
	org_ptr32 = ptr32;
	/* Read config space one 32-bit word at a time, little-endian. */
	for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
		*ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
		LITTLE_ENDIAN_32(ptr32);
		ptr32++;
	}

	if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
	    0) {
		EL(ha, "failed ddi_copyout\n");
		kmem_free(org_ptr32, pci_size);
		return (EFAULT);
	}

	QL_DUMP_9(org_ptr32, 8, pci_size);

	kmem_free(org_ptr32, pci_size);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}

/*
 * ql_menlo_reset
 *	Reset Menlo
 *
 * Input:
 *	ha:	adapter state pointer.
 *	bp:	buffer address.
 *	mode:	flags
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
static void
ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_MENLO_RESET	rst;
	ql_mbx_data_t	mr;
	int		rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * TODO: only vp_index 0 can do this (?)
	 */

	/* Verify the size of request structure. */
	if (cmd->RequestLen < sizeof (EXT_MENLO_RESET)) {
		/* Return error */
		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
		    sizeof (EXT_MENLO_RESET));
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get reset request. */
	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&rst, sizeof (EXT_MENLO_RESET), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Wait for I/O to stop and daemon to stall. */
	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		ql_restart_hba(ha);
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Issue the reset; mr.mb[1] carries a firmware substatus. */
	rval = ql_reset_menlo(ha, &mr, rst.Flags);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, status=%xh\n", rval);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	} else if (mr.mb[1] != 0) {
		EL(ha, "failed, substatus=%d\n", mr.mb[1]);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = mr.mb[1];
		cmd->ResponseLen = 0;
	}

	/* Resume I/O regardless of the reset outcome. */
	ql_restart_hba(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_menlo_get_fw_version
 *	Get Menlo firmware version.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	bp:	buffer address.
 *	mode:	flags
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
static void
ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	int				rval;
	ql_mbx_iocb_t			*pkt;
	EXT_MENLO_GET_FW_VERSION	ver = {0};

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	if (cmd->ResponseLen < sizeof (EXT_MENLO_GET_FW_VERSION)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_MENLO_GET_FW_VERSION);
		EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
		    sizeof (EXT_MENLO_GET_FW_VERSION));
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate packet. */
	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);

	/*
	 * Build a VERIFY MENLO IOCB that only queries the version
	 * (VMF_DO_NOT_UPDATE_FW suppresses any firmware update).
	 */
	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
	pkt->mvfy.entry_count = 1;
	pkt->mvfy.options_status = LE_16(VMF_DO_NOT_UPDATE_FW);

	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
	/* Byte-swap the response fields in place before inspecting them. */
	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
	ver.FwVersion = LE_32(pkt->mvfy.fw_version);

	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
	    pkt->mvfy.options_status != CS_COMPLETE) {
		/* Command error */
		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
		    pkt->mvfy.failure_code);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
		    QL_FUNCTION_FAILED;
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&ver,
	    (void *)(uintptr_t)cmd->ResponseAdr,
	    sizeof (EXT_MENLO_GET_FW_VERSION), mode) != 0) {
		EL(ha, "failed, ddi_copyout\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		cmd->ResponseLen = sizeof (EXT_MENLO_GET_FW_VERSION);
	}

	kmem_free(pkt, sizeof (ql_mbx_iocb_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_menlo_update_fw
 *	Get Menlo update firmware.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	bp:	buffer address.
 *	mode:	flags
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
static void
ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_mbx_iocb_t		*pkt;
	dma_mem_t		*dma_mem;
	EXT_MENLO_UPDATE_FW	fw;
	uint32_t		*ptr32;
	int			rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * TODO: only vp_index 0 can do this (?)
	 */

	/* Verify the size of request structure. */
	if (cmd->RequestLen < sizeof (EXT_MENLO_UPDATE_FW)) {
		/* Return error */
		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
		    sizeof (EXT_MENLO_UPDATE_FW));
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get update fw request. */
	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&fw,
	    sizeof (EXT_MENLO_UPDATE_FW), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Wait for I/O to stop and daemon to stall. */
	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		ql_restart_hba(ha);
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Allocate packet.
	 * NOTE: kmem_zalloc(9F) with KM_SLEEP never returns NULL, so the
	 * check below is dead defensive code.
	 */
	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
	if (dma_mem == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}
	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);

	/*
	 * Get DMA memory for the IOCB.
	 * NOTE(review): fw.TotalByteCount comes from userland and is not
	 * bounded before being used as a DMA allocation size — verify an
	 * upper limit is enforced elsewhere (e.g. in ql_get_dma_mem).
	 */
	if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
	    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
		kmem_free(dma_mem, sizeof (dma_mem_t));
		ql_restart_hba(ha);
		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get firmware data. */
	if (ql_get_buffer_data((caddr_t)(uintptr_t)fw.pFwDataBytes, dma_mem->bp,
	    fw.TotalByteCount, mode) != fw.TotalByteCount) {
		EL(ha, "failed, get_buffer_data\n");
		ql_free_dma_resource(ha, dma_mem);
		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
		kmem_free(dma_mem, sizeof (dma_mem_t));
		ql_restart_hba(ha);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Sync DMA buffer. */
	(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
	    DDI_DMA_SYNC_FORDEV);

	/* Build the VERIFY MENLO IOCB describing the firmware image. */
	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
	pkt->mvfy.entry_count = 1;
	pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
	ptr32 = dma_mem->bp;
	pkt->mvfy.fw_version = LE_32(ptr32[2]);
	pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
	pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
	pkt->mvfy.dseg_count = LE_16(1);
	pkt->mvfy.dseg_0_address[0] = (uint32_t)
	    LE_32(LSD(dma_mem->cookie.dmac_laddress));
	pkt->mvfy.dseg_0_address[1] = (uint32_t)
	    LE_32(MSD(dma_mem->cookie.dmac_laddress));
	pkt->mvfy.dseg_0_length = LE_32(fw.TotalByteCount);

	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
	/* Byte-swap the response fields in place before inspecting them. */
	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);

	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
	    pkt->mvfy.options_status != CS_COMPLETE) {
		/* Command error */
		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
		    pkt->mvfy.failure_code);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
		    QL_FUNCTION_FAILED;
		cmd->ResponseLen = 0;
	}

	ql_free_dma_resource(ha, dma_mem);
	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
	kmem_free(dma_mem, sizeof (dma_mem_t));
	ql_restart_hba(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_menlo_manage_info
 *	Get Menlo manage info.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	bp:	buffer address.
 *	mode:	flags
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
8172 */ 8173 static void 8174 ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8175 { 8176 ql_mbx_iocb_t *pkt; 8177 dma_mem_t *dma_mem = NULL; 8178 EXT_MENLO_MANAGE_INFO info; 8179 int rval; 8180 8181 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8182 8183 8184 /* The call is only supported for Schultz right now */ 8185 if (CFG_IST(ha, CFG_CTRL_8081)) { 8186 ql_get_xgmac_statistics(ha, cmd, mode); 8187 QL_PRINT_9(CE_CONT, "(%d): CFG_CTRL_81XX done\n", 8188 ha->instance); 8189 return; 8190 } 8191 8192 if (!CFG_IST(ha, CFG_CTRL_8081) || !CFG_IST(ha, CFG_CTRL_MENLO)) { 8193 EL(ha, "failed, invalid request for HBA\n"); 8194 cmd->Status = EXT_STATUS_INVALID_REQUEST; 8195 cmd->ResponseLen = 0; 8196 return; 8197 } 8198 8199 /* Verify the size of request structure. */ 8200 if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) { 8201 /* Return error */ 8202 EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen, 8203 sizeof (EXT_MENLO_MANAGE_INFO)); 8204 cmd->Status = EXT_STATUS_INVALID_PARAM; 8205 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN; 8206 cmd->ResponseLen = 0; 8207 return; 8208 } 8209 8210 /* Get manage info request. */ 8211 if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, 8212 (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) { 8213 EL(ha, "failed, ddi_copyin\n"); 8214 cmd->Status = EXT_STATUS_COPY_ERR; 8215 cmd->ResponseLen = 0; 8216 return; 8217 } 8218 8219 /* Allocate packet. 
*/ 8220 pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP); 8221 8222 pkt->mdata.entry_type = MENLO_DATA_TYPE; 8223 pkt->mdata.entry_count = 1; 8224 pkt->mdata.options_status = (uint16_t)LE_16(info.Operation); 8225 8226 /* Get DMA memory for the IOCB */ 8227 if (info.Operation == MENLO_OP_READ_MEM || 8228 info.Operation == MENLO_OP_WRITE_MEM) { 8229 pkt->mdata.total_byte_count = LE_32(info.TotalByteCount); 8230 pkt->mdata.parameter_1 = 8231 LE_32(info.Parameters.ap.MenloMemory.StartingAddr); 8232 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), 8233 KM_SLEEP); 8234 if (dma_mem == NULL) { 8235 EL(ha, "failed, kmem_zalloc\n"); 8236 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 8237 cmd->Status = EXT_STATUS_NO_MEMORY; 8238 cmd->ResponseLen = 0; 8239 return; 8240 } 8241 if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount, 8242 LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) { 8243 cmn_err(CE_WARN, "%s(%d): request queue DMA memory " 8244 "alloc failed", QL_NAME, ha->instance); 8245 kmem_free(dma_mem, sizeof (dma_mem_t)); 8246 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 8247 cmd->Status = EXT_STATUS_MS_NO_RESPONSE; 8248 cmd->ResponseLen = 0; 8249 return; 8250 } 8251 if (info.Operation == MENLO_OP_WRITE_MEM) { 8252 /* Get data. 
*/ 8253 if (ql_get_buffer_data( 8254 (caddr_t)(uintptr_t)info.pDataBytes, 8255 dma_mem->bp, info.TotalByteCount, mode) != 8256 info.TotalByteCount) { 8257 EL(ha, "failed, get_buffer_data\n"); 8258 ql_free_dma_resource(ha, dma_mem); 8259 kmem_free(dma_mem, sizeof (dma_mem_t)); 8260 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 8261 cmd->Status = EXT_STATUS_COPY_ERR; 8262 cmd->ResponseLen = 0; 8263 return; 8264 } 8265 (void) ddi_dma_sync(dma_mem->dma_handle, 0, 8266 dma_mem->size, DDI_DMA_SYNC_FORDEV); 8267 } 8268 pkt->mdata.dseg_count = LE_16(1); 8269 pkt->mdata.dseg_0_address[0] = (uint32_t) 8270 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 8271 pkt->mdata.dseg_0_address[1] = (uint32_t) 8272 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 8273 pkt->mdata.dseg_0_length = LE_32(info.TotalByteCount); 8274 } else if (info.Operation & MENLO_OP_CHANGE_CONFIG) { 8275 pkt->mdata.parameter_1 = 8276 LE_32(info.Parameters.ap.MenloConfig.ConfigParamID); 8277 pkt->mdata.parameter_2 = 8278 LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0); 8279 pkt->mdata.parameter_3 = 8280 LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1); 8281 } else if (info.Operation & MENLO_OP_GET_INFO) { 8282 pkt->mdata.parameter_1 = 8283 LE_32(info.Parameters.ap.MenloInfo.InfoDataType); 8284 pkt->mdata.parameter_2 = 8285 LE_32(info.Parameters.ap.MenloInfo.InfoContext); 8286 } 8287 8288 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t)); 8289 LITTLE_ENDIAN_16(&pkt->mdata.options_status); 8290 LITTLE_ENDIAN_16(&pkt->mdata.failure_code); 8291 8292 if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 || 8293 pkt->mdata.options_status != CS_COMPLETE) { 8294 /* Command error */ 8295 EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval, 8296 pkt->mdata.entry_status & 0x3c, pkt->mdata.options_status, 8297 pkt->mdata.failure_code); 8298 cmd->Status = EXT_STATUS_ERR; 8299 cmd->DetailStatus = rval != QL_SUCCESS ? 
rval : 8300 QL_FUNCTION_FAILED; 8301 cmd->ResponseLen = 0; 8302 } else if (info.Operation == MENLO_OP_READ_MEM) { 8303 (void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size, 8304 DDI_DMA_SYNC_FORKERNEL); 8305 if (ql_send_buffer_data((caddr_t)(uintptr_t)info.pDataBytes, 8306 dma_mem->bp, info.TotalByteCount, mode) != 8307 info.TotalByteCount) { 8308 cmd->Status = EXT_STATUS_COPY_ERR; 8309 cmd->ResponseLen = 0; 8310 } 8311 } 8312 8313 ql_free_dma_resource(ha, dma_mem); 8314 kmem_free(dma_mem, sizeof (dma_mem_t)); 8315 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 8316 8317 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8318 } 8319 8320 /* 8321 * ql_suspend_hba 8322 * Suspends all adapter ports. 8323 * 8324 * Input: 8325 * ha: adapter state pointer. 8326 * options: BIT_0 --> leave driver stalled on exit if 8327 * failed. 8328 * 8329 * Returns: 8330 * ql local function return status code. 8331 * 8332 * Context: 8333 * Kernel context. 8334 */ 8335 static int 8336 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt) 8337 { 8338 ql_adapter_state_t *ha2; 8339 ql_link_t *link; 8340 int rval = QL_SUCCESS; 8341 8342 /* Quiesce I/O on all adapter ports */ 8343 for (link = ql_hba.first; link != NULL; link = link->next) { 8344 ha2 = link->base_address; 8345 8346 if (ha2->fru_hba_index != ha->fru_hba_index) { 8347 continue; 8348 } 8349 8350 if ((rval = ql_stall_driver(ha2, opt)) != QL_SUCCESS) { 8351 EL(ha, "ql_stall_driver status=%xh\n", rval); 8352 break; 8353 } 8354 } 8355 8356 return (rval); 8357 } 8358 8359 /* 8360 * ql_restart_hba 8361 * Restarts adapter. 8362 * 8363 * Input: 8364 * ha: adapter state pointer. 8365 * 8366 * Context: 8367 * Kernel context. 
8368 */ 8369 static void 8370 ql_restart_hba(ql_adapter_state_t *ha) 8371 { 8372 ql_adapter_state_t *ha2; 8373 ql_link_t *link; 8374 8375 /* Resume I/O on all adapter ports */ 8376 for (link = ql_hba.first; link != NULL; link = link->next) { 8377 ha2 = link->base_address; 8378 8379 if (ha2->fru_hba_index != ha->fru_hba_index) { 8380 continue; 8381 } 8382 8383 ql_restart_driver(ha2); 8384 } 8385 } 8386 8387 /* 8388 * ql_get_vp_cnt_id 8389 * Retrieves pci config space data 8390 * 8391 * Input: 8392 * ha: adapter state pointer. 8393 * cmd: Local EXT_IOCTL cmd struct pointer. 8394 * mode: flags. 8395 * 8396 * Returns: 8397 * None, request status indicated in cmd->Status. 8398 * 8399 * Context: 8400 * Kernel context. 8401 * 8402 */ 8403 static void 8404 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8405 { 8406 ql_adapter_state_t *vha; 8407 PEXT_VPORT_ID_CNT ptmp_vp; 8408 int id = 0; 8409 int rval; 8410 char name[MAXPATHLEN]; 8411 8412 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8413 8414 /* 8415 * To be backward compatible with older API 8416 * check for the size of old EXT_VPORT_ID_CNT 8417 */ 8418 if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT) && 8419 (cmd->ResponseLen != EXT_OLD_VPORT_ID_CNT_SIZE)) { 8420 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 8421 cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT); 8422 EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n", 8423 cmd->ResponseLen); 8424 cmd->ResponseLen = 0; 8425 return; 8426 } 8427 8428 ptmp_vp = (EXT_VPORT_ID_CNT *) 8429 kmem_zalloc(sizeof (EXT_VPORT_ID_CNT), KM_SLEEP); 8430 if (ptmp_vp == NULL) { 8431 EL(ha, "failed, kmem_zalloc\n"); 8432 cmd->ResponseLen = 0; 8433 return; 8434 } 8435 vha = ha->vp_next; 8436 while (vha != NULL) { 8437 ptmp_vp->VpCnt++; 8438 ptmp_vp->VpId[id] = vha->vp_index; 8439 (void) ddi_pathname(vha->dip, name); 8440 (void) strcpy((char *)ptmp_vp->vp_path[id], name); 8441 ptmp_vp->VpDrvInst[id] = (int32_t)vha->instance; 8442 id++; 8443 vha = vha->vp_next; 
8444 } 8445 rval = ddi_copyout((void *)ptmp_vp, 8446 (void *)(uintptr_t)(cmd->ResponseAdr), 8447 cmd->ResponseLen, mode); 8448 if (rval != 0) { 8449 cmd->Status = EXT_STATUS_COPY_ERR; 8450 cmd->ResponseLen = 0; 8451 EL(ha, "failed, ddi_copyout\n"); 8452 } else { 8453 cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT); 8454 QL_PRINT_9(CE_CONT, "(%d): done, vport_cnt=%d\n", 8455 ha->instance, ptmp_vp->VpCnt); 8456 } 8457 8458 } 8459 8460 /* 8461 * ql_vp_ioctl 8462 * Performs all EXT_CC_VPORT_CMD functions. 8463 * 8464 * Input: 8465 * ha: adapter state pointer. 8466 * cmd: Local EXT_IOCTL cmd struct pointer. 8467 * mode: flags. 8468 * 8469 * Returns: 8470 * None, request status indicated in cmd->Status. 8471 * 8472 * Context: 8473 * Kernel context. 8474 */ 8475 static void 8476 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8477 { 8478 QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, 8479 cmd->SubCode); 8480 8481 /* case off on command subcode */ 8482 switch (cmd->SubCode) { 8483 case EXT_VF_SC_VPORT_GETINFO: 8484 ql_qry_vport(ha, cmd, mode); 8485 break; 8486 default: 8487 /* function not supported. */ 8488 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 8489 EL(ha, "failed, Unsupported Subcode=%xh\n", 8490 cmd->SubCode); 8491 break; 8492 } 8493 8494 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8495 } 8496 8497 /* 8498 * ql_qry_vport 8499 * Performs EXT_VF_SC_VPORT_GETINFO subfunction. 8500 * 8501 * Input: 8502 * ha: adapter state pointer. 8503 * cmd: EXT_IOCTL cmd struct pointer. 8504 * mode: flags. 8505 * 8506 * Returns: 8507 * None, request status indicated in cmd->Status. 8508 * 8509 * Context: 8510 * Kernel context. 
8511 */ 8512 static void 8513 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode) 8514 { 8515 ql_adapter_state_t *tmp_vha; 8516 EXT_VPORT_INFO tmp_vport = {0}; 8517 int max_vport; 8518 8519 QL_PRINT_9(CE_CONT, "(%d): started\n", vha->instance); 8520 8521 if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) { 8522 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 8523 cmd->DetailStatus = sizeof (EXT_VPORT_INFO); 8524 EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n", 8525 cmd->ResponseLen); 8526 cmd->ResponseLen = 0; 8527 return; 8528 } 8529 8530 /* Fill in the vport information. */ 8531 bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn, 8532 EXT_DEF_WWN_NAME_SIZE); 8533 bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn, 8534 EXT_DEF_WWN_NAME_SIZE); 8535 tmp_vport.state = vha->state; 8536 tmp_vport.id = vha->vp_index; 8537 8538 tmp_vha = vha->pha->vp_next; 8539 while (tmp_vha != NULL) { 8540 tmp_vport.used++; 8541 tmp_vha = tmp_vha->vp_next; 8542 } 8543 8544 max_vport = (CFG_IST(vha, CFG_CTRL_2422) ? MAX_24_VIRTUAL_PORTS : 8545 MAX_25_VIRTUAL_PORTS); 8546 if (max_vport > tmp_vport.used) { 8547 tmp_vport.free = max_vport - tmp_vport.used; 8548 } 8549 8550 if (ddi_copyout((void *)&tmp_vport, 8551 (void *)(uintptr_t)(cmd->ResponseAdr), 8552 sizeof (EXT_VPORT_INFO), mode) != 0) { 8553 cmd->Status = EXT_STATUS_COPY_ERR; 8554 cmd->ResponseLen = 0; 8555 EL(vha, "failed, ddi_copyout\n"); 8556 } else { 8557 cmd->ResponseLen = sizeof (EXT_VPORT_INFO); 8558 QL_PRINT_9(CE_CONT, "(%d): done\n", vha->instance); 8559 } 8560 } 8561 8562 /* 8563 * ql_access_flash 8564 * Performs all EXT_CC_ACCESS_FLASH_OS functions. 8565 * 8566 * Input: 8567 * pi: port info pointer. 8568 * cmd: Local EXT_IOCTL cmd struct pointer. 8569 * mode: flags. 8570 * 8571 * Returns: 8572 * None, request status indicated in cmd->Status. 8573 * 8574 * Context: 8575 * Kernel context. 
8576 */ 8577 static void 8578 ql_access_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8579 { 8580 int rval; 8581 8582 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8583 8584 switch (cmd->SubCode) { 8585 case EXT_SC_FLASH_READ: 8586 if ((rval = ql_flash_fcode_dump(ha, 8587 (void *)(uintptr_t)(cmd->ResponseAdr), 8588 (size_t)(cmd->ResponseLen), cmd->Reserved1, mode)) != 0) { 8589 cmd->Status = EXT_STATUS_COPY_ERR; 8590 cmd->ResponseLen = 0; 8591 EL(ha, "flash_fcode_dump status=%xh\n", rval); 8592 } 8593 break; 8594 case EXT_SC_FLASH_WRITE: 8595 if ((rval = ql_r_m_w_flash(ha, 8596 (void *)(uintptr_t)(cmd->RequestAdr), 8597 (size_t)(cmd->RequestLen), cmd->Reserved1, mode)) != 8598 QL_SUCCESS) { 8599 cmd->Status = EXT_STATUS_COPY_ERR; 8600 cmd->ResponseLen = 0; 8601 EL(ha, "r_m_w_flash status=%xh\n", rval); 8602 } else { 8603 /* Reset caches on all adapter instances. */ 8604 ql_update_flash_caches(ha); 8605 } 8606 break; 8607 default: 8608 EL(ha, "unknown subcode=%xh\n", cmd->SubCode); 8609 cmd->Status = EXT_STATUS_ERR; 8610 cmd->ResponseLen = 0; 8611 break; 8612 } 8613 8614 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8615 } 8616 8617 /* 8618 * ql_reset_cmd 8619 * Performs all EXT_CC_RESET_FW_OS functions. 8620 * 8621 * Input: 8622 * ha: adapter state pointer. 8623 * cmd: Local EXT_IOCTL cmd struct pointer. 8624 * 8625 * Returns: 8626 * None, request status indicated in cmd->Status. 8627 * 8628 * Context: 8629 * Kernel context. 
8630 */ 8631 static void 8632 ql_reset_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd) 8633 { 8634 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8635 8636 switch (cmd->SubCode) { 8637 case EXT_SC_RESET_FC_FW: 8638 EL(ha, "isp_abort_needed\n"); 8639 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0); 8640 break; 8641 case EXT_SC_RESET_MPI_FW: 8642 if (!(CFG_IST(ha, CFG_CTRL_81XX))) { 8643 EL(ha, "invalid request for HBA\n"); 8644 cmd->Status = EXT_STATUS_INVALID_REQUEST; 8645 cmd->ResponseLen = 0; 8646 } else { 8647 /* Wait for I/O to stop and daemon to stall. */ 8648 if (ql_suspend_hba(ha, 0) != QL_SUCCESS) { 8649 EL(ha, "ql_suspend_hba failed\n"); 8650 cmd->Status = EXT_STATUS_BUSY; 8651 cmd->ResponseLen = 0; 8652 } else if (ql_restart_mpi(ha) != QL_SUCCESS) { 8653 cmd->Status = EXT_STATUS_ERR; 8654 cmd->ResponseLen = 0; 8655 } else { 8656 uint8_t timer; 8657 /* 8658 * While the restart_mpi mailbox cmd may be 8659 * done the MPI is not. Wait at least 6 sec. or 8660 * exit if the loop comes up. 8661 */ 8662 for (timer = 6; timer; timer--) { 8663 if (!(ha->task_daemon_flags & 8664 LOOP_DOWN)) { 8665 break; 8666 } 8667 /* Delay for 1 second. */ 8668 ql_delay(ha, 1000000); 8669 } 8670 } 8671 ql_restart_hba(ha); 8672 } 8673 break; 8674 default: 8675 EL(ha, "unknown subcode=%xh\n", cmd->SubCode); 8676 cmd->Status = EXT_STATUS_ERR; 8677 cmd->ResponseLen = 0; 8678 break; 8679 } 8680 8681 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8682 } 8683 8684 /* 8685 * ql_get_dcbx_parameters 8686 * Get DCBX parameters. 8687 * 8688 * Input: 8689 * ha: adapter state pointer. 8690 * cmd: User space CT arguments pointer. 8691 * mode: flags. 
8692 */ 8693 static void 8694 ql_get_dcbx_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8695 { 8696 uint8_t *tmp_buf; 8697 int rval; 8698 8699 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8700 8701 if (!(CFG_IST(ha, CFG_CTRL_8081))) { 8702 EL(ha, "invalid request for HBA\n"); 8703 cmd->Status = EXT_STATUS_INVALID_REQUEST; 8704 cmd->ResponseLen = 0; 8705 return; 8706 } 8707 8708 /* Allocate memory for command. */ 8709 tmp_buf = kmem_zalloc(EXT_DEF_DCBX_PARAM_BUF_SIZE, KM_SLEEP); 8710 /* Send command */ 8711 rval = ql_get_dcbx_params(ha, EXT_DEF_DCBX_PARAM_BUF_SIZE, 8712 (caddr_t)tmp_buf); 8713 if (rval != QL_SUCCESS) { 8714 /* error */ 8715 EL(ha, "failed, get_dcbx_params_mbx=%xh\n", rval); 8716 kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE); 8717 cmd->Status = EXT_STATUS_ERR; 8718 cmd->ResponseLen = 0; 8719 return; 8720 } 8721 8722 /* Copy the response */ 8723 if (ql_send_buffer_data((caddr_t)tmp_buf, 8724 (caddr_t)(uintptr_t)cmd->ResponseAdr, 8725 EXT_DEF_DCBX_PARAM_BUF_SIZE, mode) != EXT_DEF_DCBX_PARAM_BUF_SIZE) { 8726 EL(ha, "failed, ddi_copyout\n"); 8727 cmd->Status = EXT_STATUS_COPY_ERR; 8728 cmd->ResponseLen = 0; 8729 } else { 8730 cmd->ResponseLen = EXT_DEF_DCBX_PARAM_BUF_SIZE; 8731 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8732 } 8733 kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE); 8734 8735 } 8736 8737 /* 8738 * ql_qry_cna_port 8739 * Performs EXT_SC_QUERY_CNA_PORT subfunction. 8740 * 8741 * Input: 8742 * ha: adapter state pointer. 8743 * cmd: EXT_IOCTL cmd struct pointer. 8744 * mode: flags. 8745 * 8746 * Returns: 8747 * None, request status indicated in cmd->Status. 8748 * 8749 * Context: 8750 * Kernel context. 
8751 */ 8752 static void 8753 ql_qry_cna_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8754 { 8755 EXT_CNA_PORT cna_port = {0}; 8756 8757 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8758 8759 if (!(CFG_IST(ha, CFG_CTRL_8081))) { 8760 EL(ha, "invalid request for HBA\n"); 8761 cmd->Status = EXT_STATUS_INVALID_REQUEST; 8762 cmd->ResponseLen = 0; 8763 return; 8764 } 8765 8766 if (cmd->ResponseLen < sizeof (EXT_CNA_PORT)) { 8767 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 8768 cmd->DetailStatus = sizeof (EXT_CNA_PORT); 8769 EL(ha, "failed, ResponseLen < EXT_CNA_PORT, Len=%xh\n", 8770 cmd->ResponseLen); 8771 cmd->ResponseLen = 0; 8772 return; 8773 } 8774 8775 cna_port.VLanId = ha->fcoe_vlan_id; 8776 cna_port.FabricParam = ha->fabric_params; 8777 bcopy(ha->fcoe_vnport_mac, cna_port.VNPortMACAddress, 8778 EXT_DEF_MAC_ADDRESS_SIZE); 8779 8780 if (ddi_copyout((void *)&cna_port, 8781 (void *)(uintptr_t)(cmd->ResponseAdr), 8782 sizeof (EXT_CNA_PORT), mode) != 0) { 8783 cmd->Status = EXT_STATUS_COPY_ERR; 8784 cmd->ResponseLen = 0; 8785 EL(ha, "failed, ddi_copyout\n"); 8786 } else { 8787 cmd->ResponseLen = sizeof (EXT_CNA_PORT); 8788 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8789 } 8790 } 8791 8792 /* 8793 * ql_qry_adapter_versions 8794 * Performs EXT_SC_QUERY_ADAPTER_VERSIONS subfunction. 8795 * 8796 * Input: 8797 * ha: adapter state pointer. 8798 * cmd: EXT_IOCTL cmd struct pointer. 8799 * mode: flags. 8800 * 8801 * Returns: 8802 * None, request status indicated in cmd->Status. 8803 * 8804 * Context: 8805 * Kernel context. 8806 */ 8807 static void 8808 ql_qry_adapter_versions(ql_adapter_state_t *ha, EXT_IOCTL *cmd, 8809 int mode) 8810 { 8811 uint8_t is_8142, mpi_cap; 8812 uint32_t ver_len, transfer_size; 8813 PEXT_ADAPTERREGIONVERSION padapter_ver = NULL; 8814 8815 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8816 8817 /* 8142s do not have a EDC PHY firmware. 
*/ 8818 mpi_cap = (uint8_t)(ha->mpi_capability_list >> 8); 8819 8820 is_8142 = 0; 8821 /* Sizeof (Length + Reserved) = 8 Bytes */ 8822 if (mpi_cap == 0x02 || mpi_cap == 0x04) { 8823 ver_len = (sizeof (EXT_REGIONVERSION) * (NO_OF_VERSIONS - 1)) 8824 + 8; 8825 is_8142 = 1; 8826 } else { 8827 ver_len = (sizeof (EXT_REGIONVERSION) * NO_OF_VERSIONS) + 8; 8828 } 8829 8830 /* Allocate local memory for EXT_ADAPTERREGIONVERSION */ 8831 padapter_ver = (EXT_ADAPTERREGIONVERSION *)kmem_zalloc(ver_len, 8832 KM_SLEEP); 8833 8834 if (padapter_ver == NULL) { 8835 EL(ha, "failed, kmem_zalloc\n"); 8836 cmd->Status = EXT_STATUS_NO_MEMORY; 8837 cmd->ResponseLen = 0; 8838 return; 8839 } 8840 8841 padapter_ver->Length = 1; 8842 /* Copy MPI version */ 8843 padapter_ver->RegionVersion[0].Region = 8844 EXT_OPT_ROM_REGION_MPI_RISC_FW; 8845 padapter_ver->RegionVersion[0].Version[0] = 8846 ha->mpi_fw_major_version; 8847 padapter_ver->RegionVersion[0].Version[1] = 8848 ha->mpi_fw_minor_version; 8849 padapter_ver->RegionVersion[0].Version[2] = 8850 ha->mpi_fw_subminor_version; 8851 padapter_ver->RegionVersion[0].VersionLength = 3; 8852 padapter_ver->RegionVersion[0].Location = RUNNING_VERSION; 8853 8854 if (!is_8142) { 8855 padapter_ver->RegionVersion[1].Region = 8856 EXT_OPT_ROM_REGION_EDC_PHY_FW; 8857 padapter_ver->RegionVersion[1].Version[0] = 8858 ha->phy_fw_major_version; 8859 padapter_ver->RegionVersion[1].Version[1] = 8860 ha->phy_fw_minor_version; 8861 padapter_ver->RegionVersion[1].Version[2] = 8862 ha->phy_fw_subminor_version; 8863 padapter_ver->RegionVersion[1].VersionLength = 3; 8864 padapter_ver->RegionVersion[1].Location = RUNNING_VERSION; 8865 padapter_ver->Length = NO_OF_VERSIONS; 8866 } 8867 8868 if (cmd->ResponseLen < ver_len) { 8869 EL(ha, "failed, ResponseLen < ver_len, ", 8870 "RespLen=%xh ver_len=%xh\n", cmd->ResponseLen, ver_len); 8871 /* Calculate the No. of valid versions being returned. 
*/ 8872 padapter_ver->Length = (uint32_t) 8873 ((cmd->ResponseLen - 8) / sizeof (EXT_REGIONVERSION)); 8874 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 8875 cmd->DetailStatus = ver_len; 8876 transfer_size = cmd->ResponseLen; 8877 } else { 8878 transfer_size = ver_len; 8879 } 8880 8881 if (ddi_copyout((void *)padapter_ver, 8882 (void *)(uintptr_t)(cmd->ResponseAdr), 8883 transfer_size, mode) != 0) { 8884 cmd->Status = EXT_STATUS_COPY_ERR; 8885 cmd->ResponseLen = 0; 8886 EL(ha, "failed, ddi_copyout\n"); 8887 } else { 8888 cmd->ResponseLen = ver_len; 8889 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8890 } 8891 8892 kmem_free(padapter_ver, ver_len); 8893 } 8894 8895 /* 8896 * ql_get_xgmac_statistics 8897 * Get XgMac information 8898 * 8899 * Input: 8900 * ha: adapter state pointer. 8901 * cmd: EXT_IOCTL cmd struct pointer. 8902 * mode: flags. 8903 * 8904 * Returns: 8905 * None, request status indicated in cmd->Status. 8906 * 8907 * Context: 8908 * Kernel context. 8909 */ 8910 static void 8911 ql_get_xgmac_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8912 { 8913 int rval; 8914 uint32_t size; 8915 int8_t *tmp_buf; 8916 EXT_MENLO_MANAGE_INFO info; 8917 8918 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8919 8920 /* Verify the size of request structure. */ 8921 if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) { 8922 /* Return error */ 8923 EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen, 8924 sizeof (EXT_MENLO_MANAGE_INFO)); 8925 cmd->Status = EXT_STATUS_INVALID_PARAM; 8926 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN; 8927 cmd->ResponseLen = 0; 8928 return; 8929 } 8930 8931 /* Get manage info request. 
*/ 8932 if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, 8933 (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) { 8934 EL(ha, "failed, ddi_copyin\n"); 8935 cmd->Status = EXT_STATUS_COPY_ERR; 8936 cmd->ResponseLen = 0; 8937 return; 8938 } 8939 8940 size = info.TotalByteCount; 8941 if (!size) { 8942 /* parameter error */ 8943 cmd->Status = EXT_STATUS_INVALID_PARAM; 8944 cmd->DetailStatus = 0; 8945 EL(ha, "failed, size=%xh\n", size); 8946 cmd->ResponseLen = 0; 8947 return; 8948 } 8949 8950 /* Allocate memory for command. */ 8951 tmp_buf = kmem_zalloc(size, KM_SLEEP); 8952 8953 if (!(info.Operation & MENLO_OP_GET_INFO)) { 8954 EL(ha, "Invalid request for 81XX\n"); 8955 kmem_free(tmp_buf, size); 8956 cmd->Status = EXT_STATUS_ERR; 8957 cmd->ResponseLen = 0; 8958 return; 8959 } 8960 8961 rval = ql_get_xgmac_stats(ha, size, (caddr_t)tmp_buf); 8962 8963 if (rval != QL_SUCCESS) { 8964 /* error */ 8965 EL(ha, "failed, get_xgmac_stats =%xh\n", rval); 8966 kmem_free(tmp_buf, size); 8967 cmd->Status = EXT_STATUS_ERR; 8968 cmd->ResponseLen = 0; 8969 return; 8970 } 8971 8972 if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)info.pDataBytes, 8973 size, mode) != size) { 8974 EL(ha, "failed, ddi_copyout\n"); 8975 cmd->Status = EXT_STATUS_COPY_ERR; 8976 cmd->ResponseLen = 0; 8977 } else { 8978 cmd->ResponseLen = info.TotalByteCount; 8979 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8980 } 8981 kmem_free(tmp_buf, size); 8982 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8983 } 8984 8985 /* 8986 * ql_get_fcf_list 8987 * Get FCF list. 8988 * 8989 * Input: 8990 * ha: adapter state pointer. 8991 * cmd: User space CT arguments pointer. 8992 * mode: flags. 
8993 */ 8994 static void 8995 ql_get_fcf_list(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8996 { 8997 uint8_t *tmp_buf; 8998 int rval; 8999 EXT_FCF_LIST fcf_list = {0}; 9000 ql_fcf_list_desc_t mb_fcf_list = {0}; 9001 9002 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 9003 9004 if (!(CFG_IST(ha, CFG_CTRL_81XX))) { 9005 EL(ha, "invalid request for HBA\n"); 9006 cmd->Status = EXT_STATUS_INVALID_REQUEST; 9007 cmd->ResponseLen = 0; 9008 return; 9009 } 9010 /* Get manage info request. */ 9011 if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, 9012 (caddr_t)&fcf_list, sizeof (EXT_FCF_LIST), mode) != 0) { 9013 EL(ha, "failed, ddi_copyin\n"); 9014 cmd->Status = EXT_STATUS_COPY_ERR; 9015 cmd->ResponseLen = 0; 9016 return; 9017 } 9018 9019 if (!(fcf_list.BufSize)) { 9020 /* Return error */ 9021 EL(ha, "failed, fcf_list BufSize is=%xh\n", 9022 fcf_list.BufSize); 9023 cmd->Status = EXT_STATUS_INVALID_PARAM; 9024 cmd->ResponseLen = 0; 9025 return; 9026 } 9027 /* Allocate memory for command. 
*/ 9028 tmp_buf = kmem_zalloc(fcf_list.BufSize, KM_SLEEP); 9029 /* build the descriptor */ 9030 if (fcf_list.Options) { 9031 mb_fcf_list.options = FCF_LIST_RETURN_ONE; 9032 } else { 9033 mb_fcf_list.options = FCF_LIST_RETURN_ALL; 9034 } 9035 mb_fcf_list.fcf_index = (uint16_t)fcf_list.FcfIndex; 9036 mb_fcf_list.buffer_size = fcf_list.BufSize; 9037 9038 /* Send command */ 9039 rval = ql_get_fcf_list_mbx(ha, &mb_fcf_list, (caddr_t)tmp_buf); 9040 if (rval != QL_SUCCESS) { 9041 /* error */ 9042 EL(ha, "failed, get_fcf_list_mbx=%xh\n", rval); 9043 kmem_free(tmp_buf, fcf_list.BufSize); 9044 cmd->Status = EXT_STATUS_ERR; 9045 cmd->ResponseLen = 0; 9046 return; 9047 } 9048 9049 /* Copy the response */ 9050 if (ql_send_buffer_data((caddr_t)tmp_buf, 9051 (caddr_t)(uintptr_t)cmd->ResponseAdr, 9052 fcf_list.BufSize, mode) != fcf_list.BufSize) { 9053 EL(ha, "failed, ddi_copyout\n"); 9054 cmd->Status = EXT_STATUS_COPY_ERR; 9055 cmd->ResponseLen = 0; 9056 } else { 9057 cmd->ResponseLen = mb_fcf_list.buffer_size; 9058 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 9059 } 9060 9061 kmem_free(tmp_buf, fcf_list.BufSize); 9062 } 9063 9064 /* 9065 * ql_get_resource_counts 9066 * Get Resource counts: 9067 * 9068 * Input: 9069 * ha: adapter state pointer. 9070 * cmd: User space CT arguments pointer. 9071 * mode: flags. 
9072 */ 9073 static void 9074 ql_get_resource_counts(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 9075 { 9076 int rval; 9077 ql_mbx_data_t mr; 9078 EXT_RESOURCE_CNTS tmp_rc_cnt = {0}; 9079 9080 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 9081 9082 if (!(CFG_IST(ha, CFG_CTRL_242581))) { 9083 EL(ha, "invalid request for HBA\n"); 9084 cmd->Status = EXT_STATUS_INVALID_REQUEST; 9085 cmd->ResponseLen = 0; 9086 return; 9087 } 9088 9089 if (cmd->ResponseLen < sizeof (EXT_RESOURCE_CNTS)) { 9090 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 9091 cmd->DetailStatus = sizeof (EXT_RESOURCE_CNTS); 9092 EL(ha, "failed, ResponseLen < EXT_RESOURCE_CNTS, " 9093 "Len=%xh\n", cmd->ResponseLen); 9094 cmd->ResponseLen = 0; 9095 return; 9096 } 9097 9098 rval = ql_get_resource_cnts(ha, &mr); 9099 if (rval != QL_SUCCESS) { 9100 EL(ha, "resource cnt mbx failed\n"); 9101 cmd->Status = EXT_STATUS_ERR; 9102 cmd->ResponseLen = 0; 9103 return; 9104 } 9105 9106 tmp_rc_cnt.OrgTgtXchgCtrlCnt = (uint32_t)mr.mb[1]; 9107 tmp_rc_cnt.CurTgtXchgCtrlCnt = (uint32_t)mr.mb[2]; 9108 tmp_rc_cnt.CurXchgCtrlCnt = (uint32_t)mr.mb[3]; 9109 tmp_rc_cnt.OrgXchgCtrlCnt = (uint32_t)mr.mb[6]; 9110 tmp_rc_cnt.CurIocbBufCnt = (uint32_t)mr.mb[7]; 9111 tmp_rc_cnt.OrgIocbBufCnt = (uint32_t)mr.mb[10]; 9112 tmp_rc_cnt.NoOfSupVPs = (uint32_t)mr.mb[11]; 9113 tmp_rc_cnt.NoOfSupFCFs = (uint32_t)mr.mb[12]; 9114 9115 rval = ddi_copyout((void *)&tmp_rc_cnt, 9116 (void *)(uintptr_t)(cmd->ResponseAdr), 9117 sizeof (EXT_RESOURCE_CNTS), mode); 9118 if (rval != 0) { 9119 cmd->Status = EXT_STATUS_COPY_ERR; 9120 cmd->ResponseLen = 0; 9121 EL(ha, "failed, ddi_copyout\n"); 9122 } else { 9123 cmd->ResponseLen = sizeof (EXT_RESOURCE_CNTS); 9124 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 9125 } 9126 }