1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /* Copyright 2010 QLogic Corporation */
  23 
  24 /*
  25  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  26  */
  27 /*
  28  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  29  */
  30 
  31 #pragma ident   "Copyright 2010 QLogic Corporation; ql_api.c"
  32 
  33 /*
  34  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
  35  *
  36  * ***********************************************************************
  37  * *                                                                    **
  38  * *                            NOTICE                                  **
  39  * *            COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION              **
  40  * *                    ALL RIGHTS RESERVED                             **
  41  * *                                                                    **
  42  * ***********************************************************************
  43  *
  44  */
  45 
  46 #include <ql_apps.h>
  47 #include <ql_api.h>
  48 #include <ql_debug.h>
  49 #include <ql_init.h>
  50 #include <ql_iocb.h>
  51 #include <ql_ioctl.h>
  52 #include <ql_isr.h>
  53 #include <ql_mbx.h>
  54 #include <ql_nx.h>
  55 #include <ql_xioctl.h>
  56 
  57 /*
  58  * Solaris external defines.
  59  */
  60 extern pri_t minclsyspri;
  61 extern pri_t maxclsyspri;
  62 
  63 /*
  64  * dev_ops functions prototypes
  65  */
  66 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
  67 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
  68 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
  69 static int ql_power(dev_info_t *, int, int);
  70 static int ql_quiesce(dev_info_t *);
  71 
  72 /*
  73  * FCA functions prototypes exported by means of the transport table
  74  */
  75 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
  76     fc_fca_bind_info_t *);
  77 static void ql_unbind_port(opaque_t);
  78 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
  79 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
  80 static int ql_els_send(opaque_t, fc_packet_t *);
  81 static int ql_get_cap(opaque_t, char *, void *);
  82 static int ql_set_cap(opaque_t, char *, void *);
  83 static int ql_getmap(opaque_t, fc_lilpmap_t *);
  84 static int ql_transport(opaque_t, fc_packet_t *);
  85 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
  86 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
  87 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
  88 static int ql_abort(opaque_t, fc_packet_t *, int);
  89 static int ql_reset(opaque_t, uint32_t);
  90 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
  91 static opaque_t ql_get_device(opaque_t, fc_portid_t);
  92 
  93 /*
  94  * FCA Driver Support Function Prototypes.
  95  */
  96 static uint16_t ql_wait_outstanding(ql_adapter_state_t *);
  97 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
  98     ql_srb_t *);
  99 static void ql_task_daemon(void *);
 100 static void ql_task_thread(ql_adapter_state_t *);
 101 static void ql_unsol_callback(ql_srb_t *);
 102 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
 103     fc_unsol_buf_t *);
 104 static void ql_timer(void *);
 105 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
 106 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
 107     uint32_t *, uint32_t *);
 108 static void ql_halt(ql_adapter_state_t *, int);
 109 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
 110 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
 111 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
 112 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
 113 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
 114 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
 115 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
 116 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
 117 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
 118 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
 119 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
 120 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
 121 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
 122 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
 123 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
 124 static int ql_login_port(ql_adapter_state_t *, port_id_t);
 125 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
 126 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
 127 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
 128 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
 129 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
 130 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
 131 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
 132 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
 133     ql_srb_t *);
 134 static int ql_kstat_update(kstat_t *, int);
 135 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
 136 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
 137 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
 138 static void ql_rst_aen(ql_adapter_state_t *);
 139 static void ql_restart_queues(ql_adapter_state_t *);
 140 static void ql_abort_queues(ql_adapter_state_t *);
 141 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
 142 static void ql_idle_check(ql_adapter_state_t *);
 143 static int ql_loop_resync(ql_adapter_state_t *);
 144 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
 145 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
 146 static int ql_save_config_regs(dev_info_t *);
 147 static int ql_restore_config_regs(dev_info_t *);
 148 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
 149 static int ql_handle_rscn_update(ql_adapter_state_t *);
 150 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
 151 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
 152 static int ql_dump_firmware(ql_adapter_state_t *);
 153 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
 154 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
 155 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
 156 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
 157 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
 158 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
 159 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
 160     void *);
 161 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
 162     uint8_t);
 163 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
 164 static int ql_suspend_adapter(ql_adapter_state_t *);
 165 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
 166 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
 167 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
 168 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
 169 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
 170 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
 171 static int ql_setup_interrupts(ql_adapter_state_t *);
 172 static int ql_setup_msi(ql_adapter_state_t *);
 173 static int ql_setup_msix(ql_adapter_state_t *);
 174 static int ql_setup_fixed(ql_adapter_state_t *);
 175 static void ql_release_intr(ql_adapter_state_t *);
 176 static void ql_disable_intr(ql_adapter_state_t *);
 177 static int ql_legacy_intr(ql_adapter_state_t *);
 178 static int ql_init_mutex(ql_adapter_state_t *);
 179 static void ql_destroy_mutex(ql_adapter_state_t *);
 180 static void ql_iidma(ql_adapter_state_t *);
 181 
 182 static int ql_n_port_plogi(ql_adapter_state_t *);
 183 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
 184     els_descriptor_t *);
 185 static void ql_isp_els_request_ctor(els_descriptor_t *,
 186     els_passthru_entry_t *);
 187 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
 188 static int ql_wait_for_td_stop(ql_adapter_state_t *);
 189 static void ql_process_idc_event(ql_adapter_state_t *);
 190 
 191 /*
 192  * Global data
 193  */
 194 static uint8_t  ql_enable_pm = 1;
 195 static int      ql_flash_sbus_fpga = 0;
 196 uint32_t        ql_os_release_level;
 197 uint32_t        ql_disable_aif = 0;
 198 uint32_t        ql_disable_msi = 0;
 199 uint32_t        ql_disable_msix = 0;
 200 uint32_t        ql_enable_ets = 0;
 201 uint16_t        ql_osc_wait_count = 1000;
 202 
 203 /* Timer routine variables. */
 204 static timeout_id_t     ql_timer_timeout_id = NULL;
 205 static clock_t          ql_timer_ticks;
 206 
 207 /* Soft state head pointer. */
 208 void *ql_state = NULL;
 209 
 210 /* Head adapter link. */
 211 ql_head_t ql_hba = {
 212         NULL,
 213         NULL
 214 };
 215 
 216 /* Global hba index */
 217 uint32_t ql_gfru_hba_index = 1;
 218 
 219 /*
 220  * Some IP defines and globals
 221  */
 222 uint32_t        ql_ip_buffer_count = 128;
 223 uint32_t        ql_ip_low_water = 10;
 224 uint8_t         ql_ip_fast_post_count = 5;
 225 static int      ql_ip_mtu = 65280;              /* equivalent to FCIPMTU */
 226 
 227 /* Device AL_PA to Device Head Queue index array. */
 228 uint8_t ql_alpa_to_index[] = {
 229         0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
 230         0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
 231         0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
 232         0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
 233         0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
 234         0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
 235         0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
 236         0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
 237         0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
 238         0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
 239         0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
 240         0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
 241         0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
 242         0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
 243         0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
 244         0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
 245         0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
 246         0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
 247         0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
 248         0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
 249         0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
 250         0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
 251         0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
 252         0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
 253         0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
 254         0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
 255 };
 256 
 257 /* Device loop_id to ALPA array. */
 258 static uint8_t ql_index_to_alpa[] = {
 259         0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
 260         0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
 261         0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
 262         0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
 263         0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
 264         0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
 265         0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
 266         0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
 267         0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
 268         0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
 269         0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
 270         0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
 271         0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
 272 };
 273 
 274 /* 2200 register offsets */
 275 static reg_off_t reg_off_2200 = {
 276         0x00,   /* flash_address */
 277         0x02,   /* flash_data */
 278         0x06,   /* ctrl_status */
 279         0x08,   /* ictrl */
 280         0x0a,   /* istatus */
 281         0x0c,   /* semaphore */
 282         0x0e,   /* nvram */
 283         0x18,   /* req_in */
 284         0x18,   /* req_out */
 285         0x1a,   /* resp_in */
 286         0x1a,   /* resp_out */
 287         0xff,   /* risc2host - n/a */
 288         24,     /* Number of mailboxes */
 289 
 290         /* Mailbox in register offsets 0 - 23 */
 291         0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
 292         0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
 293         0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
 294         /* 2200 does not have mailbox 24-31 - n/a */
 295         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 296 
 297         /* Mailbox out register offsets 0 - 23 */
 298         0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
 299         0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
 300         0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
 301         /* 2200 does not have mailbox 24-31 - n/a */
 302         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 303 
 304         0x96,   /* fpm_diag_config */
 305         0xa4,   /* pcr */
 306         0xb0,   /* mctr */
 307         0xb8,   /* fb_cmd */
 308         0xc0,   /* hccr */
 309         0xcc,   /* gpiod */
 310         0xce,   /* gpioe */
 311         0xff,   /* host_to_host_sema - n/a */
 312         0xff,   /* pri_req_in - n/a */
 313         0xff,   /* pri_req_out - n/a */
 314         0xff,   /* atio_req_in - n/a */
 315         0xff,   /* atio_req_out - n/a */
 316         0xff,   /* io_base_addr - n/a */
 317         0xff,   /* nx_host_int - n/a */
 318         0xff    /* nx_risc_int - n/a */
 319 };
 320 
 321 /* 2300 register offsets */
 322 static reg_off_t reg_off_2300 = {
 323         0x00,   /* flash_address */
 324         0x02,   /* flash_data */
 325         0x06,   /* ctrl_status */
 326         0x08,   /* ictrl */
 327         0x0a,   /* istatus */
 328         0x0c,   /* semaphore */
 329         0x0e,   /* nvram */
 330         0x10,   /* req_in */
 331         0x12,   /* req_out */
 332         0x14,   /* resp_in */
 333         0x16,   /* resp_out */
 334         0x18,   /* risc2host */
 335         32,     /* Number of mailboxes */
 336 
 337         /* Mailbox in register offsets 0 - 31 */
 338         0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
 339         0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
 340         0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
 341         0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
 342 
 343         /* Mailbox out register offsets 0 - 31 */
 344         0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
 345         0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
 346         0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
 347         0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
 348 
 349         0x96,   /* fpm_diag_config */
 350         0xa4,   /* pcr */
 351         0xb0,   /* mctr */
 352         0x80,   /* fb_cmd */
 353         0xc0,   /* hccr */
 354         0xcc,   /* gpiod */
 355         0xce,   /* gpioe */
 356         0x1c,   /* host_to_host_sema */
 357         0xff,   /* pri_req_in - n/a */
 358         0xff,   /* pri_req_out - n/a */
 359         0xff,   /* atio_req_in - n/a */
 360         0xff,   /* atio_req_out - n/a */
 361         0xff,   /* io_base_addr - n/a */
 362         0xff,   /* nx_host_int - n/a */
 363         0xff    /* nx_risc_int - n/a */
 364 };
 365 
 366 /* 2400/2500 register offsets */
 367 reg_off_t reg_off_2400_2500 = {
 368         0x00,   /* flash_address */
 369         0x04,   /* flash_data */
 370         0x08,   /* ctrl_status */
 371         0x0c,   /* ictrl */
 372         0x10,   /* istatus */
 373         0xff,   /* semaphore - n/a */
 374         0xff,   /* nvram - n/a */
 375         0x1c,   /* req_in */
 376         0x20,   /* req_out */
 377         0x24,   /* resp_in */
 378         0x28,   /* resp_out */
 379         0x44,   /* risc2host */
 380         32,     /* Number of mailboxes */
 381 
 382         /* Mailbox in register offsets 0 - 31 */
 383         0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
 384         0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
 385         0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
 386         0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
 387 
 388         /* Mailbox out register offsets 0 - 31 */
 389         0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
 390         0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
 391         0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
 392         0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
 393 
 394         0xff,   /* fpm_diag_config  - n/a */
 395         0xff,   /* pcr - n/a */
 396         0xff,   /* mctr - n/a */
 397         0xff,   /* fb_cmd - n/a */
 398         0x48,   /* hccr */
 399         0x4c,   /* gpiod */
 400         0x50,   /* gpioe */
 401         0xff,   /* host_to_host_sema - n/a */
 402         0x2c,   /* pri_req_in */
 403         0x30,   /* pri_req_out */
 404         0x3c,   /* atio_req_in */
 405         0x40,   /* atio_req_out */
 406         0x54,   /* io_base_addr */
 407         0xff,   /* nx_host_int - n/a */
 408         0xff    /* nx_risc_int - n/a */
 409 };
 410 
 411 /* P3 register offsets */
 412 static reg_off_t reg_off_8021 = {
 413         0x00,   /* flash_address */
 414         0x04,   /* flash_data */
 415         0x08,   /* ctrl_status */
 416         0x0c,   /* ictrl */
 417         0x10,   /* istatus */
 418         0xff,   /* semaphore - n/a */
 419         0xff,   /* nvram - n/a */
 420         0xff,   /* req_in - n/a */
 421         0x0,    /* req_out */
 422         0x100,  /* resp_in */
 423         0x200,  /* resp_out */
 424         0x500,  /* risc2host */
 425         32,     /* Number of mailboxes */
 426 
 427         /* Mailbox in register offsets 0 - 31 */
 428         0x300, 0x302, 0x304, 0x306, 0x308, 0x30a, 0x30c, 0x30e,
 429         0x310, 0x312, 0x314, 0x316, 0x318, 0x31a, 0x31c, 0x31e,
 430         0x320, 0x322, 0x324, 0x326, 0x328, 0x32a, 0x32c, 0x32e,
 431         0x330, 0x332, 0x334, 0x336, 0x338, 0x33a, 0x33c, 0x33e,
 432 
 433         /* Mailbox out register offsets 0 - 31 */
 434         0x400, 0x402, 0x404, 0x406, 0x408, 0x40a, 0x40c, 0x40e,
 435         0x410, 0x412, 0x414, 0x416, 0x418, 0x41a, 0x41c, 0x41e,
 436         0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
 437         0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e,
 438 
 439         0xff,   /* fpm_diag_config  - n/a */
 440         0xff,   /* pcr - n/a */
 441         0xff,   /* mctr - n/a */
 442         0xff,   /* fb_cmd - n/a */
 443         0x48,   /* hccr */
 444         0x4c,   /* gpiod */
 445         0x50,   /* gpioe */
 446         0xff,   /* host_to_host_sema - n/a */
 447         0x2c,   /* pri_req_in */
 448         0x30,   /* pri_req_out */
 449         0x3c,   /* atio_req_in */
 450         0x40,   /* atio_req_out */
 451         0x54,   /* io_base_addr */
 452         0x380,  /* nx_host_int */
 453         0x504   /* nx_risc_int */
 454 };
 455 
 456 /* mutex for protecting variables shared by all instances of the driver */
 457 kmutex_t ql_global_mutex;
 458 kmutex_t ql_global_hw_mutex;
 459 kmutex_t ql_global_el_mutex;
 460 
 461 /* DMA access attribute structure. */
 462 static ddi_device_acc_attr_t ql_dev_acc_attr = {
 463         DDI_DEVICE_ATTR_V0,
 464         DDI_STRUCTURE_LE_ACC,
 465         DDI_STRICTORDER_ACC
 466 };
 467 
 468 /* I/O DMA attributes structures. */
 469 static ddi_dma_attr_t ql_64bit_io_dma_attr = {
 470         DMA_ATTR_V0,                    /* dma_attr_version */
 471         QL_DMA_LOW_ADDRESS,             /* low DMA address range */
 472         QL_DMA_HIGH_64BIT_ADDRESS,      /* high DMA address range */
 473         QL_DMA_XFER_COUNTER,            /* DMA counter register */
 474         QL_DMA_ADDRESS_ALIGNMENT,       /* DMA address alignment */
 475         QL_DMA_BURSTSIZES,              /* DMA burstsizes */
 476         QL_DMA_MIN_XFER_SIZE,           /* min effective DMA size */
 477         QL_DMA_MAX_XFER_SIZE,           /* max DMA xfer size */
 478         QL_DMA_SEGMENT_BOUNDARY,        /* segment boundary */
 479         QL_DMA_SG_LIST_LENGTH,          /* s/g list length */
 480         QL_DMA_GRANULARITY,             /* granularity of device */
 481         QL_DMA_XFER_FLAGS               /* DMA transfer flags */
 482 };
 483 
 484 static ddi_dma_attr_t ql_32bit_io_dma_attr = {
 485         DMA_ATTR_V0,                    /* dma_attr_version */
 486         QL_DMA_LOW_ADDRESS,             /* low DMA address range */
 487         QL_DMA_HIGH_32BIT_ADDRESS,      /* high DMA address range */
 488         QL_DMA_XFER_COUNTER,            /* DMA counter register */
 489         QL_DMA_ADDRESS_ALIGNMENT,       /* DMA address alignment */
 490         QL_DMA_BURSTSIZES,              /* DMA burstsizes */
 491         QL_DMA_MIN_XFER_SIZE,           /* min effective DMA size */
 492         QL_DMA_MAX_XFER_SIZE,           /* max DMA xfer size */
 493         QL_DMA_SEGMENT_BOUNDARY,        /* segment boundary */
 494         QL_DMA_SG_LIST_LENGTH,          /* s/g list length */
 495         QL_DMA_GRANULARITY,             /* granularity of device */
 496         QL_DMA_XFER_FLAGS               /* DMA transfer flags */
 497 };
 498 
 499 /* Load the default dma attributes */
 500 static  ddi_dma_attr_t  ql_32fcsm_cmd_dma_attr;
 501 static  ddi_dma_attr_t  ql_64fcsm_cmd_dma_attr;
 502 static  ddi_dma_attr_t  ql_32fcsm_rsp_dma_attr;
 503 static  ddi_dma_attr_t  ql_64fcsm_rsp_dma_attr;
 504 static  ddi_dma_attr_t  ql_32fcip_cmd_dma_attr;
 505 static  ddi_dma_attr_t  ql_64fcip_cmd_dma_attr;
 506 static  ddi_dma_attr_t  ql_32fcip_rsp_dma_attr;
 507 static  ddi_dma_attr_t  ql_64fcip_rsp_dma_attr;
 508 static  ddi_dma_attr_t  ql_32fcp_cmd_dma_attr;
 509 static  ddi_dma_attr_t  ql_64fcp_cmd_dma_attr;
 510 static  ddi_dma_attr_t  ql_32fcp_rsp_dma_attr;
 511 static  ddi_dma_attr_t  ql_64fcp_rsp_dma_attr;
 512 static  ddi_dma_attr_t  ql_32fcp_data_dma_attr;
 513 static  ddi_dma_attr_t  ql_64fcp_data_dma_attr;
 514 
 515 /* Static declarations of cb_ops entry point functions... */
 516 static struct cb_ops ql_cb_ops = {
 517         ql_open,                        /* b/c open */
 518         ql_close,                       /* b/c close */
 519         nodev,                          /* b strategy */
 520         nodev,                          /* b print */
 521         nodev,                          /* b dump */
 522         nodev,                          /* c read */
 523         nodev,                          /* c write */
 524         ql_ioctl,                       /* c ioctl */
 525         nodev,                          /* c devmap */
 526         nodev,                          /* c mmap */
 527         nodev,                          /* c segmap */
 528         nochpoll,                       /* c poll */
 529         nodev,                          /* cb_prop_op */
 530         NULL,                           /* streamtab  */
 531         D_MP | D_NEW | D_HOTPLUG,       /* Driver compatibility flag */
 532         CB_REV,                         /* cb_ops revision */
 533         nodev,                          /* c aread */
 534         nodev                           /* c awrite */
 535 };
 536 
 537 /* Static declarations of dev_ops entry point functions... */
 538 static struct dev_ops ql_devops = {
 539         DEVO_REV,                       /* devo_rev */
 540         0,                              /* refcnt */
 541         ql_getinfo,                     /* getinfo */
 542         nulldev,                        /* identify */
 543         nulldev,                        /* probe */
 544         ql_attach,                      /* attach */
 545         ql_detach,                      /* detach */
 546         nodev,                          /* reset */
 547         &ql_cb_ops,                 /* char/block ops */
 548         NULL,                           /* bus operations */
 549         ql_power,                       /* power management */
 550         ql_quiesce                      /* quiesce device */
 551 };
 552 
 553 /* ELS command code to text converter */
 554 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
 555 /* Mailbox command code to text converter */
 556 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();
 557 
 558 char qlc_driver_version[] = QL_VERSION;
 559 
 560 /*
 561  * Loadable Driver Interface Structures.
 562  * Declare and initialize the module configuration section...
 563  */
 564 static struct modldrv modldrv = {
 565         &mod_driverops,                             /* type of module: driver */
 566         "SunFC Qlogic FCA v" QL_VERSION,        /* name of module */
 567         &ql_devops                          /* driver dev_ops */
 568 };
 569 
 570 static struct modlinkage modlinkage = {
 571         MODREV_1,
 572         &modldrv,
 573         NULL
 574 };
 575 
 576 /* ************************************************************************ */
 577 /*                              Loadable Module Routines.                   */
 578 /* ************************************************************************ */
 579 
 580 /*
 581  * _init
 582  *      Initializes a loadable module. It is called before any other
 583  *      routine in a loadable module.
 584  *
 585  * Returns:
 586  *      0 = success
 587  *
 588  * Context:
 589  *      Kernel context.
 590  */
 591 int
 592 _init(void)
 593 {
 594         uint16_t        w16;
 595         int             rval = 0;
 596 
 597         /* Get OS major release level. */
 598         for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
 599                 if (utsname.release[w16] == '.') {
 600                         w16++;
 601                         break;
 602                 }
 603         }
 604         if (w16 < sizeof (utsname.release)) {
 605                 (void) ql_bstr_to_dec(&utsname.release[w16],
 606                     &ql_os_release_level, 0);
 607         } else {
 608                 ql_os_release_level = 0;
 609         }
 610         if (ql_os_release_level < 6) {
 611                 cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
 612                     QL_NAME, ql_os_release_level);
 613                 rval = EINVAL;
 614         }
 615         if (ql_os_release_level == 6) {
 616                 ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
 617                 ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
 618         }
 619 
 620         if (rval == 0) {
 621                 rval = ddi_soft_state_init(&ql_state,
 622                     sizeof (ql_adapter_state_t), 0);
 623         }
 624         if (rval == 0) {
 625                 /* allow the FC Transport to tweak the dev_ops */
 626                 fc_fca_init(&ql_devops);
 627 
 628                 mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
 629                 mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
 630                 mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
 631                 rval = mod_install(&modlinkage);
 632                 if (rval != 0) {
 633                         mutex_destroy(&ql_global_hw_mutex);
 634                         mutex_destroy(&ql_global_mutex);
 635                         mutex_destroy(&ql_global_el_mutex);
 636                         ddi_soft_state_fini(&ql_state);
 637                 } else {
 638                         /*EMPTY*/
 639                         ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
 640                         ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
 641                         ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
 642                         ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
 643                         ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
 644                         ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
 645                         ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
 646                         ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
 647                         ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
 648                         ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
 649                         ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
 650                         ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
 651                         ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
 652                         ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
 653                         ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
 654                             ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
 655                             QL_FCSM_CMD_SGLLEN;
 656                         ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
 657                             ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
 658                             QL_FCSM_RSP_SGLLEN;
 659                         ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
 660                             ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
 661                             QL_FCIP_CMD_SGLLEN;
 662                         ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
 663                             ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
 664                             QL_FCIP_RSP_SGLLEN;
 665                         ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
 666                             ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
 667                             QL_FCP_CMD_SGLLEN;
 668                         ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
 669                             ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
 670                             QL_FCP_RSP_SGLLEN;
 671                 }
 672         }
 673 
 674         if (rval != 0) {
 675                 cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
 676                     QL_NAME);
 677         }
 678 
 679         return (rval);
 680 }
 681 
 682 /*
 683  * _fini
 684  *      Prepares a module for unloading. It is called when the system
 685  *      wants to unload a module. If the module determines that it can
 686  *      be unloaded, then _fini() returns the value returned by
 687  *      mod_remove(). Upon successful return from _fini() no other
 688  *      routine in the module will be called before _init() is called.
 689  *
 690  * Returns:
 691  *      0 = success
 692  *
 693  * Context:
 694  *      Kernel context.
 695  */
 696 int
 697 _fini(void)
 698 {
 699         int     rval;
 700 
 701         rval = mod_remove(&modlinkage);
 702         if (rval == 0) {
 703                 mutex_destroy(&ql_global_hw_mutex);
 704                 mutex_destroy(&ql_global_mutex);
 705                 mutex_destroy(&ql_global_el_mutex);
 706                 ddi_soft_state_fini(&ql_state);
 707         }
 708 
 709         return (rval);
 710 }
 711 
 712 /*
 713  * _info
 714  *      Returns information about loadable module.
 715  *
 716  * Input:
 717  *      modinfo = pointer to module information structure.
 718  *
 719  * Returns:
 720  *      Value returned by mod_info().
 721  *
 722  * Context:
 723  *      Kernel context.
 724  */
 725 int
 726 _info(struct modinfo *modinfop)
 727 {
 728         return (mod_info(&modlinkage, modinfop));
 729 }
 730 
 731 /* ************************************************************************ */
 732 /*                      dev_ops functions                                   */
 733 /* ************************************************************************ */
 734 
 735 /*
 736  * ql_getinfo
 737  *      Returns the pointer associated with arg when cmd is
 738  *      set to DDI_INFO_DEVT2DEVINFO, or it should return the
 739  *      instance number associated with arg when cmd is set
 740  *      to DDI_INFO_DEV2INSTANCE.
 741  *
 742  * Input:
 743  *      dip = Do not use.
 744  *      cmd = command argument.
 745  *      arg = command specific argument.
 746  *      resultp = pointer to where request information is stored.
 747  *
 748  * Returns:
 749  *      DDI_SUCCESS or DDI_FAILURE.
 750  *
 751  * Context:
 752  *      Kernel context.
 753  */
 754 /* ARGSUSED */
 755 static int
 756 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
 757 {
 758         ql_adapter_state_t      *ha;
 759         int                     minor;
 760         int                     rval = DDI_FAILURE;
 761 
 762         minor = (int)(getminor((dev_t)arg));
 763         ha = ddi_get_soft_state(ql_state, minor);
 764         if (ha == NULL) {
 765                 QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
 766                     getminor((dev_t)arg));
 767                 *resultp = NULL;
 768                 return (rval);
 769         }
 770 
 771         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
 772 
 773         switch (cmd) {
 774         case DDI_INFO_DEVT2DEVINFO:
 775                 *resultp = ha->dip;
 776                 rval = DDI_SUCCESS;
 777                 break;
 778         case DDI_INFO_DEVT2INSTANCE:
 779                 *resultp = (void *)(uintptr_t)(ha->instance);
 780                 rval = DDI_SUCCESS;
 781                 break;
 782         default:
 783                 EL(ha, "failed, unsupported cmd=%d\n", cmd);
 784                 rval = DDI_FAILURE;
 785                 break;
 786         }
 787 
 788         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
 789 
 790         return (rval);
 791 }
 792 
 793 /*
 794  * ql_attach
 795  *      Configure and attach an instance of the driver
 796  *      for a port.
 797  *
 798  * Input:
 799  *      dip = pointer to device information structure.
 800  *      cmd = attach type.
 801  *
 802  * Returns:
 803  *      DDI_SUCCESS or DDI_FAILURE.
 804  *
 805  * Context:
 806  *      Kernel context.
 807  */
 808 static int
 809 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 810 {
 811         off_t                   regsize;
 812         uint32_t                size;
 813         int                     rval, *ptr;
 814         int                     instance;
 815         uint_t                  progress = 0;
 816         char                    *buf;
 817         ushort_t                caps_ptr, cap;
 818         fc_fca_tran_t           *tran;
 819         ql_adapter_state_t      *ha = NULL;
 820 
 821         static char *pmcomps[] = {
 822                 NULL,
 823                 PM_LEVEL_D3_STR,                /* Device OFF */
 824                 PM_LEVEL_D0_STR,                /* Device ON */
 825         };
 826 
 827         QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
 828             ddi_get_instance(dip), cmd);
 829 
 830         buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
 831 
 832         switch (cmd) {
 833         case DDI_ATTACH:
 834                 /* first get the instance */
 835                 instance = ddi_get_instance(dip);
 836 
 837                 cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
 838                     QL_NAME, instance, QL_VERSION);
 839 
 840                 /* Correct OS version? */
 841                 if (ql_os_release_level != 11) {
 842                         cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
 843                             "11", QL_NAME, instance);
 844                         goto attach_failed;
 845                 }
 846 
 847                 /* Hardware is installed in a DMA-capable slot? */
 848                 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
 849                         cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
 850                             instance);
 851                         goto attach_failed;
 852                 }
 853 
 854                 /* No support for high-level interrupts */
 855                 if (ddi_intr_hilevel(dip, 0) != 0) {
 856                         cmn_err(CE_WARN, "%s(%d): High level interrupt"
 857                             " not supported", QL_NAME, instance);
 858                         goto attach_failed;
 859                 }
 860 
 861                 /* Allocate our per-device-instance structure */
 862                 if (ddi_soft_state_zalloc(ql_state,
 863                     instance) != DDI_SUCCESS) {
 864                         cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
 865                             QL_NAME, instance);
 866                         goto attach_failed;
 867                 }
 868                 progress |= QL_SOFT_STATE_ALLOCED;
 869 
 870                 ha = ddi_get_soft_state(ql_state, instance);
 871                 if (ha == NULL) {
 872                         cmn_err(CE_WARN, "%s(%d): can't get soft state",
 873                             QL_NAME, instance);
 874                         goto attach_failed;
 875                 }
 876                 ha->dip = dip;
 877                 ha->instance = instance;
 878                 ha->hba.base_address = ha;
 879                 ha->pha = ha;
 880 
 881                 if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
 882                         cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
 883                             QL_NAME, instance);
 884                         goto attach_failed;
 885                 }
 886 
 887                 /* Get extended logging and dump flags. */
 888                 ql_common_properties(ha);
 889 
 890                 if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
 891                     "sbus") == 0) {
 892                         EL(ha, "%s SBUS card detected", QL_NAME);
 893                         ha->cfg_flags |= CFG_SBUS_CARD;
 894                 }
 895 
 896                 ha->dev = kmem_zalloc(sizeof (*ha->dev) *
 897                     DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
 898 
 899                 ha->outstanding_cmds = kmem_zalloc(
 900                     sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
 901                     KM_SLEEP);
 902 
 903                 ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
 904                     QL_UB_LIMIT, KM_SLEEP);
 905 
 906                 ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
 907                     KM_SLEEP);
 908 
 909                 (void) ddi_pathname(dip, buf);
 910                 ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
 911                 if (ha->devpath == NULL) {
 912                         EL(ha, "devpath mem alloc failed\n");
 913                 } else {
 914                         (void) strcpy(ha->devpath, buf);
 915                         EL(ha, "devpath is: %s\n", ha->devpath);
 916                 }
 917 
 918                 if (CFG_IST(ha, CFG_SBUS_CARD)) {
 919                         /*
 920                          * For cards where PCI is mapped to sbus e.g. Ivory.
 921                          *
 922                          * 0x00 : 0x000 - 0x0FF PCI Config Space for 2200
 923                          *      : 0x100 - 0x3FF PCI IO space for 2200
 924                          * 0x01 : 0x000 - 0x0FF PCI Config Space for fpga
 925                          *      : 0x100 - 0x3FF PCI IO Space for fpga
 926                          */
 927                         if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
 928                             0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
 929                             DDI_SUCCESS) {
 930                                 cmn_err(CE_WARN, "%s(%d): Unable to map device"
 931                                     " registers", QL_NAME, instance);
 932                                 goto attach_failed;
 933                         }
 934                         if (ddi_regs_map_setup(dip, 1,
 935                             (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
 936                             &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle) !=
 937                             DDI_SUCCESS) {
 938                                 /* We should not fail attach here */
 939                                 cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
 940                                     QL_NAME, instance);
 941                                 ha->sbus_fpga_iobase = NULL;
 942                         }
 943                         progress |= QL_REGS_MAPPED;
 944 
 945                         /*
 946                          * We should map config space before adding interrupt
 947                          * So that the chip type (2200 or 2300) can be
 948                          * determined before the interrupt routine gets a
 949                          * chance to execute.
 950                          */
 951                         if (ddi_regs_map_setup(dip, 0,
 952                             (caddr_t *)&ha->sbus_config_base, 0, 0x100,
 953                             &ql_dev_acc_attr, &ha->sbus_config_handle) !=
 954                             DDI_SUCCESS) {
 955                                 cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
 956                                     "config registers", QL_NAME, instance);
 957                                 goto attach_failed;
 958                         }
 959                         progress |= QL_CONFIG_SPACE_SETUP;
 960                 } else {
 961                         /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
 962                         rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
 963                             DDI_PROP_DONTPASS, "reg", &ptr, &size);
 964                         if (rval != DDI_PROP_SUCCESS) {
 965                                 cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
 966                                     "address registers", QL_NAME, instance);
 967                                 goto attach_failed;
 968                         } else {
 969                                 ha->pci_bus_addr = ptr[0];
 970                                 ha->function_number = (uint8_t)
 971                                     (ha->pci_bus_addr >> 8 & 7);
 972                                 ddi_prop_free(ptr);
 973                         }
 974 
 975                         /*
 976                          * We should map config space before adding interrupt
 977                          * So that the chip type (2200 or 2300) can be
 978                          * determined before the interrupt routine gets a
 979                          * chance to execute.
 980                          */
 981                         if (pci_config_setup(ha->dip, &ha->pci_handle) !=
 982                             DDI_SUCCESS) {
 983                                 cmn_err(CE_WARN, "%s(%d): can't setup PCI "
 984                                     "config space", QL_NAME, instance);
 985                                 goto attach_failed;
 986                         }
 987                         progress |= QL_CONFIG_SPACE_SETUP;
 988 
 989                         /*
 990                          * Setup the ISP2200 registers address mapping to be
 991                          * accessed by this particular driver.
 992                          * 0x0   Configuration Space
 993                          * 0x1   I/O Space
 994                          * 0x2   32-bit Memory Space address
 995                          * 0x3   64-bit Memory Space address
 996                          */
 997                         size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
 998                             2 : 1;
 999                         if (ddi_dev_regsize(dip, size, &regsize) !=
1000                             DDI_SUCCESS ||
1001                             ddi_regs_map_setup(dip, size, &ha->iobase,
1002                             0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
1003                             DDI_SUCCESS) {
1004                                 cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
1005                                     "failed", QL_NAME, instance);
1006                                 goto attach_failed;
1007                         }
1008                         progress |= QL_REGS_MAPPED;
1009 
1010                         /*
1011                          * We need I/O space mappings for 23xx HBAs for
1012                          * loading flash (FCode). The chip has a bug due to
1013                          * which loading flash fails through mem space
1014                          * mappings in PCI-X mode.
1015                          */
1016                         if (size == 1) {
1017                                 ha->iomap_iobase = ha->iobase;
1018                                 ha->iomap_dev_handle = ha->dev_handle;
1019                         } else {
1020                                 if (ddi_dev_regsize(dip, 1, &regsize) !=
1021                                     DDI_SUCCESS ||
1022                                     ddi_regs_map_setup(dip, 1,
1023                                     &ha->iomap_iobase, 0, regsize,
1024                                     &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1025                                     DDI_SUCCESS) {
1026                                         cmn_err(CE_WARN, "%s(%d): regs_map_"
1027                                             "setup(I/O) failed", QL_NAME,
1028                                             instance);
1029                                         goto attach_failed;
1030                                 }
1031                                 progress |= QL_IOMAP_IOBASE_MAPPED;
1032                         }
1033                 }
1034 
1035                 ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1036                     PCI_CONF_SUBSYSID);
1037                 ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1038                     PCI_CONF_SUBVENID);
1039                 ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1040                     PCI_CONF_VENID);
1041                 ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1042                     PCI_CONF_DEVID);
1043                 ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1044                     PCI_CONF_REVID);
1045 
1046                 EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
1047                     "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1048                     ha->subven_id, ha->subsys_id);
1049 
1050                 switch (ha->device_id) {
1051                 case 0x2300:
1052                 case 0x2312:
1053                 case 0x2322:
1054                 case 0x6312:
1055                 case 0x6322:
1056                         if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1057                                 ha->flags |= FUNCTION_1;
1058                         }
1059                         if ((ha->device_id == 0x6322) ||
1060                             (ha->device_id == 0x2322)) {
1061                                 ha->cfg_flags |= CFG_CTRL_6322;
1062                                 ha->fw_class = 0x6322;
1063                                 ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1064                         } else {
1065                                 ha->cfg_flags |= CFG_CTRL_2300;
1066                                 ha->fw_class = 0x2300;
1067                                 ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1068                         }
1069                         ha->reg_off = &reg_off_2300;
1070                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1071                                 goto attach_failed;
1072                         }
1073                         ha->fcp_cmd = ql_command_iocb;
1074                         ha->ip_cmd = ql_ip_iocb;
1075                         ha->ms_cmd = ql_ms_iocb;
1076                         if (CFG_IST(ha, CFG_SBUS_CARD)) {
1077                                 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1078                                 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1079                         } else {
1080                                 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1081                                 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1082                         }
1083                         break;
1084 
1085                 case 0x2200:
1086                         ha->cfg_flags |= CFG_CTRL_2200;
1087                         ha->reg_off = &reg_off_2200;
1088                         ha->fw_class = 0x2200;
1089                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1090                                 goto attach_failed;
1091                         }
1092                         ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1093                         ha->fcp_cmd = ql_command_iocb;
1094                         ha->ip_cmd = ql_ip_iocb;
1095                         ha->ms_cmd = ql_ms_iocb;
1096                         if (CFG_IST(ha, CFG_SBUS_CARD)) {
1097                                 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1098                                 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1099                         } else {
1100                                 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1101                                 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1102                         }
1103                         break;
1104 
1105                 case 0x2422:
1106                 case 0x2432:
1107                 case 0x5422:
1108                 case 0x5432:
1109                 case 0x8432:
1110                         if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1111                                 ha->flags |= FUNCTION_1;
1112                         }
1113                         ha->cfg_flags |= CFG_CTRL_2422;
1114                         if (ha->device_id == 0x8432) {
1115                                 ha->cfg_flags |= CFG_CTRL_MENLO;
1116                         } else {
1117                                 ha->flags |= VP_ENABLED;
1118                         }
1119 
1120                         ha->reg_off = &reg_off_2400_2500;
1121                         ha->fw_class = 0x2400;
1122                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1123                                 goto attach_failed;
1124                         }
1125                         ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1126                         ha->fcp_cmd = ql_command_24xx_iocb;
1127                         ha->ip_cmd = ql_ip_24xx_iocb;
1128                         ha->ms_cmd = ql_ms_24xx_iocb;
1129                         ha->els_cmd = ql_els_24xx_iocb;
1130                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1131                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1132                         break;
1133 
1134                 case 0x2522:
1135                 case 0x2532:
1136                         if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1137                                 ha->flags |= FUNCTION_1;
1138                         }
1139                         ha->cfg_flags |= CFG_CTRL_25XX;
1140                         ha->flags |= VP_ENABLED;
1141                         ha->fw_class = 0x2500;
1142                         ha->reg_off = &reg_off_2400_2500;
1143                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1144                                 goto attach_failed;
1145                         }
1146                         ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1147                         ha->fcp_cmd = ql_command_24xx_iocb;
1148                         ha->ip_cmd = ql_ip_24xx_iocb;
1149                         ha->ms_cmd = ql_ms_24xx_iocb;
1150                         ha->els_cmd = ql_els_24xx_iocb;
1151                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1152                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1153                         break;
1154 
1155                 case 0x8001:
1156                         if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1157                                 ha->flags |= FUNCTION_1;
1158                         }
1159                         ha->cfg_flags |= CFG_CTRL_81XX;
1160                         ha->flags |= VP_ENABLED;
1161                         ha->fw_class = 0x8100;
1162                         ha->reg_off = &reg_off_2400_2500;
1163                         if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1164                                 goto attach_failed;
1165                         }
1166                         ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1167                         ha->fcp_cmd = ql_command_24xx_iocb;
1168                         ha->ip_cmd = ql_ip_24xx_iocb;
1169                         ha->ms_cmd = ql_ms_24xx_iocb;
1170                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1171                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1172                         break;
1173 
1174                 case 0x8021:
1175                         if (ha->function_number & BIT_0) {
1176                                 ha->flags |= FUNCTION_1;
1177                         }
1178                         ha->cfg_flags |= CFG_CTRL_8021;
1179                         ha->reg_off = &reg_off_8021;
1180                         ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1181                         ha->fcp_cmd = ql_command_24xx_iocb;
1182                         ha->ms_cmd = ql_ms_24xx_iocb;
1183                         ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1184                         ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1185 
1186                         ha->nx_pcibase = ha->iobase;
1187                         ha->iobase += 0xBC000 + (ha->function_number << 11);
1188                         ha->iomap_iobase += 0xBC000 +
1189                             (ha->function_number << 11);
1190 
1191                         /* map doorbell */
1192                         if (ddi_dev_regsize(dip, 2, &regsize) != DDI_SUCCESS ||
1193                             ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1194                             0, regsize, &ql_dev_acc_attr, &ha->db_dev_handle) !=
1195                             DDI_SUCCESS) {
1196                                 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1197                                     "(doorbell) failed", QL_NAME, instance);
1198                                 goto attach_failed;
1199                         }
1200                         progress |= QL_DB_IOBASE_MAPPED;
1201 
1202                         ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1203                             (ha->function_number << 12));
1204                         ha->db_read = ha->nx_pcibase + (512 * 1024) +
1205                             (ha->function_number * 8);
1206 
1207                         ql_8021_update_crb_int_ptr(ha);
1208                         ql_8021_set_drv_active(ha);
1209                         break;
1210 
1211                 default:
1212                         cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1213                             QL_NAME, instance, ha->device_id);
1214                         goto attach_failed;
1215                 }
1216 
1217                 /* Setup hba buffer. */
1218 
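                     /*
                      * Chips covered by CFG_CTRL_24258081 do not use the
                      * receive buffer queue, so the shared DMA area only
                      * needs to hold the request and response rings.
                      */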
1219                 size = CFG_IST(ha, CFG_CTRL_24258081) ?
1220                     (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1221                     (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1222                     RCVBUF_QUEUE_SIZE);
1223 
1224                 if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1225                     QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1226                         cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1227                             "alloc failed", QL_NAME, instance);
1228                         goto attach_failed;
1229                 }
1230                 progress |= QL_HBA_BUFFER_SETUP;
1231 
1232                 /* Setup buffer pointers. */
1233                 ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1234                     REQUEST_Q_BUFFER_OFFSET;
1235                 ha->request_ring_bp = (struct cmd_entry *)
1236                     ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1237 
1238                 ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1239                     RESPONSE_Q_BUFFER_OFFSET;
1240                 ha->response_ring_bp = (struct sts_entry *)
1241                     ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1242 
1243                 ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1244                     RCVBUF_Q_BUFFER_OFFSET;
1245                 ha->rcvbuf_ring_bp = (struct rcvbuf *)
1246                     ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1247 
1248                 /* Allocate resource for QLogic IOCTL */
1249                 (void) ql_alloc_xioctl_resource(ha);
1250 
1251                 /* Setup interrupts */
1252                 if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1253                         cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1254                             "rval=%xh", QL_NAME, instance, rval);
1255                         goto attach_failed;
1256                 }
1257 
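                     /*
                      * Interrupt setup also leaves the driver mutexes and
                      * condition variables initialized, hence both flags.
                      */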
1258                 progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1259 
1260                 if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1261                         cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1262                             QL_NAME, instance);
1263                         goto attach_failed;
1264                 }
1265 
1266                 /*
1267                  * Allocate an N Port information structure
1268                  * for use when in P2P topology.
1269                  */
1270                 ha->n_port = (ql_n_port_info_t *)
1271                     kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1272                 if (ha->n_port == NULL) {
1273                         cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1274                             QL_NAME, instance);
1275                         goto attach_failed;
1276                 }
1277 
1278                 progress |= QL_N_PORT_INFO_CREATED;
1279 
1280                 /*
1281                  * Determine support for Power Management
1282                  */
1283                 caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1284 
1285                 while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1286                         cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1287                         if (cap == PCI_CAP_ID_PM) {
1288                                 ha->pm_capable = 1;
1289                                 break;
1290                         }
1291                         caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1292                             PCI_CAP_NEXT_PTR);
1293                 }
1294 
1295                 if (ha->pm_capable) {
1296                         /*
1297                          * Enable PM for 2200 based HBAs only.
1298                          */
1299                         if (ha->device_id != 0x2200) {
1300                                 ha->pm_capable = 0;
1301                         }
1302                 }
1303 
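                     /* Honor the global ql_enable_pm flag. */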
1304                 if (ha->pm_capable) {
1305                         ha->pm_capable = ql_enable_pm;
1306                 }
1307 
1308                 if (ha->pm_capable) {
1309                         /*
1310                          * Initialize power management bookkeeping;
1311                          * components are created idle.
1312                          */
1313                         (void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1314                         pmcomps[0] = buf;
1315 
1316                         /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1317                         if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1318                             dip, "pm-components", pmcomps,
1319                             sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1320                             DDI_PROP_SUCCESS) {
1321                                 cmn_err(CE_WARN, "%s(%d): failed to create"
1322                                     " pm-components property", QL_NAME,
1323                                     instance);
1324 
1325                                 /* Initialize adapter. */
1326                                 ha->power_level = PM_LEVEL_D0;
1327                                 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1328                                         cmn_err(CE_WARN, "%s(%d): failed to"
1329                                             " initialize adapter", QL_NAME,
1330                                             instance);
1331                                         goto attach_failed;
1332                                 }
1333                         } else {
1334                                 ha->power_level = PM_LEVEL_D3;
1335                                 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1336                                     PM_LEVEL_D0) != DDI_SUCCESS) {
1337                                         cmn_err(CE_WARN, "%s(%d): failed to"
1338                                             " raise power or initialize"
1339                                             " adapter", QL_NAME, instance);
1340                                 }
1341                         }
1342                 } else {
1343                         /* Initialize adapter. */
1344                         ha->power_level = PM_LEVEL_D0;
1345                         if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1346                                 cmn_err(CE_WARN, "%s(%d): failed to initialize"
1347                                     " adapter", QL_NAME, instance);
1348                         }
1349                 }
1350 
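                     /* Report the loaded firmware version(s), if any. */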
1351                 if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1352                     ha->fw_subminor_version == 0) {
1353                         cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1354                             QL_NAME, ha->instance);
1355                 } else {
1356                         int     rval;
1357                         char    ver_fmt[256];
1358 
1359                         rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1360                             "Firmware version %d.%d.%d", ha->fw_major_version,
1361                             ha->fw_minor_version, ha->fw_subminor_version);
1362 
1363                         if (CFG_IST(ha, CFG_CTRL_81XX)) {
1364                                 rval += (int)snprintf(ver_fmt + rval,
1365                                     (size_t)(sizeof (ver_fmt) - rval),
1366                                     ", MPI fw version %d.%d.%d",
1367                                     ha->mpi_fw_major_version,
1368                                     ha->mpi_fw_minor_version,
1369                                     ha->mpi_fw_subminor_version);
1370 
1371                                 if (ha->subsys_id == 0x17B ||
1372                                     ha->subsys_id == 0x17D) {
1373                                         (void) snprintf(ver_fmt + rval,
1374                                             (size_t)(sizeof (ver_fmt) - rval),
1375                                             ", PHY fw version %d.%d.%d",
1376                                             ha->phy_fw_major_version,
1377                                             ha->phy_fw_minor_version,
1378                                             ha->phy_fw_subminor_version);
1379                                 }
1380                         }
1381                         cmn_err(CE_NOTE, "!%s(%d): %s",
1382                             QL_NAME, ha->instance, ver_fmt);
1383                 }
1384 
1385                 ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1386                     "controller", KSTAT_TYPE_RAW,
1387                     (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1388                 if (ha->k_stats == NULL) {
1389                         cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1390                             QL_NAME, instance);
1391                         goto attach_failed;
1392                 }
1393                 progress |= QL_KSTAT_CREATED;
1394 
1395                 ha->adapter_stats->version = 1;
1396                 ha->k_stats->ks_data = (void *)ha->adapter_stats;
1397                 ha->k_stats->ks_private = ha;
1398                 ha->k_stats->ks_update = ql_kstat_update;
1399                 ha->k_stats->ks_ndata = 1;
1400                 ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1401                 kstat_install(ha->k_stats);
1402 
1403                 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1404                     instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1405                         cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1406                             QL_NAME, instance);
1407                         goto attach_failed;
1408                 }
1409                 progress |= QL_MINOR_NODE_CREATED;
1410 
1411                 /* Allocate a transport structure for this instance */
1412                 tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1413                 if (tran == NULL) {
1414                         cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1415                             QL_NAME, instance);
1416                         goto attach_failed;
1417                 }
1418 
1419                 progress |= QL_FCA_TRAN_ALLOCED;
1420 
1421                 /* fill in the structure */
1422                 tran->fca_numports = 1;
1423                 tran->fca_version = FCTL_FCA_MODREV_5;
1424                 if (CFG_IST(ha, CFG_CTRL_2422)) {
1425                         tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1426                 } else if (CFG_IST(ha, CFG_CTRL_2581)) {
1427                         tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1428                 }
1429                 bcopy(ha->loginparams.node_ww_name.raw_wwn,
1430                     tran->fca_perm_pwwn.raw_wwn, 8);
1431 
1432                 EL(ha, "FCA version %d\n", tran->fca_version);
1433 
1434                 /* Specify the amount of space needed in each packet */
1435                 tran->fca_pkt_size = sizeof (ql_srb_t);
1436 
1437                 /* command limits are usually dictated by hardware */
1438                 tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1439 
1440                 /* dmaattr are static, set elsewhere. */
1441                 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1442                         tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1443                         tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1444                         tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1445                         tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1446                         tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1447                         tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1448                         tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1449                         tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1450                 } else {
1451                         tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1452                         tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1453                         tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1454                         tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1455                         tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1456                         tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1457                         tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1458                         tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1459                 }
1460 
1461                 tran->fca_acc_attr = &ql_dev_acc_attr;
1462                 tran->fca_iblock = &(ha->iblock_cookie);
1463 
1464                 /* the remaining values are simply function vectors */
1465                 tran->fca_bind_port = ql_bind_port;
1466                 tran->fca_unbind_port = ql_unbind_port;
1467                 tran->fca_init_pkt = ql_init_pkt;
1468                 tran->fca_un_init_pkt = ql_un_init_pkt;
1469                 tran->fca_els_send = ql_els_send;
1470                 tran->fca_get_cap = ql_get_cap;
1471                 tran->fca_set_cap = ql_set_cap;
1472                 tran->fca_getmap = ql_getmap;
1473                 tran->fca_transport = ql_transport;
1474                 tran->fca_ub_alloc = ql_ub_alloc;
1475                 tran->fca_ub_free = ql_ub_free;
1476                 tran->fca_ub_release = ql_ub_release;
1477                 tran->fca_abort = ql_abort;
1478                 tran->fca_reset = ql_reset;
1479                 tran->fca_port_manage = ql_port_manage;
1480                 tran->fca_get_device = ql_get_device;
1481 
1482                 /* give it to the FC transport */
1483                 if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1484                         cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1485                             instance);
1486                         goto attach_failed;
1487                 }
1488                 progress |= QL_FCA_ATTACH_DONE;
1489 
1490                 /* Stash the structure so it can be freed at detach */
1491                 ha->tran = tran;
1492 
1493                 /* Acquire global state lock. */
1494                 GLOBAL_STATE_LOCK();
1495 
1496                 /* Add adapter structure to link list. */
1497                 ql_add_link_b(&ql_hba, &ha->hba);
1498 
1499                 /* Start one second driver timer. */
1500                 if (ql_timer_timeout_id == NULL) {
1501                         ql_timer_ticks = drv_usectohz(1000000);
1502                         ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1503                             ql_timer_ticks);
1504                 }
1505 
1506                 /* Release global state lock. */
1507                 GLOBAL_STATE_UNLOCK();
1508 
1509                 /* Determine and populate HBA fru info */
1510                 ql_setup_fruinfo(ha);
1511 
1512                 /* Setup task_daemon thread. */
1513                 (void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1514                     0, &p0, TS_RUN, minclsyspri);
1515 
1516                 progress |= QL_TASK_DAEMON_STARTED;
1517 
1518                 ddi_report_dev(dip);
1519 
1520                 /* Disable link reset in panic path */
1521                 ha->lip_on_panic = 1;
1522 
1523                 rval = DDI_SUCCESS;
1524                 break;
1525 
1526 attach_failed:
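                     /*
                      * Failure path: undo only the setup steps recorded
                      * in the progress bit mask.
                      */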
1527                 if (progress & QL_FCA_ATTACH_DONE) {
1528                         (void) fc_fca_detach(dip);
1529                         progress &= ~QL_FCA_ATTACH_DONE;
1530                 }
1531 
1532                 if (progress & QL_FCA_TRAN_ALLOCED) {
1533                         kmem_free(tran, sizeof (fc_fca_tran_t));
1534                         progress &= ~QL_FCA_TRAN_ALLOCED;
1535                 }
1536 
1537                 if (progress & QL_MINOR_NODE_CREATED) {
1538                         ddi_remove_minor_node(dip, "devctl");
1539                         progress &= ~QL_MINOR_NODE_CREATED;
1540                 }
1541 
1542                 if (progress & QL_KSTAT_CREATED) {
1543                         kstat_delete(ha->k_stats);
1544                         progress &= ~QL_KSTAT_CREATED;
1545                 }
1546 
1547                 if (progress & QL_N_PORT_INFO_CREATED) {
1548                         kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1549                         progress &= ~QL_N_PORT_INFO_CREATED;
1550                 }
1551 
1552                 if (progress & QL_TASK_DAEMON_STARTED) {
1553                         TASK_DAEMON_LOCK(ha);
1554 
1555                         ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1556 
1557                         cv_signal(&ha->cv_task_daemon);
1558 
1559                         /* Release task daemon lock. */
1560                         TASK_DAEMON_UNLOCK(ha);
1561 
1562                         /* Wait for the task daemon to stop running. */
1563                         while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1564                                 ql_delay(ha, 10000);
1565                         }
1566                         progress &= ~QL_TASK_DAEMON_STARTED;
1567                 }
1568 
1569                 if (progress & QL_DB_IOBASE_MAPPED) {
1570                         ql_8021_clr_drv_active(ha);
1571                         ddi_regs_map_free(&ha->db_dev_handle);
1572                         progress &= ~QL_DB_IOBASE_MAPPED;
1573                 }
1574                 if (progress & QL_IOMAP_IOBASE_MAPPED) {
1575                         ddi_regs_map_free(&ha->iomap_dev_handle);
1576                         progress &= ~QL_IOMAP_IOBASE_MAPPED;
1577                 }
1578 
1579                 if (progress & QL_CONFIG_SPACE_SETUP) {
1580                         if (CFG_IST(ha, CFG_SBUS_CARD)) {
1581                                 ddi_regs_map_free(&ha->sbus_config_handle);
1582                         } else {
1583                                 pci_config_teardown(&ha->pci_handle);
1584                         }
1585                         progress &= ~QL_CONFIG_SPACE_SETUP;
1586                 }
1587 
1588                 if (progress & QL_INTR_ADDED) {
1589                         ql_disable_intr(ha);
1590                         ql_release_intr(ha);
1591                         progress &= ~QL_INTR_ADDED;
1592                 }
1593 
1594                 if (progress & QL_MUTEX_CV_INITED) {
1595                         ql_destroy_mutex(ha);
1596                         progress &= ~QL_MUTEX_CV_INITED;
1597                 }
1598 
1599                 if (progress & QL_HBA_BUFFER_SETUP) {
1600                         ql_free_phys(ha, &ha->hba_buf);
1601                         progress &= ~QL_HBA_BUFFER_SETUP;
1602                 }
1603 
1604                 if (progress & QL_REGS_MAPPED) {
1605                         ddi_regs_map_free(&ha->dev_handle);
1606                         if (ha->sbus_fpga_iobase != NULL) {
1607                                 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1608                         }
1609                         progress &= ~QL_REGS_MAPPED;
1610                 }
1611 
1612                 if (progress & QL_SOFT_STATE_ALLOCED) {
1613 
1614                         ql_fcache_rel(ha->fcache);
1615 
1616                         kmem_free(ha->adapter_stats,
1617                             sizeof (*ha->adapter_stats));
1618 
1619                         kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1620                             QL_UB_LIMIT);
1621 
1622                         kmem_free(ha->outstanding_cmds,
1623                             sizeof (*ha->outstanding_cmds) *
1624                             MAX_OUTSTANDING_COMMANDS);
1625 
1626                         if (ha->devpath != NULL) {
1627                                 kmem_free(ha->devpath,
1628                                     strlen(ha->devpath) + 1);
1629                         }
1630 
1631                         kmem_free(ha->dev, sizeof (*ha->dev) *
1632                             DEVICE_HEAD_LIST_SIZE);
1633 
1634                         if (ha->xioctl != NULL) {
1635                                 ql_free_xioctl_resource(ha);
1636                         }
1637 
1638                         if (ha->fw_module != NULL) {
1639                                 (void) ddi_modclose(ha->fw_module);
1640                         }
1641                         (void) ql_el_trace_desc_dtor(ha);
1642                         (void) ql_nvram_cache_desc_dtor(ha);
1643 
1644                         ddi_soft_state_free(ql_state, instance);
1645                         progress &= ~QL_SOFT_STATE_ALLOCED;
1646                 }
1647 
1648                 ddi_prop_remove_all(dip);
1649                 rval = DDI_FAILURE;
1650                 break;
1651 
1652         case DDI_RESUME:
1653                 rval = DDI_FAILURE;
1654 
1655                 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1656                 if (ha == NULL) {
1657                         cmn_err(CE_WARN, "%s(%d): can't get soft state",
1658                             QL_NAME, instance);
1659                         break;
1660                 }
1661 
1662                 ha->power_level = PM_LEVEL_D3;
1663                 if (ha->pm_capable) {
1664                         /*
1665                          * Get ql_power to do power on initialization
1666                          */
1667                         if (pm_raise_power(dip, QL_POWER_COMPONENT,
1668                             PM_LEVEL_D0) != DDI_SUCCESS) {
1669                                 cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1670                                     " power", QL_NAME, instance);
1671                         }
1672                 }
1673 
1674                 /*
1675                  * There is a bug in DR that prevents the PM framework
1676                  * from calling ql_power.
1677                  */
1678                 if (ha->power_level == PM_LEVEL_D3) {
1679                         ha->power_level = PM_LEVEL_D0;
1680 
1681                         if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1682                                 cmn_err(CE_WARN, "%s(%d): can't initialize the"
1683                                     " adapter", QL_NAME, instance);
1684                         }
1685 
1686                         /* Wake up task_daemon. */
1687                         ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1688                             0);
1689                 }
1690 
1691                 /* Acquire global state lock. */
1692                 GLOBAL_STATE_LOCK();
1693 
1694                 /* Restart driver timer. */
1695                 if (ql_timer_timeout_id == NULL) {
1696                         ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1697                             ql_timer_ticks);
1698                 }
1699 
1700                 /* Release global state lock. */
1701                 GLOBAL_STATE_UNLOCK();
1702 
1703                 /* Wake up command start routine. */
1704                 ADAPTER_STATE_LOCK(ha);
1705                 ha->flags &= ~ADAPTER_SUSPENDED;
1706                 ADAPTER_STATE_UNLOCK(ha);
1707 
1708                 /*
1709                  * The transport doesn't perform FC discovery in
1710                  * polled mode, so we need the daemon thread's
1711                  * services right here.
1712                  */
1713                 (void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1714 
1715                 rval = DDI_SUCCESS;
1716 
1717                 /* Restart IP if it was running. */
1718                 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1719                         (void) ql_initialize_ip(ha);
1720                         ql_isp_rcvbuf(ha);
1721                 }
1722                 break;
1723 
1724         default:
1725                 cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1726                     " %x", QL_NAME, ddi_get_instance(dip), cmd);
1727                 rval = DDI_FAILURE;
1728                 break;
1729         }
1730 
1731         kmem_free(buf, MAXPATHLEN);
1732 
1733         if (rval != DDI_SUCCESS) {
1734                 /*EMPTY*/
1735                 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1736                     ddi_get_instance(dip), rval);
1737         } else {
1738                 /*EMPTY*/
1739                 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1740         }
1741 
1742         return (rval);
1743 }
1744 
1745 /*
1746  * ql_detach
1747  *      Used to remove all the state associated with a given
1748  *      instance of a device node prior to the removal of that
1749  *      instance from the system.
1750  *
1751  * Input:
1752  *      dip = pointer to device information structure.
1753  *      cmd = type of detach.
1754  *
1755  * Returns:
1756  *      DDI_SUCCESS or DDI_FAILURE.
1757  *
1758  * Context:
1759  *      Kernel context.
1760  */
1761 static int
1762 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1763 {
1764         ql_adapter_state_t      *ha, *vha;
1765         ql_tgt_t                *tq;
1766         int                     delay_cnt;
1767         uint16_t                index;
1768         ql_link_t               *link;
1769         char                    *buf;
1770         timeout_id_t            timer_id = NULL;
1771         int                     suspend, rval = DDI_SUCCESS;
1772 
1773         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1774         if (ha == NULL) {
1775                 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1776                     ddi_get_instance(dip));
1777                 return (DDI_FAILURE);
1778         }
1779 
1780         QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1781 
1782         buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1783 
1784         switch (cmd) {
1785         case DDI_DETACH:
1786                 ADAPTER_STATE_LOCK(ha);
1787                 ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1788                 ADAPTER_STATE_UNLOCK(ha);
1789 
1790                 TASK_DAEMON_LOCK(ha);
1791 
1792                 if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
1793                         ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1794                         cv_signal(&ha->cv_task_daemon);
1795 
1796                         TASK_DAEMON_UNLOCK(ha);
1797 
1798                         (void) ql_wait_for_td_stop(ha);
1799 
1800                         TASK_DAEMON_LOCK(ha);
1801                         if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1802                                 ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1803                                 EL(ha, "failed, could not stop task daemon\n");
1804                         }
1805                 }
1806                 TASK_DAEMON_UNLOCK(ha);
1807 
1808                 GLOBAL_STATE_LOCK();
1809 
1810                 /* Disable driver timer if no adapters. */
1811                 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1812                     ql_hba.last == &ha->hba) {
1813                         timer_id = ql_timer_timeout_id;
1814                         ql_timer_timeout_id = NULL;
1815                 }
1816                 ql_remove_link(&ql_hba, &ha->hba);
1817 
1818                 GLOBAL_STATE_UNLOCK();
1819 
1820                 if (timer_id) {
1821                         (void) untimeout(timer_id);
1822                 }
1823 
1824                 if (ha->pm_capable) {
1825                         if (pm_lower_power(dip, QL_POWER_COMPONENT,
1826                             PM_LEVEL_D3) != DDI_SUCCESS) {
1827                                 cmn_err(CE_WARN, "%s(%d): failed to lower the"
1828                                     " power", QL_NAME, ha->instance);
1829                         }
1830                 }
1831 
1832                 /*
1833                  * If pm_lower_power shut down the adapter, there
1834                  * isn't much else to do.
1835                  */
1836                 if (ha->power_level != PM_LEVEL_D3) {
1837                         ql_halt(ha, PM_LEVEL_D3);
1838                 }
1839 
1840                 /* Remove virtual ports. */
1841                 while ((vha = ha->vp_next) != NULL) {
1842                         ql_vport_destroy(vha);
1843                 }
1844 
1845                 /* Free target queues. */
1846                 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1847                         link = ha->dev[index].first;
1848                         while (link != NULL) {
1849                                 tq = link->base_address;
1850                                 link = link->next;
1851                                 ql_dev_free(ha, tq);
1852                         }
1853                 }
1854 
1855                 /*
1856                  * Free unsolicited buffers.
1857                  * If we are here, no ULPs that wish to talk to ql
1858                  * are still alive, so free up any SRB_IP_UB_UNUSED
1859                  * buffers that are lingering around.
1861                  */
1862                 QL_UB_LOCK(ha);
1863                 for (index = 0; index < QL_UB_LIMIT; index++) {
1864                         fc_unsol_buf_t *ubp = ha->ub_array[index];
1865 
1866                         if (ubp != NULL) {
1867                                 ql_srb_t *sp = ubp->ub_fca_private;
1868 
1869                                 sp->flags |= SRB_UB_FREE_REQUESTED;
1870 
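                                     /*
                                      * Poll, dropping the UB lock, until
                                      * the buffer is back in the FCA and
                                      * no longer in use before freeing it.
                                      */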
1871                                 while (!(sp->flags & SRB_UB_IN_FCA) ||
1872                                     (sp->flags & (SRB_UB_CALLBACK |
1873                                     SRB_UB_ACQUIRED))) {
1874                                         QL_UB_UNLOCK(ha);
1875                                         delay(drv_usectohz(100000));
1876                                         QL_UB_LOCK(ha);
1877                                 }
1878                                 ha->ub_array[index] = NULL;
1879 
1880                                 QL_UB_UNLOCK(ha);
1881                                 ql_free_unsolicited_buffer(ha, ubp);
1882                                 QL_UB_LOCK(ha);
1883                         }
1884                 }
1885                 QL_UB_UNLOCK(ha);
1886 
1887                 /* Free any saved RISC code. */
1888                 if (ha->risc_code != NULL) {
1889                         kmem_free(ha->risc_code, ha->risc_code_size);
1890                         ha->risc_code = NULL;
1891                         ha->risc_code_size = 0;
1892                 }
1893 
1894                 if (ha->fw_module != NULL) {
1895                         (void) ddi_modclose(ha->fw_module);
1896                         ha->fw_module = NULL;
1897                 }
1898 
1899                 /* Free resources. */
1900                 ddi_prop_remove_all(dip);
1901                 (void) fc_fca_detach(dip);
1902                 kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1903                 ddi_remove_minor_node(dip, "devctl");
1904                 if (ha->k_stats != NULL) {
1905                         kstat_delete(ha->k_stats);
1906                 }
1907 
1908                 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1909                         ddi_regs_map_free(&ha->sbus_config_handle);
1910                 } else {
1911                         if (CFG_IST(ha, CFG_CTRL_8021)) {
1912                                 ql_8021_clr_drv_active(ha);
1913                                 ddi_regs_map_free(&ha->db_dev_handle);
1914                         }
1915                         if (ha->iomap_dev_handle != ha->dev_handle) {
1916                                 ddi_regs_map_free(&ha->iomap_dev_handle);
1917                         }
1918                         pci_config_teardown(&ha->pci_handle);
1919                 }
1920 
1921                 ql_disable_intr(ha);
1922                 ql_release_intr(ha);
1923 
1924                 ql_free_xioctl_resource(ha);
1925 
1926                 ql_destroy_mutex(ha);
1927 
1928                 ql_free_phys(ha, &ha->hba_buf);
1929                 ql_free_phys(ha, &ha->fwexttracebuf);
1930                 ql_free_phys(ha, &ha->fwfcetracebuf);
1931 
1932                 ddi_regs_map_free(&ha->dev_handle);
1933                 if (ha->sbus_fpga_iobase != NULL) {
1934                         ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1935                 }
1936 
1937                 ql_fcache_rel(ha->fcache);
1938                 if (ha->vcache != NULL) {
1939                         kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1940                 }
1941 
1942                 if (ha->pi_attrs != NULL) {
1943                         kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1944                 }
1945 
1946                 kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1947 
1948                 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1949 
1950                 kmem_free(ha->outstanding_cmds,
1951                     sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1952 
1953                 if (ha->n_port != NULL) {
1954                         kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1955                 }
1956 
1957                 if (ha->devpath != NULL) {
1958                         kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1959                 }
1960 
1961                 kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1962 
1963                 EL(ha, "detached\n");
1964 
1965                 ddi_soft_state_free(ql_state, (int)ha->instance);
1966 
1967                 break;
1968 
1969         case DDI_SUSPEND:
1970                 ADAPTER_STATE_LOCK(ha);
1971 
1972                 delay_cnt = 0;
1973                 ha->flags |= ADAPTER_SUSPENDED;
1974                 while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
1975                         ADAPTER_STATE_UNLOCK(ha);
1976                         delay(drv_usectohz(1000000));
1977                         ADAPTER_STATE_LOCK(ha);
1978                 }
1979                 if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1980                         ha->flags &= ~ADAPTER_SUSPENDED;
1981                         ADAPTER_STATE_UNLOCK(ha);
1982                         rval = DDI_FAILURE;
1983                         cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1984                             " busy %xh flags %xh", QL_NAME, ha->instance,
1985                             ha->busy, ha->flags);
1986                         break;
1987                 }
1988 
1989                 ADAPTER_STATE_UNLOCK(ha);
1990 
1991                 if (ha->flags & IP_INITIALIZED) {
1992                         (void) ql_shutdown_ip(ha);
1993                 }
1994 
1995                 if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
1996                         ADAPTER_STATE_LOCK(ha);
1997                         ha->flags &= ~ADAPTER_SUSPENDED;
1998                         ADAPTER_STATE_UNLOCK(ha);
1999                         cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
2000                             QL_NAME, ha->instance, suspend);
2001 
2002                         /* Restart IP if it was running. */
2003                         if (ha->flags & IP_ENABLED &&
2004                             !(ha->flags & IP_INITIALIZED)) {
2005                                 (void) ql_initialize_ip(ha);
2006                                 ql_isp_rcvbuf(ha);
2007                         }
2008                         rval = DDI_FAILURE;
2009                         break;
2010                 }
2011 
2012                 /* Acquire global state lock. */
2013                 GLOBAL_STATE_LOCK();
2014 
2015                 /* Disable driver timer if last adapter. */
2016                 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
2017                     ql_hba.last == &ha->hba) {
2018                         timer_id = ql_timer_timeout_id;
2019                         ql_timer_timeout_id = NULL;
2020                 }
2021                 GLOBAL_STATE_UNLOCK();
2022 
2023                 if (timer_id) {
2024                         (void) untimeout(timer_id);
2025                 }
2026 
2027                 EL(ha, "suspended\n");
2028 
2029                 break;
2030 
2031         default:
2032                 rval = DDI_FAILURE;
2033                 break;
2034         }
2035 
2036         kmem_free(buf, MAXPATHLEN);
2037 
2038         if (rval != DDI_SUCCESS) {
2039                 if (ha != NULL) {
2040                         EL(ha, "failed, rval = %xh\n", rval);
2041                 } else {
2042                         /*EMPTY*/
2043                         QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
2044                             ddi_get_instance(dip), rval);
2045                 }
2046         } else {
2047                 /*EMPTY*/
2048                 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
2049         }
2050 
2051         return (rval);
2052 }
2053 
2054 
2055 /*
2056  * ql_power
2057  *      Power a device attached to the system.
2058  *
2059  * Input:
2060  *      dip = pointer to device information structure.
2061  *      component = device.
2062  *      level = power level.
2063  *
2064  * Returns:
2065  *      DDI_SUCCESS or DDI_FAILURE.
2066  *
2067  * Context:
2068  *      Kernel context.
2069  */
2070 /* ARGSUSED */
2071 static int
2072 ql_power(dev_info_t *dip, int component, int level)
2073 {
2074         int                     rval = DDI_FAILURE;
2075         off_t                   csr;
2076         uint8_t                 saved_pm_val;
2077         ql_adapter_state_t      *ha;
2078         char                    *buf;
2079         char                    *path;
2080 
2081         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2082         if (ha == NULL || ha->pm_capable == 0) {
2083                 QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
2084                     ddi_get_instance(dip));
2085                 return (rval);
2086         }
2087 
2088         QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2089 
2090         buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2091         path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2092 
2093         if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
2094             level != PM_LEVEL_D3)) {
2095                 EL(ha, "invalid, component=%xh or level=%xh\n",
2096                     component, level);
                 kmem_free(buf, MAXPATHLEN);
                 kmem_free(path, MAXPATHLEN);
2097                 return (rval);
2098         }
2099 
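         /* Compute the config space offset of the PMCSR register. */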
2100         GLOBAL_HW_LOCK();
2101         csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
2102         GLOBAL_HW_UNLOCK();
2103 
2104         (void) snprintf(buf, MAXPATHLEN,
2105             "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
2106             ddi_pathname(dip, path));
2107 
2108         switch (level) {
2109         case PM_LEVEL_D0:       /* power up to D0 state - fully on */
2110 
2111                 QL_PM_LOCK(ha);
2112                 if (ha->power_level == PM_LEVEL_D0) {
2113                         QL_PM_UNLOCK(ha);
2114                         rval = DDI_SUCCESS;
2115                         break;
2116                 }
2117 
2118                 /*
2119                  * Mark the adapter as powered up (D0) so that
                      * interrupt handling resumes.
2120                  */
2121                 saved_pm_val = ha->power_level;
2122                 ha->power_level = PM_LEVEL_D0;
2123                 QL_PM_UNLOCK(ha);
2124 
2125                 GLOBAL_HW_LOCK();
2126 
2127                 ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
2128 
2129                 /*
2130                  * Delay after reset, for chip to recover.
2131                  * Otherwise causes system PANIC
2132                  */
2133                 drv_usecwait(200000);
2134 
2135                 GLOBAL_HW_UNLOCK();
2136 
2137                 if (ha->config_saved) {
2138                         ha->config_saved = 0;
2139                         if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2140                                 QL_PM_LOCK(ha);
2141                                 ha->power_level = saved_pm_val;
2142                                 QL_PM_UNLOCK(ha);
2143                                 cmn_err(CE_WARN, "%s failed to restore "
2144                                     "config regs", buf);
2145                                 break;
2146                         }
2147                 }
2148 
2149                 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
2150                         cmn_err(CE_WARN, "%s adapter initialization failed",
2151                             buf);
2152                 }
2153 
2154                 /* Wake up task_daemon. */
2155                 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
2156                     TASK_DAEMON_SLEEPING_FLG, 0);
2157 
2158                 /* Restart IP if it was running. */
2159                 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
2160                         (void) ql_initialize_ip(ha);
2161                         ql_isp_rcvbuf(ha);
2162                 }
2163 
2164                 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
2165                     ha->instance, QL_NAME);
2166 
2167                 rval = DDI_SUCCESS;
2168                 break;
2169 
2170         case PM_LEVEL_D3:       /* power down to D3 state - off */
2171 
2172                 QL_PM_LOCK(ha);
2173 
2174                 if (ha->busy || ((ha->task_daemon_flags &
2175                     TASK_DAEMON_SLEEPING_FLG) == 0)) {
2176                         QL_PM_UNLOCK(ha);
2177                         break;
2178                 }
2179 
2180                 if (ha->power_level == PM_LEVEL_D3) {
2181                         rval = DDI_SUCCESS;
2182                         QL_PM_UNLOCK(ha);
2183                         break;
2184                 }
2185                 QL_PM_UNLOCK(ha);
2186 
2187                 if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2188                         cmn_err(CE_WARN, "!%s failed to save"
2189                             " config regs", buf);
2190                         break;
2191                 }
2192                 ha->config_saved = 1;
2193 
2194                 /*
2195                  * Don't enable interrupts. Running mailbox commands with
2196                  * interrupts enabled could cause hangs since pm_run_scan()
2197                  * runs out of a callout thread and on single cpu systems
2198                  * cv_reltimedwait_sig(), called from ql_mailbox_command(),
2199                  * would not get to run.
2200                  */
2201                 TASK_DAEMON_LOCK(ha);
2202                 ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2203                 TASK_DAEMON_UNLOCK(ha);
2204 
2205                 ql_halt(ha, PM_LEVEL_D3);
2206 
2207                 /*
2208                  * Setup ql_intr to ignore interrupts from here on.
2209                  */
2210                 QL_PM_LOCK(ha);
2211                 ha->power_level = PM_LEVEL_D3;
2212                 QL_PM_UNLOCK(ha);
2213 
2214                 /*
2215                  * Wait for ISR to complete.
2216                  */
2217                 INTR_LOCK(ha);
2218                 ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2219                 INTR_UNLOCK(ha);
2220 
2221                 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2222                     ha->instance, QL_NAME);
2223 
2224                 rval = DDI_SUCCESS;
2225                 break;
2226         }
2227 
2228         kmem_free(buf, MAXPATHLEN);
2229         kmem_free(path, MAXPATHLEN);
2230 
2231         QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2232 
2233         return (rval);
2234 }
2235 
2236 /*
2237  * ql_quiesce
2238  *      Quiesce a device attached to the system.
2239  *
2240  * Input:
2241  *      dip = pointer to device information structure.
2242  *
2243  * Returns:
2244  *      DDI_SUCCESS
2245  *
2246  * Context:
2247  *      Kernel context.
2248  */
2249 static int
2250 ql_quiesce(dev_info_t *dip)
2251 {
2252         ql_adapter_state_t      *ha;
2253         uint32_t                timer;
2254         uint32_t                stat;
2255 
2256         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2257         if (ha == NULL) {
2258                 /* Oh well.... */
2259                 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2260                     ddi_get_instance(dip));
2261                 return (DDI_SUCCESS);
2262         }
2263 
2264         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2265 
2266         if (CFG_IST(ha, CFG_CTRL_8021)) {
2267                 (void) ql_stop_firmware(ha);
2268         } else if (CFG_IST(ha, CFG_CTRL_242581)) {
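                     /*
                      * Issue a STOP_FIRMWARE mailbox command directly
                      * through the registers and poll risc2host for its
                      * completion.
                      */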
2269                 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2270                 WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
2271                 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
2272                 for (timer = 0; timer < 30000; timer++) {
2273                         stat = RD32_IO_REG(ha, risc2host);
2274                         if (stat & BIT_15) {
2275                                 if ((stat & 0xff) < 0x12) {
2276                                         WRT32_IO_REG(ha, hccr,
2277                                             HC24_CLR_RISC_INT);
2278                                         break;
2279                                 }
2280                                 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2281                         }
2282                         drv_usecwait(100);
2283                 }
2284                 /* Reset the chip. */
2285                 WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
2286                     MWB_4096_BYTES);
2287                 drv_usecwait(100);
2288 
2289         } else {
2290                 /* Disable ISP interrupts. */
2291                 WRT16_IO_REG(ha, ictrl, 0);
2292                 /* Select RISC module registers. */
2293                 WRT16_IO_REG(ha, ctrl_status, 0);
2294                 /* Reset ISP semaphore. */
2295                 WRT16_IO_REG(ha, semaphore, 0);
2296                 /* Reset RISC module. */
2297                 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
2298                 /* Release RISC module. */
2299                 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
2300         }
2301 
2302         ql_disable_intr(ha);
2303 
2304         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2305 
2306         return (DDI_SUCCESS);
2307 }
2308 
2309 /* ************************************************************************ */
2310 /*              Fibre Channel Adapter (FCA) Transport Functions.            */
2311 /* ************************************************************************ */
2312 
2313 /*
2314  * ql_bind_port
2315  *      Handles port binding. The FC Transport attempts to bind an FCA port
2316  *      when it is ready to start transactions on the port. The FC Transport
2317  *      will call the fca_bind_port() function specified in the fca_transport
2318  *      structure it receives. The FCA must fill in the port_info structure
2319  *      passed in the call and also stash the information for future calls.
2320  *
2321  * Input:
2322  *      dip = pointer to FCA information structure.
2323  *      port_info = pointer to port information structure.
2324  *      bind_info = pointer to bind information structure.
2325  *
2326  * Returns:
2327  *      NULL = failure, otherwise an opaque FCA port handle.
2328  *
2329  * Context:
2330  *      Kernel context.
2331  */
2332 static opaque_t
2333 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2334     fc_fca_bind_info_t *bind_info)
2335 {
2336         ql_adapter_state_t      *ha, *vha;
2337         opaque_t                fca_handle = NULL;
2338         port_id_t               d_id;
2339         int                     port_npiv = bind_info->port_npiv;
2340         uchar_t                 *port_nwwn = bind_info->port_nwwn.raw_wwn;
2341         uchar_t                 *port_pwwn = bind_info->port_pwwn.raw_wwn;
2342 
2343         /* get state info based on the dip */
2344         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2345         if (ha == NULL) {
2346                 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2347                     ddi_get_instance(dip));
2348                 return (NULL);
2349         }
2350         QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2351 
2352         /* Verify port number is supported. */
2353         if (port_npiv != 0) {
2354                 if (!(ha->flags & VP_ENABLED)) {
2355                         QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2356                             ha->instance);
2357                         port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2358                         return (NULL);
2359                 }
2360                 if (!(ha->flags & POINT_TO_POINT)) {
2361                         QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2362                             ha->instance);
2363                         port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2364                         return (NULL);
2365                 }
2366                 if (!(ha->flags & FDISC_ENABLED)) {
2367                         QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2368                             "FDISC\n", ha->instance);
2369                         port_info->pi_error = FC_NPIV_FDISC_FAILED;
2370                         return (NULL);
2371                 }
2372                 if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2373                     MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2374                         QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2375                             "FC_OUTOFBOUNDS\n", ha->instance,
                                 bind_info->port_num);
2376                         port_info->pi_error = FC_OUTOFBOUNDS;
2377                         return (NULL);
2378                 }
2379         } else if (bind_info->port_num != 0) {
2380                 QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2381                     "supported\n", ha->instance, bind_info->port_num);
2382                 port_info->pi_error = FC_OUTOFBOUNDS;
2383                 return (NULL);
2384         }
2385 
2386         /* Locate port context. */
2387         for (vha = ha; vha != NULL; vha = vha->vp_next) {
2388                 if (vha->vp_index == bind_info->port_num) {
2389                         break;
2390                 }
2391         }
2392 
2393         /* If virtual port does not exist. */
2394         if (vha == NULL) {
2395                 vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
                     if (vha == NULL) {
                             /* Don't dereference a failed create below. */
                             port_info->pi_error = FC_OUTOFBOUNDS;
                             return (NULL);
                     }
2396         }
2397 
2398         /* make sure this port isn't already bound */
2399         if (vha->flags & FCA_BOUND) {
2400                 port_info->pi_error = FC_ALREADY;
2401         } else {
2402                 if (vha->vp_index != 0) {
2403                         bcopy(port_nwwn,
2404                             vha->loginparams.node_ww_name.raw_wwn, 8);
2405                         bcopy(port_pwwn,
2406                             vha->loginparams.nport_ww_name.raw_wwn, 8);
2407                 }
2408                 if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2409                         if (ql_vport_enable(vha) != QL_SUCCESS) {
2410                                 QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2411                                     "virtual port=%d\n", ha->instance,
2412                                     vha->vp_index);
2413                                 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2414                                 return (NULL);
2415                         }
2416                         cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2417                             "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2418                             "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2419                             QL_NAME, ha->instance, vha->vp_index,
2420                             port_pwwn[0], port_pwwn[1], port_pwwn[2],
2421                             port_pwwn[3], port_pwwn[4], port_pwwn[5],
2422                             port_pwwn[6], port_pwwn[7],
2423                             port_nwwn[0], port_nwwn[1], port_nwwn[2],
2424                             port_nwwn[3], port_nwwn[4], port_nwwn[5],
2425                             port_nwwn[6], port_nwwn[7]);
2426                 }
2427 
2428                 /* stash the bind_info supplied by the FC Transport */
2429                 vha->bind_info.port_handle = bind_info->port_handle;
2430                 vha->bind_info.port_statec_cb =
2431                     bind_info->port_statec_cb;
2432                 vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2433 
2434                 /* Set port's source ID. */
2435                 port_info->pi_s_id.port_id = vha->d_id.b24;
2436 
2437                 /* copy out the default login parameters */
2438                 bcopy((void *)&vha->loginparams,
2439                     (void *)&port_info->pi_login_params,
2440                     sizeof (la_els_logi_t));
2441 
2442                 /* Set port's hard address if enabled. */
2443                 port_info->pi_hard_addr.hard_addr = 0;
2444                 if (bind_info->port_num == 0) {
2445                         d_id.b24 = ha->d_id.b24;
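                             /*
                              * BIT_0 of the firmware options enables hard
                              * addressing; translate the configured index
                              * to an AL_PA.
                              */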
2446                         if (CFG_IST(ha, CFG_CTRL_24258081)) {
2447                                 if (ha->init_ctrl_blk.cb24.
2448                                     firmware_options_1[0] & BIT_0) {
2449                                         d_id.b.al_pa = ql_index_to_alpa[ha->
2450                                             init_ctrl_blk.cb24.
2451                                             hard_address[0]];
2452                                         port_info->pi_hard_addr.hard_addr =
2453                                             d_id.b24;
2454                                 }
2455                         } else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2456                             BIT_0) {
2457                                 d_id.b.al_pa = ql_index_to_alpa[ha->
2458                                     init_ctrl_blk.cb.hard_address[0]];
2459                                 port_info->pi_hard_addr.hard_addr = d_id.b24;
2460                         }
2461 
2462                         /* Set the node id data */
2463                         if (ql_get_rnid_params(ha,
2464                             sizeof (port_info->pi_rnid_params.params),
2465                             (caddr_t)&port_info->pi_rnid_params.params) ==
2466                             QL_SUCCESS) {
2467                                 port_info->pi_rnid_params.status = FC_SUCCESS;
2468                         } else {
2469                                 port_info->pi_rnid_params.status = FC_FAILURE;
2470                         }
2471 
2472                         /* Populate T11 FC-HBA details */
2473                         ql_populate_hba_fru_details(ha, port_info);
2474                         ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2475                             KM_SLEEP);
2476                         if (ha->pi_attrs != NULL) {
2477                                 bcopy(&port_info->pi_attrs, ha->pi_attrs,
2478                                     sizeof (fca_port_attrs_t));
2479                         }
2480                 } else {
2481                         port_info->pi_rnid_params.status = FC_FAILURE;
2482                         if (ha->pi_attrs != NULL) {
2483                                 bcopy(ha->pi_attrs, &port_info->pi_attrs,
2484                                     sizeof (fca_port_attrs_t));
2485                         }
2486                 }
2487 
2488                 /* Generate handle for this FCA. */
2489                 fca_handle = (opaque_t)vha;
2490 
2491                 ADAPTER_STATE_LOCK(ha);
2492                 vha->flags |= FCA_BOUND;
2493                 ADAPTER_STATE_UNLOCK(ha);
2494                 /* Set port's current state. */
2495                 port_info->pi_port_state = vha->state;
2496         }
2497 
2498         QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2499             "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2500             port_info->pi_port_state, port_info->pi_s_id.port_id);
2501 
2502         return (fca_handle);
2503 }
2504 
2505 /*
2506  * ql_unbind_port
2507  *      To unbind a Fibre Channel Adapter from an FC Port driver.
2508  *
2509  * Input:
2510  *      fca_handle = handle setup by ql_bind_port().
2511  *
2512  * Context:
2513  *      Kernel context.
2514  */
2515 static void
2516 ql_unbind_port(opaque_t fca_handle)
2517 {
2518         ql_adapter_state_t      *ha;
2519         ql_tgt_t                *tq;
2520         uint32_t                flgs;
2521 
2522         ha = ql_fca_handle_to_state(fca_handle);
2523         if (ha == NULL) {
2524                 /*EMPTY*/
2525                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2526                     (void *)fca_handle);
2527         } else {
2528                 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2529                     ha->vp_index);
2530 
2531                 if (!(ha->flags & FCA_BOUND)) {
2532                         /*EMPTY*/
2533                         QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2534                             ha->instance, ha->vp_index);
2535                 } else {
2536                         if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2537                                 if ((tq = ql_loop_id_to_queue(ha,
2538                                     FL_PORT_24XX_HDL)) != NULL) {
2539                                         (void) ql_logout_fabric_port(ha, tq);
2540                                 }
2541                                 (void) ql_vport_control(ha, (uint8_t)
2542                                     (CFG_IST(ha, CFG_CTRL_2425) ?
2543                                     VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2544                                 flgs = FCA_BOUND | VP_ENABLED;
2545                         } else {
2546                                 flgs = FCA_BOUND;
2547                         }
2548                         ADAPTER_STATE_LOCK(ha);
2549                         ha->flags &= ~flgs;
2550                         ADAPTER_STATE_UNLOCK(ha);
2551                 }
2552 
2553                 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2554                     ha->vp_index);
2555         }
2556 }
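
/*
 * Illustrative sketch, not part of the driver: the opaque handle
 * returned by ql_bind_port() is what every other FCA entry point
 * expects as fca_handle, and ql_unbind_port() retires it.  The FC
 * Transport normally reaches these static functions through the FCA
 * transport table; calling them by name here is for illustration only.
 *
 *	opaque_t	fca_handle;
 *
 *	fca_handle = ql_bind_port(dip, port_info, bind_info);
 *	if (fca_handle != NULL) {
 *		... issue I/O with ql_transport(), ql_els_send(), etc. ...
 *		ql_unbind_port(fca_handle);
 *	}
 */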
2557 
2558 /*
2559  * ql_init_pkt
2560  *      Initialize FCA portion of packet.
2561  *
2562  * Input:
2563  *      fca_handle = handle setup by ql_bind_port().
2564  *      pkt = pointer to fc_packet.
2565  *
2566  * Returns:
2567  *      FC_SUCCESS - the packet has successfully been initialized.
2568  *      FC_UNBOUND - the fca_handle specified is not bound.
2569  *      FC_NOMEM - the FCA failed initialization due to an allocation error.
2570  *      FC_FAILURE - the FCA failed initialization for undisclosed reasons.
2571  *
2572  * Context:
2573  *      Kernel context.
2574  */
2575 /* ARGSUSED */
2576 static int
2577 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2578 {
2579         ql_adapter_state_t      *ha;
2580         ql_srb_t                *sp;
2581         int                     rval = FC_SUCCESS;
2582 
2583         ha = ql_fca_handle_to_state(fca_handle);
2584         if (ha == NULL) {
2585                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2586                     (void *)fca_handle);
2587                 return (FC_UNBOUND);
2588         }
2589         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2590 
2591         sp = (ql_srb_t *)pkt->pkt_fca_private;
2592         sp->flags = 0;
2593 
2594         /* init cmd links */
2595         sp->cmd.base_address = sp;
2596         sp->cmd.prev = NULL;
2597         sp->cmd.next = NULL;
2598         sp->cmd.head = NULL;
2599 
2600         /* init watchdog links */
2601         sp->wdg.base_address = sp;
2602         sp->wdg.prev = NULL;
2603         sp->wdg.next = NULL;
2604         sp->wdg.head = NULL;
2605         sp->pkt = pkt;
2606         sp->ha = ha;
2607         sp->magic_number = QL_FCA_BRAND;
2608         sp->sg_dma.dma_handle = NULL;
2609 #ifndef __sparc
2610         if (CFG_IST(ha, CFG_CTRL_8021)) {
2611                 /* Setup DMA for scatter gather list. */
2612                 sp->sg_dma.size = sizeof (cmd6_2400_dma_t);
2613                 sp->sg_dma.type = LITTLE_ENDIAN_DMA;
2614                 sp->sg_dma.cookie_count = 1;
2615                 sp->sg_dma.alignment = 64;
2616                 if (ql_alloc_phys(ha, &sp->sg_dma, KM_SLEEP) != QL_SUCCESS) {
2617                         rval = FC_NOMEM;
2618                 }
2619         }
2620 #endif  /* __sparc */
2621 
2622         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2623 
2624         return (rval);
2625 }
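
/*
 * Illustrative sketch, not part of the driver: the expected pairing of
 * ql_init_pkt() and ql_un_init_pkt() around a packet whose
 * pkt_fca_private area is large enough for the FCA's ql_srb_t (plus
 * the scatter/gather DMA area allocated above for CFG_CTRL_8021
 * adapters on non-SPARC platforms).
 *
 *	if (ql_init_pkt(fca_handle, pkt, KM_SLEEP) == FC_SUCCESS) {
 *		... fill in pkt_cmd_fhdr/pkt_cmd and submit the packet ...
 *		(void) ql_un_init_pkt(fca_handle, pkt);
 *	}
 */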
2626 
2627 /*
2628  * ql_un_init_pkt
2629  *      Release all local resources bound to packet.
2630  *
2631  * Input:
2632  *      fca_handle = handle setup by ql_bind_port().
2633  *      pkt = pointer to fc_packet.
2634  *
2635  * Returns:
2636  *      FC_SUCCESS - the packet has successfully been invalidated.
2637  *      FC_UNBOUND - the fca_handle specified is not bound.
2638  *      FC_BADPACKET - the packet has not been initialized or has
2639  *                      already been freed by this FCA.
2640  *
2641  * Context:
2642  *      Kernel context.
2643  */
2644 static int
2645 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2646 {
2647         ql_adapter_state_t *ha;
2648         int rval;
2649         ql_srb_t *sp;
2650 
2651         ha = ql_fca_handle_to_state(fca_handle);
2652         if (ha == NULL) {
2653                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2654                     (void *)fca_handle);
2655                 return (FC_UNBOUND);
2656         }
2657         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2658 
2659         sp = (ql_srb_t *)pkt->pkt_fca_private;
2660 
2661         if (sp->magic_number != QL_FCA_BRAND) {
2662                 EL(ha, "failed, FC_BADPACKET\n");
2663                 rval = FC_BADPACKET;
2664         } else {
2665                 sp->magic_number = NULL;
2666                 ql_free_phys(ha, &sp->sg_dma);
2667                 rval = FC_SUCCESS;
2668         }
2669 
2670         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2671 
2672         return (rval);
2673 }
2674 
2675 /*
2676  * ql_els_send
2677  *      Issue an extended link service request.
2678  *
2679  * Input:
2680  *      fca_handle = handle setup by ql_bind_port().
2681  *      pkt = pointer to fc_packet.
2682  *
2683  * Returns:
2684  *      FC_SUCCESS - the command was successful.
2685  *      FC_ELS_FREJECT - the command was rejected by a Fabric.
2686  *      FC_ELS_PREJECT - the command was rejected by an N-port.
2687  *      FC_TRANSPORT_ERROR - a transport error occurred.
2688  *      FC_UNBOUND - the fca_handle specified is not bound.
2689  *      FC_ELS_BAD - the FCA cannot issue the requested ELS.
2690  *
2691  * Context:
2692  *      Kernel context.
2693  */
2694 static int
2695 ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
2696 {
2697         ql_adapter_state_t      *ha;
2698         int                     rval;
2699         clock_t                 timer = drv_usectohz(30000000);
2700         ls_code_t               els;
2701         la_els_rjt_t            rjt;
2702         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
2703 
2704         /* Verify proper command. */
2705         ha = ql_cmd_setup(fca_handle, pkt, &rval);
2706         if (ha == NULL) {
2707                 QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2708                     rval, fca_handle);
2709                 return (FC_INVALID_REQUEST);
2710         }
2711         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2712 
2713         /* Wait for suspension to end. */
2714         TASK_DAEMON_LOCK(ha);
2715         while (ha->task_daemon_flags & QL_SUSPENDED) {
2716                 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
2717 
2718                 /* 30 seconds from now */
2719                 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
2720                     &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
2721                         /*
2722                          * The timeout time 'timer' was
2723                          * reached without the condition
2724                          * being signaled.
2725                          */
2726                         pkt->pkt_state = FC_PKT_TRAN_BSY;
2727                         pkt->pkt_reason = FC_REASON_XCHG_BSY;
2728 
2729                         /* Release task daemon lock. */
2730                         TASK_DAEMON_UNLOCK(ha);
2731 
2732                         EL(ha, "QL_SUSPENDED failed=%xh\n",
2733                             QL_FUNCTION_TIMEOUT);
2734                         return (FC_TRAN_BUSY);
2735                 }
2736         }
2737         /* Release task daemon lock. */
2738         TASK_DAEMON_UNLOCK(ha);
2739 
2740         /* Setup response header. */
2741         bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
2742             sizeof (fc_frame_hdr_t));
2743 
2744         if (pkt->pkt_rsplen) {
2745                 bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
2746         }
2747 
2748         pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2749         pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2750         pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
2751             R_CTL_SOLICITED_CONTROL;
2752         pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
2753             F_CTL_END_SEQ;
2754 
2755         sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
2756             SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
2757             SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);
2758 
2759         sp->flags |= SRB_ELS_PKT;
2760 
2761         /* map the type of ELS to a function */
2762         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
2763             (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
2764 
2765 #if 0
2766         QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
2767         QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2768             sizeof (fc_frame_hdr_t) / 4);
2769         QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2770         QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
2771 #endif
2772 
2773         sp->iocb = ha->els_cmd;
2774         sp->req_cnt = 1;
2775 
2776         switch (els.ls_code) {
2777         case LA_ELS_RJT:
2778         case LA_ELS_ACC:
2779                 EL(ha, "LA_ELS_RJT\n");
2780                 pkt->pkt_state = FC_PKT_SUCCESS;
2781                 rval = FC_SUCCESS;
2782                 break;
2783         case LA_ELS_PLOGI:
2784         case LA_ELS_PDISC:
2785                 rval = ql_els_plogi(ha, pkt);
2786                 break;
2787         case LA_ELS_FLOGI:
2788         case LA_ELS_FDISC:
2789                 rval = ql_els_flogi(ha, pkt);
2790                 break;
2791         case LA_ELS_LOGO:
2792                 rval = ql_els_logo(ha, pkt);
2793                 break;
2794         case LA_ELS_PRLI:
2795                 rval = ql_els_prli(ha, pkt);
2796                 break;
2797         case LA_ELS_PRLO:
2798                 rval = ql_els_prlo(ha, pkt);
2799                 break;
2800         case LA_ELS_ADISC:
2801                 rval = ql_els_adisc(ha, pkt);
2802                 break;
2803         case LA_ELS_LINIT:
2804                 rval = ql_els_linit(ha, pkt);
2805                 break;
2806         case LA_ELS_LPC:
2807                 rval = ql_els_lpc(ha, pkt);
2808                 break;
2809         case LA_ELS_LSTS:
2810                 rval = ql_els_lsts(ha, pkt);
2811                 break;
2812         case LA_ELS_SCR:
2813                 rval = ql_els_scr(ha, pkt);
2814                 break;
2815         case LA_ELS_RSCN:
2816                 rval = ql_els_rscn(ha, pkt);
2817                 break;
2818         case LA_ELS_FARP_REQ:
2819                 rval = ql_els_farp_req(ha, pkt);
2820                 break;
2821         case LA_ELS_FARP_REPLY:
2822                 rval = ql_els_farp_reply(ha, pkt);
2823                 break;
2824         case LA_ELS_RLS:
2825                 rval = ql_els_rls(ha, pkt);
2826                 break;
2827         case LA_ELS_RNID:
2828                 rval = ql_els_rnid(ha, pkt);
2829                 break;
2830         default:
2831                 EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
2832                     els.ls_code);
2833                 /* Build RJT. */
2834                 bzero(&rjt, sizeof (rjt));
2835                 rjt.ls_code.ls_code = LA_ELS_RJT;
2836                 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
2837 
2838                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
2839                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
2840 
2841                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
2842                 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
2843                 rval = FC_SUCCESS;
2844                 break;
2845         }
2846 
2847 #if 0
2848         QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
2849         QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
2850             sizeof (fc_frame_hdr_t) / 4);
2851 #endif
2852         /*
2853          * Return success if the srb was consumed by an iocb. The packet
2854          * completion callback will be invoked by the response handler.
2855          */
2856         if (rval == QL_CONSUMED) {
2857                 rval = FC_SUCCESS;
2858         } else if (rval == FC_SUCCESS &&
2859             !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
2860                 /* Do command callback only if no error */
2861                 ql_awaken_task_daemon(ha, sp, 0, 0);
2862         }
2863 
2864         if (rval != FC_SUCCESS) {
2865                 EL(ha, "failed, rval = %xh\n", rval);
2866         } else {
2867                 /*EMPTY*/
2868                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2869         }
2870         return (rval);
2871 }
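
/*
 * Illustrative sketch, not part of the driver: ql_els_send() dispatches
 * on the ls_code_t found at the start of pkt_cmd, so a transport-built
 * PLOGI, for example, ends up in ql_els_plogi(), while an unsupported
 * code is answered with the LS_RJT built in the default case above.
 *
 *	ls_code_t	els;
 *
 *	bzero(&els, sizeof (els));
 *	els.ls_code = LA_ELS_PLOGI;
 *	... place els followed by the login payload in pkt->pkt_cmd ...
 *	rval = ql_els_send(fca_handle, pkt);
 */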
2872 
2873 /*
2874  * ql_get_cap
2875  *      Export FCA hardware and software capabilities.
2876  *
2877  * Input:
2878  *      fca_handle = handle setup by ql_bind_port().
2879  *      cap = pointer to the capabilities string.
2880  *      ptr = buffer pointer for return capability.
2881  *
2882  * Returns:
2883  *      FC_CAP_ERROR - no such capability
2884  *      FC_CAP_FOUND - the capability was returned and cannot be set
2885  *      FC_CAP_SETTABLE - the capability was returned and can be set
2886  *      FC_UNBOUND - the fca_handle specified is not bound.
2887  *
2888  * Context:
2889  *      Kernel context.
2890  */
2891 static int
2892 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2893 {
2894         ql_adapter_state_t      *ha;
2895         int                     rval;
2896         uint32_t                *rptr = (uint32_t *)ptr;
2897 
2898         ha = ql_fca_handle_to_state(fca_handle);
2899         if (ha == NULL) {
2900                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2901                     (void *)fca_handle);
2902                 return (FC_UNBOUND);
2903         }
2904         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2905 
2906         if (strcmp(cap, FC_NODE_WWN) == 0) {
2907                 bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2908                     ptr, 8);
2909                 rval = FC_CAP_FOUND;
2910         } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2911                 bcopy((void *)&ha->loginparams, ptr,
2912                     sizeof (la_els_logi_t));
2913                 rval = FC_CAP_FOUND;
2914         } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2915                 *rptr = (uint32_t)QL_UB_LIMIT;
2916                 rval = FC_CAP_FOUND;
2917         } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2918 
2919                 dev_info_t      *psydip = NULL;
2920 #ifdef __sparc
2921                 /*
2922                  * Disable streaming for certain two-chip adapters
2923                  * below Psycho to handle the Psycho byte hole issue.
2924                  */
2925                 if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
2926                     (!CFG_IST(ha, CFG_SBUS_CARD))) {
2927                         for (psydip = ddi_get_parent(ha->dip); psydip;
2928                             psydip = ddi_get_parent(psydip)) {
2929                                 if (strcmp(ddi_driver_name(psydip),
2930                                     "pcipsy") == 0) {
2931                                         break;
2932                                 }
2933                         }
2934                 }
2935 #endif  /* __sparc */
2936 
2937                 if (psydip) {
2938                         *rptr = (uint32_t)FC_NO_STREAMING;
2939                         EL(ha, "No Streaming\n");
2940                 } else {
2941                         *rptr = (uint32_t)FC_ALLOW_STREAMING;
2942                         EL(ha, "Allow Streaming\n");
2943                 }
2944                 rval = FC_CAP_FOUND;
2945         } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2946                 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2947                         *rptr = (uint32_t)CHAR_TO_SHORT(
2948                             ha->init_ctrl_blk.cb24.max_frame_length[0],
2949                             ha->init_ctrl_blk.cb24.max_frame_length[1]);
2950                 } else {
2951                         *rptr = (uint32_t)CHAR_TO_SHORT(
2952                             ha->init_ctrl_blk.cb.max_frame_length[0],
2953                             ha->init_ctrl_blk.cb.max_frame_length[1]);
2954                 }
2955                 rval = FC_CAP_FOUND;
2956         } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2957                 *rptr = FC_RESET_RETURN_ALL;
2958                 rval = FC_CAP_FOUND;
2959         } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2960                 *rptr = FC_NO_DVMA_SPACE;
2961                 rval = FC_CAP_FOUND;
2962         } else {
2963                 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2964                 rval = FC_CAP_ERROR;
2965         }
2966 
2967         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2968 
2969         return (rval);
2970 }
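
/*
 * Illustrative sketch, not part of the driver: querying the maximum
 * frame payload size through the capability interface.  For
 * FC_CAP_PAYLOAD_SIZE the return buffer is treated as a uint32_t and
 * is filled from the adapter's init control block.
 *
 *	uint32_t	frame_len;
 *
 *	if (ql_get_cap(fca_handle, FC_CAP_PAYLOAD_SIZE, &frame_len) ==
 *	    FC_CAP_FOUND) {
 *		... frame_len holds the configured max frame length ...
 *	}
 */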
2971 
2972 /*
2973  * ql_set_cap
2974  *      Allow the FC Transport to set FCA capabilities if possible.
2975  *
2976  * Input:
2977  *      fca_handle = handle setup by ql_bind_port().
2978  *      cap = pointer to the capabilities string.
2979  *      ptr = buffer pointer for capability.
2980  *
2981  * Returns:
2982  *      FC_CAP_ERROR - no such capability
2983  *      FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2984  *      FC_CAP_SETTABLE - the capability was successfully set.
2985  *      FC_UNBOUND - the fca_handle specified is not bound.
2986  *
2987  * Context:
2988  *      Kernel context.
2989  */
2990 /* ARGSUSED */
2991 static int
2992 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2993 {
2994         ql_adapter_state_t      *ha;
2995         int                     rval;
2996 
2997         ha = ql_fca_handle_to_state(fca_handle);
2998         if (ha == NULL) {
2999                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3000                     (void *)fca_handle);
3001                 return (FC_UNBOUND);
3002         }
3003         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3004 
3005         if (strcmp(cap, FC_NODE_WWN) == 0) {
3006                 rval = FC_CAP_FOUND;
3007         } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3008                 rval = FC_CAP_FOUND;
3009         } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3010                 rval = FC_CAP_FOUND;
3011         } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3012                 rval = FC_CAP_FOUND;
3013         } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3014                 rval = FC_CAP_FOUND;
3015         } else {
3016                 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3017                 rval = FC_CAP_ERROR;
3018         }
3019 
3020         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3021 
3022         return (rval);
3023 }
3024 
3025 /*
3026  * ql_getmap
3027  *      Request an Arbitrated Loop (AL-PA) map.
3028  *
3029  * Input:
3030  *      fca_handle = handle setup by ql_bind_port().
3031  *      mapbuf = buffer pointer for map.
3032  *
3033  * Returns:
3034  *      FC_OLDPORT - the specified port is not operating in loop mode.
3035  *      FC_OFFLINE - the specified port is not online.
3036  *      FC_NOMAP - there is no loop map available for this port.
3037  *      FC_UNBOUND - the fca_handle specified is not bound.
3038  *      FC_SUCCESS - a valid map has been placed in mapbuf.
3039  *
3040  * Context:
3041  *      Kernel context.
3042  */
3043 static int
3044 ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
3045 {
3046         ql_adapter_state_t      *ha;
3047         clock_t                 timer = drv_usectohz(30000000);
3048         int                     rval = FC_SUCCESS;
3049 
3050         ha = ql_fca_handle_to_state(fca_handle);
3051         if (ha == NULL) {
3052                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3053                     (void *)fca_handle);
3054                 return (FC_UNBOUND);
3055         }
3056         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3057 
3058         mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
3059         mapbuf->lilp_myalpa = ha->d_id.b.al_pa;
3060 
3061         /* Wait for suspension to end. */
3062         TASK_DAEMON_LOCK(ha);
3063         while (ha->task_daemon_flags & QL_SUSPENDED) {
3064                 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
3065 
3066                 /* 30 seconds from now */
3067                 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
3068                     &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
3069                         /*
3070                          * The timeout time 'timer' was
3071                          * reached without the condition
3072                          * being signaled.
3073                          */
3074 
3075                         /* Release task daemon lock. */
3076                         TASK_DAEMON_UNLOCK(ha);
3077 
3078                         EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
3079                         return (FC_TRAN_BUSY);
3080                 }
3081         }
3082         /* Release task daemon lock. */
3083         TASK_DAEMON_UNLOCK(ha);
3084 
3085         if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
3086             (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
3087                 /*
3088                  * Now, since transport drivers consider this an
3089                  * offline condition, let's wait a few seconds
3090                  * for any loop transitions before we reset the
3091                  * chip and restart all over again.
3092                  */
3093                 ql_delay(ha, 2000000);
3094                 EL(ha, "failed, FC_NOMAP\n");
3095                 rval = FC_NOMAP;
3096         } else {
3097                 /*EMPTY*/
3098                 QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
3099                     "data %xh %xh %xh %xh\n", ha->instance,
3100                     mapbuf->lilp_myalpa, mapbuf->lilp_length,
3101                     mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
3102                     mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
3103         }
3104 
3105         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3106 #if 0
3107         QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
3108 #endif
3109         return (rval);
3110 }
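
/*
 * Illustrative sketch, not part of the driver: walking the loop map
 * returned in mapbuf, assuming lilp_length counts the valid AL_PA
 * entries in lilp_alpalist[].
 *
 *	fc_lilpmap_t	map;
 *	uint32_t	i;
 *
 *	if (ql_getmap(fca_handle, &map) == FC_SUCCESS) {
 *		for (i = 0; i < map.lilp_length; i++) {
 *			... map.lilp_alpalist[i] is a loop AL_PA ...
 *		}
 *	}
 */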
3111 
3112 /*
3113  * ql_transport
3114  *      Issue an I/O request. Handles all regular requests.
3115  *
3116  * Input:
3117  *      fca_handle = handle setup by ql_bind_port().
3118  *      pkt = pointer to fc_packet.
3119  *
3120  * Returns:
3121  *      FC_SUCCESS - the packet was accepted for transport.
3122  *      FC_TRANSPORT_ERROR - a transport error occurred.
3123  *      FC_BADPACKET - the packet to be transported had not been
3124  *                      initialized by this FCA.
3125  *      FC_UNBOUND - the fca_handle specified is not bound.
3126  *
3127  * Context:
3128  *      Kernel context.
3129  */
3130 static int
3131 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
3132 {
3133         ql_adapter_state_t      *ha;
3134         int                     rval = FC_TRANSPORT_ERROR;
3135         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
3136 
3137         /* Verify proper command. */
3138         ha = ql_cmd_setup(fca_handle, pkt, &rval);
3139         if (ha == NULL) {
3140                 QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
3141                     rval, fca_handle);
3142                 return (rval);
3143         }
3144         QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
3145 #if 0
3146         QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
3147             sizeof (fc_frame_hdr_t) / 4);
3148         QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
3149         QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
3150 #endif
3151 
3152         /* Reset SRB flags. */
3153         sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
3154             SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
3155             SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
3156             SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
3157             SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
3158             SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
3159             SRB_MS_PKT | SRB_ELS_PKT);
3160 
3161         pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
3162         pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
3163         pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
3164         pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
3165         pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
3166 
3167         switch (pkt->pkt_cmd_fhdr.r_ctl) {
3168         case R_CTL_COMMAND:
3169                 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
3170                         sp->flags |= SRB_FCP_CMD_PKT;
3171                         rval = ql_fcp_scsi_cmd(ha, pkt, sp);
3172                 }
3173                 break;
3174 
3175         default:
3176                 /* Setup response header and buffer. */
3177                 if (pkt->pkt_rsplen) {
3178                         bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
3179                 }
3180 
3181                 switch (pkt->pkt_cmd_fhdr.r_ctl) {
3182                 case R_CTL_UNSOL_DATA:
3183                         if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
3184                                 sp->flags |= SRB_IP_PKT;
3185                                 rval = ql_fcp_ip_cmd(ha, pkt, sp);
3186                         }
3187                         break;
3188 
3189                 case R_CTL_UNSOL_CONTROL:
3190                         if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
3191                                 sp->flags |= SRB_GENERIC_SERVICES_PKT;
3192                                 rval = ql_fc_services(ha, pkt);
3193                         }
3194                         break;
3195 
3196                 case R_CTL_SOLICITED_DATA:
3197                 case R_CTL_STATUS:
3198                 default:
3199                         pkt->pkt_state = FC_PKT_LOCAL_RJT;
3200                         pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3201                         rval = FC_TRANSPORT_ERROR;
3202                         EL(ha, "unknown, r_ctl=%xh\n",
3203                             pkt->pkt_cmd_fhdr.r_ctl);
3204                         break;
3205                 }
3206         }
3207 
3208         if (rval != FC_SUCCESS) {
3209                 EL(ha, "failed, rval = %xh\n", rval);
3210         } else {
3211                 /*EMPTY*/
3212                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3213         }
3214 
3215         return (rval);
3216 }
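
/*
 * Illustrative sketch, not part of the driver: ql_transport() routes a
 * packet on its command frame header, so the caller selects the path
 * by setting r_ctl and type before submitting.  For an FCP SCSI
 * command:
 *
 *	pkt->pkt_cmd_fhdr.r_ctl = R_CTL_COMMAND;
 *	pkt->pkt_cmd_fhdr.type = FC_TYPE_SCSI_FCP;
 *	rval = ql_transport(fca_handle, pkt);
 *
 * IP traffic uses R_CTL_UNSOL_DATA with FC_TYPE_IS8802_SNAP, generic
 * services use R_CTL_UNSOL_CONTROL with FC_TYPE_FC_SERVICES, and
 * anything else is failed with FC_PKT_LOCAL_RJT/FC_REASON_UNSUPPORTED.
 */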
3217 
3218 /*
3219  * ql_ub_alloc
3220  *      Allocate buffers for unsolicited exchanges.
3221  *
3222  * Input:
3223  *      fca_handle = handle setup by ql_bind_port().
3224  *      tokens = token array for each buffer.
3225  *      size = size of each buffer.
3226  *      count = pointer to number of buffers.
3227  *      type = the FC-4 type the buffers are reserved for.
3228  *              1 = Extended Link Services, 5 = LLC/SNAP
3229  *
3230  * Returns:
3231  *      FC_FAILURE - buffers could not be allocated.
3232  *      FC_TOOMANY - the FCA could not allocate the requested
3233  *                      number of buffers.
3234  *      FC_SUCCESS - unsolicited buffers were allocated.
3235  *      FC_UNBOUND - the fca_handle specified is not bound.
3236  *
3237  * Context:
3238  *      Kernel context.
3239  */
3240 static int
3241 ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
3242     uint32_t *count, uint32_t type)
3243 {
3244         ql_adapter_state_t      *ha;
3245         caddr_t                 bufp = NULL;
3246         fc_unsol_buf_t          *ubp;
3247         ql_srb_t                *sp;
3248         uint32_t                index;
3249         uint32_t                cnt;
3250         uint32_t                ub_array_index = 0;
3251         int                     rval = FC_SUCCESS;
3252         int                     ub_updated = FALSE;
3253 
3254         /* Check handle. */
3255         ha = ql_fca_handle_to_state(fca_handle);
3256         if (ha == NULL) {
3257                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3258                     (void *)fca_handle);
3259                 return (FC_UNBOUND);
3260         }
3261         QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
3262             ha->instance, ha->vp_index, *count);
3263 
3264         QL_PM_LOCK(ha);
3265         if (ha->power_level != PM_LEVEL_D0) {
3266                 QL_PM_UNLOCK(ha);
3267                 QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
3268                     ha->vp_index);
3269                 return (FC_FAILURE);
3270         }
3271         QL_PM_UNLOCK(ha);
3272 
3273         /* Acquire adapter state lock. */
3274         ADAPTER_STATE_LOCK(ha);
3275 
3276         /* Check the count. */
3277         if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
3278                 *count = 0;
3279                 EL(ha, "failed, FC_TOOMANY\n");
3280                 rval = FC_TOOMANY;
3281         }
3282 
3283         /*
3284          * reset ub_array_index
3285          */
3286         ub_array_index = 0;
3287 
3288         /*
3289          * Now proceed to allocate any buffers required
3290          */
3291         for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
3292                 /* Allocate all memory needed. */
3293                 ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
3294                     KM_SLEEP);
3295                 if (ubp == NULL) {
3296                         EL(ha, "failed, FC_FAILURE\n");
3297                         rval = FC_FAILURE;
3298                 } else {
3299                         sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
3300                         if (sp == NULL) {
3301                                 kmem_free(ubp, sizeof (fc_unsol_buf_t));
3302                                 rval = FC_FAILURE;
3303                         } else {
3304                                 if (type == FC_TYPE_IS8802_SNAP) {
3305 #ifdef  __sparc
3306                                         if (ql_get_dma_mem(ha,
3307                                             &sp->ub_buffer, size,
3308                                             BIG_ENDIAN_DMA,
3309                                             QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3310                                                 rval = FC_FAILURE;
3311                                                 kmem_free(ubp,
3312                                                     sizeof (fc_unsol_buf_t));
3313                                                 kmem_free(sp,
3314                                                     sizeof (ql_srb_t));
3315                                         } else {
3316                                                 bufp = sp->ub_buffer.bp;
3317                                                 sp->ub_size = size;
3318                                         }
3319 #else
3320                                         if (ql_get_dma_mem(ha,
3321                                             &sp->ub_buffer, size,
3322                                             LITTLE_ENDIAN_DMA,
3323                                             QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3324                                                 rval = FC_FAILURE;
3325                                                 kmem_free(ubp,
3326                                                     sizeof (fc_unsol_buf_t));
3327                                                 kmem_free(sp,
3328                                                     sizeof (ql_srb_t));
3329                                         } else {
3330                                                 bufp = sp->ub_buffer.bp;
3331                                                 sp->ub_size = size;
3332                                         }
3333 #endif
3334                                 } else {
3335                                         bufp = kmem_zalloc(size, KM_SLEEP);
3336                                         if (bufp == NULL) {
3337                                                 rval = FC_FAILURE;
3338                                                 kmem_free(ubp,
3339                                                     sizeof (fc_unsol_buf_t));
3340                                                 kmem_free(sp,
3341                                                     sizeof (ql_srb_t));
3342                                         } else {
3343                                                 sp->ub_size = size;
3344                                         }
3345                                 }
3346                         }
3347                 }
3348 
3349                 if (rval == FC_SUCCESS) {
3350                         /* Find next available slot. */
3351                         QL_UB_LOCK(ha);
3352                         while (ha->ub_array[ub_array_index] != NULL) {
3353                                 ub_array_index++;
3354                         }
3355 
3356                         ubp->ub_fca_private = (void *)sp;
3357 
3358                         /* init cmd links */
3359                         sp->cmd.base_address = sp;
3360                         sp->cmd.prev = NULL;
3361                         sp->cmd.next = NULL;
3362                         sp->cmd.head = NULL;
3363 
3364                         /* init wdg links */
3365                         sp->wdg.base_address = sp;
3366                         sp->wdg.prev = NULL;
3367                         sp->wdg.next = NULL;
3368                         sp->wdg.head = NULL;
3369                         sp->ha = ha;
3370 
3371                         ubp->ub_buffer = bufp;
3372                         ubp->ub_bufsize = size;
3373                         ubp->ub_port_handle = fca_handle;
3374                         ubp->ub_token = ub_array_index;
3375 
3376                         /* Save the token. */
3377                         tokens[index] = ub_array_index;
3378 
3379                         /* Setup FCA private information. */
3380                         sp->ub_type = type;
3381                         sp->handle = ub_array_index;
3382                         sp->flags |= SRB_UB_IN_FCA;
3383 
3384                         ha->ub_array[ub_array_index] = ubp;
3385                         ha->ub_allocated++;
3386                         ub_updated = TRUE;
3387                         QL_UB_UNLOCK(ha);
3388                 }
3389         }
3390 
3391         /* Release adapter state lock. */
3392         ADAPTER_STATE_UNLOCK(ha);
3393 
3394         /* IP buffer. */
3395         if (ub_updated) {
3396                 if ((type == FC_TYPE_IS8802_SNAP) &&
3397                     (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {
3398 
3399                         ADAPTER_STATE_LOCK(ha);
3400                         ha->flags |= IP_ENABLED;
3401                         ADAPTER_STATE_UNLOCK(ha);
3402 
3403                         if (!(ha->flags & IP_INITIALIZED)) {
3404                                 if (CFG_IST(ha, CFG_CTRL_2422)) {
3405                                         ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
3406                                             LSB(ql_ip_mtu);
3407                                         ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
3408                                             MSB(ql_ip_mtu);
3409                                         ha->ip_init_ctrl_blk.cb24.buf_size[0] =
3410                                             LSB(size);
3411                                         ha->ip_init_ctrl_blk.cb24.buf_size[1] =
3412                                             MSB(size);
3413 
3414                                         cnt = CHAR_TO_SHORT(
3415                                             ha->ip_init_ctrl_blk.cb24.cc[0],
3416                                             ha->ip_init_ctrl_blk.cb24.cc[1]);
3417 
3418                                         if (cnt < *count) {
3419                                                 ha->ip_init_ctrl_blk.cb24.cc[0]
3420                                                     = LSB(*count);
3421                                                 ha->ip_init_ctrl_blk.cb24.cc[1]
3422                                                     = MSB(*count);
3423                                         }
3424                                 } else {
3425                                         ha->ip_init_ctrl_blk.cb.mtu_size[0] =
3426                                             LSB(ql_ip_mtu);
3427                                         ha->ip_init_ctrl_blk.cb.mtu_size[1] =
3428                                             MSB(ql_ip_mtu);
3429                                         ha->ip_init_ctrl_blk.cb.buf_size[0] =
3430                                             LSB(size);
3431                                         ha->ip_init_ctrl_blk.cb.buf_size[1] =
3432                                             MSB(size);
3433 
3434                                         cnt = CHAR_TO_SHORT(
3435                                             ha->ip_init_ctrl_blk.cb.cc[0],
3436                                             ha->ip_init_ctrl_blk.cb.cc[1]);
3437 
3438                                         if (cnt < *count) {
3439                                                 ha->ip_init_ctrl_blk.cb.cc[0] =
3440                                                     LSB(*count);
3441                                                 ha->ip_init_ctrl_blk.cb.cc[1] =
3442                                                     MSB(*count);
3443                                         }
3444                                 }
3445 
3446                                 (void) ql_initialize_ip(ha);
3447                         }
3448                         ql_isp_rcvbuf(ha);
3449                 }
3450         }
3451 
3452         if (rval != FC_SUCCESS) {
3453                 EL(ha, "failed=%xh\n", rval);
3454         } else {
3455                 /*EMPTY*/
3456                 QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
3457                     ha->vp_index);
3458         }
3459         return (rval);
3460 }
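
/*
 * Illustrative sketch, not part of the driver: a transport requesting
 * unsolicited buffers.  The token for each buffer comes back in
 * tokens[] and is what ql_ub_release() and ql_ub_free() expect later;
 * QL_UB_LIMIT caps the total per adapter.  The FC-4 type value of 1
 * (Extended Link Services) follows the comment above, and the buffer
 * size here is arbitrary for the sketch.
 *
 *	uint64_t	tokens[16];
 *	uint32_t	cnt = 16;
 *
 *	if (ql_ub_alloc(fca_handle, tokens, 1024, &cnt, 1) == FC_SUCCESS) {
 *		... cnt buffers are now registered with the FCA ...
 *	}
 */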
3461 
3462 /*
3463  * ql_ub_free
3464  *      Free unsolicited buffers.
3465  *
3466  * Input:
3467  *      fca_handle = handle setup by ql_bind_port().
3468  *      count = number of buffers.
3469  *      tokens = token array for each buffer.
3470  *
3471  * Returns:
3472  *      FC_SUCCESS - the requested buffers have been freed.
3473  *      FC_UNBOUND - the fca_handle specified is not bound.
3474  *      FC_UB_BADTOKEN - an invalid token was encountered.
3475  *                       No buffers have been released.
3476  *
3477  * Context:
3478  *      Kernel context.
3479  */
3480 static int
3481 ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3482 {
3483         ql_adapter_state_t      *ha;
3484         ql_srb_t                *sp;
3485         uint32_t                index;
3486         uint64_t                ub_array_index;
3487         int                     rval = FC_SUCCESS;
3488 
3489         /* Check handle. */
3490         ha = ql_fca_handle_to_state(fca_handle);
3491         if (ha == NULL) {
3492                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3493                     (void *)fca_handle);
3494                 return (FC_UNBOUND);
3495         }
3496         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3497 
3498         /* Acquire adapter state lock. */
3499         ADAPTER_STATE_LOCK(ha);
3500 
3501         /* Check all returned tokens. */
3502         for (index = 0; index < count; index++) {
3503                 fc_unsol_buf_t  *ubp;
3504 
3505                 /* Check the token range. */
3506                 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3507                         EL(ha, "failed, FC_UB_BADTOKEN\n");
3508                         rval = FC_UB_BADTOKEN;
3509                         break;
3510                 }
3511 
3512                 /* Check the unsolicited buffer array. */
3513                 QL_UB_LOCK(ha);
3514                 ubp = ha->ub_array[ub_array_index];
3515 
3516                 if (ubp == NULL) {
3517                         EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3518                         rval = FC_UB_BADTOKEN;
3519                         QL_UB_UNLOCK(ha);
3520                         break;
3521                 }
3522 
3523                 /* Check the state of the unsolicited buffer. */
3524                 sp = ha->ub_array[ub_array_index]->ub_fca_private;
3525                 sp->flags |= SRB_UB_FREE_REQUESTED;
3526 
3527                 while (!(sp->flags & SRB_UB_IN_FCA) ||
3528                     (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
3529                         QL_UB_UNLOCK(ha);
3530                         ADAPTER_STATE_UNLOCK(ha);
3531                         delay(drv_usectohz(100000));
3532                         ADAPTER_STATE_LOCK(ha);
3533                         QL_UB_LOCK(ha);
3534                 }
3535                 ha->ub_array[ub_array_index] = NULL;
3536                 QL_UB_UNLOCK(ha);
3537                 ql_free_unsolicited_buffer(ha, ubp);
3538         }
3539 
3540         if (rval == FC_SUCCESS) {
3541                 /*
3542                  * Signal any pending hardware reset when there are
3543                  * no more unsolicited buffers in use.
3544                  */
3545                 if (ha->ub_allocated == 0) {
3546                         cv_broadcast(&ha->pha->cv_ub);
3547                 }
3548         }
3549 
3550         /* Release adapter state lock. */
3551         ADAPTER_STATE_UNLOCK(ha);
3552 
3553         if (rval != FC_SUCCESS) {
3554                 EL(ha, "failed=%xh\n", rval);
3555         } else {
3556                 /*EMPTY*/
3557                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3558         }
3559         return (rval);
3560 }
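
/*
 * Illustrative sketch, not part of the driver: freeing previously
 * allocated unsolicited buffers by token.  ql_reset() below waits for
 * ub_allocated to drop to zero before completing a hardware reset, so
 * the transport frees its buffers when it sees FC_STATE_RESET_REQUESTED.
 *
 *	(void) ql_ub_free(fca_handle, cnt, tokens);
 */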
3561 
3562 /*
3563  * ql_ub_release
3564  *      Release unsolicited buffers from the FC Transport
3565  *      to the FCA for future use.
3566  *
3567  * Input:
3568  *      fca_handle = handle setup by ql_bind_port().
3569  *      count = number of buffers.
3570  *      tokens = token array for each buffer.
3571  *
3572  * Returns:
3573  *      FC_SUCCESS - the requested buffers have been released.
3574  *      FC_UNBOUND - the fca_handle specified is not bound.
3575  *      FC_UB_BADTOKEN - an invalid token was encountered.
3576  *              No buffers have been released.
3577  *
3578  * Context:
3579  *      Kernel context.
3580  */
3581 static int
3582 ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3583 {
3584         ql_adapter_state_t      *ha;
3585         ql_srb_t                *sp;
3586         uint32_t                index;
3587         uint64_t                ub_array_index;
3588         int                     rval = FC_SUCCESS;
3589         int                     ub_ip_updated = FALSE;
3590 
3591         /* Check handle. */
3592         ha = ql_fca_handle_to_state(fca_handle);
3593         if (ha == NULL) {
3594                 QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3595                     (void *)fca_handle);
3596                 return (FC_UNBOUND);
3597         }
3598         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3599 
3600         /* Acquire adapter state lock. */
3601         ADAPTER_STATE_LOCK(ha);
3602         QL_UB_LOCK(ha);
3603 
3604         /* Check all returned tokens. */
3605         for (index = 0; index < count; index++) {
3606                 /* Check the token range. */
3607                 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3608                         EL(ha, "failed, FC_UB_BADTOKEN\n");
3609                         rval = FC_UB_BADTOKEN;
3610                         break;
3611                 }
3612 
3613                 /* Check the unsolicited buffer array. */
3614                 if (ha->ub_array[ub_array_index] == NULL) {
3615                         EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3616                         rval = FC_UB_BADTOKEN;
3617                         break;
3618                 }
3619 
3620                 /* Check the state of the unsolicited buffer. */
3621                 sp = ha->ub_array[ub_array_index]->ub_fca_private;
3622                 if (sp->flags & SRB_UB_IN_FCA) {
3623                         EL(ha, "failed, FC_UB_BADTOKEN-3\n");
3624                         rval = FC_UB_BADTOKEN;
3625                         break;
3626                 }
3627         }
3628 
3629         /* If all tokens checkout, release the buffers. */
3630         if (rval == FC_SUCCESS) {
3631                 /* Check all returned tokens. */
3632                 for (index = 0; index < count; index++) {
3633                         fc_unsol_buf_t  *ubp;
3634 
3635                         ub_array_index = tokens[index];
3636                         ubp = ha->ub_array[ub_array_index];
3637                         sp = ubp->ub_fca_private;
3638 
3639                         ubp->ub_resp_flags = 0;
3640                         sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
3641                         sp->flags |= SRB_UB_IN_FCA;
3642 
3643                         /* IP buffer. */
3644                         if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
3645                                 ub_ip_updated = TRUE;
3646                         }
3647                 }
3648         }
3649 
3650         QL_UB_UNLOCK(ha);
3651         /* Release adapter state lock. */
3652         ADAPTER_STATE_UNLOCK(ha);
3653 
3654         /*
3655          * XXX: We should call ql_isp_rcvbuf() to return a
3656          * buffer to the ISP only if the number of buffers falls
3657          * below the low water mark.
3658          */
3659         if (ub_ip_updated) {
3660                 ql_isp_rcvbuf(ha);
3661         }
3662 
3663         if (rval != FC_SUCCESS) {
3664                 EL(ha, "failed, rval = %xh\n", rval);
3665         } else {
3666                 /*EMPTY*/
3667                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3668         }
3669         return (rval);
3670 }
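
/*
 * Illustrative sketch, not part of the driver: once the transport has
 * consumed an unsolicited buffer delivered through its port_unsol_cb
 * callback, it hands the buffer back to the FCA by token so the buffer
 * can be reused.
 *
 *	uint64_t	token = ubp->ub_token;
 *
 *	(void) ql_ub_release(fca_handle, 1, &token);
 */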
3671 
3672 /*
3673  * ql_abort
3674  *      Abort a packet.
3675  *
3676  * Input:
3677  *      fca_handle = handle setup by ql_bind_port().
3678  *      pkt = pointer to fc_packet.
3679  *      flags = KM_SLEEP flag.
3680  *
3681  * Returns:
3682  *      FC_SUCCESS - the packet has been successfully aborted.
3683  *      FC_ABORTED - the packet has been successfully aborted.
3684  *      FC_ABORTING - the packet is being aborted.
3685  *      FC_ABORT_FAILED - the packet could not be aborted.
3686  *      FC_TRANSPORT_ERROR - a transport error occurred while attempting
3687  *              to abort the packet.
3688  *      FC_BADEXCHANGE - no packet found.
3689  *      FC_UNBOUND - the fca_handle specified is not bound.
3690  *
3691  * Context:
3692  *      Kernel context.
3693  */
3694 static int
3695 ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
3696 {
3697         port_id_t               d_id;
3698         ql_link_t               *link;
3699         ql_adapter_state_t      *ha, *pha;
3700         ql_srb_t                *sp;
3701         ql_tgt_t                *tq;
3702         ql_lun_t                *lq;
3703         int                     rval = FC_ABORTED;
3704 
3705         ha = ql_fca_handle_to_state(fca_handle);
3706         if (ha == NULL) {
3707                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3708                     (void *)fca_handle);
3709                 return (FC_UNBOUND);
3710         }
3711 
3712         pha = ha->pha;
3713 
3714         QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
3715 
3716         /* Get target queue pointer. */
3717         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
3718         tq = ql_d_id_to_queue(ha, d_id);
3719 
3720         if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
3721                 if (tq == NULL) {
3722                         EL(ha, "failed, FC_TRANSPORT_ERROR\n");
3723                         rval = FC_TRANSPORT_ERROR;
3724                 } else {
3725                         EL(ha, "failed, FC_OFFLINE\n");
3726                         rval = FC_OFFLINE;
3727                 }
3728                 return (rval);
3729         }
3730 
3731         sp = (ql_srb_t *)pkt->pkt_fca_private;
3732         lq = sp->lun_queue;
3733 
3734         /* Set poll flag if sleep wanted. */
3735         if (flags == KM_SLEEP) {
3736                 sp->flags |= SRB_POLL;
3737         }
3738 
3739         /* Acquire target queue lock. */
3740         DEVICE_QUEUE_LOCK(tq);
3741         REQUEST_RING_LOCK(ha);
3742 
3743         /* If command not already started. */
3744         if (!(sp->flags & SRB_ISP_STARTED)) {
3745                 /* Check pending queue for command. */
3746                 sp = NULL;
3747                 for (link = pha->pending_cmds.first; link != NULL;
3748                     link = link->next) {
3749                         sp = link->base_address;
3750                         if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
3751                                 /* Remove srb from q. */
3752                                 ql_remove_link(&pha->pending_cmds, &sp->cmd);
3753                                 break;
3754                         } else {
3755                                 sp = NULL;
3756                         }
3757                 }
3758                 REQUEST_RING_UNLOCK(ha);
3759 
3760                 if (sp == NULL) {
3761                         /* Check for cmd on device queue. */
3762                         for (link = lq->cmd.first; link != NULL;
3763                             link = link->next) {
3764                                 sp = link->base_address;
3765                                 if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
3766                                         /* Remove srb from q. */
3767                                         ql_remove_link(&lq->cmd, &sp->cmd);
3768                                         break;
3769                                 } else {
3770                                         sp = NULL;
3771                                 }
3772                         }
3773                 }
3774                 /* Release device lock */
3775                 DEVICE_QUEUE_UNLOCK(tq);
3776 
3777                 /* If command on target queue. */
3778                 if (sp != NULL) {
3779                         sp->flags &= ~SRB_IN_DEVICE_QUEUE;
3780 
3781                         /* Set return status */
3782                         pkt->pkt_reason = CS_ABORTED;
3783 
3784                         sp->cmd.next = NULL;
3785                         ql_done(&sp->cmd);
3786                         rval = FC_ABORTED;
3787                 } else {
3788                         EL(ha, "failed, FC_BADEXCHANGE\n");
3789                         rval = FC_BADEXCHANGE;
3790                 }
3791         } else if (sp->flags & SRB_ISP_COMPLETED) {
3792                 /* Release device queue lock. */
3793                 REQUEST_RING_UNLOCK(ha);
3794                 DEVICE_QUEUE_UNLOCK(tq);
3795                 EL(ha, "failed, already done, FC_FAILURE\n");
3796                 rval = FC_FAILURE;
3797         } else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
3798             (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
3799                 /*
3800                  * If we get here, the target data/response CTIO is
3801                  * with the firmware. Since the firmware is supposed
3802                  * to terminate such I/Os with an error, we need not
3803                  * do anything. If the firmware decides not to
3804                  * terminate those I/Os and simply keeps quiet, then
3805                  * we need to initiate cleanup here by calling ql_done.
3806                  */
3807                 REQUEST_RING_UNLOCK(ha);
3808                 DEVICE_QUEUE_UNLOCK(tq);
3809                 rval = FC_ABORTED;
3810         } else {
3811                 request_t       *ep = pha->request_ring_bp;
3812                 uint16_t        cnt;
3813 
3814                 if (sp->handle != 0) {
3815                         for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
3816                                 if (sp->handle == ddi_get32(
3817                                     pha->hba_buf.acc_handle, &ep->handle)) {
3818                                         ep->entry_type = INVALID_ENTRY_TYPE;
3819                                         break;
3820                                 }
3821                                 ep++;
3822                         }
3823                 }
3824 
3825                 /* Release device queue lock. */
3826                 REQUEST_RING_UNLOCK(ha);
3827                 DEVICE_QUEUE_UNLOCK(tq);
3828 
3829                 sp->flags |= SRB_ABORTING;
3830                 (void) ql_abort_command(ha, sp);
3831                 pkt->pkt_reason = CS_ABORTED;
3832                 rval = FC_ABORTED;
3833         }
3834 
3835         QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
3836 
3837         return (rval);
3838 }
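
/*
 * Illustrative sketch, not part of the driver: aborting an outstanding
 * packet.  Passing KM_SLEEP asks for blocking behavior by setting
 * SRB_POLL on the srb (per the code above); on success pkt_reason is
 * set to CS_ABORTED.
 *
 *	if (ql_abort(fca_handle, pkt, KM_SLEEP) == FC_ABORTED) {
 *		... the packet is no longer outstanding ...
 *	}
 */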
3839 
3840 /*
3841  * ql_reset
3842  *      Reset link or hardware.
3843  *
3844  * Input:
3845  *      fca_handle = handle setup by ql_bind_port().
3846  *      cmd = reset type command.
3847  *
3848  * Returns:
3849  *      FC_SUCCESS - reset has successfully finished.
3850  *      FC_UNBOUND - the fca_handle specified is not bound.
3851  *      FC_FAILURE - reset failed.
3852  *
3853  * Context:
3854  *      Kernel context.
3855  */
3856 static int
3857 ql_reset(opaque_t fca_handle, uint32_t cmd)
3858 {
3859         ql_adapter_state_t      *ha;
3860         int                     rval = FC_SUCCESS, rval2;
3861 
3862         ha = ql_fca_handle_to_state(fca_handle);
3863         if (ha == NULL) {
3864                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3865                     (void *)fca_handle);
3866                 return (FC_UNBOUND);
3867         }
3868 
3869         QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
3870             ha->vp_index, cmd);
3871 
3872         switch (cmd) {
3873         case FC_FCA_CORE:
3874                 /* dump firmware core if specified. */
3875                 if (ha->vp_index == 0) {
3876                         if (ql_dump_firmware(ha) != QL_SUCCESS) {
3877                                 EL(ha, "failed, FC_FAILURE\n");
3878                                 rval = FC_FAILURE;
3879                         }
3880                 }
3881                 break;
3882         case FC_FCA_LINK_RESET:
3883                 if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
3884                         if (ql_loop_reset(ha) != QL_SUCCESS) {
3885                                 EL(ha, "failed, FC_FAILURE-2\n");
3886                                 rval = FC_FAILURE;
3887                         }
3888                 }
3889                 break;
3890         case FC_FCA_RESET_CORE:
3891         case FC_FCA_RESET:
3892                 /* Dump the firmware core if specified. */
3893                 if (cmd == FC_FCA_RESET_CORE) {
3894                         if (ha->vp_index != 0) {
3895                                 rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
3896                                     ? QL_SUCCESS : ql_loop_reset(ha);
3897                         } else {
3898                                 rval2 = ql_dump_firmware(ha);
3899                         }
3900                         if (rval2 != QL_SUCCESS) {
3901                                 EL(ha, "failed, FC_FAILURE-3\n");
3902                                 rval = FC_FAILURE;
3903                         }
3904                 }
3905 
3906                 /* Free up all unsolicited buffers. */
3907                 if (ha->ub_allocated != 0) {
3908                         /* Notify the port driver to release the buffers. */
3909                         ha->state = FC_PORT_SPEED_MASK(ha->state);
3910                         ha->state |= FC_STATE_RESET_REQUESTED;
3911                         if (ha->flags & FCA_BOUND) {
3912                                 (ha->bind_info.port_statec_cb)
3913                                     (ha->bind_info.port_handle,
3914                                     ha->state);
3915                         }
3916                 }
3917 
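                      /* Keep only the port speed bits of the state. */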
3918                 ha->state = FC_PORT_SPEED_MASK(ha->state);
3919 
3920                 /* All buffers freed */
3921                 if (ha->ub_allocated == 0) {
3922                         /* Hardware reset. */
3923                         if (cmd == FC_FCA_RESET) {
3924                                 if (ha->vp_index == 0) {
3925                                         (void) ql_abort_isp(ha);
3926                                 } else if (!(ha->pha->task_daemon_flags &
3927                                     LOOP_DOWN)) {
3928                                         (void) ql_loop_reset(ha);
3929                                 }
3930                         }
3931 
3932                         /* Inform that the hardware has been reset */
3933                         ha->state |= FC_STATE_RESET;
3934                 } else {
3935                         /*
3936                          * The port driver expects an online state if
3937                          * the buffers have not been freed.
3938                          */
3939                         if (ha->topology & QL_LOOP_CONNECTION) {
3940                                 ha->state |= FC_STATE_LOOP;
3941                         } else {
3942                                 ha->state |= FC_STATE_ONLINE;
3943                         }
3944                 }
3945 
3946                 TASK_DAEMON_LOCK(ha);
3947                 ha->task_daemon_flags |= FC_STATE_CHANGE;
3948                 TASK_DAEMON_UNLOCK(ha);
3949 
3950                 ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);
3951 
3952                 break;
3953         default:
3954                 EL(ha, "unknown cmd=%xh\n", cmd);
3955                 break;
3956         }
3957 
3958         if (rval != FC_SUCCESS) {
3959                 EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
3960         } else {
3961                 /*EMPTY*/
3962                 QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
3963                     ha->vp_index);
3964         }
3965 
3966         return (rval);
3967 }
3968 
3969 /*
3970  * ql_port_manage
3971  *      Perform port management or diagnostics.
3972  *
3973  * Input:
3974  *      fca_handle = handle setup by ql_bind_port().
3975  *      cmd = pointer to command structure.
3976  *
3977  * Returns:
3978  *      FC_SUCCESS - the request completed successfully.
3979  *      FC_FAILURE - the request did not complete successfully.
3980  *      FC_UNBOUND - the fca_handle specified is not bound.
3981  *
3982  * Context:
3983  *      Kernel context.
3984  */
3985 static int
3986 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3987 {
3988         clock_t                 timer;
3989         uint16_t                index;
3990         uint32_t                *bp;
3991         port_id_t               d_id;
3992         ql_link_t               *link;
3993         ql_adapter_state_t      *ha, *pha;
3994         ql_tgt_t                *tq;
3995         dma_mem_t               buffer_xmt, buffer_rcv;
3996         size_t                  length;
3997         uint32_t                cnt;
3998         char                    buf[80];
3999         lbp_t                   *lb;
4000         ql_mbx_data_t           mr;
4001         app_mbx_cmd_t           *mcp;
4002         int                     i0;
4003         uint8_t                 *bptr;
4004         int                     rval2, rval = FC_SUCCESS;
4005         uint32_t                opcode;
4006         uint32_t                set_flags = 0;
4007 
4008         ha = ql_fca_handle_to_state(fca_handle);
4009         if (ha == NULL) {
4010                 QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
4011                     (void *)fca_handle);
4012                 return (FC_UNBOUND);
4013         }
4014         pha = ha->pha;
4015 
4016         QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
4017             cmd->pm_cmd_code);
4018 
4019         ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
4020 
4021         /*
4022          * Wait for all outstanding commands to complete
4023          */
4024         index = (uint16_t)ql_wait_outstanding(ha);
4025 
4026         if (index != MAX_OUTSTANDING_COMMANDS) {
4027                 ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4028                 ql_restart_queues(ha);
4029                 EL(ha, "failed, FC_TRAN_BUSY\n");
4030                 return (FC_TRAN_BUSY);
4031         }
4032 
4033         switch (cmd->pm_cmd_code) {
4034         case FC_PORT_BYPASS:
4035                 d_id.b24 = *cmd->pm_cmd_buf;
4036                 tq = ql_d_id_to_queue(ha, d_id);
4037                 if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
4038                         EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
4039                         rval = FC_FAILURE;
4040                 }
4041                 break;
4042         case FC_PORT_UNBYPASS:
4043                 d_id.b24 = *cmd->pm_cmd_buf;
4044                 tq = ql_d_id_to_queue(ha, d_id);
4045                 if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
4046                         EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
4047                         rval = FC_FAILURE;
4048                 }
4049                 break;
4050         case FC_PORT_GET_FW_REV:
4051                 (void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
4052                     pha->fw_minor_version, pha->fw_subminor_version);
4053                 length = strlen(buf) + 1;
4054                 if (cmd->pm_data_len < length) {
4055                         cmd->pm_data_len = length;
4056                         EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
4057                         rval = FC_FAILURE;
4058                 } else {
4059                         (void) strcpy(cmd->pm_data_buf, buf);
4060                 }
4061                 break;
4062 
4063         case FC_PORT_GET_FCODE_REV: {
4064                 caddr_t         fcode_ver_buf = NULL;
4065 
4066                 i0 = 0;
4067                 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
4068                 rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
4069                     DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
4070                     (caddr_t)&fcode_ver_buf, &i0);
4071                 length = (uint_t)i0;
4072 
4073                 if (rval2 != DDI_PROP_SUCCESS) {
4074                         EL(ha, "failed, getting version = %xh\n", rval2);
4075                         length = 20;
4076                         fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
4077                         if (fcode_ver_buf != NULL) {
4078                                 (void) sprintf(fcode_ver_buf,
4079                                     "NO FCODE FOUND");
4080                         }
4081                 }
4082 
4083                 if (cmd->pm_data_len < length) {
4084                         EL(ha, "length error, FC_PORT_GET_FCODE_REV "
4085                             "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
4086                         cmd->pm_data_len = length;
4087                         rval = FC_FAILURE;
4088                 } else if (fcode_ver_buf != NULL) {
4089                         bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
4090                             length);
4091                 }
4092 
4093                 if (fcode_ver_buf != NULL) {
4094                         kmem_free(fcode_ver_buf, length);
4095                 }
4096                 break;
4097         }
4098 
4099         case FC_PORT_GET_DUMP:
4100                 QL_DUMP_LOCK(pha);
4101                 if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4102                         EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4103                             "length=%lxh\n", cmd->pm_data_len);
4104                         cmd->pm_data_len = pha->risc_dump_size;
4105                         rval = FC_FAILURE;
4106                 } else if (pha->ql_dump_state & QL_DUMPING) {
4107                         EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4108                         rval = FC_TRAN_BUSY;
4109                 } else if (pha->ql_dump_state & QL_DUMP_VALID) {
4110                         (void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4111                         pha->ql_dump_state |= QL_DUMP_UPLOADED;
4112                 } else {
4113                         EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4114                         rval = FC_FAILURE;
4115                 }
4116                 QL_DUMP_UNLOCK(pha);
4117                 break;
4118         case FC_PORT_FORCE_DUMP:
4119                 PORTMANAGE_LOCK(ha);
4120                 if (ql_dump_firmware(ha) != QL_SUCCESS) {
4121                         EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4122                         rval = FC_FAILURE;
4123                 }
4124                 PORTMANAGE_UNLOCK(ha);
4125                 break;
4126         case FC_PORT_DOWNLOAD_FW:
4127                 PORTMANAGE_LOCK(ha);
4128                 if (CFG_IST(ha, CFG_CTRL_24258081)) {
4129                         if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4130                             (uint32_t)cmd->pm_data_len,
4131                             ha->flash_fw_addr << 2) != QL_SUCCESS) {
4132                                 EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
4133                                 rval = FC_FAILURE;
4134                         }
4135                         ql_reset_chip(ha);
4136                         set_flags |= ISP_ABORT_NEEDED;
4137                 } else {
4138                         /* Save copy of the firmware. */
4139                         if (pha->risc_code != NULL) {
4140                                 kmem_free(pha->risc_code, pha->risc_code_size);
4141                                 pha->risc_code = NULL;
4142                                 pha->risc_code_size = 0;
4143                         }
4144 
4145                         pha->risc_code = kmem_alloc(cmd->pm_data_len,
4146                             KM_SLEEP);
4147                         if (pha->risc_code != NULL) {
4148                                 pha->risc_code_size =
4149                                     (uint32_t)cmd->pm_data_len;
4150                                 bcopy(cmd->pm_data_buf, pha->risc_code,
4151                                     cmd->pm_data_len);
4152 
4153                                 /* Do abort to force reload. */
4154                                 ql_reset_chip(ha);
4155                                 if (ql_abort_isp(ha) != QL_SUCCESS) {
4156                                         kmem_free(pha->risc_code,
4157                                             pha->risc_code_size);
4158                                         pha->risc_code = NULL;
4159                                         pha->risc_code_size = 0;
4160                                         ql_reset_chip(ha);
4161                                         (void) ql_abort_isp(ha);
4162                                         EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
4163                                             " FC_FAILURE\n");
4164                                         rval = FC_FAILURE;
4165                                 }
4166                         }
4167                 }
4168                 PORTMANAGE_UNLOCK(ha);
4169                 break;
4170         case FC_PORT_GET_DUMP_SIZE:
4171                 bp = (uint32_t *)cmd->pm_data_buf;
4172                 *bp = pha->risc_dump_size;
4173                 break;
4174         case FC_PORT_DIAG:
4175                 /*
4176                  * Prevents concurrent diags
4177                  */
4178                 PORTMANAGE_LOCK(ha);
4179 
4180                 /* Wait for suspension to end. */
4181                 for (timer = 0; timer < 3000 &&
4182                     pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4183                         ql_delay(ha, 10000);
4184                 }
4185 
4186                 if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4187                         EL(ha, "failed, FC_TRAN_BUSY-2\n");
4188                         rval = FC_TRAN_BUSY;
4189                         PORTMANAGE_UNLOCK(ha);
4190                         break;
4191                 }
4192 
4193                 switch (cmd->pm_cmd_flags) {
4194                 case QL_DIAG_EXEFMW:
4195                         if (ql_start_firmware(ha) != QL_SUCCESS) {
4196                                 EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4197                                 rval = FC_FAILURE;
4198                         }
4199                         break;
4200                 case QL_DIAG_CHKCMDQUE:
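                              /* Count commands still outstanding; expect none. */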
4201                         for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4202                             i0++) {
4203                                 cnt += (pha->outstanding_cmds[i0] != NULL);
4204                         }
4205                         if (cnt != 0) {
4206                                 EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4207                                     "FC_FAILURE\n");
4208                                 rval = FC_FAILURE;
4209                         }
4210                         break;
4211                 case QL_DIAG_FMWCHKSUM:
4212                         if (ql_verify_checksum(ha) != QL_SUCCESS) {
4213                                 EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4214                                     "FC_FAILURE\n");
4215                                 rval = FC_FAILURE;
4216                         }
4217                         break;
4218                 case QL_DIAG_SLFTST:
4219                         if (ql_online_selftest(ha) != QL_SUCCESS) {
4220                                 EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4221                                 rval = FC_FAILURE;
4222                         }
4223                         ql_reset_chip(ha);
4224                         set_flags |= ISP_ABORT_NEEDED;
4225                         break;
4226                 case QL_DIAG_REVLVL:
4227                         if (cmd->pm_stat_len <
4228                             sizeof (ql_adapter_revlvl_t)) {
4229                                 EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4230                                     "slen=%lxh, rlvllen=%lxh\n",
4231                                     cmd->pm_stat_len,
4232                                     sizeof (ql_adapter_revlvl_t));
4233                                 rval = FC_NOMEM;
4234                         } else {
4235                                 bcopy((void *)&(pha->adapter_stats->revlvl),
4236                                     cmd->pm_stat_buf,
4237                                     (size_t)cmd->pm_stat_len);
4238                                 cmd->pm_stat_len =
4239                                     sizeof (ql_adapter_revlvl_t);
4240                         }
4241                         break;
4242                 case QL_DIAG_LPBMBX:
4243 
4244                         if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4245                                 EL(ha, "failed, QL_DIAG_LPBMBX "
4246                                     "FC_INVALID_REQUEST, pmlen=%lxh, "
4247                                     "reqd=%lxh\n", cmd->pm_data_len,
4248                                     sizeof (struct app_mbx_cmd));
4249                                 rval = FC_INVALID_REQUEST;
4250                                 break;
4251                         }
4252                         /*
4253                          * Don't do the wrap test on a 2200 when the
4254                          * firmware is running.
4255                          */
4256                         if (!CFG_IST(ha, CFG_CTRL_2200)) {
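                                      /* Load the caller-supplied mailbox values. */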
4257                                 mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4258                                 mr.mb[1] = mcp->mb[1];
4259                                 mr.mb[2] = mcp->mb[2];
4260                                 mr.mb[3] = mcp->mb[3];
4261                                 mr.mb[4] = mcp->mb[4];
4262                                 mr.mb[5] = mcp->mb[5];
4263                                 mr.mb[6] = mcp->mb[6];
4264                                 mr.mb[7] = mcp->mb[7];
4265 
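                                      /*
                                       * Save a copy at mb[10..17] for comparison
                                       * after the wrap test.
                                       */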
4266                                 bcopy(&mr.mb[0], &mr.mb[10],
4267                                     sizeof (uint16_t) * 8);
4268 
4269                                 if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4270                                         EL(ha, "failed, QL_DIAG_LPBMBX "
4271                                             "FC_FAILURE\n");
4272                                         rval = FC_FAILURE;
4273                                         break;
4274                                 } else {
4275                                         for (i0 = 1; i0 < 8; i0++) {
4276                                                 if (mr.mb[i0] !=
4277                                                     mr.mb[i0 + 10]) {
4278                                                         EL(ha, "failed, "
4279                                                             "QL_DIAG_LPBMBX "
4280                                                             "FC_FAILURE-2\n");
4281                                                         rval = FC_FAILURE;
4282                                                         break;
4283                                                 }
4284                                         }
4285                                 }
4286 
4287                                 if (rval == FC_FAILURE) {
4288                                         (void) ql_flash_errlog(ha,
4289                                             FLASH_ERRLOG_ISP_ERR, 0,
4290                                             RD16_IO_REG(ha, hccr),
4291                                             RD16_IO_REG(ha, istatus));
4292                                         set_flags |= ISP_ABORT_NEEDED;
4293                                 }
4294                         }
4295                         break;
4296                 case QL_DIAG_LPBDTA:
4297                         /*
4298                          * For loopback data, we receive the
4299                          * data back in pm_stat_buf. This provides
4300                          * the user an opportunity to compare the
4301                          * transmitted and received data.
4302                          *
4303                          * NB: lb->options are:
4304                          *      0 --> Ten bit loopback
4305                          *      1 --> One bit loopback
4306                          *      2 --> External loopback
4307                          */
4308                         if (cmd->pm_data_len > 65536) {
4309                                 rval = FC_TOOMANY;
4310                                 EL(ha, "failed, QL_DIAG_LPBDTA "
4311                                     "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4312                                 break;
4313                         }
4314                         if (ql_get_dma_mem(ha, &buffer_xmt,
4315                             (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4316                             QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4317                                 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4318                                 rval = FC_NOMEM;
4319                                 break;
4320                         }
4321                         if (ql_get_dma_mem(ha, &buffer_rcv,
4322                             (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4323                             QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4324                                 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4325                                 rval = FC_NOMEM;
4326                                 break;
4327                         }
4328                         ddi_rep_put8(buffer_xmt.acc_handle,
4329                             (uint8_t *)cmd->pm_data_buf,
4330                             (uint8_t *)buffer_xmt.bp,
4331                             cmd->pm_data_len, DDI_DEV_AUTOINCR);
4332 
4333                         /* The 2200 adapter must be in loop mode for this test. */
4334                         if (CFG_IST(ha, CFG_CTRL_2200)) {
4335                                 bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4336                                 if (ha->flags & POINT_TO_POINT ||
4337                                     (ha->task_daemon_flags & LOOP_DOWN &&
4338                                     *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4339                                         cnt = *bptr;
4340                                         *bptr = (uint8_t)
4341                                             (*bptr & ~(BIT_6|BIT_5|BIT_4));
4342                                         (void) ql_abort_isp(ha);
4343                                         *bptr = (uint8_t)cnt;
4344                                 }
4345                         }
4346 
4347                         /* Shutdown IP. */
4348                         if (pha->flags & IP_INITIALIZED) {
4349                                 (void) ql_shutdown_ip(pha);
4350                         }
4351 
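                              /* Set up the loopback parameter block. */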
4352                         lb = (lbp_t *)cmd->pm_cmd_buf;
4353                         lb->transfer_count =
4354                             (uint32_t)cmd->pm_data_len;
4355                         lb->transfer_segment_count = 0;
4356                         lb->receive_segment_count = 0;
4357                         lb->transfer_data_address =
4358                             buffer_xmt.cookie.dmac_address;
4359                         lb->receive_data_address =
4360                             buffer_rcv.cookie.dmac_address;
4361 
4362                         if (ql_loop_back(ha, 0, lb,
4363                             buffer_xmt.cookie.dmac_notused,
4364                             buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4365                                 bzero((void *)cmd->pm_stat_buf,
4366                                     cmd->pm_stat_len);
4367                                 ddi_rep_get8(buffer_rcv.acc_handle,
4368                                     (uint8_t *)cmd->pm_stat_buf,
4369                                     (uint8_t *)buffer_rcv.bp,
4370                                     cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4371                                 rval = FC_SUCCESS;
4372                         } else {
4373                                 EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4374                                 rval = FC_FAILURE;
4375                         }
4376 
4377                         ql_free_phys(ha, &buffer_xmt);
4378                         ql_free_phys(ha, &buffer_rcv);
4379 
4380                         /* Needed to recover the f/w */
4381                         set_flags |= ISP_ABORT_NEEDED;
4382 
4383                         /* Restart IP if it was shutdown. */
4384                         if (pha->flags & IP_ENABLED &&
4385                             !(pha->flags & IP_INITIALIZED)) {
4386                                 (void) ql_initialize_ip(pha);
4387                                 ql_isp_rcvbuf(pha);
4388                         }
4389 
4390                         break;
4391                 case QL_DIAG_ECHO: {
4392                         /*
4393                          * issue an echo command with a user supplied
4394                          * data pattern and destination address
4395                          */
4396                         echo_t          echo;           /* temp echo struct */
4397 
4398                         /* Setup echo cmd & adjust for platform */
4399                         opcode = QL_ECHO_CMD;
4400                         BIG_ENDIAN_32(&opcode);
4401 
4402                         /*
4403                          * Due to limitations in the QL
4404                          * firmware, the echo data field is
4405                          * limited to 220 bytes.
4406                          */
4407                         if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4408                             (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4409                                 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4410                                     "cmdl1=%lxh, statl2=%lxh\n",
4411                                     cmd->pm_cmd_len, cmd->pm_stat_len);
4412                                 rval = FC_TOOMANY;
4413                                 break;
4414                         }
4415 
4416                         /*
4417                          * The input data buffer holds the
4418                          * user-supplied data pattern.  The
4419                          * "echoed" data will be DMAed into the
4420                          * output data buffer.  Therefore the
4421                          * length of the output buffer must be
4422                          * equal to or greater than the input
4423                          * buffer length.
4424                          */
4425                         if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4426                                 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4427                                     " cmdl1=%lxh, statl2=%lxh\n",
4428                                     cmd->pm_cmd_len, cmd->pm_stat_len);
4429                                 rval = FC_TOOMANY;
4430                                 break;
4431                         }
4432                         /* add four bytes for the opcode */
4433                         echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4434 
4435                         /*
4436                          * Are we 32- or 64-bit addressed?
4437                          * We need to get the appropriate
4438                          * DMA and set the command options:
4439                          * 64-bit (bit 6) or 32-bit
4440                          * (no bit 6) addressing.
4441                          * While we are at it, let's ask
4442                          * for a real echo (bit 15).
4443                          */
4444                         echo.options = BIT_15;
4445                         if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4446                             !(CFG_IST(ha, CFG_CTRL_8081))) {
4447                                 echo.options = (uint16_t)
4448                                     (echo.options | BIT_6);
4449                         }
4450 
4451                         /*
4452                          * Set up the DMA mappings for the
4453                          * output and input data buffers.
4454                          * First the output buffer
4455                          */
4456                         if (ql_get_dma_mem(ha, &buffer_xmt,
4457                             (uint32_t)(cmd->pm_data_len + 4),
4458                             LITTLE_ENDIAN_DMA,
4459                             QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4460                                 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4461                                 rval = FC_NOMEM;
4462                                 break;
4463                         }
4464                         echo.transfer_data_address = buffer_xmt.cookie;
4465 
4466                         /* Next the input buffer */
4467                         if (ql_get_dma_mem(ha, &buffer_rcv,
4468                             (uint32_t)(cmd->pm_data_len + 4),
4469                             LITTLE_ENDIAN_DMA,
4470                             QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4471                                 /*
4472                                  * Since we could not allocate
4473                                  * DMA space for the input
4474                                  * buffer, we need to clean up
4475                                  * by freeing the DMA space
4476                                  * allocated for the output
4477                                  * buffer.
4478                                  */
4479                                 ql_free_phys(ha, &buffer_xmt);
4480                                 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4481                                 rval = FC_NOMEM;
4482                                 break;
4483                         }
4484                         echo.receive_data_address = buffer_rcv.cookie;
4485 
4486                         /*
4487                          * copy the 4 byte ECHO op code to the
4488                          * allocated DMA space
4489                          */
4490                         ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4491                             (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4492 
4493                         /*
4494                          * copy the user supplied data to the
4495                          * allocated DMA space
4496                          */
4497                         ddi_rep_put8(buffer_xmt.acc_handle,
4498                             (uint8_t *)cmd->pm_cmd_buf,
4499                             (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4500                             DDI_DEV_AUTOINCR);
4501 
4502                         /* Shutdown IP. */
4503                         if (pha->flags & IP_INITIALIZED) {
4504                                 (void) ql_shutdown_ip(pha);
4505                         }
4506 
4507                         /* send the echo */
4508                         if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4509                                 ddi_rep_put8(buffer_rcv.acc_handle,
4510                                     (uint8_t *)buffer_rcv.bp + 4,
4511                                     (uint8_t *)cmd->pm_stat_buf,
4512                                     cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4513                         } else {
4514                                 EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4515                                 rval = FC_FAILURE;
4516                         }
4517 
4518                         /* Restart IP if it was shutdown. */
4519                         if (pha->flags & IP_ENABLED &&
4520                             !(pha->flags & IP_INITIALIZED)) {
4521                                 (void) ql_initialize_ip(pha);
4522                                 ql_isp_rcvbuf(pha);
4523                         }
4524                         /* free up our DMA buffers */
4525                         ql_free_phys(ha, &buffer_xmt);
4526                         ql_free_phys(ha, &buffer_rcv);
4527                         break;
4528                 }
4529                 default:
4530                         EL(ha, "unknown=%xh, FC_PORT_DIAG "
4531                             "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4532                         rval = FC_INVALID_REQUEST;
4533                         break;
4534                 }
4535                 PORTMANAGE_UNLOCK(ha);
4536                 break;
4537         case FC_PORT_LINK_STATE:
4538                 /* Check for name equal to null. */
4539                 for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4540                     index++) {
4541                         if (cmd->pm_cmd_buf[index] != 0) {
4542                                 break;
4543                         }
4544                 }
4545 
4546                 /* If name not null. */
4547                 if (index < 8 && cmd->pm_cmd_len >= 8) {
4548                         /* Locate device queue. */
4549                         tq = NULL;
4550                         for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4551                             tq == NULL; index++) {
4552                                 for (link = ha->dev[index].first; link != NULL;
4553                                     link = link->next) {
4554                                         tq = link->base_address;
4555 
4556                                         if (bcmp((void *)&tq->port_name[0],
4557                                             (void *)cmd->pm_cmd_buf, 8) == 0) {
4558                                                 break;
4559                                         } else {
4560                                                 tq = NULL;
4561                                         }
4562                                 }
4563                         }
4564 
4565                         if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4566                                 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4567                                 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4568                         } else {
4569                                 cnt = FC_PORT_SPEED_MASK(ha->state) |
4570                                     FC_STATE_OFFLINE;
4571                                 cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4572                                 cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4573                         }
4574                 } else {
4575                         cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4576                         cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4577                 }
4578                 break;
4579         case FC_PORT_INITIALIZE:
4580                 if (cmd->pm_cmd_len >= 8) {
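                              /* Locate the device queue for the given port name. */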
4581                         tq = NULL;
4582                         for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4583                             tq == NULL; index++) {
4584                                 for (link = ha->dev[index].first; link != NULL;
4585                                     link = link->next) {
4586                                         tq = link->base_address;
4587 
4588                                         if (bcmp((void *)&tq->port_name[0],
4589                                             (void *)cmd->pm_cmd_buf, 8) == 0) {
4590                                                 if (!VALID_DEVICE_ID(ha,
4591                                                     tq->loop_id)) {
4592                                                         tq = NULL;
4593                                                 }
4594                                                 break;
4595                                         } else {
4596                                                 tq = NULL;
4597                                         }
4598                                 }
4599                         }
4600 
4601                         if (tq == NULL || ql_target_reset(ha, tq,
4602                             ha->loop_reset_delay) != QL_SUCCESS) {
4603                                 EL(ha, "failed, FC_PORT_INITIALIZE "
4604                                     "FC_FAILURE\n");
4605                                 rval = FC_FAILURE;
4606                         }
4607                 } else {
4608                         EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4609                             "clen=%lxh\n", cmd->pm_cmd_len);
4610 
4611                         rval = FC_FAILURE;
4612                 }
4613                 break;
4614         case FC_PORT_RLS:
4615                 if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4616                         EL(ha, "failed, buffer size passed: %lxh, "
4617                             "req: %lxh\n", cmd->pm_data_len,
4618                             (sizeof (fc_rls_acc_t)));
4619                         rval = FC_FAILURE;
4620                 } else if (LOOP_NOT_READY(pha)) {
4621                         EL(ha, "loop NOT ready\n");
4622                         bzero(cmd->pm_data_buf, cmd->pm_data_len);
4623                 } else if (ql_get_link_status(ha, ha->loop_id,
4624                     cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4625                         EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4626                         rval = FC_FAILURE;
4627 #ifdef _BIG_ENDIAN
4628                 } else {
4629                         fc_rls_acc_t            *rls;
4630 
4631                         rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4632                         LITTLE_ENDIAN_32(&rls->rls_link_fail);
4633                         LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4634                         LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4635                         LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4636 #endif /* _BIG_ENDIAN */
4637                 }
4638                 break;
4639         case FC_PORT_GET_NODE_ID:
4640                 if (ql_get_rnid_params(ha, cmd->pm_data_len,
4641                     cmd->pm_data_buf) != QL_SUCCESS) {
4642                         EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4643                         rval = FC_FAILURE;
4644                 }
4645                 break;
4646         case FC_PORT_SET_NODE_ID:
4647                 if (ql_set_rnid_params(ha, cmd->pm_data_len,
4648                     cmd->pm_data_buf) != QL_SUCCESS) {
4649                         EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4650                         rval = FC_FAILURE;
4651                 }
4652                 break;
4653         case FC_PORT_DOWNLOAD_FCODE:
4654                 PORTMANAGE_LOCK(ha);
4655                 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
4656                         rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4657                             (uint32_t)cmd->pm_data_len);
4658                 } else {
4659                         if (cmd->pm_data_buf[0] == 4 &&
4660                             cmd->pm_data_buf[8] == 0 &&
4661                             cmd->pm_data_buf[9] == 0x10 &&
4662                             cmd->pm_data_buf[10] == 0 &&
4663                             cmd->pm_data_buf[11] == 0) {
4664                                 rval = ql_24xx_load_flash(ha,
4665                                     (uint8_t *)cmd->pm_data_buf,
4666                                     (uint32_t)cmd->pm_data_len,
4667                                     ha->flash_fw_addr << 2);
4668                         } else {
4669                                 rval = ql_24xx_load_flash(ha,
4670                                     (uint8_t *)cmd->pm_data_buf,
4671                                     (uint32_t)cmd->pm_data_len, 0);
4672                         }
4673                 }
4674 
4675                 if (rval != QL_SUCCESS) {
4676                         EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4677                         rval = FC_FAILURE;
4678                 } else {
4679                         rval = FC_SUCCESS;
4680                 }
4681                 ql_reset_chip(ha);
4682                 set_flags |= ISP_ABORT_NEEDED;
4683                 PORTMANAGE_UNLOCK(ha);
4684                 break;
4685         default:
4686                 EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4687                 rval = FC_BADCMD;
4688                 break;
4689         }
4690 
4691         /* Wait for suspension to end. */
4692         ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4693         timer = 0;
4694 
4695         while (timer++ < 3000 &&
4696             ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4697                 ql_delay(ha, 10000);
4698         }
4699 
4700         ql_restart_queues(ha);
4701 
4702         if (rval != FC_SUCCESS) {
4703                 EL(ha, "failed, rval = %xh\n", rval);
4704         } else {
4705                 /*EMPTY*/
4706                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4707         }
4708 
4709         return (rval);
4710 }
4711 
4712 static opaque_t
4713 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4714 {
4715         port_id_t               id;
4716         ql_adapter_state_t      *ha;
4717         ql_tgt_t                *tq;
4718 
4719         id.r.rsvd_1 = 0;
4720         id.b24 = d_id.port_id;
4721 
4722         ha = ql_fca_handle_to_state(fca_handle);
4723         if (ha == NULL) {
4724                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4725                     (void *)fca_handle);
4726                 return (NULL);
4727         }
4728         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4729 
4730         tq = ql_d_id_to_queue(ha, id);
4731 
4732         if (tq == NULL) {
4733                 EL(ha, "failed, tq=NULL\n");
4734         } else {
4735                 /*EMPTY*/
4736                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4737         }
4738         return (tq);
4739 }
4740 
4741 /* ************************************************************************ */
4742 /*                      FCA Driver Local Support Functions.                 */
4743 /* ************************************************************************ */
4744 
4745 /*
4746  * ql_cmd_setup
4747  *      Verifies proper command.
4748  *
4749  * Input:
4750  *      fca_handle = handle setup by ql_bind_port().
4751  *      pkt = pointer to fc_packet.
4752  *      rval = pointer for return value.
4753  *
4754  * Returns:
4755  *      Adapter state pointer, NULL = failure.
4756  *
4757  * Context:
4758  *      Kernel context.
4759  */
4760 static ql_adapter_state_t *
4761 ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
4762 {
4763         ql_adapter_state_t      *ha, *pha;
4764         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
4765         ql_tgt_t                *tq;
4766         port_id_t               d_id;
4767 
4768         pkt->pkt_resp_resid = 0;
4769         pkt->pkt_data_resid = 0;
4770 
4771         /* check that the handle is assigned by this FCA */
4772         ha = ql_fca_handle_to_state(fca_handle);
4773         if (ha == NULL) {
4774                 *rval = FC_UNBOUND;
4775                 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4776                     (void *)fca_handle);
4777                 return (NULL);
4778         }
4779         pha = ha->pha;
4780 
4781         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
4782 
4783         if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
4784                 return (ha);
4785         }
4786 
4787         if (!(pha->flags & ONLINE)) {
4788                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
4789                 pkt->pkt_reason = FC_REASON_HW_ERROR;
4790                 *rval = FC_TRANSPORT_ERROR;
4791                 EL(ha, "failed, not online hf=%xh\n", pha->flags);
4792                 return (NULL);
4793         }
4794 
4795         /* Exit on loop down. */
4796         if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
4797             pha->task_daemon_flags & LOOP_DOWN &&
4798             pha->loop_down_timer <= pha->loop_down_abort_time) {
4799                 pkt->pkt_state = FC_PKT_PORT_OFFLINE;
4800                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4801                 *rval = FC_OFFLINE;
4802                 EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
4803                 return (NULL);
4804         }
4805 
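              /* For FCP commands, resolve the target queue and check its state. */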
4806         if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
4807             pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
4808                 tq = (ql_tgt_t *)pkt->pkt_fca_device;
4809                 if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
4810                         d_id.r.rsvd_1 = 0;
4811                         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4812                         tq = ql_d_id_to_queue(ha, d_id);
4813 
4814                         pkt->pkt_fca_device = (opaque_t)tq;
4815                 }
4816 
4817                 if (tq != NULL) {
4818                         DEVICE_QUEUE_LOCK(tq);
4819                         if (tq->flags & (TQF_RSCN_RCVD |
4820                             TQF_NEED_AUTHENTICATION)) {
4821                                 *rval = FC_DEVICE_BUSY;
4822                                 DEVICE_QUEUE_UNLOCK(tq);
4823                                 EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
4824                                     tq->flags, tq->d_id.b24);
4825                                 return (NULL);
4826                         }
4827                         DEVICE_QUEUE_UNLOCK(tq);
4828                 }
4829         }
4830 
4831         /*
4832          * Check DMA pointers.
4833          */
4834         *rval = DDI_SUCCESS;
4835         if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
4836                 QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
4837                 *rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
4838                 if (*rval == DDI_SUCCESS) {
4839                         *rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
4840                 }
4841         }
4842 
4843         if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
4844             pkt->pkt_rsplen != 0) {
4845                 QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
4846                 *rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
4847                 if (*rval == DDI_SUCCESS) {
4848                         *rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
4849                 }
4850         }
4851 
4852         /*
4853          * Minimum branch conditional; Change it with care.
4854          */
4855         if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
4856             (pkt->pkt_datalen != 0)) != 0) {
4857                 QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
4858                 *rval = ddi_check_dma_handle(pkt->pkt_data_dma);
4859                 if (*rval == DDI_SUCCESS) {
4860                         *rval = ddi_check_acc_handle(pkt->pkt_data_acc);
4861                 }
4862         }
4863 
4864         if (*rval != DDI_SUCCESS) {
4865                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
4866                 pkt->pkt_reason = FC_REASON_DMA_ERROR;
4867 
4868                 /* Do command callback. */
4869                 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
4870                         ql_awaken_task_daemon(ha, sp, 0, 0);
4871                 }
4872                 *rval = FC_BADPACKET;
4873                 EL(ha, "failed, bad DMA pointers\n");
4874                 return (NULL);
4875         }
4876 
4877         if (sp->magic_number != QL_FCA_BRAND) {
4878                 *rval = FC_BADPACKET;
4879                 EL(ha, "failed, magic number=%xh\n", sp->magic_number);
4880                 return (NULL);
4881         }
4882         *rval = FC_SUCCESS;
4883 
4884         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4885 
4886         return (ha);
4887 }
4888 
4889 /*
4890  * ql_els_plogi
4891  *      Issue an extended link service port login request.
4892  *
4893  * Input:
4894  *      ha = adapter state pointer.
4895  *      pkt = pointer to fc_packet.
4896  *
4897  * Returns:
4898  *      FC_SUCCESS - the packet was accepted for transport.
4899  *      FC_TRANSPORT_ERROR - a transport error occurred.
4900  *
4901  * Context:
4902  *      Kernel context.
4903  */
4904 static int
4905 ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4906 {
4907         ql_tgt_t                *tq = NULL;
4908         port_id_t               d_id;
4909         la_els_logi_t           acc;
4910         class_svc_param_t       *class3_param;
4911         int                     ret;
4912         int                     rval = FC_SUCCESS;
4913 
4914         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4915             pkt->pkt_cmd_fhdr.d_id);
4916 
4917         TASK_DAEMON_LOCK(ha);
4918         if (!(ha->task_daemon_flags & STATE_ONLINE)) {
4919                 TASK_DAEMON_UNLOCK(ha);
4920                 QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
4921                 return (FC_OFFLINE);
4922         }
4923         TASK_DAEMON_UNLOCK(ha);
4924 
4925         bzero(&acc, sizeof (acc));
4926         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4927 
4928         ret = QL_SUCCESS;
4929 
4930         if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
4931                 /*
4932                  * In a point-to-point topology, the port sends a PLOGI
4933                  * after determining that it has the N_Port login initiative.
4934                  */
4935                 ret = ql_p2p_plogi(ha, pkt);
4936         }
4937         if (ret == QL_CONSUMED) {
4938                 return (ret);
4939         }
4940 
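              /* Log into the port; retry once if the loop ID was in use. */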
4941         switch (ret = ql_login_port(ha, d_id)) {
4942         case QL_SUCCESS:
4943                 tq = ql_d_id_to_queue(ha, d_id);
4944                 break;
4945 
4946         case QL_LOOP_ID_USED:
4947                 if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
4948                         tq = ql_d_id_to_queue(ha, d_id);
4949                 }
4950                 break;
4951 
4952         default:
4953                 break;
4954         }
4955 
4956         if (ret != QL_SUCCESS) {
4957                 /*
4958                  * Invalidate this entry so as to seek a fresh loop ID
4959                  * in case the firmware reassigns it to something else.
4960                  */
4961                 tq = ql_d_id_to_queue(ha, d_id);
4962                 if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
4963                         tq->loop_id = PORT_NO_LOOP_ID;
4964                 }
4965         } else if (tq) {
4966                 (void) ql_get_port_database(ha, tq, PDF_ADISC);
4967         }
4968 
4969         if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
4970             (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {
4971 
4972                 /* Build ACC. */
4973                 acc.ls_code.ls_code = LA_ELS_ACC;
4974                 acc.common_service.fcph_version = 0x2006;
4975                 acc.common_service.cmn_features = 0x8800;
4976                 acc.common_service.rx_bufsize = QL_MAX_FRAME_SIZE(ha);
4977                 acc.common_service.conc_sequences = 0xff;
4978                 acc.common_service.relative_offset = 0x03;
4979                 acc.common_service.e_d_tov = 0x7d0;
4980 
4981                 bcopy((void *)&tq->port_name[0],
4982                     (void *)&acc.nport_ww_name.raw_wwn[0], 8);
4983                 bcopy((void *)&tq->node_name[0],
4984                     (void *)&acc.node_ww_name.raw_wwn[0], 8);
4985 
4986                 class3_param = (class_svc_param_t *)&acc.class_3;
4987                 class3_param->class_valid_svc_opt = 0x8000;
4988                 class3_param->recipient_ctl = tq->class3_recipient_ctl;
4989                 class3_param->rcv_data_size = tq->class3_rcv_data_size;
4990                 class3_param->conc_sequences = tq->class3_conc_sequences;
4991                 class3_param->open_sequences_per_exch =
4992                     tq->class3_open_sequences_per_exch;
4993 
4994                 if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
4995                         acc.ls_code.ls_code = LA_ELS_RJT;
4996                         pkt->pkt_state = FC_PKT_TRAN_BSY;
4997                         pkt->pkt_reason = FC_REASON_XCHG_BSY;
4998                         EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
4999                         rval = FC_TRAN_BUSY;
5000                 } else {
5001                         DEVICE_QUEUE_LOCK(tq);
5002                         tq->logout_sent = 0;
5003                         tq->flags &= ~TQF_NEED_AUTHENTICATION;
5004                         if (CFG_IST(ha, CFG_CTRL_242581)) {
5005                                 tq->flags |= TQF_IIDMA_NEEDED;
5006                         }
5007                         DEVICE_QUEUE_UNLOCK(tq);
5008 
5009                         if (CFG_IST(ha, CFG_CTRL_242581)) {
5010                                 TASK_DAEMON_LOCK(ha);
5011                                 ha->task_daemon_flags |= TD_IIDMA_NEEDED;
5012                                 TASK_DAEMON_UNLOCK(ha);
5013                         }
5014 
5015                         pkt->pkt_state = FC_PKT_SUCCESS;
5016                 }
5017         } else {
5018                 /* Build RJT. */
5019                 acc.ls_code.ls_code = LA_ELS_RJT;
5020 
5021                 switch (ret) {
5022                 case QL_FUNCTION_TIMEOUT:
5023                         pkt->pkt_state = FC_PKT_TIMEOUT;
5024                         pkt->pkt_reason = FC_REASON_HW_ERROR;
5025                         break;
5026 
5027                 case QL_MEMORY_ALLOC_FAILED:
5028                         pkt->pkt_state = FC_PKT_LOCAL_BSY;
5029                         pkt->pkt_reason = FC_REASON_NOMEM;
5030                         rval = FC_TRAN_BUSY;
5031                         break;
5032 
5033                 case QL_FABRIC_NOT_INITIALIZED:
5034                         pkt->pkt_state = FC_PKT_FABRIC_BSY;
5035                         pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5036                         rval = FC_TRAN_BUSY;
5037                         break;
5038 
5039                 default:
5040                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
5041                         pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5042                         break;
5043                 }
5044 
5045                 EL(ha, "Plogi unsuccessful for %xh state %xh reason %xh "
5046                     "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
5047                     pkt->pkt_reason, ret, rval);
5048         }
5049 
5050         if (tq != NULL) {
5051                 DEVICE_QUEUE_LOCK(tq);
5052                 tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
5053                 if (rval == FC_TRAN_BUSY) {
5054                         if (tq->d_id.b24 != BROADCAST_ADDR) {
5055                                 tq->flags |= TQF_NEED_AUTHENTICATION;
5056                         }
5057                 }
5058                 DEVICE_QUEUE_UNLOCK(tq);
5059         }
5060 
5061         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5062             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5063 
5064         if (rval != FC_SUCCESS) {
5065                 EL(ha, "failed, rval = %xh\n", rval);
5066         } else {
5067                 /*EMPTY*/
5068                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5069         }
5070         return (rval);
5071 }
5072 
5073 /*
5074  * ql_p2p_plogi
5075  *      Start an extended link service port login request using
5076  *      an ELS Passthru iocb.
5077  *
5078  * Input:
5079  *      ha = adapter state pointer.
5080  *      pkt = pointer to fc_packet.
5081  *
5082  * Returns:
 *	QL_CONSUMED - the iocb was queued for transport.
5084  *
5085  * Context:
5086  *      Kernel context.
5087  */
5088 static int
5089 ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5090 {
5091         uint16_t        id;
5092         ql_tgt_t        tmp;
5093         ql_tgt_t        *tq = &tmp;
5094         int             rval;
5095         port_id_t       d_id;
5096         ql_srb_t        *sp = (ql_srb_t *)pkt->pkt_fca_private;
5097 
5098         tq->d_id.b.al_pa = 0;
5099         tq->d_id.b.area = 0;
5100         tq->d_id.b.domain = 0;
5101 
5102         /*
5103          * Verify that the port database hasn't moved beneath our feet by
5104          * switching to the appropriate n_port_handle if necessary.  This is
	 * less unpleasant than the error recovery if the wrong one is used.
5106          */
5107         for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
5108                 tq->loop_id = id;
5109                 rval = ql_get_port_database(ha, tq, PDF_NONE);
5110                 EL(ha, "rval=%xh\n", rval);
5111                 /* check all the ones not logged in for possible use */
5112                 if (rval == QL_NOT_LOGGED_IN) {
5113                         if (tq->master_state == PD_STATE_PLOGI_PENDING) {
5114                                 ha->n_port->n_port_handle = tq->loop_id;
5115                                 EL(ha, "n_port_handle =%xh, master state=%x\n",
5116                                     tq->loop_id, tq->master_state);
5117                                 break;
5118                         }
5119                         /*
5120                          * Use a 'port unavailable' entry only
5121                          * if we used it before.
5122                          */
5123                         if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
5124                                 /* if the port_id matches, reuse it */
5125                                 if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
5126                                         EL(ha, "n_port_handle =%xh,"
5127                                             "master state=%xh\n",
5128                                             tq->loop_id, tq->master_state);
5129                                         break;
5130                                 } else if (tq->loop_id ==
5131                                     ha->n_port->n_port_handle) {
					/* avoid a lint error */
5133                                         uint16_t *hndl;
5134                                         uint16_t val;
5135 
5136                                         hndl = &ha->n_port->n_port_handle;
5137                                         val = *hndl;
5138                                         val++;
5139                                         val++;
5140                                         *hndl = val;
5141                                 }
				EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
				    "master state=%x\n", rval, id, tq->loop_id,
				    tq->master_state);
5145                         }
5146 
5147                 }
5148                 if (rval == QL_SUCCESS) {
5149                         if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
5150                                 ha->n_port->n_port_handle = tq->loop_id;
5151                                 EL(ha, "n_port_handle =%xh, master state=%x\n",
5152                                     tq->loop_id, tq->master_state);
5153                                 break;
5154                         }
5155                         EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5156                             "master state=%x\n", rval, id, tq->loop_id,
5157                             tq->master_state);
5158                 }
5159         }
5160         (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);
5161 
5162         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5163         tq = ql_d_id_to_queue(ha, d_id);
5164         ql_timeout_insert(ha, tq, sp);
5165         ql_start_iocb(ha, sp);
5166 
5167         return (QL_CONSUMED);
5168 }
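
/*
 * Illustration (not driver code): ql_p2p_plogi() above clears and reads the
 * 24-bit FC port ID through the driver's port_id_t union, which overlays the
 * packed b24 value with its domain/area/AL_PA bytes.  The hypothetical union
 * below only sketches that idea; the real definition lives in the driver
 * headers and may order the byte members per host endianness.
 *
 *	typedef union example_port_id {
 *		uint32_t b24;		// 24-bit ID in the low three bytes
 *		struct {
 *			uint8_t al_pa;	// member order is endian-dependent
 *			uint8_t area;
 *			uint8_t domain;
 *			uint8_t rsvd;
 *		} b;
 *	} example_port_id_t;
 *
 *	example_port_id_t id;
 *	id.b24 = 0;		// clears domain, area and al_pa at once
 *	id.b.al_pa = 0x01;	// or touch one byte at a time, as above
 */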
5169 
5170 
5171 /*
5172  * ql_els_flogi
 *	Issue an extended link service fabric login request.
5174  *
5175  * Input:
5176  *      ha = adapter state pointer.
5177  *      pkt = pointer to fc_packet.
5178  *
5179  * Returns:
5180  *      FC_SUCCESS - the packet was accepted for transport.
5181  *      FC_TRANSPORT_ERROR - a transport error occurred.
5182  *
5183  * Context:
5184  *      Kernel context.
5185  */
5186 static int
5187 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5188 {
5189         ql_tgt_t                *tq = NULL;
5190         port_id_t               d_id;
5191         la_els_logi_t           acc;
5192         class_svc_param_t       *class3_param;
5193         int                     rval = FC_SUCCESS;
5194         int                     accept = 0;
5195 
5196         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5197             pkt->pkt_cmd_fhdr.d_id);
5198 
5199         bzero(&acc, sizeof (acc));
5200         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5201 
5202         if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5203                 /*
		 * A d_id of zero in a FLOGI accept response in a
		 * point-to-point topology triggers evaluation of N_Port
		 * login initiative.
5206                  */
5207                 pkt->pkt_resp_fhdr.d_id = 0;
5208                 /*
5209                  * An N_Port already logged in with the firmware
5210                  * will have the only database entry.
5211                  */
5212                 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5213                         tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5214                 }
5215 
5216                 if (tq != NULL) {
5217                         /*
5218                          * If the target port has initiative send
5219                          * up a PLOGI about the new device.
5220                          */
5221                         if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5222                             (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5223                             &ha->init_ctrl_blk.cb24.port_name[0] :
5224                             &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5225                                 ha->send_plogi_timer = 3;
5226                         } else {
5227                                 ha->send_plogi_timer = 0;
5228                         }
5229                         pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5230                 } else {
5231                         /*
5232                          * An N_Port not logged in with the firmware will not
5233                          * have a database entry.  We accept anyway and rely
5234                          * on a PLOGI from the upper layers to set the d_id
5235                          * and s_id.
5236                          */
5237                         accept = 1;
5238                 }
5239         } else {
5240                 tq = ql_d_id_to_queue(ha, d_id);
5241         }
	if ((tq != NULL) || (accept != 0)) {
5243                 /* Build ACC. */
5244                 pkt->pkt_state = FC_PKT_SUCCESS;
5245                 class3_param = (class_svc_param_t *)&acc.class_3;
5246 
5247                 acc.ls_code.ls_code = LA_ELS_ACC;
5248                 acc.common_service.fcph_version = 0x2006;
5249                 if (ha->topology & QL_N_PORT) {
5250                         /* clear F_Port indicator */
5251                         acc.common_service.cmn_features = 0x0800;
5252                 } else {
5253                         acc.common_service.cmn_features = 0x1b00;
5254                 }
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			acc.common_service.rx_bufsize = CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb24.max_frame_length[0],
			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
		} else {
			acc.common_service.rx_bufsize = CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb.max_frame_length[0],
			    ha->init_ctrl_blk.cb.max_frame_length[1]);
		}
5262                 acc.common_service.conc_sequences = 0xff;
5263                 acc.common_service.relative_offset = 0x03;
5264                 acc.common_service.e_d_tov = 0x7d0;
5265                 if (accept) {
5266                         /* Use the saved N_Port WWNN and WWPN */
5267                         if (ha->n_port != NULL) {
5268                                 bcopy((void *)&ha->n_port->port_name[0],
5269                                     (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5270                                 bcopy((void *)&ha->n_port->node_name[0],
5271                                     (void *)&acc.node_ww_name.raw_wwn[0], 8);
5272                                 /* mark service options invalid */
5273                                 class3_param->class_valid_svc_opt = 0x0800;
5274                         } else {
5275                                 EL(ha, "ha->n_port is NULL\n");
5276                                 /* Build RJT. */
5277                                 acc.ls_code.ls_code = LA_ELS_RJT;
5278 
5279                                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5280                                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5281                         }
5282                 } else {
5283                         bcopy((void *)&tq->port_name[0],
5284                             (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5285                         bcopy((void *)&tq->node_name[0],
5286                             (void *)&acc.node_ww_name.raw_wwn[0], 8);
5287 
5288                         class3_param = (class_svc_param_t *)&acc.class_3;
5289                         class3_param->class_valid_svc_opt = 0x8800;
5290                         class3_param->recipient_ctl = tq->class3_recipient_ctl;
5291                         class3_param->rcv_data_size = tq->class3_rcv_data_size;
5292                         class3_param->conc_sequences =
5293                             tq->class3_conc_sequences;
5294                         class3_param->open_sequences_per_exch =
5295                             tq->class3_open_sequences_per_exch;
5296                 }
5297         } else {
5298                 /* Build RJT. */
5299                 acc.ls_code.ls_code = LA_ELS_RJT;
5300 
5301                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5302                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5303                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5304         }
5305 
5306         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5307             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5308 
5309         if (rval != FC_SUCCESS) {
5310                 EL(ha, "failed, rval = %xh\n", rval);
5311         } else {
5312                 /*EMPTY*/
5313                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5314         }
5315         return (rval);
5316 }
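
/*
 * Illustration (not driver code): the rx_bufsize assignment in
 * ql_els_flogi() above combines the two max_frame_length bytes from the
 * init control block into one 16-bit value with the driver's
 * CHAR_TO_SHORT() macro.  A minimal equivalent of that byte-pair
 * combination, with hypothetical names and an assumed argument order
 * (low byte first), would be:
 *
 *	static uint16_t
 *	example_char_to_short(uint8_t lsb, uint8_t msb)
 *	{
 *		return ((uint16_t)(((uint16_t)msb << 8) | lsb));
 *	}
 *
 * The actual byte order follows the firmware's init control block layout
 * as encoded by CHAR_TO_SHORT() in the driver headers.
 */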
5317 
5318 /*
5319  * ql_els_logo
 *	Issue an extended link service logout request.
5321  *
5322  * Input:
5323  *      ha = adapter state pointer.
5324  *      pkt = pointer to fc_packet.
5325  *
5326  * Returns:
5327  *      FC_SUCCESS - the packet was accepted for transport.
5328  *      FC_TRANSPORT_ERROR - a transport error occurred.
5329  *
5330  * Context:
5331  *      Kernel context.
5332  */
5333 static int
5334 ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5335 {
5336         port_id_t       d_id;
5337         ql_tgt_t        *tq;
5338         la_els_logo_t   acc;
5339         int             rval = FC_SUCCESS;
5340 
5341         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5342             pkt->pkt_cmd_fhdr.d_id);
5343 
5344         bzero(&acc, sizeof (acc));
5345         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5346 
5347         tq = ql_d_id_to_queue(ha, d_id);
5348         if (tq) {
5349                 DEVICE_QUEUE_LOCK(tq);
5350                 if (tq->d_id.b24 == BROADCAST_ADDR) {
5351                         DEVICE_QUEUE_UNLOCK(tq);
5352                         return (FC_SUCCESS);
5353                 }
5354 
5355                 tq->flags |= TQF_NEED_AUTHENTICATION;
5356 
5357                 do {
5358                         DEVICE_QUEUE_UNLOCK(tq);
5359                         (void) ql_abort_device(ha, tq, 1);
5360 
5361                         /*
			 * Wait for commands to drain in the firmware
			 * (this doesn't take more than a few milliseconds).
5364                          */
5365                         ql_delay(ha, 10000);
5366 
5367                         DEVICE_QUEUE_LOCK(tq);
5368                 } while (tq->outcnt);
5369 
5370                 DEVICE_QUEUE_UNLOCK(tq);
5371         }
5372 
5373         if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
5374                 /* Build ACC. */
5375                 acc.ls_code.ls_code = LA_ELS_ACC;
5376 
5377                 pkt->pkt_state = FC_PKT_SUCCESS;
5378         } else {
5379                 /* Build RJT. */
5380                 acc.ls_code.ls_code = LA_ELS_RJT;
5381 
5382                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5383                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5384                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5385         }
5386 
5387         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5388             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5389 
5390         if (rval != FC_SUCCESS) {
5391                 EL(ha, "failed, rval = %xh\n", rval);
5392         } else {
5393                 /*EMPTY*/
5394                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5395         }
5396         return (rval);
5397 }
5398 
5399 /*
5400  * ql_els_prli
 *	Issue an extended link service process login request.
5402  *
5403  * Input:
5404  *      ha = adapter state pointer.
5405  *      pkt = pointer to fc_packet.
5406  *
5407  * Returns:
5408  *      FC_SUCCESS - the packet was accepted for transport.
5409  *      FC_TRANSPORT_ERROR - a transport error occurred.
5410  *
5411  * Context:
5412  *      Kernel context.
5413  */
5414 static int
5415 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5416 {
5417         ql_tgt_t                *tq;
5418         port_id_t               d_id;
5419         la_els_prli_t           acc;
5420         prli_svc_param_t        *param;
5421         ql_srb_t                *sp = (ql_srb_t *)pkt->pkt_fca_private;
5422         int                     rval = FC_SUCCESS;
5423 
5424         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5425             pkt->pkt_cmd_fhdr.d_id);
5426 
5427         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5428 
5429         tq = ql_d_id_to_queue(ha, d_id);
5430         if (tq != NULL) {
5431                 (void) ql_get_port_database(ha, tq, PDF_NONE);
5432 
5433                 if ((ha->topology & QL_N_PORT) &&
5434                     (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5435                         ql_timeout_insert(ha, tq, sp);
5436                         ql_start_iocb(ha, sp);
5437                         rval = QL_CONSUMED;
5438                 } else {
5439                         /* Build ACC. */
5440                         bzero(&acc, sizeof (acc));
5441                         acc.ls_code = LA_ELS_ACC;
5442                         acc.page_length = 0x10;
5443                         acc.payload_length = tq->prli_payload_length;
5444 
5445                         param = (prli_svc_param_t *)&acc.service_params[0];
5446                         param->type = 0x08;
5447                         param->rsvd = 0x00;
5448                         param->process_assoc_flags = tq->prli_svc_param_word_0;
5449                         param->process_flags = tq->prli_svc_param_word_3;
5450 
5451                         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5452                             (uint8_t *)pkt->pkt_resp, sizeof (acc),
5453                             DDI_DEV_AUTOINCR);
5454 
5455                         pkt->pkt_state = FC_PKT_SUCCESS;
5456                 }
5457         } else {
5458                 la_els_rjt_t rjt;
5459 
5460                 /* Build RJT. */
5461                 bzero(&rjt, sizeof (rjt));
5462                 rjt.ls_code.ls_code = LA_ELS_RJT;
5463 
5464                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5465                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5466 
5467                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5468                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5469                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5470         }
5471 
5472         if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5473                 EL(ha, "failed, rval = %xh\n", rval);
5474         } else {
5475                 /*EMPTY*/
5476                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5477         }
5478         return (rval);
5479 }
5480 
5481 /*
5482  * ql_els_prlo
 *	Issue an extended link service process logout request.
5484  *
5485  * Input:
5486  *      ha = adapter state pointer.
5487  *      pkt = pointer to fc_packet.
5488  *
5489  * Returns:
5490  *      FC_SUCCESS - the packet was accepted for transport.
5491  *      FC_TRANSPORT_ERROR - a transport error occurred.
5492  *
5493  * Context:
5494  *      Kernel context.
5495  */
5496 /* ARGSUSED */
5497 static int
5498 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5499 {
5500         la_els_prli_t   acc;
5501         int             rval = FC_SUCCESS;
5502 
5503         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5504             pkt->pkt_cmd_fhdr.d_id);
5505 
5506         /* Build ACC. */
5507         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5508             (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5509 
5510         acc.ls_code = LA_ELS_ACC;
5511         acc.service_params[2] = 1;
5512 
5513         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5514             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5515 
5516         pkt->pkt_state = FC_PKT_SUCCESS;
5517 
5518         if (rval != FC_SUCCESS) {
5519                 EL(ha, "failed, rval = %xh\n", rval);
5520         } else {
5521                 /*EMPTY*/
5522                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5523         }
5524         return (rval);
5525 }
5526 
5527 /*
5528  * ql_els_adisc
 *	Issue an extended link service address discovery request.
5530  *
5531  * Input:
5532  *      ha = adapter state pointer.
5533  *      pkt = pointer to fc_packet.
5534  *
5535  * Returns:
5536  *      FC_SUCCESS - the packet was accepted for transport.
5537  *      FC_TRANSPORT_ERROR - a transport error occurred.
5538  *
5539  * Context:
5540  *      Kernel context.
5541  */
5542 static int
5543 ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5544 {
5545         ql_dev_id_list_t        *list;
5546         uint32_t                list_size;
5547         ql_link_t               *link;
5548         ql_tgt_t                *tq;
5549         ql_lun_t                *lq;
5550         port_id_t               d_id;
5551         la_els_adisc_t          acc;
5552         uint16_t                index, loop_id;
5553         ql_mbx_data_t           mr;
5554         int                     rval = FC_SUCCESS;
5555 
5556         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5557 
5558         bzero(&acc, sizeof (acc));
5559         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5560 
5561         /*
	 * MBC_GET_PORT_DATABASE causes the firmware to send an
	 * ADISC to the device.
5564          */
5565         index = ql_alpa_to_index[d_id.b.al_pa];
5566         tq = NULL;
5567         for (link = ha->dev[index].first; link != NULL; link = link->next) {
5568                 tq = link->base_address;
5569                 if (tq->d_id.b24 == d_id.b24) {
5570                         break;
5571                 } else {
5572                         tq = NULL;
5573                 }
5574         }
5575 
5576         if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
5577                 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
5578                 list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
5579 
5580                 if (list != NULL &&
5581                     ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
5582                     QL_SUCCESS) {
5583 
5584                         for (index = 0; index < mr.mb[1]; index++) {
5585                                 ql_dev_list(ha, list, index, &d_id, &loop_id);
5586 
5587                                 if (tq->d_id.b24 == d_id.b24) {
5588                                         tq->loop_id = loop_id;
5589                                         break;
5590                                 }
5591                         }
5592                 } else {
5593                         cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
5594                             QL_NAME, ha->instance, d_id.b24);
5595                         tq = NULL;
5596                 }
5597                 if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
5598                         cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
5599                             QL_NAME, ha->instance, tq->d_id.b24);
5600                         tq = NULL;
5601                 }
5602 
5603                 if (list != NULL) {
5604                         kmem_free(list, list_size);
5605                 }
5606         }
5607 
5608         if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
5609             ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {
5610 
5611                 /* Build ACC. */
5612 
5613                 DEVICE_QUEUE_LOCK(tq);
5614                 tq->flags &= ~TQF_NEED_AUTHENTICATION;
5615                 if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
5616                         for (link = tq->lun_queues.first; link != NULL;
5617                             link = link->next) {
5618                                 lq = link->base_address;
5619 
5620                                 if (lq->cmd.first != NULL) {
5621                                         ql_next(ha, lq);
5622                                         DEVICE_QUEUE_LOCK(tq);
5623                                 }
5624                         }
5625                 }
5626                 DEVICE_QUEUE_UNLOCK(tq);
5627 
5628                 acc.ls_code.ls_code = LA_ELS_ACC;
5629                 acc.hard_addr.hard_addr = tq->hard_addr.b24;
5630 
5631                 bcopy((void *)&tq->port_name[0],
5632                     (void *)&acc.port_wwn.raw_wwn[0], 8);
5633                 bcopy((void *)&tq->node_name[0],
5634                     (void *)&acc.node_wwn.raw_wwn[0], 8);
5635 
5636                 acc.nport_id.port_id = tq->d_id.b24;
5637 
5638                 pkt->pkt_state = FC_PKT_SUCCESS;
5639         } else {
5640                 /* Build RJT. */
5641                 acc.ls_code.ls_code = LA_ELS_RJT;
5642 
5643                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5644                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5645                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5646         }
5647 
5648         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5649             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5650 
5651         if (rval != FC_SUCCESS) {
5652                 EL(ha, "failed, rval = %xh\n", rval);
5653         } else {
5654                 /*EMPTY*/
5655                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5656         }
5657         return (rval);
5658 }
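
/*
 * Illustration (not driver code): the device lookup at the top of
 * ql_els_adisc() is a hash-bucket walk -- the AL_PA byte selects a bucket
 * through ql_alpa_to_index[], and the full 24-bit d_id disambiguates
 * entries sharing that bucket.  ql_els_rnid(), ql_els_rls() and
 * ql_login_port() below use the same pattern.  A generic sketch of the
 * idea with hypothetical types:
 *
 *	struct example_node {
 *		struct example_node *next;
 *		uint32_t key;			// full 24-bit port ID
 *		void *payload;
 *	};
 *
 *	static void *
 *	example_lookup(struct example_node **bucket, uint8_t hash,
 *	    uint32_t key)
 *	{
 *		struct example_node *n;
 *
 *		for (n = bucket[hash]; n != NULL; n = n->next) {
 *			if (n->key == key) {
 *				return (n->payload);
 *			}
 *		}
 *		return (NULL);
 *	}
 */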
5659 
5660 /*
5661  * ql_els_linit
 *	Issue an extended link service loop initialize request.
5663  *
5664  * Input:
5665  *      ha = adapter state pointer.
5666  *      pkt = pointer to fc_packet.
5667  *
5668  * Returns:
5669  *      FC_SUCCESS - the packet was accepted for transport.
5670  *      FC_TRANSPORT_ERROR - a transport error occurred.
5671  *
5672  * Context:
5673  *      Kernel context.
5674  */
5675 static int
5676 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
5677 {
5678         ddi_dma_cookie_t        *cp;
5679         uint32_t                cnt;
5680         conv_num_t              n;
5681         port_id_t               d_id;
5682         int                     rval = FC_SUCCESS;
5683 
5684         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5685 
5686         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5687         if (ha->topology & QL_SNS_CONNECTION) {
5688                 fc_linit_req_t els;
5689                 lfa_cmd_t lfa;
5690 
5691                 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5692                     (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5693 
5694                 /* Setup LFA mailbox command data. */
5695                 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5696 
5697                 lfa.resp_buffer_length[0] = 4;
5698 
5699                 cp = pkt->pkt_resp_cookie;
5700                 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5701                         n.size64 = (uint64_t)cp->dmac_laddress;
5702                         LITTLE_ENDIAN_64(&n.size64);
5703                 } else {
5704                         n.size32[0] = LSD(cp->dmac_laddress);
5705                         LITTLE_ENDIAN_32(&n.size32[0]);
5706                         n.size32[1] = MSD(cp->dmac_laddress);
5707                         LITTLE_ENDIAN_32(&n.size32[1]);
5708                 }
5709 
5710                 /* Set buffer address. */
5711                 for (cnt = 0; cnt < 8; cnt++) {
5712                         lfa.resp_buffer_address[cnt] = n.size8[cnt];
5713                 }
5714 
5715                 lfa.subcommand_length[0] = 4;
5716                 n.size32[0] = d_id.b24;
5717                 LITTLE_ENDIAN_32(&n.size32[0]);
5718                 lfa.addr[0] = n.size8[0];
5719                 lfa.addr[1] = n.size8[1];
5720                 lfa.addr[2] = n.size8[2];
5721                 lfa.subcommand[1] = 0x70;
5722                 lfa.payload[2] = els.func;
5723                 lfa.payload[4] = els.lip_b3;
5724                 lfa.payload[5] = els.lip_b4;
5725 
5726                 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5727                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
5728                 } else {
5729                         pkt->pkt_state = FC_PKT_SUCCESS;
5730                 }
5731         } else {
5732                 fc_linit_resp_t rjt;
5733 
5734                 /* Build RJT. */
5735                 bzero(&rjt, sizeof (rjt));
5736                 rjt.ls_code.ls_code = LA_ELS_RJT;
5737 
5738                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5739                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5740 
5741                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5742                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5743                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5744         }
5745 
5746         if (rval != FC_SUCCESS) {
5747                 EL(ha, "failed, rval = %xh\n", rval);
5748         } else {
5749                 /*EMPTY*/
5750                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5751         }
5752         return (rval);
5753 }
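
/*
 * Illustration (not driver code): ql_els_linit() above, like ql_els_lpc()
 * and ql_els_lsts() below, places the response buffer's DMA address into
 * lfa.resp_buffer_address[] as eight little-endian bytes, using the
 * conv_num_t union and the LITTLE_ENDIAN_64()/LITTLE_ENDIAN_32() macros.
 * An endian-neutral sketch of the same conversion, with a hypothetical
 * helper name:
 *
 *	static void
 *	example_addr_to_le_bytes(uint64_t addr, uint8_t dst[8])
 *	{
 *		int i;
 *
 *		for (i = 0; i < 8; i++) {
 *			dst[i] = (uint8_t)(addr >> (i * 8));
 *		}
 *	}
 *
 * The shift form needs no conditional byte swap; the driver instead
 * stores through the union and swaps only on big-endian hosts.
 */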
5754 
5755 /*
5756  * ql_els_lpc
 *	Issue an extended link service loop control request.
5758  *
5759  * Input:
5760  *      ha = adapter state pointer.
5761  *      pkt = pointer to fc_packet.
5762  *
5763  * Returns:
5764  *      FC_SUCCESS - the packet was accepted for transport.
5765  *      FC_TRANSPORT_ERROR - a transport error occurred.
5766  *
5767  * Context:
5768  *      Kernel context.
5769  */
5770 static int
5771 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5772 {
5773         ddi_dma_cookie_t        *cp;
5774         uint32_t                cnt;
5775         conv_num_t              n;
5776         port_id_t               d_id;
5777         int                     rval = FC_SUCCESS;
5778 
5779         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5780 
5781         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5782         if (ha->topology & QL_SNS_CONNECTION) {
5783                 ql_lpc_t els;
5784                 lfa_cmd_t lfa;
5785 
5786                 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5787                     (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5788 
5789                 /* Setup LFA mailbox command data. */
5790                 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5791 
5792                 lfa.resp_buffer_length[0] = 4;
5793 
5794                 cp = pkt->pkt_resp_cookie;
5795                 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5796                         n.size64 = (uint64_t)(cp->dmac_laddress);
5797                         LITTLE_ENDIAN_64(&n.size64);
5798                 } else {
5799                         n.size32[0] = cp->dmac_address;
5800                         LITTLE_ENDIAN_32(&n.size32[0]);
5801                         n.size32[1] = 0;
5802                 }
5803 
5804                 /* Set buffer address. */
5805                 for (cnt = 0; cnt < 8; cnt++) {
5806                         lfa.resp_buffer_address[cnt] = n.size8[cnt];
5807                 }
5808 
5809                 lfa.subcommand_length[0] = 20;
5810                 n.size32[0] = d_id.b24;
5811                 LITTLE_ENDIAN_32(&n.size32[0]);
5812                 lfa.addr[0] = n.size8[0];
5813                 lfa.addr[1] = n.size8[1];
5814                 lfa.addr[2] = n.size8[2];
5815                 lfa.subcommand[1] = 0x71;
5816                 lfa.payload[4] = els.port_control;
5817                 bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);
5818 
5819                 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5820                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
5821                 } else {
5822                         pkt->pkt_state = FC_PKT_SUCCESS;
5823                 }
5824         } else {
5825                 ql_lpc_resp_t rjt;
5826 
5827                 /* Build RJT. */
5828                 bzero(&rjt, sizeof (rjt));
5829                 rjt.ls_code.ls_code = LA_ELS_RJT;
5830 
5831                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5832                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5833 
5834                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5835                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5836                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5837         }
5838 
5839         if (rval != FC_SUCCESS) {
5840                 EL(ha, "failed, rval = %xh\n", rval);
5841         } else {
5842                 /*EMPTY*/
5843                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5844         }
5845         return (rval);
5846 }
5847 
5848 /*
5849  * ql_els_lsts
 *	Issue an extended link service loop status request.
5851  *
5852  * Input:
5853  *      ha = adapter state pointer.
5854  *      pkt = pointer to fc_packet.
5855  *
5856  * Returns:
5857  *      FC_SUCCESS - the packet was accepted for transport.
5858  *      FC_TRANSPORT_ERROR - a transport error occurred.
5859  *
5860  * Context:
5861  *      Kernel context.
5862  */
5863 static int
5864 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
5865 {
5866         ddi_dma_cookie_t        *cp;
5867         uint32_t                cnt;
5868         conv_num_t              n;
5869         port_id_t               d_id;
5870         int                     rval = FC_SUCCESS;
5871 
5872         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5873 
5874         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5875         if (ha->topology & QL_SNS_CONNECTION) {
5876                 fc_lsts_req_t els;
5877                 lfa_cmd_t lfa;
5878 
5879                 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5880                     (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5881 
5882                 /* Setup LFA mailbox command data. */
5883                 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5884 
5885                 lfa.resp_buffer_length[0] = 84;
5886 
5887                 cp = pkt->pkt_resp_cookie;
5888                 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5889                         n.size64 = cp->dmac_laddress;
5890                         LITTLE_ENDIAN_64(&n.size64);
5891                 } else {
5892                         n.size32[0] = cp->dmac_address;
5893                         LITTLE_ENDIAN_32(&n.size32[0]);
5894                         n.size32[1] = 0;
5895                 }
5896 
5897                 /* Set buffer address. */
5898                 for (cnt = 0; cnt < 8; cnt++) {
5899                         lfa.resp_buffer_address[cnt] = n.size8[cnt];
5900                 }
5901 
5902                 lfa.subcommand_length[0] = 2;
5903                 n.size32[0] = d_id.b24;
5904                 LITTLE_ENDIAN_32(&n.size32[0]);
5905                 lfa.addr[0] = n.size8[0];
5906                 lfa.addr[1] = n.size8[1];
5907                 lfa.addr[2] = n.size8[2];
5908                 lfa.subcommand[1] = 0x72;
5909 
5910                 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5911                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
5912                 } else {
5913                         pkt->pkt_state = FC_PKT_SUCCESS;
5914                 }
5915         } else {
5916                 fc_lsts_resp_t rjt;
5917 
5918                 /* Build RJT. */
5919                 bzero(&rjt, sizeof (rjt));
5920                 rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
5921 
5922                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5923                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5924 
5925                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5926                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5927                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5928         }
5929 
5930         if (rval != FC_SUCCESS) {
5931                 EL(ha, "failed=%xh\n", rval);
5932         } else {
5933                 /*EMPTY*/
5934                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5935         }
5936         return (rval);
5937 }
5938 
5939 /*
5940  * ql_els_scr
 *	Issue an extended link service state change registration request.
5942  *
5943  * Input:
5944  *      ha = adapter state pointer.
5945  *      pkt = pointer to fc_packet.
5946  *
5947  * Returns:
5948  *      FC_SUCCESS - the packet was accepted for transport.
5949  *      FC_TRANSPORT_ERROR - a transport error occurred.
5950  *
5951  * Context:
5952  *      Kernel context.
5953  */
5954 static int
5955 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5956 {
5957         fc_scr_resp_t   acc;
5958         int             rval = FC_SUCCESS;
5959 
5960         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5961 
5962         bzero(&acc, sizeof (acc));
5963         if (ha->topology & QL_SNS_CONNECTION) {
5964                 fc_scr_req_t els;
5965 
5966                 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5967                     (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5968 
5969                 if (ql_send_change_request(ha, els.scr_func) ==
5970                     QL_SUCCESS) {
5971                         /* Build ACC. */
5972                         acc.scr_acc = LA_ELS_ACC;
5973 
5974                         pkt->pkt_state = FC_PKT_SUCCESS;
5975                 } else {
5976                         /* Build RJT. */
5977                         acc.scr_acc = LA_ELS_RJT;
5978 
5979                         pkt->pkt_state = FC_PKT_TRAN_ERROR;
5980                         pkt->pkt_reason = FC_REASON_HW_ERROR;
5981                         EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5982                 }
5983         } else {
5984                 /* Build RJT. */
5985                 acc.scr_acc = LA_ELS_RJT;
5986 
5987                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5988                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5989                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5990         }
5991 
5992         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5993             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5994 
5995         if (rval != FC_SUCCESS) {
5996                 EL(ha, "failed, rval = %xh\n", rval);
5997         } else {
5998                 /*EMPTY*/
5999                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6000         }
6001         return (rval);
6002 }
6003 
6004 /*
6005  * ql_els_rscn
 *	Issue an extended link service register state
6007  *      change notification request.
6008  *
6009  * Input:
6010  *      ha = adapter state pointer.
6011  *      pkt = pointer to fc_packet.
6012  *
6013  * Returns:
6014  *      FC_SUCCESS - the packet was accepted for transport.
6015  *      FC_TRANSPORT_ERROR - a transport error occurred.
6016  *
6017  * Context:
6018  *      Kernel context.
6019  */
6020 static int
6021 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
6022 {
6023         ql_rscn_resp_t  acc;
6024         int             rval = FC_SUCCESS;
6025 
6026         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6027 
6028         bzero(&acc, sizeof (acc));
6029         if (ha->topology & QL_SNS_CONNECTION) {
6030                 /* Build ACC. */
6031                 acc.scr_acc = LA_ELS_ACC;
6032 
6033                 pkt->pkt_state = FC_PKT_SUCCESS;
6034         } else {
6035                 /* Build RJT. */
6036                 acc.scr_acc = LA_ELS_RJT;
6037 
6038                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6039                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6040                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6041         }
6042 
6043         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6044             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6045 
6046         if (rval != FC_SUCCESS) {
6047                 EL(ha, "failed, rval = %xh\n", rval);
6048         } else {
6049                 /*EMPTY*/
6050                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6051         }
6052         return (rval);
6053 }
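
/*
 * Illustration (not driver code): ql_els_scr() and ql_els_rscn() above
 * show the response pattern shared by most handlers in this file --
 * build an ACC or RJT payload on the stack, set pkt_state/pkt_reason,
 * then copy the payload into the ULP's response buffer through
 * ddi_rep_put8() so the DDI access handle is honored.  A condensed
 * sketch of that shape, with a hypothetical payload type:
 *
 *	typedef struct example_els_resp {
 *		uint32_t ls_code;
 *	} example_els_resp_t;
 *
 *	static void
 *	example_post_resp(fc_packet_t *pkt, int ok)
 *	{
 *		example_els_resp_t resp;
 *
 *		bzero(&resp, sizeof (resp));
 *		resp.ls_code = ok ? LA_ELS_ACC : LA_ELS_RJT;
 *		pkt->pkt_state = ok ? FC_PKT_SUCCESS : FC_PKT_TRAN_ERROR;
 *		if (!ok) {
 *			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
 *		}
 *
 *		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&resp,
 *		    (uint8_t *)pkt->pkt_resp, sizeof (resp),
 *		    DDI_DEV_AUTOINCR);
 *	}
 */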
6054 
6055 /*
6056  * ql_els_farp_req
6057  *      Issue FC Address Resolution Protocol (FARP)
6058  *      extended link service request.
6059  *
6060  *      Note: not supported.
6061  *
6062  * Input:
6063  *      ha = adapter state pointer.
6064  *      pkt = pointer to fc_packet.
6065  *
6066  * Returns:
6067  *      FC_SUCCESS - the packet was accepted for transport.
6068  *      FC_TRANSPORT_ERROR - a transport error occurred.
6069  *
6070  * Context:
6071  *      Kernel context.
6072  */
6073 static int
6074 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6075 {
6076         ql_acc_rjt_t    acc;
6077         int             rval = FC_SUCCESS;
6078 
6079         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6080 
6081         bzero(&acc, sizeof (acc));
6082 
6083         /* Build ACC. */
6084         acc.ls_code.ls_code = LA_ELS_ACC;
6085 
6086         pkt->pkt_state = FC_PKT_SUCCESS;
6087 
6088         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6089             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6090 
6091         if (rval != FC_SUCCESS) {
6092                 EL(ha, "failed, rval = %xh\n", rval);
6093         } else {
6094                 /*EMPTY*/
6095                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6096         }
6097         return (rval);
6098 }
6099 
6100 /*
6101  * ql_els_farp_reply
6102  *      Issue FC Address Resolution Protocol (FARP)
6103  *      extended link service reply.
6104  *
6105  *      Note: not supported.
6106  *
6107  * Input:
6108  *      ha = adapter state pointer.
6109  *      pkt = pointer to fc_packet.
6110  *
6111  * Returns:
6112  *      FC_SUCCESS - the packet was accepted for transport.
6113  *      FC_TRANSPORT_ERROR - a transport error occurred.
6114  *
6115  * Context:
6116  *      Kernel context.
6117  */
6118 /* ARGSUSED */
6119 static int
6120 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6121 {
6122         ql_acc_rjt_t    acc;
6123         int             rval = FC_SUCCESS;
6124 
6125         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6126 
6127         bzero(&acc, sizeof (acc));
6128 
6129         /* Build ACC. */
6130         acc.ls_code.ls_code = LA_ELS_ACC;
6131 
6132         pkt->pkt_state = FC_PKT_SUCCESS;
6133 
6134         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6135             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6136 
6137         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6138 
6139         return (rval);
6140 }
6141 
6142 static int
6143 ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
6144 {
6145         uchar_t                 *rnid_acc;
6146         port_id_t               d_id;
6147         ql_link_t               *link;
6148         ql_tgt_t                *tq;
6149         uint16_t                index;
6150         la_els_rnid_acc_t       acc;
6151         la_els_rnid_t           *req;
6152         size_t                  req_len;
6153 
6154         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6155 
	req_len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
6157         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6158         index = ql_alpa_to_index[d_id.b.al_pa];
6159 
6160         tq = NULL;
6161         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6162                 tq = link->base_address;
6163                 if (tq->d_id.b24 == d_id.b24) {
6164                         break;
6165                 } else {
6166                         tq = NULL;
6167                 }
6168         }
6169 
6170         /* Allocate memory for rnid status block */
6171         rnid_acc = kmem_zalloc(req_len, KM_SLEEP);
6172 
6173         bzero(&acc, sizeof (acc));
6174 
6175         req = (la_els_rnid_t *)pkt->pkt_cmd;
6176         if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6177             (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
6178             (caddr_t)rnid_acc) != QL_SUCCESS)) {
6179 
6180                 kmem_free(rnid_acc, req_len);
6181                 acc.ls_code.ls_code = LA_ELS_RJT;
6182 
6183                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6184                     (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6185 
6186                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6187                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6188                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6189 
6190                 return (FC_FAILURE);
6191         }
6192 
6193         acc.ls_code.ls_code = LA_ELS_ACC;
6194         bcopy(rnid_acc, &acc.hdr, req_len);
6195         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6196             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6197 
6198         kmem_free(rnid_acc, req_len);
6199         pkt->pkt_state = FC_PKT_SUCCESS;
6200 
6201         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6202 
6203         return (FC_SUCCESS);
6204 }
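
/*
 * Illustration (not driver code): ql_els_rnid() above and ql_els_rls()
 * below both use the kmem_zalloc()/kmem_free() pairing for a short-lived
 * response buffer.  The size passed to kmem_zalloc() must be passed back
 * to kmem_free(), so it is kept for the life of the buffer; KM_SLEEP
 * means the allocation waits rather than failing.  A minimal sketch:
 *
 *	size_t len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
 *	uchar_t *buf = kmem_zalloc(len, KM_SLEEP);	// never NULL
 *
 *	// ... fill and consume buf ...
 *
 *	kmem_free(buf, len);
 */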
6205 
6206 static int
6207 ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
6208 {
6209         fc_rls_acc_t            *rls_acc;
6210         port_id_t               d_id;
6211         ql_link_t               *link;
6212         ql_tgt_t                *tq;
6213         uint16_t                index;
6214         la_els_rls_acc_t        acc;
6215 
6216         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6217 
6218         d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6219         index = ql_alpa_to_index[d_id.b.al_pa];
6220 
6221         tq = NULL;
6222         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6223                 tq = link->base_address;
6224                 if (tq->d_id.b24 == d_id.b24) {
6225                         break;
6226                 } else {
6227                         tq = NULL;
6228                 }
6229         }
6230 
6231         /* Allocate memory for link error status block */
6232         rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
6233 
6234         bzero(&acc, sizeof (la_els_rls_acc_t));
6235 
6236         if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6237             (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
6238             (caddr_t)rls_acc, 0) != QL_SUCCESS)) {
6239 
6240                 kmem_free(rls_acc, sizeof (*rls_acc));
6241                 acc.ls_code.ls_code = LA_ELS_RJT;
6242 
6243                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6244                     (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6245 
6246                 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6247                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6248                 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6249 
6250                 return (FC_FAILURE);
6251         }
6252 
6253         LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
6254         LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
6255         LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
6256         LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
6257         LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);
6258 
6259         acc.ls_code.ls_code = LA_ELS_ACC;
6260         acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
6261         acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
6262         acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
6263         acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
6264         acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
6265         ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6266             (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6267 
6268         kmem_free(rls_acc, sizeof (*rls_acc));
6269         pkt->pkt_state = FC_PKT_SUCCESS;
6270 
6271         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6272 
6273         return (FC_SUCCESS);
6274 }
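
/*
 * Illustration (not driver code): the LITTLE_ENDIAN_32() calls in
 * ql_els_rls() convert the firmware-supplied (little-endian) link error
 * counters in place before they are copied into the ACC; the macros are
 * no-ops on little-endian hosts and byte-swap on big-endian hosts.  An
 * endian-neutral sketch of the same conversion from raw bytes, with a
 * hypothetical helper name:
 *
 *	static uint32_t
 *	example_le32_to_host(const uint8_t *p)
 *	{
 *		return ((uint32_t)p[0] | ((uint32_t)p[1] << 8) |
 *		    ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24));
 *	}
 */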
6275 
6276 static int
6277 ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
6278 {
6279         port_id_t       d_id;
6280         ql_srb_t        *sp;
6281         fc_unsol_buf_t  *ubp;
6282         ql_link_t       *link, *next_link;
6283         int             rval = FC_SUCCESS;
6284         int             cnt = 5;
6285 
6286         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6287 
6288         /*
	 * We need to ensure that tq->outcnt == 0; otherwise any
	 * command completed with PKT_PORT_OFFLINE after the PLOGI
	 * will confuse the ULPs.
6292          */
6293 
6294         DEVICE_QUEUE_LOCK(tq);
6295         do {
6296                 /*
		 * Wait for the commands to drain.  If they do
		 * not drain, the transport will retry the
		 * PLOGI after a few seconds.
6300                  */
6301                 if (tq->outcnt != 0) {
6302                         rval = FC_TRAN_BUSY;
6303                         DEVICE_QUEUE_UNLOCK(tq);
6304                         ql_delay(ha, 10000);
6305                         DEVICE_QUEUE_LOCK(tq);
6306                         cnt--;
6307                         if (!cnt) {
6308                                 cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
6309                                     " for %xh outcount %xh", QL_NAME,
6310                                     ha->instance, tq->d_id.b24, tq->outcnt);
6311                         }
6312                 } else {
6313                         rval = FC_SUCCESS;
6314                         break;
6315                 }
6316         } while (cnt > 0);
6317         DEVICE_QUEUE_UNLOCK(tq);
6318 
6319         /*
	 * Return if busy or if the PLOGI was asynchronous.
6321          */
6322         if ((rval != FC_SUCCESS) ||
6323             (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
6324             pkt->pkt_comp)) {
6325                 QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
6326                     ha->instance);
6327                 return (rval);
6328         }
6329 
6330         /*
	 * Give the task daemon sufficient time so that, by the time
	 * the transport retries the PLOGI, it will have flushed the
	 * callback queue.
6334          */
6335         TASK_DAEMON_LOCK(ha);
6336         for (link = ha->callback_queue.first; link != NULL;
6337             link = next_link) {
6338                 next_link = link->next;
6339                 sp = link->base_address;
6340                 if (sp->flags & SRB_UB_CALLBACK) {
6341                         ubp = ha->ub_array[sp->handle];
6342                         d_id.b24 = ubp->ub_frame.s_id;
6343                 } else {
6344                         d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
6345                 }
6346                 if (tq->d_id.b24 == d_id.b24) {
6347                         cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
6348                             ha->instance, tq->d_id.b24);
6349                         rval = FC_TRAN_BUSY;
6350                         break;
6351                 }
6352         }
6353         TASK_DAEMON_UNLOCK(ha);
6354 
6355         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6356 
6357         return (rval);
6358 }
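
/*
 * Illustration (not driver code): the drain loop at the top of
 * ql_busy_plogi() is a bounded wait -- drop the device queue lock, delay
 * briefly, re-take the lock, and report busy if outstanding commands have
 * not drained after a fixed number of attempts.  The bare shape of that
 * pattern, with hypothetical names:
 *
 *	int tries = 5;
 *	int busy;
 *
 *	EXAMPLE_LOCK(q);
 *	while (q->outstanding != 0 && tries-- > 0) {
 *		EXAMPLE_UNLOCK(q);
 *		example_delay(10000);
 *		EXAMPLE_LOCK(q);
 *	}
 *	busy = (q->outstanding != 0);
 *	EXAMPLE_UNLOCK(q);
 */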
6359 
6360 /*
6361  * ql_login_port
6362  *      Logs in a device if not already logged in.
6363  *
6364  * Input:
6365  *      ha = adapter state pointer.
6366  *      d_id = 24 bit port ID.
6367  *      DEVICE_QUEUE_LOCK must be released.
6368  *
6369  * Returns:
6370  *      QL local function return status code.
6371  *
6372  * Context:
6373  *      Kernel context.
6374  */
6375 static int
6376 ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
6377 {
6378         ql_adapter_state_t      *vha;
6379         ql_link_t               *link;
6380         uint16_t                index;
6381         ql_tgt_t                *tq, *tq2;
6382         uint16_t                loop_id, first_loop_id, last_loop_id;
6383         int                     rval = QL_SUCCESS;
6384 
6385         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6386             d_id.b24);
6387 
6388         /* Get head queue index. */
6389         index = ql_alpa_to_index[d_id.b.al_pa];
6390 
	/* Check whether the device already has a queue. */
6392         tq = NULL;
6393         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6394                 tq = link->base_address;
6395                 if (tq->d_id.b24 == d_id.b24) {
6396                         loop_id = tq->loop_id;
6397                         break;
6398                 } else {
6399                         tq = NULL;
6400                 }
6401         }
6402 
	/* Stop issuing any I/O and unsolicited LOGOs. */
6404         if ((tq != NULL) && (!(ddi_in_panic()))) {
6405                 DEVICE_QUEUE_LOCK(tq);
6406                 tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
6407                 tq->flags &= ~TQF_RSCN_RCVD;
6408                 DEVICE_QUEUE_UNLOCK(tq);
6409         }
6410         if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
6411             !(tq->flags & TQF_FABRIC_DEVICE)) {
6412                 loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
6413         }
6414 
6415         /* Special case for Nameserver */
6416         if (d_id.b24 == 0xFFFFFC) {
6417                 loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
6418                     SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
6419                 if (tq == NULL) {
6420                         ADAPTER_STATE_LOCK(ha);
6421                         tq = ql_dev_init(ha, d_id, loop_id);
6422                         ADAPTER_STATE_UNLOCK(ha);
6423                         if (tq == NULL) {
6424                                 EL(ha, "failed=%xh, d_id=%xh\n",
6425                                     QL_FUNCTION_FAILED, d_id.b24);
6426                                 return (QL_FUNCTION_FAILED);
6427                         }
6428                 }
6429                 if (!(CFG_IST(ha, CFG_CTRL_8021))) {
6430                         rval = ql_login_fabric_port(ha, tq, loop_id);
6431                         if (rval == QL_SUCCESS) {
6432                                 tq->loop_id = loop_id;
6433                                 tq->flags |= TQF_FABRIC_DEVICE;
6434                                 (void) ql_get_port_database(ha, tq, PDF_NONE);
6435                         }
6436                 } else {
6437                         ha->topology = (uint8_t)
6438                             (ha->topology | QL_SNS_CONNECTION);
6439                 }
	/* Check whether the device is already logged in. */
6441         } else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
6442                 if (tq->flags & TQF_FABRIC_DEVICE) {
6443                         rval = ql_login_fabric_port(ha, tq, loop_id);
6444                         if (rval == QL_PORT_ID_USED) {
6445                                 rval = QL_SUCCESS;
6446                         }
6447                 } else if (LOCAL_LOOP_ID(loop_id)) {
6448                         rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
6449                             (tq->flags & TQF_INITIATOR_DEVICE ?
6450                             LLF_NONE : LLF_PLOGI));
6451                         if (rval == QL_SUCCESS) {
6452                                 DEVICE_QUEUE_LOCK(tq);
6453                                 tq->loop_id = loop_id;
6454                                 DEVICE_QUEUE_UNLOCK(tq);
6455                         }
6456                 }
6457         } else if (ha->topology & QL_SNS_CONNECTION) {
6458                 /* Locate unused loop ID. */
6459                 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6460                         first_loop_id = 0;
6461                         last_loop_id = LAST_N_PORT_HDL;
6462                 } else if (ha->topology & QL_F_PORT) {
6463                         first_loop_id = 0;
6464                         last_loop_id = SNS_LAST_LOOP_ID;
6465                 } else {
6466                         first_loop_id = SNS_FIRST_LOOP_ID;
6467                         last_loop_id = SNS_LAST_LOOP_ID;
6468                 }
6469 
6470                 /* Acquire adapter state lock. */
6471                 ADAPTER_STATE_LOCK(ha);
6472 
6473                 tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
6474                 if (tq == NULL) {
6475                         EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
6476                             d_id.b24);
6477 
6478                         ADAPTER_STATE_UNLOCK(ha);
6479 
6480                         return (QL_FUNCTION_FAILED);
6481                 }
6482 
6483                 rval = QL_FUNCTION_FAILED;
6484                 loop_id = ha->pha->free_loop_id++;
6485                 for (index = (uint16_t)(last_loop_id - first_loop_id); index;
6486                     index--) {
6487                         if (loop_id < first_loop_id ||
6488                             loop_id > last_loop_id) {
6489                                 loop_id = first_loop_id;
6490                                 ha->pha->free_loop_id = (uint16_t)
6491                                     (loop_id + 1);
6492                         }
6493 
6494                         /* Bypass if loop ID used. */
6495                         for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6496                                 tq2 = ql_loop_id_to_queue(vha, loop_id);
6497                                 if (tq2 != NULL && tq2 != tq) {
6498                                         break;
6499                                 }
6500                         }
6501                         if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
6502                             loop_id == ha->loop_id) {
6503                                 loop_id = ha->pha->free_loop_id++;
6504                                 continue;
6505                         }
6506 
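                             /*
                              * The fabric login issues a mailbox command, so
                              * release the adapter state lock around it; the
                              * lock is re-acquired at the bottom of the loop.
                              */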
6507                         ADAPTER_STATE_UNLOCK(ha);
6508                         rval = ql_login_fabric_port(ha, tq, loop_id);
6509 
6510                         /*
6511                          * If QL_PORT_ID_USED is returned,
6512                          * ql_login_fabric_port() has updated
6513                          * tq->loop_id with the correct loop ID.
6514                          */
6515                         switch (rval) {
6516                         case QL_PORT_ID_USED:
6517                         /*
6518                          * Use the f/w assigned handle and
6519                          * try the login again.
6520                          */
6521                                 ADAPTER_STATE_LOCK(ha);
6522                                 ha->pha->free_loop_id--;
6523                                 ADAPTER_STATE_UNLOCK(ha);
6524                                 loop_id = tq->loop_id;
6525                                 break;
6526 
6527                         case QL_SUCCESS:
6528                                 tq->flags |= TQF_FABRIC_DEVICE;
6529                                 (void) ql_get_port_database(ha,
6530                                     tq, PDF_NONE);
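                                     /*
                                      * Setting index to 1 makes the search
                                      * loop exit after this iteration.
                                      */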
6531                                 index = 1;
6532                                 break;
6533 
6534                         case QL_LOOP_ID_USED:
6535                                 tq->loop_id = PORT_NO_LOOP_ID;
6536                                 loop_id = ha->pha->free_loop_id++;
6537                                 break;
6538 
6539                         case QL_ALL_IDS_IN_USE:
6540                                 tq->loop_id = PORT_NO_LOOP_ID;
6541                                 index = 1;
6542                                 break;
6543 
6544                         default:
6545                                 tq->loop_id = PORT_NO_LOOP_ID;
6546                                 index = 1;
6547                                 break;
6548                         }
6549 
6550                         ADAPTER_STATE_LOCK(ha);
6551                 }
6552 
6553                 ADAPTER_STATE_UNLOCK(ha);
6554         } else {
6555                 rval = QL_FUNCTION_FAILED;
6556         }
6557 
6558         if (rval != QL_SUCCESS) {
6559                 EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
6560         } else {
6561                 EL(ha, "d_id=%xh, loop_id=%xh, "
6562                     "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
6563                     tq->loop_id, tq->port_name[0], tq->port_name[1],
6564                     tq->port_name[2], tq->port_name[3], tq->port_name[4],
6565                     tq->port_name[5], tq->port_name[6], tq->port_name[7]);
6566         }
6567         return (rval);
6568 }
6569 
6570 /*
6571  * ql_login_fabric_port
6572  *      Issue login fabric port mailbox command.
6573  *
6574  * Input:
6575  *      ha:             adapter state pointer.
6576  *      tq:             target queue pointer.
6577  *      loop_id:        FC Loop ID.
6578  *
6579  * Returns:
6580  *      ql local function return status code.
6581  *
6582  * Context:
6583  *      Kernel context.
6584  */
6585 static int
6586 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6587 {
6588         int             rval;
6589         int             index;
6590         int             retry = 0;
6591         port_id_t       d_id;
6592         ql_tgt_t        *newq;
6593         ql_mbx_data_t   mr;
6594 
6595         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6596             tq->d_id.b24);
6597 
6598         /*
6599          * QL_PARAMETER_ERROR also means the firmware was
6600          * unable to allocate a PCB entry due to resource
6601          * issues or a collision.
6602          */
6603         do {
6604                 rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6605                 if ((rval == QL_PARAMETER_ERROR) ||
6606                     ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6607                     mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6608                         retry++;
6609                         drv_usecwait(10 * MILLISEC);
6610                 } else {
6611                         break;
6612                 }
6613         } while (retry < 5);
6614 
6615         switch (rval) {
6616         case QL_SUCCESS:
6617                 tq->loop_id = loop_id;
6618                 break;
6619 
6620         case QL_PORT_ID_USED:
6621                 /*
6622                  * This loop ID should NOT already be in use by the driver.
6623                  */
6624                 newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6625 
6626                 if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6627                         cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6628                             "dup loop_id=%xh, d_id=%xh", ha->instance,
6629                             newq->loop_id, newq->d_id.b24);
6630                         ql_send_logo(ha, newq, NULL);
6631                 }
6632 
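             /*
              * Adopt the loop ID at which the firmware already has
              * this port ID logged in (returned in mb[1]).
              */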
6633                 tq->loop_id = mr.mb[1];
6634                 break;
6635 
6636         case QL_LOOP_ID_USED:
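                     /*
                      * mb[1]/mb[2] hold the D_ID of the port that already
                      * owns this loop ID; rebuild the 24-bit port ID.
                      */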
6637                 d_id.b.al_pa = LSB(mr.mb[2]);
6638                 d_id.b.area = MSB(mr.mb[2]);
6639                 d_id.b.domain = LSB(mr.mb[1]);
6640 
6641                 newq = ql_d_id_to_queue(ha, d_id);
6642                 if (newq && (newq->loop_id != loop_id)) {
6643                         /*
6644                          * This should never happen, but the code
6645                          * below recovers when the worst case does
6646                          * occur, as it has in the past.
6647                          */
6648                         QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6649                             "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6650                             "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6651                             ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6652                             newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6653                             newq->d_id.b24, loop_id);
6654 
6655                         if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6656                                 ADAPTER_STATE_LOCK(ha);
6657 
6658                                 index = ql_alpa_to_index[newq->d_id.b.al_pa];
6659                                 ql_remove_link(&ha->dev[index], &newq->device);
6660 
6661                                 newq->d_id.b24 = d_id.b24;
6662 
6663                                 index = ql_alpa_to_index[d_id.b.al_pa];
6664                                 ql_add_link_b(&ha->dev[index], &newq->device);
6665 
6666                                 ADAPTER_STATE_UNLOCK(ha);
6667                         }
6668 
6669                         (void) ql_get_port_database(ha, newq, PDF_NONE);
6670 
6671                 }
6672 
6673                 /*
6674                  * Invalidate the loop ID so that
6675                  * we obtain a new one.
6676                  */
6677                 tq->loop_id = PORT_NO_LOOP_ID;
6678                 break;
6679 
6680         case QL_ALL_IDS_IN_USE:
6681                 rval = QL_FUNCTION_FAILED;
6682                 EL(ha, "no loop id's available\n");
6683                 break;
6684 
6685         default:
6686                 if (rval == QL_COMMAND_ERROR) {
6687                         switch (mr.mb[1]) {
6688                         case 2:
6689                         case 3:
6690                                 rval = QL_MEMORY_ALLOC_FAILED;
6691                                 break;
6692 
6693                         case 4:
6694                                 rval = QL_FUNCTION_TIMEOUT;
6695                                 break;
6696                         case 7:
6697                                 rval = QL_FABRIC_NOT_INITIALIZED;
6698                                 break;
6699                         default:
6700                                 EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6701                                 break;
6702                         }
6703                 } else {
6704                         cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6705                             " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6706                             ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6707                 }
6708                 break;
6709         }
6710 
6711         if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6712             rval != QL_LOOP_ID_USED) {
6713                 EL(ha, "failed=%xh\n", rval);
6714         } else {
6715                 /*EMPTY*/
6716                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6717         }
6718         return (rval);
6719 }
6720 
6721 /*
6722  * ql_logout_port
6723  *      Logs out a device if possible.
6724  *
6725  * Input:
6726  *      ha:     adapter state pointer.
6727  *      d_id:   24 bit port ID.
6728  *
6729  * Returns:
6730  *      QL local function return status code.
6731  *
6732  * Context:
6733  *      Kernel context.
6734  */
6735 static int
6736 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6737 {
6738         ql_link_t       *link;
6739         ql_tgt_t        *tq;
6740         uint16_t        index;
6741 
6742         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6743 
6744         /* Get head queue index. */
6745         index = ql_alpa_to_index[d_id.b.al_pa];
6746 
6747         /* Get device queue. */
6748         tq = NULL;
6749         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6750                 tq = link->base_address;
6751                 if (tq->d_id.b24 == d_id.b24) {
6752                         break;
6753                 } else {
6754                         tq = NULL;
6755                 }
6756         }
6757 
6758         if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6759                 (void) ql_logout_fabric_port(ha, tq);
6760                 tq->loop_id = PORT_NO_LOOP_ID;
6761         }
6762 
6763         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6764 
6765         return (QL_SUCCESS);
6766 }
6767 
6768 /*
6769  * ql_dev_init
6770  *      Initialize/allocate device queue.
6771  *
6772  * Input:
6773  *      ha:             adapter state pointer.
6774  *      d_id:           device destination ID
6775  *      loop_id:        device loop ID
6776  *      ADAPTER_STATE_LOCK must be already obtained.
6777  *
6778  * Returns:
6779  *      NULL = failure
6780  *
6781  * Context:
6782  *      Kernel context.
6783  */
6784 ql_tgt_t *
6785 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6786 {
6787         ql_link_t       *link;
6788         uint16_t        index;
6789         ql_tgt_t        *tq;
6790 
6791         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6792             ha->instance, d_id.b24, loop_id);
6793 
6794         index = ql_alpa_to_index[d_id.b.al_pa];
6795 
6796         /* If device queue exists, set proper loop ID. */
6797         tq = NULL;
6798         for (link = ha->dev[index].first; link != NULL; link = link->next) {
6799                 tq = link->base_address;
6800                 if (tq->d_id.b24 == d_id.b24) {
6801                         tq->loop_id = loop_id;
6802 
6803                         /* Reset port down retry count. */
6804                         tq->port_down_retry_count = ha->port_down_retry_count;
6805                         tq->qfull_retry_count = ha->qfull_retry_count;
6806 
6807                         break;
6808                 } else {
6809                         tq = NULL;
6810                 }
6811         }
6812 
6813         /* If the device does not have a queue, allocate one. */
6814         if (tq == NULL) {
6815                 tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
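                     /*
                      * A KM_SLEEP allocation waits for memory and does not
                      * return NULL, so the check below is only defensive.
                      */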
6816                 if (tq != NULL) {
6817                         /*
6818                          * mutex to protect the device queue,
6819                          * does not block interrupts.
6820                          */
6821                         mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6822                             (ha->iflags & IFLG_INTR_AIF) ?
6823                             (void *)(uintptr_t)ha->intr_pri :
6824                             (void *)(uintptr_t)ha->iblock_cookie);
6825 
6826                         tq->d_id.b24 = d_id.b24;
6827                         tq->loop_id = loop_id;
6828                         tq->device.base_address = tq;
6829                         tq->iidma_rate = IIDMA_RATE_INIT;
6830 
6831                         /* Reset port down retry count. */
6832                         tq->port_down_retry_count = ha->port_down_retry_count;
6833                         tq->qfull_retry_count = ha->qfull_retry_count;
6834 
6835                         /* Add device to device queue. */
6836                         ql_add_link_b(&ha->dev[index], &tq->device);
6837                 }
6838         }
6839 
6840         if (tq == NULL) {
6841                 EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6842         } else {
6843                 /*EMPTY*/
6844                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6845         }
6846         return (tq);
6847 }
6848 
6849 /*
6850  * ql_dev_free
6851  *      Remove the queue from the device list and free the resources it uses.
6852  *
6853  * Input:
6854  *      ha:     adapter state pointer.
6855  *      tq:     target queue pointer.
6856  *      ADAPTER_STATE_LOCK must be already obtained.
6857  *
6858  * Context:
6859  *      Kernel context.
6860  */
6861 void
6862 ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
6863 {
6864         ql_link_t       *link;
6865         uint16_t        index;
6866         ql_lun_t        *lq;
6867 
6868         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6869 
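             /* Do not free the queue while any LUN still has commands queued. */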
6870         for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6871                 lq = link->base_address;
6872                 if (lq->cmd.first != NULL) {
6873                         return;
6874                 }
6875         }
6876 
6877         if (tq->outcnt == 0) {
6878                 /* Get head queue index. */
6879                 index = ql_alpa_to_index[tq->d_id.b.al_pa];
6880                 for (link = ha->dev[index].first; link != NULL;
6881                     link = link->next) {
6882                         if (link->base_address == tq) {
6883                                 ql_remove_link(&ha->dev[index], link);
6884 
6885                                 link = tq->lun_queues.first;
6886                                 while (link != NULL) {
6887                                         lq = link->base_address;
6888                                         link = link->next;
6889 
6890                                         ql_remove_link(&tq->lun_queues,
6891                                             &lq->link);
6892                                         kmem_free(lq, sizeof (ql_lun_t));
6893                                 }
6894 
6895                                 mutex_destroy(&tq->mutex);
6896                                 kmem_free(tq, sizeof (ql_tgt_t));
6897                                 break;
6898                         }
6899                 }
6900         }
6901 
6902         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6903 }
6904 
6905 /*
6906  * ql_lun_queue
6907  *      Allocate a LUN queue if one does not exist.
6908  *
6909  * Input:
6910  *      ha:     adapter state pointer.
6911  *      tq:     target queue.
6912  *      lun:    LUN number.
6913  *
6914  * Returns:
6915  *      NULL = failure
6916  *
6917  * Context:
6918  *      Kernel context.
6919  */
6920 static ql_lun_t *
6921 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6922 {
6923         ql_lun_t        *lq;
6924         ql_link_t       *link;
6925 
6926         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6927 
6928         /* Fast path. */
6929         if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6930                 QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6931                 return (tq->last_lun_queue);
6932         }
6933 
6934         if (lun >= MAX_LUNS) {
6935                 EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6936                 return (NULL);
6937         }
6938         /* If the LUN queue already exists, return it. */
6939         lq = NULL;
6940         for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6941                 lq = link->base_address;
6942                 if (lq->lun_no == lun) {
6943                         QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6944                         tq->last_lun_queue = lq;
6945                         return (lq);
6946                 }
6947         }
6948 
6949         /* The queue does not exist; allocate a new one. */
6950         lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6951 
6952         /* Initialize LUN queue. */
6953         if (lq != NULL) {
6954                 lq->link.base_address = lq;
6955 
6956                 lq->lun_no = lun;
6957                 lq->target_queue = tq;
6958 
6959                 DEVICE_QUEUE_LOCK(tq);
6960                 ql_add_link_b(&tq->lun_queues, &lq->link);
6961                 DEVICE_QUEUE_UNLOCK(tq);
6962                 tq->last_lun_queue = lq;
6963         }
6964 
6965         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6966 
6967         return (lq);
6968 }
6969 
6970 /*
6971  * ql_fcp_scsi_cmd
6972  *      Process fibre channel (FCP) SCSI protocol commands.
6973  *
6974  * Input:
6975  *      ha:     adapter state pointer.
6976  *      pkt:    pointer to fc_packet.
6977  *      sp:     SRB pointer.
6978  *
6979  * Returns:
6980  *      FC_SUCCESS - the packet was accepted for transport.
6981  *      FC_TRANSPORT_ERROR - a transport error occurred.
6982  *
6983  * Context:
6984  *      Kernel context.
6985  */
6986 static int
6987 ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6988 {
6989         port_id_t       d_id;
6990         ql_tgt_t        *tq;
6991         uint64_t        *ptr;
6992         uint16_t        lun;
6993 
6994         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6995 
6996         tq = (ql_tgt_t *)pkt->pkt_fca_device;
6997         if (tq == NULL) {
6998                 d_id.r.rsvd_1 = 0;
6999                 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7000                 tq = ql_d_id_to_queue(ha, d_id);
7001         }
7002 
7003         sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
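             /*
              * The LUN number is carried in the first word of the FCP
              * entity address; swap the bytes into host order.
              */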
7004         lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7005             hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7006 
7007         if (tq != NULL &&
7008             (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {
7009 
7010                 /*
7011                  * zero out FCP response; 24 Bytes
7012                  */
7013                 ptr = (uint64_t *)pkt->pkt_resp;
7014                 *ptr++ = 0; *ptr++ = 0; *ptr++ = 0;
7015 
7016                 /* Handle task management function. */
7017                 if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
7018                     sp->fcp->fcp_cntl.cntl_clr_aca |
7019                     sp->fcp->fcp_cntl.cntl_reset_tgt |
7020                     sp->fcp->fcp_cntl.cntl_reset_lun |
7021                     sp->fcp->fcp_cntl.cntl_clr_tsk |
7022                     sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
7023                         ql_task_mgmt(ha, tq, pkt, sp);
7024                 } else {
7025                         ha->pha->xioctl->IosRequested++;
7026                         ha->pha->xioctl->BytesRequested += (uint32_t)
7027                             sp->fcp->fcp_data_len;
7028 
7029                         /*
7030                          * Setup for commands with data transfer
7031                          */
7032                         sp->iocb = ha->fcp_cmd;
7033                         sp->req_cnt = 1;
7034                         if (sp->fcp->fcp_data_len != 0) {
7035                                 /*
7036                                  * FCP data is bound to pkt_data_dma
7037                                  */
7038                                 if (sp->fcp->fcp_cntl.cntl_write_data) {
7039                                         (void) ddi_dma_sync(pkt->pkt_data_dma,
7040                                             0, 0, DDI_DMA_SYNC_FORDEV);
7041                                 }
7042 
7043                                 /* Setup IOCB count. */
7044                                 if (pkt->pkt_data_cookie_cnt > ha->cmd_segs &&
7045                                     (!CFG_IST(ha, CFG_CTRL_8021) ||
7046                                     sp->sg_dma.dma_handle == NULL)) {
7047                                         uint32_t        cnt;
7048 
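                                             /*
                                              * The command IOCB holds
                                              * ha->cmd_segs segments; each
                                              * continuation IOCB holds
                                              * ha->cmd_cont_segs more.
                                              */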
7049                                         cnt = pkt->pkt_data_cookie_cnt -
7050                                             ha->cmd_segs;
7051                                         sp->req_cnt = (uint16_t)
7052                                             (cnt / ha->cmd_cont_segs);
7053                                         if (cnt % ha->cmd_cont_segs) {
7054                                                 sp->req_cnt = (uint16_t)
7055                                                     (sp->req_cnt + 2);
7056                                         } else {
7057                                                 sp->req_cnt++;
7058                                         }
7059                                 }
7060                         }
7061                         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7062 
7063                         return (ql_start_cmd(ha, tq, pkt, sp));
7064                 }
7065         } else {
7066                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7067                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7068 
7069                 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7070                         ql_awaken_task_daemon(ha, sp, 0, 0);
7071         }
7072 
7073         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7074 
7075         return (FC_SUCCESS);
7076 }
7077 
7078 /*
7079  * ql_task_mgmt
7080  *      Task management function processor.
7081  *
7082  * Input:
7083  *      ha:     adapter state pointer.
7084  *      tq:     target queue pointer.
7085  *      pkt:    pointer to fc_packet.
7086  *      sp:     SRB pointer.
7087  *
7088  * Context:
7089  *      Kernel context.
7090  */
7091 static void
7092 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7093     ql_srb_t *sp)
7094 {
7095         fcp_rsp_t               *fcpr;
7096         struct fcp_rsp_info     *rsp;
7097         uint16_t                lun;
7098 
7099         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7100 
7101         fcpr = (fcp_rsp_t *)pkt->pkt_resp;
7102         rsp = (struct fcp_rsp_info *)(pkt->pkt_resp + sizeof (fcp_rsp_t));
7103 
7104         bzero(fcpr, pkt->pkt_rsplen);
7105 
7106         fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7107         fcpr->fcp_response_len = 8;
7108         lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7109             hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7110 
7111         if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7112                 if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
7113                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7114                 }
7115         } else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7116                 if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
7117                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7118                 }
7119         } else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7120                 if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7121                     QL_SUCCESS) {
7122                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7123                 }
7124         } else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7125                 if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
7126                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7127                 }
7128         } else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7129                 if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
7130                         rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7131                 }
7132         } else {
7133                 rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7134         }
7135 
7136         pkt->pkt_state = FC_PKT_SUCCESS;
7137 
7138         /* Do command callback. */
7139         if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7140                 ql_awaken_task_daemon(ha, sp, 0, 0);
7141         }
7142 
7143         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7144 }
7145 
7146 /*
7147  * ql_fcp_ip_cmd
7148  *      Process fibre channel Internet protocol (IP) commands.
7149  *
7150  * Input:
7151  *      ha:     adapter state pointer.
7152  *      pkt:    pointer to fc_packet.
7153  *      sp:     SRB pointer.
7154  *
7155  * Returns:
7156  *      FC_SUCCESS - the packet was accepted for transport.
7157  *      FC_TRANSPORT_ERROR - a transport error occurred.
7158  *
7159  * Context:
7160  *      Kernel context.
7161  */
7162 static int
7163 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7164 {
7165         port_id_t       d_id;
7166         ql_tgt_t        *tq;
7167 
7168         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7169 
7170         tq = (ql_tgt_t *)pkt->pkt_fca_device;
7171         if (tq == NULL) {
7172                 d_id.r.rsvd_1 = 0;
7173                 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7174                 tq = ql_d_id_to_queue(ha, d_id);
7175         }
7176 
7177         if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7178                 /*
7179                  * IP data is bound to pkt_cmd_dma
7180                  */
7181                 (void) ddi_dma_sync(pkt->pkt_cmd_dma,
7182                     0, 0, DDI_DMA_SYNC_FORDEV);
7183 
7184                 /* Setup IOCB count. */
7185                 sp->iocb = ha->ip_cmd;
7186                 if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7187                         uint32_t        cnt;
7188 
7189                         cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7190                         sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7191                         if (cnt % ha->cmd_cont_segs) {
7192                                 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7193                         } else {
7194                                 sp->req_cnt++;
7195                         }
7196                 } else {
7197                         sp->req_cnt = 1;
7198                 }
7199                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7200 
7201                 return (ql_start_cmd(ha, tq, pkt, sp));
7202         } else {
7203                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7204                 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7205 
7206                 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7207                         ql_awaken_task_daemon(ha, sp, 0, 0);
7208         }
7209 
7210         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7211 
7212         return (FC_SUCCESS);
7213 }
7214 
7215 /*
7216  * ql_fc_services
7217  *      Process fibre channel services (name server).
7218  *
7219  * Input:
7220  *      ha:     adapter state pointer.
7221  *      pkt:    pointer to fc_packet.
7222  *
7223  * Returns:
7224  *      FC_SUCCESS - the packet was accepted for transport.
7225  *      FC_TRANSPORT_ERROR - a transport error occurred.
7226  *
7227  * Context:
7228  *      Kernel context.
7229  */
7230 static int
7231 ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
7232 {
7233         uint32_t        cnt;
7234         fc_ct_header_t  hdr;
7235         la_els_rjt_t    rjt;
7236         port_id_t       d_id;
7237         ql_tgt_t        *tq;
7238         ql_srb_t        *sp;
7239         int             rval;
7240 
7241         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7242 
7243         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
7244             (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);
7245 
7246         bzero(&rjt, sizeof (rjt));
7247 
7248         /* Do some sanity checks */
7249         cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
7250             sizeof (fc_ct_header_t));
7251         if (cnt > (uint32_t)pkt->pkt_rsplen) {
7252                 EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
7253                     pkt->pkt_rsplen);
7254                 return (FC_ELS_MALFORMED);
7255         }
7256 
7257         switch (hdr.ct_fcstype) {
7258         case FCSTYPE_DIRECTORY:
7259         case FCSTYPE_MGMTSERVICE:
7260                 /* An FCA must make sure that the header is in big endian */
7261                 ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);
7262 
7263                 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7264                 tq = ql_d_id_to_queue(ha, d_id);
7265                 sp = (ql_srb_t *)pkt->pkt_fca_private;
7266                 if (tq == NULL ||
7267                     (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
7268                         pkt->pkt_state = FC_PKT_LOCAL_RJT;
7269                         pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7270                         rval = QL_SUCCESS;
7271                         break;
7272                 }
7273 
7274                 /*
7275                  * Services data is bound to pkt_cmd_dma
7276                  */
7277                 (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
7278                     DDI_DMA_SYNC_FORDEV);
7279 
7280                 sp->flags |= SRB_MS_PKT;
7281                 sp->retry_count = 32;
7282 
7283                 /* Setup IOCB count. */
7284                 sp->iocb = ha->ms_cmd;
7285                 if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
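                             /*
                              * The MS IOCB holds MS_DATA_SEGMENTS response
                              * segments; extra cookies go into type 1
                              * continuation IOCBs.
                              */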
7286                         cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
7287                         sp->req_cnt =
7288                             (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
7289                         if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
7290                                 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7291                         } else {
7292                                 sp->req_cnt++;
7293                         }
7294                 } else {
7295                         sp->req_cnt = 1;
7296                 }
7297                 rval = ql_start_cmd(ha, tq, pkt, sp);
7298 
7299                 QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
7300                     ha->instance, rval);
7301 
7302                 return (rval);
7303 
7304         default:
7305                 EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
7306                 rval = QL_FUNCTION_PARAMETER_ERROR;
7307                 break;
7308         }
7309 
7310         if (rval != QL_SUCCESS) {
7311                 /* Build RJT. */
7312                 rjt.ls_code.ls_code = LA_ELS_RJT;
7313                 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
7314 
7315                 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
7316                     (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
7317 
7318                 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7319                 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
7320                 EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
7321         }
7322 
7323         /* Do command callback. */
7324         if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7325                 ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
7326                     0, 0);
7327         }
7328 
7329         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7330 
7331         return (FC_SUCCESS);
7332 }
7333 
7334 /*
7335  * ql_cthdr_endian
7336  *      Change endianness of CT passthrough header and payload.
7337  *
7338  * Input:
7339  *      acc_handle:     DMA buffer access handle.
7340  *      ct_hdr:         Pointer to header.
7341  *      restore:        Restore first flag.
7342  *
7343  * Context:
7344  *      Interrupt or Kernel context, no mailbox commands allowed.
7345  */
7346 void
7347 ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7348     boolean_t restore)
7349 {
7350         uint8_t         i, *bp;
7351         fc_ct_header_t  hdr;
7352         uint32_t        *hdrp = (uint32_t *)&hdr;
7353 
7354         ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
7355             (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7356 
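             /*
              * On restore the header in the buffer is already big endian, so
              * convert the local copy back to host order before checking
              * ct_fcstype/ct_cmdrsp; on the initial call the copy is
              * converted only after the payload has been handled.
              */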
7357         if (restore) {
7358                 for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7359                         *hdrp = BE_32(*hdrp);
7360                         hdrp++;
7361                 }
7362         }
7363 
7364         if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
7365                 bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);
7366 
7367                 switch (hdr.ct_cmdrsp) {
7368                 case NS_GA_NXT:
7369                 case NS_GPN_ID:
7370                 case NS_GNN_ID:
7371                 case NS_GCS_ID:
7372                 case NS_GFT_ID:
7373                 case NS_GSPN_ID:
7374                 case NS_GPT_ID:
7375                 case NS_GID_FT:
7376                 case NS_GID_PT:
7377                 case NS_RPN_ID:
7378                 case NS_RNN_ID:
7379                 case NS_RSPN_ID:
7380                 case NS_DA_ID:
7381                         BIG_ENDIAN_32(bp);
7382                         break;
7383                 case NS_RFT_ID:
7384                 case NS_RCS_ID:
7385                 case NS_RPT_ID:
7386                         BIG_ENDIAN_32(bp);
7387                         bp += 4;
7388                         BIG_ENDIAN_32(bp);
7389                         break;
7390                 case NS_GNN_IP:
7391                 case NS_GIPA_IP:
7392                         BIG_ENDIAN(bp, 16);
7393                         break;
7394                 case NS_RIP_NN:
7395                         bp += 8;
7396                         BIG_ENDIAN(bp, 16);
7397                         break;
7398                 case NS_RIPA_NN:
7399                         bp += 8;
7400                         BIG_ENDIAN_64(bp);
7401                         break;
7402                 default:
7403                         break;
7404                 }
7405         }
7406 
7407         if (restore == B_FALSE) {
7408                 for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7409                         *hdrp = BE_32(*hdrp);
7410                         hdrp++;
7411                 }
7412         }
7413 
7414         ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
7415             (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7416 }
7417 
7418 /*
7419  * ql_start_cmd
7420  *      Finishes starting a fibre channel protocol (FCP) command.
7421  *
7422  * Input:
7423  *      ha:     adapter state pointer.
7424  *      tq:     target queue pointer.
7425  *      pkt:    pointer to fc_packet.
7426  *      sp:     SRB pointer.
7427  *
      * Returns:
      *      FC_SUCCESS - the packet was accepted for transport.
      *      FC_DEVICE_BUSY - the device is busy, retry later.
      *      FC_TRANSPORT_ERROR - a transport error occurred.
      *
7428  * Context:
7429  *      Kernel context.
7430  */
7431 static int
7432 ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7433     ql_srb_t *sp)
7434 {
7435         int             rval = FC_SUCCESS;
7436         time_t          poll_wait = 0;
7437         ql_lun_t        *lq = sp->lun_queue;
7438 
7439         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7440 
7441         sp->handle = 0;
7442 
7443         /* Set poll for finish. */
7444         if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
7445                 sp->flags |= SRB_POLL;
7446                 if (pkt->pkt_timeout == 0) {
7447                         pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
7448                 }
7449         }
7450 
7451         /* Acquire device queue lock. */
7452         DEVICE_QUEUE_LOCK(tq);
7453 
7454         /*
7455          * If we need authentication, report device busy to
7456          * upper layers to retry later
7457          */
7458         if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
7459                 DEVICE_QUEUE_UNLOCK(tq);
7460                 EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
7461                     tq->d_id.b24);
7462                 return (FC_DEVICE_BUSY);
7463         }
7464 
7465         /* Insert command onto watchdog queue. */
7466         if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
7467                 ql_timeout_insert(ha, tq, sp);
7468         } else {
7469                 /*
7470                  * Run dump requests in polled mode as kernel threads
7471                  * and interrupts may have been disabled.
7472                  */
7473                 sp->flags |= SRB_POLL;
7474                 sp->init_wdg_q_time = 0;
7475                 sp->isp_timeout = 0;
7476         }
7477 
7478         /* If this is a polled command, set up the wait time. */
7479         if (sp->flags & SRB_POLL) {
7480                 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7481                         poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
7482                 } else {
7483                         poll_wait = pkt->pkt_timeout;
7484                 }
7485         }
7486 
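             /*
              * If commands are being aborted because the loop down timer
              * expired and link down reporting is enabled, fail the command
              * immediately instead of queueing it.
              */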
7487         if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
7488             (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
7489                 /* Set ending status. */
7490                 sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
7491 
7492                 /* Call done routine to handle completions. */
7493                 sp->cmd.next = NULL;
7494                 DEVICE_QUEUE_UNLOCK(tq);
7495                 ql_done(&sp->cmd);
7496         } else {
7497                 if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
7498                         int do_lip = 0;
7499 
7500                         DEVICE_QUEUE_UNLOCK(tq);
7501 
7502                         ADAPTER_STATE_LOCK(ha);
7503                         if ((do_lip = ha->pha->lip_on_panic) == 0) {
7504                                 ha->pha->lip_on_panic++;
7505                         }
7506                         ADAPTER_STATE_UNLOCK(ha);
7507 
7508                         if (!do_lip) {
7509 
7510                                 /*
7511                                  * It helps that the QLogic firmware performs
7512                                  * PLOGI, PRLI, etc. itself. If a PLOGI fails,
7513                                  * the command completes with an error such as
7514                                  * CS_PORT_LOGGED_OUT, and this driver must
7515                                  * kick off a careful polled-mode login on its
7516                                  * own; the FC transport's services are not
7517                                  * available because all threads are suspended,
7518                                  * interrupts are disabled, and so on. Right
7519                                  * now we re-login whenever the packet state
7520                                  * is not FC_PKT_SUCCESS.
7521                                  */
7522                                 (void) ql_abort_isp(ha);
7523                         }
7524 
7525                         ql_start_iocb(ha, sp);
7526                 } else {
7527                         /* Add the command to the device queue */
7528                         if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
7529                                 ql_add_link_t(&lq->cmd, &sp->cmd);
7530                         } else {
7531                                 ql_add_link_b(&lq->cmd, &sp->cmd);
7532                         }
7533 
7534                         sp->flags |= SRB_IN_DEVICE_QUEUE;
7535 
7536                         /* Check whether next message can be processed */
7537                         ql_next(ha, lq);
7538                 }
7539         }
7540 
7541         /* If polling, wait for finish. */
7542         if (poll_wait) {
7543                 if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
7544                         int     res;
7545 
7546                         res = ql_abort((opaque_t)ha, pkt, 0);
7547                         if (res != FC_SUCCESS && res != FC_ABORTED) {
7548                                 DEVICE_QUEUE_LOCK(tq);
7549                                 ql_remove_link(&lq->cmd, &sp->cmd);
7550                                 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7551                                 DEVICE_QUEUE_UNLOCK(tq);
7552                         }
7553                 }
7554 
7555                 if (pkt->pkt_state != FC_PKT_SUCCESS) {
7556                         EL(ha, "failed, FC_TRANSPORT_ERROR\n");
7557                         rval = FC_TRANSPORT_ERROR;
7558                 }
7559 
7560                 if (ddi_in_panic()) {
7561                         if (pkt->pkt_state != FC_PKT_SUCCESS) {
7562                                 port_id_t d_id;
7563 
7564                                 /*
7565                                  * A successful login implies, by design,
7566                                  * that PRLI also succeeded for disks.
7567                                  * Note also that there is no separate
7568                                  * mailbox command to send a PRLI.
7569                                  */
7570                                 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7571                                 (void) ql_login_port(ha, d_id);
7572                         }
7573                 }
7574 
7575                 /*
7576                  * This should only happen during CPR dumping
7577                  */
7578                 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
7579                     pkt->pkt_comp) {
7580                         sp->flags &= ~SRB_POLL;
7581                         (*pkt->pkt_comp)(pkt);
7582                 }
7583         }
7584 
7585         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7586 
7587         return (rval);
7588 }
7589 
7590 /*
7591  * ql_poll_cmd
7592  *      Polls commands for completion.
7593  *
7594  * Input:
7595  *      ha:             adapter state pointer.
7596  *      sp:             SRB command pointer.
7597  *      poll_wait:      poll wait time in seconds.
7598  *
7599  * Returns:
7600  *      QL local function return status code.
7601  *
7602  * Context:
7603  *      Kernel context.
7604  */
7605 static int
7606 ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
7607 {
7608         int                     rval = QL_SUCCESS;
7609         time_t                  msecs_left = poll_wait * 100;   /* 10ms inc */
7610         ql_adapter_state_t      *ha = vha->pha;
7611 
7612         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7613 
7614         while (sp->flags & SRB_POLL) {
7615 
7616                 if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
7617                     ha->idle_timer >= 15 || ddi_in_panic()) {
7618 
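                             /*
                              * Interrupts are not being serviced (interrupts
                              * disabled, the adapter idle timer expired, or
                              * the system is panicking), so poll the ISP and
                              * run task daemon work inline.
                              */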
7619                         /* If waiting for restart, do it now. */
7620                         if (ha->port_retry_timer != 0) {
7621                                 ADAPTER_STATE_LOCK(ha);
7622                                 ha->port_retry_timer = 0;
7623                                 ADAPTER_STATE_UNLOCK(ha);
7624 
7625                                 TASK_DAEMON_LOCK(ha);
7626                                 ha->task_daemon_flags |= PORT_RETRY_NEEDED;
7627                                 TASK_DAEMON_UNLOCK(ha);
7628                         }
7629 
7630                         if (INTERRUPT_PENDING(ha)) {
7631                                 (void) ql_isr((caddr_t)ha);
7632                                 INTR_LOCK(ha);
7633                                 ha->intr_claimed = TRUE;
7634                                 INTR_UNLOCK(ha);
7635                         }
7636 
7637                         /*
7638                          * Call task thread function in case the
7639                          * daemon is not running.
7640                          */
7641                         TASK_DAEMON_LOCK(ha);
7642 
7643                         if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
7644                             QL_TASK_PENDING(ha)) {
7645                                 ha->task_daemon_flags |= TASK_THREAD_CALLED;
7646                                 ql_task_thread(ha);
7647                                 ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7648                         }
7649 
7650                         TASK_DAEMON_UNLOCK(ha);
7651                 }
7652 
7653                 if (msecs_left < 10) {
7654                         rval = QL_FUNCTION_TIMEOUT;
7655                         break;
7656                 }
7657 
7658                 /*
7659                  * The polling interval is 10 milliseconds. Raising
7660                  * it to whole seconds is tempting, since disk I/O
7661                  * timeout values are around 60 seconds, but doing
7662                  * so increases CPR dump time, and the crash dump
7663                  * time will grow with it. Don't change this
7664                  * setting without due consideration of all the
7665                  * scenarios that will be impacted.
7666                  */
7667                 ql_delay(ha, 10000);
7668                 msecs_left -= 10;
7669         }
7670 
7671         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7672 
7673         return (rval);
7674 }
7675 
7676 /*
7677  * ql_next
7678  *      Retrieve and process next job in the device queue.
7679  *
7680  * Input:
7681  *      ha:     adapter state pointer.
7682  *      lq:     LUN queue pointer.
7683  *      DEVICE_QUEUE_LOCK must be already obtained.
7684  *
7685  * Output:
7686  *      Releases DEVICE_QUEUE_LOCK upon exit.
7687  *
7688  * Context:
7689  *      Interrupt or Kernel context, no mailbox commands allowed.
7690  */
7691 void
7692 ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
7693 {
7694         ql_srb_t                *sp;
7695         ql_link_t               *link;
7696         ql_tgt_t                *tq = lq->target_queue;
7697         ql_adapter_state_t      *ha = vha->pha;
7698 
7699         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7700 
7701         if (ddi_in_panic()) {
7702                 DEVICE_QUEUE_UNLOCK(tq);
7703                 QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
7704                     ha->instance);
7705                 return;
7706         }
7707 
7708         while ((link = lq->cmd.first) != NULL) {
7709                 sp = link->base_address;
7710 
7711                 /* Exit if can not start commands. */
7712                 if (DRIVER_SUSPENDED(ha) ||
7713                     (ha->flags & ONLINE) == 0 ||
7714                     !VALID_DEVICE_ID(ha, tq->loop_id) ||
7715                     sp->flags & SRB_ABORT ||
7716                     tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
7717                     TQF_QUEUE_SUSPENDED)) {
7718                         EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
7719                             "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
7720                             ha->task_daemon_flags, tq->flags, sp->flags,
7721                             ha->flags, tq->loop_id);
7722                         break;
7723                 }
7724 
7725                 /*
7726                  * For FCP commands, do not submit another command
7727                  * if an untagged command is already pending on the
7728                  * LUN or the LUN execution throttle has been
7729                  * reached.
7730                  */
7731                 if (sp->flags & SRB_FCP_CMD_PKT) {
7732                         if (lq->flags & LQF_UNTAGGED_PENDING ||
7733                             lq->lun_outcnt >= ha->execution_throttle) {
7734                                 QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
7735                                     "lf=%xh, lun_outcnt=%xh\n", ha->instance,
7736                                     tq->d_id.b24, lq->flags, lq->lun_outcnt);
7737                                 break;
7738                         }
7739                         if (sp->fcp->fcp_cntl.cntl_qtype ==
7740                             FCP_QTYPE_UNTAGGED) {
7741                                 /*
7742                                  * Set the untagged-flag for the LUN
7743                                  * so that no more untagged commands
7744                                  * can be submitted for this LUN.
7745                                  */
7746                                 lq->flags |= LQF_UNTAGGED_PENDING;
7747                         }
7748 
7749                         /* Count command as sent. */
7750                         lq->lun_outcnt++;
7751                 }
7752 
7753                 /* Remove srb from device queue. */
7754                 ql_remove_link(&lq->cmd, &sp->cmd);
7755                 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7756 
7757                 tq->outcnt++;
7758 
7759                 ql_start_iocb(vha, sp);
7760         }
7761 
7762         /* Release device queue lock. */
7763         DEVICE_QUEUE_UNLOCK(tq);
7764 
7765         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7766 }
7767 
7768 /*
7769  * ql_done
7770  *      Process completed commands.
7771  *
7772  * Input:
7773  *      link:   first command link in chain.
7774  *
7775  * Context:
7776  *      Interrupt or Kernel context, no mailbox commands allowed.
7777  */
7778 void
7779 ql_done(ql_link_t *link)
7780 {
7781         ql_adapter_state_t      *ha;
7782         ql_link_t               *next_link;
7783         ql_srb_t                *sp;
7784         ql_tgt_t                *tq;
7785         ql_lun_t                *lq;
7786 
7787         QL_PRINT_3(CE_CONT, "started\n");
7788 
7789         for (; link != NULL; link = next_link) {
7790                 next_link = link->next;
7791                 sp = link->base_address;
7792                 ha = sp->ha;
7793 
7794                 if (sp->flags & SRB_UB_CALLBACK) {
7795                         QL_UB_LOCK(ha);
7796                         if (sp->flags & SRB_UB_IN_ISP) {
7797                                 if (ha->ub_outcnt != 0) {
7798                                         ha->ub_outcnt--;
7799                                 }
7800                                 QL_UB_UNLOCK(ha);
7801                                 ql_isp_rcvbuf(ha);
7802                                 QL_UB_LOCK(ha);
7803                         }
7804                         QL_UB_UNLOCK(ha);
7805                         ql_awaken_task_daemon(ha, sp, 0, 0);
7806                 } else {
7807                         /* Free outstanding command slot. */
7808                         if (sp->handle != 0) {
7809                                 ha->outstanding_cmds[
7810                                     sp->handle & OSC_INDEX_MASK] = NULL;
7811                                 sp->handle = 0;
7812                                 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
7813                         }
7814 
7815                         /* Acquire device queue lock. */
7816                         lq = sp->lun_queue;
7817                         tq = lq->target_queue;
7818                         DEVICE_QUEUE_LOCK(tq);
7819 
7820                         /* Decrement outstanding commands on device. */
7821                         if (tq->outcnt != 0) {
7822                                 tq->outcnt--;
7823                         }
7824 
7825                         if (sp->flags & SRB_FCP_CMD_PKT) {
7826                                 if (sp->fcp->fcp_cntl.cntl_qtype ==
7827                                     FCP_QTYPE_UNTAGGED) {
7828                                         /*
7829                                          * Clear the flag for this LUN so that
7830                                          * untagged commands can be submitted
7831                                          * for it.
7832                                          */
7833                                         lq->flags &= ~LQF_UNTAGGED_PENDING;
7834                                 }
7835 
7836                                 if (lq->lun_outcnt != 0) {
7837                                         lq->lun_outcnt--;
7838                                 }
7839                         }
7840 
7841                         /* Reset port down retry count on good completion. */
7842                         if (sp->pkt->pkt_reason == CS_COMPLETE) {
7843                                 tq->port_down_retry_count =
7844                                     ha->port_down_retry_count;
7845                                 tq->qfull_retry_count = ha->qfull_retry_count;
7846                         }
7847 
7848 
7849                         /* Alter aborted status for fast timeout feature */
7850                         if (CFG_IST(ha, CFG_FAST_TIMEOUT) &&
7851                             (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7852                             !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7853                             sp->flags & SRB_RETRY &&
7854                             (sp->flags & SRB_WATCHDOG_ENABLED &&
7855                             sp->wdg_q_time > 1)) {
7856                                 EL(ha, "fast abort modify change\n");
7857                                 sp->flags &= ~(SRB_RETRY);
7858                                 sp->pkt->pkt_reason = CS_TIMEOUT;
7859                         }
7860 
7861                         /* Place request back on top of target command queue */
7862                         if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7863                             !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7864                             sp->flags & SRB_RETRY &&
7865                             (sp->flags & SRB_WATCHDOG_ENABLED &&
7866                             sp->wdg_q_time > 1)) {
7867                                 sp->flags &= ~(SRB_ISP_STARTED |
7868                                     SRB_ISP_COMPLETED | SRB_RETRY);
7869 
7870                                 /* Reset watchdog timer */
7871                                 sp->wdg_q_time = sp->init_wdg_q_time;
7872 
7873                                 /* Issue marker command on reset status. */
7874                                 if (!(ha->task_daemon_flags & LOOP_DOWN) &&
7875                                     (sp->pkt->pkt_reason == CS_RESET ||
7876                                     (CFG_IST(ha, CFG_CTRL_24258081) &&
7877                                     sp->pkt->pkt_reason == CS_ABORTED))) {
7878                                         (void) ql_marker(ha, tq->loop_id, 0,
7879                                             MK_SYNC_ID);
7880                                 }
7881 
7882                                 ql_add_link_t(&lq->cmd, &sp->cmd);
7883                                 sp->flags |= SRB_IN_DEVICE_QUEUE;
7884                                 ql_next(ha, lq);
7885                         } else {
7886                                 /* Remove command from watchdog queue. */
7887                                 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7888                                         ql_remove_link(&tq->wdg, &sp->wdg);
7889                                         sp->flags &= ~SRB_WATCHDOG_ENABLED;
7890                                 }
7891 
7892                                 if (lq->cmd.first != NULL) {
7893                                         ql_next(ha, lq);
7894                                 } else {
7895                                         /* Release LU queue specific lock. */
7896                                         DEVICE_QUEUE_UNLOCK(tq);
7897                                         if (ha->pha->pending_cmds.first !=
7898                                             NULL) {
7899                                                 ql_start_iocb(ha, NULL);
7900                                         }
7901                                 }
7902 
7903                                 /* Sync buffers if required.  */
7904                                 if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
7905                                         (void) ddi_dma_sync(
7906                                             sp->pkt->pkt_resp_dma,
7907                                             0, 0, DDI_DMA_SYNC_FORCPU);
7908                                 }
7909 
7910                                 /* Map ISP completion codes. */
7911                                 sp->pkt->pkt_expln = FC_EXPLN_NONE;
7912                                 sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
7913                                 switch (sp->pkt->pkt_reason) {
7914                                 case CS_COMPLETE:
7915                                         sp->pkt->pkt_state = FC_PKT_SUCCESS;
7916                                         break;
7917                                 case CS_RESET:
7918                                         /* Issue marker command. */
7919                                         if (!(ha->task_daemon_flags &
7920                                             LOOP_DOWN)) {
7921                                                 (void) ql_marker(ha,
7922                                                     tq->loop_id, 0,
7923                                                     MK_SYNC_ID);
7924                                         }
7925                                         sp->pkt->pkt_state =
7926                                             FC_PKT_PORT_OFFLINE;
7927                                         sp->pkt->pkt_reason =
7928                                             FC_REASON_ABORTED;
7929                                         break;
7930                                 case CS_RESOUCE_UNAVAILABLE:
7931                                         sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
7932                                         sp->pkt->pkt_reason =
7933                                             FC_REASON_PKT_BUSY;
7934                                         break;
7935 
7936                                 case CS_TIMEOUT:
7937                                         sp->pkt->pkt_state = FC_PKT_TIMEOUT;
7938                                         sp->pkt->pkt_reason =
7939                                             FC_REASON_HW_ERROR;
7940                                         break;
7941                                 case CS_DATA_OVERRUN:
7942                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7943                                         sp->pkt->pkt_reason =
7944                                             FC_REASON_OVERRUN;
7945                                         break;
7946                                 case CS_PORT_UNAVAILABLE:
7947                                 case CS_PORT_LOGGED_OUT:
7948                                         sp->pkt->pkt_state =
7949                                             FC_PKT_PORT_OFFLINE;
7950                                         sp->pkt->pkt_reason =
7951                                             FC_REASON_LOGIN_REQUIRED;
7952                                         ql_send_logo(ha, tq, NULL);
7953                                         break;
7954                                 case CS_PORT_CONFIG_CHG:
7955                                         sp->pkt->pkt_state =
7956                                             FC_PKT_PORT_OFFLINE;
7957                                         sp->pkt->pkt_reason =
7958                                             FC_REASON_OFFLINE;
7959                                         break;
7960                                 case CS_QUEUE_FULL:
7961                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7962                                         sp->pkt->pkt_reason = FC_REASON_QFULL;
7963                                         break;
7964 
7965                                 case CS_ABORTED:
7966                                         DEVICE_QUEUE_LOCK(tq);
7967                                         if (tq->flags & (TQF_RSCN_RCVD |
7968                                             TQF_NEED_AUTHENTICATION)) {
7969                                                 sp->pkt->pkt_state =
7970                                                     FC_PKT_PORT_OFFLINE;
7971                                                 sp->pkt->pkt_reason =
7972                                                     FC_REASON_LOGIN_REQUIRED;
7973                                         } else {
7974                                                 sp->pkt->pkt_state =
7975                                                     FC_PKT_LOCAL_RJT;
7976                                                 sp->pkt->pkt_reason =
7977                                                     FC_REASON_ABORTED;
7978                                         }
7979                                         DEVICE_QUEUE_UNLOCK(tq);
7980                                         break;
7981 
7982                                 case CS_TRANSPORT:
7983                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7984                                         sp->pkt->pkt_reason =
7985                                             FC_PKT_TRAN_ERROR;
7986                                         break;
7987 
7988                                 case CS_DATA_UNDERRUN:
7989                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7990                                         sp->pkt->pkt_reason =
7991                                             FC_REASON_UNDERRUN;
7992                                         break;
7993                                 case CS_DMA_ERROR:
7994                                 case CS_BAD_PAYLOAD:
7995                                 case CS_UNKNOWN:
7996                                 case CS_CMD_FAILED:
7997                                 default:
7998                                         sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7999                                         sp->pkt->pkt_reason =
8000                                             FC_REASON_HW_ERROR;
8001                                         break;
8002                                 }
8003 
8004                                 /* Now call the pkt completion callback */
8005                                 if (sp->flags & SRB_POLL) {
8006                                         sp->flags &= ~SRB_POLL;
8007                                 } else if (sp->pkt->pkt_comp) {
8008                                         if (sp->pkt->pkt_tran_flags &
8009                                             FC_TRAN_IMMEDIATE_CB) {
8010                                                 (*sp->pkt->pkt_comp)(sp->pkt);
8011                                         } else {
8012                                                 ql_awaken_task_daemon(ha, sp,
8013                                                     0, 0);
8014                                         }
8015                                 }
8016                         }
8017                 }
8018         }
8019 
8020         QL_PRINT_3(CE_CONT, "done\n");
8021 }
8022 
8023 /*
8024  * ql_awaken_task_daemon
8025  *      Adds command completion callback to callback queue and/or
8026  *      awakens task daemon thread.
8027  *
8028  * Input:
8029  *      ha:             adapter state pointer.
8030  *      sp:             srb pointer.
8031  *      set_flags:      task daemon flags to set.
8032  *      reset_flags:    task daemon flags to reset.
8033  *
8034  * Context:
8035  *      Interrupt or Kernel context, no mailbox commands allowed.
8036  */
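     /*
      * Usage sketch (patterns visible elsewhere in this file): queue a
      * command completion without changing any daemon flags,
      *
      *      ql_awaken_task_daemon(ha, sp, 0, 0);
      *
      * or, with sp == NULL, set/clear task daemon flags and wake the
      * daemon, e.g. to request an ISP abort:
      *
      *      ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
      */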
8037 void
8038 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
8039     uint32_t set_flags, uint32_t reset_flags)
8040 {
8041         ql_adapter_state_t      *ha = vha->pha;
8042 
8043         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8044 
8045         /* Acquire task daemon lock. */
8046         TASK_DAEMON_LOCK(ha);
8047 
8048         if (set_flags & ISP_ABORT_NEEDED) {
8049                 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
8050                         set_flags &= ~ISP_ABORT_NEEDED;
8051                 }
8052         }
8053 
8054         ha->task_daemon_flags |= set_flags;
8055         ha->task_daemon_flags &= ~reset_flags;
8056 
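             /*
              * If the task daemon cannot service the request right now
              * (QL_DAEMON_SUSPENDED), the completion callback is run
              * inline; for flag-only wakeups, ql_task_thread() is called
              * directly, with TASK_THREAD_CALLED guarding against
              * recursion and interrupt context excluded.
              */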
8057         if (QL_DAEMON_SUSPENDED(ha)) {
8058                 if (sp != NULL) {
8059                         TASK_DAEMON_UNLOCK(ha);
8060 
8061                         /* Do callback. */
8062                         if (sp->flags & SRB_UB_CALLBACK) {
8063                                 ql_unsol_callback(sp);
8064                         } else {
8065                                 (*sp->pkt->pkt_comp)(sp->pkt);
8066                         }
8067                 } else {
8068                         if (!(curthread->t_flag & T_INTR_THREAD) &&
8069                             !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
8070                                 ha->task_daemon_flags |= TASK_THREAD_CALLED;
8071                                 ql_task_thread(ha);
8072                                 ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
8073                         }
8074 
8075                         TASK_DAEMON_UNLOCK(ha);
8076                 }
8077         } else {
8078                 if (sp != NULL) {
8079                         ql_add_link_b(&ha->callback_queue, &sp->cmd);
8080                 }
8081 
8082                 if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
8083                         cv_broadcast(&ha->cv_task_daemon);
8084                 }
8085                 TASK_DAEMON_UNLOCK(ha);
8086         }
8087 
8088         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8089 }
8090 
8091 /*
8092  * ql_task_daemon
8093  *      Thread that is awakened by the driver when
8094  *      background work needs to be done.
8095  *
8096  * Input:
8097  *      arg = adapter state pointer.
8098  *
8099  * Context:
8100  *      Kernel context.
8101  */
8102 static void
8103 ql_task_daemon(void *arg)
8104 {
8105         ql_adapter_state_t      *ha = (void *)arg;
8106 
8107         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8108 
8109         CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
8110             "ql_task_daemon");
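             /*
              * CALLB_CPR_INIT/SAFE_BEGIN/SAFE_END/EXIT bracket this thread
              * for checkpoint/resume (CPR): the SAFE window covers the
              * cv_wait_sig() below, and CALLB_CPR_EXIT also drops
              * task_daemon_mutex on the way out.
              */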
8111 
8112         /* Acquire task daemon lock. */
8113         TASK_DAEMON_LOCK(ha);
8114 
8115         ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
8116 
8117         while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
8118                 ql_task_thread(ha);
8119 
8120                 QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);
8121 
8122                 /*
8123                  * Before we wait on the condition variable, we
8124                  * need to check if STOP_FLG is set, telling us to terminate.
8125                  */
8126                 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
8127                         break;
8128                 }
8129 
8130                 /*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
8131                 CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);
8132 
8133                 ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;
8134 
8135                 /* If killed, stop task daemon */
8136                 if (cv_wait_sig(&ha->cv_task_daemon,
8137                     &ha->task_daemon_mutex) == 0) {
8138                         ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
8139                 }
8140 
8141                 ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;
8142 
8143                 /*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
8144                 CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);
8145 
8146                 QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
8147         }
8148 
8149         ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
8150             TASK_DAEMON_ALIVE_FLG);
8151 
8152         /*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
8153         CALLB_CPR_EXIT(&ha->cprinfo);
8154 
8155         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8156 
8157         thread_exit();
8158 }
8159 
8160 /*
8161  * ql_task_thread
8162  *      Work function run by the task daemon thread.
8163  *
8164  * Input:
8165  *      ha = adapter state pointer.
8166  *      TASK_DAEMON_LOCK must be acquired prior to call.
8167  *
8168  * Context:
8169  *      Kernel context.
8170  */
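     /*
      * Each event handled below follows the same pattern: consume the flag
      * while TASK_DAEMON_LOCK is held, drop the lock to do the work, retake
      * the lock, and set loop_again so the flags are re-scanned, since they
      * may have changed while the lock was dropped.
      */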
8171 static void
8172 ql_task_thread(ql_adapter_state_t *ha)
8173 {
8174         int                     loop_again;
8175         ql_srb_t                *sp;
8176         ql_head_t               *head;
8177         ql_link_t               *link;
8178         caddr_t                 msg;
8179         ql_adapter_state_t      *vha;
8180 
8181         do {
8182                 QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
8183                     ha->instance, ha->task_daemon_flags);
8184 
8185                 loop_again = FALSE;
8186 
8187                 QL_PM_LOCK(ha);
8188                 if (ha->power_level != PM_LEVEL_D0) {
8189                         QL_PM_UNLOCK(ha);
8190                         ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8191                         break;
8192                 }
8193                 QL_PM_UNLOCK(ha);
8194 
8195                 /* IDC event. */
8196                 if (ha->task_daemon_flags & IDC_EVENT) {
8197                         ha->task_daemon_flags &= ~IDC_EVENT;
8198                         TASK_DAEMON_UNLOCK(ha);
8199                         ql_process_idc_event(ha);
8200                         TASK_DAEMON_LOCK(ha);
8201                         loop_again = TRUE;
8202                 }
8203 
8204                 if (ha->flags & ADAPTER_SUSPENDED || ha->task_daemon_flags &
8205                     (TASK_DAEMON_STOP_FLG | DRIVER_STALL) ||
8206                     (ha->flags & ONLINE) == 0) {
8207                         ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8208                         break;
8209                 }
8210                 ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
8211 
8212                 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8213                         TASK_DAEMON_UNLOCK(ha);
8214                         if (ha->log_parity_pause == B_TRUE) {
8215                                 (void) ql_flash_errlog(ha,
8216                                     FLASH_ERRLOG_PARITY_ERR, 0,
8217                                     MSW(ha->parity_stat_err),
8218                                     LSW(ha->parity_stat_err));
8219                                 ha->log_parity_pause = B_FALSE;
8220                         }
8221                         ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
8222                         TASK_DAEMON_LOCK(ha);
8223                         loop_again = TRUE;
8224                 }
8225 
8226                 /* Idle Check. */
8227                 if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
8228                         ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
8229                         if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
8230                                 TASK_DAEMON_UNLOCK(ha);
8231                                 ql_idle_check(ha);
8232                                 TASK_DAEMON_LOCK(ha);
8233                                 loop_again = TRUE;
8234                         }
8235                 }
8236 
8237                 /* Crystal+ port#0 bypass transition */
8238                 if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
8239                         ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
8240                         TASK_DAEMON_UNLOCK(ha);
8241                         (void) ql_initiate_lip(ha);
8242                         TASK_DAEMON_LOCK(ha);
8243                         loop_again = TRUE;
8244                 }
8245 
8246                 /* Abort queues needed. */
8247                 if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
8248                         ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
8249                         TASK_DAEMON_UNLOCK(ha);
8250                         ql_abort_queues(ha);
8251                         TASK_DAEMON_LOCK(ha);
8252                 }
8253 
8254                 /* Not suspended, awaken waiting routines. */
8255                 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8256                     ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8257                         ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8258                         cv_broadcast(&ha->cv_dr_suspended);
8259                         loop_again = TRUE;
8260                 }
8261 
8262                 /* Handle RSCN changes. */
8263                 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8264                         if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8265                                 vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8266                                 TASK_DAEMON_UNLOCK(ha);
8267                                 (void) ql_handle_rscn_update(vha);
8268                                 TASK_DAEMON_LOCK(ha);
8269                                 loop_again = TRUE;
8270                         }
8271                 }
8272 
8273                 /* Handle state changes. */
8274                 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8275                         if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8276                             !(ha->task_daemon_flags &
8277                             TASK_DAEMON_POWERING_DOWN)) {
8278                                 /* Report state change. */
8279                                 EL(vha, "state change = %xh\n", vha->state);
8280                                 vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8281 
8282                                 if (vha->task_daemon_flags &
8283                                     COMMAND_WAIT_NEEDED) {
8284                                         vha->task_daemon_flags &=
8285                                             ~COMMAND_WAIT_NEEDED;
8286                                         if (!(ha->task_daemon_flags &
8287                                             COMMAND_WAIT_ACTIVE)) {
8288                                                 ha->task_daemon_flags |=
8289                                                     COMMAND_WAIT_ACTIVE;
8290                                                 TASK_DAEMON_UNLOCK(ha);
8291                                                 ql_cmd_wait(ha);
8292                                                 TASK_DAEMON_LOCK(ha);
8293                                                 ha->task_daemon_flags &=
8294                                                     ~COMMAND_WAIT_ACTIVE;
8295                                         }
8296                                 }
8297 
8298                                 msg = NULL;
8299                                 if (FC_PORT_STATE_MASK(vha->state) ==
8300                                     FC_STATE_OFFLINE) {
8301                                         if (vha->task_daemon_flags &
8302                                             STATE_ONLINE) {
8303                                                 if (ha->topology &
8304                                                     QL_LOOP_CONNECTION) {
8305                                                         msg = "Loop OFFLINE";
8306                                                 } else {
8307                                                         msg = "Link OFFLINE";
8308                                                 }
8309                                         }
8310                                         vha->task_daemon_flags &=
8311                                             ~STATE_ONLINE;
8312                                 } else if (FC_PORT_STATE_MASK(vha->state) ==
8313                                     FC_STATE_LOOP) {
8314                                         if (!(vha->task_daemon_flags &
8315                                             STATE_ONLINE)) {
8316                                                 msg = "Loop ONLINE";
8317                                         }
8318                                         vha->task_daemon_flags |= STATE_ONLINE;
8319                                 } else if (FC_PORT_STATE_MASK(vha->state) ==
8320                                     FC_STATE_ONLINE) {
8321                                         if (!(vha->task_daemon_flags &
8322                                             STATE_ONLINE)) {
8323                                                 msg = "Link ONLINE";
8324                                         }
8325                                         vha->task_daemon_flags |= STATE_ONLINE;
8326                                 } else {
8327                                         msg = "Unknown Link state";
8328                                 }
8329 
8330                                 if (msg != NULL) {
8331                                         cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8332                                             "%s", QL_NAME, ha->instance,
8333                                             vha->vp_index, msg);
8334                                 }
8335 
8336                                 if (vha->flags & FCA_BOUND) {
8337                                         QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8338                                             "cb state=%xh\n", ha->instance,
8339                                             vha->vp_index, vha->state);
8340                                         TASK_DAEMON_UNLOCK(ha);
8341                                         (vha->bind_info.port_statec_cb)
8342                                             (vha->bind_info.port_handle,
8343                                             vha->state);
8344                                         TASK_DAEMON_LOCK(ha);
8345                                 }
8346                                 loop_again = TRUE;
8347                         }
8348                 }
8349 
8350                 if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8351                     !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8352                         EL(ha, "processing LIP reset\n");
8353                         ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8354                         TASK_DAEMON_UNLOCK(ha);
8355                         for (vha = ha; vha != NULL; vha = vha->vp_next) {
8356                                 if (vha->flags & FCA_BOUND) {
8357                                         QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8358                                             "cb reset\n", ha->instance,
8359                                             vha->vp_index);
8360                                         (vha->bind_info.port_statec_cb)
8361                                             (vha->bind_info.port_handle,
8362                                             FC_STATE_TARGET_PORT_RESET);
8363                                 }
8364                         }
8365                         TASK_DAEMON_LOCK(ha);
8366                         loop_again = TRUE;
8367                 }
8368 
8369                 if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8370                     FIRMWARE_UP)) {
8371                         /*
8372                          * The firmware needs more unsolicited
8373                          * buffers. We cannot allocate any new
8374                          * buffers unless the ULP module requests
8375                          * them. All we can do here is give the
8376                          * ISP buffers from the pool that is
8377                          * already allocated.
8378                          */
8379                         ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8380                         TASK_DAEMON_UNLOCK(ha);
8381                         ql_isp_rcvbuf(ha);
8382                         TASK_DAEMON_LOCK(ha);
8383                         loop_again = TRUE;
8384                 }
8385 
8386                 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8387                         TASK_DAEMON_UNLOCK(ha);
8388                         (void) ql_abort_isp(ha);
8389                         TASK_DAEMON_LOCK(ha);
8390                         loop_again = TRUE;
8391                 }
8392 
8393                 if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8394                     COMMAND_WAIT_NEEDED))) {
8395                         if (QL_IS_SET(ha->task_daemon_flags,
8396                             RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8397                                 ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8398                                 if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8399                                         ha->task_daemon_flags |= RESET_ACTIVE;
8400                                         TASK_DAEMON_UNLOCK(ha);
8401                                         for (vha = ha; vha != NULL;
8402                                             vha = vha->vp_next) {
8403                                                 ql_rst_aen(vha);
8404                                         }
8405                                         TASK_DAEMON_LOCK(ha);
8406                                         ha->task_daemon_flags &= ~RESET_ACTIVE;
8407                                         loop_again = TRUE;
8408                                 }
8409                         }
8410 
8411                         if (QL_IS_SET(ha->task_daemon_flags,
8412                             LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8413                                 if (!(ha->task_daemon_flags &
8414                                     LOOP_RESYNC_ACTIVE)) {
8415                                         ha->task_daemon_flags |=
8416                                             LOOP_RESYNC_ACTIVE;
8417                                         TASK_DAEMON_UNLOCK(ha);
8418                                         (void) ql_loop_resync(ha);
8419                                         TASK_DAEMON_LOCK(ha);
8420                                         loop_again = TRUE;
8421                                 }
8422                         }
8423                 }
8424 
8425                 /* Port retry needed. */
8426                 if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8427                         ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8428                         ADAPTER_STATE_LOCK(ha);
8429                         ha->port_retry_timer = 0;
8430                         ADAPTER_STATE_UNLOCK(ha);
8431 
8432                         TASK_DAEMON_UNLOCK(ha);
8433                         ql_restart_queues(ha);
8434                         TASK_DAEMON_LOCK(ha);
8435                         loop_again = B_TRUE;
8436                 }
8437 
8438                 /* iiDMA setting needed? */
8439                 if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8440                         ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8441 
8442                         TASK_DAEMON_UNLOCK(ha);
8443                         ql_iidma(ha);
8444                         TASK_DAEMON_LOCK(ha);
8445                         loop_again = B_TRUE;
8446                 }
8447 
8448                 if (ha->task_daemon_flags & SEND_PLOGI) {
8449                         ha->task_daemon_flags &= ~SEND_PLOGI;
8450                         TASK_DAEMON_UNLOCK(ha);
8451                         (void) ql_n_port_plogi(ha);
8452                         TASK_DAEMON_LOCK(ha);
8453                 }
8454 
8455                 head = &ha->callback_queue;
8456                 if (head->first != NULL) {
8457                         sp = head->first->base_address;
8458                         link = &sp->cmd;
8459 
8460                         /* Dequeue command. */
8461                         ql_remove_link(head, link);
8462 
8463                         /* Release task daemon lock. */
8464                         TASK_DAEMON_UNLOCK(ha);
8465 
8466                         /* Do callback. */
8467                         if (sp->flags & SRB_UB_CALLBACK) {
8468                                 ql_unsol_callback(sp);
8469                         } else {
8470                                 (*sp->pkt->pkt_comp)(sp->pkt);
8471                         }
8472 
8473                         /* Acquire task daemon lock. */
8474                         TASK_DAEMON_LOCK(ha);
8475 
8476                         loop_again = TRUE;
8477                 }
8478 
8479         } while (loop_again);
8480 }
8481 
8482 /*
8483  * ql_idle_check
8484  *      Tests that the adapter is alive and well.
8485  *
8486  * Input:
8487  *      ha:     adapter state pointer.
8488  *
8489  * Context:
8490  *      Kernel context.
8491  */
8492 static void
8493 ql_idle_check(ql_adapter_state_t *ha)
8494 {
8495         ddi_devstate_t  state;
8496         int             rval;
8497         ql_mbx_data_t   mr;
8498 
8499         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8500 
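             /*
              * A firmware state other than FSTATE_READY (while the adapter
              * is not suspended) is reported as a device fault and, unless
              * an ISP abort is already active, schedules one via
              * ISP_ABORT_NEEDED.
              */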
8501         /* Firmware Ready Test. */
8502         rval = ql_get_firmware_state(ha, &mr);
8503         if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8504             (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8505                 EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8506                 state = ddi_get_devstate(ha->dip);
8507                 if (state == DDI_DEVSTATE_UP) {
8508                         /*EMPTY*/
8509                         ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8510                             DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8511                 }
8512                 TASK_DAEMON_LOCK(ha);
8513                 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8514                         EL(ha, "fstate_ready, isp_abort_needed\n");
8515                         ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8516                 }
8517                 TASK_DAEMON_UNLOCK(ha);
8518         }
8519 
8520         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8521 }
8522 
8523 /*
8524  * ql_unsol_callback
8525  *      Handle unsolicited buffer callbacks.
8526  *
8527  * Input:
8528  *      sp = srb pointer (adapter state is taken from sp->ha).
8530  *
8531  * Context:
8532  *      Kernel context.
8533  */
8534 static void
8535 ql_unsol_callback(ql_srb_t *sp)
8536 {
8537         fc_affected_id_t        *af;
8538         fc_unsol_buf_t          *ubp;
8539         uchar_t                 r_ctl;
8540         uchar_t                 ls_code;
8541         ql_tgt_t                *tq;
8542         ql_adapter_state_t      *ha = sp->ha, *pha = sp->ha->pha;
8543 
8544         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8545 
8546         ubp = ha->ub_array[sp->handle];
8547         r_ctl = ubp->ub_frame.r_ctl;
8548         ls_code = ubp->ub_buffer[0];
8549 
8550         if (sp->lun_queue == NULL) {
8551                 tq = NULL;
8552         } else {
8553                 tq = sp->lun_queue->target_queue;
8554         }
8555 
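             /*
              * If the ULP has requested this buffer back, or the adapter
              * is powering down, return the buffer to the FCA pool instead
              * of delivering it.
              */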
8556         QL_UB_LOCK(ha);
8557         if (sp->flags & SRB_UB_FREE_REQUESTED ||
8558             pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
8559                 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
8560                     SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
8561                 sp->flags |= SRB_UB_IN_FCA;
8562                 QL_UB_UNLOCK(ha);
8563                 return;
8564         }
8565 
8566         /* Process RSCN */
8567         if (sp->flags & SRB_UB_RSCN) {
8568                 int sendup = 1;
8569 
8570                 /*
8571                  * Defer RSCN posting until commands return
8572                  */
8573                 QL_UB_UNLOCK(ha);
8574 
8575                 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8576 
8577                 /* Abort outstanding commands */
8578                 sendup = ql_process_rscn(ha, af);
8579                 if (sendup == 0) {
8580 
8581                         TASK_DAEMON_LOCK(ha);
8582                         ql_add_link_b(&pha->callback_queue, &sp->cmd);
8583                         TASK_DAEMON_UNLOCK(ha);
8584 
8585                         /*
8586                          * Wait for commands to drain in F/W (doesn't take
8587                          * more than a few milliseconds)
8588                          */
8589                         ql_delay(ha, 10000);
8590 
8591                         QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
8592                             "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
8593                             af->aff_format, af->aff_d_id);
8594                         return;
8595                 }
8596 
8597                 QL_UB_LOCK(ha);
8598 
8599                 EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
8600                     af->aff_format, af->aff_d_id);
8601         }
8602 
8603         /* Process UNSOL LOGO */
8604         if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
8605                 QL_UB_UNLOCK(ha);
8606 
8607                 if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
8608                         TASK_DAEMON_LOCK(ha);
8609                         ql_add_link_b(&pha->callback_queue, &sp->cmd);
8610                         TASK_DAEMON_UNLOCK(ha);
8611                         QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
8612                             "\n", ha->instance, ha->vp_index, tq->d_id.b24);
8613                         return;
8614                 }
8615 
8616                 QL_UB_LOCK(ha);
8617                 EL(ha, "sending unsol logout for %xh to transport\n",
8618                     ubp->ub_frame.s_id);
8619         }
8620 
8621         sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
8622             SRB_UB_FCP);
8623 
8624         if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
8625                 (void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
8626                     ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
8627         }
8628         QL_UB_UNLOCK(ha);
8629 
8630         (ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
8631             ubp, sp->ub_type);
8632 
8633         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8634 }
8635 
8636 /*
8637  * ql_send_logo
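      *      Builds an unsolicited ELS LOGO on behalf of the target and
      *      delivers it to the transport, either via done_q or through
      *      the task daemon.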
8638  *
8639  * Input:
8640  *      ha:     adapter state pointer.
8641  *      tq:     target queue pointer.
8642  *      done_q: done queue pointer.
8643  *
8644  * Context:
8645  *      Interrupt or Kernel context, no mailbox commands allowed.
8646  */
8647 void
8648 ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
8649 {
8650         fc_unsol_buf_t          *ubp;
8651         ql_srb_t                *sp;
8652         la_els_logo_t           *payload;
8653         ql_adapter_state_t      *ha = vha->pha;
8654 
8655         QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
8656             tq->d_id.b24);
8657 
8658         if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
8659                 EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
8660                 return;
8661         }
8662 
8663         if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
8664             tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {
8665 
8666                 /* Locate a buffer to use. */
8667                 ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
8668                 if (ubp == NULL) {
8669                         EL(vha, "Failed, get_unsolicited_buffer\n");
8670                         return;
8671                 }
8672 
8673                 DEVICE_QUEUE_LOCK(tq);
8674                 tq->flags |= TQF_NEED_AUTHENTICATION;
8675                 tq->logout_sent++;
8676                 DEVICE_QUEUE_UNLOCK(tq);
8677 
8678                 EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);
8679 
8680                 sp = ubp->ub_fca_private;
8681 
8682                 /* Set header. */
8683                 ubp->ub_frame.d_id = vha->d_id.b24;
8684                 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8685                 ubp->ub_frame.s_id = tq->d_id.b24;
8686                 ubp->ub_frame.rsvd = 0;
8687                 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8688                     F_CTL_SEQ_INITIATIVE;
8689                 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8690                 ubp->ub_frame.seq_cnt = 0;
8691                 ubp->ub_frame.df_ctl = 0;
8692                 ubp->ub_frame.seq_id = 0;
8693                 ubp->ub_frame.rx_id = 0xffff;
8694                 ubp->ub_frame.ox_id = 0xffff;
8695 
8696                 /* set payload. */
8697                 payload = (la_els_logo_t *)ubp->ub_buffer;
8698                 bzero(payload, sizeof (la_els_logo_t));
8699                 /* Make sure ls_code in payload is always big endian */
8700                 ubp->ub_buffer[0] = LA_ELS_LOGO;
8701                 ubp->ub_buffer[1] = 0;
8702                 ubp->ub_buffer[2] = 0;
8703                 ubp->ub_buffer[3] = 0;
8704                 bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
8705                     &payload->nport_ww_name.raw_wwn[0], 8);
8706                 payload->nport_id.port_id = tq->d_id.b24;
8707 
8708                 QL_UB_LOCK(ha);
8709                 sp->flags |= SRB_UB_CALLBACK;
8710                 QL_UB_UNLOCK(ha);
8711                 if (tq->lun_queues.first != NULL) {
8712                         sp->lun_queue = (tq->lun_queues.first)->base_address;
8713                 } else {
8714                         sp->lun_queue = ql_lun_queue(vha, tq, 0);
8715                 }
8716                 if (done_q) {
8717                         ql_add_link_b(done_q, &sp->cmd);
8718                 } else {
8719                         ql_awaken_task_daemon(ha, sp, 0, 0);
8720                 }
8721         }
8722 
8723         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8724 }
8725 
8726 static int
8727 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8728 {
8729         port_id_t       d_id;
8730         ql_srb_t        *sp;
8731         ql_link_t       *link;
8732         int             sendup = 1;
8733 
8734         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8735 
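             /*
              * Decide whether the LOGO can be sent up now: defer (return 0)
              * if the target still has commands outstanding in the firmware
              * or if a completion for this d_id is still sitting on the
              * callback queue.
              */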
8736         DEVICE_QUEUE_LOCK(tq);
8737         if (tq->outcnt) {
8738                 DEVICE_QUEUE_UNLOCK(tq);
8739                 sendup = 0;
8740                 (void) ql_abort_device(ha, tq, 1);
8741                 ql_delay(ha, 10000);
8742         } else {
8743                 DEVICE_QUEUE_UNLOCK(tq);
8744                 TASK_DAEMON_LOCK(ha);
8745 
8746                 for (link = ha->pha->callback_queue.first; link != NULL;
8747                     link = link->next) {
8748                         sp = link->base_address;
8749                         if (sp->flags & SRB_UB_CALLBACK) {
8750                                 continue;
8751                         }
8752                         d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8753 
8754                         if (tq->d_id.b24 == d_id.b24) {
8755                                 sendup = 0;
8756                                 break;
8757                         }
8758                 }
8759 
8760                 TASK_DAEMON_UNLOCK(ha);
8761         }
8762 
8763         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8764 
8765         return (sendup);
8766 }
8767 
8768 static int
8769 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8770 {
8771         fc_unsol_buf_t          *ubp;
8772         ql_srb_t                *sp;
8773         la_els_logi_t           *payload;
8774         class_svc_param_t       *class3_param;
8775 
8776         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8777 
8778         if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8779             LOOP_DOWN)) {
8780                 EL(ha, "Failed, tqf=%xh\n", tq->flags);
8781                 return (QL_FUNCTION_FAILED);
8782         }
8783 
8784         /* Locate a buffer to use. */
8785         ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8786         if (ubp == NULL) {
8787                 EL(ha, "Failed\n");
8788                 return (QL_FUNCTION_FAILED);
8789         }
8790 
8791         QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8792             ha->instance, tq->d_id.b24);
8793 
8794         EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8795 
8796         sp = ubp->ub_fca_private;
8797 
8798         /* Set header. */
8799         ubp->ub_frame.d_id = ha->d_id.b24;
8800         ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8801         ubp->ub_frame.s_id = tq->d_id.b24;
8802         ubp->ub_frame.rsvd = 0;
8803         ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8804             F_CTL_SEQ_INITIATIVE;
8805         ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8806         ubp->ub_frame.seq_cnt = 0;
8807         ubp->ub_frame.df_ctl = 0;
8808         ubp->ub_frame.seq_id = 0;
8809         ubp->ub_frame.rx_id = 0xffff;
8810         ubp->ub_frame.ox_id = 0xffff;
8811 
8812         /* set payload. */
8813         payload = (la_els_logi_t *)ubp->ub_buffer;
8814         bzero(payload, sizeof (la_els_logi_t));
8815 
8816         payload->ls_code.ls_code = LA_ELS_PLOGI;
8817         payload->common_service.fcph_version = 0x2006;
8818         payload->common_service.cmn_features = 0x8800;
8819 
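             /*
              * Advertise the receive buffer size from whichever init
              * control block is in use: the extended cb24 block for
              * CFG_CTRL_24258081 class controllers, otherwise the legacy
              * cb block.
              */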
8820         CFG_IST(ha, CFG_CTRL_24258081) ?
8821             (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8822             ha->init_ctrl_blk.cb24.max_frame_length[0],
8823             ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8824             (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8825             ha->init_ctrl_blk.cb.max_frame_length[0],
8826             ha->init_ctrl_blk.cb.max_frame_length[1]));
8827 
8828         payload->common_service.conc_sequences = 0xff;
8829         payload->common_service.relative_offset = 0x03;
8830         payload->common_service.e_d_tov = 0x7d0;
8831 
8832         bcopy((void *)&tq->port_name[0],
8833             (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8834 
8835         bcopy((void *)&tq->node_name[0],
8836             (void *)&payload->node_ww_name.raw_wwn[0], 8);
8837 
8838         class3_param = (class_svc_param_t *)&payload->class_3;
8839         class3_param->class_valid_svc_opt = 0x8000;
8840         class3_param->recipient_ctl = tq->class3_recipient_ctl;
8841         class3_param->rcv_data_size = tq->class3_rcv_data_size;
8842         class3_param->conc_sequences = tq->class3_conc_sequences;
8843         class3_param->open_sequences_per_exch =
8844             tq->class3_open_sequences_per_exch;
8845 
8846         QL_UB_LOCK(ha);
8847         sp->flags |= SRB_UB_CALLBACK;
8848         QL_UB_UNLOCK(ha);
8849 
8850         ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8851 
8852         if (done_q) {
8853                 ql_add_link_b(done_q, &sp->cmd);
8854         } else {
8855                 ql_awaken_task_daemon(ha, sp, 0, 0);
8856         }
8857 
8858         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8859 
8860         return (QL_SUCCESS);
8861 }
8862 
8863 /*
8864  * Aborts outstanding commands in the firmware, clears internally
8865  * queued commands in the driver, and synchronizes the target with
8866  * the firmware.
8867  */
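     /*
      * When drain is nonzero and the target has a valid, logged-in loop ID,
      * commands the firmware still holds for the target are also aborted
      * via ql_abort_target().
      */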
8868 int
8869 ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
8870 {
8871         ql_link_t       *link, *link2;
8872         ql_lun_t        *lq;
8873         int             rval = QL_SUCCESS;
8874         ql_srb_t        *sp;
8875         ql_head_t       done_q = { NULL, NULL };
8876 
8877         QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
8878 
8879         /*
8880          * First, clear internally queued commands.
8881          */
8882         DEVICE_QUEUE_LOCK(tq);
8883         for (link = tq->lun_queues.first; link != NULL; link = link->next) {
8884                 lq = link->base_address;
8885 
8886                 link2 = lq->cmd.first;
8887                 while (link2 != NULL) {
8888                         sp = link2->base_address;
8889                         link2 = link2->next;
8890 
8891                         if (sp->flags & SRB_ABORT) {
8892                                 continue;
8893                         }
8894 
8895                         /* Remove srb from device command queue. */
8896                         ql_remove_link(&lq->cmd, &sp->cmd);
8897                         sp->flags &= ~SRB_IN_DEVICE_QUEUE;
8898 
8899                         /* Set ending status. */
8900                         sp->pkt->pkt_reason = CS_ABORTED;
8901 
8902                         /* Call done routine to handle completions. */
8903                         ql_add_link_b(&done_q, &sp->cmd);
8904                 }
8905         }
8906         DEVICE_QUEUE_UNLOCK(tq);
8907 
8908         if (done_q.first != NULL) {
8909                 ql_done(done_q.first);
8910         }
8911 
8912         if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
8913                 rval = ql_abort_target(ha, tq, 0);
8914         }
8915 
8916         if (rval != QL_SUCCESS) {
8917                 EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
8918         } else {
8919                 /*EMPTY*/
8920                 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
8921                     ha->vp_index);
8922         }
8923 
8924         return (rval);
8925 }
8926 
8927 /*
8928  * ql_rcv_rscn_els
8929  *      Processes received RSCN extended link service.
8930  *
8931  * Input:
8932  *      ha:     adapter state pointer.
8933  *      mb:     array containing input mailbox registers.
8934  *      done_q: done queue pointer.
8935  *
8936  * Context:
8937  *      Interrupt or Kernel context, no mailbox commands allowed.
8938  */
8939 void
8940 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8941 {
8942         fc_unsol_buf_t          *ubp;
8943         ql_srb_t                *sp;
8944         fc_rscn_t               *rn;
8945         fc_affected_id_t        *af;
8946         port_id_t               d_id;
8947 
8948         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8949 
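             /*
              * Fabricate an RSCN ELS frame from the async event mailbox
              * data (mb[1]/mb[2] carry the affected address and format)
              * and pass it up as an unsolicited buffer.
              */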
8950         /* Locate a buffer to use. */
8951         ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8952         if (ubp != NULL) {
8953                 sp = ubp->ub_fca_private;
8954 
8955                 /* Set header. */
8956                 ubp->ub_frame.d_id = ha->d_id.b24;
8957                 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8958                 ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8959                 ubp->ub_frame.rsvd = 0;
8960                 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8961                     F_CTL_SEQ_INITIATIVE;
8962                 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8963                 ubp->ub_frame.seq_cnt = 0;
8964                 ubp->ub_frame.df_ctl = 0;
8965                 ubp->ub_frame.seq_id = 0;
8966                 ubp->ub_frame.rx_id = 0xffff;
8967                 ubp->ub_frame.ox_id = 0xffff;
8968 
8969                 /* set payload. */
8970                 rn = (fc_rscn_t *)ubp->ub_buffer;
8971                 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8972 
8973                 rn->rscn_code = LA_ELS_RSCN;
8974                 rn->rscn_len = 4;
8975                 rn->rscn_payload_len = 8;
8976                 d_id.b.al_pa = LSB(mb[2]);
8977                 d_id.b.area = MSB(mb[2]);
8978                 d_id.b.domain = LSB(mb[1]);
8979                 af->aff_d_id = d_id.b24;
8980                 af->aff_format = MSB(mb[1]);
8981 
8982                 EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8983                     af->aff_d_id);
8984 
8985                 ql_update_rscn(ha, af);
8986 
8987                 QL_UB_LOCK(ha);
8988                 sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8989                 QL_UB_UNLOCK(ha);
8990                 ql_add_link_b(done_q, &sp->cmd);
8991         }
8992 
8993         if (ubp == NULL) {
8994                 EL(ha, "Failed, get_unsolicited_buffer\n");
8995         } else {
8996                 /*EMPTY*/
8997                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8998         }
8999 }
9000 
9001 /*
9002  * ql_update_rscn
9003  *      Update devices from received RSCN.
9004  *
9005  * Input:
9006  *      ha:     adapter state pointer.
9007  *      af:     pointer to RSCN data.
9008  *
9009  * Context:
9010  *      Interrupt or Kernel context, no mailbox commands allowed.
9011  */
9012 static void
9013 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9014 {
9015         ql_link_t       *link;
9016         uint16_t        index;
9017         ql_tgt_t        *tq;
9018 
9019         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9020 
9021         if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9022                 port_id_t d_id;
9023 
9024                 d_id.r.rsvd_1 = 0;
9025                 d_id.b24 = af->aff_d_id;
9026 
9027                 tq = ql_d_id_to_queue(ha, d_id);
9028                 if (tq) {
9029                         EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
9030                         DEVICE_QUEUE_LOCK(tq);
9031                         tq->flags |= TQF_RSCN_RCVD;
9032                         DEVICE_QUEUE_UNLOCK(tq);
9033                 }
9034                 QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
9035                     ha->instance);
9036 
9037                 return;
9038         }
9039 
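             /*
              * For fabric, area and domain formats, walk every known
              * device and flag those whose d_id falls within the affected
              * address.
              */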
9040         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9041                 for (link = ha->dev[index].first; link != NULL;
9042                     link = link->next) {
9043                         tq = link->base_address;
9044 
9045                         switch (af->aff_format) {
9046                         case FC_RSCN_FABRIC_ADDRESS:
9047                                 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9048                                         EL(ha, "SD_RSCN_RCVD %xh RFA\n",
9049                                             tq->d_id.b24);
9050                                         DEVICE_QUEUE_LOCK(tq);
9051                                         tq->flags |= TQF_RSCN_RCVD;
9052                                         DEVICE_QUEUE_UNLOCK(tq);
9053                                 }
9054                                 break;
9055 
9056                         case FC_RSCN_AREA_ADDRESS:
9057                                 if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
9058                                         EL(ha, "SD_RSCN_RCVD %xh RAA\n",
9059                                             tq->d_id.b24);
9060                                         DEVICE_QUEUE_LOCK(tq);
9061                                         tq->flags |= TQF_RSCN_RCVD;
9062                                         DEVICE_QUEUE_UNLOCK(tq);
9063                                 }
9064                                 break;
9065 
9066                         case FC_RSCN_DOMAIN_ADDRESS:
9067                                 if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
9068                                         EL(ha, "SD_RSCN_RCVD %xh RDA\n",
9069                                             tq->d_id.b24);
9070                                         DEVICE_QUEUE_LOCK(tq);
9071                                         tq->flags |= TQF_RSCN_RCVD;
9072                                         DEVICE_QUEUE_UNLOCK(tq);
9073                                 }
9074                                 break;
9075 
9076                         default:
9077                                 break;
9078                         }
9079                 }
9080         }
9081         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9082 }
9083 
9084 /*
9085  * ql_process_rscn
9086  *
9087  * Input:
9088  *      ha:     adapter state pointer.
9089  *      af:     RSCN payload pointer.
9090  *
9091  * Context:
9092  *      Kernel context.
9093  */
9094 static int
9095 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9096 {
9097         int             sendit;
9098         int             sendup = 1;
9099         ql_link_t       *link;
9100         uint16_t        index;
9101         ql_tgt_t        *tq;
9102 
9103         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9104 
9105         if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9106                 port_id_t d_id;
9107 
9108                 d_id.r.rsvd_1 = 0;
9109                 d_id.b24 = af->aff_d_id;
9110 
9111                 tq = ql_d_id_to_queue(ha, d_id);
9112                 if (tq) {
9113                         sendup = ql_process_rscn_for_device(ha, tq);
9114                 }
9115 
9116                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9117 
9118                 return (sendup);
9119         }
9120 
9121         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9122                 for (link = ha->dev[index].first; link != NULL;
9123                     link = link->next) {
9124 
9125                         tq = link->base_address;
9126                         if (tq == NULL) {
9127                                 continue;
9128                         }
9129 
9130                         switch (af->aff_format) {
9131                         case FC_RSCN_FABRIC_ADDRESS:
9132                                 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9133                                         sendit = ql_process_rscn_for_device(
9134                                             ha, tq);
9135                                         if (sendup) {
9136                                                 sendup = sendit;
9137                                         }
9138                                 }
9139                                 break;
9140 
9141                         case FC_RSCN_AREA_ADDRESS:
9142                                 if ((tq->d_id.b24 & 0xffff00) ==
9143                                     af->aff_d_id) {
9144                                         sendit = ql_process_rscn_for_device(
9145                                             ha, tq);
9146 
9147                                         if (sendup) {
9148                                                 sendup = sendit;
9149                                         }
9150                                 }
9151                                 break;
9152 
9153                         case FC_RSCN_DOMAIN_ADDRESS:
9154                                 if ((tq->d_id.b24 & 0xff0000) ==
9155                                     af->aff_d_id) {
9156                                         sendit = ql_process_rscn_for_device(
9157                                             ha, tq);
9158 
9159                                         if (sendup) {
9160                                                 sendup = sendit;
9161                                         }
9162                                 }
9163                                 break;
9164 
9165                         default:
9166                                 break;
9167                         }
9168                 }
9169         }
9170 
9171         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9172 
9173         return (sendup);
9174 }
9175 
9176 /*
9177  * ql_process_rscn_for_device
9178  *
9179  * Input:
9180  *      ha:     adapter state pointer.
9181  *      tq:     target queue pointer.
9182  *
9183  * Context:
9184  *      Kernel context.
9185  */
9186 static int
9187 ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
9188 {
9189         int sendup = 1;
9190 
9191         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9192 
9193         DEVICE_QUEUE_LOCK(tq);
9194 
9195         /*
9196          * Let FCP-2 compliant devices continue I/O
9197          * using their own low-level recovery.
9198          */
9199         if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
9200             (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
9201                 /*
9202                  * Cause ADISC to go out
9203                  */
9204                 DEVICE_QUEUE_UNLOCK(tq);
9205 
9206                 (void) ql_get_port_database(ha, tq, PDF_NONE);
9207 
9208                 DEVICE_QUEUE_LOCK(tq);
9209                 tq->flags &= ~TQF_RSCN_RCVD;
9210 
9211         } else if (tq->loop_id != PORT_NO_LOOP_ID) {
9212                 if (tq->d_id.b24 != BROADCAST_ADDR) {
9213                         tq->flags |= TQF_NEED_AUTHENTICATION;
9214                 }
9215 
9216                 DEVICE_QUEUE_UNLOCK(tq);
9217 
9218                 (void) ql_abort_device(ha, tq, 1);
9219 
9220                 DEVICE_QUEUE_LOCK(tq);
9221 
9222                 if (tq->outcnt) {
9223                         sendup = 0;
9224                 } else {
9225                         tq->flags &= ~TQF_RSCN_RCVD;
9226                 }
9227         } else {
9228                 tq->flags &= ~TQF_RSCN_RCVD;
9229         }
9230 
9231         if (sendup) {
9232                 if (tq->d_id.b24 != BROADCAST_ADDR) {
9233                         tq->flags |= TQF_NEED_AUTHENTICATION;
9234                 }
9235         }
9236 
9237         DEVICE_QUEUE_UNLOCK(tq);
9238 
9239         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9240 
9241         return (sendup);
9242 }
9243 
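     /*
      * ql_handle_rscn_update
      *      Gets the firmware's device ID list and builds device queues
      *      for any newly appearing devices, sending up a PLOGI for each.
      *
      * Input:
      *      ha:     adapter state pointer.
      *
      * Returns:
      *      ql local function return status code.
      *
      * Context:
      *      Kernel context.
      */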
9244 static int
9245 ql_handle_rscn_update(ql_adapter_state_t *ha)
9246 {
9247         int                     rval;
9248         ql_tgt_t                *tq;
9249         uint16_t                index, loop_id;
9250         ql_dev_id_list_t        *list;
9251         uint32_t                list_size;
9252         port_id_t               d_id;
9253         ql_mbx_data_t           mr;
9254         ql_head_t               done_q = { NULL, NULL };
9255 
9256         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9257 
9258         list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
9259         list = kmem_zalloc(list_size, KM_SLEEP);
9260         if (list == NULL) {
9261                 rval = QL_MEMORY_ALLOC_FAILED;
9262                 EL(ha, "kmem_zalloc failed=%xh\n", rval);
9263                 return (rval);
9264         }
9265 
9266         /*
9267          * Get data from RISC code d_id list to init each device queue.
9268          */
9269         rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
9270         if (rval != QL_SUCCESS) {
9271                 kmem_free(list, list_size);
9272                 EL(ha, "get_id_list failed=%xh\n", rval);
9273                 return (rval);
9274         }
9275 
9276         /* Acquire adapter state lock. */
9277         ADAPTER_STATE_LOCK(ha);
9278 
9279         /* Check for new devices */
9280         for (index = 0; index < mr.mb[1]; index++) {
9281                 ql_dev_list(ha, list, index, &d_id, &loop_id);
9282 
9283                 if (VALID_DEVICE_ID(ha, loop_id)) {
9284                         d_id.r.rsvd_1 = 0;
9285 
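                                     /* Skip devices that already have a queue. */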
9286                         tq = ql_d_id_to_queue(ha, d_id);
9287                         if (tq != NULL) {
9288                                 continue;
9289                         }
9290 
9291                         tq = ql_dev_init(ha, d_id, loop_id);
9292 
9293                         /* Test for fabric device. */
9294                         if (d_id.b.domain != ha->d_id.b.domain ||
9295                             d_id.b.area != ha->d_id.b.area) {
9296                                 tq->flags |= TQF_FABRIC_DEVICE;
9297                         }
9298 
9299                         ADAPTER_STATE_UNLOCK(ha);
9300                         if (ql_get_port_database(ha, tq, PDF_NONE) !=
9301                             QL_SUCCESS) {
9302                                 tq->loop_id = PORT_NO_LOOP_ID;
9303                         }
9304                         ADAPTER_STATE_LOCK(ha);
9305 
9306                         /*
9307                          * Send up a PLOGI about the new device
9308                          */
9309                         if (VALID_DEVICE_ID(ha, tq->loop_id)) {
9310                                 (void) ql_send_plogi(ha, tq, &done_q);
9311                         }
9312                 }
9313         }
9314 
9315         /* Release adapter state lock. */
9316         ADAPTER_STATE_UNLOCK(ha);
9317 
9318         if (done_q.first != NULL) {
9319                 ql_done(done_q.first);
9320         }
9321 
9322         kmem_free(list, list_size);
9323 
9324         if (rval != QL_SUCCESS) {
9325                 EL(ha, "failed=%xh\n", rval);
9326         } else {
9327                 /*EMPTY*/
9328                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9329         }
9330 
9331         return (rval);
9332 }
9333 
9334 /*
9335  * ql_free_unsolicited_buffer
9336  *      Frees allocated buffer.
9337  *
9338  * Input:
9339  *      ha = adapter state pointer.
9340  *      ubp = unsolicited buffer pointer.
9341  *      ADAPTER_STATE_LOCK must be already obtained.
9342  *
9343  * Context:
9344  *      Kernel context.
9345  */
9346 static void
9347 ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
9348 {
9349         ql_srb_t        *sp;
9350         int             status;
9351 
9352         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9353 
9354         sp = ubp->ub_fca_private;
9355         if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
9356                 /* Disconnect IP from system buffers. */
9357                 if (ha->flags & IP_INITIALIZED) {
9358                         ADAPTER_STATE_UNLOCK(ha);
9359                         status = ql_shutdown_ip(ha);
9360                         ADAPTER_STATE_LOCK(ha);
9361                         if (status != QL_SUCCESS) {
9362                                 cmn_err(CE_WARN,
9363                                     "!Qlogic %s(%d): Failed to shutdown IP",
9364                                     QL_NAME, ha->instance);
9365                                 return;
9366                         }
9367 
9368                         ha->flags &= ~IP_ENABLED;
9369                 }
9370 
9371                 ql_free_phys(ha, &sp->ub_buffer);
9372         } else {
9373                 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
9374         }
9375 
9376         kmem_free(sp, sizeof (ql_srb_t));
9377         kmem_free(ubp, sizeof (fc_unsol_buf_t));
9378 
9379         if (ha->ub_allocated != 0) {
9380                 ha->ub_allocated--;
9381         }
9382 
9383         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9384 }
9385 
9386 /*
9387  * ql_get_unsolicited_buffer
9388  *      Locates a free unsolicited buffer.
9389  *
9390  * Input:
9391  *      ha = adapter state pointer.
9392  *      type = buffer type.
9393  *
9394  * Returns:
9395  *      Unsolicited buffer pointer.
9396  *
9397  * Context:
9398  *      Interrupt or Kernel context, no mailbox commands allowed.
9399  */
9400 fc_unsol_buf_t *
9401 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9402 {
9403         fc_unsol_buf_t  *ubp;
9404         ql_srb_t        *sp;
9405         uint16_t        index;
9406 
9407         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9408 
9409         /* Locate a buffer to use. */
9410         ubp = NULL;
9411 
9412         QL_UB_LOCK(ha);
9413         for (index = 0; index < QL_UB_LIMIT; index++) {
9414                 ubp = ha->ub_array[index];
9415                 if (ubp != NULL) {
9416                         sp = ubp->ub_fca_private;
9417                         if ((sp->ub_type == type) &&
9418                             (sp->flags & SRB_UB_IN_FCA) &&
9419                             (!(sp->flags & (SRB_UB_CALLBACK |
9420                             SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9421                                 sp->flags |= SRB_UB_ACQUIRED;
9422                                 ubp->ub_resp_flags = 0;
9423                                 break;
9424                         }
9425                         ubp = NULL;
9426                 }
9427         }
9428         QL_UB_UNLOCK(ha);
9429 
9430         if (ubp) {
9431                 ubp->ub_resp_token = NULL;
9432                 ubp->ub_class = FC_TRAN_CLASS3;
9433         }
9434 
9435         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9436 
9437         return (ubp);
9438 }
9439 
9440 /*
9441  * ql_ub_frame_hdr
9442  *      Processes received unsolicited buffers from ISP.
9443  *
9444  * Input:
9445  *      ha:     adapter state pointer.
9446  *      tq:     target queue pointer.
9447  *      index:  unsolicited buffer array index.
9448  *      done_q: done queue pointer.
9449  *
9450  * Returns:
9451  *      ql local function return status code.
9452  *
9453  * Context:
9454  *      Interrupt or Kernel context, no mailbox commands allowed.
9455  */
9456 int
9457 ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
9458     ql_head_t *done_q)
9459 {
9460         fc_unsol_buf_t  *ubp;
9461         ql_srb_t        *sp;
9462         uint16_t        loop_id;
9463         int             rval = QL_FUNCTION_FAILED;
9464 
9465         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9466 
9467         QL_UB_LOCK(ha);
9468         if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
9469                 EL(ha, "Invalid buffer index=%xh\n", index);
9470                 QL_UB_UNLOCK(ha);
9471                 return (rval);
9472         }
9473 
9474         sp = ubp->ub_fca_private;
9475         if (sp->flags & SRB_UB_FREE_REQUESTED) {
9476                 EL(ha, "buffer freed index=%xh\n", index);
9477                 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
9478                     SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
9479 
9480                 sp->flags |= SRB_UB_IN_FCA;
9481 
9482                 QL_UB_UNLOCK(ha);
9483                 return (rval);
9484         }
9485 
9486         if ((sp->handle == index) &&
9487             (sp->flags & SRB_UB_IN_ISP) &&
9488             (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
9489             (!(sp->flags & SRB_UB_ACQUIRED))) {
9490                 /* set broadcast D_ID */
9491                 loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
9492                     BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
9493                 if (tq->ub_loop_id == loop_id) {
9494                         if (ha->topology & QL_FL_PORT) {
9495                                 ubp->ub_frame.d_id = 0x000000;
9496                         } else {
9497                                 ubp->ub_frame.d_id = 0xffffff;
9498                         }
9499                 } else {
9500                         ubp->ub_frame.d_id = ha->d_id.b24;
9501                 }
9502                 ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
9503                 ubp->ub_frame.rsvd = 0;
9504                 ubp->ub_frame.s_id = tq->d_id.b24;
9505                 ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
9506                 ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
9507                 ubp->ub_frame.df_ctl = 0;
9508                 ubp->ub_frame.seq_id = tq->ub_seq_id;
9509                 ubp->ub_frame.rx_id = 0xffff;
9510                 ubp->ub_frame.ox_id = 0xffff;
9511                 ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
9512                     sp->ub_size : tq->ub_sequence_length;
9513                 ubp->ub_frame.ro = tq->ub_frame_ro;
9514 
9515                 tq->ub_sequence_length = (uint16_t)
9516                     (tq->ub_sequence_length - ubp->ub_bufsize);
9517                 tq->ub_frame_ro += ubp->ub_bufsize;
9518                 tq->ub_seq_cnt++;
9519 
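                     /*
                      * Build the frame control bits: the last frame of the
                      * sequence gets F_CTL_END_SEQ (plus F_CTL_FIRST_SEQ if
                      * it is also the first frame), while the first frame
                      * of a multi-frame sequence gets F_CTL_FIRST_SEQ and a
                      * DF_CTL of 0x20 (network header present).
                      */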
9520                 if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
9521                         if (tq->ub_seq_cnt == 1) {
9522                                 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9523                                     F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
9524                         } else {
9525                                 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9526                                     F_CTL_END_SEQ;
9527                         }
9528                         tq->ub_total_seg_cnt = 0;
9529                 } else if (tq->ub_seq_cnt == 1) {
9530                         ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9531                             F_CTL_FIRST_SEQ;
9532                         ubp->ub_frame.df_ctl = 0x20;
9533                 }
9534 
9535                 QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
9536                     ha->instance, ubp->ub_frame.d_id);
9537                 QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
9538                     ha->instance, ubp->ub_frame.s_id);
9539                 QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
9540                     ha->instance, ubp->ub_frame.seq_cnt);
9541                 QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
9542                     ha->instance, ubp->ub_frame.seq_id);
9543                 QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
9544                     ha->instance, ubp->ub_frame.ro);
9545                 QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
9546                     ha->instance, ubp->ub_frame.f_ctl);
9547                 QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
9548                     ha->instance, ubp->ub_bufsize);
9549                 QL_DUMP_3(ubp->ub_buffer, 8,
9550                     ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);
9551 
9552                 sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
9553                 ql_add_link_b(done_q, &sp->cmd);
9554                 rval = QL_SUCCESS;
9555         } else {
9556                 if (sp->handle != index) {
9557                         EL(ha, "Bad index=%xh, expect=%xh\n", index,
9558                             sp->handle);
9559                 }
9560                 if ((sp->flags & SRB_UB_IN_ISP) == 0) {
9561                         EL(ha, "buffer was already in driver, index=%xh\n",
9562                             index);
9563                 }
9564                 if (sp->ub_type != FC_TYPE_IS8802_SNAP) {
9565                         EL(ha, "buffer was not an IP buffer, index=%xh\n",
9566                             index);
9567                 }
9568                 if (sp->flags & SRB_UB_ACQUIRED) {
9569                         EL(ha, "buffer was being used by driver, index=%xh\n",
9570                             index);
9571                 }
9572         }
9573         QL_UB_UNLOCK(ha);
9574 
9575         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9576 
9577         return (rval);
9578 }
9579 
9580 /*
9581  * ql_timer
9582  *      One second timer function.
9583  *
9584  * Input:
9585  *      ql_hba.first = first link in adapter list.
9586  *
9587  * Context:
9588  *      Interrupt context, no mailbox commands allowed.
9589  */
9590 static void
9591 ql_timer(void *arg)
9592 {
9593         ql_link_t               *link;
9594         uint32_t                set_flags;
9595         uint32_t                reset_flags;
9596         ql_adapter_state_t      *ha = NULL, *vha;
9597 
9598         QL_PRINT_6(CE_CONT, "started\n");
9599 
9600         /* Acquire global state lock. */
9601         GLOBAL_STATE_LOCK();
9602         if (ql_timer_timeout_id == NULL) {
9603                 /* Release global state lock. */
9604                 GLOBAL_STATE_UNLOCK();
9605                 return;
9606         }
9607 
9608         for (link = ql_hba.first; link != NULL; link = link->next) {
9609                 ha = link->base_address;
9610 
9611                 /* Skip adapter if suspended or stalled. */
9612                 ADAPTER_STATE_LOCK(ha);
9613                 if (ha->flags & ADAPTER_SUSPENDED ||
9614                     ha->task_daemon_flags & DRIVER_STALL) {
9615                         ADAPTER_STATE_UNLOCK(ha);
9616                         continue;
9617                 }
9618                 ha->flags |= ADAPTER_TIMER_BUSY;
9619                 ADAPTER_STATE_UNLOCK(ha);
9620 
9621                 QL_PM_LOCK(ha);
9622                 if (ha->power_level != PM_LEVEL_D0) {
9623                         QL_PM_UNLOCK(ha);
9624 
9625                         ADAPTER_STATE_LOCK(ha);
9626                         ha->flags &= ~ADAPTER_TIMER_BUSY;
9627                         ADAPTER_STATE_UNLOCK(ha);
9628                         continue;
9629                 }
9630                 ha->busy++;
9631                 QL_PM_UNLOCK(ha);
9632 
9633                 set_flags = 0;
9634                 reset_flags = 0;
9635 
9636                 /* Port retry timer handler. */
9637                 if (LOOP_READY(ha)) {
9638                         ADAPTER_STATE_LOCK(ha);
9639                         if (ha->port_retry_timer != 0) {
9640                                 ha->port_retry_timer--;
9641                                 if (ha->port_retry_timer == 0) {
9642                                         set_flags |= PORT_RETRY_NEEDED;
9643                                 }
9644                         }
9645                         ADAPTER_STATE_UNLOCK(ha);
9646                 }
9647 
9648                 /* Loop down timer handler. */
9649                 if (LOOP_RECONFIGURE(ha) == 0) {
9650                         if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9651                                 ha->loop_down_timer--;
9652                                 /*
9653                                  * give the firmware loop down dump flag
9654                                  * a chance to work.
9655                                  */
9656                                 if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9657                                         if (CFG_IST(ha,
9658                                             CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9659                                                 (void) ql_binary_fw_dump(ha,
9660                                                     TRUE);
9661                                         }
9662                                         EL(ha, "loop_down_reset, "
9663                                             "isp_abort_needed\n");
9664                                         set_flags |= ISP_ABORT_NEEDED;
9665                                 }
9666                         }
9667                         if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9668                                 /* Command abort time handler. */
9669                                 if (ha->loop_down_timer ==
9670                                     ha->loop_down_abort_time) {
9671                                         ADAPTER_STATE_LOCK(ha);
9672                                         ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9673                                         ADAPTER_STATE_UNLOCK(ha);
9674                                         set_flags |= ABORT_QUEUES_NEEDED;
9675                                         EL(ha, "loop_down_abort_time, "
9676                                             "abort_queues_needed\n");
9677                                 }
9678 
9679                                 /* Watchdog timer handler. */
9680                                 if (ha->watchdog_timer == 0) {
9681                                         ha->watchdog_timer = WATCHDOG_TIME;
9682                                 } else if (LOOP_READY(ha)) {
9683                                         ha->watchdog_timer--;
9684                                         if (ha->watchdog_timer == 0) {
9685                                                 for (vha = ha; vha != NULL;
9686                                                     vha = vha->vp_next) {
9687                                                         ql_watchdog(vha,
9688                                                             &set_flags,
9689                                                             &reset_flags);
9690                                                 }
9691                                                 ha->watchdog_timer =
9692                                                     WATCHDOG_TIME;
9693                                         }
9694                                 }
9695                         }
9696                 }
9697 
9698                 /* Idle timer handler. */
9699                 if (!DRIVER_SUSPENDED(ha)) {
9700                         if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9701 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9702                                 set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9703 #endif
9704                                 ha->idle_timer = 0;
9705                         }
9706                         if (ha->send_plogi_timer != 0) {
9707                                 ha->send_plogi_timer--;
9708                                 if (ha->send_plogi_timer == 0) {
9709                                         set_flags |= SEND_PLOGI;
9710                                 }
9711                         }
9712                 }
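                     /*
                      * IDC restart and flash access countdown timers; when
                      * the flash access time expires, queue a driver start
                      * IDC notification.
                      */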
9713                 ADAPTER_STATE_LOCK(ha);
9714                 if (ha->idc_restart_timer != 0) {
9715                         ha->idc_restart_timer--;
9716                         if (ha->idc_restart_timer == 0) {
9717                                 ha->idc_restart_cnt = 0;
9718                                 reset_flags |= DRIVER_STALL;
9719                         }
9720                 }
9721                 if (ha->idc_flash_acc_timer != 0) {
9722                         ha->idc_flash_acc_timer--;
9723                         if (ha->idc_flash_acc_timer == 0 &&
9724                             ha->idc_flash_acc != 0) {
9725                                 ha->idc_flash_acc = 1;
9726                                 ha->idc_mb[0] = MBA_IDC_NOTIFICATION;
9727                                 ha->idc_mb[1] = 0;
9728                                 ha->idc_mb[2] = IDC_OPC_DRV_START;
9729                                 set_flags |= IDC_EVENT;
9730                         }
9731                 }
9732                 ADAPTER_STATE_UNLOCK(ha);
9733 
9734                 if (set_flags != 0 || reset_flags != 0) {
9735                         ql_awaken_task_daemon(ha, NULL, set_flags,
9736                             reset_flags);
9737                 }
9738 
9739                 if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9740                         ql_blink_led(ha);
9741                 }
9742 
9743                 /* Update the IO stats */
9744                 if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9745                         ha->xioctl->IOInputMByteCnt +=
9746                             (ha->xioctl->IOInputByteCnt / 0x100000);
9747                         ha->xioctl->IOInputByteCnt %= 0x100000;
9748                 }
9749 
9750                 if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9751                         ha->xioctl->IOOutputMByteCnt +=
9752                             (ha->xioctl->IOOutputByteCnt / 0x100000);
9753                         ha->xioctl->IOOutputByteCnt %= 0x100000;
9754                 }
9755 
9756                 if (CFG_IST(ha, CFG_CTRL_8021)) {
9757                         (void) ql_8021_idc_handler(ha);
9758                 }
9759 
9760                 ADAPTER_STATE_LOCK(ha);
9761                 ha->flags &= ~ADAPTER_TIMER_BUSY;
9762                 ADAPTER_STATE_UNLOCK(ha);
9763 
9764                 QL_PM_LOCK(ha);
9765                 ha->busy--;
9766                 QL_PM_UNLOCK(ha);
9767         }
9768 
9769         /* Restart timer, if not being stopped. */
9770         if (ql_timer_timeout_id != NULL) {
9771                 ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9772         }
9773 
9774         /* Release global state lock. */
9775         GLOBAL_STATE_UNLOCK();
9776 
9777         QL_PRINT_6(CE_CONT, "done\n");
9778 }
9779 
9780 /*
9781  * ql_timeout_insert
9782  *      Function used to insert a command block onto the
9783  *      watchdog timer queue.
9784  *
9785  *      Note: Must ensure that pkt_timeout is not zero
9786  *                      before calling ql_timeout_insert.
9787  *
9788  * Input:
9789  *      ha:     adapter state pointer.
9790  *      tq:     target queue pointer.
9791  *      sp:     SRB pointer.
9792  *      DEVICE_QUEUE_LOCK must be already obtained.
9793  *
9794  * Context:
9795  *      Kernel context.
9796  */
9797 /* ARGSUSED */
9798 static void
9799 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9800 {
9801         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9802 
9803         if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9804                 sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9805                 /*
9806                  * The timeout must be rounded up to whole watchdog
9807                  * ticks.  As an example, consider a 1 second timeout:
9808                  * a wdg_q_time of 1 could expire on the very next
9809                  * watchdog call, perhaps in as little as 1 microsecond.
9810                  * The margins added below guard against that.
9811                  */
9812                 sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9813                     WATCHDOG_TIME;
9814                 /*
9815                  * Added an additional 10 to account for the
9816                  * firmware timer drift which can occur with
9817                  * very long timeout values.
9818                  */
9819                 sp->wdg_q_time += 10;
9820 
9821                 /*
9822                  * Add 6 more to ensure the watchdog does not time out
9823                  * at the same time as the ISP RISC code timeout.
9824                  */
9825                 sp->wdg_q_time += 6;
9826 
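                     /*
                      * Illustrative example, assuming WATCHDOG_TIME is 5
                      * seconds: a 30 second pkt_timeout gives
                      * (30 + 5 - 1) / 5 = 6 ticks, plus the 10 tick
                      * firmware drift margin and the 6 tick RISC timeout
                      * skew, for a wdg_q_time of 22 ticks.
                      */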
9827                 /* Save initial time for resetting watchdog time. */
9828                 sp->init_wdg_q_time = sp->wdg_q_time;
9829 
9830                 /* Insert command onto watchdog queue. */
9831                 ql_add_link_b(&tq->wdg, &sp->wdg);
9832 
9833                 sp->flags |= SRB_WATCHDOG_ENABLED;
9834         } else {
9835                 sp->isp_timeout = 0;
9836                 sp->wdg_q_time = 0;
9837                 sp->init_wdg_q_time = 0;
9838         }
9839 
9840         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9841 }
9842 
9843 /*
9844  * ql_watchdog
9845  *      Timeout handler that runs in interrupt context.  The
9846  *      ql_adapter_state_t * argument is the adapter state pointer set
9847  *      up when the timer was initialized.  Updates the watchdog time
9848  *      of each outstanding command and aborts any command whose
9849  *      timeout has expired.
9850  *
9851  * Input:
9852  *      ha:             adapter state pointer.
9853  *      set_flags:      task daemon flags to set.
9854  *      reset_flags:    task daemon flags to reset.
9855  *
9856  * Context:
9857  *      Interrupt context, no mailbox commands allowed.
9858  */
9859 static void
9860 ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
9861 {
9862         ql_srb_t        *sp;
9863         ql_link_t       *link;
9864         ql_link_t       *next_cmd;
9865         ql_link_t       *next_device;
9866         ql_tgt_t        *tq;
9867         ql_lun_t        *lq;
9868         uint16_t        index;
9869         int             q_sane;
9870 
9871         QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);
9872 
9873         /* Loop through all targets. */
9874         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9875                 for (link = ha->dev[index].first; link != NULL;
9876                     link = next_device) {
9877                         tq = link->base_address;
9878 
9879                         /* Try to acquire device queue lock. */
9880                         if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
9881                                 next_device = NULL;
9882                                 continue;
9883                         }
9884 
9885                         next_device = link->next;
9886 
9887                         if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
9888                             (tq->port_down_retry_count == 0)) {
9889                                 /* Release device queue lock. */
9890                                 DEVICE_QUEUE_UNLOCK(tq);
9891                                 continue;
9892                         }
9893 
9894                         /* Find out if this device is in a sane state. */
9895                         if (tq->flags & (TQF_RSCN_RCVD |
9896                             TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
9897                                 q_sane = 0;
9898                         } else {
9899                                 q_sane = 1;
9900                         }
9901                         /* Loop through commands on watchdog queue. */
9902                         for (link = tq->wdg.first; link != NULL;
9903                             link = next_cmd) {
9904                                 next_cmd = link->next;
9905                                 sp = link->base_address;
9906                                 lq = sp->lun_queue;
9907 
9908                                 /*
9909                                  * For SCSI commands, if everything seems
9910                                  * to be going fine and this packet is
9911                                  * stuck only because of throttling at the
9912                                  * LUN or target level, do not decrement
9913                                  * sp->wdg_q_time.
9914                                  */
9915                                 if (ha->task_daemon_flags & STATE_ONLINE &&
9916                                     (sp->flags & SRB_ISP_STARTED) == 0 &&
9917                                     q_sane && sp->flags & SRB_FCP_CMD_PKT &&
9918                                     lq->lun_outcnt >= ha->execution_throttle) {
9919                                         continue;
9920                                 }
9921 
9922                                 if (sp->wdg_q_time != 0) {
9923                                         sp->wdg_q_time--;
9924 
9925                                         /* Timeout? */
9926                                         if (sp->wdg_q_time != 0) {
9927                                                 continue;
9928                                         }
9929 
9930                                         ql_remove_link(&tq->wdg, &sp->wdg);
9931                                         sp->flags &= ~SRB_WATCHDOG_ENABLED;
9932 
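                                             /*
                                              * A timeout on a command that
                                              * was already started in the
                                              * ISP ends the watchdog scan.
                                              */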
9933                                         if (sp->flags & SRB_ISP_STARTED) {
9934                                                 ql_cmd_timeout(ha, tq, sp,
9935                                                     set_flags, reset_flags);
9936 
9937                                                 DEVICE_QUEUE_UNLOCK(tq);
9938                                                 tq = NULL;
9939                                                 next_cmd = NULL;
9940                                                 next_device = NULL;
9941                                                 index = DEVICE_HEAD_LIST_SIZE;
9942                                         } else {
9943                                                 ql_cmd_timeout(ha, tq, sp,
9944                                                     set_flags, reset_flags);
9945                                         }
9946                                 }
9947                         }
9948 
9949                         /* Release device queue lock. */
9950                         if (tq != NULL) {
9951                                 DEVICE_QUEUE_UNLOCK(tq);
9952                         }
9953                 }
9954         }
9955 
9956         QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
9957 }
9958 
9959 /*
9960  * ql_cmd_timeout
9961  *      Command timeout handler.
9962  *
9963  * Input:
9964  *      ha:             adapter state pointer.
9965  *      tq:             target queue pointer.
9966  *      sp:             SRB pointer.
9967  *      set_flags:      task daemon flags to set.
9968  *      reset_flags:    task daemon flags to reset.
9969  *
9970  * Context:
9971  *      Interrupt context, no mailbox commands allowed.
9972  */
9973 /* ARGSUSED */
9974 static void
9975 ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
9976     uint32_t *set_flags, uint32_t *reset_flags)
9977 {
9978         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9979 
9980         if (!(sp->flags & SRB_ISP_STARTED)) {
9981 
9982                 EL(ha, "command timed out in driver = %ph\n", (void *)sp);
9983 
9984                 REQUEST_RING_LOCK(ha);
9985 
9986                 /* if it's on a queue */
9987                 if (sp->cmd.head) {
9988                         /*
9989                          * The pending_cmds queue needs to be
9990                          * protected by the request ring lock.
9991                          */
9992                         ql_remove_link(sp->cmd.head, &sp->cmd);
9993                 }
9994                 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
9995 
9996                 /* Release request ring and device queue locks. */
9997                 REQUEST_RING_UNLOCK(ha);
9998                 DEVICE_QUEUE_UNLOCK(tq);
9999 
10000                 /* Set timeout status */
10001                 sp->pkt->pkt_reason = CS_TIMEOUT;
10002 
10003                 /* Ensure no retry */
10004                 sp->flags &= ~SRB_RETRY;
10005 
10006                 /* Call done routine to handle completion. */
10007                 ql_done(&sp->cmd);
10008 
10009                 DEVICE_QUEUE_LOCK(tq);
10010         } else if (CFG_IST(ha, CFG_CTRL_8021)) {
10011                 int             rval;
10012                 uint32_t        index;
10013 
10014                 EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
10015                     "spf=%xh\n", (void *)sp,
10016                     (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
10017                     sp->handle & OSC_INDEX_MASK, sp->flags);
10018 
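                      /*
                       * For 8021 adapters, invalidate the outstanding
                       * command slot and attempt a per-command abort;
                       * request a full ISP abort only if the abort fails
                       * or too many timeouts have accumulated.
                       */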
10019                 DEVICE_QUEUE_UNLOCK(tq);
10020 
10021                 INTR_LOCK(ha);
10022                 ha->pha->xioctl->ControllerErrorCount++;
10023                 if (sp->handle) {
10024                         ha->pha->timeout_cnt++;
10025                         index = sp->handle & OSC_INDEX_MASK;
10026                         if (ha->pha->outstanding_cmds[index] == sp) {
10027                                 sp->request_ring_ptr->entry_type =
10028                                     INVALID_ENTRY_TYPE;
10029                                 sp->request_ring_ptr->entry_count = 0;
10030                                 ha->pha->outstanding_cmds[index] = NULL;
10031                         }
10032                         INTR_UNLOCK(ha);
10033 
10034                         rval = ql_abort_command(ha, sp);
10035                         if (rval == QL_FUNCTION_TIMEOUT ||
10036                             rval == QL_LOCK_TIMEOUT ||
10037                             rval == QL_FUNCTION_PARAMETER_ERROR ||
10038                             ha->pha->timeout_cnt > TIMEOUT_THRESHOLD) {
10039                                 *set_flags |= ISP_ABORT_NEEDED;
10040                                 EL(ha, "abort status=%xh, tc=%xh, isp_abort_"
10041                                     "needed\n", rval, ha->pha->timeout_cnt);
10042                         }
10043 
10044                         sp->handle = 0;
10045                         sp->flags &= ~SRB_IN_TOKEN_ARRAY;
10046                 } else {
10047                         INTR_UNLOCK(ha);
10048                 }
10049 
10050                 /* Set timeout status */
10051                 sp->pkt->pkt_reason = CS_TIMEOUT;
10052 
10053                 /* Ensure no retry */
10054                 sp->flags &= ~SRB_RETRY;
10055 
10056                 /* Call done routine to handle completion. */
10057                 ql_done(&sp->cmd);
10058 
10059                 DEVICE_QUEUE_LOCK(tq);
10060 
10061         } else {
10062                 EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
10063                     "spf=%xh, isp_abort_needed\n", (void *)sp,
10064                     (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
10065                     sp->handle & OSC_INDEX_MASK, sp->flags);
10066 
10067                 /* Release device queue lock. */
10068                 DEVICE_QUEUE_UNLOCK(tq);
10069 
10070                 INTR_LOCK(ha);
10071                 ha->pha->xioctl->ControllerErrorCount++;
10072                 INTR_UNLOCK(ha);
10073 
10074                 /* Set ISP needs to be reset */
10075                 sp->flags |= SRB_COMMAND_TIMEOUT;
10076 
10077                 if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
10078                         (void) ql_binary_fw_dump(ha, TRUE);
10079                 }
10080 
10081                 *set_flags |= ISP_ABORT_NEEDED;
10082 
10083                 DEVICE_QUEUE_LOCK(tq);
10084         }
10085 
10086         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10087 }
10088 
10089 /*
10090  * ql_rst_aen
10091  *      Processes asynchronous reset.
10092  *
10093  * Input:
10094  *      ha = adapter state pointer.
10095  *
10096  * Context:
10097  *      Kernel context.
10098  */
10099 static void
10100 ql_rst_aen(ql_adapter_state_t *ha)
10101 {
10102         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10103 
10104         /* Issue marker command. */
10105         (void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
10106 
10107         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10108 }
10109 
10110 /*
10111  * ql_cmd_wait
10112  *      Stall driver until all outstanding commands are returned.
10113  *
10114  * Input:
10115  *      ha = adapter state pointer.
10116  *
10117  * Context:
10118  *      Kernel context.
10119  */
10120 void
10121 ql_cmd_wait(ql_adapter_state_t *ha)
10122 {
10123         uint16_t                index;
10124         ql_link_t               *link;
10125         ql_tgt_t                *tq;
10126         ql_adapter_state_t      *vha;
10127 
10128         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10129 
10130         /* Wait for all outstanding commands to be returned. */
10131         (void) ql_wait_outstanding(ha);
10132 
10133         /*
10134          * clear out internally queued commands
10135          */
10136         for (vha = ha; vha != NULL; vha = vha->vp_next) {
10137                 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10138                         for (link = vha->dev[index].first; link != NULL;
10139                             link = link->next) {
10140                                 tq = link->base_address;
10141                                 if (tq &&
10142                                     (!(tq->prli_svc_param_word_3 &
10143                                     PRLI_W3_RETRY))) {
10144                                         (void) ql_abort_device(vha, tq, 0);
10145                                 }
10146                         }
10147                 }
10148         }
10149 
10150         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10151 }
10152 
10153 /*
10154  * ql_wait_outstanding
10155  *      Wait for all outstanding commands to complete.
10156  *
10157  * Input:
10158  *      ha = adapter state pointer.
10159  *
10160  * Returns:
10161  *      index - outstanding_cmds index of a command that did not
       *              complete, or MAX_OUTSTANDING_COMMANDS if all commands
       *              completed.
10162  *
10163  * Context:
10164  *      Kernel context.
10165  */
10166 static uint16_t
10167 ql_wait_outstanding(ql_adapter_state_t *ha)
10168 {
10169         ql_srb_t        *sp;
10170         uint16_t        index, count;
10171 
10172         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10173 
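              /*
               * Poll the outstanding command array, restarting the scan
               * after a short delay whenever a slot is still busy, until
               * the array drains or the ql_osc_wait_count retry budget
               * is exhausted.
               */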
10174         count = ql_osc_wait_count;
10175         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
10176                 if (ha->pha->pending_cmds.first != NULL) {
10177                         ql_start_iocb(ha, NULL);
10178                         index = 1;
10179                 }
10180                 if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
10181                     (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
10182                         if (count-- != 0) {
10183                                 ql_delay(ha, 10000);
10184                                 index = 0;
10185                         } else {
10186                                 EL(ha, "failed, sp=%ph, oci=%d, hdl=%xh\n",
10187                                     (void *)sp, index, sp->handle);
10188                                 break;
10189                         }
10190                 }
10191         }
10192 
10193         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10194 
10195         return (index);
10196 }
10197 
10198 /*
10199  * ql_restart_queues
10200  *      Restart device queues.
10201  *
10202  * Input:
10203  *      ha = adapter state pointer.
10204  *      DEVICE_QUEUE_LOCK must be released.
10205  *
10206  * Context:
10207  *      Interrupt or Kernel context, no mailbox commands allowed.
10208  */
10209 static void
10210 ql_restart_queues(ql_adapter_state_t *ha)
10211 {
10212         ql_link_t               *link, *link2;
10213         ql_tgt_t                *tq;
10214         ql_lun_t                *lq;
10215         uint16_t                index;
10216         ql_adapter_state_t      *vha;
10217 
10218         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10219 
10220         for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10221                 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10222                         for (link = vha->dev[index].first; link != NULL;
10223                             link = link->next) {
10224                                 tq = link->base_address;
10225 
10226                                 /* Acquire device queue lock. */
10227                                 DEVICE_QUEUE_LOCK(tq);
10228 
10229                                 tq->flags &= ~TQF_QUEUE_SUSPENDED;
10230 
10231                                 for (link2 = tq->lun_queues.first;
10232                                     link2 != NULL; link2 = link2->next) {
10233                                         lq = link2->base_address;
10234 
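                                              /*
                                               * ql_next() releases the
                                               * device queue lock, so it
                                               * is reacquired afterwards.
                                               */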
10235                                         if (lq->cmd.first != NULL) {
10236                                                 ql_next(vha, lq);
10237                                                 DEVICE_QUEUE_LOCK(tq);
10238                                         }
10239                                 }
10240 
10241                                 /* Release device queue lock. */
10242                                 DEVICE_QUEUE_UNLOCK(tq);
10243                         }
10244                 }
10245         }
10246 
10247         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10248 }
10249 
10250 /*
10251  * ql_iidma
10252  *      Set up iiDMA parameters in the firmware.
10253  *
10254  * Input:
10255  *      ha = adapter state pointer.
10256  *      DEVICE_QUEUE_LOCK must be released.
10257  *
10258  * Context:
10259  *      Interrupt or Kernel context, no mailbox commands allowed.
10260  */
10261 static void
10262 ql_iidma(ql_adapter_state_t *ha)
10263 {
10264         ql_link_t       *link;
10265         ql_tgt_t        *tq;
10266         uint16_t        index;
10267         char            buf[256];
10268         uint32_t        data;
10269 
10270         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10271 
10272         if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
10273                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10274                 return;
10275         }
10276 
10277         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10278                 for (link = ha->dev[index].first; link != NULL;
10279                     link = link->next) {
10280                         tq = link->base_address;
10281 
10282                         /* Acquire device queue lock. */
10283                         DEVICE_QUEUE_LOCK(tq);
10284 
10285                         if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
10286                                 DEVICE_QUEUE_UNLOCK(tq);
10287                                 continue;
10288                         }
10289 
10290                         tq->flags &= ~TQF_IIDMA_NEEDED;
10291 
10292                         if ((tq->loop_id > LAST_N_PORT_HDL) ||
10293                             (tq->iidma_rate == IIDMA_RATE_NDEF)) {
10294                                 DEVICE_QUEUE_UNLOCK(tq);
10295                                 continue;
10296                         }
10297 
10298                         /* Get the iiDMA persistent data */
10299                         if (tq->iidma_rate == IIDMA_RATE_INIT) {
10300                                 (void) sprintf(buf,
10301                                     "iidma-rate-%02x%02x%02x%02x%02x"
10302                                     "%02x%02x%02x", tq->port_name[0],
10303                                     tq->port_name[1], tq->port_name[2],
10304                                     tq->port_name[3], tq->port_name[4],
10305                                     tq->port_name[5], tq->port_name[6],
10306                                     tq->port_name[7]);
10307 
10308                                 if ((data = ql_get_prop(ha, buf)) ==
10309                                     0xffffffff) {
10310                                         tq->iidma_rate = IIDMA_RATE_NDEF;
10311                                 } else {
10312                                         switch (data) {
10313                                         case IIDMA_RATE_1GB:
10314                                         case IIDMA_RATE_2GB:
10315                                         case IIDMA_RATE_4GB:
10316                                         case IIDMA_RATE_10GB:
10317                                                 tq->iidma_rate = data;
10318                                                 break;
10319                                         case IIDMA_RATE_8GB:
10320                                                 if (CFG_IST(ha,
10321                                                     CFG_CTRL_25XX)) {
10322                                                         tq->iidma_rate = data;
10323                                                 } else {
10324                                                         tq->iidma_rate =
10325                                                             IIDMA_RATE_4GB;
10326                                                 }
10327                                                 break;
10328                                         default:
10329                                                 EL(ha, "invalid data for "
10330                                                     "parameter: %s: %xh\n",
10331                                                     buf, data);
10332                                                 tq->iidma_rate =
10333                                                     IIDMA_RATE_NDEF;
10334                                                 break;
10335                                         }
10336                                 }
10337                         }
10338 
10339                         /* Set the firmware's iiDMA rate */
10340                         if (tq->iidma_rate <= IIDMA_RATE_MAX &&
10341                             !(CFG_IST(ha, CFG_CTRL_8081))) {
10342                                 data = ql_iidma_rate(ha, tq->loop_id,
10343                                     &tq->iidma_rate, EXT_IIDMA_MODE_SET);
10344                                 if (data != QL_SUCCESS) {
10345                                         EL(ha, "mbx failed: %xh\n", data);
10346                                 }
10347                         }
10348 
10349                         /* Release device queue lock. */
10350                         DEVICE_QUEUE_UNLOCK(tq);
10351                 }
10352         }
10353 
10354         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10355 }
10356 
10357 /*
10358  * ql_abort_queues
10359  *      Abort all commands on device queues.
10360  *
10361  * Input:
10362  *      ha = adapter state pointer.
10363  *
10364  * Context:
10365  *      Interrupt or Kernel context, no mailbox commands allowed.
10366  */
10367 static void
10368 ql_abort_queues(ql_adapter_state_t *ha)
10369 {
10370         ql_link_t               *link;
10371         ql_tgt_t                *tq;
10372         ql_srb_t                *sp;
10373         uint16_t                index;
10374         ql_adapter_state_t      *vha;
10375 
10376         QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10377 
10378         /* Return all commands in outstanding command list. */
10379         INTR_LOCK(ha);
10380 
10381         /* Return outstanding commands with port unavailable status. */
10382         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
10383                 if (ha->pending_cmds.first != NULL) {
10384                         INTR_UNLOCK(ha);
10385                         ql_start_iocb(ha, NULL);
10386                         /* Delay for system */
10387                         ql_delay(ha, 10000);
10388                         INTR_LOCK(ha);
10389                         index = 1;
10390                 }
10391                 sp = ha->outstanding_cmds[index];
10392 
10393                 /* skip devices capable of FCP2 retries */
10394                 if ((sp != NULL) &&
10395                     ((tq = sp->lun_queue->target_queue) != NULL) &&
10396                     (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
10397                         ha->outstanding_cmds[index] = NULL;
10398                         sp->handle = 0;
10399                         sp->flags &= ~SRB_IN_TOKEN_ARRAY;
10400 
10401                         INTR_UNLOCK(ha);
10402 
10403                         /* Set ending status. */
10404                         sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10405                         sp->flags |= SRB_ISP_COMPLETED;
10406 
10407                         /* Call done routine to handle completions. */
10408                         sp->cmd.next = NULL;
10409                         ql_done(&sp->cmd);
10410 
10411                         INTR_LOCK(ha);
10412                 }
10413         }
10414         INTR_UNLOCK(ha);
10415 
10416         for (vha = ha; vha != NULL; vha = vha->vp_next) {
10417                 QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
10418                     vha->instance, vha->vp_index);
10419                 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10420                         for (link = vha->dev[index].first; link != NULL;
10421                             link = link->next) {
10422                                 tq = link->base_address;
10423                                 /* skip devices capable of FCP2 retries */
10424                                 if (!(tq->prli_svc_param_word_3 &
10425                                     PRLI_W3_RETRY)) {
10426                                         /*
10427                                          * Set port unavailable status and
10428                                          * return all commands on the
10429                                          * device's queues.
10430                                          */
10431                                         ql_abort_device_queues(ha, tq);
10432                                 }
10433                         }
10434                 }
10435         }
10436         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10437 }
10438 
10439 /*
10440  * ql_abort_device_queues
10441  *      Abort all commands on device queues.
10442  *
10443  * Input:
10444  *      ha = adapter state pointer.
       *      tq = target queue pointer.
10445  *
10446  * Context:
10447  *      Interrupt or Kernel context, no mailbox commands allowed.
10448  */
10449 static void
10450 ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
10451 {
10452         ql_link_t       *lun_link, *cmd_link;
10453         ql_srb_t        *sp;
10454         ql_lun_t        *lq;
10455 
10456         QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10457 
10458         DEVICE_QUEUE_LOCK(tq);
10459 
10460         for (lun_link = tq->lun_queues.first; lun_link != NULL;
10461             lun_link = lun_link->next) {
10462                 lq = lun_link->base_address;
10463 
10464                 cmd_link = lq->cmd.first;
10465                 while (cmd_link != NULL) {
10466                         sp = cmd_link->base_address;
10467 
10468                         if (sp->flags & SRB_ABORT) {
10469                                 cmd_link = cmd_link->next;
10470                                 continue;
10471                         }
10472 
10473                         /* Remove srb from device cmd queue. */
10474                         ql_remove_link(&lq->cmd, &sp->cmd);
10475 
10476                         sp->flags &= ~SRB_IN_DEVICE_QUEUE;
10477 
10478                         DEVICE_QUEUE_UNLOCK(tq);
10479 
10480                         /* Set ending status. */
10481                         sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10482 
10483                         /* Call done routine to handle completion. */
10484                         ql_done(&sp->cmd);
10485 
10486                         /* Delay for system */
10487                         ql_delay(ha, 10000);
10488 
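                              /*
                               * The device queue lock was dropped for the
                               * completion, so restart the scan from the
                               * head of the LUN command queue.
                               */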
10489                         DEVICE_QUEUE_LOCK(tq);
10490                         cmd_link = lq->cmd.first;
10491                 }
10492         }
10493         DEVICE_QUEUE_UNLOCK(tq);
10494 
10495         QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
10496 }
10497 
10498 /*
10499  * ql_loop_resync
10500  *      Resync with fibre channel devices.
10501  *
10502  * Input:
10503  *      ha = adapter state pointer.
10504  *      DEVICE_QUEUE_LOCK must be released.
10505  *
10506  * Returns:
10507  *      ql local function return status code.
10508  *
10509  * Context:
10510  *      Kernel context.
10511  */
10512 static int
10513 ql_loop_resync(ql_adapter_state_t *ha)
10514 {
10515         int rval;
10516 
10517         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10518 
10519         if (ha->flags & IP_INITIALIZED) {
10520                 (void) ql_shutdown_ip(ha);
10521         }
10522 
10523         rval = ql_fw_ready(ha, 10);
10524 
10525         TASK_DAEMON_LOCK(ha);
10526         ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10527         TASK_DAEMON_UNLOCK(ha);
10528 
10529         /* Set loop online, if it really is. */
10530         if (rval == QL_SUCCESS) {
10531                 ql_loop_online(ha);
10532                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10533         } else {
10534                 EL(ha, "failed, rval = %xh\n", rval);
10535         }
10536 
10537         return (rval);
10538 }
10539 
10540 /*
10541  * ql_loop_online
10542  *      Set loop online status if it really is online.
10543  *
10544  * Input:
10545  *      ha = adapter state pointer.
10546  *      DEVICE_QUEUE_LOCK must be released.
10547  *
10548  * Context:
10549  *      Kernel context.
10550  */
10551 void
10552 ql_loop_online(ql_adapter_state_t *ha)
10553 {
10554         ql_adapter_state_t      *vha;
10555 
10556         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10557 
10558         /* Inform the FC Transport that the hardware is online. */
10559         for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10560                 if (!(vha->task_daemon_flags &
10561                     (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
10562                         /* Restart IP if it was shutdown. */
10563                         if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
10564                             !(vha->flags & IP_INITIALIZED)) {
10565                                 (void) ql_initialize_ip(vha);
10566                                 ql_isp_rcvbuf(vha);
10567                         }
10568 
10569                         if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
10570                             FC_PORT_STATE_MASK(vha->state) !=
10571                             FC_STATE_ONLINE) {
10572                                 vha->state = FC_PORT_SPEED_MASK(vha->state);
10573                                 if (vha->topology & QL_LOOP_CONNECTION) {
10574                                         vha->state |= FC_STATE_LOOP;
10575                                 } else {
10576                                         vha->state |= FC_STATE_ONLINE;
10577                                 }
10578                                 TASK_DAEMON_LOCK(ha);
10579                                 vha->task_daemon_flags |= FC_STATE_CHANGE;
10580                                 TASK_DAEMON_UNLOCK(ha);
10581                         }
10582                 }
10583         }
10584 
10585         ql_awaken_task_daemon(ha, NULL, 0, 0);
10586 
10587         /* Restart device queues that may have been stopped. */
10588         ql_restart_queues(ha);
10589 
10590         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10591 }
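
/*
 * When a port comes back online, only the state bits of the fc_state word
 * are replaced: the negotiated speed bits are kept (FC_PORT_SPEED_MASK)
 * and either the loop or the point-to-point/fabric online state is OR'ed
 * back in, depending on the topology, as done above.  A minimal standalone
 * sketch of that mask-and-merge using stand-in bit values (not the real
 * fctl definitions):
 */
#if 0   /* illustrative sketch only */
#include <stdint.h>

#define SPEED_MASK      0xffff0000u     /* stand-in for FC_PORT_SPEED_MASK */
#define STATE_LOOP      0x00000001u     /* stand-in for FC_STATE_LOOP */
#define STATE_ONLINE    0x00000002u     /* stand-in for FC_STATE_ONLINE */

static uint32_t
merge_port_state(uint32_t state, int loop_topology)
{
        state &= SPEED_MASK;            /* keep negotiated speed bits */
        state |= loop_topology ? STATE_LOOP : STATE_ONLINE;
        return (state);
}
#endif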
10592 
10593 /*
10594  * ql_fca_handle_to_state
10595  *      Verifies that the handle is correct.
10596  *
10597  * Input:
10598  *      fca_handle = pointer to state structure.
10599  *
10600  * Returns:
10601  *      NULL = failure
10602  *
10603  * Context:
10604  *      Kernel context.
10605  */
10606 static ql_adapter_state_t *
10607 ql_fca_handle_to_state(opaque_t fca_handle)
10608 {
10609 #ifdef  QL_DEBUG_ROUTINES
10610         ql_link_t               *link;
10611         ql_adapter_state_t      *ha = NULL;
10612         ql_adapter_state_t      *vha = NULL;
10613 
10614         for (link = ql_hba.first; link != NULL; link = link->next) {
10615                 ha = link->base_address;
10616                 for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
10617                         if ((opaque_t)vha == fca_handle) {
10618                                 ha = vha;
10619                                 break;
10620                         }
10621                 }
10622                 if ((opaque_t)ha == fca_handle) {
10623                         break;
10624                 } else {
10625                         ha = NULL;
10626                 }
10627         }
10628 
10629         if (ha == NULL) {
10630                 /*EMPTY*/
10631                 QL_PRINT_2(CE_CONT, "failed\n");
10632         }
10633 
10634 #endif /* QL_DEBUG_ROUTINES */
10635 
10636         return ((ql_adapter_state_t *)fca_handle);
10637 }
10638 
10639 /*
10640  * ql_d_id_to_queue
10641  *      Locate device queue that matches destination ID.
10642  *
10643  * Input:
10644  *      ha = adapter state pointer.
10645  *      d_id = destination ID
10646  *
10647  * Returns:
10648  *      NULL = failure
10649  *
10650  * Context:
10651  *      Interrupt or Kernel context, no mailbox commands allowed.
10652  */
10653 ql_tgt_t *
10654 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10655 {
10656         uint16_t        index;
10657         ql_tgt_t        *tq;
10658         ql_link_t       *link;
10659 
10660         /* Get head queue index. */
10661         index = ql_alpa_to_index[d_id.b.al_pa];
10662 
10663         for (link = ha->dev[index].first; link != NULL; link = link->next) {
10664                 tq = link->base_address;
10665                 if (tq->d_id.b24 == d_id.b24 &&
10666                     VALID_DEVICE_ID(ha, tq->loop_id)) {
10667                         return (tq);
10668                 }
10669         }
10670 
10671         return (NULL);
10672 }
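
/*
 * The lookup above hashes the AL_PA (low byte) of the 24-bit D_ID through
 * a translation table (ql_alpa_to_index) to pick a device head list, then
 * scans that list for an exact 24-bit match.  A simplified standalone
 * sketch of the same bucketed lookup; tgt_t, the next pointer and the
 * direct 256-entry table are stand-ins, not driver definitions:
 */
#if 0   /* illustrative sketch only */
#include <stdint.h>
#include <stddef.h>

#define DEV_HEADS       256             /* one bucket per AL_PA value */

typedef struct tgt {
        uint32_t        d_id_b24;       /* 24-bit destination ID */
        struct tgt      *next;          /* bucket chain */
} tgt_t;

static tgt_t *dev_head[DEV_HEADS];      /* bucket table, AL_PA -> chain */

static tgt_t *
d_id_lookup(uint32_t d_id_b24)
{
        /* Low byte of the D_ID is the arbitrated loop physical address. */
        uint8_t al_pa = (uint8_t)(d_id_b24 & 0xff);
        tgt_t   *tq;

        for (tq = dev_head[al_pa]; tq != NULL; tq = tq->next) {
                if (tq->d_id_b24 == d_id_b24) {
                        return (tq);    /* exact 24-bit match */
                }
        }
        return (NULL);
}
#endif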
10673 
10674 /*
10675  * ql_loop_id_to_queue
10676  *      Locate device queue that matches loop ID.
10677  *
10678  * Input:
10679  *      ha:             adapter state pointer.
10680  *      loop_id:        device loop ID.
10681  *
10682  * Returns:
10683  *      NULL = failure
10684  *
10685  * Context:
10686  *      Interrupt or Kernel context, no mailbox commands allowed.
10687  */
10688 ql_tgt_t *
10689 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10690 {
10691         uint16_t        index;
10692         ql_tgt_t        *tq;
10693         ql_link_t       *link;
10694 
10695         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10696                 for (link = ha->dev[index].first; link != NULL;
10697                     link = link->next) {
10698                         tq = link->base_address;
10699                         if (tq->loop_id == loop_id) {
10700                                 return (tq);
10701                         }
10702                 }
10703         }
10704 
10705         return (NULL);
10706 }
10707 
10708 /*
10709  * ql_kstat_update
10710  *      Updates kernel statistics.
10711  *
10712  * Input:
10713  *      ksp = driver kernel statistics structure pointer.
10714  *      rw = KSTAT_READ or KSTAT_WRITE.
10715  *
10716  * Returns:
10717  *      0 or EACCES
10718  *
10719  * Context:
10720  *      Kernel context.
10721  */
10722 /* ARGSUSED */
10723 static int
10724 ql_kstat_update(kstat_t *ksp, int rw)
10725 {
10726         int                     rval;
10727 
10728         QL_PRINT_3(CE_CONT, "started\n");
10729 
10730         if (rw == KSTAT_WRITE) {
10731                 rval = EACCES;
10732         } else {
10733                 rval = 0;
10734         }
10735 
10736         if (rval != 0) {
10737                 /*EMPTY*/
10738                 QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10739         } else {
10740                 /*EMPTY*/
10741                 QL_PRINT_3(CE_CONT, "done\n");
10742         }
10743         return (rval);
10744 }
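
/*
 * The callback above simply rejects writes; reads return success and the
 * kstat data is assumed to be kept current elsewhere.  A minimal sketch of
 * the same read-only kstat update pattern; my_kstat_update is an
 * illustration name, not a driver function:
 */
#if 0   /* illustrative sketch only */
static int
my_kstat_update(kstat_t *ksp, int rw)
{
        if (rw == KSTAT_WRITE) {
                return (EACCES);        /* statistics are read-only */
        }
        /* ks_data would be refreshed here before a read, if needed. */
        return (0);
}
#endif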
10745 
10746 /*
10747  * ql_load_flash
10748  *      Loads flash.
10749  *
10750  * Input:
10751  *      ha:     adapter state pointer.
10752  *      dp:     data pointer.
10753  *      size:   data length.
10754  *
10755  * Returns:
10756  *      ql local function return status code.
10757  *
10758  * Context:
10759  *      Kernel context.
10760  */
10761 int
10762 ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
10763 {
10764         uint32_t        cnt;
10765         int             rval;
10766         uint32_t        size_to_offset;
10767         uint32_t        size_to_compare;
10768         int             erase_all;
10769 
10770         if (CFG_IST(ha, CFG_CTRL_24258081)) {
10771                 return (ql_24xx_load_flash(ha, dp, size, 0));
10772         }
10773 
10774         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10775 
10776         size_to_compare = 0x20000;
10777         size_to_offset = 0;
10778         erase_all = 0;
10779         if (CFG_IST(ha, CFG_SBUS_CARD)) {
10780                 if (size == 0x80000) {
10781                         /* Request to flash the entire chip. */
10782                         size_to_compare = 0x80000;
10783                         erase_all = 1;
10784                 } else {
10785                         size_to_compare = 0x40000;
10786                         if (ql_flash_sbus_fpga) {
10787                                 size_to_offset = 0x40000;
10788                         }
10789                 }
10790         }
10791         if (size > size_to_compare) {
10792                 rval = QL_FUNCTION_PARAMETER_ERROR;
10793                 EL(ha, "failed=%xh\n", rval);
10794                 return (rval);
10795         }
10796 
10797         GLOBAL_HW_LOCK();
10798 
10799         /* Enable Flash Read/Write. */
10800         ql_flash_enable(ha);
10801 
10802         /* Erase flash prior to write. */
10803         rval = ql_erase_flash(ha, erase_all);
10804 
10805         if (rval == QL_SUCCESS) {
10806                 /* Write data to flash. */
10807                 for (cnt = 0; cnt < size; cnt++) {
10808                         /* Allow other system activity. */
10809                         if (cnt % 0x1000 == 0) {
10810                                 ql_delay(ha, 10000);
10811                         }
10812                         rval = ql_program_flash_address(ha,
10813                             cnt + size_to_offset, *dp++);
10814                         if (rval != QL_SUCCESS) {
10815                                 break;
10816                         }
10817                 }
10818         }
10819 
10820         ql_flash_disable(ha);
10821 
10822         GLOBAL_HW_UNLOCK();
10823 
10824         if (rval != QL_SUCCESS) {
10825                 EL(ha, "failed=%xh\n", rval);
10826         } else {
10827                 /*EMPTY*/
10828                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10829         }
10830         return (rval);
10831 }
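
/*
 * The programming loop above (and the save/restore loops in
 * ql_erase_flash() below) call ql_delay() every 0x1000 bytes so a long
 * flash operation does not monopolize the CPU.  A minimal sketch of that
 * periodic-yield pattern; do_unit_of_work() and yield_cpu() are
 * assumptions for illustration, not driver functions:
 */
#if 0   /* illustrative sketch only */
#include <stdint.h>

static int do_unit_of_work(uint32_t i); /* assumed unit of work */
static void yield_cpu(void);            /* assumed, e.g. a short delay */

static int
long_loop(uint32_t total)
{
        uint32_t i;

        for (i = 0; i < total; i++) {
                if ((i % 0x1000) == 0) {
                        yield_cpu();    /* let other activity run */
                }
                if (do_unit_of_work(i) != 0) {
                        return (-1);
                }
        }
        return (0);
}
#endif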
10832 
10833 /*
10834  * ql_program_flash_address
10835  *      Program flash address.
10836  *
10837  * Input:
10838  *      ha = adapter state pointer.
10839  *      addr = flash byte address.
10840  *      data = data to be written to flash.
10841  *
10842  * Returns:
10843  *      ql local function return status code.
10844  *
10845  * Context:
10846  *      Kernel context.
10847  */
10848 static int
10849 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10850 {
10851         int rval;
10852 
10853         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10854 
10855         if (CFG_IST(ha, CFG_SBUS_CARD)) {
10856                 ql_write_flash_byte(ha, 0x5555, 0xa0);
10857                 ql_write_flash_byte(ha, addr, data);
10858         } else {
10859                 /* Write Program Command Sequence */
10860                 ql_write_flash_byte(ha, 0x5555, 0xaa);
10861                 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10862                 ql_write_flash_byte(ha, 0x5555, 0xa0);
10863                 ql_write_flash_byte(ha, addr, data);
10864         }
10865 
10866         /* Wait for write to complete. */
10867         rval = ql_poll_flash(ha, addr, data);
10868 
10869         if (rval != QL_SUCCESS) {
10870                 EL(ha, "failed=%xh\n", rval);
10871         } else {
10872                 /*EMPTY*/
10873                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10874         }
10875         return (rval);
10876 }
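
/*
 * For the non-SBUS path above, programming a byte is the standard
 * AMD/JEDEC three-cycle unlock sequence followed by the data write;
 * ql_poll_flash() then confirms completion.  A minimal sketch of that
 * sequence, assuming a hypothetical flash_cmd_write(addr, data) accessor
 * (not a driver function):
 */
#if 0   /* illustrative sketch only */
#include <stdint.h>

static void flash_cmd_write(uint32_t addr, uint8_t data);       /* assumed */

static void
flash_program_byte(uint32_t addr, uint8_t data)
{
        flash_cmd_write(0x5555, 0xaa);  /* unlock cycle 1 */
        flash_cmd_write(0x2aaa, 0x55);  /* unlock cycle 2 */
        flash_cmd_write(0x5555, 0xa0);  /* byte-program command */
        flash_cmd_write(addr, data);    /* program the target byte */
        /* caller then polls DQ7 until it matches bit 7 of data */
}
#endif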
10877 
10878 /*
10879  * ql_erase_flash
10880  *      Erases entire flash.
10881  *
10882  * Input:
10883  *      ha = adapter state pointer.
10884  *
10885  * Returns:
10886  *      ql local function return status code.
10887  *
10888  * Context:
10889  *      Kernel context.
10890  */
10891 int
10892 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10893 {
10894         int             rval;
10895         uint32_t        erase_delay = 2000000;
10896         uint32_t        sStartAddr;
10897         uint32_t        ssize;
10898         uint32_t        cnt;
10899         uint8_t         *bfp;
10900         uint8_t         *tmp;
10901 
10902         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10903 
10904         if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10905 
10906                 if (ql_flash_sbus_fpga == 1) {
10907                         ssize = QL_SBUS_FCODE_SIZE;
10908                         sStartAddr = QL_FCODE_OFFSET;
10909                 } else {
10910                         ssize = QL_FPGA_SIZE;
10911                         sStartAddr = QL_FPGA_OFFSET;
10912                 }
10913 
10914                 erase_delay = 20000000;
10915 
10916                 bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10917 
10918                 /* Save the section of flash we're not updating to buffer */
10919                 tmp = bfp;
10920                 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10921                         /* Allow other system activity. */
10922                         if (cnt % 0x1000 == 0) {
10923                                 ql_delay(ha, 10000);
10924                         }
10925                         *tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10926                 }
10927         }
10928 
10929         /* Chip Erase Command Sequence */
10930         ql_write_flash_byte(ha, 0x5555, 0xaa);
10931         ql_write_flash_byte(ha, 0x2aaa, 0x55);
10932         ql_write_flash_byte(ha, 0x5555, 0x80);
10933         ql_write_flash_byte(ha, 0x5555, 0xaa);
10934         ql_write_flash_byte(ha, 0x2aaa, 0x55);
10935         ql_write_flash_byte(ha, 0x5555, 0x10);
10936 
10937         ql_delay(ha, erase_delay);
10938 
10939         /* Wait for erase to complete. */
10940         rval = ql_poll_flash(ha, 0, 0x80);
10941 
10942         if (rval != QL_SUCCESS) {
10943                 EL(ha, "failed=%xh\n", rval);
10944                 if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10945                         kmem_free(bfp, ssize);
10946                 }
10947                 return (rval);
10948         }
10949 
10950         /* Restore the section of flash we saved to the buffer. */
10951         if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10953                 tmp = bfp;
10954                 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10955                         /* Allow other system activity. */
10956                         if (cnt % 0x1000 == 0) {
10957                                 ql_delay(ha, 10000);
10958                         }
10959                         rval = ql_program_flash_address(ha, cnt, *tmp++);
10960                         if (rval != QL_SUCCESS) {
10961                                 break;
10962                         }
10963                 }
10964 
10965                 kmem_free(bfp, ssize);
10966         }
10967 
10968         if (rval != QL_SUCCESS) {
10969                 EL(ha, "failed=%xh\n", rval);
10970         } else {
10971                 /*EMPTY*/
10972                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10973         }
10974         return (rval);
10975 }
10976 
10977 /*
10978  * ql_poll_flash
10979  *      Polls flash for completion.
10980  *
10981  * Input:
10982  *      ha = adapter state pointer.
10983  *      addr = flash byte address.
10984  *      data = data to be polled.
10985  *
10986  * Returns:
10987  *      ql local function return status code.
10988  *
10989  * Context:
10990  *      Kernel context.
10991  */
10992 int
10993 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10994 {
10995         uint8_t         flash_data;
10996         uint32_t        cnt;
10997         int             rval = QL_FUNCTION_FAILED;
10998 
10999         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11000 
11001         poll_data = (uint8_t)(poll_data & BIT_7);
11002 
11003         /* Wait for 30 seconds for command to finish. */
11004         for (cnt = 30000000; cnt; cnt--) {
11005                 flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
11006 
11007                 if ((flash_data & BIT_7) == poll_data) {
11008                         rval = QL_SUCCESS;
11009                         break;
11010                 }
11011                 if (flash_data & BIT_5 && cnt > 2) {
11012                         cnt = 2;
11013                 }
11014                 drv_usecwait(1);
11015         }
11016 
11017         if (rval != QL_SUCCESS) {
11018                 EL(ha, "failed=%xh\n", rval);
11019         } else {
11020                 /*EMPTY*/
11021                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11022         }
11023         return (rval);
11024 }
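
/*
 * ql_poll_flash() implements classic data polling: while a program or
 * erase operation is in progress the part drives DQ7 (bit 7) as the
 * complement of the written data, and DQ5 (bit 5) rises if the operation
 * exceeds its time limit.  A minimal sketch of the decision logic,
 * assuming a hypothetical flash_read_byte() accessor (not a driver
 * function):
 */
#if 0   /* illustrative sketch only */
#include <stdint.h>

#define DQ5     0x20    /* exceeded-time flag */
#define DQ7     0x80    /* data polling bit */

static uint8_t flash_read_byte(uint32_t addr);  /* assumed accessor */

static int
flash_wait_done(uint32_t addr, uint8_t written)
{
        uint32_t tries;

        for (tries = 0; tries < 30000000; tries++) {
                uint8_t status = flash_read_byte(addr);

                if ((status & DQ7) == (written & DQ7)) {
                        return (0);             /* operation finished */
                }
                if (status & DQ5) {
                        /* One more read decides success vs. timeout. */
                        status = flash_read_byte(addr);
                        return ((status & DQ7) == (written & DQ7)) ? 0 : -1;
                }
        }
        return (-1);                            /* timed out */
}
#endif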
11025 
11026 /*
11027  * ql_flash_enable
11028  *      Setup flash for reading/writing.
11029  *
11030  * Input:
11031  *      ha = adapter state pointer.
11032  *
11033  * Context:
11034  *      Kernel context.
11035  */
11036 void
11037 ql_flash_enable(ql_adapter_state_t *ha)
11038 {
11039         uint16_t        data;
11040 
11041         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11042 
11043         /* Enable Flash Read/Write. */
11044         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11045                 data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
11046                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
11047                 data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
11048                 ddi_put16(ha->sbus_fpga_dev_handle,
11049                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
11050                 /* Read reset command sequence */
11051                 ql_write_flash_byte(ha, 0xaaa, 0xaa);
11052                 ql_write_flash_byte(ha, 0x555, 0x55);
11053                 ql_write_flash_byte(ha, 0xaaa, 0x20);
11054                 ql_write_flash_byte(ha, 0x555, 0xf0);
11055         } else {
11056                 data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
11057                     ISP_FLASH_ENABLE);
11058                 WRT16_IO_REG(ha, ctrl_status, data);
11059 
11060                 /* Read/Reset Command Sequence */
11061                 ql_write_flash_byte(ha, 0x5555, 0xaa);
11062                 ql_write_flash_byte(ha, 0x2aaa, 0x55);
11063                 ql_write_flash_byte(ha, 0x5555, 0xf0);
11064         }
11065         (void) ql_read_flash_byte(ha, 0);
11066 
11067         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11068 }
11069 
11070 /*
11071  * ql_flash_disable
11072  *      Disable flash and allow RISC to run.
11073  *
11074  * Input:
11075  *      ha = adapter state pointer.
11076  *
11077  * Context:
11078  *      Kernel context.
11079  */
11080 void
11081 ql_flash_disable(ql_adapter_state_t *ha)
11082 {
11083         uint16_t        data;
11084 
11085         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11086 
11087         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11088                 /*
11089                  * Lock the flash back up.
11090                  */
11091                 ql_write_flash_byte(ha, 0x555, 0x90);
11092                 ql_write_flash_byte(ha, 0x555, 0x0);
11093 
11094                 data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
11095                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
11096                 data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
11097                 ddi_put16(ha->sbus_fpga_dev_handle,
11098                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
11099         } else {
11100                 data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
11101                     ~ISP_FLASH_ENABLE);
11102                 WRT16_IO_REG(ha, ctrl_status, data);
11103         }
11104 
11105         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11106 }
11107 
11108 /*
11109  * ql_write_flash_byte
11110  *      Write byte to flash.
11111  *
11112  * Input:
11113  *      ha = adapter state pointer.
11114  *      addr = flash byte address.
11115  *      data = data to be written.
11116  *
11117  * Context:
11118  *      Kernel context.
11119  */
11120 void
11121 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
11122 {
11123         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11124                 ddi_put16(ha->sbus_fpga_dev_handle,
11125                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11126                     LSW(addr));
11127                 ddi_put16(ha->sbus_fpga_dev_handle,
11128                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11129                     MSW(addr));
11130                 ddi_put16(ha->sbus_fpga_dev_handle,
11131                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
11132                     (uint16_t)data);
11133         } else {
11134                 uint16_t bank_select;
11135 
11136                 /* Setup bit 16 of flash address. */
11137                 bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
11138 
11139                 if (CFG_IST(ha, CFG_CTRL_6322)) {
11140                         bank_select = (uint16_t)(bank_select & ~0xf0);
11141                         bank_select = (uint16_t)(bank_select |
11142                             ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11143                         WRT16_IO_REG(ha, ctrl_status, bank_select);
11144                 } else {
11145                         if (addr & BIT_16 && !(bank_select &
11146                             ISP_FLASH_64K_BANK)) {
11147                                 bank_select = (uint16_t)(bank_select |
11148                                     ISP_FLASH_64K_BANK);
11149                                 WRT16_IO_REG(ha, ctrl_status, bank_select);
11150                         } else if (!(addr & BIT_16) && bank_select &
11151                             ISP_FLASH_64K_BANK) {
11152                                 bank_select = (uint16_t)(bank_select &
11153                                     ~ISP_FLASH_64K_BANK);
11154                                 WRT16_IO_REG(ha, ctrl_status, bank_select);
11155                         }
11156                 }
11157 
11158                 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11159                         WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
11160                         WRT16_IO_REG(ha, flash_data, (uint16_t)data);
11161                 } else {
11162                         WRT16_IOMAP_REG(ha, flash_address, addr);
11163                         WRT16_IOMAP_REG(ha, flash_data, data);
11164                 }
11165         }
11166 }
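
/*
 * The flash parts here are larger than the 16-bit address the
 * flash_address register can express, so bit 16 of the byte address is
 * carried in a 64K bank-select bit of the control/status register and
 * only the low 16 address bits go to the flash_address register, as in
 * the non-6322 path above.  A standalone sketch of that split; the bit
 * value and the in-memory ctrl_status argument are stand-ins for the
 * real register accessors:
 */
#if 0   /* illustrative sketch only */
#include <stdint.h>

#define FLASH_64K_BANK  0x0008  /* stand-in for ISP_FLASH_64K_BANK */

static void
select_flash_bank(uint16_t *ctrl_status, uint32_t addr)
{
        if ((addr & 0x10000) && !(*ctrl_status & FLASH_64K_BANK)) {
                *ctrl_status |= FLASH_64K_BANK;         /* upper 64K bank */
        } else if (!(addr & 0x10000) && (*ctrl_status & FLASH_64K_BANK)) {
                *ctrl_status = (uint16_t)(*ctrl_status & ~FLASH_64K_BANK);
        }
        /* Only (addr & 0xffff) is then written to flash_address. */
}
#endif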
11167 
11168 /*
11169  * ql_read_flash_byte
11170  *      Reads byte from flash, but must read a word from chip.
11171  *
11172  * Input:
11173  *      ha = adapter state pointer.
11174  *      addr = flash byte address.
11175  *
11176  * Returns:
11177  *      byte from flash.
11178  *
11179  * Context:
11180  *      Kernel context.
11181  */
11182 uint8_t
11183 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
11184 {
11185         uint8_t data;
11186 
11187         if (CFG_IST(ha, CFG_SBUS_CARD)) {
11188                 ddi_put16(ha->sbus_fpga_dev_handle,
11189                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11190                     LSW(addr));
11191                 ddi_put16(ha->sbus_fpga_dev_handle,
11192                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11193                     MSW(addr));
11194                 data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
11195                     (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
11196         } else {
11197                 uint16_t        bank_select;
11198 
11199                 /* Setup bit 16 of flash address. */
11200                 bank_select = RD16_IO_REG(ha, ctrl_status);
11201                 if (CFG_IST(ha, CFG_CTRL_6322)) {
11202                         bank_select = (uint16_t)(bank_select & ~0xf0);
11203                         bank_select = (uint16_t)(bank_select |
11204                             ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11205                         WRT16_IO_REG(ha, ctrl_status, bank_select);
11206                 } else {
11207                         if (addr & BIT_16 &&
11208                             !(bank_select & ISP_FLASH_64K_BANK)) {
11209                                 bank_select = (uint16_t)(bank_select |
11210                                     ISP_FLASH_64K_BANK);
11211                                 WRT16_IO_REG(ha, ctrl_status, bank_select);
11212                         } else if (!(addr & BIT_16) &&
11213                             bank_select & ISP_FLASH_64K_BANK) {
11214                                 bank_select = (uint16_t)(bank_select &
11215                                     ~ISP_FLASH_64K_BANK);
11216                                 WRT16_IO_REG(ha, ctrl_status, bank_select);
11217                         }
11218                 }
11219 
11220                 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11221                         WRT16_IO_REG(ha, flash_address, addr);
11222                         data = (uint8_t)RD16_IO_REG(ha, flash_data);
11223                 } else {
11224                         WRT16_IOMAP_REG(ha, flash_address, addr);
11225                         data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
11226                 }
11227         }
11228 
11229         return (data);
11230 }
11231 
11232 /*
11233  * ql_24xx_flash_id
11234  *      Get flash IDs.
11235  *
11236  * Input:
11237  *      ha:             adapter state pointer.
11238  *
11239  * Returns:
11240  *      ql local function return status code.
11241  *
11242  * Context:
11243  *      Kernel context.
11244  */
11245 int
11246 ql_24xx_flash_id(ql_adapter_state_t *vha)
11247 {
11248         int                     rval;
11249         uint32_t                fdata = 0;
11250         ql_adapter_state_t      *ha = vha->pha;
11251         ql_xioctl_t             *xp = ha->xioctl;
11252 
11253         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11254 
11255         rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11256 
11257         if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
11258                 fdata = 0;
11259                 rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11260                     (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
11261         }
11262 
11263         if (rval != QL_SUCCESS) {
11264                 EL(ha, "24xx read_flash failed=%xh\n", rval);
11265         } else if (fdata != 0) {
11266                 xp->fdesc.flash_manuf = LSB(LSW(fdata));
11267                 xp->fdesc.flash_id = MSB(LSW(fdata));
11268                 xp->fdesc.flash_len = LSB(MSW(fdata));
11269         } else {
11270                 xp->fdesc.flash_manuf = ATMEL_FLASH;
11271                 xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11272                 xp->fdesc.flash_len = 0;
11273         }
11274 
11275         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11276 
11277         return (rval);
11278 }
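
/*
 * The 32-bit word read back above packs the flash identification bytes:
 * bits 7-0 hold the manufacturer code, bits 15-8 the device ID and bits
 * 23-16 a size/length code, which is what the LSB/MSB/LSW/MSW macro
 * combination extracts.  A standalone sketch of the same unpacking with
 * plain shifts; flash_ident_t is an assumption, not a driver type:
 */
#if 0   /* illustrative sketch only */
#include <stdint.h>

typedef struct flash_ident {
        uint8_t manuf;                  /* manufacturer code */
        uint8_t id;                     /* device ID */
        uint8_t len;                    /* size/length code */
} flash_ident_t;

static flash_ident_t
unpack_flash_id(uint32_t fdata)
{
        flash_ident_t fi;

        fi.manuf = (uint8_t)(fdata & 0xff);             /* LSB(LSW(fdata)) */
        fi.id = (uint8_t)((fdata >> 8) & 0xff);         /* MSB(LSW(fdata)) */
        fi.len = (uint8_t)((fdata >> 16) & 0xff);       /* LSB(MSW(fdata)) */
        return (fi);
}
#endif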
11279 
11280 /*
11281  * ql_24xx_load_flash
11282  *      Loads flash.
11283  *
11284  * Input:
11285  *      ha = adapter state pointer.
11286  *      dp = data pointer.
11287  *      size = data length in bytes.
11288  *      faddr = flash byte address (must be 32-bit word aligned).
11289  *
11290  * Returns:
11291  *      ql local function return status code.
11292  *
11293  * Context:
11294  *      Kernel context.
11295  */
11296 int
11297 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11298     uint32_t faddr)
11299 {
11300         int                     rval;
11301         uint32_t                cnt, rest_addr, fdata, wc;
11302         dma_mem_t               dmabuf = {0};
11303         ql_adapter_state_t      *ha = vha->pha;
11304         ql_xioctl_t             *xp = ha->xioctl;
11305 
11306         QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
11307             ha->instance, faddr, size);
11308 
11309         /* start address must be 32 bit word aligned */
11310         if ((faddr & 0x3) != 0) {
11311                 EL(ha, "start address not 32 bit word aligned\n");
11312                 return (QL_FUNCTION_PARAMETER_ERROR);
11313         }
11314 
11315         /* Allocate DMA buffer */
11316         if (CFG_IST(ha, CFG_CTRL_2581)) {
11317                 if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11318                     LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11319                     QL_SUCCESS) {
11320                         EL(ha, "dma alloc failed, rval=%xh\n", rval);
11321                         return (rval);
11322                 }
11323         }
11324 
11325         GLOBAL_HW_LOCK();
11326 
11327         /* Enable flash write */
11328         if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11329                 GLOBAL_HW_UNLOCK();
11330                 EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11331                 ql_free_phys(ha, &dmabuf);
11332                 return (rval);
11333         }
11334 
11335         /* setup mask of address range within a sector */
11336         rest_addr = (xp->fdesc.block_size - 1) >> 2;
11337 
11338         faddr = faddr >> 2;       /* flash gets 32 bit words */
11339 
11340         /*
11341          * Write data to flash.
11342          */
11343         cnt = 0;
11344         size = (size + 3) >> 2;   /* Round up & convert to dwords */
11345 
11346         while (cnt < size) {
11347                 /* Beginning of a sector? */
11348                 if ((faddr & rest_addr) == 0) {
11349                         if (CFG_IST(ha, CFG_CTRL_8021)) {
11350                                 fdata = ha->flash_data_addr | faddr;
11351                                 rval = ql_8021_rom_erase(ha, fdata);
11352                                 if (rval != QL_SUCCESS) {
11353                                         EL(ha, "8021 erase sector status="
11354                                             "%xh, start=%xh, end=%xh"
11355                                             "\n", rval, fdata,
11356                                             fdata + rest_addr);
11357                                         break;
11358                                 }
11359                         } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11360                                 fdata = ha->flash_data_addr | faddr;
11361                                 rval = ql_flash_access(ha,
11362                                     FAC_ERASE_SECTOR, fdata, fdata +
11363                                     rest_addr, 0);
11364                                 if (rval != QL_SUCCESS) {
11365                                         EL(ha, "erase sector status="
11366                                             "%xh, start=%xh, end=%xh"
11367                                             "\n", rval, fdata,
11368                                             fdata + rest_addr);
11369                                         break;
11370                                 }
11371                         } else {
11372                                 fdata = (faddr & ~rest_addr) << 2;
11373                                 fdata = (fdata & 0xff00) |
11374                                     (fdata << 16 & 0xff0000) |
11375                                     (fdata >> 16 & 0xff);
11376 
11377                                 if (rest_addr == 0x1fff) {
11378                                         /* 32kb sector block erase */
11379                                         rval = ql_24xx_write_flash(ha,
11380                                             FLASH_CONF_ADDR | 0x0352,
11381                                             fdata);
11382                                 } else {
11383                                         /* 64kb sector block erase */
11384                                         rval = ql_24xx_write_flash(ha,
11385                                             FLASH_CONF_ADDR | 0x03d8,
11386                                             fdata);
11387                                 }
11388                                 if (rval != QL_SUCCESS) {
11389                                         EL(ha, "Unable to flash sector"
11390                                             ": address=%xh\n", faddr);
11391                                         break;
11392                                 }
11393                         }
11394                 }
11395 
11396                 /* Write data */
11397                 if (CFG_IST(ha, CFG_CTRL_2581) &&
11398                     ((faddr & 0x3f) == 0)) {
11399                         /*
11400                          * Limit write up to sector boundary.
11401                          */
11402                         wc = ((~faddr & (rest_addr>>1)) + 1);
11403 
11404                         if (size - cnt < wc) {
11405                                 wc = size - cnt;
11406                         }
11407 
11408                         ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11409                             (uint8_t *)dmabuf.bp, wc<<2,
11410                             DDI_DEV_AUTOINCR);
11411 
11412                         rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11413                             faddr, dmabuf.cookie.dmac_laddress, wc);
11414                         if (rval != QL_SUCCESS) {
11415                                 EL(ha, "unable to dma to flash "
11416                                     "address=%xh\n", faddr << 2);
11417                                 break;
11418                         }
11419 
11420                         cnt += wc;
11421                         faddr += wc;
11422                         dp += wc << 2;
11423                 } else {
11424                         fdata = *dp++;
11425                         fdata |= *dp++ << 8;
11426                         fdata |= *dp++ << 16;
11427                         fdata |= *dp++ << 24;
11428                         rval = ql_24xx_write_flash(ha,
11429                             ha->flash_data_addr | faddr, fdata);
11430                         if (rval != QL_SUCCESS) {
11431                                 EL(ha, "Unable to program flash "
11432                                     "address=%xh data=%xh\n", faddr,
11433                                     fdata);
11434                                 break;
11435                         }
11436                         cnt++;
11437                         faddr++;
11438 
11439                         /* Allow other system activity. */
11440                         if (cnt % 0x1000 == 0) {
11441                                 ql_delay(ha, 10000);
11442                         }
11443                 }
11444         }
11445 
11446         ql_24xx_protect_flash(ha);
11447 
11448         ql_free_phys(ha, &dmabuf);
11449 
11450         GLOBAL_HW_UNLOCK();
11451 
11452         if (rval != QL_SUCCESS) {
11453                 EL(ha, "failed=%xh\n", rval);
11454         } else {
11455                 /*EMPTY*/
11456                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11457         }
11458         return (rval);
11459 }
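
/*
 * The write loop above works in 32-bit words: the byte address and size
 * are converted to word units, and whenever the word address crosses a
 * sector boundary ((faddr & rest_addr) == 0) the sector is erased before
 * any word in it is programmed.  A standalone sketch of that
 * erase-on-boundary pattern; erase_sector() and program_word() are
 * assumed helpers, not driver functions:
 */
#if 0   /* illustrative sketch only */
#include <stdint.h>

static int erase_sector(uint32_t word_addr);                    /* assumed */
static int program_word(uint32_t word_addr, uint32_t data);     /* assumed */

static int
flash_write_words(uint32_t faddr, const uint32_t *dp, uint32_t words,
    uint32_t sector_bytes)
{
        /* Mask of word offsets within one sector. */
        uint32_t rest_addr = (sector_bytes - 1) >> 2;
        uint32_t cnt;

        for (cnt = 0; cnt < words; cnt++, faddr++) {
                if ((faddr & rest_addr) == 0 && erase_sector(faddr) != 0) {
                        return (-1);    /* erase failed */
                }
                if (program_word(faddr, dp[cnt]) != 0) {
                        return (-1);    /* program failed */
                }
        }
        return (0);
}
#endif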
11460 
11461 /*
11462  * ql_24xx_read_flash
11463  *      Reads a 32bit word from ISP24xx NVRAM/FLASH.
11464  *
11465  * Input:
11466  *      ha:     adapter state pointer.
11467  *      faddr:  NVRAM/FLASH address.
11468  *      bp:     data pointer.
11469  *
11470  * Returns:
11471  *      ql local function return status code.
11472  *
11473  * Context:
11474  *      Kernel context.
11475  */
11476 int
11477 ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11478 {
11479         uint32_t                timer;
11480         int                     rval = QL_SUCCESS;
11481         ql_adapter_state_t      *ha = vha->pha;
11482 
11483         if (CFG_IST(ha, CFG_CTRL_8021)) {
11484                 if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
11485                         EL(ha, "8021 access error\n");
11486                 }
11487                 return (rval);
11488         }
11489 
11490         /* Clear access error flag */
11491         WRT32_IO_REG(ha, ctrl_status,
11492             RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11493 
11494         WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11495 
11496         /* Wait for READ cycle to complete. */
11497         for (timer = 300000; timer; timer--) {
11498                 if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11499                         break;
11500                 }
11501                 drv_usecwait(10);
11502         }
11503 
11504         if (timer == 0) {
11505                 EL(ha, "failed, timeout\n");
11506                 rval = QL_FUNCTION_TIMEOUT;
11507         } else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11508                 EL(ha, "failed, access error\n");
11509                 rval = QL_FUNCTION_FAILED;
11510         }
11511 
11512         *bp = RD32_IO_REG(ha, flash_data);
11513 
11514         return (rval);
11515 }
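
/*
 * ISP24xx flash reads are a register handshake: write the address with
 * the data-ready flag clear, poll the address register until the
 * controller sets the flag (or a timeout/access error is detected), then
 * pick up the word from the data register.  A standalone sketch of that
 * handshake; reg_read32()/reg_write32(), the register names and the
 * DATA_READY value are assumptions, not driver symbols:
 */
#if 0   /* illustrative sketch only */
#include <stdint.h>

#define DATA_READY      0x80000000u     /* stand-in for FLASH_DATA_FLAG */

static uint32_t reg_read32(int reg);                    /* assumed accessor */
static void reg_write32(int reg, uint32_t val);         /* assumed accessor */
enum { REG_FLASH_ADDR, REG_FLASH_DATA };                /* stand-in offsets */

static int
flash_read_word(uint32_t faddr, uint32_t *bp)
{
        uint32_t timer;

        reg_write32(REG_FLASH_ADDR, faddr & ~DATA_READY);

        for (timer = 300000; timer; timer--) {
                if (reg_read32(REG_FLASH_ADDR) & DATA_READY) {
                        *bp = reg_read32(REG_FLASH_DATA);
                        return (0);
                }
                /* real code delays ~10us per iteration here */
        }
        return (-1);                            /* timed out */
}
#endif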
11516 
11517 /*
11518  * ql_24xx_write_flash
11519  *      Writes a 32bit word to ISP24xx NVRAM/FLASH.
11520  *
11521  * Input:
11522  *      ha:     adapter state pointer.
11523  *      addr:   NVRAM/FLASH address.
11524  *      value:  data.
11525  *
11526  * Returns:
11527  *      ql local function return status code.
11528  *
11529  * Context:
11530  *      Kernel context.
11531  */
11532 int
11533 ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
11534 {
11535         uint32_t                timer, fdata;
11536         int                     rval = QL_SUCCESS;
11537         ql_adapter_state_t      *ha = vha->pha;
11538 
11539         if (CFG_IST(ha, CFG_CTRL_8021)) {
11540                 if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
11541                         EL(ha, "8021 access error\n");
11542                 }
11543                 return (rval);
11544         }
11545         /* Clear access error flag */
11546         WRT32_IO_REG(ha, ctrl_status,
11547             RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11548 
11549         WRT32_IO_REG(ha, flash_data, data);
11550         RD32_IO_REG(ha, flash_data);            /* PCI Posting. */
11551         WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);
11552 
11553         /* Wait for Write cycle to complete. */
11554         for (timer = 3000000; timer; timer--) {
11555                 if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
11556                         /* Check flash write in progress. */
11557                         if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
11558                                 (void) ql_24xx_read_flash(ha,
11559                                     FLASH_CONF_ADDR | 0x005, &fdata);
11560                                 if (!(fdata & BIT_0)) {
11561                                         break;
11562                                 }
11563                         } else {
11564                                 break;
11565                         }
11566                 }
11567                 drv_usecwait(10);
11568         }
11569         if (timer == 0) {
11570                 EL(ha, "failed, timeout\n");
11571                 rval = QL_FUNCTION_TIMEOUT;
11572         } else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11573                 EL(ha, "access error\n");
11574                 rval = QL_FUNCTION_FAILED;
11575         }
11576 
11577         return (rval);
11578 }
11579 /*
11580  * ql_24xx_unprotect_flash
11581  *      Enables flash writes.
11582  *
11583  * Input:
11584  *      ha:     adapter state pointer.
11585  *
11586  * Returns:
11587  *      ql local function return status code.
11588  *
11589  * Context:
11590  *      Kernel context.
11591  */
11592 int
11593 ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
11594 {
11595         int                     rval;
11596         uint32_t                fdata;
11597         ql_adapter_state_t      *ha = vha->pha;
11598         ql_xioctl_t             *xp = ha->xioctl;
11599 
11600         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11601 
11602         if (CFG_IST(ha, CFG_CTRL_8021)) {
11603                 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11604                 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11605                 if (rval != QL_SUCCESS) {
11606                         EL(ha, "8021 access error\n");
11607                 }
11608                 return (rval);
11609         }
11610         if (CFG_IST(ha, CFG_CTRL_81XX)) {
11611                 if (ha->task_daemon_flags & FIRMWARE_UP) {
11612                         if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
11613                             0)) != QL_SUCCESS) {
11614                                 EL(ha, "status=%xh\n", rval);
11615                         }
11616                         QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11617                             ha->instance);
11618                         return (rval);
11619                 }
11620         } else {
11621                 /* Enable flash write. */
11622                 WRT32_IO_REG(ha, ctrl_status,
11623                     RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11624                 RD32_IO_REG(ha, ctrl_status);   /* PCI Posting. */
11625         }
11626 
11627         /*
11628          * Remove block write protection (SST and ST) and
11629          * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11630          * Unprotect sectors.
11631          */
11632         (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
11633             xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
11634 
11635         if (xp->fdesc.unprotect_sector_cmd != 0) {
11636                 for (fdata = 0; fdata < 0x10; fdata++) {
11637                         (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11638                             0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
11639                 }
11640 
11641                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11642                     xp->fdesc.unprotect_sector_cmd, 0x00400f);
11643                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11644                     xp->fdesc.unprotect_sector_cmd, 0x00600f);
11645                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11646                     xp->fdesc.unprotect_sector_cmd, 0x00800f);
11647         }
11648 
11649         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11650 
11651         return (QL_SUCCESS);
11652 }
11653 
11654 /*
11655  * ql_24xx_protect_flash
11656  *      Disables flash writes.
11657  *
11658  * Input:
11659  *      ha:     adapter state pointer.
11660  *
11661  * Context:
11662  *      Kernel context.
11663  */
11664 void
11665 ql_24xx_protect_flash(ql_adapter_state_t *vha)
11666 {
11667         int                     rval;
11668         uint32_t                fdata;
11669         ql_adapter_state_t      *ha = vha->pha;
11670         ql_xioctl_t             *xp = ha->xioctl;
11671 
11672         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11673 
11674         if (CFG_IST(ha, CFG_CTRL_8021)) {
11675                 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11676                 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
11677                 if (rval != QL_SUCCESS) {
11678                         EL(ha, "8021 access error\n");
11679                 }
11680                 return;
11681         }
11682         if (CFG_IST(ha, CFG_CTRL_81XX)) {
11683                 if (ha->task_daemon_flags & FIRMWARE_UP) {
11684                         if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
11685                             0)) != QL_SUCCESS) {
11686                                 EL(ha, "status=%xh\n", rval);
11687                         }
11688                         QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11689                             ha->instance);
11690                         return;
11691                 }
11692         } else {
11693                 /* Enable flash write. */
11694                 WRT32_IO_REG(ha, ctrl_status,
11695                     RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11696                 RD32_IO_REG(ha, ctrl_status);   /* PCI Posting. */
11697         }
11698 
11699         /*
11700          * Protect sectors.
11701          * Set block write protection (SST and ST) and
11702          * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11703          */
11704         if (xp->fdesc.protect_sector_cmd != 0) {
11705                 for (fdata = 0; fdata < 0x10; fdata++) {
11706                         (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11707                             0x330 | xp->fdesc.protect_sector_cmd, fdata);
11708                 }
11709                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11710                     xp->fdesc.protect_sector_cmd, 0x00400f);
11711                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11712                     xp->fdesc.protect_sector_cmd, 0x00600f);
11713                 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11714                     xp->fdesc.protect_sector_cmd, 0x00800f);
11715 
11716                 /* TODO: ??? */
11717                 (void) ql_24xx_write_flash(ha,
11718                     FLASH_CONF_ADDR | 0x101, 0x80);
11719         } else {
11720                 (void) ql_24xx_write_flash(ha,
11721                     FLASH_CONF_ADDR | 0x101, 0x9c);
11722         }
11723 
11724         /* Disable flash write. */
11725         if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
11726                 WRT32_IO_REG(ha, ctrl_status,
11727                     RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
11728                 RD32_IO_REG(ha, ctrl_status);   /* PCI Posting. */
11729         }
11730 
11731         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11732 }
11733 
11734 /*
11735  * ql_dump_firmware
11736  *      Save RISC code state information.
11737  *
11738  * Input:
11739  *      ha = adapter state pointer.
11740  *
11741  * Returns:
11742  *      QL local function return status code.
11743  *
11744  * Context:
11745  *      Kernel context.
11746  */
11747 static int
11748 ql_dump_firmware(ql_adapter_state_t *vha)
11749 {
11750         int                     rval;
11751         clock_t                 timer = drv_usectohz(30000000);
11752         ql_adapter_state_t      *ha = vha->pha;
11753 
11754         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11755 
11756         QL_DUMP_LOCK(ha);
11757 
11758         if (ha->ql_dump_state & QL_DUMPING ||
11759             (ha->ql_dump_state & QL_DUMP_VALID &&
11760             !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11761                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11762                 QL_DUMP_UNLOCK(ha);
11763                 return (QL_SUCCESS);
11764         }
11765 
11766         QL_DUMP_UNLOCK(ha);
11767 
11768         ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
11769 
11770         /*
11771          * Wait for all outstanding commands to complete
11772          */
11773         (void) ql_wait_outstanding(ha);
11774 
11775         /* Dump firmware. */
11776         rval = ql_binary_fw_dump(ha, TRUE);
11777 
11778         /* Do abort to force restart. */
11779         ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
11780         EL(ha, "restarting, isp_abort_needed\n");
11781 
11782         /* Acquire task daemon lock. */
11783         TASK_DAEMON_LOCK(ha);
11784 
11785         /* Wait for suspension to end. */
11786         while (ha->task_daemon_flags & QL_SUSPENDED) {
11787                 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
11788 
11789                 /* 30 seconds from now */
11790                 if (cv_reltimedwait(&ha->cv_dr_suspended,
11791                     &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
11792                         /*
11793                          * The timeout time 'timer' was
11794                          * reached without the condition
11795                          * being signaled.
11796                          */
11797                         break;
11798                 }
11799         }
11800 
11801         /* Release task daemon lock. */
11802         TASK_DAEMON_UNLOCK(ha);
11803 
11804         if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
11805                 /*EMPTY*/
11806                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11807         } else {
11808                 EL(ha, "failed, rval = %xh\n", rval);
11809         }
11810         return (rval);
11811 }
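
/*
 * The wait above is the usual Solaris pattern for sleeping on a
 * condition with a relative timeout: test the predicate under the mutex,
 * block in cv_reltimedwait(), and stop either when the predicate clears
 * or when -1 indicates the timeout expired.  A minimal sketch of the
 * same pattern; my_lock, my_cv and my_busy are illustration names, not
 * driver state:
 */
#if 0   /* illustrative sketch only */
static kmutex_t         my_lock;
static kcondvar_t       my_cv;
static volatile int     my_busy;

static int
wait_not_busy(clock_t timeout_ticks)
{
        int rv = 0;

        mutex_enter(&my_lock);
        while (my_busy) {
                if (cv_reltimedwait(&my_cv, &my_lock, timeout_ticks,
                    TR_CLOCK_TICK) == -1) {
                        rv = -1;        /* timed out, predicate still set */
                        break;
                }
        }
        mutex_exit(&my_lock);
        return (rv);
}
#endif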
11812 
11813 /*
11814  * ql_binary_fw_dump
11815  *      Dumps binary data from firmware.
11816  *
11817  * Input:
11818  *      ha = adapter state pointer.
11819  *      lock_needed = mailbox lock needed.
11820  *
11821  * Returns:
11822  *      ql local function return status code.
11823  *
11824  * Context:
11825  *      Interrupt or Kernel context, no mailbox commands allowed.
11826  */
11827 int
11828 ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
11829 {
11830         clock_t                 timer;
11831         mbx_cmd_t               mc;
11832         mbx_cmd_t               *mcp = &mc;
11833         int                     rval = QL_SUCCESS;
11834         ql_adapter_state_t      *ha = vha->pha;
11835 
11836         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11837 
11838         if (CFG_IST(ha, CFG_CTRL_8021)) {
11839                 EL(ha, "8021 not supported\n");
11840                 return (QL_NOT_SUPPORTED);
11841         }
11842 
11843         QL_DUMP_LOCK(ha);
11844 
11845         if (ha->ql_dump_state & QL_DUMPING ||
11846             (ha->ql_dump_state & QL_DUMP_VALID &&
11847             !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11848                 EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
11849                 QL_DUMP_UNLOCK(ha);
11850                 return (QL_DATA_EXISTS);
11851         }
11852 
11853         ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
11854         ha->ql_dump_state |= QL_DUMPING;
11855 
11856         QL_DUMP_UNLOCK(ha);
11857 
11858         if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
11859 
11860                 /* Insert Time Stamp */
11861                 rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
11862                     FTO_INSERT_TIME_STAMP);
11863                 if (rval != QL_SUCCESS) {
11864                         EL(ha, "f/w extended trace insert "
11865                             "time stamp failed: %xh\n", rval);
11866                 }
11867         }
11868 
11869         if (lock_needed == TRUE) {
11870                 /* Acquire mailbox register lock. */
11871                 MBX_REGISTER_LOCK(ha);
11872                 timer = (ha->mcp->timeout + 2) * drv_usectohz(1000000);
11873 
11874                 /* Check for mailbox available, if not wait for signal. */
11875                 while (ha->mailbox_flags & MBX_BUSY_FLG) {
11876                         ha->mailbox_flags = (uint8_t)
11877                             (ha->mailbox_flags | MBX_WANT_FLG);
11878 
11879                         /* 30 seconds from now */
11880                         if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
11881                             timer, TR_CLOCK_TICK) == -1) {
11882                                 /*
11883                                  * The timeout time 'timer' was
11884                                  * reached without the condition
11885                                  * being signaled.
11886                                  */
11887 
11888                                 /* Release mailbox register lock. */
11889                                 MBX_REGISTER_UNLOCK(ha);
11890 
11891                                 EL(ha, "failed, rval = %xh\n",
11892                                     QL_FUNCTION_TIMEOUT);
11893                                 return (QL_FUNCTION_TIMEOUT);
11894                         }
11895                 }
11896 
11897                 /* Set busy flag. */
11898                 ha->mailbox_flags = (uint8_t)
11899                     (ha->mailbox_flags | MBX_BUSY_FLG);
11900                 mcp->timeout = 120;
11901                 ha->mcp = mcp;
11902 
11903                 /* Release mailbox register lock. */
11904                 MBX_REGISTER_UNLOCK(ha);
11905         }
11906 
11907         /* Free previous dump buffer. */
11908         if (ha->ql_dump_ptr != NULL) {
11909                 kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11910                 ha->ql_dump_ptr = NULL;
11911         }
11912 
11913         if (CFG_IST(ha, CFG_CTRL_2422)) {
11914                 ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
11915                     ha->fw_ext_memory_size);
11916         } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11917                 ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
11918                     ha->fw_ext_memory_size);
11919         } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11920                 ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
11921                     ha->fw_ext_memory_size);
11922         } else {
11923                 ha->ql_dump_size = sizeof (ql_fw_dump_t);
11924         }
11925 
11926         if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
11927             NULL) {
11928                 rval = QL_MEMORY_ALLOC_FAILED;
11929         } else {
11930                 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11931                         rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
11932                 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11933                         rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11934                 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11935                         rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11936                 } else if (CFG_IST(ha, CFG_CTRL_2422)) {
11937                         rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11938                 } else {
11939                         rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
11940                 }
11941         }
11942 
11943         /* Reset ISP chip. */
11944         ql_reset_chip(ha);
11945 
11946         QL_DUMP_LOCK(ha);
11947 
11948         if (rval != QL_SUCCESS) {
11949                 if (ha->ql_dump_ptr != NULL) {
11950                         kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11951                         ha->ql_dump_ptr = NULL;
11952                 }
11953                 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
11954                     QL_DUMP_UPLOADED);
11955                 EL(ha, "failed, rval = %xh\n", rval);
11956         } else {
11957                 ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
11958                 ha->ql_dump_state |= QL_DUMP_VALID;
11959                 EL(ha, "done\n");
11960         }
11961 
11962         QL_DUMP_UNLOCK(ha);
11963 
11964         return (rval);
11965 }
11966 
11967 /*
11968  * ql_ascii_fw_dump
11969  *      Converts firmware binary dump to ascii.
11970  *
11971  * Input:
11972  *      ha = adapter state pointer.
11973  *      bufp = buffer pointer.
11974  *
11975  * Returns:
11976  *      Amount of data buffer used.
11977  *
11978  * Context:
11979  *      Kernel context.
11980  */
11981 size_t
11982 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11983 {
11984         uint32_t                cnt;
11985         caddr_t                 bp;
11986         int                     mbox_cnt;
11987         ql_adapter_state_t      *ha = vha->pha;
11988         ql_fw_dump_t            *fw = ha->ql_dump_ptr;
11989 
11990         if (CFG_IST(ha, CFG_CTRL_2422)) {
11991                 return (ql_24xx_ascii_fw_dump(ha, bufp));
11992         } else if (CFG_IST(ha, CFG_CTRL_2581)) {
11993                 return (ql_2581_ascii_fw_dump(ha, bufp));
11994         }
11995 
11996         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11997 
11998         if (CFG_IST(ha, CFG_CTRL_2300)) {
11999                 (void) sprintf(bufp, "\nISP 2300IP ");
12000         } else if (CFG_IST(ha, CFG_CTRL_6322)) {
12001                 (void) sprintf(bufp, "\nISP 6322FLX ");
12002         } else {
12003                 (void) sprintf(bufp, "\nISP 2200IP ");
12004         }
12005 
12006         bp = bufp + strlen(bufp);
12007         (void) sprintf(bp, "Firmware Version %d.%d.%d\n",
12008             ha->fw_major_version, ha->fw_minor_version,
12009             ha->fw_subminor_version);
12010 
12011         (void) strcat(bufp, "\nPBIU Registers:");
12012         bp = bufp + strlen(bufp);
12013         for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
12014                 if (cnt % 8 == 0) {
12015                         *bp++ = '\n';
12016                 }
12017                 (void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
12018                 bp = bp + 6;
12019         }
12020 
12021         if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12022                 (void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
12023                     "registers:");
12024                 bp = bufp + strlen(bufp);
12025                 for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
12026                         if (cnt % 8 == 0) {
12027                                 *bp++ = '\n';
12028                         }
12029                         (void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
12030                         bp = bp + 6;
12031                 }
12032         }
12033 
12034         (void) strcat(bp, "\n\nMailbox Registers:");
12035         bp = bufp + strlen(bufp);
12036         mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
12037         for (cnt = 0; cnt < mbox_cnt; cnt++) {
12038                 if (cnt % 8 == 0) {
12039                         *bp++ = '\n';
12040                 }
12041                 (void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
12042                 bp = bp + 6;
12043         }
12044 
12045         if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12046                 (void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
12047                 bp = bufp + strlen(bufp);
12048                 for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
12049                         if (cnt % 8 == 0) {
12050                                 *bp++ = '\n';
12051                         }
12052                         (void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
12053                         bp = bp + 6;
12054                 }
12055         }
12056 
12057         (void) strcat(bp, "\n\nDMA Registers:");
12058         bp = bufp + strlen(bufp);
12059         for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
12060                 if (cnt % 8 == 0) {
12061                         *bp++ = '\n';
12062                 }
12063                 (void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
12064                 bp = bp + 6;
12065         }
12066 
12067         (void) strcat(bp, "\n\nRISC Hardware Registers:");
12068         bp = bufp + strlen(bufp);
12069         for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
12070                 if (cnt % 8 == 0) {
12071                         *bp++ = '\n';
12072                 }
12073                 (void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
12074                 bp = bp + 6;
12075         }
12076 
12077         (void) strcat(bp, "\n\nRISC GP0 Registers:");
12078         bp = bufp + strlen(bufp);
12079         for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
12080                 if (cnt % 8 == 0) {
12081                         *bp++ = '\n';
12082                 }
12083                 (void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
12084                 bp = bp + 6;
12085         }
12086 
12087         (void) strcat(bp, "\n\nRISC GP1 Registers:");
12088         bp = bufp + strlen(bufp);
12089         for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
12090                 if (cnt % 8 == 0) {
12091                         *bp++ = '\n';
12092                 }
12093                 (void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
12094                 bp = bp + 6;
12095         }
12096 
12097         (void) strcat(bp, "\n\nRISC GP2 Registers:");
12098         bp = bufp + strlen(bufp);
12099         for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
12100                 if (cnt % 8 == 0) {
12101                         *bp++ = '\n';
12102                 }
12103                 (void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
12104                 bp = bp + 6;
12105         }
12106 
12107         (void) strcat(bp, "\n\nRISC GP3 Registers:");
12108         bp = bufp + strlen(bufp);
12109         for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
12110                 if (cnt % 8 == 0) {
12111                         *bp++ = '\n';
12112                 }
12113                 (void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
12114                 bp = bp + 6;
12115         }
12116 
12117         (void) strcat(bp, "\n\nRISC GP4 Registers:");
12118         bp = bufp + strlen(bufp);
12119         for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
12120                 if (cnt % 8 == 0) {
12121                         *bp++ = '\n';
12122                 }
12123                 (void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
12124                 bp = bp + 6;
12125         }
12126 
12127         (void) strcat(bp, "\n\nRISC GP5 Registers:");
12128         bp = bufp + strlen(bufp);
12129         for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
12130                 if (cnt % 8 == 0) {
12131                         *bp++ = '\n';
12132                 }
12133                 (void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
12134                 bp = bp + 6;
12135         }
12136 
12137         (void) strcat(bp, "\n\nRISC GP6 Registers:");
12138         bp = bufp + strlen(bufp);
12139         for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
12140                 if (cnt % 8 == 0) {
12141                         *bp++ = '\n';
12142                 }
12143                 (void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
12144                 bp = bp + 6;
12145         }
12146 
12147         (void) strcat(bp, "\n\nRISC GP7 Registers:");
12148         bp = bufp + strlen(bufp);
12149         for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
12150                 if (cnt % 8 == 0) {
12151                         *bp++ = '\n';
12152                 }
12153                 (void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
12154                 bp = bp + 6;
12155         }
12156 
12157         (void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
12158         bp = bufp + strlen(bufp);
12159         for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
12160                 if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
12161                     CFG_CTRL_6322)) == 0))) {
12162                         break;
12163                 }
12164                 if (cnt % 8 == 0) {
12165                         *bp++ = '\n';
12166                 }
12167                 (void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
12168                 bp = bp + 6;
12169         }
12170 
12171         (void) strcat(bp, "\n\nFPM B0 Registers:");
12172         bp = bufp + strlen(bufp);
12173         for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
12174                 if (cnt % 8 == 0) {
12175                         *bp++ = '\n';
12176                 }
12177                 (void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
12178                 bp = bp + 6;
12179         }
12180 
12181         (void) strcat(bp, "\n\nFPM B1 Registers:");
12182         bp = bufp + strlen(bufp);
12183         for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
12184                 if (cnt % 8 == 0) {
12185                         *bp++ = '\n';
12186                 }
12187                 (void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
12188                 bp = bp + 6;
12189         }
12190 
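              /*
               * 2300/6322 parts dump code, stack and data RAM separately with
               * word-address prefixes; 2200 parts expose a single 0xf000-word
               * SRAM image starting at word address 0x1000.
               */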
12191         if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12192                 (void) strcat(bp, "\n\nCode RAM Dump:");
12193                 bp = bufp + strlen(bufp);
12194                 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
12195                         if (cnt % 8 == 0) {
12196                                 (void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
12197                                 bp = bp + 8;
12198                         }
12199                         (void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
12200                         bp = bp + 6;
12201                 }
12202 
12203                 (void) strcat(bp, "\n\nStack RAM Dump:");
12204                 bp = bufp + strlen(bufp);
12205                 for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
12206                         if (cnt % 8 == 0) {
12207                                 (void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
12208                                 bp = bp + 8;
12209                         }
12210                         (void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
12211                         bp = bp + 6;
12212                 }
12213 
12214                 (void) strcat(bp, "\n\nData RAM Dump:");
12215                 bp = bufp + strlen(bufp);
12216                 for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
12217                         if (cnt % 8 == 0) {
12218                                 (void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
12219                                 bp = bp + 8;
12220                         }
12221                         (void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
12222                         bp = bp + 6;
12223                 }
12224         } else {
12225                 (void) strcat(bp, "\n\nRISC SRAM:");
12226                 bp = bufp + strlen(bufp);
12227                 for (cnt = 0; cnt < 0xf000; cnt++) {
12228                         if (cnt % 8 == 0) {
12229                                 (void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
12230                                 bp = bp + 7;
12231                         }
12232                         (void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
12233                         bp = bp + 6;
12234                 }
12235         }
12236 
12237         (void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
12238         bp += strlen(bp);
12239 
12240         (void) sprintf(bp, "\n\nRequest Queue");
12241         bp += strlen(bp);
12242         for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12243                 if (cnt % 8 == 0) {
12244                         (void) sprintf(bp, "\n%08x: ", cnt);
12245                         bp += strlen(bp);
12246                 }
12247                 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12248                 bp += strlen(bp);
12249         }
12250 
12251         (void) sprintf(bp, "\n\nResponse Queue");
12252         bp += strlen(bp);
12253         for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12254                 if (cnt % 8 == 0) {
12255                         (void) sprintf(bp, "\n%08x: ", cnt);
12256                         bp += strlen(bp);
12257                 }
12258                 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12259                 bp += strlen(bp);
12260         }
12261 
12262         (void) sprintf(bp, "\n");
12263 
12264         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12265 
12266         return (strlen(bufp));
12267 }
12268 
12269 /*
12270  * ql_24xx_ascii_fw_dump
12271  *      Converts ISP24xx firmware binary dump to ascii.
12272  *
12273  * Input:
12274  *      ha = adapter state pointer.
12275  *      bufp = buffer pointer.
12276  *
12277  * Returns:
12278  *      Amount of data buffer used.
12279  *
12280  * Context:
12281  *      Kernel context.
12282  */
12283 static size_t
12284 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12285 {
12286         uint32_t                cnt;
12287         caddr_t                 bp = bufp;
12288         ql_24xx_fw_dump_t       *fw = ha->ql_dump_ptr;
12289 
12290         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12291 
12292         (void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12293             ha->fw_major_version, ha->fw_minor_version,
12294             ha->fw_subminor_version, ha->fw_attributes);
12295         bp += strlen(bp);
12296 
12297         (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12298 
12299         (void) strcat(bp, "\nHost Interface Registers");
12300         bp += strlen(bp);
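              /*
               * 32-bit registers print eight per line as "%08x " (9
               * characters); sprintf(bp++, "\n") emits the line break and its
               * terminating NUL is overwritten by the next sprintf.
               */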
12301         for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12302                 if (cnt % 8 == 0) {
12303                         (void) sprintf(bp++, "\n");
12304                 }
12305 
12306                 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12307                 bp += 9;
12308         }
12309 
12310         (void) sprintf(bp, "\n\nMailbox Registers");
12311         bp += strlen(bp);
12312         for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12313                 if (cnt % 16 == 0) {
12314                         (void) sprintf(bp++, "\n");
12315                 }
12316 
12317                 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12318                 bp += 5;
12319         }
12320 
12321         (void) sprintf(bp, "\n\nXSEQ GP Registers");
12322         bp += strlen(bp);
12323         for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12324                 if (cnt % 8 == 0) {
12325                         (void) sprintf(bp++, "\n");
12326                 }
12327 
12328                 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12329                 bp += 9;
12330         }
12331 
12332         (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12333         bp += strlen(bp);
12334         for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12335                 if (cnt % 8 == 0) {
12336                         (void) sprintf(bp++, "\n");
12337                 }
12338 
12339                 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12340                 bp += 9;
12341         }
12342 
12343         (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12344         bp += strlen(bp);
12345         for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12346                 if (cnt % 8 == 0) {
12347                         (void) sprintf(bp++, "\n");
12348                 }
12349 
12350                 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12351                 bp += 9;
12352         }
12353 
12354         (void) sprintf(bp, "\n\nRSEQ GP Registers");
12355         bp += strlen(bp);
12356         for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12357                 if (cnt % 8 == 0) {
12358                         (void) sprintf(bp++, "\n");
12359                 }
12360 
12361                 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12362                 bp += 9;
12363         }
12364 
12365         (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12366         bp += strlen(bp);
12367         for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12368                 if (cnt % 8 == 0) {
12369                         (void) sprintf(bp++, "\n");
12370                 }
12371 
12372                 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12373                 bp += 9;
12374         }
12375 
12376         (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12377         bp += strlen(bp);
12378         for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12379                 if (cnt % 8 == 0) {
12380                         (void) sprintf(bp++, "\n");
12381                 }
12382 
12383                 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12384                 bp += 9;
12385         }
12386 
12387         (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12388         bp += strlen(bp);
12389         for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12390                 if (cnt % 8 == 0) {
12391                         (void) sprintf(bp++, "\n");
12392                 }
12393 
12394                 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12395                 bp += 9;
12396         }
12397 
12398         (void) sprintf(bp, "\n\nCommand DMA Registers");
12399         bp += strlen(bp);
12400         for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12401                 if (cnt % 8 == 0) {
12402                         (void) sprintf(bp++, "\n");
12403                 }
12404 
12405                 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12406                 bp += 9;
12407         }
12408 
12409         (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12410         bp += strlen(bp);
12411         for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12412                 if (cnt % 8 == 0) {
12413                         (void) sprintf(bp++, "\n");
12414                 }
12415 
12416                 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12417                 bp += 9;
12418         }
12419 
12420         (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12421         bp += strlen(bp);
12422         for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12423                 if (cnt % 8 == 0) {
12424                         (void) sprintf(bp++, "\n");
12425                 }
12426 
12427                 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12428                 bp += 9;
12429         }
12430 
12431         (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12432         bp += strlen(bp);
12433         for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12434                 if (cnt % 8 == 0) {
12435                         (void) sprintf(bp++, "\n");
12436                 }
12437 
12438                 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12439                 bp += 9;
12440         }
12441 
12442         (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12443         bp += strlen(bp);
12444         for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12445                 if (cnt % 8 == 0) {
12446                         (void) sprintf(bp++, "\n");
12447                 }
12448 
12449                 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12450                 bp += 9;
12451         }
12452 
12453         (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12454         bp += strlen(bp);
12455         for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12456                 if (cnt % 8 == 0) {
12457                         (void) sprintf(bp++, "\n");
12458                 }
12459 
12460                 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12461                 bp += 9;
12462         }
12463 
12464         (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12465         bp += strlen(bp);
12466         for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12467                 if (cnt % 8 == 0) {
12468                         (void) sprintf(bp++, "\n");
12469                 }
12470 
12471                 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12472                 bp += 9;
12473         }
12474 
12475         (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12476         bp += strlen(bp);
12477         for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12478                 if (cnt % 8 == 0) {
12479                         (void) sprintf(bp++, "\n");
12480                 }
12481 
12482                 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12483                 bp += 9;
12484         }
12485 
12486         (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12487         bp += strlen(bp);
12488         for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12489                 if (cnt % 8 == 0) {
12490                         (void) sprintf(bp++, "\n");
12491                 }
12492 
12493                 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12494                 bp += 9;
12495         }
12496 
12497         (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12498         bp += strlen(bp);
12499         for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12500                 if (cnt % 8 == 0) {
12501                         (void) sprintf(bp++, "\n");
12502                 }
12503 
12504                 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12505                 bp += 9;
12506         }
12507 
12508         (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12509         bp += strlen(bp);
12510         for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12511                 if (cnt % 8 == 0) {
12512                         (void) sprintf(bp++, "\n");
12513                 }
12514 
12515                 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12516                 bp += 9;
12517         }
12518 
12519         (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12520         bp += strlen(bp);
12521         for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12522                 if (cnt % 8 == 0) {
12523                         (void) sprintf(bp++, "\n");
12524                 }
12525 
12526                 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12527                 bp += 9;
12528         }
12529 
12530         (void) sprintf(bp, "\n\nRISC GP Registers");
12531         bp += strlen(bp);
12532         for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12533                 if (cnt % 8 == 0) {
12534                         (void) sprintf(bp++, "\n");
12535                 }
12536 
12537                 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12538                 bp += 9;
12539         }
12540 
12541         (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12542         bp += strlen(bp);
12543         for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12544                 if (cnt % 8 == 0) {
12545                         (void) sprintf(bp++, "\n");
12546                 }
12547 
12548                 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12549                 bp += 9;
12550         }
12551 
12552         (void) sprintf(bp, "\n\nLMC Registers");
12553         bp += strlen(bp);
12554         for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12555                 if (cnt % 8 == 0) {
12556                         (void) sprintf(bp++, "\n");
12557                 }
12558 
12559                 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12560                 bp += 9;
12561         }
12562 
12563         (void) sprintf(bp, "\n\nFPM Hardware Registers");
12564         bp += strlen(bp);
12565         for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12566                 if (cnt % 8 == 0) {
12567                         (void) sprintf(bp++, "\n");
12568                 }
12569 
12570                 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12571                 bp += 9;
12572         }
12573 
12574         (void) sprintf(bp, "\n\nFB Hardware Registers");
12575         bp += strlen(bp);
12576         for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12577                 if (cnt % 8 == 0) {
12578                         (void) sprintf(bp++, "\n");
12579                 }
12580 
12581                 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12582                 bp += 9;
12583         }
12584 
12585         (void) sprintf(bp, "\n\nCode RAM");
12586         bp += strlen(bp);
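              /*
               * Code RAM and external memory lines are prefixed with the RISC
               * word address ("\n%08x: " is 11 characters).
               */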
12587         for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12588                 if (cnt % 8 == 0) {
12589                         (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12590                         bp += 11;
12591                 }
12592 
12593                 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12594                 bp += 9;
12595         }
12596 
12597         (void) sprintf(bp, "\n\nExternal Memory");
12598         bp += strlen(bp);
12599         for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12600                 if (cnt % 8 == 0) {
12601                         (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12602                         bp += 11;
12603                 }
12604                 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12605                 bp += 9;
12606         }
12607 
12608         (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12609         bp += strlen(bp);
12610 
12611         (void) sprintf(bp, "\n\nRequest Queue");
12612         bp += strlen(bp);
12613         for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12614                 if (cnt % 8 == 0) {
12615                         (void) sprintf(bp, "\n%08x: ", cnt);
12616                         bp += strlen(bp);
12617                 }
12618                 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12619                 bp += strlen(bp);
12620         }
12621 
12622         (void) sprintf(bp, "\n\nResponse Queue");
12623         bp += strlen(bp);
12624         for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12625                 if (cnt % 8 == 0) {
12626                         (void) sprintf(bp, "\n%08x: ", cnt);
12627                         bp += strlen(bp);
12628                 }
12629                 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12630                 bp += strlen(bp);
12631         }
12632 
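              /*
               * Trace buffers are optional; dump them only when the feature is
               * enabled and the buffer was actually allocated.
               */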
12633         if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12634             (ha->fwexttracebuf.bp != NULL)) {
12635                 uint32_t cnt_b = 0;
12636                 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12637 
12638                 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12639                 bp += strlen(bp);
12640                 /* show data address as a byte address, data as long words */
12641                 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12642                         cnt_b = cnt * 4;
12643                         if (cnt_b % 32 == 0) {
12644                                 (void) sprintf(bp, "\n%08x: ",
12645                                     (int)(w64 + cnt_b));
12646                                 bp += 11;
12647                         }
12648                         (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12649                         bp += 9;
12650                 }
12651         }
12652 
12653         if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12654             (ha->fwfcetracebuf.bp != NULL)) {
12655                 uint32_t cnt_b = 0;
12656                 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12657 
12658                 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12659                 bp += strlen(bp);
12660                 /* show data address as a byte address, data as long words */
12661                 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12662                         cnt_b = cnt * 4;
12663                         if (cnt_b % 32 == 0) {
12664                                 (void) sprintf(bp, "\n%08x: ",
12665                                     (int)(w64 + cnt_b));
12666                                 bp += 11;
12667                         }
12668                         (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12669                         bp += 9;
12670                 }
12671         }
12672 
12673         (void) sprintf(bp, "\n\n");
12674         bp += strlen(bp);
12675 
12676         cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12677 
12678         QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12679 
12680         return (cnt);
12681 }
12682 
12683 /*
12684  * ql_2581_ascii_fw_dump
12685  *      Converts ISP25xx or ISP81xx firmware binary dump to ascii.
12686  *
12687  * Input:
12688  *      ha = adapter state pointer.
12689  *      bufp = buffer pointer.
12690  *
12691  * Returns:
12692  *      Amount of data buffer used.
12693  *
12694  * Context:
12695  *      Kernel context.
12696  */
12697 static size_t
12698 ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12699 {
12700         uint32_t                cnt;
12701         uint32_t                cnt1;
12702         caddr_t                 bp = bufp;
12703         ql_25xx_fw_dump_t       *fw = ha->ql_dump_ptr;
12704 
12705         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12706 
12707         (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12708             ha->fw_major_version, ha->fw_minor_version,
12709             ha->fw_subminor_version, ha->fw_attributes);
12710         bp += strlen(bp);
12711 
12712         (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12713         bp += strlen(bp);
12714 
12715         (void) sprintf(bp, "\nHostRisc Registers");
12716         bp += strlen(bp);
12717         for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12718                 if (cnt % 8 == 0) {
12719                         (void) sprintf(bp++, "\n");
12720                 }
12721                 (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12722                 bp += 9;
12723         }
12724 
12725         (void) sprintf(bp, "\n\nPCIe Registers");
12726         bp += strlen(bp);
12727         for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12728                 if (cnt % 8 == 0) {
12729                         (void) sprintf(bp++, "\n");
12730                 }
12731                 (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12732                 bp += 9;
12733         }
12734 
12735         (void) strcat(bp, "\n\nHost Interface Registers");
12736         bp += strlen(bp);
12737         for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12738                 if (cnt % 8 == 0) {
12739                         (void) sprintf(bp++, "\n");
12740                 }
12741                 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12742                 bp += 9;
12743         }
12744 
12745         (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12746         bp += strlen(bp);
12747         for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12748                 if (cnt % 8 == 0) {
12749                         (void) sprintf(bp++, "\n");
12750                 }
12751                 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12752                 bp += 9;
12753         }
12754 
12755         (void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12756             fw->risc_io);
12757         bp += strlen(bp);
12758 
12759         (void) sprintf(bp, "\n\nMailbox Registers");
12760         bp += strlen(bp);
12761         for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12762                 if (cnt % 16 == 0) {
12763                         (void) sprintf(bp++, "\n");
12764                 }
12765                 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12766                 bp += 5;
12767         }
12768 
12769         (void) sprintf(bp, "\n\nXSEQ GP Registers");
12770         bp += strlen(bp);
12771         for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12772                 if (cnt % 8 == 0) {
12773                         (void) sprintf(bp++, "\n");
12774                 }
12775                 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12776                 bp += 9;
12777         }
12778 
12779         (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12780         bp += strlen(bp);
12781         for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12782                 if (cnt % 8 == 0) {
12783                         (void) sprintf(bp++, "\n");
12784                 }
12785                 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12786                 bp += 9;
12787         }
12788 
12789         (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12790         bp += strlen(bp);
12791         for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12792                 if (cnt % 8 == 0) {
12793                         (void) sprintf(bp++, "\n");
12794                 }
12795                 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12796                 bp += 9;
12797         }
12798 
12799         (void) sprintf(bp, "\n\nRSEQ GP Registers");
12800         bp += strlen(bp);
12801         for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12802                 if (cnt % 8 == 0) {
12803                         (void) sprintf(bp++, "\n");
12804                 }
12805                 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12806                 bp += 9;
12807         }
12808 
12809         (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12810         bp += strlen(bp);
12811         for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12812                 if (cnt % 8 == 0) {
12813                         (void) sprintf(bp++, "\n");
12814                 }
12815                 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12816                 bp += 9;
12817         }
12818 
12819         (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12820         bp += strlen(bp);
12821         for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12822                 if (cnt % 8 == 0) {
12823                         (void) sprintf(bp++, "\n");
12824                 }
12825                 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12826                 bp += 9;
12827         }
12828 
12829         (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12830         bp += strlen(bp);
12831         for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12832                 if (cnt % 8 == 0) {
12833                         (void) sprintf(bp++, "\n");
12834                 }
12835                 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12836                 bp += 9;
12837         }
12838 
12839         (void) sprintf(bp, "\n\nASEQ GP Registers");
12840         bp += strlen(bp);
12841         for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12842                 if (cnt % 8 == 0) {
12843                         (void) sprintf(bp++, "\n");
12844                 }
12845                 (void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12846                 bp += 9;
12847         }
12848 
12849         (void) sprintf(bp, "\n\nASEQ-0 Registers");
12850         bp += strlen(bp);
12851         for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12852                 if (cnt % 8 == 0) {
12853                         (void) sprintf(bp++, "\n");
12854                 }
12855                 (void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12856                 bp += 9;
12857         }
12858 
12859         (void) sprintf(bp, "\n\nASEQ-1 Registers");
12860         bp += strlen(bp);
12861         for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12862                 if (cnt % 8 == 0) {
12863                         (void) sprintf(bp++, "\n");
12864                 }
12865                 (void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12866                 bp += 9;
12867         }
12868 
12869         (void) sprintf(bp, "\n\nASEQ-2 Registers");
12870         bp += strlen(bp);
12871         for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12872                 if (cnt % 8 == 0) {
12873                         (void) sprintf(bp++, "\n");
12874                 }
12875                 (void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12876                 bp += 9;
12877         }
12878 
12879         (void) sprintf(bp, "\n\nCommand DMA Registers");
12880         bp += strlen(bp);
12881         for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12882                 if (cnt % 8 == 0) {
12883                         (void) sprintf(bp++, "\n");
12884                 }
12885                 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12886                 bp += 9;
12887         }
12888 
12889         (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12890         bp += strlen(bp);
12891         for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12892                 if (cnt % 8 == 0) {
12893                         (void) sprintf(bp++, "\n");
12894                 }
12895                 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12896                 bp += 9;
12897         }
12898 
12899         (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12900         bp += strlen(bp);
12901         for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12902                 if (cnt % 8 == 0) {
12903                         (void) sprintf(bp++, "\n");
12904                 }
12905                 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12906                 bp += 9;
12907         }
12908 
12909         (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12910         bp += strlen(bp);
12911         for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12912                 if (cnt % 8 == 0) {
12913                         (void) sprintf(bp++, "\n");
12914                 }
12915                 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12916                 bp += 9;
12917         }
12918 
12919         (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12920         bp += strlen(bp);
12921         for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12922                 if (cnt % 8 == 0) {
12923                         (void) sprintf(bp++, "\n");
12924                 }
12925                 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12926                 bp += 9;
12927         }
12928 
12929         (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12930         bp += strlen(bp);
12931         for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12932                 if (cnt % 8 == 0) {
12933                         (void) sprintf(bp++, "\n");
12934                 }
12935                 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12936                 bp += 9;
12937         }
12938 
12939         (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12940         bp += strlen(bp);
12941         for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12942                 if (cnt % 8 == 0) {
12943                         (void) sprintf(bp++, "\n");
12944                 }
12945                 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12946                 bp += 9;
12947         }
12948 
12949         (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12950         bp += strlen(bp);
12951         for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12952                 if (cnt % 8 == 0) {
12953                         (void) sprintf(bp++, "\n");
12954                 }
12955                 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12956                 bp += 9;
12957         }
12958 
12959         (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12960         bp += strlen(bp);
12961         for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12962                 if (cnt % 8 == 0) {
12963                         (void) sprintf(bp++, "\n");
12964                 }
12965                 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12966                 bp += 9;
12967         }
12968 
12969         (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12970         bp += strlen(bp);
12971         for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12972                 if (cnt % 8 == 0) {
12973                         (void) sprintf(bp++, "\n");
12974                 }
12975                 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12976                 bp += 9;
12977         }
12978 
12979         (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12980         bp += strlen(bp);
12981         for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12982                 if (cnt % 8 == 0) {
12983                         (void) sprintf(bp++, "\n");
12984                 }
12985                 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12986                 bp += 9;
12987         }
12988 
12989         (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12990         bp += strlen(bp);
12991         for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12992                 if (cnt % 8 == 0) {
12993                         (void) sprintf(bp++, "\n");
12994                 }
12995                 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12996                 bp += 9;
12997         }
12998 
12999         (void) sprintf(bp, "\n\nRISC GP Registers");
13000         bp += strlen(bp);
13001         for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13002                 if (cnt % 8 == 0) {
13003                         (void) sprintf(bp++, "\n");
13004                 }
13005                 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13006                 bp += 9;
13007         }
13008 
13009         (void) sprintf(bp, "\n\nLMC Registers");
13010         bp += strlen(bp);
13011         for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13012                 if (cnt % 8 == 0) {
13013                         (void) sprintf(bp++, "\n");
13014                 }
13015                 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13016                 bp += 9;
13017         }
13018 
13019         (void) sprintf(bp, "\n\nFPM Hardware Registers");
13020         bp += strlen(bp);
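              /*
               * The ISP81xx dump layout defines its own FPM hardware register
               * array, so take the byte count from that structure when an
               * 81xx is present; the FB registers below are sized the same
               * way.
               */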
13021         cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13022             (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
13023             (uint32_t)(sizeof (fw->fpm_hdw_reg));
13024         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13025                 if (cnt % 8 == 0) {
13026                         (void) sprintf(bp++, "\n");
13027                 }
13028                 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13029                 bp += 9;
13030         }
13031 
13032         (void) sprintf(bp, "\n\nFB Hardware Registers");
13033         bp += strlen(bp);
13034         cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13035             (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
13036             (uint32_t)(sizeof (fw->fb_hdw_reg));
13037         for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13038                 if (cnt % 8 == 0) {
13039                         (void) sprintf(bp++, "\n");
13040                 }
13041                 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
13042                 bp += 9;
13043         }
13044 
13045         (void) sprintf(bp, "\n\nCode RAM");
13046         bp += strlen(bp);
13047         for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
13048                 if (cnt % 8 == 0) {
13049                         (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13050                         bp += 11;
13051                 }
13052                 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13053                 bp += 9;
13054         }
13055 
13056         (void) sprintf(bp, "\n\nExternal Memory");
13057         bp += strlen(bp);
13058         for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13059                 if (cnt % 8 == 0) {
13060                         (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13061                         bp += 11;
13062                 }
13063                 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
13064                 bp += 9;
13065         }
13066 
13067         (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13068         bp += strlen(bp);
13069 
13070         (void) sprintf(bp, "\n\nRequest Queue");
13071         bp += strlen(bp);
13072         for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
13073                 if (cnt % 8 == 0) {
13074                         (void) sprintf(bp, "\n%08x: ", cnt);
13075                         bp += strlen(bp);
13076                 }
13077                 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
13078                 bp += strlen(bp);
13079         }
13080 
13081         (void) sprintf(bp, "\n\nResponse Queue");
13082         bp += strlen(bp);
13083         for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
13084                 if (cnt % 8 == 0) {
13085                         (void) sprintf(bp, "\n%08x: ", cnt);
13086                         bp += strlen(bp);
13087                 }
13088                 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
13089                 bp += strlen(bp);
13090         }
13091 
13092         if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13093             (ha->fwexttracebuf.bp != NULL)) {
13094                 uint32_t cnt_b = 0;
13095                 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13096 
13097                 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13098                 bp += strlen(bp);
13099                 /* show data address as a byte address, data as long words */
13100                 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13101                         cnt_b = cnt * 4;
13102                         if (cnt_b % 32 == 0) {
13103                                 (void) sprintf(bp, "\n%08x: ",
13104                                     (int)(w64 + cnt_b));
13105                                 bp += 11;
13106                         }
13107                         (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13108                         bp += 9;
13109                 }
13110         }
13111 
13112         if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13113             (ha->fwfcetracebuf.bp != NULL)) {
13114                 uint32_t cnt_b = 0;
13115                 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13116 
13117                 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13118                 bp += strlen(bp);
13119                 /* show data address as a byte address, data as long words */
13120                 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13121                         cnt_b = cnt * 4;
13122                         if (cnt_b % 32 == 0) {
13123                                 (void) sprintf(bp, "\n%08x: ",
13124                                     (int)(w64 + cnt_b));
13125                                 bp += 11;
13126                         }
13127                         (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13128                         bp += 9;
13129                 }
13130         }
13131 
13132         (void) sprintf(bp, "\n\n");
13133         bp += strlen(bp);
13134 
13135         cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13136 
13137         QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
13138 
13139         return (cnt);
13140 }
13141 
13142 /*
13143  * ql_2200_binary_fw_dump
13144  *      Retrieves ISP2200 register state and RISC SRAM into the dump buffer.
       *
13145  * Input:
13146  *      ha:     adapter state pointer.
13147  *      fw:     firmware dump context pointer.
13148  *
13149  * Returns:
13150  *      ql local function return status code.
13151  *
13152  * Context:
13153  *      Interrupt or Kernel context, no mailbox commands allowed.
13154  */
13155 static int
13156 ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13157 {
13158         uint32_t        cnt;
13159         uint16_t        risc_address;
13160         clock_t         timer;
13161         mbx_cmd_t       mc;
13162         mbx_cmd_t       *mcp = &mc;
13163         int             rval = QL_SUCCESS;
13164 
13165         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13166 
13167         /* Disable ISP interrupts. */
13168         WRT16_IO_REG(ha, ictrl, 0);
13169         ADAPTER_STATE_LOCK(ha);
13170         ha->flags &= ~INTERRUPTS_ENABLED;
13171         ADAPTER_STATE_UNLOCK(ha);
13172 
13173         /* Release mailbox registers. */
13174         WRT16_IO_REG(ha, semaphore, 0);
13175 
13176         /* Pause RISC. */
13177         WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
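              /* Wait up to ~30 seconds for the RISC to acknowledge the pause. */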
13178         timer = 30000;
13179         while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13180                 if (timer-- != 0) {
13181                         drv_usecwait(MILLISEC);
13182                 } else {
13183                         rval = QL_FUNCTION_TIMEOUT;
13184                         break;
13185                 }
13186         }
13187 
13188         if (rval == QL_SUCCESS) {
13189                 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13190                     sizeof (fw->pbiu_reg) / 2, 16);
13191 
13192                 /* In 2200 we only read 8 mailboxes */
13193                 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
13194                     8, 16);
13195 
13196                 (void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
13197                     sizeof (fw->dma_reg) / 2, 16);
13198 
13199                 WRT16_IO_REG(ha, ctrl_status, 0);
13200                 (void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
13201                     sizeof (fw->risc_hdw_reg) / 2, 16);
13202 
13203                 WRT16_IO_REG(ha, pcr, 0x2000);
13204                 (void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
13205                     sizeof (fw->risc_gp0_reg) / 2, 16);
13206 
13207                 WRT16_IO_REG(ha, pcr, 0x2100);
13208                 (void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
13209                     sizeof (fw->risc_gp1_reg) / 2, 16);
13210 
13211                 WRT16_IO_REG(ha, pcr, 0x2200);
13212                 (void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
13213                     sizeof (fw->risc_gp2_reg) / 2, 16);
13214 
13215                 WRT16_IO_REG(ha, pcr, 0x2300);
13216                 (void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
13217                     sizeof (fw->risc_gp3_reg) / 2, 16);
13218 
13219                 WRT16_IO_REG(ha, pcr, 0x2400);
13220                 (void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
13221                     sizeof (fw->risc_gp4_reg) / 2, 16);
13222 
13223                 WRT16_IO_REG(ha, pcr, 0x2500);
13224                 (void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
13225                     sizeof (fw->risc_gp5_reg) / 2, 16);
13226 
13227                 WRT16_IO_REG(ha, pcr, 0x2600);
13228                 (void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
13229                     sizeof (fw->risc_gp6_reg) / 2, 16);
13230 
13231                 WRT16_IO_REG(ha, pcr, 0x2700);
13232                 (void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
13233                     sizeof (fw->risc_gp7_reg) / 2, 16);
13234 
13235                 WRT16_IO_REG(ha, ctrl_status, 0x10);
13236                 /* 2200 has only 16 registers */
13237                 (void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
13238                     ha->iobase + 0x80, 16, 16);
13239 
13240                 WRT16_IO_REG(ha, ctrl_status, 0x20);
13241                 (void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
13242                     sizeof (fw->fpm_b0_reg) / 2, 16);
13243 
13244                 WRT16_IO_REG(ha, ctrl_status, 0x30);
13245                 (void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
13246                     sizeof (fw->fpm_b1_reg) / 2, 16);
13247 
13248                 /* Select FPM registers. */
13249                 WRT16_IO_REG(ha, ctrl_status, 0x20);
13250 
13251                 /* FPM Soft Reset. */
13252                 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
13253 
13254                 /* Select frame buffer registers. */
13255                 WRT16_IO_REG(ha, ctrl_status, 0x10);
13256 
13257                 /* Reset frame buffer FIFOs. */
13258                 WRT16_IO_REG(ha, fb_cmd, 0xa000);
13259 
13260                 /* Select RISC module registers. */
13261                 WRT16_IO_REG(ha, ctrl_status, 0);
13262 
13263                 /* Reset RISC module. */
13264                 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13265 
13266                 /* Reset ISP semaphore. */
13267                 WRT16_IO_REG(ha, semaphore, 0);
13268 
13269                 /* Release RISC module. */
13270                 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13271 
13272                 /* Wait for RISC to recover from reset. */
13273                 timer = 30000;
13274                 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
13275                         if (timer-- != 0) {
13276                                 drv_usecwait(MILLISEC);
13277                         } else {
13278                                 rval = QL_FUNCTION_TIMEOUT;
13279                                 break;
13280                         }
13281                 }
13282 
13283                 /* Disable RISC pause on FPM parity error. */
13284                 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13285         }
13286 
13287         if (rval == QL_SUCCESS) {
13288                 /* Pause RISC. */
13289                 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13290                 timer = 30000;
13291                 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13292                         if (timer-- != 0) {
13293                                 drv_usecwait(MILLISEC);
13294                         } else {
13295                                 rval = QL_FUNCTION_TIMEOUT;
13296                                 break;
13297                         }
13298                 }
13299         }
13300 
13301         if (rval == QL_SUCCESS) {
13302                 /* Set memory configuration and timing. */
13303                 WRT16_IO_REG(ha, mctr, 0xf2);
13304 
13305                 /* Release RISC. */
13306                 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13307 
13308                 /* Get RISC SRAM. */
13309                 risc_address = 0x1000;
13310                 WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_WORD);
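                      /*
                       * Read SRAM one word at a time: load the word address
                       * into mailbox 1, raise the host interrupt, then poll
                       * for the RISC to return status in mailbox 0 and data
                       * in mailbox 2.
                       */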
13311                 for (cnt = 0; cnt < 0xf000; cnt++) {
13312                         WRT16_IO_REG(ha, mailbox_in[1], risc_address++);
13313                         WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
13314                         for (timer = 6000000; timer != 0; timer--) {
13315                                 /* Check for pending interrupts. */
13316                                 if (INTERRUPT_PENDING(ha)) {
13317                                         if (RD16_IO_REG(ha, semaphore) &
13318                                             BIT_0) {
13319                                                 WRT16_IO_REG(ha, hccr,
13320                                                     HC_CLR_RISC_INT);
13321                                                 mcp->mb[0] = RD16_IO_REG(ha,
13322                                                     mailbox_out[0]);
13323                                                 fw->risc_ram[cnt] =
13324                                                     RD16_IO_REG(ha,
13325                                                     mailbox_out[2]);
13326                                                 WRT16_IO_REG(ha,
13327                                                     semaphore, 0);
13328                                                 break;
13329                                         }
13330                                         WRT16_IO_REG(ha, hccr,
13331                                             HC_CLR_RISC_INT);
13332                                 }
13333                                 drv_usecwait(5);
13334                         }
13335 
13336                         if (timer == 0) {
13337                                 rval = QL_FUNCTION_TIMEOUT;
13338                         } else {
13339                                 rval = mcp->mb[0];
13340                         }
13341 
13342                         if (rval != QL_SUCCESS) {
13343                                 break;
13344                         }
13345                 }
13346         }
13347 
13348         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13349 
13350         return (rval);
13351 }
13352 
13353 /*
13354  * ql_2300_binary_fw_dump
13355  *      Retrieves ISP2300 register state and SRAM into the dump buffer.
       *
13356  * Input:
13357  *      ha:     adapter state pointer.
13358  *      fw:     firmware dump context pointer.
13359  *
13360  * Returns:
13361  *      ql local function return status code.
13362  *
13363  * Context:
13364  *      Interrupt or Kernel context, no mailbox commands allowed.
13365  */
13366 static int
13367 ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13368 {
13369         clock_t timer;
13370         int     rval = QL_SUCCESS;
13371 
13372         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13373 
13374         /* Disable ISP interrupts. */
13375         WRT16_IO_REG(ha, ictrl, 0);
13376         ADAPTER_STATE_LOCK(ha);
13377         ha->flags &= ~INTERRUPTS_ENABLED;
13378         ADAPTER_STATE_UNLOCK(ha);
13379 
13380         /* Release mailbox registers. */
13381         WRT16_IO_REG(ha, semaphore, 0);
13382 
13383         /* Pause RISC. */
13384         WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13385         timer = 30000;
13386         while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13387                 if (timer-- != 0) {
13388                         drv_usecwait(MILLISEC);
13389                 } else {
13390                         rval = QL_FUNCTION_TIMEOUT;
13391                         break;
13392                 }
13393         }
13394 
13395         if (rval == QL_SUCCESS) {
13396                 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13397                     sizeof (fw->pbiu_reg) / 2, 16);
13398 
13399                 (void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
13400                     sizeof (fw->risc_host_reg) / 2, 16);
13401 
13402                 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
13403                     sizeof (fw->mailbox_reg) / 2, 16);
13404 
13405                 WRT16_IO_REG(ha, ctrl_status, 0x40);
13406                 (void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
13407                     sizeof (fw->resp_dma_reg) / 2, 16);
13408 
13409                 WRT16_IO_REG(ha, ctrl_status, 0x50);
13410                 (void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
13411                     sizeof (fw->dma_reg) / 2, 16);
13412 
13413                 WRT16_IO_REG(ha, ctrl_status, 0);
13414                 (void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
13415                     sizeof (fw->risc_hdw_reg) / 2, 16);
13416 
13417                 WRT16_IO_REG(ha, pcr, 0x2000);
13418                 (void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
13419                     sizeof (fw->risc_gp0_reg) / 2, 16);
13420 
13421                 WRT16_IO_REG(ha, pcr, 0x2200);
13422                 (void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
13423                     sizeof (fw->risc_gp1_reg) / 2, 16);
13424 
13425                 WRT16_IO_REG(ha, pcr, 0x2400);
13426                 (void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
13427                     sizeof (fw->risc_gp2_reg) / 2, 16);
13428 
13429                 WRT16_IO_REG(ha, pcr, 0x2600);
13430                 (void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
13431                     sizeof (fw->risc_gp3_reg) / 2, 16);
13432 
13433                 WRT16_IO_REG(ha, pcr, 0x2800);
13434                 (void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
13435                     sizeof (fw->risc_gp4_reg) / 2, 16);
13436 
13437                 WRT16_IO_REG(ha, pcr, 0x2A00);
13438                 (void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
13439                     sizeof (fw->risc_gp5_reg) / 2, 16);
13440 
13441                 WRT16_IO_REG(ha, pcr, 0x2C00);
13442                 (void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
13443                     sizeof (fw->risc_gp6_reg) / 2, 16);
13444 
13445                 WRT16_IO_REG(ha, pcr, 0x2E00);
13446                 (void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
13447                     sizeof (fw->risc_gp7_reg) / 2, 16);
13448 
13449                 WRT16_IO_REG(ha, ctrl_status, 0x10);
13450                 (void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
13451                     ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);
13452 
13453                 WRT16_IO_REG(ha, ctrl_status, 0x20);
13454                 (void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
13455                     sizeof (fw->fpm_b0_reg) / 2, 16);
13456 
13457                 WRT16_IO_REG(ha, ctrl_status, 0x30);
13458                 (void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
13459                     sizeof (fw->fpm_b1_reg) / 2, 16);
13460 
13461                 /* Select FPM registers. */
13462                 WRT16_IO_REG(ha, ctrl_status, 0x20);
13463 
13464                 /* FPM Soft Reset. */
13465                 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
13466 
13467                 /* Select frame buffer registers. */
13468                 WRT16_IO_REG(ha, ctrl_status, 0x10);
13469 
13470                 /* Reset frame buffer FIFOs. */
13471                 WRT16_IO_REG(ha, fb_cmd, 0xa000);
13472 
13473                 /* Select RISC module registers. */
13474                 WRT16_IO_REG(ha, ctrl_status, 0);
13475 
13476                 /* Reset RISC module. */
13477                 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13478 
13479                 /* Reset ISP semaphore. */
13480                 WRT16_IO_REG(ha, semaphore, 0);
13481 
13482                 /* Release RISC module. */
13483                 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13484 
13485                 /* Wait for RISC to recover from reset. */
13486                 timer = 30000;
13487                 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
13488                         if (timer-- != 0) {
13489                                 drv_usecwait(MILLISEC);
13490                         } else {
13491                                 rval = QL_FUNCTION_TIMEOUT;
13492                                 break;
13493                         }
13494                 }
13495 
13496                 /* Disable RISC pause on FPM parity error. */
13497                 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13498         }
13499 
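              /*
               * Dump the three SRAM regions: RISC SRAM at 0x0800
               * (length 0xf800), stack SRAM at 0x10000 (length 0x800)
               * and data SRAM at 0x10800 (length 0xf800).
               */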
13500         /* Get RISC SRAM. */
13501         if (rval == QL_SUCCESS) {
13502                 rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
13503         }
13504         /* Get STACK SRAM. */
13505         if (rval == QL_SUCCESS) {
13506                 rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
13507         }
13508         /* Get DATA SRAM. */
13509         if (rval == QL_SUCCESS) {
13510                 rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
13511         }
13512 
13513         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13514 
13515         return (rval);
13516 }
13517 
13518 /*
13519  * ql_24xx_binary_fw_dump
13520  *
13521  * Input:
13522  *      ha:     adapter state pointer.
13523  *      fw:     firmware dump context pointer.
13524  *
13525  * Returns:
13526  *      ql local function return status code.
13527  *
13528  * Context:
13529  *      Interrupt or Kernel context, no mailbox commands allowed.
13530  */
13531 static int
13532 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
13533 {
13534         uint32_t        *reg32;
13535         void            *bp;
13536         clock_t         timer;
13537         int             rval = QL_SUCCESS;
13538 
13539         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13540 
13541         fw->hccr = RD32_IO_REG(ha, hccr);
13542 
13543         /* Pause RISC. */
13544         if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13545                 /* Disable ISP interrupts. */
13546                 WRT16_IO_REG(ha, ictrl, 0);
13547 
13548                 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
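                      /*
                       * Poll up to 30000 x 100 microseconds (~3 seconds)
                       * for the RISC paused status to assert.
                       */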
13549                 for (timer = 30000;
13550                     (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13551                     rval == QL_SUCCESS; timer--) {
13552                         if (timer) {
13553                                 drv_usecwait(100);
13554                         } else {
13555                                 rval = QL_FUNCTION_TIMEOUT;
13556                         }
13557                 }
13558         }
13559 
13560         if (rval == QL_SUCCESS) {
13561                 /* Host interface registers. */
13562                 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13563                     sizeof (fw->host_reg) / 4, 32);
13564 
13565                 /* Disable ISP interrupts. */
13566                 WRT32_IO_REG(ha, ictrl, 0);
13567                 RD32_IO_REG(ha, ictrl);
13568                 ADAPTER_STATE_LOCK(ha);
13569                 ha->flags &= ~INTERRUPTS_ENABLED;
13570                 ADAPTER_STATE_UNLOCK(ha);
13571 
13572                 /* Shadow registers. */
13573 
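                      /*
                       * Each shadow register is captured by writing a
                       * 0xB0n00000 select value to the window at
                       * iobase + 0xF0 and reading the shadowed contents
                       * back from iobase + 0xFC.
                       */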
13574                 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13575                 RD32_IO_REG(ha, io_base_addr);
13576 
13577                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13578                 WRT_REG_DWORD(ha, reg32, 0xB0000000);
13579                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13580                 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13581 
13582                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13583                 WRT_REG_DWORD(ha, reg32, 0xB0100000);
13584                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13585                 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13586 
13587                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13588                 WRT_REG_DWORD(ha, reg32, 0xB0200000);
13589                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13590                 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13591 
13592                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13593                 WRT_REG_DWORD(ha, reg32, 0xB0300000);
13594                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13595                 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13596 
13597                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13598                 WRT_REG_DWORD(ha, reg32, 0xB0400000);
13599                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13600                 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13601 
13602                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13603                 WRT_REG_DWORD(ha, reg32, 0xB0500000);
13604                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13605                 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13606 
13607                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13608                 WRT_REG_DWORD(ha, reg32, 0xB0600000);
13609                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13610                 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13611 
13612                 /* Mailbox registers. */
13613                 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13614                     sizeof (fw->mailbox_reg) / 2, 16);
13615 
13616                 /* Transfer sequence registers. */
13617 
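                      /*
                       * The sequencer, DMA, RISC, LMC, FPM and frame
                       * buffer banks below are dumped by selecting a
                       * 16-register window through io_base_addr and
                       * reading it at iobase + 0xC0; ql_read_regs()
                       * returns the advanced buffer pointer so the
                       * reads can be chained into one dump array.
                       */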
13618                 /* XSEQ GP */
13619                 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13620                 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13621                     16, 32);
13622                 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13623                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13624                 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13625                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13626                 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13627                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13628                 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13629                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13630                 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13631                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13632                 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13633                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13634                 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13635                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13636 
13637                 /* XSEQ-0 */
13638                 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13639                 (void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13640                     sizeof (fw->xseq_0_reg) / 4, 32);
13641 
13642                 /* XSEQ-1 */
13643                 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13644                 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13645                     sizeof (fw->xseq_1_reg) / 4, 32);
13646 
13647                 /* Receive sequence registers. */
13648 
13649                 /* RSEQ GP */
13650                 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13651                 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13652                     16, 32);
13653                 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13654                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13655                 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13656                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13657                 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13658                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13659                 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13660                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13661                 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13662                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13663                 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13664                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13665                 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13666                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13667 
13668                 /* RSEQ-0 */
13669                 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13670                 (void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13671                     sizeof (fw->rseq_0_reg) / 4, 32);
13672 
13673                 /* RSEQ-1 */
13674                 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13675                 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13676                     sizeof (fw->rseq_1_reg) / 4, 32);
13677 
13678                 /* RSEQ-2 */
13679                 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13680                 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13681                     sizeof (fw->rseq_2_reg) / 4, 32);
13682 
13683                 /* Command DMA registers. */
13684 
13685                 WRT32_IO_REG(ha, io_base_addr, 0x7100);
13686                 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13687                     sizeof (fw->cmd_dma_reg) / 4, 32);
13688 
13689                 /* Queues. */
13690 
13691                 /* RequestQ0 */
13692                 WRT32_IO_REG(ha, io_base_addr, 0x7200);
13693                 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13694                     8, 32);
13695                 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13696 
13697                 /* ResponseQ0 */
13698                 WRT32_IO_REG(ha, io_base_addr, 0x7300);
13699                 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13700                     8, 32);
13701                 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13702 
13703                 /* RequestQ1 */
13704                 WRT32_IO_REG(ha, io_base_addr, 0x7400);
13705                 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13706                     8, 32);
13707                 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13708 
13709                 /* Transmit DMA registers. */
13710 
13711                 /* XMT0 */
13712                 WRT32_IO_REG(ha, io_base_addr, 0x7600);
13713                 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13714                     16, 32);
13715                 WRT32_IO_REG(ha, io_base_addr, 0x7610);
13716                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13717 
13718                 /* XMT1 */
13719                 WRT32_IO_REG(ha, io_base_addr, 0x7620);
13720                 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13721                     16, 32);
13722                 WRT32_IO_REG(ha, io_base_addr, 0x7630);
13723                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13724 
13725                 /* XMT2 */
13726                 WRT32_IO_REG(ha, io_base_addr, 0x7640);
13727                 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13728                     16, 32);
13729                 WRT32_IO_REG(ha, io_base_addr, 0x7650);
13730                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13731 
13732                 /* XMT3 */
13733                 WRT32_IO_REG(ha, io_base_addr, 0x7660);
13734                 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13735                     16, 32);
13736                 WRT32_IO_REG(ha, io_base_addr, 0x7670);
13737                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13738 
13739                 /* XMT4 */
13740                 WRT32_IO_REG(ha, io_base_addr, 0x7680);
13741                 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13742                     16, 32);
13743                 WRT32_IO_REG(ha, io_base_addr, 0x7690);
13744                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13745 
13746                 /* XMT Common */
13747                 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13748                 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13749                     ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13750 
13751                 /* Receive DMA registers. */
13752 
13753                 /* RCVThread0 */
13754                 WRT32_IO_REG(ha, io_base_addr, 0x7700);
13755                 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13756                     ha->iobase + 0xC0, 16, 32);
13757                 WRT32_IO_REG(ha, io_base_addr, 0x7710);
13758                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13759 
13760                 /* RCVThread1 */
13761                 WRT32_IO_REG(ha, io_base_addr, 0x7720);
13762                 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
13763                     ha->iobase + 0xC0, 16, 32);
13764                 WRT32_IO_REG(ha, io_base_addr, 0x7730);
13765                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13766 
13767                 /* RISC registers. */
13768 
13769                 /* RISC GP */
13770                 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
13771                 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
13772                     16, 32);
13773                 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
13774                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13775                 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
13776                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13777                 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
13778                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13779                 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
13780                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13781                 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
13782                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13783                 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
13784                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13785                 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13786                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13787 
13788                 /* Local memory controller registers. */
13789 
13790                 /* LMC */
13791                 WRT32_IO_REG(ha, io_base_addr, 0x3000);
13792                 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
13793                     16, 32);
13794                 WRT32_IO_REG(ha, io_base_addr, 0x3010);
13795                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13796                 WRT32_IO_REG(ha, io_base_addr, 0x3020);
13797                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13798                 WRT32_IO_REG(ha, io_base_addr, 0x3030);
13799                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13800                 WRT32_IO_REG(ha, io_base_addr, 0x3040);
13801                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13802                 WRT32_IO_REG(ha, io_base_addr, 0x3050);
13803                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13804                 WRT32_IO_REG(ha, io_base_addr, 0x3060);
13805                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13806 
13807                 /* Fibre Protocol Module registers. */
13808 
13809                 /* FPM hardware */
13810                 WRT32_IO_REG(ha, io_base_addr, 0x4000);
13811                 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
13812                     16, 32);
13813                 WRT32_IO_REG(ha, io_base_addr, 0x4010);
13814                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13815                 WRT32_IO_REG(ha, io_base_addr, 0x4020);
13816                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13817                 WRT32_IO_REG(ha, io_base_addr, 0x4030);
13818                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13819                 WRT32_IO_REG(ha, io_base_addr, 0x4040);
13820                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13821                 WRT32_IO_REG(ha, io_base_addr, 0x4050);
13822                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13823                 WRT32_IO_REG(ha, io_base_addr, 0x4060);
13824                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13825                 WRT32_IO_REG(ha, io_base_addr, 0x4070);
13826                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13827                 WRT32_IO_REG(ha, io_base_addr, 0x4080);
13828                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13829                 WRT32_IO_REG(ha, io_base_addr, 0x4090);
13830                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13831                 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
13832                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13833                 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
13834                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13835 
13836                 /* Frame Buffer registers. */
13837 
13838                 /* FB hardware */
13839                 WRT32_IO_REG(ha, io_base_addr, 0x6000);
13840                 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
13841                     16, 32);
13842                 WRT32_IO_REG(ha, io_base_addr, 0x6010);
13843                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13844                 WRT32_IO_REG(ha, io_base_addr, 0x6020);
13845                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13846                 WRT32_IO_REG(ha, io_base_addr, 0x6030);
13847                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13848                 WRT32_IO_REG(ha, io_base_addr, 0x6040);
13849                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13850                 WRT32_IO_REG(ha, io_base_addr, 0x6100);
13851                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13852                 WRT32_IO_REG(ha, io_base_addr, 0x6130);
13853                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13854                 WRT32_IO_REG(ha, io_base_addr, 0x6150);
13855                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13856                 WRT32_IO_REG(ha, io_base_addr, 0x6170);
13857                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13858                 WRT32_IO_REG(ha, io_base_addr, 0x6190);
13859                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13860                 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13861                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13862         }
13863 
13864         /* Get the request queue */
13865         if (rval == QL_SUCCESS) {
13866                 uint32_t        cnt;
13867                 uint32_t        *w32 = (uint32_t *)ha->request_ring_bp;
13868 
13869                 /* Sync DMA buffer. */
13870                 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13871                     REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
13872                     DDI_DMA_SYNC_FORKERNEL);
13873 
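                      /*
                       * Copy the request ring one 32-bit word at a time;
                       * LITTLE_ENDIAN_32() stores the dump data in
                       * little-endian order regardless of host byte
                       * order.
                       */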
13874                 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
13875                         fw->req_q[cnt] = *w32++;
13876                         LITTLE_ENDIAN_32(&fw->req_q[cnt]);
13877                 }
13878         }
13879 
13880         /* Get the response queue */
13881         if (rval == QL_SUCCESS) {
13882                 uint32_t        cnt;
13883                 uint32_t        *w32 = (uint32_t *)ha->response_ring_bp;
13884 
13885                 /* Sync DMA buffer. */
13886                 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13887                     RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
13888                     DDI_DMA_SYNC_FORKERNEL);
13889 
13890                 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
13891                         fw->rsp_q[cnt] = *w32++;
13892                         LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
13893                 }
13894         }
13895 
13896         /* Reset RISC. */
13897         ql_reset_chip(ha);
13898 
13899         /* Memory. */
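              /*
               * Code RAM is read from RISC address 0x20000 and external
               * memory from 0x100000, using fw_ext_memory_size / 4 as
               * the dword count for the external region.
               */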
13900         if (rval == QL_SUCCESS) {
13901                 /* Code RAM. */
13902                 rval = ql_read_risc_ram(ha, 0x20000,
13903                     sizeof (fw->code_ram) / 4, fw->code_ram);
13904         }
13905         if (rval == QL_SUCCESS) {
13906                 /* External Memory. */
13907                 rval = ql_read_risc_ram(ha, 0x100000,
13908                     ha->fw_ext_memory_size / 4, fw->ext_mem);
13909         }
13910 
13911         /* Get the extended trace buffer */
13912         if (rval == QL_SUCCESS) {
13913                 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13914                     (ha->fwexttracebuf.bp != NULL)) {
13915                         uint32_t        cnt;
13916                         uint32_t        *w32 = ha->fwexttracebuf.bp;
13917 
13918                         /* Sync DMA buffer. */
13919                         (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
13920                             FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
13921 
13922                         for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13923                                 fw->ext_trace_buf[cnt] = *w32++;
13924                         }
13925                 }
13926         }
13927 
13928         /* Get the FC event trace buffer */
13929         if (rval == QL_SUCCESS) {
13930                 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13931                     (ha->fwfcetracebuf.bp != NULL)) {
13932                         uint32_t        cnt;
13933                         uint32_t        *w32 = ha->fwfcetracebuf.bp;
13934 
13935                         /* Sync DMA buffer. */
13936                         (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
13937                             FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
13938 
13939                         for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13940                                 fw->fce_trace_buf[cnt] = *w32++;
13941                         }
13942                 }
13943         }
13944 
13945         if (rval != QL_SUCCESS) {
13946                 EL(ha, "failed=%xh\n", rval);
13947         } else {
13948                 /*EMPTY*/
13949                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13950         }
13951 
13952         return (rval);
13953 }
13954 
13955 /*
13956  * ql_25xx_binary_fw_dump
13957  *
13958  * Input:
13959  *      ha:     adapter state pointer.
13960  *      fw:     firmware dump context pointer.
13961  *
13962  * Returns:
13963  *      ql local function return status code.
13964  *
13965  * Context:
13966  *      Interrupt or Kernel context, no mailbox commands allowed.
13967  */
13968 static int
13969 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13970 {
13971         uint32_t        *reg32;
13972         void            *bp;
13973         clock_t         timer;
13974         int             rval = QL_SUCCESS;
13975 
13976         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13977 
13978         fw->r2h_status = RD32_IO_REG(ha, risc2host);
13979 
13980         /* Pause RISC. */
13981         if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13982                 /* Disable ISP interrupts. */
13983                 WRT16_IO_REG(ha, ictrl, 0);
13984 
13985                 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
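                      /*
                       * Same ~3 second poll as the 24xx path, logging a
                       * progress entry every 10000 iterations (~1 second).
                       */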
13986                 for (timer = 30000;
13987                     (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13988                     rval == QL_SUCCESS; timer--) {
13989                         if (timer) {
13990                                 drv_usecwait(100);
13991                                 if (timer % 10000 == 0) {
13992                                         EL(ha, "risc pause %d\n", timer);
13993                                 }
13994                         } else {
13995                                 EL(ha, "risc pause timeout\n");
13996                                 rval = QL_FUNCTION_TIMEOUT;
13997                         }
13998                 }
13999         }
14000 
14001         if (rval == QL_SUCCESS) {
14002 
14003                 /* Host Interface registers */
14004 
14005                 /* HostRisc registers. */
14006                 WRT32_IO_REG(ha, io_base_addr, 0x7000);
14007                 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14008                     16, 32);
14009                 WRT32_IO_REG(ha, io_base_addr, 0x7010);
14010                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14011 
14012                 /* PCIe registers. */
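                      /*
                       * The dword at iobase + 0xC0 appears to gate the
                       * PCIe register window: set it to 1, capture three
                       * dwords at iobase + 0xC4 plus the window dword
                       * itself, then restore it to 0.
                       */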
14013                 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14014                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14015                 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14016                     3, 32);
14017                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14018                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14019 
14020                 /* Host interface registers. */
14021                 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14022                     sizeof (fw->host_reg) / 4, 32);
14023 
14024                 /* Disable ISP interrupts. */
14025 
14026                 WRT32_IO_REG(ha, ictrl, 0);
14027                 RD32_IO_REG(ha, ictrl);
14028                 ADAPTER_STATE_LOCK(ha);
14029                 ha->flags &= ~INTERRUPTS_ENABLED;
14030                 ADAPTER_STATE_UNLOCK(ha);
14031 
14032                 /* Shadow registers. */
14033 
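                      /*
                       * Same select-at-0xF0 / read-at-0xFC handshake as
                       * the 24xx dump, extended here to eleven shadow
                       * registers (indices 0 through 0xa).
                       */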
14034                 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14035                 RD32_IO_REG(ha, io_base_addr);
14036 
14037                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14038                 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14039                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14040                 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14041 
14042                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14043                 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14044                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14045                 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14046 
14047                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14048                 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14049                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14050                 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14051 
14052                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14053                 WRT_REG_DWORD(ha, reg32, 0xB0300000);
14054                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14055                 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14056 
14057                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14058                 WRT_REG_DWORD(ha, reg32, 0xB0400000);
14059                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14060                 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14061 
14062                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14063                 WRT_REG_DWORD(ha, reg32, 0xB0500000);
14064                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14065                 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14066 
14067                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14068                 WRT_REG_DWORD(ha, reg32, 0xB0600000);
14069                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14070                 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14071 
14072                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14073                 WRT_REG_DWORD(ha, reg32, 0xB0700000);
14074                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14075                 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14076 
14077                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14078                 WRT_REG_DWORD(ha, reg32, 0xB0800000);
14079                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14080                 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14081 
14082                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14083                 WRT_REG_DWORD(ha, reg32, 0xB0900000);
14084                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14085                 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14086 
14087                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14088                 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14089                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14090                 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14091 
14092                 /* RISC I/O register. */
14093 
14094                 WRT32_IO_REG(ha, io_base_addr, 0x0010);
14095                 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14096                     1, 32);
14097 
14098                 /* Mailbox registers. */
14099 
14100                 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14101                     sizeof (fw->mailbox_reg) / 2, 16);
14102 
14103                 /* Transfer sequence registers. */
14104 
14105                 /* XSEQ GP */
14106                 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14107                 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14108                     16, 32);
14109                 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14110                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14111                 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14112                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14113                 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14114                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14115                 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14116                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14117                 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14118                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14119                 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14120                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14121                 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14122                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14123 
14124                 /* XSEQ-0 */
14125                 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14126                 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14127                     16, 32);
14128                 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14129                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14130                 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14131                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14132 
14133                 /* XSEQ-1 */
14134                 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14135                 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14136                     16, 32);
14137 
14138                 /* Receive sequence registers. */
14139 
14140                 /* RSEQ GP */
14141                 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14142                 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14143                     16, 32);
14144                 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14145                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14146                 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14147                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14148                 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14149                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14150                 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14151                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14152                 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14153                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14154                 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14155                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14156                 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14157                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14158 
14159                 /* RSEQ-0 */
14160                 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14161                 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14162                     16, 32);
14163                 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14164                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14165 
14166                 /* RSEQ-1 */
14167                 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14168                 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14169                     sizeof (fw->rseq_1_reg) / 4, 32);
14170 
14171                 /* RSEQ-2 */
14172                 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14173                 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14174                     sizeof (fw->rseq_2_reg) / 4, 32);
14175 
14176                 /* Auxiliary sequencer registers. */
14177 
14178                 /* ASEQ GP */
14179                 WRT32_IO_REG(ha, io_base_addr, 0xB000);
14180                 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14181                     16, 32);
14182                 WRT32_IO_REG(ha, io_base_addr, 0xB010);
14183                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14184                 WRT32_IO_REG(ha, io_base_addr, 0xB020);
14185                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14186                 WRT32_IO_REG(ha, io_base_addr, 0xB030);
14187                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14188                 WRT32_IO_REG(ha, io_base_addr, 0xB040);
14189                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14190                 WRT32_IO_REG(ha, io_base_addr, 0xB050);
14191                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14192                 WRT32_IO_REG(ha, io_base_addr, 0xB060);
14193                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14194                 WRT32_IO_REG(ha, io_base_addr, 0xB070);
14195                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14196 
14197                 /* ASEQ-0 */
14198                 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14199                 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14200                     16, 32);
14201                 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14202                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14203 
14204                 /* ASEQ-1 */
14205                 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14206                 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14207                     16, 32);
14208 
14209                 /* ASEQ-2 */
14210                 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14211                 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14212                     16, 32);
14213 
14214                 /* Command DMA registers. */
14215 
14216                 WRT32_IO_REG(ha, io_base_addr, 0x7100);
14217                 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14218                     sizeof (fw->cmd_dma_reg) / 4, 32);
14219 
14220                 /* Queues. */
14221 
14222                 /* RequestQ0 */
14223                 WRT32_IO_REG(ha, io_base_addr, 0x7200);
14224                 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14225                     8, 32);
14226                 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14227 
14228                 /* ResponseQ0 */
14229                 WRT32_IO_REG(ha, io_base_addr, 0x7300);
14230                 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14231                     8, 32);
14232                 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14233 
14234                 /* RequestQ1 */
14235                 WRT32_IO_REG(ha, io_base_addr, 0x7400);
14236                 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14237                     8, 32);
14238                 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14239 
14240                 /* Transmit DMA registers. */
14241 
14242                 /* XMT0 */
14243                 WRT32_IO_REG(ha, io_base_addr, 0x7600);
14244                 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14245                     16, 32);
14246                 WRT32_IO_REG(ha, io_base_addr, 0x7610);
14247                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14248 
14249                 /* XMT1 */
14250                 WRT32_IO_REG(ha, io_base_addr, 0x7620);
14251                 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14252                     16, 32);
14253                 WRT32_IO_REG(ha, io_base_addr, 0x7630);
14254                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14255 
14256                 /* XMT2 */
14257                 WRT32_IO_REG(ha, io_base_addr, 0x7640);
14258                 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14259                     16, 32);
14260                 WRT32_IO_REG(ha, io_base_addr, 0x7650);
14261                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14262 
14263                 /* XMT3 */
14264                 WRT32_IO_REG(ha, io_base_addr, 0x7660);
14265                 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14266                     16, 32);
14267                 WRT32_IO_REG(ha, io_base_addr, 0x7670);
14268                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14269 
14270                 /* XMT4 */
14271                 WRT32_IO_REG(ha, io_base_addr, 0x7680);
14272                 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14273                     16, 32);
14274                 WRT32_IO_REG(ha, io_base_addr, 0x7690);
14275                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14276 
14277                 /* XMT Common */
14278                 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14279                 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14280                     ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14281 
14282                 /* Receive DMA registers. */
14283 
14284                 /* RCVThread0 */
14285                 WRT32_IO_REG(ha, io_base_addr, 0x7700);
14286                 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14287                     ha->iobase + 0xC0, 16, 32);
14288                 WRT32_IO_REG(ha, io_base_addr, 0x7710);
14289                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14290 
14291                 /* RCVThread1 */
14292                 WRT32_IO_REG(ha, io_base_addr, 0x7720);
14293                 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14294                     ha->iobase + 0xC0, 16, 32);
14295                 WRT32_IO_REG(ha, io_base_addr, 0x7730);
14296                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14297 
14298                 /* RISC registers. */
14299 
14300                 /* RISC GP */
14301                 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14302                 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14303                     16, 32);
14304                 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14305                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14306                 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14307                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14308                 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14309                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14310                 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14311                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14312                 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14313                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14314                 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14315                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14316                 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14317                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14318 
14319                 /* Local memory controller (LMC) registers. */
14320 
14321                 /* LMC */
14322                 WRT32_IO_REG(ha, io_base_addr, 0x3000);
14323                 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14324                     16, 32);
14325                 WRT32_IO_REG(ha, io_base_addr, 0x3010);
14326                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14327                 WRT32_IO_REG(ha, io_base_addr, 0x3020);
14328                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14329                 WRT32_IO_REG(ha, io_base_addr, 0x3030);
14330                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14331                 WRT32_IO_REG(ha, io_base_addr, 0x3040);
14332                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14333                 WRT32_IO_REG(ha, io_base_addr, 0x3050);
14334                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14335                 WRT32_IO_REG(ha, io_base_addr, 0x3060);
14336                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14337                 WRT32_IO_REG(ha, io_base_addr, 0x3070);
14338                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14339 
14340                 /* Fibre Protocol Module registers. */
14341 
14342                 /* FPM hardware */
14343                 WRT32_IO_REG(ha, io_base_addr, 0x4000);
14344                 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14345                     16, 32);
14346                 WRT32_IO_REG(ha, io_base_addr, 0x4010);
14347                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14348                 WRT32_IO_REG(ha, io_base_addr, 0x4020);
14349                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14350                 WRT32_IO_REG(ha, io_base_addr, 0x4030);
14351                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14352                 WRT32_IO_REG(ha, io_base_addr, 0x4040);
14353                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14354                 WRT32_IO_REG(ha, io_base_addr, 0x4050);
14355                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14356                 WRT32_IO_REG(ha, io_base_addr, 0x4060);
14357                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14358                 WRT32_IO_REG(ha, io_base_addr, 0x4070);
14359                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14360                 WRT32_IO_REG(ha, io_base_addr, 0x4080);
14361                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14362                 WRT32_IO_REG(ha, io_base_addr, 0x4090);
14363                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14364                 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14365                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14366                 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14367                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14368 
14369                 /* Frame Buffer registers. */
14370 
14371                 /* FB hardware */
14372                 WRT32_IO_REG(ha, io_base_addr, 0x6000);
14373                 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14374                     16, 32);
14375                 WRT32_IO_REG(ha, io_base_addr, 0x6010);
14376                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14377                 WRT32_IO_REG(ha, io_base_addr, 0x6020);
14378                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14379                 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14380                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14381                 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14382                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14383                 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14384                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14385                 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14386                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14387                 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14388                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14389                 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14390                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14391                 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14392                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14393                 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14394                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14395                 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14396                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14397         }
14398 
14399         /* Get the request queue */
14400         if (rval == QL_SUCCESS) {
14401                 uint32_t        cnt;
14402                 uint32_t        *w32 = (uint32_t *)ha->request_ring_bp;
14403 
14404                 /* Sync DMA buffer. */
14405                 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14406                     REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14407                     DDI_DMA_SYNC_FORKERNEL);
14408 
14409                 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14410                         fw->req_q[cnt] = *w32++;
14411                         LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14412                 }
14413         }
14414 
14415         /* Get the response queue */
14416         if (rval == QL_SUCCESS) {
14417                 uint32_t        cnt;
14418                 uint32_t        *w32 = (uint32_t *)ha->response_ring_bp;
14419 
14420                 /* Sync DMA buffer. */
14421                 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14422                     RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14423                     DDI_DMA_SYNC_FORKERNEL);
14424 
14425                 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14426                         fw->rsp_q[cnt] = *w32++;
14427                         LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14428                 }
14429         }
14430 
14431         /* Reset RISC. */
14432 
14433         ql_reset_chip(ha);
14434 
14435         /* Memory. */
14436 
14437         if (rval == QL_SUCCESS) {
14438                 /* Code RAM. */
14439                 rval = ql_read_risc_ram(ha, 0x20000,
14440                     sizeof (fw->code_ram) / 4, fw->code_ram);
14441         }
14442         if (rval == QL_SUCCESS) {
14443                 /* External Memory. */
14444                 rval = ql_read_risc_ram(ha, 0x100000,
14445                     ha->fw_ext_memory_size / 4, fw->ext_mem);
14446         }
14447 
14448         /* Get the FC event trace buffer */
14449         if (rval == QL_SUCCESS) {
14450                 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14451                     (ha->fwfcetracebuf.bp != NULL)) {
14452                         uint32_t        cnt;
14453                         uint32_t        *w32 = ha->fwfcetracebuf.bp;
14454 
14455                         /* Sync DMA buffer. */
14456                         (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14457                             FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14458 
14459                         for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14460                                 fw->fce_trace_buf[cnt] = *w32++;
14461                         }
14462                 }
14463         }
14464 
14465         /* Get the extended trace buffer */
14466         if (rval == QL_SUCCESS) {
14467                 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14468                     (ha->fwexttracebuf.bp != NULL)) {
14469                         uint32_t        cnt;
14470                         uint32_t        *w32 = ha->fwexttracebuf.bp;
14471 
14472                         /* Sync DMA buffer. */
14473                         (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14474                             FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14475 
14476                         for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14477                                 fw->ext_trace_buf[cnt] = *w32++;
14478                         }
14479                 }
14480         }
14481 
14482         if (rval != QL_SUCCESS) {
14483                 EL(ha, "failed=%xh\n", rval);
14484         } else {
14485                 /*EMPTY*/
14486                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14487         }
14488 
14489         return (rval);
14490 }
14491 
14492 /*
14493  * ql_81xx_binary_fw_dump
14494  *
14495  * Input:
14496  *      ha:     adapter state pointer.
14497  *      fw:     firmware dump context pointer.
14498  *
14499  * Returns:
14500  *      ql local function return status code.
14501  *
14502  * Context:
14503  *      Interrupt or Kernel context, no mailbox commands allowed.
14504  */
14505 static int
14506 ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
14507 {
14508         uint32_t        *reg32;
14509         void            *bp;
14510         clock_t         timer;
14511         int             rval = QL_SUCCESS;
14512 
14513         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14514 
14515         fw->r2h_status = RD32_IO_REG(ha, risc2host);
14516 
14517         /* Pause RISC. */
14518         if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
14519                 /* Disable ISP interrupts. */
14520                 WRT16_IO_REG(ha, ictrl, 0);
14521 
14522                 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
14523                 for (timer = 30000;
14524                     (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
14525                     rval == QL_SUCCESS; timer--) {
14526                         if (timer) {
14527                                 drv_usecwait(100);
14528                                 if (timer % 10000 == 0) {
14529                                         EL(ha, "risc pause %d\n", timer);
14530                                 }
14531                         } else {
14532                                 EL(ha, "risc pause timeout\n");
14533                                 rval = QL_FUNCTION_TIMEOUT;
14534                         }
14535                 }
14536         }
14537 
14538         if (rval == QL_SUCCESS) {
14539 
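                      /*
                       * The ISP81xx register capture follows the same
                       * sequence as the 25xx dump: HostRisc and PCIe
                       * registers, host interface registers, shadow
                       * registers, the RISC I/O register, mailboxes and
                       * the sequencer banks are read through the same
                       * register windows.
                       */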
14540                 /* Host Interface registers */
14541 
14542                 /* HostRisc registers. */
14543                 WRT32_IO_REG(ha, io_base_addr, 0x7000);
14544                 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14545                     16, 32);
14546                 WRT32_IO_REG(ha, io_base_addr, 0x7010);
14547                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14548 
14549                 /* PCIe registers. */
14550                 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14551                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14552                 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14553                     3, 32);
14554                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14555                 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14556 
14557                 /* Host interface registers. */
14558                 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14559                     sizeof (fw->host_reg) / 4, 32);
14560 
14561                 /* Disable ISP interrupts. */
14562 
14563                 WRT32_IO_REG(ha, ictrl, 0);
14564                 RD32_IO_REG(ha, ictrl);
14565                 ADAPTER_STATE_LOCK(ha);
14566                 ha->flags &= ~INTERRUPTS_ENABLED;
14567                 ADAPTER_STATE_UNLOCK(ha);
14568 
14569                 /* Shadow registers. */
14570 
14571                 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14572                 RD32_IO_REG(ha, io_base_addr);
14573 
14574                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14575                 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14576                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14577                 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14578 
14579                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14580                 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14581                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14582                 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14583 
14584                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14585                 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14586                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14587                 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14588 
14589                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14590                 WRT_REG_DWORD(ha, reg32, 0xB0300000);
14591                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14592                 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14593 
14594                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14595                 WRT_REG_DWORD(ha, reg32, 0xB0400000);
14596                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14597                 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14598 
14599                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14600                 WRT_REG_DWORD(ha, reg32, 0xB0500000);
14601                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14602                 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14603 
14604                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14605                 WRT_REG_DWORD(ha, reg32, 0xB0600000);
14606                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14607                 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14608 
14609                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14610                 WRT_REG_DWORD(ha, reg32, 0xB0700000);
14611                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14612                 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14613 
14614                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14615                 WRT_REG_DWORD(ha, reg32, 0xB0800000);
14616                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14617                 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14618 
14619                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14620                 WRT_REG_DWORD(ha, reg32, 0xB0900000);
14621                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14622                 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14623 
14624                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14625                 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14626                 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14627                 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14628 
14629                 /* RISC I/O register. */
14630 
14631                 WRT32_IO_REG(ha, io_base_addr, 0x0010);
14632                 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14633                     1, 32);
14634 
14635                 /* Mailbox registers. */
14636 
14637                 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14638                     sizeof (fw->mailbox_reg) / 2, 16);
14639 
14640                 /* Transfer sequence registers. */
14641 
14642                 /* XSEQ GP */
14643                 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14644                 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14645                     16, 32);
14646                 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14647                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14648                 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14649                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14650                 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14651                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14652                 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14653                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14654                 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14655                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14656                 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14657                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14658                 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14659                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14660 
14661                 /* XSEQ-0 */
14662                 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14663                 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14664                     16, 32);
14665                 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14666                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14667                 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14668                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14669 
14670                 /* XSEQ-1 */
14671                 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14672                 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14673                     16, 32);
14674 
14675                 /* Receive sequence registers. */
14676 
14677                 /* RSEQ GP */
14678                 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14679                 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14680                     16, 32);
14681                 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14682                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14683                 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14684                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14685                 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14686                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14687                 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14688                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14689                 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14690                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14691                 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14692                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14693                 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14694                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14695 
14696                 /* RSEQ-0 */
14697                 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14698                 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14699                     16, 32);
14700                 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14701                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14702 
14703                 /* RSEQ-1 */
14704                 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14705                 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14706                     sizeof (fw->rseq_1_reg) / 4, 32);
14707 
14708                 /* RSEQ-2 */
14709                 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14710                 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14711                     sizeof (fw->rseq_2_reg) / 4, 32);
14712 
14713                 /* Auxiliary sequencer registers. */
14714 
14715                 /* ASEQ GP */
14716                 WRT32_IO_REG(ha, io_base_addr, 0xB000);
14717                 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14718                     16, 32);
14719                 WRT32_IO_REG(ha, io_base_addr, 0xB010);
14720                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14721                 WRT32_IO_REG(ha, io_base_addr, 0xB020);
14722                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14723                 WRT32_IO_REG(ha, io_base_addr, 0xB030);
14724                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14725                 WRT32_IO_REG(ha, io_base_addr, 0xB040);
14726                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14727                 WRT32_IO_REG(ha, io_base_addr, 0xB050);
14728                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14729                 WRT32_IO_REG(ha, io_base_addr, 0xB060);
14730                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14731                 WRT32_IO_REG(ha, io_base_addr, 0xB070);
14732                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14733 
14734                 /* ASEQ-0 */
14735                 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14736                 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14737                     16, 32);
14738                 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14739                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14740 
14741                 /* ASEQ-1 */
14742                 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14743                 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14744                     16, 32);
14745 
14746                 /* ASEQ-2 */
14747                 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14748                 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14749                     16, 32);
14750 
14751                 /* Command DMA registers. */
14752 
14753                 WRT32_IO_REG(ha, io_base_addr, 0x7100);
14754                 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14755                     sizeof (fw->cmd_dma_reg) / 4, 32);
14756 
14757                 /* Queues. */
14758 
14759                 /* RequestQ0 */
14760                 WRT32_IO_REG(ha, io_base_addr, 0x7200);
14761                 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14762                     8, 32);
14763                 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14764 
14765                 /* ResponseQ0 */
14766                 WRT32_IO_REG(ha, io_base_addr, 0x7300);
14767                 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14768                     8, 32);
14769                 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14770 
14771                 /* RequestQ1 */
14772                 WRT32_IO_REG(ha, io_base_addr, 0x7400);
14773                 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14774                     8, 32);
14775                 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14776 
14777                 /* Transmit DMA registers. */
14778 
14779                 /* XMT0 */
14780                 WRT32_IO_REG(ha, io_base_addr, 0x7600);
14781                 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14782                     16, 32);
14783                 WRT32_IO_REG(ha, io_base_addr, 0x7610);
14784                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14785 
14786                 /* XMT1 */
14787                 WRT32_IO_REG(ha, io_base_addr, 0x7620);
14788                 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14789                     16, 32);
14790                 WRT32_IO_REG(ha, io_base_addr, 0x7630);
14791                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14792 
14793                 /* XMT2 */
14794                 WRT32_IO_REG(ha, io_base_addr, 0x7640);
14795                 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14796                     16, 32);
14797                 WRT32_IO_REG(ha, io_base_addr, 0x7650);
14798                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14799 
14800                 /* XMT3 */
14801                 WRT32_IO_REG(ha, io_base_addr, 0x7660);
14802                 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14803                     16, 32);
14804                 WRT32_IO_REG(ha, io_base_addr, 0x7670);
14805                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14806 
14807                 /* XMT4 */
14808                 WRT32_IO_REG(ha, io_base_addr, 0x7680);
14809                 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14810                     16, 32);
14811                 WRT32_IO_REG(ha, io_base_addr, 0x7690);
14812                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14813 
14814                 /* XMT Common */
14815                 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14816                 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14817                     ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14818 
14819                 /* Receive DMA registers. */
14820 
14821                 /* RCVThread0 */
14822                 WRT32_IO_REG(ha, io_base_addr, 0x7700);
14823                 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14824                     ha->iobase + 0xC0, 16, 32);
14825                 WRT32_IO_REG(ha, io_base_addr, 0x7710);
14826                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14827 
14828                 /* RCVThread1 */
14829                 WRT32_IO_REG(ha, io_base_addr, 0x7720);
14830                 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14831                     ha->iobase + 0xC0, 16, 32);
14832                 WRT32_IO_REG(ha, io_base_addr, 0x7730);
14833                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14834 
14835                 /* RISC registers. */
14836 
14837                 /* RISC GP */
14838                 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14839                 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14840                     16, 32);
14841                 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14842                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14843                 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14844                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14845                 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14846                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14847                 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14848                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14849                 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14850                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14851                 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14852                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14853                 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14854                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14855 
14856                 /* Local memory controller (LMC) registers. */
14857 
14858                 /* LMC */
14859                 WRT32_IO_REG(ha, io_base_addr, 0x3000);
14860                 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14861                     16, 32);
14862                 WRT32_IO_REG(ha, io_base_addr, 0x3010);
14863                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14864                 WRT32_IO_REG(ha, io_base_addr, 0x3020);
14865                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14866                 WRT32_IO_REG(ha, io_base_addr, 0x3030);
14867                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14868                 WRT32_IO_REG(ha, io_base_addr, 0x3040);
14869                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14870                 WRT32_IO_REG(ha, io_base_addr, 0x3050);
14871                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14872                 WRT32_IO_REG(ha, io_base_addr, 0x3060);
14873                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14874                 WRT32_IO_REG(ha, io_base_addr, 0x3070);
14875                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14876 
14877                 /* Fibre Protocol Module registers. */
14878 
14879                 /* FPM hardware */
14880                 WRT32_IO_REG(ha, io_base_addr, 0x4000);
14881                 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14882                     16, 32);
14883                 WRT32_IO_REG(ha, io_base_addr, 0x4010);
14884                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14885                 WRT32_IO_REG(ha, io_base_addr, 0x4020);
14886                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14887                 WRT32_IO_REG(ha, io_base_addr, 0x4030);
14888                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14889                 WRT32_IO_REG(ha, io_base_addr, 0x4040);
14890                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14891                 WRT32_IO_REG(ha, io_base_addr, 0x4050);
14892                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14893                 WRT32_IO_REG(ha, io_base_addr, 0x4060);
14894                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14895                 WRT32_IO_REG(ha, io_base_addr, 0x4070);
14896                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14897                 WRT32_IO_REG(ha, io_base_addr, 0x4080);
14898                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14899                 WRT32_IO_REG(ha, io_base_addr, 0x4090);
14900                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14901                 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14902                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14903                 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14904                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14905                 WRT32_IO_REG(ha, io_base_addr, 0x40C0);
14906                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14907                 WRT32_IO_REG(ha, io_base_addr, 0x40D0);
14908                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14909 
14910                 /* Frame Buffer registers. */
14911 
14912                 /* FB hardware */
14913                 WRT32_IO_REG(ha, io_base_addr, 0x6000);
14914                 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14915                     16, 32);
14916                 WRT32_IO_REG(ha, io_base_addr, 0x6010);
14917                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14918                 WRT32_IO_REG(ha, io_base_addr, 0x6020);
14919                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14920                 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14921                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14922                 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14923                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14924                 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14925                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14926                 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14927                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14928                 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14929                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14930                 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14931                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14932                 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14933                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14934                 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14935                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14936                 WRT32_IO_REG(ha, io_base_addr, 0x61C0);
14937                 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14938                 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14939                 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14940         }
14941 
14942         /* Get the request queue */
14943         if (rval == QL_SUCCESS) {
14944                 uint32_t        cnt;
14945                 uint32_t        *w32 = (uint32_t *)ha->request_ring_bp;
14946 
14947                 /* Sync DMA buffer. */
14948                 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14949                     REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14950                     DDI_DMA_SYNC_FORKERNEL);
14951 
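                /*
                 * Copy the request queue into the dump, converting
                 * each word to little-endian format.
                 */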
14952                 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14953                         fw->req_q[cnt] = *w32++;
14954                         LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14955                 }
14956         }
14957 
14958         /* Get the response queue */
14959         if (rval == QL_SUCCESS) {
14960                 uint32_t        cnt;
14961                 uint32_t        *w32 = (uint32_t *)ha->response_ring_bp;
14962 
14963                 /* Sync DMA buffer. */
14964                 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14965                     RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14966                     DDI_DMA_SYNC_FORKERNEL);
14967 
14968                 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14969                         fw->rsp_q[cnt] = *w32++;
14970                         LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14971                 }
14972         }
14973 
14974         /* Reset RISC. */
14975 
14976         ql_reset_chip(ha);
14977 
14978         /* Memory. */
14979 
14980         if (rval == QL_SUCCESS) {
14981                 /* Code RAM. */
14982                 rval = ql_read_risc_ram(ha, 0x20000,
14983                     sizeof (fw->code_ram) / 4, fw->code_ram);
14984         }
14985         if (rval == QL_SUCCESS) {
14986                 /* External Memory. */
14987                 rval = ql_read_risc_ram(ha, 0x100000,
14988                     ha->fw_ext_memory_size / 4, fw->ext_mem);
14989         }
14990 
14991         /* Get the FC event trace buffer */
14992         if (rval == QL_SUCCESS) {
14993                 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14994                     (ha->fwfcetracebuf.bp != NULL)) {
14995                         uint32_t        cnt;
14996                         uint32_t        *w32 = ha->fwfcetracebuf.bp;
14997 
14998                         /* Sync DMA buffer. */
14999                         (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
15000                             FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
15001 
15002                         for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
15003                                 fw->fce_trace_buf[cnt] = *w32++;
15004                         }
15005                 }
15006         }
15007 
15008         /* Get the extended trace buffer */
15009         if (rval == QL_SUCCESS) {
15010                 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
15011                     (ha->fwexttracebuf.bp != NULL)) {
15012                         uint32_t        cnt;
15013                         uint32_t        *w32 = ha->fwexttracebuf.bp;
15014 
15015                         /* Sync DMA buffer. */
15016                         (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
15017                             FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
15018 
15019                         for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
15020                                 fw->ext_trace_buf[cnt] = *w32++;
15021                         }
15022                 }
15023         }
15024 
15025         if (rval != QL_SUCCESS) {
15026                 EL(ha, "failed=%xh\n", rval);
15027         } else {
15028                 /*EMPTY*/
15029                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15030         }
15031 
15032         return (rval);
15033 }
15034 
15035 /*
15036  * ql_read_risc_ram
15037  *      Reads RISC RAM one word at a time.
15038  *      RISC interrupts must be disabled when this routine is called.
15039  *
15040  * Input:
15041  *      ha:     adapter state pointer.
15042  *      risc_address:   RISC code start address.
15043  *      len:            Number of words.
15044  *      buf:            buffer pointer.
15045  *
15046  * Returns:
15047  *      ql local function return status code.
15048  *
15049  * Context:
15050  *      Interrupt or Kernel context, no mailbox commands allowed.
15051  */
15052 static int
15053 ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
15054     void *buf)
15055 {
15056         uint32_t        cnt;
15057         uint16_t        stat;
15058         clock_t         timer;
15059         uint16_t        *buf16 = (uint16_t *)buf;
15060         uint32_t        *buf32 = (uint32_t *)buf;
15061         int             rval = QL_SUCCESS;
15062 
15063         for (cnt = 0; cnt < len; cnt++, risc_address++) {
15064                 WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
15065                 WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
15066                 WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
15067                 if (CFG_IST(ha, CFG_CTRL_8021)) {
15068                         WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
15069                 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
15070                         WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
15071                 } else {
15072                         WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
15073                 }
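                /*
                 * Poll up to ~30 seconds (6000000 x 5us) for mailbox
                 * completion; status 1/0x10 means the command completed
                 * and 2/0x11 means it failed.
                 */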
15074                 for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
15075                         if (INTERRUPT_PENDING(ha)) {
15076                                 stat = (uint16_t)
15077                                     (RD16_IO_REG(ha, risc2host) & 0xff);
15078                                 if ((stat == 1) || (stat == 0x10)) {
15079                                         if (CFG_IST(ha, CFG_CTRL_24258081)) {
15080                                                 buf32[cnt] = SHORT_TO_LONG(
15081                                                     RD16_IO_REG(ha,
15082                                                     mailbox_out[2]),
15083                                                     RD16_IO_REG(ha,
15084                                                     mailbox_out[3]));
15085                                         } else {
15086                                                 buf16[cnt] =
15087                                                     RD16_IO_REG(ha,
15088                                                     mailbox_out[2]);
15089                                         }
15090 
15091                                         break;
15092                                 } else if ((stat == 2) || (stat == 0x11)) {
15093                                         rval = RD16_IO_REG(ha, mailbox_out[0]);
15094                                         break;
15095                                 }
15096                                 if (CFG_IST(ha, CFG_CTRL_8021)) {
15097                                         ql_8021_clr_hw_intr(ha);
15098                                         ql_8021_clr_fw_intr(ha);
15099                                 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
15100                                         WRT32_IO_REG(ha, hccr,
15101                                             HC24_CLR_RISC_INT);
15102                                         RD32_IO_REG(ha, hccr);
15103                                 } else {
15104                                         WRT16_IO_REG(ha, hccr,
15105                                             HC_CLR_RISC_INT);
15106                                 }
15107                         }
15108                         drv_usecwait(5);
15109                 }
15110                 if (CFG_IST(ha, CFG_CTRL_8021)) {
15111                         ql_8021_clr_hw_intr(ha);
15112                         ql_8021_clr_fw_intr(ha);
15113                 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
15114                         WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
15115                         RD32_IO_REG(ha, hccr);
15116                 } else {
15117                         WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
15118                         WRT16_IO_REG(ha, semaphore, 0);
15119                 }
15120 
15121                 if (timer == 0) {
15122                         rval = QL_FUNCTION_TIMEOUT;
15123                 }
15124         }
15125 
15126         return (rval);
15127 }
15128 
15129 /*
15130  * ql_read_regs
15131  *      Reads adapter registers to buffer.
15132  *
15133  * Input:
15134  *      ha:     adapter state pointer.
15135  *      buf:    buffer pointer.
15136  *      reg:    start address.
15137  *      count:  number of registers.
15138  *      wds:    register size in bits (8, 16 or 32).
15139  *
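 * Returns:
 *      pointer to the next buffer location after the registers read.
 *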
15140  * Context:
15141  *      Interrupt or Kernel context, no mailbox commands allowed.
15142  */
15143 static void *
15144 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
15145     uint8_t wds)
15146 {
15147         uint32_t        *bp32, *reg32;
15148         uint16_t        *bp16, *reg16;
15149         uint8_t         *bp8, *reg8;
15150 
15151         switch (wds) {
15152         case 32:
15153                 bp32 = buf;
15154                 reg32 = reg;
15155                 while (count--) {
15156                         *bp32++ = RD_REG_DWORD(ha, reg32++);
15157                 }
15158                 return (bp32);
15159         case 16:
15160                 bp16 = buf;
15161                 reg16 = reg;
15162                 while (count--) {
15163                         *bp16++ = RD_REG_WORD(ha, reg16++);
15164                 }
15165                 return (bp16);
15166         case 8:
15167                 bp8 = buf;
15168                 reg8 = reg;
15169                 while (count--) {
15170                         *bp8++ = RD_REG_BYTE(ha, reg8++);
15171                 }
15172                 return (bp8);
15173         default:
15174                 EL(ha, "Unknown word size=%d\n", wds);
15175                 return (buf);
15176         }
15177 }
15178 
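/*
 * ql_save_config_regs
 *      Saves the adapter PCI configuration registers to the
 *      "ql-config-space" device property.
 *
 * Input:
 *      dip:    pointer to device information structure.
 *
 * Returns:
 *      DDI_SUCCESS or DDI_FAILURE.
 */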
15179 static int
15180 ql_save_config_regs(dev_info_t *dip)
15181 {
15182         ql_adapter_state_t      *ha;
15183         int                     ret;
15184         ql_config_space_t       chs;
15185         caddr_t                 prop = "ql-config-space";
15186 
15187         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15188         if (ha == NULL) {
15189                 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15190                     ddi_get_instance(dip));
15191                 return (DDI_FAILURE);
15192         }
15193 
15194         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15195 
15196         /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15197         if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
15198             1) {
15199                 QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15200                 return (DDI_SUCCESS);
15201         }
15202 
15203         chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
15204         chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
15205             PCI_CONF_HEADER);
15206         if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15207                 chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
15208                     PCI_BCNF_BCNTRL);
15209         }
15210 
15211         chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
15212             PCI_CONF_CACHE_LINESZ);
15213 
15214         chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15215             PCI_CONF_LATENCY_TIMER);
15216 
15217         if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15218                 chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15219                     PCI_BCNF_LATENCY_TIMER);
15220         }
15221 
15222         chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
15223         chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
15224         chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
15225         chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
15226         chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
15227         chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
15228 
15229         /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15230         ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
15231             (uchar_t *)&chs, sizeof (ql_config_space_t));
15232 
15233         if (ret != DDI_PROP_SUCCESS) {
15234                 cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
15235                     QL_NAME, ddi_get_instance(dip), prop);
15236                 return (DDI_FAILURE);
15237         }
15238 
15239         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15240 
15241         return (DDI_SUCCESS);
15242 }
15243 
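/*
 * ql_restore_config_regs
 *      Restores the adapter PCI configuration registers from the
 *      "ql-config-space" device property and then removes the property.
 *
 * Input:
 *      dip:    pointer to device information structure.
 *
 * Returns:
 *      DDI_SUCCESS or DDI_FAILURE.
 */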
15244 static int
15245 ql_restore_config_regs(dev_info_t *dip)
15246 {
15247         ql_adapter_state_t      *ha;
15248         uint_t                  elements;
15249         ql_config_space_t       *chs_p;
15250         caddr_t                 prop = "ql-config-space";
15251 
15252         ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15253         if (ha == NULL) {
15254                 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15255                     ddi_get_instance(dip));
15256                 return (DDI_FAILURE);
15257         }
15258 
15259         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15260 
15261         /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15262         if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
15263             DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
15264             (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
15265                 QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15266                 return (DDI_FAILURE);
15267         }
15268 
15269         ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
15270 
15271         if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15272                 ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
15273                     chs_p->chs_bridge_control);
15274         }
15275 
15276         ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
15277             chs_p->chs_cache_line_size);
15278 
15279         ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
15280             chs_p->chs_latency_timer);
15281 
15282         if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15283                 ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
15284                     chs_p->chs_sec_latency_timer);
15285         }
15286 
15287         ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
15288         ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
15289         ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
15290         ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
15291         ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
15292         ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
15293 
15294         ddi_prop_free(chs_p);
15295 
15296         /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15297         if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
15298                 cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
15299                     QL_NAME, ddi_get_instance(dip), prop);
15300         }
15301 
15302         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15303 
15304         return (DDI_SUCCESS);
15305 }
15306 
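/*
 * ql_pci_config_get8/16/32 and ql_pci_config_put8/16/32
 *      PCI configuration space accessors. On SBUS cards the
 *      configuration space is accessed through the mapped SBUS
 *      configuration registers instead.
 */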
15307 uint8_t
15308 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15309 {
15310         if (CFG_IST(ha, CFG_SBUS_CARD)) {
15311                 return (ddi_get8(ha->sbus_config_handle,
15312                     (uint8_t *)(ha->sbus_config_base + off)));
15313         }
15314 
15315 #ifdef KERNEL_32
15316         return (pci_config_getb(ha->pci_handle, off));
15317 #else
15318         return (pci_config_get8(ha->pci_handle, off));
15319 #endif
15320 }
15321 
15322 uint16_t
15323 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
15324 {
15325         if (CFG_IST(ha, CFG_SBUS_CARD)) {
15326                 return (ddi_get16(ha->sbus_config_handle,
15327                     (uint16_t *)(ha->sbus_config_base + off)));
15328         }
15329 
15330 #ifdef KERNEL_32
15331         return (pci_config_getw(ha->pci_handle, off));
15332 #else
15333         return (pci_config_get16(ha->pci_handle, off));
15334 #endif
15335 }
15336 
15337 uint32_t
15338 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
15339 {
15340         if (CFG_IST(ha, CFG_SBUS_CARD)) {
15341                 return (ddi_get32(ha->sbus_config_handle,
15342                     (uint32_t *)(ha->sbus_config_base + off)));
15343         }
15344 
15345 #ifdef KERNEL_32
15346         return (pci_config_getl(ha->pci_handle, off));
15347 #else
15348         return (pci_config_get32(ha->pci_handle, off));
15349 #endif
15350 }
15351 
15352 void
15353 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
15354 {
15355         if (CFG_IST(ha, CFG_SBUS_CARD)) {
15356                 ddi_put8(ha->sbus_config_handle,
15357                     (uint8_t *)(ha->sbus_config_base + off), val);
15358         } else {
15359 #ifdef KERNEL_32
15360                 pci_config_putb(ha->pci_handle, off, val);
15361 #else
15362                 pci_config_put8(ha->pci_handle, off, val);
15363 #endif
15364         }
15365 }
15366 
15367 void
15368 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
15369 {
15370         if (CFG_IST(ha, CFG_SBUS_CARD)) {
15371                 ddi_put16(ha->sbus_config_handle,
15372                     (uint16_t *)(ha->sbus_config_base + off), val);
15373         } else {
15374 #ifdef KERNEL_32
15375                 pci_config_putw(ha->pci_handle, off, val);
15376 #else
15377                 pci_config_put16(ha->pci_handle, off, val);
15378 #endif
15379         }
15380 }
15381 
15382 void
15383 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
15384 {
15385         if (CFG_IST(ha, CFG_SBUS_CARD)) {
15386                 ddi_put32(ha->sbus_config_handle,
15387                     (uint32_t *)(ha->sbus_config_base + off), val);
15388         } else {
15389 #ifdef KERNEL_32
15390                 pci_config_putl(ha->pci_handle, off, val);
15391 #else
15392                 pci_config_put32(ha->pci_handle, off, val);
15393 #endif
15394         }
15395 }
15396 
15397 /*
15398  * ql_halt
15399  *      Waits for outstanding commands to finish and aborts any
15400  *      that do not complete. If powering down to D3, the adapter
15401  *      is then reset.
15402  *
15403  * Input:
15404  *      ha:     adapter state pointer.
15405  *      pwr:    power state.
15406  *
15407  * Context:
15408  *      Kernel context.
15409  */
15410 static void
15411 ql_halt(ql_adapter_state_t *ha, int pwr)
15412 {
15413         uint32_t        cnt;
15414         ql_tgt_t        *tq;
15415         ql_srb_t        *sp;
15416         uint16_t        index;
15417         ql_link_t       *link;
15418 
15419         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15420 
15421         /* Wait for all commands running to finish. */
15422         for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
15423                 for (link = ha->dev[index].first; link != NULL;
15424                     link = link->next) {
15425                         tq = link->base_address;
15426                         (void) ql_abort_device(ha, tq, 0);
15427 
15428                         /* Wait for 30 seconds for commands to finish. */
15429                         for (cnt = 3000; cnt != 0; cnt--) {
15430                                 /* Acquire device queue lock. */
15431                                 DEVICE_QUEUE_LOCK(tq);
15432                                 if (tq->outcnt == 0) {
15433                                         /* Release device queue lock. */
15434                                         DEVICE_QUEUE_UNLOCK(tq);
15435                                         break;
15436                                 } else {
15437                                         /* Release device queue lock. */
15438                                         DEVICE_QUEUE_UNLOCK(tq);
15439                                         ql_delay(ha, 10000);
15440                                 }
15441                         }
15442 
15443                         /* Finish any commands waiting for more status. */
15444                         if (ha->status_srb != NULL) {
15445                                 sp = ha->status_srb;
15446                                 ha->status_srb = NULL;
15447                                 sp->cmd.next = NULL;
15448                                 ql_done(&sp->cmd);
15449                         }
15450 
15451                         /* Abort commands that did not finish. */
15452                         if (cnt == 0) {
15453                                 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
15454                                     cnt++) {
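                                        /*
                                         * Start any commands waiting on
                                         * the pending queue and restart
                                         * the scan.
                                         */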
15455                                         if (ha->pending_cmds.first != NULL) {
15456                                                 ql_start_iocb(ha, NULL);
15457                                                 cnt = 1;
15458                                         }
15459                                         sp = ha->outstanding_cmds[cnt];
15460                                         if (sp != NULL &&
15461                                             sp->lun_queue->target_queue ==
15462                                             tq) {
15463                                                 (void) ql_abort((opaque_t)ha,
15464                                                     sp->pkt, 0);
15465                                         }
15466                                 }
15467                         }
15468                 }
15469         }
15470 
15471         /* Shutdown IP. */
15472         if (ha->flags & IP_INITIALIZED) {
15473                 (void) ql_shutdown_ip(ha);
15474         }
15475 
15476         /* Stop all timers. */
15477         ADAPTER_STATE_LOCK(ha);
15478         ha->port_retry_timer = 0;
15479         ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
15480         ha->watchdog_timer = 0;
15481         ADAPTER_STATE_UNLOCK(ha);
15482 
15483         if (pwr == PM_LEVEL_D3) {
15484                 ADAPTER_STATE_LOCK(ha);
15485                 ha->flags &= ~ONLINE;
15486                 ADAPTER_STATE_UNLOCK(ha);
15487 
15488                 /* Reset ISP chip. */
15489                 ql_reset_chip(ha);
15490         }
15491 
15492         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15493 }
15494 
15495 /*
15496  * ql_get_dma_mem
15497  *      Function used to allocate dma memory.
15498  *
15499  * Input:
15500  *      ha:                     adapter state pointer.
15501  *      mem:                    pointer to dma memory object.
15502  *      size:                   size of the request in bytes.
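 *      allocation_type:        memory allocation type.
 *      alignment:              memory alignment type.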
15503  *
15504  * Returns:
15505  *      ql local function return status code.
15506  *
15507  * Context:
15508  *      Kernel context.
15509  */
15510 int
15511 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
15512     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
15513 {
15514         int     rval;
15515 
15516         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15517 
15518         mem->size = size;
15519         mem->type = allocation_type;
15520         mem->cookie_count = 1;
15521 
15522         switch (alignment) {
15523         case QL_DMA_DATA_ALIGN:
15524                 mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
15525                 break;
15526         case QL_DMA_RING_ALIGN:
15527                 mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
15528                 break;
15529         default:
15530                 EL(ha, "failed, unknown alignment type %x\n", alignment);
15531                 break;
15532         }
15533 
15534         if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
15535                 ql_free_phys(ha, mem);
15536                 EL(ha, "failed, alloc_phys=%xh\n", rval);
15537         }
15538 
15539         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15540 
15541         return (rval);
15542 }
15543 
15544 /*
15545  * ql_alloc_phys
15546  *      Function used to allocate memory and zero it.
15547  *      Memory is below 4 GB.
15548  *
15549  * Input:
15550  *      ha:                     adapter state pointer.
15551  *      mem:                    pointer to dma memory object.
15552  *      sleep:                  KM_SLEEP/KM_NOSLEEP flag.
15553  *      mem->cookie_count    number of segments allowed.
15554  *      mem->type            memory allocation type.
15555  *      mem->size            memory size.
15556  *      mem->alignment               memory alignment.
15557  *
15558  * Returns:
15559  *      ql local function return status code.
15560  *
15561  * Context:
15562  *      Kernel context.
15563  */
15564 int
15565 ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15566 {
15567         size_t                  rlen;
15568         ddi_dma_attr_t          dma_attr;
15569         ddi_device_acc_attr_t   acc_attr = ql_dev_acc_attr;
15570 
15571         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15572 
15573         dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15574             ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15575 
15576         dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
15577         dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15578 
15579         /*
15580          * Workaround: the SUN XMITS buffer must start and end on an
15581          * 8 byte boundary, otherwise the hardware will overrun the
15582          * buffer. The simple fix is to give the buffer room for the overrun.
15583          */
15584         if (mem->size & 7) {
15585                 mem->size += 8 - (mem->size & 7);
15586         }
15587 
15588         mem->flags = DDI_DMA_CONSISTENT;
15589 
15590         /*
15591          * Allocate DMA memory for command.
15592          */
15593         if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15594             DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15595             DDI_SUCCESS) {
15596                 EL(ha, "failed, ddi_dma_alloc_handle\n");
15597                 mem->dma_handle = NULL;
15598                 return (QL_MEMORY_ALLOC_FAILED);
15599         }
15600 
15601         switch (mem->type) {
15602         case KERNEL_MEM:
15603                 mem->bp = kmem_zalloc(mem->size, sleep);
15604                 break;
15605         case BIG_ENDIAN_DMA:
15606         case LITTLE_ENDIAN_DMA:
15607         case NO_SWAP_DMA:
15608                 if (mem->type == BIG_ENDIAN_DMA) {
15609                         acc_attr.devacc_attr_endian_flags =
15610                             DDI_STRUCTURE_BE_ACC;
15611                 } else if (mem->type == NO_SWAP_DMA) {
15612                         acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
15613                 }
15614                 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
15615                     mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15616                     DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
15617                     &mem->acc_handle) == DDI_SUCCESS) {
15618                         bzero(mem->bp, mem->size);
15619                         /* ensure we got what we asked for (32bit) */
15620                         if (dma_attr.dma_attr_addr_hi == 0) {
15621                                 if (mem->cookie.dmac_notused != 0) {
15622                                         EL(ha, "failed, ddi_dma_mem_alloc "
15623                                             "returned 64 bit DMA address\n");
15624                                         ql_free_phys(ha, mem);
15625                                         return (QL_MEMORY_ALLOC_FAILED);
15626                                 }
15627                         }
15628                 } else {
15629                         mem->acc_handle = NULL;
15630                         mem->bp = NULL;
15631                 }
15632                 break;
15633         default:
15634                 EL(ha, "failed, unknown type=%xh\n", mem->type);
15635                 mem->acc_handle = NULL;
15636                 mem->bp = NULL;
15637                 break;
15638         }
15639 
15640         if (mem->bp == NULL) {
15641                 EL(ha, "failed, ddi_dma_mem_alloc\n");
15642                 ddi_dma_free_handle(&mem->dma_handle);
15643                 mem->dma_handle = NULL;
15644                 return (QL_MEMORY_ALLOC_FAILED);
15645         }
15646 
15647         mem->flags |= DDI_DMA_RDWR;
15648 
15649         if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15650                 EL(ha, "failed, ddi_dma_addr_bind_handle\n");
15651                 ql_free_phys(ha, mem);
15652                 return (QL_MEMORY_ALLOC_FAILED);
15653         }
15654 
15655         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15656 
15657         return (QL_SUCCESS);
15658 }
15659 
15660 /*
15661  * ql_free_phys
15662  *      Function used to free physical memory.
15663  *
15664  * Input:
15665  *      ha:     adapter state pointer.
15666  *      mem:    pointer to dma memory object.
15667  *
15668  * Context:
15669  *      Kernel context.
15670  */
15671 void
15672 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15673 {
15674         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15675 
15676         if (mem != NULL && mem->dma_handle != NULL) {
15677                 ql_unbind_dma_buffer(ha, mem);
15678                 switch (mem->type) {
15679                 case KERNEL_MEM:
15680                         if (mem->bp != NULL) {
15681                                 kmem_free(mem->bp, mem->size);
15682                         }
15683                         break;
15684                 case LITTLE_ENDIAN_DMA:
15685                 case BIG_ENDIAN_DMA:
15686                 case NO_SWAP_DMA:
15687                         if (mem->acc_handle != NULL) {
15688                                 ddi_dma_mem_free(&mem->acc_handle);
15689                                 mem->acc_handle = NULL;
15690                         }
15691                         break;
15692                 default:
15693                         break;
15694                 }
15695                 mem->bp = NULL;
15696                 ddi_dma_free_handle(&mem->dma_handle);
15697                 mem->dma_handle = NULL;
15698         }
15699 
15700         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15701 }
15702 
15703 /*
15704  * ql_alloc_dma_resouce.
15705  *      Allocates DMA resource for buffer.
15706  *
15707  * Input:
15708  *      ha:                     adapter state pointer.
15709  *      mem:                    pointer to dma memory object.
15710  *      sleep:                  KM_SLEEP/KM_NOSLEEP flag.
15711  *      mem->cookie_count    number of segments allowed.
15712  *      mem->type            memory allocation type.
15713  *      mem->size            memory size.
15714  *      mem->bp                      pointer to memory or struct buf
15715  *
15716  * Returns:
15717  *      ql local function return status code.
15718  *
15719  * Context:
15720  *      Kernel context.
15721  */
15722 int
15723 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15724 {
15725         ddi_dma_attr_t  dma_attr;
15726 
15727         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15728 
15729         dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15730             ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15731         dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15732 
15733         /*
15734          * Allocate DMA handle for command.
15735          */
15736         if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15737             DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15738             DDI_SUCCESS) {
15739                 EL(ha, "failed, ddi_dma_alloc_handle\n");
15740                 mem->dma_handle = NULL;
15741                 return (QL_MEMORY_ALLOC_FAILED);
15742         }
15743 
15744         mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15745 
15746         if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15747                 EL(ha, "failed, bind_dma_buffer\n");
15748                 ddi_dma_free_handle(&mem->dma_handle);
15749                 mem->dma_handle = NULL;
15750                 return (QL_MEMORY_ALLOC_FAILED);
15751         }
15752 
15753         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15754 
15755         return (QL_SUCCESS);
15756 }
15757 
15758 /*
15759  * ql_free_dma_resource
15760  *      Frees DMA resources.
15761  *
15762  * Input:
15763  *      ha:             adapter state pointer.
15764  *      mem:            pointer to dma memory object.
15765  *      mem->dma_handle      DMA memory handle.
15766  *
15767  * Context:
15768  *      Kernel context.
15769  */
15770 void
15771 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
15772 {
15773         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15774 
15775         ql_free_phys(ha, mem);
15776 
15777         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15778 }
15779 
15780 /*
15781  * ql_bind_dma_buffer
15782  *      Binds DMA buffer.
15783  *
15784  * Input:
15785  *      ha:                     adapter state pointer.
15786  *      mem:                    pointer to dma memory object.
15787  *      sleep:                  KM_SLEEP or KM_NOSLEEP.
15788  *      mem->dma_handle              DMA memory handle.
15789  *      mem->cookie_count    number of segments allowed.
15790  *      mem->type            memory allocation type.
15791  *      mem->size            memory size.
15792  *      mem->bp                      pointer to memory or struct buf
15793  *
15794  * Returns:
15795  *      mem->cookies         pointer to list of cookies.
15796  *      mem->cookie_count    number of cookies.
15797  *      status                  success = DDI_DMA_MAPPED
15798  *                              DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15799  *                              DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15800  *                              DDI_DMA_TOOBIG
15801  *
15802  * Context:
15803  *      Kernel context.
15804  */
15805 static int
15806 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15807 {
15808         int                     rval;
15809         ddi_dma_cookie_t        *cookiep;
15810         uint32_t                cnt = mem->cookie_count;
15811 
15812         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15813 
15814         if (mem->type == STRUCT_BUF_MEMORY) {
15815                 rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
15816                     mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15817                     DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
15818         } else {
15819                 rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
15820                     mem->size, mem->flags, (sleep == KM_SLEEP) ?
15821                     DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
15822                     &mem->cookie_count);
15823         }
15824 
15825         if (rval == DDI_DMA_MAPPED) {
15826                 if (mem->cookie_count > cnt) {
15827                         (void) ddi_dma_unbind_handle(mem->dma_handle);
15828                         EL(ha, "failed, cookie_count %d > %d\n",
15829                             mem->cookie_count, cnt);
15830                         rval = DDI_DMA_TOOBIG;
15831                 } else {
15832                         if (mem->cookie_count > 1) {
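                                /*
                                 * Allocate an array for all DMA cookies
                                 * and walk the remainder with
                                 * ddi_dma_nextcookie().
                                 */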
15833                                 if ((mem->cookies = kmem_zalloc(
15834                                     sizeof (ddi_dma_cookie_t) *
15835                                     mem->cookie_count, sleep)) != NULL) {
15836                                         *mem->cookies = mem->cookie;
15837                                         cookiep = mem->cookies;
15838                                         for (cnt = 1; cnt < mem->cookie_count;
15839                                             cnt++) {
15840                                                 ddi_dma_nextcookie(
15841                                                     mem->dma_handle,
15842                                                     ++cookiep);
15843                                         }
15844                                 } else {
15845                                         (void) ddi_dma_unbind_handle(
15846                                             mem->dma_handle);
15847                                         EL(ha, "failed, kmem_zalloc\n");
15848                                         rval = DDI_DMA_NORESOURCES;
15849                                 }
15850                         } else {
15851                                 /*
15852                                  * dmac_size has been reported to be
15853                                  * incorrect at times on sparc machines,
15854                                  * so when there is only one segment
15855                                  * use the buffer size instead.
15856                                  */
15857                                 mem->cookies = &mem->cookie;
15858                                 mem->cookies->dmac_size = mem->size;
15859                         }
15860                 }
15861         }
15862 
15863         if (rval != DDI_DMA_MAPPED) {
15864                 EL(ha, "failed=%xh\n", rval);
15865         } else {
15866                 /*EMPTY*/
15867                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15868         }
15869 
15870         return (rval);
15871 }
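
      /*
       * Illustrative sketch only (not part of the driver build): a caller
       * fills in the dma_mem_t fields listed above, binds, walks the cookie
       * list and finally unbinds.  The handle/buffer setup ("dma_handle",
       * "buf" and "len") is hypothetical and assumed done elsewhere.
       *
       *      mem->dma_handle = dma_handle;
       *      mem->bp = buf;                  memory to map (mem->type is
       *                                      not STRUCT_BUF_MEMORY here)
       *      mem->size = len;
       *      mem->flags = DDI_DMA_CONSISTENT | DDI_DMA_RDWR;
       *      mem->cookie_count = 1;          segments allowed
       *
       *      if (ql_bind_dma_buffer(ha, mem, KM_SLEEP) == DDI_DMA_MAPPED) {
       *              ... use mem->cookies[0 .. mem->cookie_count - 1] ...
       *              ql_unbind_dma_buffer(ha, mem);
       *      }
       */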
15872 
15873 /*
15874  * ql_unbind_dma_buffer
15875  *      Unbinds DMA buffer.
15876  *
15877  * Input:
15878  *      ha:                     adapter state pointer.
15879  *      mem:                    pointer to dma memory object.
15880  *      mem->dma_handle:        DMA memory handle.
15881  *      mem->cookies:           pointer to cookie list.
15882  *      mem->cookie_count:      number of cookies.
15883  *
15884  * Context:
15885  *      Kernel context.
15886  */
15887 /* ARGSUSED */
15888 static void
15889 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15890 {
15891         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15892 
15893         (void) ddi_dma_unbind_handle(mem->dma_handle);
15894         if (mem->cookie_count > 1) {
15895                 kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15896                     mem->cookie_count);
15897                 mem->cookies = NULL;
15898         }
15899         mem->cookie_count = 0;
15900 
15901         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15902 }
15903 
15904 static int
15905 ql_suspend_adapter(ql_adapter_state_t *ha)
15906 {
15907         clock_t timer = 32 * drv_usectohz(1000000);
15908 
15909         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15910 
15911         /*
15912          * Claim mailbox ownership first so that no thread
15913          * using the mailbox hangs when we disable interrupts
15914          * in the middle of a mailbox command.
15915          */
15916         MBX_REGISTER_LOCK(ha);
15917 
15918         /* Check for mailbox available, if not wait for signal. */
15919         while (ha->mailbox_flags & MBX_BUSY_FLG) {
15920                 ha->mailbox_flags = (uint8_t)
15921                     (ha->mailbox_flags | MBX_WANT_FLG);
15922 
15923                 /* 30 seconds from now */
15924                 if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
15925                     timer, TR_CLOCK_TICK) == -1) {
15926 
15927                         /* Release mailbox register lock. */
15928                         MBX_REGISTER_UNLOCK(ha);
15929                         EL(ha, "failed, Suspend mbox");
15930                         return (QL_FUNCTION_TIMEOUT);
15931                 }
15932         }
15933 
15934         /* Set busy flag. */
15935         ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
15936         MBX_REGISTER_UNLOCK(ha);
15937 
15938         (void) ql_wait_outstanding(ha);
15939 
15940         /*
15941          * At this point no mailbox interrupt can occur, so return
15942          * all the outstanding commands as well as the internally
15943          * queued commands.
15944          */
15945         ql_halt(ha, PM_LEVEL_D0);
15946 
15947         if (ha->power_level != PM_LEVEL_D3) {
15948                 /* Disable ISP interrupts. */
15949                 WRT16_IO_REG(ha, ictrl, 0);
15950         }
15951 
15952         ADAPTER_STATE_LOCK(ha);
15953         ha->flags &= ~INTERRUPTS_ENABLED;
15954         ADAPTER_STATE_UNLOCK(ha);
15955 
15956         MBX_REGISTER_LOCK(ha);
15957         /* Reset busy status. */
15958         ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);
15959 
15960         /* If thread is waiting for mailbox go signal it to start. */
15961         if (ha->mailbox_flags & MBX_WANT_FLG) {
15962                 ha->mailbox_flags = (uint8_t)
15963                     (ha->mailbox_flags & ~MBX_WANT_FLG);
15964                 cv_broadcast(&ha->cv_mbx_wait);
15965         }
15966         /* Release mailbox register lock. */
15967         MBX_REGISTER_UNLOCK(ha);
15968 
15969         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15970 
15971         return (QL_SUCCESS);
15972 }
15973 
15974 /*
15975  * ql_add_link_b
15976  *      Add link to the end of the chain.
15977  *
15978  * Input:
15979  *      head = Head of link list.
15980  *      link = link to be added.
15981  *      LOCK must be already obtained.
15982  *
15983  * Context:
15984  *      Interrupt or Kernel context, no mailbox commands allowed.
15985  */
15986 void
15987 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15988 {
15989         /* at the end there isn't a next */
15990         link->next = NULL;
15991 
15992         if ((link->prev = head->last) == NULL) {
15993                 head->first = link;
15994         } else {
15995                 head->last->next = link;
15996         }
15997 
15998         head->last = link;
15999         link->head = head;   /* the queue we're on */
16000 }
16001 
16002 /*
16003  * ql_add_link_t
16004  *      Add link to the beginning of the chain.
16005  *
16006  * Input:
16007  *      head = Head of link list.
16008  *      link = link to be added.
16009  *      LOCK must be already obtained.
16010  *
16011  * Context:
16012  *      Interrupt or Kernel context, no mailbox commands allowed.
16013  */
16014 void
16015 ql_add_link_t(ql_head_t *head, ql_link_t *link)
16016 {
16017         link->prev = NULL;
16018 
16019         if ((link->next = head->first) == NULL)   {
16020                 head->last = link;
16021         } else {
16022                 head->first->prev = link;
16023         }
16024 
16025         head->first = link;
16026         link->head = head;   /* the queue we're on */
16027 }
16028 
16029 /*
16030  * ql_remove_link
16031  *      Remove a link from the chain.
16032  *
16033  * Input:
16034  *      head = Head of link list.
16035  *      link = link to be removed.
16036  *      LOCK must be already obtained.
16037  *
16038  * Context:
16039  *      Interrupt or Kernel context, no mailbox commands allowed.
16040  */
16041 void
16042 ql_remove_link(ql_head_t *head, ql_link_t *link)
16043 {
16044         if (link->prev != NULL) {
16045                 if ((link->prev->next = link->next) == NULL) {
16046                         head->last = link->prev;
16047                 } else {
16048                         link->next->prev = link->prev;
16049                 }
16050         } else if ((head->first = link->next) == NULL) {
16051                 head->last = NULL;
16052         } else {
16053                 head->first->prev = NULL;
16054         }
16055 
16056         /* not on a queue any more */
16057         link->prev = link->next = NULL;
16058         link->head = NULL;
16059 }
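
      /*
       * Illustrative sketch only (not part of the driver build): typical use
       * of the list primitives above.  The element embeds a ql_link_t whose
       * base_address points back at the element; the queue's lock must be
       * held around every operation.  The names "sp", "cmd_q" and the lock
       * steps are hypothetical.
       *
       *      sp->cmd.base_address = sp;
       *
       *      (acquire the lock protecting cmd_q)
       *      ql_add_link_b(&cmd_q, &sp->cmd);        queue at the tail
       *      ...
       *      ql_remove_link(&cmd_q, &sp->cmd);       take it back off
       *      (release the lock)
       */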
16060 
16061 /*
16062  * ql_chg_endian
16063  *      Change endianness of a byte array.
16064  *
16065  * Input:
16066  *      buf = array pointer.
16067  *      size = size of array in bytes.
16068  *
16069  * Context:
16070  *      Interrupt or Kernel context, no mailbox commands allowed.
16071  */
16072 void
16073 ql_chg_endian(uint8_t buf[], size_t size)
16074 {
16075         uint8_t byte;
16076         size_t  cnt1;
16077         size_t  cnt;
16078 
16079         cnt1 = size - 1;
16080         for (cnt = 0; cnt < size / 2; cnt++) {
16081                 byte = buf[cnt1];
16082                 buf[cnt1] = buf[cnt];
16083                 buf[cnt] = byte;
16084                 cnt1--;
16085         }
16086 }
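
      /*
       * Illustrative example only: ql_chg_endian() reverses the array in
       * place, so a four byte buffer of { 0x11, 0x22, 0x33, 0x44 } becomes
       * { 0x44, 0x33, 0x22, 0x11 } after ql_chg_endian(buf, 4).
       */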
16087 
16088 /*
16089  * ql_bstr_to_dec
16090  *      Convert decimal byte string to number.
16091  *
16092  * Input:
16093  *      s:      byte string pointer.
16094  *      ans:    integer pointer for number.
16095  *      size:   number of ASCII bytes.
16096  *
16097  * Returns:
16098  *      success = number of ASCII bytes processed.
16099  *
16100  * Context:
16101  *      Kernel/Interrupt context.
16102  */
16103 static int
16104 ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
16105 {
16106         int                     mul, num, cnt, pos;
16107         char                    *str;
16108 
16109         /* Calculate size of number. */
16110         if (size == 0) {
16111                 for (str = s; *str >= '0' && *str <= '9'; str++) {
16112                         size++;
16113                 }
16114         }
16115 
16116         *ans = 0;
16117         for (cnt = 0; *s != '\0' && size; size--, cnt++) {
16118                 if (*s >= '0' && *s <= '9') {
16119                         num = *s++ - '0';
16120                 } else {
16121                         break;
16122                 }
16123 
16124                 for (mul = 1, pos = 1; pos < size; pos++) {
16125                         mul *= 10;
16126                 }
16127                 *ans += num * mul;
16128         }
16129 
16130         return (cnt);
16131 }
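
      /*
       * Illustrative example only: with s = "12.34" and size = 0,
       * ql_bstr_to_dec() first sizes the leading run of digits (2), then
       * accumulates *ans = 1 * 10 + 2 = 12 and returns 2; the ".34" is
       * left unconsumed.
       */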
16132 
16133 /*
16134  * ql_delay
16135  *      Calls the delay routine if the daemon is not suspended and the system
16136  *      is not panicking; otherwise busy waits.  Minimum = 1 tick = 10ms.
16137  *
16138  * Input:
16139  *      ha = adapter state pointer, usecs = delay time in microseconds.
16140  *
16141  * Context:
16142  *      Kernel or Interrupt context, no mailbox commands allowed.
16143  */
16144 void
16145 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
16146 {
16147         if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
16148                 drv_usecwait(usecs);
16149         } else {
16150                 delay(drv_usectohz(usecs));
16151         }
16152 }
16153 
16154 /*
16155  * ql_stall_driver
16156  *      Stalls one or all driver instances; waits up to 30 seconds.
16157  *
16158  * Input:
16159  *      ha:             adapter state pointer or NULL for all.
16160  *      options:        BIT_0 --> leave driver stalled on exit if
16161  *                                failed.
16162  *
16163  * Returns:
16164  *      ql local function return status code.
16165  *
16166  * Context:
16167  *      Kernel context.
16168  */
16169 int
16170 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
16171 {
16172         ql_link_t               *link;
16173         ql_adapter_state_t      *ha2;
16174         uint32_t                timer;
16175 
16176         QL_PRINT_3(CE_CONT, "started\n");
16177 
16178         /* Wait up to 30 seconds for the daemons to stall. */
16179         timer = 3000;
16180         link = ha == NULL ? ql_hba.first : &ha->hba;
16181         while (link != NULL && timer) {
16182                 ha2 = link->base_address;
16183 
16184                 ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
16185 
16186                 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16187                     (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16188                     (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
16189                     ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
16190                         link = ha == NULL ? link->next : NULL;
16191                         continue;
16192                 }
16193 
16194                 ql_delay(ha2, 10000);
16195                 timer--;
16196                 link = ha == NULL ? ql_hba.first : &ha->hba;
16197         }
16198 
16199         if (ha2 != NULL && timer == 0) {
16200                 EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
16201                     ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
16202                     "unstalled"));
16203                 if (options & BIT_0) {
16204                         ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16205                 }
16206                 return (QL_FUNCTION_TIMEOUT);
16207         }
16208 
16209         QL_PRINT_3(CE_CONT, "done\n");
16210 
16211         return (QL_SUCCESS);
16212 }
16213 
16214 /*
16215  * ql_restart_driver
16216  *      Restarts one or all driver instances.
16217  *
16218  * Input:
16219  *      ha:     adapter state pointer or NULL for all.
16220  *
16221  * Context:
16222  *      Kernel context.
16223  */
16224 void
16225 ql_restart_driver(ql_adapter_state_t *ha)
16226 {
16227         ql_link_t               *link;
16228         ql_adapter_state_t      *ha2;
16229         uint32_t                timer;
16230 
16231         QL_PRINT_3(CE_CONT, "started\n");
16232 
16233         /* Tell all daemons to unstall. */
16234         link = ha == NULL ? ql_hba.first : &ha->hba;
16235         while (link != NULL) {
16236                 ha2 = link->base_address;
16237 
16238                 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16239 
16240                 link = ha == NULL ? link->next : NULL;
16241         }
16242 
16243         /* Wait up to 30 seconds for all daemons to unstall. */
16244         timer = 3000;
16245         link = ha == NULL ? ql_hba.first : &ha->hba;
16246         while (link != NULL && timer) {
16247                 ha2 = link->base_address;
16248 
16249                 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16250                     (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16251                     (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
16252                         QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
16253                             ha2->instance, ha2->vp_index);
16254                         ql_restart_queues(ha2);
16255                         link = ha == NULL ? link->next : NULL;
16256                         continue;
16257                 }
16258 
16259                 QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
16260                     ha2->instance, ha2->vp_index, ha2->task_daemon_flags);
16261 
16262                 ql_delay(ha2, 10000);
16263                 timer--;
16264                 link = ha == NULL ? ql_hba.first : &ha->hba;
16265         }
16266 
16267         QL_PRINT_3(CE_CONT, "done\n");
16268 }
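
      /*
       * Illustrative sketch only (not part of the driver build):
       * ql_stall_driver() and ql_restart_driver() are meant to be used as a
       * pair around work that needs quiesced instances; a NULL ha operates
       * on every adapter.
       *
       *      if (ql_stall_driver(NULL, 0) == QL_SUCCESS) {
       *              ... work that requires all instances stalled ...
       *      }
       *      ql_restart_driver(NULL);
       */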
16269 
16270 /*
16271  * ql_setup_interrupts
16272  *      Sets up interrupts based on the HBA's and platform's
16273  *      capabilities (e.g., MSI-X / MSI / FIXED / legacy).
16274  *
16275  * Input:
16276  *      ha = adapter state pointer.
16277  *
16278  * Returns:
16279  *      DDI_SUCCESS or DDI_FAILURE.
16280  *
16281  * Context:
16282  *      Kernel context.
16283  */
16284 static int
16285 ql_setup_interrupts(ql_adapter_state_t *ha)
16286 {
16287         int32_t         rval = DDI_FAILURE;
16288         int32_t         i;
16289         int32_t         itypes = 0;
16290 
16291         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16292 
16293         /*
16294          * The Solaris Advanced Interrupt Functions (aif) are only
16295          * supported on s10U1 or greater.
16296          */
16297         if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16298                 EL(ha, "interrupt framework is not supported or is "
16299                     "disabled, using legacy\n");
16300                 return (ql_legacy_intr(ha));
16301         } else if (ql_os_release_level == 10) {
16302                 /*
16303                  * See if the advanced interrupt functions (aif) are
16304                  * in the kernel
16305                  */
16306                 void    *fptr = (void *)&ddi_intr_get_supported_types;
16307 
16308                 if (fptr == NULL) {
16309                         EL(ha, "aif is not supported, using legacy "
16310                             "interrupts (rev)\n");
16311                         return (ql_legacy_intr(ha));
16312                 }
16313         }
16314 
16315         /* See what types of interrupts this HBA and platform support */
16316         if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
16317             DDI_SUCCESS) {
16318                 EL(ha, "get supported types failed, rval=%xh, "
16319                     "assuming FIXED\n", i);
16320                 itypes = DDI_INTR_TYPE_FIXED;
16321         }
16322 
16323         EL(ha, "supported types are: %xh\n", itypes);
16324 
16325         if ((itypes & DDI_INTR_TYPE_MSIX) &&
16326             (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16327                 EL(ha, "successful MSI-X setup\n");
16328         } else if ((itypes & DDI_INTR_TYPE_MSI) &&
16329             (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16330                 EL(ha, "successful MSI setup\n");
16331         } else {
16332                 rval = ql_setup_fixed(ha);
16333         }
16334 
16335         if (rval != DDI_SUCCESS) {
16336                 EL(ha, "failed, aif, rval=%xh\n", rval);
16337         } else {
16338                 /*EMPTY*/
16339                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16340         }
16341 
16342         return (rval);
16343 }
16344 
16345 /*
16346  * ql_setup_msi
16347  *      Set up aif MSI interrupts
16348  *
16349  * Input:
16350  *      ha = adapter state pointer.
16351  *
16352  * Returns:
16353  *      DDI_SUCCESS or DDI_FAILURE.
16354  *
16355  * Context:
16356  *      Kernel context.
16357  */
16358 static int
16359 ql_setup_msi(ql_adapter_state_t *ha)
16360 {
16361         int32_t         count = 0;
16362         int32_t         avail = 0;
16363         int32_t         actual = 0;
16364         int32_t         msitype = DDI_INTR_TYPE_MSI;
16365         int32_t         ret;
16366         ql_ifunc_t      itrfun[10] = {0};
16367 
16368         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16369 
16370         if (ql_disable_msi != 0) {
16371                 EL(ha, "MSI is disabled by user\n");
16372                 return (DDI_FAILURE);
16373         }
16374 
16375         /* MSI is only supported on 24xx HBAs. */
16376         if (!(CFG_IST(ha, CFG_CTRL_24258081))) {
16377                 EL(ha, "HBA does not support MSI\n");
16378                 return (DDI_FAILURE);
16379         }
16380 
16381         /* Get number of MSI interrupts the system supports */
16382         if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16383             DDI_SUCCESS) || count == 0) {
16384                 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16385                 return (DDI_FAILURE);
16386         }
16387 
16388         /* Get number of available MSI interrupts */
16389         if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16390             DDI_SUCCESS) || avail == 0) {
16391                 EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16392                 return (DDI_FAILURE);
16393         }
16394 
16395         /* MSI requires only 1.  */
16396         count = 1;
16397         itrfun[0].ifunc = &ql_isr_aif;
16398 
16399         /* Allocate space for interrupt handles */
16400         ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16401         ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16402 
16403         ha->iflags |= IFLG_INTR_MSI;
16404 
16405         /* Allocate the interrupts */
16406         if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
16407             &actual, 0)) != DDI_SUCCESS || actual < count) {
16408                 EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16409                     "actual=%xh\n", ret, count, actual);
16410                 ql_release_intr(ha);
16411                 return (DDI_FAILURE);
16412         }
16413 
16414         ha->intr_cnt = actual;
16415 
16416         /* Get interrupt priority */
16417         if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16418             DDI_SUCCESS) {
16419                 EL(ha, "failed, get_pri ret=%xh\n", ret);
16420                 ql_release_intr(ha);
16421                 return (ret);
16422         }
16423 
16424         /* Add the interrupt handler */
16425         if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
16426             (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
16427                 EL(ha, "failed, intr_add ret=%xh\n", ret);
16428                 ql_release_intr(ha);
16429                 return (ret);
16430         }
16431 
16432         /* Setup mutexes */
16433         if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16434                 EL(ha, "failed, mutex init ret=%xh\n", ret);
16435                 ql_release_intr(ha);
16436                 return (ret);
16437         }
16438 
16439         /* Get the capabilities */
16440         (void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16441 
16442         /* Enable interrupts */
16443         if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16444                 if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
16445                     DDI_SUCCESS) {
16446                         EL(ha, "failed, block enable, ret=%xh\n", ret);
16447                         ql_destroy_mutex(ha);
16448                         ql_release_intr(ha);
16449                         return (ret);
16450                 }
16451         } else {
16452                 if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
16453                         EL(ha, "failed, intr enable, ret=%xh\n", ret);
16454                         ql_destroy_mutex(ha);
16455                         ql_release_intr(ha);
16456                         return (ret);
16457                 }
16458         }
16459 
16460         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16461 
16462         return (DDI_SUCCESS);
16463 }
16464 
16465 /*
16466  * ql_setup_msix
16467  *      Set up aif MSI-X interrupts
16468  *
16469  * Input:
16470  *      ha = adapter state pointer.
16471  *
16472  * Returns:
16473  *      DDI_SUCCESS or DDI_FAILURE.
16474  *
16475  * Context:
16476  *      Kernel context.
16477  */
16478 static int
16479 ql_setup_msix(ql_adapter_state_t *ha)
16480 {
16481         uint16_t        hwvect;
16482         int32_t         count = 0;
16483         int32_t         avail = 0;
16484         int32_t         actual = 0;
16485         int32_t         msitype = DDI_INTR_TYPE_MSIX;
16486         int32_t         ret;
16487         uint32_t        i;
16488         ql_ifunc_t      itrfun[QL_MSIX_MAXAIF] = {0};
16489 
16490         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16491 
16492         if (ql_disable_msix != 0) {
16493                 EL(ha, "MSI-X is disabled by user\n");
16494                 return (DDI_FAILURE);
16495         }
16496 
16497         /*
16498          * MSI-X support is only available on 24xx HBA's that have
16499          * rev A2 parts (revid = 3) or greater.
16500          */
16501         if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
16502             (ha->device_id == 0x8432) || (ha->device_id == 0x8001) ||
16503             (ha->device_id == 0x8021))) {
16504                 EL(ha, "HBA does not support MSI-X\n");
16505                 return (DDI_FAILURE);
16506         }
16507 
16508         if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
16509                 EL(ha, "HBA does not support MSI-X (revid)\n");
16510                 return (DDI_FAILURE);
16511         }
16512 
16513         /* Per HP, these HP branded HBA's are not supported with MSI-X */
16514         if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
16515             ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
16516                 EL(ha, "HBA does not support MSI-X (subdevid)\n");
16517                 return (DDI_FAILURE);
16518         }
16519 
16520         /* Get the number of 24xx/25xx MSI-X h/w vectors */
16521         hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
16522             ql_pci_config_get16(ha, 0x7e) :
16523             ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);
16524 
16525         EL(ha, "pcie config space hwvect = %d\n", hwvect);
16526 
16527         if (hwvect < QL_MSIX_MAXAIF) {
16528                 EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
16529                     QL_MSIX_MAXAIF, hwvect);
16530                 return (DDI_FAILURE);
16531         }
16532 
16533         /* Get number of MSI-X interrupts the platform h/w supports */
16534         if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16535             DDI_SUCCESS) || count == 0) {
16536                 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16537                 return (DDI_FAILURE);
16538         }
16539 
16540         /* Get number of available system interrupts */
16541         if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16542             DDI_SUCCESS) || avail == 0) {
16543                 EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16544                 return (DDI_FAILURE);
16545         }
16546 
16547         /* Fill out the intr table */
16548         count = QL_MSIX_MAXAIF;
16549         itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
16550         itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;
16551 
16552         /* Allocate space for interrupt handles */
16553         ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
16554         if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
16555                 ha->hsize = 0;
16556                 EL(ha, "failed, unable to allocate htable space\n");
16557                 return (DDI_FAILURE);
16558         }
16559 
16560         ha->iflags |= IFLG_INTR_MSIX;
16561 
16562         /* Allocate the interrupts */
16563         if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
16564             DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
16565             actual < QL_MSIX_MAXAIF) {
16566                 EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16567                     "actual=%xh\n", ret, count, actual);
16568                 ql_release_intr(ha);
16569                 return (DDI_FAILURE);
16570         }
16571 
16572         ha->intr_cnt = actual;
16573 
16574         /* Get interrupt priority */
16575         if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16576             DDI_SUCCESS) {
16577                 EL(ha, "failed, get_pri ret=%xh\n", ret);
16578                 ql_release_intr(ha);
16579                 return (ret);
16580         }
16581 
16582         /* Add the interrupt handlers */
16583         for (i = 0; i < actual; i++) {
16584                 if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
16585                     (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
16586                         EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
16587                             actual, ret);
16588                         ql_release_intr(ha);
16589                         return (ret);
16590                 }
16591         }
16592 
16593         /*
16594          * duplicate the rest of the interrupts;
16595          * ddi_intr_dup_handler() isn't working on x86 just yet...
16596          */
16597 #ifdef __sparc
16598         for (i = actual; i < hwvect; i++) {
16599                 if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
16600                     &ha->htable[i])) != DDI_SUCCESS) {
16601                         EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
16602                             i, actual, ret);
16603                         ql_release_intr(ha);
16604                         return (ret);
16605                 }
16606         }
16607 #endif
16608 
16609         /* Setup mutexes */
16610         if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16611                 EL(ha, "failed, mutex init ret=%xh\n", ret);
16612                 ql_release_intr(ha);
16613                 return (ret);
16614         }
16615 
16616         /* Get the capabilities */
16617         (void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16618 
16619         /* Enable interrupts */
16620         if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16621                 if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
16622                     DDI_SUCCESS) {
16623                         EL(ha, "failed, block enable, ret=%xh\n", ret);
16624                         ql_destroy_mutex(ha);
16625                         ql_release_intr(ha);
16626                         return (ret);
16627                 }
16628         } else {
16629                 for (i = 0; i < ha->intr_cnt; i++) {
16630                         if ((ret = ddi_intr_enable(ha->htable[i])) !=
16631                             DDI_SUCCESS) {
16632                                 EL(ha, "failed, intr enable, ret=%xh\n", ret);
16633                                 ql_destroy_mutex(ha);
16634                                 ql_release_intr(ha);
16635                                 return (ret);
16636                         }
16637                 }
16638         }
16639 
16640         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16641 
16642         return (DDI_SUCCESS);
16643 }
16644 
16645 /*
16646  * ql_setup_fixed
16647  *      Sets up aif FIXED interrupts
16648  *
16649  * Input:
16650  *      ha = adapter state pointer.
16651  *
16652  * Returns:
16653  *      DDI_SUCCESS or DDI_FAILURE.
16654  *
16655  * Context:
16656  *      Kernel context.
16657  */
16658 static int
16659 ql_setup_fixed(ql_adapter_state_t *ha)
16660 {
16661         int32_t         count = 0;
16662         int32_t         actual = 0;
16663         int32_t         ret;
16664         uint32_t        i;
16665 
16666         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16667 
16668         /* Get number of fixed interrupts the system supports */
16669         if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
16670             &count)) != DDI_SUCCESS) || count == 0) {
16671                 EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16672                 return (DDI_FAILURE);
16673         }
16674 
16675         ha->iflags |= IFLG_INTR_FIXED;
16676 
16677         /* Allocate space for interrupt handles */
16678         ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16679         ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16680 
16681         /* Allocate the interrupts */
16682         if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
16683             0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
16684             actual < count) {
16685                 EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
16686                     "actual=%xh\n", ret, count, actual);
16687                 ql_release_intr(ha);
16688                 return (DDI_FAILURE);
16689         }
16690 
16691         ha->intr_cnt = actual;
16692 
16693         /* Get interrupt priority */
16694         if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16695             DDI_SUCCESS) {
16696                 EL(ha, "failed, get_pri ret=%xh\n", ret);
16697                 ql_release_intr(ha);
16698                 return (ret);
16699         }
16700 
16701         /* Add the interrupt handlers */
16702         for (i = 0; i < ha->intr_cnt; i++) {
16703                 if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
16704                     (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
16705                         EL(ha, "failed, intr_add ret=%xh\n", ret);
16706                         ql_release_intr(ha);
16707                         return (ret);
16708                 }
16709         }
16710 
16711         /* Setup mutexes */
16712         if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16713                 EL(ha, "failed, mutex init ret=%xh\n", ret);
16714                 ql_release_intr(ha);
16715                 return (ret);
16716         }
16717 
16718         /* Enable interrupts */
16719         for (i = 0; i < ha->intr_cnt; i++) {
16720                 if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
16721                         EL(ha, "failed, intr enable, ret=%xh\n", ret);
16722                         ql_destroy_mutex(ha);
16723                         ql_release_intr(ha);
16724                         return (ret);
16725                 }
16726         }
16727 
16728         EL(ha, "using FIXED interrupts\n");
16729 
16730         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16731 
16732         return (DDI_SUCCESS);
16733 }
16734 
16735 /*
16736  * ql_disable_intr
16737  *      Disables interrupts
16738  *
16739  * Input:
16740  *      ha = adapter state pointer.
16741  *
16742  * Returns:
16743  *
16744  * Context:
16745  *      Kernel context.
16746  */
16747 static void
16748 ql_disable_intr(ql_adapter_state_t *ha)
16749 {
16750         uint32_t        i, rval;
16751 
16752         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16753 
16754         if (!(ha->iflags & IFLG_INTR_AIF)) {
16755 
16756                 /* Disable legacy interrupts */
16757                 (void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
16758 
16759         } else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
16760             (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {
16761 
16762                 /* Remove AIF block interrupts (MSI) */
16763                 if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
16764                     != DDI_SUCCESS) {
16765                         EL(ha, "failed intr block disable, rval=%x\n", rval);
16766                 }
16767 
16768         } else {
16769 
16770                 /* Remove AIF non-block interrupts (fixed).  */
16771                 for (i = 0; i < ha->intr_cnt; i++) {
16772                         if ((rval = ddi_intr_disable(ha->htable[i])) !=
16773                             DDI_SUCCESS) {
16774                                 EL(ha, "failed intr disable, intr#=%xh, "
16775                                     "rval=%xh\n", i, rval);
16776                         }
16777                 }
16778         }
16779 
16780         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16781 }
16782 
16783 /*
16784  * ql_release_intr
16785  *      Releases aif interrupt resources
16786  *
16787  * Input:
16788  *      ha = adapter state pointer.
16789  *
16790  * Returns:
16791  *
16792  * Context:
16793  *      Kernel context.
16794  */
16795 static void
16796 ql_release_intr(ql_adapter_state_t *ha)
16797 {
16798         int32_t         i;
16799 
16800         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16801 
16802         if (!(ha->iflags & IFLG_INTR_AIF)) {
16803                 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16804                 return;
16805         }
16806 
16807         ha->iflags &= ~(IFLG_INTR_AIF);
16808         if (ha->htable != NULL && ha->hsize > 0) {
16809                 i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
16810                 while (i-- > 0) {
16811                         if (ha->htable[i] == 0) {
16812                                 EL(ha, "htable[%x]=0h\n", i);
16813                                 continue;
16814                         }
16815 
16816                         (void) ddi_intr_disable(ha->htable[i]);
16817 
16818                         if (i < ha->intr_cnt) {
16819                                 (void) ddi_intr_remove_handler(ha->htable[i]);
16820                         }
16821 
16822                         (void) ddi_intr_free(ha->htable[i]);
16823                 }
16824 
16825                 kmem_free(ha->htable, ha->hsize);
16826                 ha->htable = NULL;
16827         }
16828 
16829         ha->hsize = 0;
16830         ha->intr_cnt = 0;
16831         ha->intr_pri = 0;
16832         ha->intr_cap = 0;
16833 
16834         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16835 }
16836 
16837 /*
16838  * ql_legacy_intr
16839  *      Sets up legacy interrupts.
16840  *
16841  *      NB: Only to be used if AIF (Advanced Interrupt Framework)
16842  *          is NOT in the kernel.
16843  *
16844  * Input:
16845  *      ha = adapter state pointer.
16846  *
16847  * Returns:
16848  *      DDI_SUCCESS or DDI_FAILURE.
16849  *
16850  * Context:
16851  *      Kernel context.
16852  */
16853 static int
16854 ql_legacy_intr(ql_adapter_state_t *ha)
16855 {
16856         int     rval = DDI_SUCCESS;
16857 
16858         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16859 
16860         /* Setup mutexes */
16861         if (ql_init_mutex(ha) != DDI_SUCCESS) {
16862                 EL(ha, "failed, mutex init\n");
16863                 return (DDI_FAILURE);
16864         }
16865 
16866         /* Setup standard/legacy interrupt handler */
16867         if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16868             (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16869                 cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16870                     QL_NAME, ha->instance);
16871                 ql_destroy_mutex(ha);
16872                 rval = DDI_FAILURE;
16873         }
16874 
16875         if (rval == DDI_SUCCESS) {
16876                 ha->iflags |= IFLG_INTR_LEGACY;
16877                 EL(ha, "using legacy interrupts\n");
16878         }
16879 
16880         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16881 
16882         return (rval);
16883 }
16884 
16885 /*
16886  * ql_init_mutex
16887  *      Initializes mutexes and condition variables
16888  *
16889  * Input:
16890  *      ha = adapter state pointer.
16891  *
16892  * Returns:
16893  *      DDI_SUCCESS or DDI_FAILURE.
16894  *
16895  * Context:
16896  *      Kernel context.
16897  */
16898 static int
16899 ql_init_mutex(ql_adapter_state_t *ha)
16900 {
16901         int     ret;
16902         void    *intr;
16903 
16904         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16905 
16906         if (ha->iflags & IFLG_INTR_AIF) {
16907                 intr = (void *)(uintptr_t)ha->intr_pri;
16908         } else {
16909                 /* Get iblock cookies to initialize mutexes */
16910                 if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
16911                     &ha->iblock_cookie)) != DDI_SUCCESS) {
16912                         EL(ha, "failed, get_iblock: %xh\n", ret);
16913                         return (DDI_FAILURE);
16914                 }
16915                 intr = (void *)ha->iblock_cookie;
16916         }
16917 
16918         /* mutex to protect the adapter state structure. */
16919         mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);
16920 
16921         /* mutex to protect the ISP response ring. */
16922         mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);
16923 
16924         /* mutex to protect the mailbox registers. */
16925         mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);
16926 
16927         /* power management protection */
16928         mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);
16929 
16930         /* Mailbox wait and interrupt condition variables. */
16931         cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
16932         cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);
16933 
16934         /* mutex to protect the ISP request ring. */
16935         mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);
16936 
16937         /* Unsolicited buffer condition variable. */
16938         cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);
16939 
16940         mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
16941         mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);
16942 
16943         /* Suspended condition variable. */
16944         cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);
16945 
16946         /* mutex to protect task daemon context. */
16947         mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);
16948 
16949         /* Task_daemon thread condition variable. */
16950         cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);
16951 
16952         /* mutex to protect diag port manage interface */
16953         mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);
16954 
16955         /* mutex to protect per instance f/w dump flags and buffer */
16956         mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);
16957 
16958         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16959 
16960         return (DDI_SUCCESS);
16961 }
16962 
16963 /*
16964  * ql_destroy_mutex
16965  *      Destroys mutexes and condition variables
16966  *
16967  * Input:
16968  *      ha = adapter state pointer.
16969  *
16970  * Returns:
16971  *
16972  * Context:
16973  *      Kernel context.
16974  */
16975 static void
16976 ql_destroy_mutex(ql_adapter_state_t *ha)
16977 {
16978         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16979 
16980         mutex_destroy(&ha->dump_mutex);
16981         mutex_destroy(&ha->portmutex);
16982         cv_destroy(&ha->cv_task_daemon);
16983         mutex_destroy(&ha->task_daemon_mutex);
16984         cv_destroy(&ha->cv_dr_suspended);
16985         mutex_destroy(&ha->cache_mutex);
16986         mutex_destroy(&ha->ub_mutex);
16987         cv_destroy(&ha->cv_ub);
16988         mutex_destroy(&ha->req_ring_mutex);
16989         cv_destroy(&ha->cv_mbx_intr);
16990         cv_destroy(&ha->cv_mbx_wait);
16991         mutex_destroy(&ha->pm_mutex);
16992         mutex_destroy(&ha->mbx_mutex);
16993         mutex_destroy(&ha->intr_mutex);
16994         mutex_destroy(&ha->mutex);
16995 
16996         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16997 }
16998 
16999 /*
17000  * ql_fwmodule_resolve
17001  *      Loads and resolves external firmware module and symbols
17002  *
17003  * Input:
17004  *      ha:             adapter state pointer.
17005  *
17006  * Returns:
17007  *      ql local function return status code:
17008  *              QL_SUCCESS - external f/w module and symbols resolved
17009  *              QL_FW_NOT_SUPPORTED - Driver does not support ISP type
17010  *              QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
17011  *              QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
17012  * Context:
17013  *      Kernel context.
17014  *
17015  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
17016  * could switch to a tighter scope around the actual download (and add an
17017  * extra ddi_modopen for module opens that occur before root is mounted).
17018  *
17019  */
17020 uint32_t
17021 ql_fwmodule_resolve(ql_adapter_state_t *ha)
17022 {
17023         int8_t                  module[128];
17024         int8_t                  fw_version[128];
17025         uint32_t                rval = QL_SUCCESS;
17026         caddr_t                 code, code02;
17027         uint8_t                 *p_ucfw;
17028         uint16_t                *p_usaddr, *p_uslen;
17029         uint32_t                *p_uiaddr, *p_uilen, *p_uifw;
17030         uint32_t                *p_uiaddr02, *p_uilen02;
17031         struct fw_table         *fwt;
17032         extern struct fw_table  fw_table[];
17033 
17034         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17035 
17036         if (ha->fw_module != NULL) {
17037                 EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
17038                     ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
17039                     ha->fw_subminor_version);
17040                 return (rval);
17041         }
17042 
17043         /* make sure the fw_class is in the fw_table of supported classes */
17044         for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
17045                 if (fwt->fw_class == ha->fw_class)
17046                         break;                  /* match */
17047         }
17048         if (fwt->fw_version == NULL) {
17049                 cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
17050                     "in driver's fw_table", QL_NAME, ha->instance,
17051                     ha->fw_class);
17052                 return (QL_FW_NOT_SUPPORTED);
17053         }
17054 
17055         /*
17056          * open the module related to the fw_class
17057          */
17058         (void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
17059             ha->fw_class);
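              /*
               * For example, an fw_class of 0x2400 resolves the module
               * path to "misc/qlc/qlc_fw_2400".
               */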
17060 
17061         ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
17062         if (ha->fw_module == NULL) {
17063                 cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
17064                     QL_NAME, ha->instance, module);
17065                 return (QL_FWMODLOAD_FAILED);
17066         }
17067 
17068         /*
17069          * resolve the fw module symbols, data types depend on fw_class
17070          */
17071 
17072         switch (ha->fw_class) {
17073         case 0x2200:
17074         case 0x2300:
17075         case 0x6322:
17076 
17077                 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17078                     NULL)) == NULL) {
17079                         rval = QL_FWSYM_NOT_FOUND;
17080                         EL(ha, "failed, f/w module %s rc01 symbol\n", module);
17081                 } else if ((p_usaddr = ddi_modsym(ha->fw_module,
17082                     "risc_code_addr01", NULL)) == NULL) {
17083                         rval = QL_FWSYM_NOT_FOUND;
17084                         EL(ha, "failed, f/w module %s rca01 symbol\n", module);
17085                 } else if ((p_uslen = ddi_modsym(ha->fw_module,
17086                     "risc_code_length01", NULL)) == NULL) {
17087                         rval = QL_FWSYM_NOT_FOUND;
17088                         EL(ha, "failed, f/w module %s rcl01 symbol\n", module);
17089                 } else if ((p_ucfw = ddi_modsym(ha->fw_module,
17090                     "firmware_version", NULL)) == NULL) {
17091                         rval = QL_FWSYM_NOT_FOUND;
17092                         EL(ha, "failed, f/w module %s fwver symbol\n", module);
17093                 }
17094 
17095                 if (rval == QL_SUCCESS) {
17096                         ha->risc_fw[0].code = code;
17097                         ha->risc_fw[0].addr = *p_usaddr;
17098                         ha->risc_fw[0].length = *p_uslen;
17099 
17100                         (void) snprintf(fw_version, sizeof (fw_version),
17101                             "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
17102                 }
17103                 break;
17104 
17105         case 0x2400:
17106         case 0x2500:
17107         case 0x8100:
17108 
17109                 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17110                     NULL)) == NULL) {
17111                         rval = QL_FWSYM_NOT_FOUND;
17112                         EL(ha, "failed, f/w module %s rc01 symbol\n", module);
17113                 } else if ((p_uiaddr = ddi_modsym(ha->fw_module,
17114                     "risc_code_addr01", NULL)) == NULL) {
17115                         rval = QL_FWSYM_NOT_FOUND;
17116                         EL(ha, "failed, f/w module %s rca01 symbol\n", module);
17117                 } else if ((p_uilen = ddi_modsym(ha->fw_module,
17118                     "risc_code_length01", NULL)) == NULL) {
17119                         rval = QL_FWSYM_NOT_FOUND;
17120                         EL(ha, "failed, f/w module %s rcl01 symbol\n", module);
17121                 } else if ((p_uifw = ddi_modsym(ha->fw_module,
17122                     "firmware_version", NULL)) == NULL) {
17123                         rval = QL_FWSYM_NOT_FOUND;
17124                         EL(ha, "failed, f/w module %s fwver symbol\n", module);
17125                 }
17126 
17127                 if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
17128                     NULL)) == NULL) {
17129                         rval = QL_FWSYM_NOT_FOUND;
17130                         EL(ha, "failed, f/w module %s rc02 symbol\n", module);
17131                 } else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
17132                     "risc_code_addr02", NULL)) == NULL) {
17133                         rval = QL_FWSYM_NOT_FOUND;
17134                         EL(ha, "failed, f/w module %s rca02 symbol\n", module);
17135                 } else if ((p_uilen02 = ddi_modsym(ha->fw_module,
17136                     "risc_code_length02", NULL)) == NULL) {
17137                         rval = QL_FWSYM_NOT_FOUND;
17138                         EL(ha, "failed, f/w module %s rcl02 symbol\n", module);
17139                 }
17140 
17141                 if (rval == QL_SUCCESS) {
17142                         ha->risc_fw[0].code = code;
17143                         ha->risc_fw[0].addr = *p_uiaddr;
17144                         ha->risc_fw[0].length = *p_uilen;
17145                         ha->risc_fw[1].code = code02;
17146                         ha->risc_fw[1].addr = *p_uiaddr02;
17147                         ha->risc_fw[1].length = *p_uilen02;
17148 
17149                         (void) snprintf(fw_version, sizeof (fw_version),
17150                             "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
17151                 }
17152                 break;
17153 
17154         default:
17155                 EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
17156                 rval = QL_FW_NOT_SUPPORTED;
17157         }
17158 
17159         if (rval != QL_SUCCESS) {
17160                 cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
17161                     "module %s (%x)", QL_NAME, ha->instance, module, rval);
17162                 if (ha->fw_module != NULL) {
17163                         (void) ddi_modclose(ha->fw_module);
17164                         ha->fw_module = NULL;
17165                 }
17166         } else {
17167                 /*
17168                  * check for firmware version mismatch between module and
17169                  * compiled in fw_table version.
17170                  */
17171 
17172                 if (strcmp(fwt->fw_version, fw_version) != 0) {
17173 
17174                         /*
17175                          * If f/w / driver version mismatches then
17176                          * return a successful status -- however warn
17177                          * the user that this is NOT recommended.
17178                          */
17179 
17180                         cmn_err(CE_WARN, "%s(%d): driver / f/w version "
17181                             "mismatch for %x: driver-%s module-%s", QL_NAME,
17182                             ha->instance, ha->fw_class, fwt->fw_version,
17183                             fw_version);
17184 
17185                         ha->cfg_flags |= CFG_FW_MISMATCH;
17186                 } else {
17187                         ha->cfg_flags &= ~CFG_FW_MISMATCH;
17188                 }
17189         }
17190 
17191         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17192 
17193         return (rval);
17194 }
17195 
17196 /*
17197  * ql_port_state
17198  *      Set the state on all adapter ports.
17199  *
17200  * Input:
17201  *      ha:     parent adapter state pointer.
17202  *      state:  port state.
17203  *      flags:  task daemon flags to set.
17204  *
17205  * Context:
17206  *      Interrupt or Kernel context, no mailbox commands allowed.
17207  */
17208 void
17209 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
17210 {
17211         ql_adapter_state_t      *vha;
17212 
17213         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17214 
17215         TASK_DAEMON_LOCK(ha);
17216         for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
17217                 if (FC_PORT_STATE_MASK(vha->state) != state) {
17218                         vha->state = state != FC_STATE_OFFLINE ?
17219                             (FC_PORT_SPEED_MASK(vha->state) | state) : state;
17220                         vha->task_daemon_flags |= flags;
17221                 }
17222         }
17223         ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
17224         TASK_DAEMON_UNLOCK(ha);
17225 
17226         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17227 }
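
      /*
       * Illustrative example only: a caller that has detected loop down
       * could mark every port on the adapter offline in a single call, e.g.
       *
       *      ql_port_state(ha, FC_STATE_OFFLINE, LOOP_DOWN);
       *
       * using the same state and flag values referenced in the function
       * above.
       */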
17228 
17229 /*
17230  * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
17231  *
17232  * Input:       Pointer to the adapter state structure.
17233  * Returns:     Success or Failure.
17234  * Context:     Kernel context.
17235  */
17236 int
17237 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
17238 {
17239         int     rval = DDI_SUCCESS;
17240 
17241         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17242 
17243         ha->el_trace_desc =
17244             (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
17245 
17246         if (ha->el_trace_desc == NULL) {
17247                 cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
17248                     QL_NAME, ha->instance);
17249                 rval = DDI_FAILURE;
17250         } else {
17251                 ha->el_trace_desc->next           = 0;
17252                 ha->el_trace_desc->trace_buffer =
17253                     (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
17254 
17255                 if (ha->el_trace_desc->trace_buffer == NULL) {
17256                         cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
17257                             QL_NAME, ha->instance);
17258                         kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17259                         rval = DDI_FAILURE;
17260                 } else {
17261                         ha->el_trace_desc->trace_buffer_size =
17262                             EL_TRACE_BUF_SIZE;
17263                         mutex_init(&ha->el_trace_desc->mutex, NULL,
17264                             MUTEX_DRIVER, NULL);
17265                 }
17266         }
17267 
17268         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17269 
17270         return (rval);
17271 }
17272 
17273 /*
17274  * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
17275  *
17276  * Input:       Pointer to the adapter state structure.
17277  * Returns:     Success or Failure.
17278  * Context:     Kernel context.
17279  */
17280 int
17281 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
17282 {
17283         int     rval = DDI_SUCCESS;
17284 
17285         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17286 
17287         if (ha->el_trace_desc == NULL) {
17288                 cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
17289                     QL_NAME, ha->instance);
17290                 rval = DDI_FAILURE;
17291         } else {
17292                 if (ha->el_trace_desc->trace_buffer != NULL) {
17293                         kmem_free(ha->el_trace_desc->trace_buffer,
17294                             ha->el_trace_desc->trace_buffer_size);
17295                 }
17296                 mutex_destroy(&ha->el_trace_desc->mutex);
17297                 kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17298         }
17299 
17300         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17301 
17302         return (rval);
17303 }
17304 
17305 /*
17306  * els_cmd_text - Return a pointer to a string describing the command
17307  *
17308  * Input:       els_cmd = the els command opcode.
17309  * Returns:     pointer to a string.
17310  * Context:     Kernel context.
17311  */
17312 char *
17313 els_cmd_text(int els_cmd)
17314 {
17315         cmd_table_t *entry = &els_cmd_tbl[0];
17316 
17317         return (cmd_text(entry, els_cmd));
17318 }
17319 
17320 /*
17321  * mbx_cmd_text - Return a pointer to a string describing the command
17322  *
17323  * Input:       mbx_cmd = the mailbox command opcode.
17324  * Returns:     pointer to a string.
17325  * Context:     Kernel context.
17326  */
17327 char *
17328 mbx_cmd_text(int mbx_cmd)
17329 {
17330         cmd_table_t *entry = &mbox_cmd_tbl[0];
17331 
17332         return (cmd_text(entry, mbx_cmd));
17333 }
17334 
17335 /*
17336  * cmd_text - Return a pointer to a string describing the command
17337  *
17338  * Input:       entry = the command table
17339  *              cmd = the command.
17340  * Returns:     pointer to a string.
17341  * Context:     Kernel context.
17342  */
17343 char *
17344 cmd_text(cmd_table_t *entry, int cmd)
17345 {
17346         for (; entry->cmd != 0; entry++) {
17347                 if (entry->cmd == cmd) {
17348                         break;
17349                 }
17350         }
17351         return (entry->string);
17352 }
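
      /*
       * Note: the command tables are terminated by an entry whose cmd is 0,
       * so an opcode that is not present falls through to that terminating
       * entry and its string is returned; callers always get a printable
       * string back.  Illustrative use (the logging call is only an example):
       *
       *      EL(ha, "unexpected els %xh (%s)\n", els_cmd,
       *          els_cmd_text(els_cmd));
       */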
17353 
17354 /*
17355  * ql_els_24xx_iocb - els request indication.
17356  *
17357  * Input:       ha = adapter state pointer.
17358  *              srb = scsi request block pointer.
17359  *              arg = els passthru entry iocb pointer.
17360  * Returns:
17361  * Context:     Kernel context.
17362  */
17363 void
17364 ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
17365 {
17366         els_descriptor_t        els_desc;
17367 
17368         /* Extract the ELS information */
17369         ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);
17370 
17371         /* Construct the passthru entry */
17372         ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
17373 
17374         /* Ensure correct endianness */
17375         ql_isp_els_handle_cmd_endian(ha, srb);
17376 }
17377 
17378 /*
17379  * ql_fca_isp_els_request - Extract into an els descriptor the info required
17380  *                          to build an els_passthru iocb from an fc packet.
17381  *
17382  * Input:       ha = adapter state pointer.
17383  *              pkt = fc packet pointer
17384  *              els_desc = els descriptor pointer
17385  * Returns:
17386  * Context:     Kernel context.
17387  */
17388 static void
17389 ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
17390     els_descriptor_t *els_desc)
17391 {
17392         ls_code_t       els;
17393 
17394         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17395             (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17396 
17397         els_desc->els = els.ls_code;
17398 
17399         els_desc->els_handle = ha->hba_buf.acc_handle;
17400         els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
17401         els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
17402         /* Use n_port_handle only if it is a valid local loop ID (< 0x7d). */
17403         if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17404                 els_desc->n_port_handle = ha->n_port->n_port_handle;
17405         } else {
17406                 els_desc->n_port_handle = 0;
17407         }
17408         els_desc->control_flags = 0;
17409         els_desc->cmd_byte_count = pkt->pkt_cmdlen;
17410         /*
17411          * Transmit DSD. This field defines the Fibre Channel Frame payload
17412          * (without the frame header) in system memory.
17413          */
17414         els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
17415         els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
17416         els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;
17417 
17418         els_desc->rsp_byte_count = pkt->pkt_rsplen;
17419         /*
17420          * Receive DSD. This field defines the ELS response payload buffer
17421          * for the ISP24xx firmware transferring the received ELS
17422          * response frame to a location in host memory.
17423          */
17424         els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
17425         els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
17426         els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
17427 }
17428 
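/*
 * A minimal sketch of the DSD address split used above, assuming the
 * LSD() and MSD() macros simply select the low and high 32 bits of the
 * 64-bit dmac_laddress; the helper name and its plain uint32_t outputs
 * are illustrative only.
 */
static void
example_split_dma_address(uint64_t dmac_laddress, uint32_t *lo, uint32_t *hi)
{
        *lo = (uint32_t)(dmac_laddress & 0xffffffffULL);        /* LSD() */
        *hi = (uint32_t)(dmac_laddress >> 32);                  /* MSD() */
}
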
17429 /*
17430  * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
17431  * using the els descriptor.
17432  *
17433  * Input:       ha = adapter state pointer.
17434  *              els_desc = els descriptor pointer.
17435  *              els_entry = els passthru entry iocb pointer.
17436  * Returns:
17437  * Context:     Kernel context.
17438  */
17439 static void
17440 ql_isp_els_request_ctor(els_descriptor_t *els_desc,
17441     els_passthru_entry_t *els_entry)
17442 {
17443         uint32_t        *ptr32;
17444 
17445         /*
17446          * Construct command packet.
17447          */
17448         ddi_put8(els_desc->els_handle, &els_entry->entry_type,
17449             (uint8_t)ELS_PASSTHRU_TYPE);
17450         ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
17451             els_desc->n_port_handle);
17452         ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
17453         ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
17454             (uint32_t)0);
17455         ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
17456             els_desc->els);
17457         ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
17458             els_desc->d_id.b.al_pa);
17459         ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
17460             els_desc->d_id.b.area);
17461         ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
17462             els_desc->d_id.b.domain);
17463         ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
17464             els_desc->s_id.b.al_pa);
17465         ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
17466             els_desc->s_id.b.area);
17467         ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
17468             els_desc->s_id.b.domain);
17469         ddi_put16(els_desc->els_handle, &els_entry->control_flags,
17470             els_desc->control_flags);
17471         ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
17472             els_desc->rsp_byte_count);
17473         ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
17474             els_desc->cmd_byte_count);
17475         /* Load transmit data segments and count. */
17476         ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
17477         ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
17478         ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
17479         ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
17480         ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
17481         ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
17482         ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
17483         ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
17484         ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
17485 }
17486 
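/*
 * A minimal sketch of the IOCB tail layout that the incrementing ptr32
 * walk above relies on: the transmit data segment (two address words
 * and a length) is immediately followed by the receive data segment,
 * so six consecutive ddi_put32() calls fill both.  The struct below
 * only illustrates that ordering; it is not the real
 * els_passthru_entry_t definition.
 */
struct example_els_passthru_tail {
        uint32_t        xmt_dseg_address[2];    /* tx_dsd.addr[0..1] */
        uint32_t        xmt_dseg_length;        /* tx_dsd.length */
        uint32_t        rcv_dseg_address[2];    /* rx_dsd.addr[0..1] */
        uint32_t        rcv_dseg_length;        /* rx_dsd.length */
};
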
17487 /*
17488  * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17489  *                                in host memory.
17490  *
17491  * Input:       ha = adapter state pointer.
17492  *              srb = scsi request block
17493  * Returns:
17494  * Context:     Kernel context.
17495  */
17496 void
17497 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17498 {
17499         ls_code_t       els;
17500         fc_packet_t     *pkt;
17501         uint8_t         *ptr;
17502 
17503         pkt = srb->pkt;
17504 
17505         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17506             (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17507 
17508         ptr = (uint8_t *)pkt->pkt_cmd;
17509 
17510         ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17511 }
17512 
17513 /*
17514  * ql_isp_els_handle_rsp_endian - els responses must be in big endian
17515  *                                in host memory.
17516  * Input:       ha = adapter state pointer.
17517  *              srb = scsi request block
17518  * Returns:
17519  * Context:     Kernel context.
17520  */
17521 void
17522 ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17523 {
17524         ls_code_t       els;
17525         fc_packet_t     *pkt;
17526         uint8_t         *ptr;
17527 
17528         pkt = srb->pkt;
17529 
17530         ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17531             (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17532 
17533         ptr = (uint8_t *)pkt->pkt_resp;
17534         BIG_ENDIAN_32(&els);
17535         ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17536 }
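/*
 * A minimal sketch of how the two handlers above pair up on an ELS
 * exchange, assuming the command payload is converted before the
 * passthru IOCB is issued and the response payload is converted after
 * the firmware completes it; the wrapper name is illustrative only.
 */
static void
example_els_endian_round_trip(ql_adapter_state_t *ha, ql_srb_t *srb)
{
        ql_isp_els_handle_cmd_endian(ha, srb);  /* convert the request */
        /* ... firmware executes the ELS passthru IOCB ... */
        ql_isp_els_handle_rsp_endian(ha, srb);  /* convert the response */
}
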
17537 
17538 /*
17539  * ql_isp_els_handle_endian - els requests/responses must be in big endian
17540  *                            in host memory.
17541  * Input:       ha = adapter state pointer.
17542  *              ptr = els request/response buffer pointer.
17543  *              ls_code = els command code.
17544  * Returns:
17545  * Context:     Kernel context.
17546  */
17547 void
17548 ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
17549 {
17550         switch (ls_code) {
17551         case LA_ELS_PLOGI: {
17552                 BIG_ENDIAN_32(ptr);     /* Command Code */
17553                 ptr += 4;
17554                 BIG_ENDIAN_16(ptr);     /* FC-PH version */
17555                 ptr += 2;
17556                 BIG_ENDIAN_16(ptr);     /* b2b credit */
17557                 ptr += 2;
17558                 BIG_ENDIAN_16(ptr);     /* Cmn Feature flags */
17559                 ptr += 2;
17560                 BIG_ENDIAN_16(ptr);     /* Rcv data size */
17561                 ptr += 2;
17562                 BIG_ENDIAN_16(ptr);     /* Concurrent Seq */
17563                 ptr += 2;
17564                 BIG_ENDIAN_16(ptr);     /* Rel offset */
17565                 ptr += 2;
17566                 BIG_ENDIAN_32(ptr);     /* E_D_TOV */
17567                 ptr += 4;               /* Port Name */
17568                 ptr += 8;               /* Node Name */
17569                 ptr += 8;               /* Class 1 */
17570                 ptr += 16;              /* Class 2 */
17571                 ptr += 16;              /* Class 3 */
17572                 BIG_ENDIAN_16(ptr);     /* Service options */
17573                 ptr += 2;
17574                 BIG_ENDIAN_16(ptr);     /* Initiator control */
17575                 ptr += 2;
17576                 BIG_ENDIAN_16(ptr);     /* Recipient Control */
17577                 ptr += 2;
17578                 BIG_ENDIAN_16(ptr);     /* Rcv size */
17579                 ptr += 2;
17580                 BIG_ENDIAN_16(ptr);     /* Concurrent Seq */
17581                 ptr += 2;
17582                 BIG_ENDIAN_16(ptr);     /* N_Port e2e credit */
17583                 ptr += 2;
17584                 BIG_ENDIAN_16(ptr);     /* Open Seq/Exch */
17585                 break;
17586         }
17587         case LA_ELS_PRLI: {
17588                 BIG_ENDIAN_32(ptr);     /* Command Code/Page length */
17589                 ptr += 4;               /* Type */
17590                 ptr += 2;
17591                 BIG_ENDIAN_16(ptr);     /* Flags */
17592                 ptr += 2;
17593                 BIG_ENDIAN_32(ptr);     /* Originator Process associator  */
17594                 ptr += 4;
17595                 BIG_ENDIAN_32(ptr);     /* Responder Process associator */
17596                 ptr += 4;
17597                 BIG_ENDIAN_32(ptr);     /* Flags */
17598                 break;
17599         }
17600         default:
17601                 EL(ha, "can't handle els code %x\n", ls_code);
17602                 break;
17603         }
17604 }
17605 
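/*
 * A minimal sketch of the in-place swaps assumed above, showing the
 * little-endian host case: BIG_ENDIAN_16() and BIG_ENDIAN_32() reverse
 * the bytes at the given address so the field is stored big endian (on
 * a big-endian host they would be no-ops).  These helpers are
 * illustrative equivalents, not the driver's actual macro definitions.
 */
static void
example_big_endian_16(uint8_t *p)
{
        uint8_t t;

        t = p[0]; p[0] = p[1]; p[1] = t;
}

static void
example_big_endian_32(uint8_t *p)
{
        uint8_t t;

        t = p[0]; p[0] = p[3]; p[3] = t;
        t = p[1]; p[1] = p[2]; p[2] = t;
}
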
17606 /*
17607  * ql_n_port_plogi
17608  *      In N_Port-to-N_Port (point-to-point) topology, where the remote
17609  *      N_Port has logged in with the firmware because it held the login
17610  *      initiative, we send up a PLOGI by proxy so the login can continue.
17611  *
17612  * Input:
17613  *      ha = adapter state pointer.
17614  * Returns:
17615  *
17616  * Context:
17617  *      Kernel context.
17618  */
17619 static int
17620 ql_n_port_plogi(ql_adapter_state_t *ha)
17621 {
17622         int             rval;
17623         ql_tgt_t        *tq = NULL;
17624         ql_head_t       done_q = { NULL, NULL };
17625 
17626         rval = QL_SUCCESS;
17627 
17628         if (ha->topology & QL_N_PORT) {
17629                 /* If we're doing this, the n_port_handle must be good. */
17630                 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17631                         tq = ql_loop_id_to_queue(ha,
17632                             ha->n_port->n_port_handle);
17633                         if (tq != NULL) {
17634                                 (void) ql_send_plogi(ha, tq, &done_q);
17635                         } else {
17636                                 EL(ha, "n_port_handle = %x, tq = %x\n",
17637                                     ha->n_port->n_port_handle, tq);
17638                         }
17639                 } else {
17640                         EL(ha, "n_port_handle = %x, tq = %x\n",
17641                             ha->n_port->n_port_handle, tq);
17642                 }
17643                 if (done_q.first != NULL) {
17644                         ql_done(done_q.first);
17645                 }
17646         }
17647         return (rval);
17648 }
17649 
17650 /*
17651  * Compare two WWNs as big-endian values; the full 64-bit WWN,
17652  * including the NAA, takes part in the comparison.
17653  *
17654  * Note that the indentation used in this function isn't according
17655  * to Sun recommendations; it is indented to make it easier to read.
17656  *
17657  * Return Values:
17658  *   if first == second return  0
17659  *   if first > second  return  1
17660  *   if first < second  return -1
17661  */
17662 int
17663 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17664 {
17665         la_wwn_t t1, t2;
17666         int rval;
17667 
17668         EL(ha, "WWPN=%08x%08x\n",
17669             BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17670         EL(ha, "WWPN=%08x%08x\n",
17671             BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17672         /*
17673          * Fibre Channel protocol is big endian, so compare
17674          * as big endian values
17675          */
17676         t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17677         t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17678 
17679         t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17680         t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17681 
17682         if (t1.i_wwn[0] == t2.i_wwn[0]) {
17683                 if (t1.i_wwn[1] == t2.i_wwn[1]) {
17684                         rval = 0;
17685                 } else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17686                         rval = 1;
17687                 } else {
17688                         rval = -1;
17689                 }
17690         } else {
17691                 if (t1.i_wwn[0] > t2.i_wwn[0]) {
17692                         rval = 1;
17693                 } else {
17694                         rval = -1;
17695                 }
17696         }
17697         return (rval);
17698 }
17699 
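/*
 * A minimal sketch of an equivalent ordering, assuming raw_wwn[] holds
 * the WWN in wire (big-endian) byte order: an unsigned byte-wise
 * comparison sorts the names the same way as the word-wise compare
 * above.  The helper name is illustrative only.
 */
static int
example_wwn_cmp(const la_wwn_t *first, const la_wwn_t *second)
{
        int     c = memcmp(first->raw_wwn, second->raw_wwn, 8);

        return (c == 0 ? 0 : (c > 0 ? 1 : -1));
}
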
17700 /*
17701  * ql_wait_for_td_stop
17702  *      Wait for task daemon to stop running.  Internal command timeout
17703  *      is approximately 30 seconds, so it may help in some corner
17704  *      cases to wait that long.
17705  *
17706  * Input:
17707  *      ha = adapter state pointer.
17708  *
17709  * Returns:
17710  *      DDI_SUCCESS or DDI_FAILURE.
17711  *
17712  * Context:
17713  *      Kernel context.
17714  */
17715 
17716 static int
17717 ql_wait_for_td_stop(ql_adapter_state_t *ha)
17718 {
17719         int     rval = DDI_FAILURE;
17720         uint16_t        wait_cnt;
17721 
17722         for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17723                 /* The task daemon clears the stop flag on exit. */
17724                 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17725                         if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17726                             ddi_in_panic()) {
17727                                 drv_usecwait(10000);
17728                         } else {
17729                                 delay(drv_usectohz(10000));
17730                         }
17731                 } else {
17732                         rval = DDI_SUCCESS;
17733                         break;
17734                 }
17735         }
17736         return (rval);
17737 }
17738 
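/*
 * A minimal sketch of the timeout arithmetic used above: 3000 polls at
 * 10000 microseconds each bound the wait at roughly the 30 second
 * internal command timeout mentioned in the comment.  The macro names
 * are illustrative only.
 */
#define EXAMPLE_TD_STOP_POLLS           3000    /* loop iterations */
#define EXAMPLE_TD_STOP_POLL_USEC       10000   /* 10 ms per poll */
/* 3000 * 10000 us == 30,000,000 us == 30 seconds */
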
17739 /*
17740  * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
17741  *
17742  * Input:       Pointer to the adapter state structure.
17743  * Returns:     Success or Failure.
17744  * Context:     Kernel context.
17745  */
17746 int
17747 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
17748 {
17749         int     rval = DDI_SUCCESS;
17750 
17751         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17752 
17753         ha->nvram_cache =
17754             (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
17755             KM_SLEEP);
17756 
17757         if (ha->nvram_cache == NULL) {
17758                 cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
17759                     " descriptor", QL_NAME, ha->instance);
17760                 rval = DDI_FAILURE;
17761         } else {
17762                 if (CFG_IST(ha, CFG_CTRL_24258081)) {
17763                         ha->nvram_cache->size = sizeof (nvram_24xx_t);
17764                 } else {
17765                         ha->nvram_cache->size = sizeof (nvram_t);
17766                 }
17767                 ha->nvram_cache->cache =
17768                     (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
17769                 if (ha->nvram_cache->cache == NULL) {
17770                         cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
17771                             QL_NAME, ha->instance);
17772                         kmem_free(ha->nvram_cache,
17773                             sizeof (nvram_cache_desc_t));
17774                         ha->nvram_cache = NULL;
17775                         rval = DDI_FAILURE;
17776                 } else {
17777                         mutex_init(&ha->nvram_cache->mutex, NULL,
17778                             MUTEX_DRIVER, NULL);
17779                         ha->nvram_cache->valid = 0;
17780                 }
17781         }
17782 
17783         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17784 
17785         return (rval);
17786 }
17787 
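/*
 * A minimal sketch of how the descriptor built above is intended to be
 * used: callers serialize on the descriptor mutex, copy an already-read
 * NVRAM image into the cache once, and mark it valid so later lookups
 * can skip the hardware access.  The helper name and its nvram_image
 * parameter are illustrative assumptions, not driver interfaces.
 */
static void
example_nvram_cache_fill(ql_adapter_state_t *ha, void *nvram_image)
{
        nvram_cache_desc_t      *nc = ha->nvram_cache;

        mutex_enter(&nc->mutex);
        if (nc->valid == 0) {
                bcopy(nvram_image, nc->cache, nc->size);
                nc->valid = 1;
        }
        mutex_exit(&nc->mutex);
}
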
17788 /*
17789  * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
17790  *
17791  * Input:       Pointer to the adapter state structure.
17792  * Returns:     Success or Failure.
17793  * Context:     Kernel context.
17794  */
17795 int
17796 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
17797 {
17798         int     rval = DDI_SUCCESS;
17799 
17800         QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17801 
17802         if (ha->nvram_cache == NULL) {
17803                 cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
17804                     QL_NAME, ha->instance);
17805                 rval = DDI_FAILURE;
17806         } else {
17807                 if (ha->nvram_cache->cache != NULL) {
17808                         kmem_free(ha->nvram_cache->cache,
17809                             ha->nvram_cache->size);
17810                 }
17811                 mutex_destroy(&ha->nvram_cache->mutex);
17812                 kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
17813         }
17814 
17815         QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17816 
17817         return (rval);
17818 }
17819 
17820 /*
17821  * ql_process_idc_event - Handle an Inter-Driver Communication async event.
17822  *
17823  * Input:       Pointer to the adapter state structure.
17824  * Returns:     void
17825  * Context:     Kernel context.
17826  */
17827 static void
17828 ql_process_idc_event(ql_adapter_state_t *ha)
17829 {
17830         int     rval;
17831 
17832         switch (ha->idc_mb[0]) {
17833         case MBA_IDC_NOTIFICATION:
17834                 /*
17835                  * The informational opcode (idc_mb[2]) can be a
17836                  * defined value or the mailbox command being executed
17837                  * on another function which stimulated this IDC message.
17838                  */
17839                 ADAPTER_STATE_LOCK(ha);
17840                 switch (ha->idc_mb[2]) {
17841                 case IDC_OPC_DRV_START:
17842                         if (ha->idc_flash_acc != 0) {
17843                                 ha->idc_flash_acc--;
17844                                 if (ha->idc_flash_acc == 0) {
17845                                         ha->idc_flash_acc_timer = 0;
17846                                         GLOBAL_HW_UNLOCK();
17847                                 }
17848                         }
17849                         if (ha->idc_restart_cnt != 0) {
17850                                 ha->idc_restart_cnt--;
17851                                 if (ha->idc_restart_cnt == 0) {
17852                                         ha->idc_restart_timer = 0;
17853                                         ADAPTER_STATE_UNLOCK(ha);
17854                                         TASK_DAEMON_LOCK(ha);
17855                                         ha->task_daemon_flags &= ~DRIVER_STALL;
17856                                         TASK_DAEMON_UNLOCK(ha);
17857                                         ql_restart_queues(ha);
17858                                 } else {
17859                                         ADAPTER_STATE_UNLOCK(ha);
17860                                 }
17861                         } else {
17862                                 ADAPTER_STATE_UNLOCK(ha);
17863                         }
17864                         break;
17865                 case IDC_OPC_FLASH_ACC:
17866                         ha->idc_flash_acc_timer = 30;
17867                         if (ha->idc_flash_acc == 0) {
17868                                 GLOBAL_HW_LOCK();
17869                         }
17870                         ha->idc_flash_acc++;
17871                         ADAPTER_STATE_UNLOCK(ha);
17872                         break;
17873                 case IDC_OPC_RESTART_MPI:
17874                         ha->idc_restart_timer = 30;
17875                         ha->idc_restart_cnt++;
17876                         ADAPTER_STATE_UNLOCK(ha);
17877                         TASK_DAEMON_LOCK(ha);
17878                         ha->task_daemon_flags |= DRIVER_STALL;
17879                         TASK_DAEMON_UNLOCK(ha);
17880                         break;
17881                 case IDC_OPC_PORT_RESET_MBC:
17882                 case IDC_OPC_SET_PORT_CONFIG_MBC:
17883                         ha->idc_restart_timer = 30;
17884                         ha->idc_restart_cnt++;
17885                         ADAPTER_STATE_UNLOCK(ha);
17886                         TASK_DAEMON_LOCK(ha);
17887                         ha->task_daemon_flags |= DRIVER_STALL;
17888                         TASK_DAEMON_UNLOCK(ha);
17889                         (void) ql_wait_outstanding(ha);
17890                         break;
17891                 default:
17892                         ADAPTER_STATE_UNLOCK(ha);
17893                         EL(ha, "Unknown IDC opcode=%xh %xh\n", ha->idc_mb[0],
17894                             ha->idc_mb[2]);
17895                         break;
17896                 }
17897                 /*
17898                  * If there is a timeout value associated with this IDC
17899                  * notification then there is an implied requirement
17900                  * that we return an ACK.
17901                  */
17902                 if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
17903                         rval = ql_idc_ack(ha);
17904                         if (rval != QL_SUCCESS) {
17905                                 EL(ha, "idc_ack status=%xh %xh\n", rval,
17906                                     ha->idc_mb[2]);
17907                         }
17908                 }
17909                 break;
17910         case MBA_IDC_COMPLETE:
17911                 /*
17912                  * We don't ACK completions; only these opcodes require action.
17913                  */
17914                 switch (ha->idc_mb[2]) {
17915                 case IDC_OPC_PORT_RESET_MBC:
17916                 case IDC_OPC_SET_PORT_CONFIG_MBC:
17917                         ADAPTER_STATE_LOCK(ha);
17918                         if (ha->idc_restart_cnt != 0) {
17919                                 ha->idc_restart_cnt--;
17920                                 if (ha->idc_restart_cnt == 0) {
17921                                         ha->idc_restart_timer = 0;
17922                                         ADAPTER_STATE_UNLOCK(ha);
17923                                         TASK_DAEMON_LOCK(ha);
17924                                         ha->task_daemon_flags &= ~DRIVER_STALL;
17925                                         TASK_DAEMON_UNLOCK(ha);
17926                                         ql_restart_queues(ha);
17927                                 } else {
17928                                         ADAPTER_STATE_UNLOCK(ha);
17929                                 }
17930                         } else {
17931                                 ADAPTER_STATE_UNLOCK(ha);
17932                         }
17933                         break;
17934                 default:
17935                         break; /* Don't care... */
17936                 }
17937                 break;
17938         case MBA_IDC_TIME_EXTENDED:
17939                 QL_PRINT_10(CE_CONT, "(%d): MBA_IDC_TIME_EXTENDED="
17940                     "%xh\n", ha->instance, ha->idc_mb[2]);
17941                 break;
17942         default:
17943                 EL(ha, "Inconsistent IDC event =%xh %xh\n", ha->idc_mb[0],
17944                     ha->idc_mb[2]);
17945                 ADAPTER_STATE_UNLOCK(ha);
17946                 break;
17947         }
17948 }
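
/*
 * A minimal sketch of the flash-access counting handled above, assuming
 * GLOBAL_HW_LOCK()/GLOBAL_HW_UNLOCK() guard the shared flash while
 * another function updates it: the lock is taken when the count goes
 * from zero to one on IDC_OPC_FLASH_ACC and released when a matching
 * IDC_OPC_DRV_START brings it back to zero.  The helper names are
 * illustrative only, and the real code runs under ADAPTER_STATE_LOCK.
 */
static void
example_idc_flash_acc_begin(ql_adapter_state_t *ha)
{
        ha->idc_flash_acc_timer = 30;   /* re-arm the 30 second watchdog */
        if (ha->idc_flash_acc++ == 0) {
                GLOBAL_HW_LOCK();
        }
}

static void
example_idc_flash_acc_end(ql_adapter_state_t *ha)
{
        if (ha->idc_flash_acc != 0 && --ha->idc_flash_acc == 0) {
                ha->idc_flash_acc_timer = 0;
                GLOBAL_HW_UNLOCK();
        }
}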