1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
  25  */
  26 
  27 /*
  28  * sf - Solaris Fibre Channel driver
  29  *
  30  * This module implements some of the Fibre Channel FC-4 layer, converting
  31  * from FC frames to SCSI and back.  (Note: no sequence management is done
  32  * here, though.)
  33  */
  34 
  35 #if defined(lint) && !defined(DEBUG)
  36 #define DEBUG   1
  37 #endif
  38 
  39 /*
  40  * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
  41  * Need to use the ugly RAID LUN mappings in FCP Annex D
  42  * to prevent SCSA from barfing.  This *REALLY* needs to
  43  * be addressed by the standards committee.
  44  */
  45 #define RAID_LUNS       1
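/*
 * (With RAID_LUNS defined, LUNs are handled as plain ints rather than
 * int64_t values -- see the alternate sf_lookup_target() prototypes and
 * the lun declaration in sf_scsi_tgt_init() below.)
 */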
  46 
  47 #ifdef DEBUG
  48 static int sfdebug = 0;
  49 #include <sys/debug.h>
  50 
  51 #define SF_DEBUG(level, args) \
  52         if (sfdebug >= (level)) sf_log args
  53 #else
  54 #define SF_DEBUG(level, args)
  55 #endif
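/*
 * Usage example (as in the attach code below):
 *	SF_DEBUG(2, (sf, CE_CONT,
 *	    "sf_attach: DDI_RESUME for sf%d\n", instance));
 * The inner parentheses pass the whole argument list through to sf_log().
 */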
  56 
  57 static int sf_bus_config_debug = 0;
  58 
  59 /* Why do I have to do this? */
  60 #define offsetof(s, m)  (size_t)(&(((s *)0)->m))
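/*
 * (presumably because offsetof() is not made visible by the kernel headers
 * included below; this is the classic null-pointer offsetof idiom)
 */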
  61 
  62 #include <sys/scsi/scsi.h>
  63 #include <sys/fc4/fcal.h>
  64 #include <sys/fc4/fcp.h>
  65 #include <sys/fc4/fcal_linkapp.h>
  66 #include <sys/socal_cq_defs.h>
  67 #include <sys/fc4/fcal_transport.h>
  68 #include <sys/fc4/fcio.h>
  69 #include <sys/scsi/adapters/sfvar.h>
  70 #include <sys/scsi/impl/scsi_reset_notify.h>
  71 #include <sys/stat.h>
  72 #include <sys/varargs.h>
  73 #include <sys/var.h>
  74 #include <sys/thread.h>
  75 #include <sys/proc.h>
  76 #include <sys/kstat.h>
  77 #include <sys/devctl.h>
  78 #include <sys/scsi/targets/ses.h>
  79 #include <sys/callb.h>
  80 
  81 static int sf_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
  82 static int sf_attach(dev_info_t *, ddi_attach_cmd_t);
  83 static int sf_detach(dev_info_t *, ddi_detach_cmd_t);
  84 static void sf_softstate_unlink(struct sf *);
  85 static int sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
  86     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
  87 static int sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
  88     ddi_bus_config_op_t op, void *arg);
  89 static int sf_scsi_tgt_init(dev_info_t *, dev_info_t *,
  90     scsi_hba_tran_t *, struct scsi_device *);
  91 static void sf_scsi_tgt_free(dev_info_t *, dev_info_t *,
  92     scsi_hba_tran_t *, struct scsi_device *);
  93 static int sf_pkt_alloc_extern(struct sf *, struct sf_pkt *,
  94     int, int, int);
  95 static void sf_pkt_destroy_extern(struct sf *, struct sf_pkt *);
  96 static struct scsi_pkt *sf_scsi_init_pkt(struct scsi_address *,
  97     struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
  98 static void sf_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
  99 static void sf_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
 100 static void sf_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
 101 static int sf_scsi_reset_notify(struct scsi_address *, int,
 102     void (*)(caddr_t), caddr_t);
 103 static int sf_scsi_get_name(struct scsi_device *, char *, int);
 104 static int sf_scsi_get_bus_addr(struct scsi_device *, char *, int);
 105 static int sf_add_cr_pool(struct sf *);
 106 static int sf_cr_alloc(struct sf *, struct sf_pkt *, int (*)());
 107 static void sf_cr_free(struct sf_cr_pool *, struct sf_pkt *);
 108 static void sf_crpool_free(struct sf *);
 109 static int sf_kmem_cache_constructor(void *, void *, int);
 110 static void sf_kmem_cache_destructor(void *, void *);
 111 static void sf_statec_callback(void *, int);
 112 static int sf_login(struct sf *, uchar_t, uchar_t, uint_t, int);
 113 static int sf_els_transport(struct sf *, struct sf_els_hdr *);
 114 static void sf_els_callback(struct fcal_packet *);
 115 static int sf_do_prli(struct sf *, struct sf_els_hdr *, struct la_els_logi *);
 116 static int sf_do_adisc(struct sf *, struct sf_els_hdr *);
 117 static int sf_do_reportlun(struct sf *, struct sf_els_hdr *,
 118     struct sf_target *);
 119 static void sf_reportlun_callback(struct fcal_packet *);
 120 static int sf_do_inquiry(struct sf *, struct sf_els_hdr *,
 121     struct sf_target *);
 122 static void sf_inq_callback(struct fcal_packet *);
 123 static struct fcal_packet *sf_els_alloc(struct sf *, uchar_t, int, int,
 124     int, caddr_t *, caddr_t *);
 125 static void sf_els_free(struct fcal_packet *);
 126 static struct sf_target *sf_create_target(struct sf *,
 127     struct sf_els_hdr *, int, int64_t);
 128 #ifdef RAID_LUNS
 129 static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int);
 130 #else
 131 static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int64_t);
 132 #endif
 133 static void sf_finish_init(struct sf *, int);
 134 static void sf_offline_target(struct sf *, struct sf_target *);
 135 static void sf_create_devinfo(struct sf *, struct sf_target *, int);
 136 static int sf_create_props(dev_info_t *, struct sf_target *, int);
 137 static int sf_commoncap(struct scsi_address *, char *, int, int, int);
 138 static int sf_getcap(struct scsi_address *, char *, int);
 139 static int sf_setcap(struct scsi_address *, char *, int, int);
 140 static int sf_abort(struct scsi_address *, struct scsi_pkt *);
 141 static int sf_reset(struct scsi_address *, int);
 142 static void sf_abort_all(struct sf *, struct sf_target *, int, int, int);
 143 static int sf_start(struct scsi_address *, struct scsi_pkt *);
 144 static int sf_start_internal(struct sf *, struct sf_pkt *);
 145 static void sf_fill_ids(struct sf *, struct sf_pkt *, struct sf_target *);
 146 static int sf_prepare_pkt(struct sf *, struct sf_pkt *, struct sf_target *);
 147 static int sf_dopoll(struct sf *, struct sf_pkt *);
 148 static void sf_cmd_callback(struct fcal_packet *);
 149 static void sf_throttle(struct sf *);
 150 static void sf_watch(void *);
 151 static void sf_throttle_start(struct sf *);
 152 static void sf_check_targets(struct sf *);
 153 static void sf_check_reset_delay(void *);
 154 static int sf_target_timeout(struct sf *, struct sf_pkt *);
 155 static void sf_force_lip(struct sf *);
 156 static void sf_unsol_els_callback(void *, soc_response_t *, caddr_t);
 157 static struct sf_els_hdr *sf_els_timeout(struct sf *, struct sf_els_hdr *);
 158 /*PRINTFLIKE3*/
 159 static void sf_log(struct sf *, int, const char *, ...);
 160 static int sf_kstat_update(kstat_t *, int);
 161 static int sf_open(dev_t *, int, int, cred_t *);
 162 static int sf_close(dev_t, int, int, cred_t *);
 163 static int sf_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
 164 static struct sf_target *sf_get_target_from_dip(struct sf *, dev_info_t *);
 165 static int sf_bus_get_eventcookie(dev_info_t *, dev_info_t *, char *,
 166     ddi_eventcookie_t *);
 167 static int sf_bus_add_eventcall(dev_info_t *, dev_info_t *,
 168     ddi_eventcookie_t, void (*)(), void *, ddi_callback_id_t *cb_id);
 169 static int sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id);
 170 static int sf_bus_post_event(dev_info_t *, dev_info_t *,
 171     ddi_eventcookie_t, void *);
 172 
 173 static void sf_hp_daemon(void *);
 174 
 175 /*
 176  * this is required to be able to supply a control node
 177  * where ioctls can be executed
 178  */
 179 struct cb_ops sf_cb_ops = {
 180         sf_open,                        /* open */
 181         sf_close,                       /* close */
 182         nodev,                          /* strategy */
 183         nodev,                          /* print */
 184         nodev,                          /* dump */
 185         nodev,                          /* read */
 186         nodev,                          /* write */
 187         sf_ioctl,                       /* ioctl */
 188         nodev,                          /* devmap */
 189         nodev,                          /* mmap */
 190         nodev,                          /* segmap */
 191         nochpoll,                       /* poll */
 192         ddi_prop_op,                    /* cb_prop_op */
 193         0,                              /* streamtab  */
 194         D_MP | D_NEW | D_HOTPLUG        /* driver flags */
 195 
 196 };
 197 
 198 /*
 199  * autoconfiguration routines.
 200  */
 201 static struct dev_ops sf_ops = {
 202         DEVO_REV,               /* devo_rev, */
 203         0,                      /* refcnt  */
 204         sf_info,                /* info */
 205         nulldev,                /* identify */
 206         nulldev,                /* probe */
 207         sf_attach,              /* attach */
 208         sf_detach,              /* detach */
 209         nodev,                  /* reset */
 210         &sf_cb_ops,         /* driver operations */
 211         NULL,                   /* bus operations */
 212         NULL,                   /* power management */
 213         ddi_quiesce_not_supported,      /* devo_quiesce */
 214 };
 215 
 216 #define SF_NAME "FC-AL FCP Nexus Driver"        /* Name of the module. */
 217 static  char    sf_version[] = "1.72 08/19/2008"; /* version of the module */
 218 
 219 static struct modldrv modldrv = {
 220         &mod_driverops, /* Type of module. This one is a driver */
 221         SF_NAME,
 222         &sf_ops,    /* driver ops */
 223 };
 224 
 225 static struct modlinkage modlinkage = {
 226         MODREV_1, (void *)&modldrv, NULL
 227 };
 228 
 229 /* XXXXXX The following is here to handle broken targets -- remove it later */
 230 static int sf_reportlun_forever = 0;
 231 /* XXXXXX */
 232 static int sf_lip_on_plogo = 0;
 233 static int sf_els_retries = SF_ELS_RETRIES;
 234 static struct sf *sf_head = NULL;
 235 static int sf_target_scan_cnt = 4;
 236 static int sf_pkt_scan_cnt = 5;
 237 static int sf_pool_scan_cnt = 1800;
 238 static void *sf_state = NULL;
 239 static int sf_watchdog_init = 0;
 240 static int sf_watchdog_time = 0;
 241 static int sf_watchdog_timeout = 1;
 242 static int sf_watchdog_tick;
 243 static int sf_watch_running = 0;
 244 static timeout_id_t sf_watchdog_id;
 245 static timeout_id_t sf_reset_timeout_id;
 246 static int sf_max_targets = SF_MAX_TARGETS;
 247 static kmutex_t sf_global_mutex;
 248 static int sf_core = 0;
 249 int *sf_token = NULL; /* Must not be static or lint complains. */
 250 static kcondvar_t sf_watch_cv;
 251 extern pri_t minclsyspri;
 252 static ddi_eventcookie_t        sf_insert_eid;
 253 static ddi_eventcookie_t        sf_remove_eid;
 254 
 255 static ndi_event_definition_t   sf_event_defs[] = {
 256 { SF_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL, 0 },
 257 { SF_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT, 0 }
 258 };
 259 
 260 #define SF_N_NDI_EVENTS \
 261         (sizeof (sf_event_defs) / sizeof (ndi_event_definition_t))
 262 
 263 #ifdef DEBUG
 264 static int sf_lip_flag = 1;             /* bool: to allow LIPs */
 265 static int sf_reset_flag = 1;           /* bool: to allow reset after LIP */
 266 static int sf_abort_flag = 0;           /* bool: to do just one abort */
 267 #endif
 268 
 269 extern int64_t ddi_get_lbolt64(void);
 270 
 271 /*
 272  * for converting between target number (switch) and hard address/AL_PA
 273  */
 274 static uchar_t sf_switch_to_alpa[] = {
 275         0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
 276         0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
 277         0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
 278         0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
 279         0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
 280         0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
 281         0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
 282         0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
 283         0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
 284         0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
 285         0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
 286         0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
 287         0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
 288 };
 289 
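/*
 * and the reverse: hard address/AL_PA back to target number (switch)
 */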
 290 static uchar_t sf_alpa_to_switch[] = {
 291         0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
 292         0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
 293         0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
 294         0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
 295         0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
 296         0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
 297         0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
 298         0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
 299         0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
 300         0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
 301         0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
 302         0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
 303         0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
 304         0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
 305         0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
 306         0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
 307         0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
 308         0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
 309         0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
 310         0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
 311         0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
 312         0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
 313         0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
 314         0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 315 };
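/*
 * For valid entries the two tables are inverses of each other, e.g.
 * sf_switch_to_alpa[0] == 0xef and sf_alpa_to_switch[0xef] == 0.
 */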
 316 
 317 /*
 318  * these macros call the proper transport-layer function given
 319  * a particular transport
 320  */
 321 #define soc_transport(a, b, c, d) (*a->fcal_ops->fcal_transport)(b, c, d)
 322 #define soc_transport_poll(a, b, c, d)\
 323         (*a->fcal_ops->fcal_transport_poll)(b, c, d)
 324 #define soc_get_lilp_map(a, b, c, d, e)\
 325         (*a->fcal_ops->fcal_lilp_map)(b, c, d, e)
 326 #define soc_force_lip(a, b, c, d, e)\
 327         (*a->fcal_ops->fcal_force_lip)(b, c, d, e)
 328 #define soc_abort(a, b, c, d, e)\
 329         (*a->fcal_ops->fcal_abort_cmd)(b, c, d, e)
 330 #define soc_force_reset(a, b, c, d)\
 331         (*a->fcal_ops->fcal_force_reset)(b, c, d)
 332 #define soc_add_ulp(a, b, c, d, e, f, g, h)\
 333         (*a->fcal_ops->fcal_add_ulp)(b, c, d, e, f, g, h)
 334 #define soc_remove_ulp(a, b, c, d, e)\
 335         (*a->fcal_ops->fcal_remove_ulp)(b, c, d, e)
 336 #define soc_take_core(a, b) (*a->fcal_ops->fcal_take_core)(b)
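/*
 * For example, the call made at attach time,
 *	soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
 *	    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
 *	    sf_statec_callback, sf_unsol_els_callback, NULL, sf);
 * expands to an indirect call through the transport's ops vector:
 *	(*sf->sf_sochandle->fcal_ops->fcal_add_ulp)(sf->sf_socp,
 *	    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
 *	    sf_statec_callback, sf_unsol_els_callback, NULL, sf);
 */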
 337 
 338 
 339 /* power management property defines (should be in a common include file?) */
 340 #define PM_HARDWARE_STATE_PROP          "pm-hardware-state"
 341 #define PM_NEEDS_SUSPEND_RESUME         "needs-suspend-resume"
 342 
 343 
 344 /* node properties */
 345 #define NODE_WWN_PROP                   "node-wwn"
 346 #define PORT_WWN_PROP                   "port-wwn"
 347 #define LIP_CNT_PROP                    "lip-count"
 348 #define TARGET_PROP                     "target"
 349 #define LUN_PROP                        "lun"
 350 
 351 
 352 /*
 353  * initialize this driver and install this module
 354  */
 355 int
 356 _init(void)
 357 {
 358         int     i;
 359 
 360         i = ddi_soft_state_init(&sf_state, sizeof (struct sf),
 361             SF_INIT_ITEMS);
 362         if (i != 0)
 363                 return (i);
 364 
 365         if ((i = scsi_hba_init(&modlinkage)) != 0) {
 366                 ddi_soft_state_fini(&sf_state);
 367                 return (i);
 368         }
 369 
 370         mutex_init(&sf_global_mutex, NULL, MUTEX_DRIVER, NULL);
 371         sf_watch_running = 0;
 372         cv_init(&sf_watch_cv, NULL, CV_DRIVER, NULL);
 373 
 374         if ((i = mod_install(&modlinkage)) != 0) {
 375                 mutex_destroy(&sf_global_mutex);
 376                 cv_destroy(&sf_watch_cv);
 377                 scsi_hba_fini(&modlinkage);
 378                 ddi_soft_state_fini(&sf_state);
 379                 return (i);
 380         }
 381 
 382         return (i);
 383 }
 384 
 385 
 386 /*
 387  * remove this driver module from the system
 388  */
 389 int
 390 _fini(void)
 391 {
 392         int     i;
 393 
 394         if ((i = mod_remove(&modlinkage)) == 0) {
 395                 scsi_hba_fini(&modlinkage);
 396                 mutex_destroy(&sf_global_mutex);
 397                 cv_destroy(&sf_watch_cv);
 398                 ddi_soft_state_fini(&sf_state);
 399         }
 400         return (i);
 401 }
 402 
 403 
 404 int
 405 _info(struct modinfo *modinfop)
 406 {
 407         return (mod_info(&modlinkage, modinfop));
 408 }
 409 
 410 /*
 * Given the device number, return the devinfo pointer or instance
 412  */
 413 /*ARGSUSED*/
 414 static int
 415 sf_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
 416 {
 417         int             instance = SF_MINOR2INST(getminor((dev_t)arg));
 418         struct sf       *sf;
 419 
 420         switch (infocmd) {
 421         case DDI_INFO_DEVT2DEVINFO:
 422                 sf = ddi_get_soft_state(sf_state, instance);
 423                 if (sf != NULL)
 424                         *result = sf->sf_dip;
 425                 else {
 426                         *result = NULL;
 427                         return (DDI_FAILURE);
 428                 }
 429                 break;
 430 
 431         case DDI_INFO_DEVT2INSTANCE:
 432                 *result = (void *)(uintptr_t)instance;
 433                 break;
 434         default:
 435                 return (DDI_FAILURE);
 436         }
 437         return (DDI_SUCCESS);
 438 }
 439 
 440 /*
 441  * either attach or resume this driver
 442  */
 443 static int
 444 sf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 445 {
 446         int instance;
 447         int mutex_initted = FALSE;
 448         uint_t ccount;
 449         size_t i, real_size;
 450         struct fcal_transport *handle;
 451         char buf[64];
 452         struct sf *sf, *tsf;
 453         scsi_hba_tran_t *tran = NULL;
 454         int     handle_bound = FALSE;
 455         kthread_t *tp;
 456 
 457 
 458         switch ((int)cmd) {
 459 
 460         case DDI_RESUME:
 461 
 462                 /*
 463                  * we've previously been SF_STATE_OFFLINEd by a DDI_SUSPEND,
 464                  * so time to undo that and get going again by forcing a
 465                  * lip
 466                  */
 467 
 468                 instance = ddi_get_instance(dip);
 469 
                sf = ddi_get_soft_state(sf_state, instance);
                if (sf == NULL) {
                        cmn_err(CE_WARN, "sf%d: bad soft state", instance);
                        return (DDI_FAILURE);
                }
                SF_DEBUG(2, (sf, CE_CONT,
                    "sf_attach: DDI_RESUME for sf%d\n", instance));
 477 
 478                 /*
 479                  * clear suspended flag so that normal operations can resume
 480                  */
 481                 mutex_enter(&sf->sf_mutex);
 482                 sf->sf_state &= ~SF_STATE_SUSPENDED;
 483                 mutex_exit(&sf->sf_mutex);
 484 
 485                 /*
 486                  * force a login by setting our state to offline
 487                  */
 488                 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
 489                 sf->sf_state = SF_STATE_OFFLINE;
 490 
 491                 /*
 492                  * call transport routine to register state change and
 493                  * ELS callback routines (to register us as a ULP)
 494                  */
 495                 soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
 496                     sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
 497                     sf_statec_callback, sf_unsol_els_callback, NULL, sf);
 498 
 499                 /*
 500                  * call transport routine to force loop initialization
 501                  */
 502                 (void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
 503                     sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
 504 
 505                 /*
 506                  * increment watchdog init flag, setting watchdog timeout
 507                  * if we are the first (since somebody has to do it)
 508                  */
 509                 mutex_enter(&sf_global_mutex);
 510                 if (!sf_watchdog_init++) {
 511                         mutex_exit(&sf_global_mutex);
 512                         sf_watchdog_id = timeout(sf_watch,
 513                             (caddr_t)0, sf_watchdog_tick);
 514                 } else {
 515                         mutex_exit(&sf_global_mutex);
 516                 }
 517 
 518                 return (DDI_SUCCESS);
 519 
 520         case DDI_ATTACH:
 521 
 522                 /*
 523                  * this instance attaching for the first time
 524                  */
 525 
 526                 instance = ddi_get_instance(dip);
 527 
 528                 if (ddi_soft_state_zalloc(sf_state, instance) !=
 529                     DDI_SUCCESS) {
 530                         cmn_err(CE_WARN, "sf%d: failed to allocate soft state",
 531                             instance);
 532                         return (DDI_FAILURE);
 533                 }
 534 
 535                 sf = ddi_get_soft_state(sf_state, instance);
 536                 SF_DEBUG(4, (sf, CE_CONT,
 537                     "sf_attach: DDI_ATTACH for sf%d\n", instance));
 538                 if (sf == NULL) {
 539                         /* this shouldn't happen since we just allocated it */
 540                         cmn_err(CE_WARN, "sf%d: bad soft state", instance);
 541                         return (DDI_FAILURE);
 542                 }
 543 
 544                 /*
 545                  * from this point on, if there's an error, we must de-allocate
 546                  * soft state before returning DDI_FAILURE
 547                  */
 548 
 549                 if ((handle = ddi_get_parent_data(dip)) == NULL) {
 550                         cmn_err(CE_WARN,
 551                             "sf%d: failed to obtain transport handle",
 552                             instance);
 553                         goto fail;
 554                 }
 555 
 556                 /* fill in our soft state structure */
 557                 sf->sf_dip = dip;
 558                 sf->sf_state = SF_STATE_INIT;
 559                 sf->sf_throttle = handle->fcal_cmdmax;
 560                 sf->sf_sochandle = handle;
 561                 sf->sf_socp = handle->fcal_handle;
 562                 sf->sf_check_n_close = 0;
 563 
 564                 /* create a command/response buffer pool for this instance */
 565                 if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
 566                         cmn_err(CE_WARN,
 567                             "sf%d: failed to allocate command/response pool",
 568                             instance);
 569                         goto fail;
 570                 }
 571 
                /* create a kmem cache for this instance */
 573                 (void) sprintf(buf, "sf%d_cache", instance);
 574                 sf->sf_pkt_cache = kmem_cache_create(buf,
 575                     sizeof (fcal_packet_t) + sizeof (struct sf_pkt) +
 576                     scsi_pkt_size(), 8,
 577                     sf_kmem_cache_constructor, sf_kmem_cache_destructor,
 578                     NULL, NULL, NULL, 0);
 579                 if (sf->sf_pkt_cache == NULL) {
 580                         cmn_err(CE_WARN, "sf%d: failed to allocate kmem cache",
 581                             instance);
 582                         goto fail;
 583                 }
 584 
 585                 /* set up a handle and allocate memory for DMA */
 586                 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->
 587                     fcal_dmaattr, DDI_DMA_DONTWAIT, NULL, &sf->
 588                     sf_lilp_dmahandle) != DDI_SUCCESS) {
 589                         cmn_err(CE_WARN,
 590                             "sf%d: failed to allocate dma handle for lilp map",
 591                             instance);
 592                         goto fail;
 593                 }
 594                 i = sizeof (struct fcal_lilp_map) + 1;
 595                 if (ddi_dma_mem_alloc(sf->sf_lilp_dmahandle,
 596                     i, sf->sf_sochandle->
 597                     fcal_accattr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
 598                     (caddr_t *)&sf->sf_lilp_map, &real_size,
 599                     &sf->sf_lilp_acchandle) != DDI_SUCCESS) {
 600                         cmn_err(CE_WARN, "sf%d: failed to allocate lilp map",
 601                             instance);
 602                         goto fail;
 603                 }
 604                 if (real_size < i) {
 605                         /* no error message ??? */
 606                         goto fail;              /* trouble allocating memory */
 607                 }
 608 
 609                 /*
 610                  * set up the address for the DMA transfers (getting a cookie)
 611                  */
 612                 if (ddi_dma_addr_bind_handle(sf->sf_lilp_dmahandle, NULL,
 613                     (caddr_t)sf->sf_lilp_map, real_size,
 614                     DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
 615                     &sf->sf_lilp_dmacookie, &ccount) != DDI_DMA_MAPPED) {
 616                         cmn_err(CE_WARN,
 617                             "sf%d: failed to bind dma handle for lilp map",
 618                             instance);
 619                         goto fail;
 620                 }
 621                 handle_bound = TRUE;
 622                 /* ensure only one cookie was allocated */
 623                 if (ccount != 1) {
 624                         goto fail;
 625                 }
 626 
 627                 /* ensure LILP map and DMA cookie addresses are even?? */
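                /*
                 * (adding one and then clearing the low bit rounds an odd
                 * address up to the next even one; the extra byte allocated
                 * above -- sizeof (map) + 1 -- leaves room for this)
                 */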
 628                 sf->sf_lilp_map = (struct fcal_lilp_map *)(((uintptr_t)sf->
 629                     sf_lilp_map + 1) & ~1);
 630                 sf->sf_lilp_dmacookie.dmac_address = (sf->
 631                     sf_lilp_dmacookie.dmac_address + 1) & ~1;
 632 
 633                 /* set up all of our mutexes and condition variables */
 634                 mutex_init(&sf->sf_mutex, NULL, MUTEX_DRIVER, NULL);
 635                 mutex_init(&sf->sf_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
 636                 mutex_init(&sf->sf_cr_mutex, NULL, MUTEX_DRIVER, NULL);
 637                 mutex_init(&sf->sf_hp_daemon_mutex, NULL, MUTEX_DRIVER, NULL);
 638                 cv_init(&sf->sf_cr_cv, NULL, CV_DRIVER, NULL);
 639                 cv_init(&sf->sf_hp_daemon_cv, NULL, CV_DRIVER, NULL);
 640 
 641                 mutex_initted = TRUE;
 642 
 643                 /* create our devctl minor node */
 644                 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
 645                     SF_INST2DEVCTL_MINOR(instance),
 646                     DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
 647                         cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
 648                             " for devctl", instance);
 649                         goto fail;
 650                 }
 651 
 652                 /* create fc minor node */
 653                 if (ddi_create_minor_node(dip, "fc", S_IFCHR,
 654                     SF_INST2FC_MINOR(instance), DDI_NT_FC_ATTACHMENT_POINT,
 655                     0) != DDI_SUCCESS) {
 656                         cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
 657                             " for fc", instance);
 658                         goto fail;
 659                 }
 660                 /* allocate a SCSI transport structure */
 661                 tran = scsi_hba_tran_alloc(dip, 0);
 662                 if (tran == NULL) {
 663                         /* remove all minor nodes created */
 664                         ddi_remove_minor_node(dip, NULL);
 665                         cmn_err(CE_WARN, "sf%d: scsi_hba_tran_alloc failed",
 666                             instance);
 667                         goto fail;
 668                 }
 669 
 670                 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
 671                 scsi_size_clean(dip);           /* SCSI_SIZE_CLEAN_VERIFY ok */
 672 
 673                 /* save ptr to new transport structure and fill it in */
 674                 sf->sf_tran = tran;
 675 
 676                 tran->tran_hba_private               = sf;
 677                 tran->tran_tgt_private               = NULL;
 678                 tran->tran_tgt_init          = sf_scsi_tgt_init;
 679                 tran->tran_tgt_probe         = NULL;
 680                 tran->tran_tgt_free          = sf_scsi_tgt_free;
 681 
 682                 tran->tran_start             = sf_start;
 683                 tran->tran_abort             = sf_abort;
 684                 tran->tran_reset             = sf_reset;
 685                 tran->tran_getcap            = sf_getcap;
 686                 tran->tran_setcap            = sf_setcap;
 687                 tran->tran_init_pkt          = sf_scsi_init_pkt;
 688                 tran->tran_destroy_pkt               = sf_scsi_destroy_pkt;
 689                 tran->tran_dmafree           = sf_scsi_dmafree;
 690                 tran->tran_sync_pkt          = sf_scsi_sync_pkt;
 691                 tran->tran_reset_notify              = sf_scsi_reset_notify;
 692 
 693                 /*
 694                  * register event notification routines with scsa
 695                  */
 696                 tran->tran_get_eventcookie   = sf_bus_get_eventcookie;
 697                 tran->tran_add_eventcall     = sf_bus_add_eventcall;
 698                 tran->tran_remove_eventcall  = sf_bus_remove_eventcall;
 699                 tran->tran_post_event                = sf_bus_post_event;
 700 
 701                 /*
 702                  * register bus configure/unconfigure
 703                  */
 704                 tran->tran_bus_config                = sf_scsi_bus_config;
 705                 tran->tran_bus_unconfig              = sf_scsi_bus_unconfig;
 706 
 707                 /*
 708                  * allocate an ndi event handle
 709                  */
 710                 sf->sf_event_defs = (ndi_event_definition_t *)
 711                     kmem_zalloc(sizeof (sf_event_defs), KM_SLEEP);
 712 
 713                 bcopy(sf_event_defs, sf->sf_event_defs,
 714                     sizeof (sf_event_defs));
 715 
 716                 (void) ndi_event_alloc_hdl(dip, NULL,
 717                     &sf->sf_event_hdl, NDI_SLEEP);
 718 
 719                 sf->sf_events.ndi_events_version = NDI_EVENTS_REV1;
 720                 sf->sf_events.ndi_n_events = SF_N_NDI_EVENTS;
 721                 sf->sf_events.ndi_event_defs = sf->sf_event_defs;
 722 
 723                 if (ndi_event_bind_set(sf->sf_event_hdl,
 724                     &sf->sf_events, NDI_SLEEP) != NDI_SUCCESS) {
 725                         goto fail;
 726                 }
 727 
 728                 tran->tran_get_name          = sf_scsi_get_name;
 729                 tran->tran_get_bus_addr              = sf_scsi_get_bus_addr;
 730 
 731                 /* setup and attach SCSI hba transport */
 732                 if (scsi_hba_attach_setup(dip, sf->sf_sochandle->
 733                     fcal_dmaattr, tran, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
 734                         cmn_err(CE_WARN, "sf%d: scsi_hba_attach_setup failed",
 735                             instance);
 736                         goto fail;
 737                 }
 738 
 739                 /* set up kstats */
 740                 if ((sf->sf_ksp = kstat_create("sf", instance, "statistics",
 741                     "controller", KSTAT_TYPE_RAW, sizeof (struct sf_stats),
 742                     KSTAT_FLAG_VIRTUAL)) == NULL) {
 743                         cmn_err(CE_WARN, "sf%d: failed to create kstat",
 744                             instance);
 745                 } else {
 746                         sf->sf_stats.version = 2;
 747                         (void) sprintf(sf->sf_stats.drvr_name,
 748                         "%s: %s", SF_NAME, sf_version);
 749                         sf->sf_ksp->ks_data = (void *)&sf->sf_stats;
 750                         sf->sf_ksp->ks_private = sf;
 751                         sf->sf_ksp->ks_update = sf_kstat_update;
 752                         kstat_install(sf->sf_ksp);
 753                 }
 754 
 755                 /* create the hotplug thread */
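                /*
                 * (sf_hp_tid is saved so DDI_DETACH can later wake the
                 * daemon via sf_hp_daemon_cv and thread_join() on it)
                 */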
 756                 mutex_enter(&sf->sf_hp_daemon_mutex);
 757                 tp = thread_create(NULL, 0,
 758                     (void (*)())sf_hp_daemon, sf, 0, &p0, TS_RUN, minclsyspri);
 759                 sf->sf_hp_tid = tp->t_did;
 760                 mutex_exit(&sf->sf_hp_daemon_mutex);
 761 
 762                 /* add this soft state instance to the head of the list */
 763                 mutex_enter(&sf_global_mutex);
 764                 sf->sf_next = sf_head;
 765                 tsf = sf_head;
 766                 sf_head = sf;
 767 
 768                 /*
 769                  * find entry in list that has the same FC-AL handle (if any)
 770                  */
 771                 while (tsf != NULL) {
 772                         if (tsf->sf_socp == sf->sf_socp) {
 773                                 break;          /* found matching entry */
 774                         }
 775                         tsf = tsf->sf_next;
 776                 }
 777 
 778                 if (tsf != NULL) {
 779                         /* if we found a matching entry keep track of it */
 780                         sf->sf_sibling = tsf;
 781                 }
 782 
 783                 /*
 784                  * increment watchdog init flag, setting watchdog timeout
 785                  * if we are the first (since somebody has to do it)
 786                  */
 787                 if (!sf_watchdog_init++) {
 788                         mutex_exit(&sf_global_mutex);
 789                         sf_watchdog_tick = drv_sectohz(sf_watchdog_timeout);
 790                         sf_watchdog_id = timeout(sf_watch,
 791                             NULL, sf_watchdog_tick);
 792                 } else {
 793                         mutex_exit(&sf_global_mutex);
 794                 }
 795 
 796                 if (tsf != NULL) {
 797                         /*
 798                          * set up matching entry to be our sibling
 799                          */
 800                         mutex_enter(&tsf->sf_mutex);
 801                         tsf->sf_sibling = sf;
 802                         mutex_exit(&tsf->sf_mutex);
 803                 }
 804 
 805                 /*
 806                  * create this property so that PM code knows we want
 807                  * to be suspended at PM time
 808                  */
 809                 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
 810                     PM_HARDWARE_STATE_PROP, PM_NEEDS_SUSPEND_RESUME);
 811 
 812                 /* log the fact that we have a new device */
 813                 ddi_report_dev(dip);
 814 
 815                 /*
 816                  * force a login by setting our state to offline
 817                  */
 818                 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
 819                 sf->sf_state = SF_STATE_OFFLINE;
 820 
 821                 /*
 822                  * call transport routine to register state change and
 823                  * ELS callback routines (to register us as a ULP)
 824                  */
 825                 soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
 826                     sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
 827                     sf_statec_callback, sf_unsol_els_callback, NULL, sf);
 828 
 829                 /*
 830                  * call transport routine to force loop initialization
 831                  */
 832                 (void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
 833                     sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
 834                 sf->sf_reset_time = ddi_get_lbolt64();
 835                 return (DDI_SUCCESS);
 836 
 837         default:
 838                 return (DDI_FAILURE);
 839         }
 840 
 841 fail:
 842         cmn_err(CE_WARN, "sf%d: failed to attach", instance);
 843 
 844         /*
 845          * Unbind and free event set
 846          */
 847         if (sf->sf_event_hdl) {
 848                 (void) ndi_event_unbind_set(sf->sf_event_hdl,
 849                     &sf->sf_events, NDI_SLEEP);
 850                 (void) ndi_event_free_hdl(sf->sf_event_hdl);
 851         }
 852 
 853         if (sf->sf_event_defs) {
 854                 kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
 855         }
 856 
 857         if (sf->sf_tran != NULL) {
 858                 scsi_hba_tran_free(sf->sf_tran);
 859         }
 860         while (sf->sf_cr_pool != NULL) {
 861                 sf_crpool_free(sf);
 862         }
 863         if (sf->sf_lilp_dmahandle != NULL) {
 864                 if (handle_bound) {
 865                         (void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
 866                 }
 867                 ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
 868         }
 869         if (sf->sf_pkt_cache != NULL) {
 870                 kmem_cache_destroy(sf->sf_pkt_cache);
 871         }
 872         if (sf->sf_lilp_map != NULL) {
 873                 ddi_dma_mem_free(&sf->sf_lilp_acchandle);
 874         }
 875         if (sf->sf_ksp != NULL) {
 876                 kstat_delete(sf->sf_ksp);
 877         }
 878         if (mutex_initted) {
 879                 mutex_destroy(&sf->sf_mutex);
 880                 mutex_destroy(&sf->sf_cmd_mutex);
 881                 mutex_destroy(&sf->sf_cr_mutex);
 882                 mutex_destroy(&sf->sf_hp_daemon_mutex);
 883                 cv_destroy(&sf->sf_cr_cv);
 884                 cv_destroy(&sf->sf_hp_daemon_cv);
 885         }
 886         mutex_enter(&sf_global_mutex);
 887 
 888         /*
 889          * kill off the watchdog if we are the last instance
 890          */
 891         if (!--sf_watchdog_init) {
 892                 timeout_id_t tid = sf_watchdog_id;
 893                 mutex_exit(&sf_global_mutex);
 894                 (void) untimeout(tid);
 895         } else {
 896                 mutex_exit(&sf_global_mutex);
 897         }
 898 
 899         ddi_soft_state_free(sf_state, instance);
 900 
 901         if (tran != NULL) {
 902                 /* remove all minor nodes */
 903                 ddi_remove_minor_node(dip, NULL);
 904         }
 905 
 906         return (DDI_FAILURE);
 907 }
 908 
 909 
 910 /* ARGSUSED */
 911 static int
 912 sf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
 913 {
 914         struct sf               *sf;
 915         int                     instance;
 916         int                     i;
 917         struct sf_target        *target;
 918         timeout_id_t            tid;
 919 
 920 
 921 
 922         /* NO OTHER THREADS ARE RUNNING */
 923 
 924         instance = ddi_get_instance(dip);
 925 
 926         if ((sf = ddi_get_soft_state(sf_state, instance)) == NULL) {
 927                 cmn_err(CE_WARN, "sf_detach, sf%d: bad soft state", instance);
 928                 return (DDI_FAILURE);
 929         }
 930 
 931         switch (cmd) {
 932 
 933         case DDI_SUSPEND:
 934                 /*
 935                  * suspend our instance
 936                  */
 937 
 938                 SF_DEBUG(2, (sf, CE_CONT,
 939                     "sf_detach: DDI_SUSPEND for sf%d\n", instance));
                /*
                 * There is a race condition in socal: if a ULP removes itself
                 * from the callback list while socal is walking that list to
                 * deliver callbacks, the for loop in socal can follow a stale
                 * cblist entry and panic when it dereferences cblist->next.
                 */
 946 
                /* call transport to remove and unregister our callbacks */
 948                 soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
 949                     sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);
 950 
 951                 /*
 952                  * begin process of clearing outstanding commands
 953                  * by issuing a lip
 954                  */
 955                 sf_force_lip(sf);
 956 
 957                 /*
 958                  * toggle the device OFFLINE in order to cause
 959                  * outstanding commands to drain
 960                  */
 961                 mutex_enter(&sf->sf_mutex);
 962                 sf->sf_lip_cnt++;
 963                 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
 964                 sf->sf_state = (SF_STATE_OFFLINE | SF_STATE_SUSPENDED);
 965                 for (i = 0; i < sf_max_targets; i++) {
 966                         target = sf->sf_targets[i];
 967                         if (target != NULL) {
 968                                 struct sf_target *ntarget;
 969 
 970                                 mutex_enter(&target->sft_mutex);
 971                                 if (!(target->sft_state & SF_TARGET_OFFLINE)) {
 972                                         target->sft_state |=
 973                                             (SF_TARGET_BUSY | SF_TARGET_MARK);
 974                                 }
 975                                 /* do this for all LUNs as well */
 976                                 for (ntarget = target->sft_next_lun;
 977                                     ntarget;
 978                                     ntarget = ntarget->sft_next_lun) {
 979                                         mutex_enter(&ntarget->sft_mutex);
 980                                         if (!(ntarget->sft_state &
 981                                             SF_TARGET_OFFLINE)) {
 982                                                 ntarget->sft_state |=
 983                                                     (SF_TARGET_BUSY |
 984                                                     SF_TARGET_MARK);
 985                                         }
 986                                         mutex_exit(&ntarget->sft_mutex);
 987                                 }
 988                                 mutex_exit(&target->sft_mutex);
 989                         }
 990                 }
 991                 mutex_exit(&sf->sf_mutex);
 992                 mutex_enter(&sf_global_mutex);
 993 
 994                 /*
 995                  * kill off the watchdog if we are the last instance
 996                  */
 997                 if (!--sf_watchdog_init) {
 998                         tid = sf_watchdog_id;
 999                         mutex_exit(&sf_global_mutex);
1000                         (void) untimeout(tid);
1001                 } else {
1002                         mutex_exit(&sf_global_mutex);
1003                 }
1004 
1005                 return (DDI_SUCCESS);
1006 
1007         case DDI_DETACH:
1008                 /*
1009                  * detach this instance
1010                  */
1011 
1012                 SF_DEBUG(2, (sf, CE_CONT,
1013                     "sf_detach: DDI_DETACH for sf%d\n", instance));
1014 
1015                 /* remove this "sf" from the list of sf softstates */
1016                 sf_softstate_unlink(sf);
1017 
1018                 /*
1019                  * prior to taking any DDI_DETACH actions, toggle the
1020                  * device OFFLINE in order to cause outstanding
1021                  * commands to drain
1022                  */
1023                 mutex_enter(&sf->sf_mutex);
1024                 sf->sf_lip_cnt++;
1025                 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
1026                 sf->sf_state = SF_STATE_OFFLINE;
1027                 for (i = 0; i < sf_max_targets; i++) {
1028                         target = sf->sf_targets[i];
1029                         if (target != NULL) {
1030                                 struct sf_target *ntarget;
1031 
1032                                 mutex_enter(&target->sft_mutex);
1033                                 if (!(target->sft_state & SF_TARGET_OFFLINE)) {
1034                                         target->sft_state |=
1035                                             (SF_TARGET_BUSY | SF_TARGET_MARK);
1036                                 }
1037                                 for (ntarget = target->sft_next_lun;
1038                                     ntarget;
1039                                     ntarget = ntarget->sft_next_lun) {
1040                                         mutex_enter(&ntarget->sft_mutex);
1041                                         if (!(ntarget->sft_state &
1042                                             SF_TARGET_OFFLINE)) {
1043                                                 ntarget->sft_state |=
1044                                                     (SF_TARGET_BUSY |
1045                                                     SF_TARGET_MARK);
1046                                         }
1047                                         mutex_exit(&ntarget->sft_mutex);
1048                                 }
1049                                 mutex_exit(&target->sft_mutex);
1050                         }
1051                 }
1052                 mutex_exit(&sf->sf_mutex);
1053 
1054                 /* call transport to remove and unregister our callbacks */
1055                 soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
1056                     sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);
1057 
1058                 /*
1059                  * kill off the watchdog if we are the last instance
1060                  */
1061                 mutex_enter(&sf_global_mutex);
1062                 if (!--sf_watchdog_init) {
1063                         tid = sf_watchdog_id;
1064                         mutex_exit(&sf_global_mutex);
1065                         (void) untimeout(tid);
1066                 } else {
1067                         mutex_exit(&sf_global_mutex);
1068                 }
1069 
1070                 /* signal sf_hp_daemon() to exit and wait for exit */
1071                 mutex_enter(&sf->sf_hp_daemon_mutex);
1072                 ASSERT(sf->sf_hp_tid);
1073                 sf->sf_hp_exit = 1;          /* flag exit */
1074                 cv_signal(&sf->sf_hp_daemon_cv);
1075                 mutex_exit(&sf->sf_hp_daemon_mutex);
1076                 thread_join(sf->sf_hp_tid);  /* wait for hotplug to exit */
1077 
1078                 /*
1079                  * Unbind and free event set
1080                  */
1081                 if (sf->sf_event_hdl) {
1082                         (void) ndi_event_unbind_set(sf->sf_event_hdl,
1083                             &sf->sf_events, NDI_SLEEP);
1084                         (void) ndi_event_free_hdl(sf->sf_event_hdl);
1085                 }
1086 
1087                 if (sf->sf_event_defs) {
1088                         kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
1089                 }
1090 
1091                 /* detach this instance of the HBA driver */
1092                 (void) scsi_hba_detach(dip);
1093                 scsi_hba_tran_free(sf->sf_tran);
1094 
1095                 /* deallocate/unbind DMA handle for lilp map */
1096                 if (sf->sf_lilp_map != NULL) {
1097                         (void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
1098                         if (sf->sf_lilp_dmahandle != NULL) {
1099                                 ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
1100                         }
1101                         ddi_dma_mem_free(&sf->sf_lilp_acchandle);
1102                 }
1103 
1104                 /*
1105                  * the kmem cache must be destroyed before free'ing
1106                  * up the crpools
1107                  *
1108                  * our finagle of "ntot" and "nfree"
1109                  * causes an ASSERT failure in "sf_cr_free()"
1110                  * if the kmem cache is free'd after invoking
1111                  * "sf_crpool_free()".
1112                  */
1113                 kmem_cache_destroy(sf->sf_pkt_cache);
1114 
1115                 SF_DEBUG(2, (sf, CE_CONT,
1116                     "sf_detach: sf_crpool_free() for instance 0x%x\n",
1117                     instance));
1118                 while (sf->sf_cr_pool != NULL) {
1119                         /*
1120                          * set ntot to nfree for this particular entry
1121                          *
1122                          * this causes sf_crpool_free() to update
1123                          * the cr_pool list when deallocating this entry
1124                          */
1125                         sf->sf_cr_pool->ntot = sf->sf_cr_pool->nfree;
1126                         sf_crpool_free(sf);
1127                 }
1128 
1129                 /*
                 * now that the cr_pools are gone it's safe
                 * to destroy all softstate mutexes and condition variables
1132                  */
1133                 mutex_destroy(&sf->sf_mutex);
1134                 mutex_destroy(&sf->sf_cmd_mutex);
1135                 mutex_destroy(&sf->sf_cr_mutex);
1136                 mutex_destroy(&sf->sf_hp_daemon_mutex);
1137                 cv_destroy(&sf->sf_cr_cv);
1138                 cv_destroy(&sf->sf_hp_daemon_cv);
1139 
1140                 /* remove all minor nodes from the device tree */
1141                 ddi_remove_minor_node(dip, NULL);
1142 
1143                 /* remove properties created during attach() */
1144                 ddi_prop_remove_all(dip);
1145 
1146                 /* remove kstat's if present */
1147                 if (sf->sf_ksp != NULL) {
1148                         kstat_delete(sf->sf_ksp);
1149                 }
1150 
1151                 SF_DEBUG(2, (sf, CE_CONT,
1152                     "sf_detach: ddi_soft_state_free() for instance 0x%x\n",
1153                     instance));
1154                 ddi_soft_state_free(sf_state, instance);
1155                 return (DDI_SUCCESS);
1156 
1157         default:
1158                 SF_DEBUG(2, (sf, CE_CONT, "sf_detach: sf%d unknown cmd %x\n",
1159                     instance, (int)cmd));
1160                 return (DDI_FAILURE);
1161         }
1162 }
1163 
1164 
1165 /*
1166  * sf_softstate_unlink() - remove an sf instance from the list of softstates
1167  */
1168 static void
1169 sf_softstate_unlink(struct sf *sf)
1170 {
1171         struct sf       *sf_ptr;
1172         struct sf       *sf_found_sibling;
1173         struct sf       *sf_reposition = NULL;
1174 
1175 
1176         mutex_enter(&sf_global_mutex);
1177         while (sf_watch_running) {
1178                 /* Busy working the list -- wait */
1179                 cv_wait(&sf_watch_cv, &sf_global_mutex);
1180         }
1181         if ((sf_found_sibling = sf->sf_sibling) != NULL) {
1182                 /*
1183                  * we have a sibling so NULL out its reference to us
1184                  */
1185                 mutex_enter(&sf_found_sibling->sf_mutex);
1186                 sf_found_sibling->sf_sibling = NULL;
1187                 mutex_exit(&sf_found_sibling->sf_mutex);
1188         }
1189 
1190         /* remove our instance from the global list */
1191         if (sf == sf_head) {
                /* we were at the head of the list */
1193                 sf_head = sf->sf_next;
1194         } else {
1195                 /* find us in the list */
1196                 for (sf_ptr = sf_head;
1197                     sf_ptr != NULL;
1198                     sf_ptr = sf_ptr->sf_next) {
1199                         if (sf_ptr == sf) {
1200                                 break;
1201                         }
1202                         /* remember this place */
1203                         sf_reposition = sf_ptr;
1204                 }
1205                 ASSERT(sf_ptr == sf);
1206                 ASSERT(sf_reposition != NULL);
1207 
1208                 sf_reposition->sf_next = sf_ptr->sf_next;
1209         }
1210         mutex_exit(&sf_global_mutex);
1211 }
1212 
1213 
1214 static int
1215 sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
1216     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
1217 {
1218         int64_t         reset_delay;
1219         struct sf       *sf;
1220 
1221         sf = ddi_get_soft_state(sf_state, ddi_get_instance(parent));
1222         ASSERT(sf);
1223 
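        /*
         * delay any bus config until SF_INIT_WAIT_TIMEOUT has elapsed since
         * the last LIP/reset (sf_reset_time); the remaining delay, if any,
         * is passed on to ndi_busop_bus_config()
         */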
1224         reset_delay = (int64_t)(USEC_TO_TICK(SF_INIT_WAIT_TIMEOUT)) -
1225             (ddi_get_lbolt64() - sf->sf_reset_time);
1226         if (reset_delay < 0)
1227                 reset_delay = 0;
1228 
1229         if (sf_bus_config_debug)
1230                 flag |= NDI_DEVI_DEBUG;
1231 
1232         return (ndi_busop_bus_config(parent, flag, op,
1233             arg, childp, (clock_t)reset_delay));
1234 }
1235 
1236 static int
1237 sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
1238     ddi_bus_config_op_t op, void *arg)
1239 {
1240         if (sf_bus_config_debug)
1241                 flag |= NDI_DEVI_DEBUG;
1242 
1243         return (ndi_busop_bus_unconfig(parent, flag, op, arg));
1244 }
1245 
1246 
1247 /*
1248  * called by transport to initialize a SCSI target
1249  */
1250 /* ARGSUSED */
1251 static int
1252 sf_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1253     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1254 {
1255 #ifdef RAID_LUNS
1256         int lun;
1257 #else
1258         int64_t lun;
1259 #endif
1260         struct sf_target *target;
1261         struct sf *sf = (struct sf *)hba_tran->tran_hba_private;
1262         int i, t_len;
1263         unsigned int lip_cnt;
1264         unsigned char wwn[FC_WWN_SIZE];
1265 
1266 
1267         /* get and validate our SCSI target ID */
1268         i = sd->sd_address.a_target;
1269         if (i >= sf_max_targets) {
1270                 return (DDI_NOT_WELL_FORMED);
1271         }
1272 
1273         /* get our port WWN property */
1274         t_len = sizeof (wwn);
1275         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1276             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1277             (caddr_t)&wwn, &t_len) != DDI_SUCCESS) {
1278                 /* no port WWN property - ignore the OBP stub node */
1279                 return (DDI_NOT_WELL_FORMED);
1280         }
1281 
1282         /* get our LIP count property */
1283         t_len = sizeof (lip_cnt);
1284         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1285             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, LIP_CNT_PROP,
1286             (caddr_t)&lip_cnt, &t_len) != DDI_SUCCESS) {
1287                 return (DDI_FAILURE);
1288         }
1289         /* and our LUN property */
1290         t_len = sizeof (lun);
1291         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1292             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1293             (caddr_t)&lun, &t_len) != DDI_SUCCESS) {
1294                 return (DDI_FAILURE);
1295         }
1296 
1297         /* find the target structure for this instance */
1298         mutex_enter(&sf->sf_mutex);
1299         if ((target = sf_lookup_target(sf, wwn, lun)) == NULL) {
1300                 mutex_exit(&sf->sf_mutex);
1301                 return (DDI_FAILURE);
1302         }
1303 
1304         mutex_enter(&target->sft_mutex);
1305         if ((sf->sf_lip_cnt == lip_cnt) && !(target->sft_state
1306             & SF_TARGET_INIT_DONE)) {
1307                 /*
1308                  * set links between HBA transport and target structures
1309                  * and set done flag
1310                  */
1311                 hba_tran->tran_tgt_private = target;
1312                 target->sft_tran = hba_tran;
1313                 target->sft_state |= SF_TARGET_INIT_DONE;
1314         } else {
1315                 /* already initialized, or a new LIP has occurred */
1316                 mutex_exit(&target->sft_mutex);
1317                 mutex_exit(&sf->sf_mutex);
1318                 return (DDI_FAILURE);
1319         }
1320         mutex_exit(&target->sft_mutex);
1321         mutex_exit(&sf->sf_mutex);
1322 
1323         return (DDI_SUCCESS);
1324 }
1325 
1326 
1327 /*
1328  * called by transport to free a target
1329  */
1330 /* ARGSUSED */
1331 static void
1332 sf_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1333     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1334 {
1335         struct sf_target *target = hba_tran->tran_tgt_private;
1336 
1337         if (target != NULL) {
1338                 mutex_enter(&target->sft_mutex);
1339                 target->sft_tran = NULL;
1340                 target->sft_state &= ~SF_TARGET_INIT_DONE;
1341                 mutex_exit(&target->sft_mutex);
1342         }
1343 }
1344 
1345 
1346 /*
1347  * allocator for non-std size cdb/pkt_private/status -- returns FALSE (0)
1348  * upon success, else returns TRUE on allocation failure
1349  */
1350 /*ARGSUSED*/
1351 static int
1352 sf_pkt_alloc_extern(struct sf *sf, struct sf_pkt *cmd,
1353     int tgtlen, int statuslen, int kf)
1354 {
1355         caddr_t scbp, tgt;
1356         int failure = FALSE;
1357         struct scsi_pkt *pkt = CMD2PKT(cmd);
1358 
1359 
1360         tgt = scbp = NULL;
1361 
1362         if (tgtlen > PKT_PRIV_LEN) {
1363                 if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
1364                         failure = TRUE;
1365                 } else {
1366                         cmd->cmd_flags |= CFLAG_PRIVEXTERN;
1367                         pkt->pkt_private = tgt;
1368                 }
1369         }
1370         if (statuslen > EXTCMDS_STATUS_SIZE) {
1371                 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
1372                         failure = TRUE;
1373                 } else {
1374                         cmd->cmd_flags |= CFLAG_SCBEXTERN;
1375                         pkt->pkt_scbp = (opaque_t)scbp;
1376                 }
1377         }
1378         if (failure) {
1379                 sf_pkt_destroy_extern(sf, cmd);
1380         }
1381         return (failure);
1382 }
1383 
1384 
1385 /*
1386  * deallocator for non-std size cdb/pkt_private/status
1387  */
1388 static void
1389 sf_pkt_destroy_extern(struct sf *sf, struct sf_pkt *cmd)
1390 {
1391         struct scsi_pkt *pkt = CMD2PKT(cmd);
1392 
1393         if (cmd->cmd_flags & CFLAG_FREE) {
1394                 cmn_err(CE_PANIC,
1395                     "sf_scsi_impl_pktfree: freeing free packet");
1396                 _NOTE(NOT_REACHED)
1397                 /* NOTREACHED */
1398         }
1399         if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
1400                 kmem_free((caddr_t)pkt->pkt_scbp,
1401                     (size_t)cmd->cmd_scblen);
1402         }
1403         if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
1404                 kmem_free((caddr_t)pkt->pkt_private,
1405                     (size_t)cmd->cmd_privlen);
1406         }
1407 
1408         cmd->cmd_flags = CFLAG_FREE;
1409         kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1410 }
1411 
1412 
1413 /*
1414  * create or initialize a SCSI packet -- called internally and
1415  * by the transport
1416  */
1417 static struct scsi_pkt *
1418 sf_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1419     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1420     int flags, int (*callback)(), caddr_t arg)
1421 {
1422         int kf;
1423         int failure = FALSE;
1424         struct sf_pkt *cmd;
1425         struct sf *sf = ADDR2SF(ap);
1426         struct sf_target *target = ADDR2TARGET(ap);
1427         struct sf_pkt   *new_cmd = NULL;
1428         struct fcal_packet      *fpkt;
1429         fc_frame_header_t       *hp;
1430         struct fcp_cmd *fcmd;
1431 
1432 
1433         /*
1434          * If a pkt already exists (pkt != NULL), this request is for
1435          * DMA allocation only; otherwise allocate and set up a new pkt.
1436          */
1437         if (pkt == NULL) {
1438 
1439                 /*
1440                  * First step of sf_scsi_init_pkt:  pkt allocation
1441                  */
1442                 if (cmdlen > FCP_CDB_SIZE) {
1443                         return (NULL);
1444                 }
1445 
1446                 kf = (callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
1447 
1448                 if ((cmd = kmem_cache_alloc(sf->sf_pkt_cache, kf)) != NULL) {
1449                         /*
1450                          * Selective zeroing of the pkt.
1451                          */
1452 
1453                         cmd->cmd_flags = 0;
1454                         cmd->cmd_forw = 0;
1455                         cmd->cmd_back = 0;
1456                         cmd->cmd_next = 0;
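                             /*
                              * the cache buffer holds an sf_pkt, an
                              * fcal_packet, and a scsi_pkt back-to-back;
                              * point at the embedded fcal_packet and scsi_pkt
                              */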
1457                         cmd->cmd_pkt = (struct scsi_pkt *)((char *)cmd +
1458                             sizeof (struct sf_pkt) + sizeof (struct
1459                             fcal_packet));
1460                         cmd->cmd_fp_pkt = (struct fcal_packet *)((char *)cmd +
1461                             sizeof (struct sf_pkt));
1462                         cmd->cmd_fp_pkt->fcal_pkt_private = (opaque_t)cmd;
1463                         cmd->cmd_state = SF_STATE_IDLE;
1464                         cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
1465                         cmd->cmd_pkt->pkt_scbp = (opaque_t)cmd->cmd_scsi_scb;
1466                         cmd->cmd_pkt->pkt_comp    = NULL;
1467                         cmd->cmd_pkt->pkt_flags   = 0;
1468                         cmd->cmd_pkt->pkt_time    = 0;
1469                         cmd->cmd_pkt->pkt_resid   = 0;
1470                         cmd->cmd_pkt->pkt_reason = 0;
1471                         cmd->cmd_cdblen = (uchar_t)cmdlen;
1472                         cmd->cmd_scblen              = statuslen;
1473                         cmd->cmd_privlen     = tgtlen;
1474                         cmd->cmd_pkt->pkt_address = *ap;
1475 
1476                         /* zero pkt_private */
1477                         cmd->cmd_pkt->pkt_private =
1478                             cmd->cmd_pkt_private;
1479                         bzero((caddr_t)cmd->cmd_pkt->pkt_private,
1480                             PKT_PRIV_LEN);
1481                 } else {
1482                         failure = TRUE;
1483                 }
1484 
1485                 if (failure ||
1486                     (tgtlen > PKT_PRIV_LEN) ||
1487                     (statuslen > EXTCMDS_STATUS_SIZE)) {
1488                         if (!failure) {
1489                                 /* need to allocate more space */
1490                                 failure = sf_pkt_alloc_extern(sf, cmd,
1491                                     tgtlen, statuslen, kf);
1492                         }
1493                         if (failure) {
1494                                 return (NULL);
1495                         }
1496                 }
1497 
1498                 fpkt = cmd->cmd_fp_pkt;
1499                 if (cmd->cmd_block == NULL) {
1500 
1501                         /* allocate cmd/response pool buffers */
1502                         if (sf_cr_alloc(sf, cmd, callback) == DDI_FAILURE) {
1503                                 sf_pkt_destroy_extern(sf, cmd);
1504                                 return (NULL);
1505                         }
1506 
1507                         /* fill in the FC-AL packet */
1508                         fpkt->fcal_pkt_cookie = sf->sf_socp;
1509                         fpkt->fcal_pkt_comp = sf_cmd_callback;
1510                         fpkt->fcal_pkt_flags = 0;
1511                         fpkt->fcal_magic = FCALP_MAGIC;
1512                         fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
1513                             (ushort_t)(SOC_FC_HEADER |
1514                             sf->sf_sochandle->fcal_portno);
1515                         fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
1516                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
1517                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
1518                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
1519                         fpkt->fcal_socal_request.sr_dataseg[0].fc_base =
1520                             (uint32_t)cmd->cmd_dmac;
1521                         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
1522                             sizeof (struct fcp_cmd);
1523                         fpkt->fcal_socal_request.sr_dataseg[1].fc_base =
1524                             (uint32_t)cmd->cmd_rsp_dmac;
1525                         fpkt->fcal_socal_request.sr_dataseg[1].fc_count =
1526                             FCP_MAX_RSP_IU_SIZE;
1527 
1528                         /* Fill in the Fibre Channel frame header */
1529                         hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
1530                         hp->r_ctl = R_CTL_COMMAND;
1531                         hp->type = TYPE_SCSI_FCP;
1532                         hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
1533                         hp->reserved1 = 0;
1534                         hp->seq_id = 0;
1535                         hp->df_ctl  = 0;
1536                         hp->seq_cnt = 0;
1537                         hp->ox_id = 0xffff;
1538                         hp->rx_id = 0xffff;
1539                         hp->ro = 0;
1540 
1541                         /* Establish the LUN */
1542                         bcopy((caddr_t)&target->sft_lun.b,
1543                             (caddr_t)&cmd->cmd_block->fcp_ent_addr,
1544                             FCP_LUN_SIZE);
1545                         *((int32_t *)&cmd->cmd_block->fcp_cntl) = 0;
1546                 }
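                     /* the SCSI CDB lives inside the FCP command block */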
1547                 cmd->cmd_pkt->pkt_cdbp = cmd->cmd_block->fcp_cdb;
1548 
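                     /*
                      * add this packet at the tail of the target's circular
                      * packet list
                      */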
1549                 mutex_enter(&target->sft_pkt_mutex);
1550 
1551                 target->sft_pkt_tail->cmd_forw = cmd;
1552                 cmd->cmd_back = target->sft_pkt_tail;
1553                 cmd->cmd_forw = (struct sf_pkt *)&target->sft_pkt_head;
1554                 target->sft_pkt_tail = cmd;
1555 
1556                 mutex_exit(&target->sft_pkt_mutex);
1557                 new_cmd = cmd;          /* for later cleanup if needed */
1558         } else {
1559                 /* pkt already exists -- just a request for DMA allocation */
1560                 cmd = PKT2CMD(pkt);
1561                 fpkt = cmd->cmd_fp_pkt;
1562         }
1563 
1564         /* zero the cdb */
1565         bzero((caddr_t)cmd->cmd_pkt->pkt_cdbp, cmdlen);
1566 
1567         /*
1568          * Second step of sf_scsi_init_pkt:  dma allocation
1569          * Set up dma info
1570          */
1571         if ((bp != NULL) && (bp->b_bcount != 0)) {
1572                 int cmd_flags, dma_flags;
1573                 int rval = 0;
1574                 uint_t dmacookie_count;
1575 
1576                 /* there is a buffer and some data to transfer */
1577 
1578                 /* set up command and DMA flags */
1579                 cmd_flags = cmd->cmd_flags;
1580                 if (bp->b_flags & B_READ) {
1581                         /* a read */
1582                         cmd_flags &= ~CFLAG_DMASEND;
1583                         dma_flags = DDI_DMA_READ;
1584                 } else {
1585                         /* a write */
1586                         cmd_flags |= CFLAG_DMASEND;
1587                         dma_flags = DDI_DMA_WRITE;
1588                 }
1589                 if (flags & PKT_CONSISTENT) {
1590                         cmd_flags |= CFLAG_CMDIOPB;
1591                         dma_flags |= DDI_DMA_CONSISTENT;
1592                 }
1593 
1594                 /* ensure we have a DMA handle */
1595                 if (cmd->cmd_dmahandle == NULL) {
1596                         rval = ddi_dma_alloc_handle(sf->sf_dip,
1597                             sf->sf_sochandle->fcal_dmaattr, callback, arg,
1598                             &cmd->cmd_dmahandle);
1599                 }
1600 
1601                 if (rval == 0) {
1602                         /* bind our DMA handle to our buffer */
1603                         rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
1604                             dma_flags, callback, arg, &cmd->cmd_dmacookie,
1605                             &dmacookie_count);
1606                 }
1607 
1608                 if (rval != 0) {
1609                         /* DMA failure */
1610                         SF_DEBUG(2, (sf, CE_CONT, "ddi_dma_buf.. failed\n"));
1611                         switch (rval) {
1612                         case DDI_DMA_NORESOURCES:
1613                                 bioerror(bp, 0);
1614                                 break;
1615                         case DDI_DMA_BADATTR:
1616                         case DDI_DMA_NOMAPPING:
1617                                 bioerror(bp, EFAULT);
1618                                 break;
1619                         case DDI_DMA_TOOBIG:
1620                         default:
1621                                 bioerror(bp, EINVAL);
1622                                 break;
1623                         }
1624                         /* clear valid flag */
1625                         cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
1626                         if (new_cmd != NULL) {
1627                                 /* destroy packet if we just created it */
1628                                 sf_scsi_destroy_pkt(ap, new_cmd->cmd_pkt);
1629                         }
1630                         return (NULL);
1631                 }
1632 
1633                 ASSERT(dmacookie_count == 1);
1634                 /* set up amt to transfer and set valid flag */
1635                 cmd->cmd_dmacount = bp->b_bcount;
1636                 cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;
1637 
1638                 ASSERT(cmd->cmd_dmahandle != NULL);
1639         }
1640 
1641         /* set up FC-AL packet */
1642         fcmd = cmd->cmd_block;
1643 
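             /*
              * set the FCP data direction bits and the data segment from
              * the DMA cookie (if a transfer was set up)
              */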
1644         if (cmd->cmd_flags & CFLAG_DMAVALID) {
1645                 if (cmd->cmd_flags & CFLAG_DMASEND) {
1646                         /* DMA write */
1647                         fcmd->fcp_cntl.cntl_read_data = 0;
1648                         fcmd->fcp_cntl.cntl_write_data = 1;
1649                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
1650                             CQ_TYPE_IO_WRITE;
1651                 } else {
1652                         /* DMA read */
1653                         fcmd->fcp_cntl.cntl_read_data = 1;
1654                         fcmd->fcp_cntl.cntl_write_data = 0;
1655                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
1656                             CQ_TYPE_IO_READ;
1657                 }
1658                 fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
1659                     (uint32_t)cmd->cmd_dmacookie.dmac_address;
1660                 fpkt->fcal_socal_request.sr_dataseg[2].fc_count =
1661                     cmd->cmd_dmacookie.dmac_size;
1662                 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
1663                 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
1664                     cmd->cmd_dmacookie.dmac_size;
1665                 fcmd->fcp_data_len = cmd->cmd_dmacookie.dmac_size;
1666         } else {
1667                 /* not a read or write */
1668                 fcmd->fcp_cntl.cntl_read_data = 0;
1669                 fcmd->fcp_cntl.cntl_write_data = 0;
1670                 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
1671                 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
1672                 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
1673                     sizeof (struct fcp_cmd);
1674                 fcmd->fcp_data_len = 0;
1675         }
1676         fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
1677 
1678         return (cmd->cmd_pkt);
1679 }
1680 
1681 
1682 /*
1683  * destroy a SCSI packet -- called internally and by the transport
1684  */
1685 static void
1686 sf_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1687 {
1688         struct sf_pkt *cmd = PKT2CMD(pkt);
1689         struct sf *sf = ADDR2SF(ap);
1690         struct sf_target *target = ADDR2TARGET(ap);
1691         struct fcal_packet      *fpkt = cmd->cmd_fp_pkt;
1692 
1693 
1694         if (cmd->cmd_flags & CFLAG_DMAVALID) {
1695                 /* DMA was set up -- clean up */
1696                 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1697                 cmd->cmd_flags ^= CFLAG_DMAVALID;
1698         }
1699 
1700         /* take this packet off the doubly-linked list */
1701         mutex_enter(&target->sft_pkt_mutex);
1702         cmd->cmd_back->cmd_forw = cmd->cmd_forw;
1703         cmd->cmd_forw->cmd_back = cmd->cmd_back;
1704         mutex_exit(&target->sft_pkt_mutex);
1705 
1706         fpkt->fcal_pkt_flags = 0;
1707         /* free the packet */
1708         if ((cmd->cmd_flags &
1709             (CFLAG_FREE | CFLAG_PRIVEXTERN | CFLAG_SCBEXTERN)) == 0) {
1710                 /* just a regular packet */
1711                 ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
1712                 cmd->cmd_flags = CFLAG_FREE;
1713                 kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1714         } else {
1715                 /* a packet with extra memory */
1716                 sf_pkt_destroy_extern(sf, cmd);
1717         }
1718 }
1719 
1720 
1721 /*
1722  * called by transport to unbind DMA handle
1723  */
1724 /* ARGSUSED */
1725 static void
1726 sf_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1727 {
1728         struct sf_pkt *cmd = PKT2CMD(pkt);
1729 
1730 
1731         if (cmd->cmd_flags & CFLAG_DMAVALID) {
1732                 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1733                 cmd->cmd_flags ^= CFLAG_DMAVALID;
1734         }
1735 
1736 }
1737 
1738 
1739 /*
1740  * called by transport to synchronize CPU and I/O views of memory
1741  */
1742 /* ARGSUSED */
1743 static void
1744 sf_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1745 {
1746         struct sf_pkt *cmd = PKT2CMD(pkt);
1747 
1748 
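             /*
              * sync the entire DMA object; direction depends on whether
              * this command was sending or receiving data
              */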
1749         if (cmd->cmd_flags & CFLAG_DMAVALID) {
1750                 if (ddi_dma_sync(cmd->cmd_dmahandle, (off_t)0, (size_t)0,
1751                     (cmd->cmd_flags & CFLAG_DMASEND) ?
1752                     DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1753                     DDI_SUCCESS) {
1754                         cmn_err(CE_WARN, "sf: sync pkt failed");
1755                 }
1756         }
1757 }
1758 
1759 
1760 /*
1761  * routine for reset notification setup (register or cancel) -- called
1762  * by transport
1763  */
1764 static int
1765 sf_scsi_reset_notify(struct scsi_address *ap, int flag,
1766     void (*callback)(caddr_t), caddr_t arg)
1767 {
1768         struct sf       *sf = ADDR2SF(ap);
1769 
1770         return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1771             &sf->sf_mutex, &sf->sf_reset_notify_listf));
1772 }
1773 
1774 
1775 /*
1776  * called by transport to get port WWN property (except sun4u)
1777  */
1778 /* ARGSUSED */
1779 static int
1780 sf_scsi_get_name(struct scsi_device *sd, char *name, int len)
1781 {
1782         char tbuf[(FC_WWN_SIZE*2)+1];
1783         unsigned char wwn[FC_WWN_SIZE];
1784         int i, lun;
1785         dev_info_t *tgt_dip;
1786 
1787         tgt_dip = sd->sd_dev;
1788         i = sizeof (wwn);
1789         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1790             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1791             (caddr_t)&wwn, &i) != DDI_SUCCESS) {
1792                 name[0] = '\0';
1793                 return (0);
1794         }
1795         i = sizeof (lun);
1796         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1797             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1798             (caddr_t)&lun, &i) != DDI_SUCCESS) {
1799                 name[0] = '\0';
1800                 return (0);
1801         }
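             /* format the name as 'w' + hex port WWN + ',' + hex LUN */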
1802         for (i = 0; i < FC_WWN_SIZE; i++)
1803                 (void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
1804         (void) sprintf(name, "w%s,%x", tbuf, lun);
1805         return (1);
1806 }
1807 
1808 
1809 /*
1810  * called by transport to get target soft AL-PA (except sun4u)
1811  */
1812 /* ARGSUSED */
1813 static int
1814 sf_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
1815 {
1816         struct sf_target *target = ADDR2TARGET(&sd->sd_address);
1817 
1818         if (target == NULL)
1819                 return (0);
1820 
1821         (void) sprintf(name, "%x", target->sft_al_pa);
1822         return (1);
1823 }
1824 
1825 
1826 /*
1827  * add to the command/response buffer pool for this sf instance
1828  */
1829 static int
1830 sf_add_cr_pool(struct sf *sf)
1831 {
1832         int             cmd_buf_size;
1833         size_t          real_cmd_buf_size;
1834         int             rsp_buf_size;
1835         size_t          real_rsp_buf_size;
1836         uint_t          i, ccount;
1837         struct sf_cr_pool       *ptr;
1838         struct sf_cr_free_elem *cptr;
1839         caddr_t dptr, eptr;
1840         ddi_dma_cookie_t        cmd_cookie;
1841         ddi_dma_cookie_t        rsp_cookie;
1842         int             cmd_bound = FALSE, rsp_bound = FALSE;
1843 
1844 
1845         /* allocate room for the pool */
1846         if ((ptr = kmem_zalloc(sizeof (struct sf_cr_pool), KM_NOSLEEP)) ==
1847             NULL) {
1848                 return (DDI_FAILURE);
1849         }
1850 
1851         /* allocate a DMA handle for the command pool */
1852         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1853             DDI_DMA_DONTWAIT, NULL, &ptr->cmd_dma_handle) != DDI_SUCCESS) {
1854                 goto fail;
1855         }
1856 
1857         /*
1858          * Get a piece of memory in which to put commands
1859          */
1860         cmd_buf_size = (sizeof (struct fcp_cmd) * SF_ELEMS_IN_POOL + 7) & ~7;
1861         if (ddi_dma_mem_alloc(ptr->cmd_dma_handle, cmd_buf_size,
1862             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1863             DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->cmd_base,
1864             &real_cmd_buf_size, &ptr->cmd_acc_handle) != DDI_SUCCESS) {
1865                 goto fail;
1866         }
1867 
1868         /* bind the DMA handle to an address */
1869         if (ddi_dma_addr_bind_handle(ptr->cmd_dma_handle, NULL,
1870             ptr->cmd_base, real_cmd_buf_size,
1871             DDI_DMA_WRITE | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1872             NULL, &cmd_cookie, &ccount) != DDI_DMA_MAPPED) {
1873                 goto fail;
1874         }
1875         cmd_bound = TRUE;
1876         /* ensure only one cookie was allocated */
1877         if (ccount != 1) {
1878                 goto fail;
1879         }
1880 
1881         /* allocate a DMA handle for the response pool */
1882         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1883             DDI_DMA_DONTWAIT, NULL, &ptr->rsp_dma_handle) != DDI_SUCCESS) {
1884                 goto fail;
1885         }
1886 
1887         /*
1888          * Get a piece of memory in which to put responses
1889          */
1890         rsp_buf_size = FCP_MAX_RSP_IU_SIZE * SF_ELEMS_IN_POOL;
1891         if (ddi_dma_mem_alloc(ptr->rsp_dma_handle, rsp_buf_size,
1892             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1893             DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->rsp_base,
1894             &real_rsp_buf_size, &ptr->rsp_acc_handle) != DDI_SUCCESS) {
1895                 goto fail;
1896         }
1897 
1898         /* bind the DMA handle to an address */
1899         if (ddi_dma_addr_bind_handle(ptr->rsp_dma_handle, NULL,
1900             ptr->rsp_base, real_rsp_buf_size,
1901             DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1902             NULL, &rsp_cookie, &ccount) != DDI_DMA_MAPPED) {
1903                 goto fail;
1904         }
1905         rsp_bound = TRUE;
1906         /* ensure only one cookie was allocated */
1907         if (ccount != 1) {
1908                 goto fail;
1909         }
1910 
1911         /*
1912          * Generate a (cmd/rsp structure) free list
1913          */
1914         /* ensure ptr points to start of long word (8-byte block) */
1915         dptr = (caddr_t)(((uintptr_t)(ptr->cmd_base) + 7) & ~7);
1916         /* keep track of actual size after moving pointer */
1917         real_cmd_buf_size -= (dptr - ptr->cmd_base);
1918         eptr = ptr->rsp_base;
1919 
1920         /* set actual total number of entries */
1921         ptr->ntot = min((real_cmd_buf_size / sizeof (struct fcp_cmd)),
1922             (real_rsp_buf_size / FCP_MAX_RSP_IU_SIZE));
1923         ptr->nfree = ptr->ntot;
1924         ptr->free = (struct sf_cr_free_elem *)ptr->cmd_base;
1925         ptr->sf = sf;
1926 
1927         /* set up DMA for each pair of entries */
1928         i = 0;
1929         while (i < ptr->ntot) {
1930                 cptr = (struct sf_cr_free_elem *)dptr;
1931                 dptr += sizeof (struct fcp_cmd);
1932 
1933                 cptr->next = (struct sf_cr_free_elem *)dptr;
1934                 cptr->rsp = eptr;
1935 
1936                 cptr->cmd_dmac = cmd_cookie.dmac_address +
1937                     (uint32_t)((caddr_t)cptr - ptr->cmd_base);
1938 
1939                 cptr->rsp_dmac = rsp_cookie.dmac_address +
1940                     (uint32_t)((caddr_t)eptr - ptr->rsp_base);
1941 
1942                 eptr += FCP_MAX_RSP_IU_SIZE;
1943                 i++;
1944         }
1945 
1946         /* terminate the list */
1947         cptr->next = NULL;
1948 
1949         /* add this list at front of current one */
1950         mutex_enter(&sf->sf_cr_mutex);
1951         ptr->next = sf->sf_cr_pool;
1952         sf->sf_cr_pool = ptr;
1953         sf->sf_cr_pool_cnt++;
1954         mutex_exit(&sf->sf_cr_mutex);
1955 
1956         return (DDI_SUCCESS);
1957 
1958 fail:
1959         /* we failed so clean up */
1960         if (ptr->cmd_dma_handle != NULL) {
1961                 if (cmd_bound) {
1962                         (void) ddi_dma_unbind_handle(ptr->cmd_dma_handle);
1963                 }
1964                 ddi_dma_free_handle(&ptr->cmd_dma_handle);
1965         }
1966 
1967         if (ptr->rsp_dma_handle != NULL) {
1968                 if (rsp_bound) {
1969                         (void) ddi_dma_unbind_handle(ptr->rsp_dma_handle);
1970                 }
1971                 ddi_dma_free_handle(&ptr->rsp_dma_handle);
1972         }
1973 
1974         if (ptr->cmd_base != NULL) {
1975                 ddi_dma_mem_free(&ptr->cmd_acc_handle);
1976         }
1977 
1978         if (ptr->rsp_base != NULL) {
1979                 ddi_dma_mem_free(&ptr->rsp_acc_handle);
1980         }
1981 
1982         kmem_free((caddr_t)ptr, sizeof (struct sf_cr_pool));
1983         return (DDI_FAILURE);
1984 }
1985 
1986 
1987 /*
1988  * allocate a command/response buffer from the pool, allocating more
1989  * in the pool as needed
1990  */
1991 static int
1992 sf_cr_alloc(struct sf *sf, struct sf_pkt *cmd, int (*func)())
1993 {
1994         struct sf_cr_pool *ptr;
1995         struct sf_cr_free_elem *cptr;
1996 
1997 
1998         mutex_enter(&sf->sf_cr_mutex);
1999 
2000 try_again:
2001 
2002         /* find a free buffer in the existing pool */
2003         ptr = sf->sf_cr_pool;
2004         while (ptr != NULL) {
2005                 if (ptr->nfree != 0) {
2006                         ptr->nfree--;
2007                         break;
2008                 } else {
2009                         ptr = ptr->next;
2010                 }
2011         }
2012 
2013         /* did we find a free buffer ? */
2014         if (ptr != NULL) {
2015                 /* we found a free buffer -- take it off the free list */
2016                 cptr = ptr->free;
2017                 ptr->free = cptr->next;
2018                 mutex_exit(&sf->sf_cr_mutex);
2019                 /* set up the command to use the buffer pair */
2020                 cmd->cmd_block = (struct fcp_cmd *)cptr;
2021                 cmd->cmd_dmac = cptr->cmd_dmac;
2022                 cmd->cmd_rsp_dmac = cptr->rsp_dmac;
2023                 cmd->cmd_rsp_block = (struct fcp_rsp *)cptr->rsp;
2024                 cmd->cmd_cr_pool = ptr;
2025                 return (DDI_SUCCESS);           /* success */
2026         }
2027 
2028         /* no free buffer available -- can we allocate more ? */
2029         if (sf->sf_cr_pool_cnt < SF_CR_POOL_MAX) {
2030                 /* we need to allocate more buffer pairs */
2031                 if (sf->sf_cr_flag) {
2032                         /* somebody already allocating for this instance */
2033                         if (func == SLEEP_FUNC) {
2034                                 /* user wants to wait */
2035                                 cv_wait(&sf->sf_cr_cv, &sf->sf_cr_mutex);
2036                                 /* we've been woken so go try again */
2037                                 goto try_again;
2038                         }
2039                         /* user does not want to wait */
2040                         mutex_exit(&sf->sf_cr_mutex);
2041                         sf->sf_stats.cralloc_failures++;
2042                         return (DDI_FAILURE);   /* give up */
2043                 }
2044                 /* set flag saying we're allocating */
2045                 sf->sf_cr_flag = 1;
2046                 mutex_exit(&sf->sf_cr_mutex);
2047                 /* add to our pool */
2048                 if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
2049                         /* couldn't add to our pool for some reason */
2050                         mutex_enter(&sf->sf_cr_mutex);
2051                         sf->sf_cr_flag = 0;
2052                         cv_broadcast(&sf->sf_cr_cv);
2053                         mutex_exit(&sf->sf_cr_mutex);
2054                         sf->sf_stats.cralloc_failures++;
2055                         return (DDI_FAILURE);   /* give up */
2056                 }
2057                 /*
2058                  * clear flag saying we're allocating and tell all
2059                  * others that care
2060                  */
2061                 mutex_enter(&sf->sf_cr_mutex);
2062                 sf->sf_cr_flag = 0;
2063                 cv_broadcast(&sf->sf_cr_cv);
2064                 /* now that we have more buffers try again */
2065                 goto try_again;
2066         }
2067 
2068         /* we don't have room to allocate any more buffers */
2069         mutex_exit(&sf->sf_cr_mutex);
2070         sf->sf_stats.cralloc_failures++;
2071         return (DDI_FAILURE);                   /* give up */
2072 }
2073 
2074 
2075 /*
2076  * free a cmd/response buffer pair in our pool
2077  */
2078 static void
2079 sf_cr_free(struct sf_cr_pool *cp, struct sf_pkt *cmd)
2080 {
2081         struct sf *sf = cp->sf;
2082         struct sf_cr_free_elem *elem;
2083 
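             /*
              * turn the command buffer back into a free-list element,
              * remembering its response buffer and DMA addresses
              */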
2084         elem = (struct sf_cr_free_elem *)cmd->cmd_block;
2085         elem->rsp = (caddr_t)cmd->cmd_rsp_block;
2086         elem->cmd_dmac = cmd->cmd_dmac;
2087         elem->rsp_dmac = cmd->cmd_rsp_dmac;
2088 
2089         mutex_enter(&sf->sf_cr_mutex);
2090         cp->nfree++;
2091         ASSERT(cp->nfree <= cp->ntot);
2092 
2093         elem->next = cp->free;
2094         cp->free = elem;
2095         mutex_exit(&sf->sf_cr_mutex);
2096 }
2097 
2098 
2099 /*
2100  * free one completely unused cmd/response buffer pool, if any
2101  */
2102 static void
2103 sf_crpool_free(struct sf *sf)
2104 {
2105         struct sf_cr_pool *cp, *prev;
2106 
2107         prev = NULL;
2108         mutex_enter(&sf->sf_cr_mutex);
2109         cp = sf->sf_cr_pool;
2110         while (cp != NULL) {
2111                 if (cp->nfree == cp->ntot) {
2112                         if (prev != NULL) {
2113                                 prev->next = cp->next;
2114                         } else {
2115                                 sf->sf_cr_pool = cp->next;
2116                         }
2117                         sf->sf_cr_pool_cnt--;
2118                         mutex_exit(&sf->sf_cr_mutex);
2119 
2120                         (void) ddi_dma_unbind_handle(cp->cmd_dma_handle);
2121                         ddi_dma_free_handle(&cp->cmd_dma_handle);
2122                         (void) ddi_dma_unbind_handle(cp->rsp_dma_handle);
2123                         ddi_dma_free_handle(&cp->rsp_dma_handle);
2124                         ddi_dma_mem_free(&cp->cmd_acc_handle);
2125                         ddi_dma_mem_free(&cp->rsp_acc_handle);
2126                         kmem_free((caddr_t)cp, sizeof (struct sf_cr_pool));
2127                         return;
2128                 }
2129                 prev = cp;
2130                 cp = cp->next;
2131         }
2132         mutex_exit(&sf->sf_cr_mutex);
2133 }
2134 
2135 
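     /*
      * kmem cache constructor: initialize the per-command abort mutex
      * and clear the buffer and DMA handle pointers
      */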
2136 /* ARGSUSED */
2137 static int
2138 sf_kmem_cache_constructor(void *buf, void *arg, int size)
2139 {
2140         struct sf_pkt *cmd = buf;
2141 
2142         mutex_init(&cmd->cmd_abort_mutex, NULL, MUTEX_DRIVER, NULL);
2143         cmd->cmd_block = NULL;
2144         cmd->cmd_dmahandle = NULL;
2145         return (0);
2146 }
2147 
2148 
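     /*
      * kmem cache destructor: release any DMA handle and cmd/response
      * buffers still held by this command
      */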
2149 /* ARGSUSED */
2150 static void
2151 sf_kmem_cache_destructor(void *buf, void *size)
2152 {
2153         struct sf_pkt *cmd = buf;
2154 
2155         if (cmd->cmd_dmahandle != NULL) {
2156                 ddi_dma_free_handle(&cmd->cmd_dmahandle);
2157         }
2158 
2159         if (cmd->cmd_block != NULL) {
2160                 sf_cr_free(cmd->cmd_cr_pool, cmd);
2161         }
2162         mutex_destroy(&cmd->cmd_abort_mutex);
2163 }
2164 
2165 
2166 /*
2167  * called by transport when a state change occurs
2168  */
2169 static void
2170 sf_statec_callback(void *arg, int msg)
2171 {
2172         struct sf *sf = (struct sf *)arg;
2173         struct sf_target        *target;
2174         int i;
2175         struct sf_pkt *cmd;
2176         struct scsi_pkt *pkt;
2177 
2178 
2179 
2180         switch (msg) {
2181 
2182         case FCAL_STATUS_LOOP_ONLINE: {
2183                 uchar_t         al_pa;          /* to save AL-PA */
2184                 int             ret;            /* ret value from getmap */
2185                 int             lip_cnt;        /* to save current count */
2186                 int             cnt;            /* map length */
2187 
2188                 /*
2189                  * the loop has gone online
2190                  */
2191                 SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop online\n",
2192                     ddi_get_instance(sf->sf_dip)));
2193                 mutex_enter(&sf->sf_mutex);
2194                 sf->sf_lip_cnt++;
2195                 sf->sf_state = SF_STATE_ONLINING;
2196                 mutex_exit(&sf->sf_mutex);
2197 
2198                 /* scan each target hash queue */
2199                 for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
2200                         target = sf->sf_wwn_lists[i];
2201                         while (target != NULL) {
2202                                 /*
2203                                  * foreach target, if it's not offline then
2204                                  * mark it as busy
2205                                  */
2206                                 mutex_enter(&target->sft_mutex);
2207                                 if (!(target->sft_state & SF_TARGET_OFFLINE))
2208                                         target->sft_state |= (SF_TARGET_BUSY
2209                                             | SF_TARGET_MARK);
2210 #ifdef DEBUG
2211                                 /*
2212                                  * for debugging, print out info on any
2213                                  * pending commands (left hanging)
2214                                  */
2215                                 cmd = target->sft_pkt_head;
2216                                 while (cmd != (struct sf_pkt *)&target->
2217                                     sft_pkt_head) {
2218                                         if (cmd->cmd_state ==
2219                                             SF_STATE_ISSUED) {
2220                                                 SF_DEBUG(1, (sf, CE_CONT,
2221                                                     "cmd 0x%p pending "
2222                                                     "after lip\n",
2223                                                     (void *)cmd->cmd_fp_pkt));
2224                                         }
2225                                         cmd = cmd->cmd_forw;
2226                                 }
2227 #endif
2228                                 mutex_exit(&target->sft_mutex);
2229                                 target = target->sft_next;
2230                         }
2231                 }
2232 
2233                 /*
2234                  * since the loop has just gone online get a new map from
2235                  * the transport
2236                  */
2237                 if ((ret = soc_get_lilp_map(sf->sf_sochandle, sf->sf_socp,
2238                     sf->sf_sochandle->fcal_portno, (uint32_t)sf->
2239                     sf_lilp_dmacookie.dmac_address, 1)) != FCAL_SUCCESS) {
2240                         if (sf_core && (sf_core & SF_CORE_LILP_FAILED)) {
2241                                 (void) soc_take_core(sf->sf_sochandle,
2242                                     sf->sf_socp);
2243                                 sf_core = 0;
2244                         }
2245                         sf_log(sf, CE_WARN,
2246                             "!soc lilp map failed status=0x%x\n", ret);
2247                         mutex_enter(&sf->sf_mutex);
2248                         sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2249                         sf->sf_lip_cnt++;
2250                         sf->sf_state = SF_STATE_OFFLINE;
2251                         mutex_exit(&sf->sf_mutex);
2252                         return;
2253                 }
2254 
2255                 /* ensure consistent view of DMA memory */
2256                 (void) ddi_dma_sync(sf->sf_lilp_dmahandle, (off_t)0, (size_t)0,
2257                     DDI_DMA_SYNC_FORKERNEL);
2258 
2259                 /* how many entries in map ? */
2260                 cnt = sf->sf_lilp_map->lilp_length;
2261                 if (cnt >= SF_MAX_LILP_ENTRIES) {
2262                         sf_log(sf, CE_WARN, "invalid lilp map\n");
2263                         return;
2264                 }
2265 
2266                 mutex_enter(&sf->sf_mutex);
2267                 sf->sf_device_count = cnt - 1;
2268                 sf->sf_al_pa = sf->sf_lilp_map->lilp_myalpa;
2269                 lip_cnt = sf->sf_lip_cnt;
2270                 al_pa = sf->sf_al_pa;
2271 
2272                 SF_DEBUG(1, (sf, CE_CONT,
2273                     "!lilp map has %d entries, al_pa is %x\n", cnt, al_pa));
2274 
2275                 /*
2276                  * since the last entry of the map may be mine (common) check
2277                  * for that, and if it is we have one less entry to look at
2278                  */
2279                 if (sf->sf_lilp_map->lilp_alpalist[cnt-1] == al_pa) {
2280                         cnt--;
2281                 }
2282                 /* If we didn't get a valid loop map enable all targets */
2283                 if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
2284                         for (i = 0; i < sizeof (sf_switch_to_alpa); i++)
2285                                 sf->sf_lilp_map->lilp_alpalist[i] =
2286                                     sf_switch_to_alpa[i];
2287                         cnt = i;
2288                         sf->sf_device_count = cnt - 1;
2289                 }
2290                 if (sf->sf_device_count == 0) {
2291                         sf_finish_init(sf, lip_cnt);
2292                         mutex_exit(&sf->sf_mutex);
2293                         break;
2294                 }
2295                 mutex_exit(&sf->sf_mutex);
2296 
2297                 SF_DEBUG(2, (sf, CE_WARN,
2298                     "!statec_callback: starting with %d targets\n",
2299                     sf->sf_device_count));
2300 
2301                 /* scan loop map, logging into all ports (except mine) */
2302                 for (i = 0; i < cnt; i++) {
2303                         SF_DEBUG(1, (sf, CE_CONT,
2304                             "!lilp map entry %d = %x,%x\n", i,
2305                             sf->sf_lilp_map->lilp_alpalist[i],
2306                             sf_alpa_to_switch[
2307                             sf->sf_lilp_map->lilp_alpalist[i]]));
2308                         /* is this entry for somebody else ? */
2309                         if (sf->sf_lilp_map->lilp_alpalist[i] != al_pa) {
2310                                 /* do a PLOGI to this port */
2311                                 if (!sf_login(sf, LA_ELS_PLOGI,
2312                                     sf->sf_lilp_map->lilp_alpalist[i],
2313                                     sf->sf_lilp_map->lilp_alpalist[cnt-1],
2314                                     lip_cnt)) {
2315                                         /* a problem logging in */
2316                                         mutex_enter(&sf->sf_mutex);
2317                                         if (lip_cnt == sf->sf_lip_cnt) {
2318                                                 /*
2319                                                  * problem not from a new LIP
2320                                                  */
2321                                                 sf->sf_device_count--;
2322                                                 ASSERT(sf->sf_device_count
2323                                                     >= 0);
2324                                                 if (sf->sf_device_count == 0) {
2325                                                         sf_finish_init(sf,
2326                                                             lip_cnt);
2327                                                 }
2328                                         }
2329                                         mutex_exit(&sf->sf_mutex);
2330                                 }
2331                         }
2332                 }
2333                 break;
2334         }
2335 
2336         case FCAL_STATUS_ERR_OFFLINE:
2337                 /*
2338                  * loop has gone offline due to an error
2339                  */
2340                 SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop offline\n",
2341                     ddi_get_instance(sf->sf_dip)));
2342                 mutex_enter(&sf->sf_mutex);
2343                 sf->sf_lip_cnt++;
2344                 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2345                 if (!sf->sf_online_timer) {
2346                         sf->sf_online_timer = sf_watchdog_time +
2347                             SF_ONLINE_TIMEOUT;
2348                 }
2349                 /*
2350                  * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2351                  * since throttling logic in sf_watch() depends on
2352                  * preservation of this flag while device is suspended
2353                  */
2354                 if (sf->sf_state & SF_STATE_SUSPENDED) {
2355                         sf->sf_state |= SF_STATE_OFFLINE;
2356                         SF_DEBUG(1, (sf, CE_CONT,
2357                             "sf_statec_callback, sf%d: "
2358                             "got FCAL_STATE_OFFLINE during DDI_SUSPEND\n",
2359                             ddi_get_instance(sf->sf_dip)));
2360                 } else {
2361                         sf->sf_state = SF_STATE_OFFLINE;
2362                 }
2363 
2364                 /* scan each possible target on the loop */
2365                 for (i = 0; i < sf_max_targets; i++) {
2366                         target = sf->sf_targets[i];
2367                         while (target != NULL) {
2368                                 mutex_enter(&target->sft_mutex);
2369                                 if (!(target->sft_state & SF_TARGET_OFFLINE))
2370                                         target->sft_state |= (SF_TARGET_BUSY
2371                                             | SF_TARGET_MARK);
2372                                 mutex_exit(&target->sft_mutex);
2373                                 target = target->sft_next_lun;
2374                         }
2375                 }
2376                 mutex_exit(&sf->sf_mutex);
2377                 break;
2378 
2379         case FCAL_STATE_RESET: {
2380                 struct sf_els_hdr       *privp; /* ptr to private list */
2381                 struct sf_els_hdr       *tmpp1; /* tmp prev hdr ptr */
2382                 struct sf_els_hdr       *tmpp2; /* tmp next hdr ptr */
2383                 struct sf_els_hdr       *head;  /* to save our private list */
2384                 struct fcal_packet      *fpkt;  /* ptr to pkt in hdr */
2385 
2386                 /*
2387                  * a transport reset
2388                  */
2389                 SF_DEBUG(1, (sf, CE_CONT, "!sf%d: soc reset\n",
2390                     ddi_get_instance(sf->sf_dip)));
2391                 tmpp1 = head = NULL;
2392                 mutex_enter(&sf->sf_mutex);
2393                 sf->sf_lip_cnt++;
2394                 sf->sf_timer = sf_watchdog_time + SF_RESET_TIMEOUT;
2395                 /*
2396                  * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2397                  * since throttling logic in sf_watch() depends on
2398                  * preservation of this flag while device is suspended
2399                  */
2400                 if (sf->sf_state & SF_STATE_SUSPENDED) {
2401                         sf->sf_state |= SF_STATE_OFFLINE;
2402                         SF_DEBUG(1, (sf, CE_CONT,
2403                             "sf_statec_callback, sf%d: "
2404                             "got FCAL_STATE_RESET during DDI_SUSPEND\n",
2405                             ddi_get_instance(sf->sf_dip)));
2406                 } else {
2407                         sf->sf_state = SF_STATE_OFFLINE;
2408                 }
2409 
2410                 /*
2411                  * scan each possible target on the loop, looking for targets
2412                  * that need callbacks run
2413                  */
2414                 for (i = 0; i < sf_max_targets; i++) {
2415                         target = sf->sf_targets[i];
2416                         while (target != NULL) {
2417                                 if (!(target->sft_state & SF_TARGET_OFFLINE)) {
2418                                         target->sft_state |= (SF_TARGET_BUSY
2419                                             | SF_TARGET_MARK);
2420                                         mutex_exit(&sf->sf_mutex);
2421                                         /*
2422                                          * run remove event callbacks for lun
2423                                          *
2424                                          * We have a nasty race condition here
2425                                          * 'cause we're dropping this mutex to
2426                                          * run the callback and expect the
2427                                          * linked list to be the same.
2428                                          */
2429                                         (void) ndi_event_retrieve_cookie(
2430                                             sf->sf_event_hdl, target->sft_dip,
2431                                             FCAL_REMOVE_EVENT, &sf_remove_eid,
2432                                             NDI_EVENT_NOPASS);
2433                                         (void) ndi_event_run_callbacks(
2434                                             sf->sf_event_hdl,
2435                                             target->sft_dip,
2436                                             sf_remove_eid, NULL);
2437                                         mutex_enter(&sf->sf_mutex);
2438                                 }
2439                                 target = target->sft_next_lun;
2440                         }
2441                 }
2442 
2443                 /*
2444                  * scan for ELS commands that are in transport, not complete,
2445                  * and have a valid timeout, building a private list
2446                  */
2447                 privp = sf->sf_els_list;
2448                 while (privp != NULL) {
2449                         fpkt = privp->fpkt;
2450                         if ((fpkt->fcal_cmd_state & FCAL_CMD_IN_TRANSPORT) &&
2451                             (!(fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE)) &&
2452                             (privp->timeout != SF_INVALID_TIMEOUT)) {
2453                                 /*
2454                                  * cmd in transport && not complete &&
2455                                  * timeout valid
2456                                  *
2457                                  * move this entry from ELS input list to our
2458                                  * private list
2459                                  */
2460 
2461                                 tmpp2 = privp->next; /* save ptr to next */
2462 
2463                                 /* push this on private list head */
2464                                 privp->next = head;
2465                                 head = privp;
2466 
2467                                 /* remove this entry from input list */
2468                                 if (tmpp1 != NULL) {
2469                                         /*
2470                                          * remove this entry from somewhere in
2471                                          * the middle of the list
2472                                          */
2473                                         tmpp1->next = tmpp2;
2474                                         if (tmpp2 != NULL) {
2475                                                 tmpp2->prev = tmpp1;
2476                                         }
2477                                 } else {
2478                                         /*
2479                                          * remove this entry from the head
2480                                          * of the list
2481                                          */
2482                                         sf->sf_els_list = tmpp2;
2483                                         if (tmpp2 != NULL) {
2484                                                 tmpp2->prev = NULL;
2485                                         }
2486                                 }
2487                                 privp = tmpp2;  /* skip to next entry */
2488                         } else {
2489                                 tmpp1 = privp;  /* save ptr to prev entry */
2490                                 privp = privp->next; /* skip to next entry */
2491                         }
2492                 }
2493 
2494                 mutex_exit(&sf->sf_mutex);
2495 
2496                 /*
2497                  * foreach cmd in our list free the ELS packet associated
2498                  * with it
2499                  */
2500                 privp = head;
2501                 while (privp != NULL) {
2502                         fpkt = privp->fpkt;
2503                         privp = privp->next;
2504                         sf_els_free(fpkt);
2505                 }
2506 
2507                 /*
2508                  * scan for commands from each possible target
2509                  */
2510                 for (i = 0; i < sf_max_targets; i++) {
2511                         target = sf->sf_targets[i];
2512                         while (target != NULL) {
2513                                 /*
2514                                  * scan all active commands for this target,
2515                                  * looking for commands that have been issued,
2516                                  * are in transport, and are not yet complete
2517                                  * (so we can terminate them because of the
2518                                  * reset)
2519                                  */
2520                                 mutex_enter(&target->sft_pkt_mutex);
2521                                 cmd = target->sft_pkt_head;
2522                                 while (cmd != (struct sf_pkt *)&target->
2523                                     sft_pkt_head) {
2524                                         fpkt = cmd->cmd_fp_pkt;
2525                                         mutex_enter(&cmd->cmd_abort_mutex);
2526                                         if ((cmd->cmd_state ==
2527                                             SF_STATE_ISSUED) &&
2528                                             (fpkt->fcal_cmd_state &
2529                                             FCAL_CMD_IN_TRANSPORT) &&
2530                                             (!(fpkt->fcal_cmd_state &
2531                                             FCAL_CMD_COMPLETE))) {
2532                                                 /* a command to be reset */
2533                                                 pkt = cmd->cmd_pkt;
2534                                                 pkt->pkt_reason = CMD_RESET;
2535                                                 pkt->pkt_statistics |=
2536                                                     STAT_BUS_RESET;
2537                                                 cmd->cmd_state = SF_STATE_IDLE;
2538                                                 mutex_exit(&cmd->
2539                                                     cmd_abort_mutex);
2540                                                 mutex_exit(&target->
2541                                                     sft_pkt_mutex);
2542                                                 if (pkt->pkt_comp != NULL) {
2543                                                         (*pkt->pkt_comp)(pkt);
2544                                                 }
2545                                                 mutex_enter(&target->
2546                                                     sft_pkt_mutex);
2547                                                 cmd = target->sft_pkt_head;
2548                                         } else {
2549                                                 mutex_exit(&cmd->
2550                                                     cmd_abort_mutex);
2551                                                 /* get next command */
2552                                                 cmd = cmd->cmd_forw;
2553                                         }
2554                                 }
2555                                 mutex_exit(&target->sft_pkt_mutex);
2556                                 target = target->sft_next_lun;
2557                         }
2558                 }
2559 
2560                 /*
2561                  * grab this instance's pending packet queue, resetting all
2562                  * remaining queued commands
2563                  */
2564                 mutex_enter(&sf->sf_mutex);
2565                 cmd = sf->sf_pkt_head;
2566                 sf->sf_pkt_head = NULL;
2567                 mutex_exit(&sf->sf_mutex);
2568 
2569                 while (cmd != NULL) {
2570                         pkt = cmd->cmd_pkt;
2571                         cmd = cmd->cmd_next;
2572                         pkt->pkt_reason = CMD_RESET;
2573                         pkt->pkt_statistics |= STAT_BUS_RESET;
2574                         if (pkt->pkt_comp != NULL) {
2575                                 (*pkt->pkt_comp)(pkt);
2576                         }
2577                 }
2578                 break;
2579         }
2580 
2581         default:
2582                 break;
2583         }
2584 }
2585 
2586 
2587 /*
2588  * called to send a PLOGI (N_port login) or LOGO ELS request to a
2589  * destination ID, returning TRUE upon success, else returning FALSE
2590  */
2591 static int
2592 sf_login(struct sf *sf, uchar_t els_code, uchar_t dest_id, uint_t arg1,
2593     int lip_cnt)
2594 {
2595         struct la_els_logi      *logi;
2596         struct  sf_els_hdr      *privp;
2597 
2598 
2599         if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
2600             sizeof (union sf_els_cmd), sizeof (union sf_els_rsp),
2601             (caddr_t *)&privp, (caddr_t *)&logi) == NULL) {
2602                 sf_log(sf, CE_WARN, "Cannot allocate PLOGI for target %x "
2603                     "due to DVMA shortage.\n", sf_alpa_to_switch[dest_id]);
2604                 return (FALSE);
2605         }
2606 
2607         privp->lip_cnt = lip_cnt;
2608         if (els_code == LA_ELS_PLOGI) {
2609                 bcopy((caddr_t)sf->sf_sochandle->fcal_loginparms,
2610                     (caddr_t)&logi->common_service, sizeof (struct la_els_logi)
2611                     - 4);
2612                 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2613                     (caddr_t)&logi->nport_ww_name, sizeof (la_wwn_t));
2614                 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2615                     (caddr_t)&logi->node_ww_name, sizeof (la_wwn_t));
2616                 bzero((caddr_t)&logi->reserved, 16);
2617         } else if (els_code == LA_ELS_LOGO) {
2618                 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2619                     (caddr_t)&(((struct la_els_logo *)logi)->nport_ww_name), 8);
2620                 ((struct la_els_logo    *)logi)->reserved = 0;
2621                 ((struct la_els_logo    *)logi)->nport_id[0] = 0;
2622                 ((struct la_els_logo    *)logi)->nport_id[1] = 0;
2623                 ((struct la_els_logo    *)logi)->nport_id[2] = arg1;
2624         }
2625 
2626         privp->els_code = els_code;
2627         logi->ls_code = els_code;
2628         logi->mbz[0] = 0;
2629         logi->mbz[1] = 0;
2630         logi->mbz[2] = 0;
2631 
2632         privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2633         return (sf_els_transport(sf, privp));
2634 }
2635 
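     /*
      * Note: target discovery proceeds as a chain of exchanges, each step
      * continued from the previous step's completion callback:
      *   PLOGI -> PRLI (sf_do_prli) -> ADISC (sf_do_adisc) ->
      *   REPORT_LUNS (sf_do_reportlun) -> INQUIRY (sf_do_inquiry)
      */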
2636 
2637 /*
2638  * send an ELS IU via the transport,
2639  * returning TRUE upon success, else returning FALSE
2640  */
2641 static int
2642 sf_els_transport(struct sf *sf, struct sf_els_hdr *privp)
2643 {
2644         struct fcal_packet *fpkt = privp->fpkt;
2645 
2646 
2647         (void) ddi_dma_sync(privp->cmd_dma_handle, (off_t)0, (size_t)0,
2648             DDI_DMA_SYNC_FORDEV);
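             /* link this request onto the head of the outstanding ELS list */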
2649         privp->prev = NULL;
2650         mutex_enter(&sf->sf_mutex);
2651         privp->next = sf->sf_els_list;
2652         if (sf->sf_els_list != NULL) {
2653                 sf->sf_els_list->prev = privp;
2654         }
2655         sf->sf_els_list = privp;
2656         mutex_exit(&sf->sf_mutex);
2657 
2658         /* call the transport to send a packet */
2659         if (soc_transport(sf->sf_sochandle, fpkt, FCAL_NOSLEEP,
2660             CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
2661                 mutex_enter(&sf->sf_mutex);
2662                 if (privp->prev != NULL) {
2663                         privp->prev->next = privp->next;
2664                 }
2665                 if (privp->next != NULL) {
2666                         privp->next->prev = privp->prev;
2667                 }
2668                 if (sf->sf_els_list == privp) {
2669                         sf->sf_els_list = privp->next;
2670                 }
2671                 mutex_exit(&sf->sf_mutex);
2672                 sf_els_free(fpkt);
2673                 return (FALSE);                 /* failure */
2674         }
2675         return (TRUE);                          /* success */
2676 }
2677 
2678 
2679 /*
2680  * called as the pkt_comp routine for ELS FC packets
2681  */
2682 static void
2683 sf_els_callback(struct fcal_packet *fpkt)
2684 {
2685         struct sf_els_hdr *privp = fpkt->fcal_pkt_private;
2686         struct sf *sf = privp->sf;
2687         struct sf *tsf;
2688         int tgt_id;
2689         struct la_els_logi *ptr = (struct la_els_logi *)privp->rsp;
2690         struct la_els_adisc *adisc = (struct la_els_adisc *)ptr;
2691         struct  sf_target *target;
2692         short   ncmds;
2693         short   free_pkt = TRUE;
2694 
2695 
2696         /*
2697          * we've received an ELS callback, i.e. an ELS packet has arrived
2698          */
2699 
2700         /* take the current packet off of the queue */
2701         mutex_enter(&sf->sf_mutex);
2702         if (privp->timeout == SF_INVALID_TIMEOUT) {
2703                 mutex_exit(&sf->sf_mutex);
2704                 return;
2705         }
2706         if (privp->prev != NULL) {
2707                 privp->prev->next = privp->next;
2708         }
2709         if (privp->next != NULL) {
2710                 privp->next->prev = privp->prev;
2711         }
2712         if (sf->sf_els_list == privp) {
2713                 sf->sf_els_list = privp->next;
2714         }
2715         privp->prev = privp->next = NULL;
2716         mutex_exit(&sf->sf_mutex);
2717 
2718         /* get # pkts in this callback */
2719         ncmds = fpkt->fcal_ncmds;
2720         ASSERT(ncmds >= 0);
2721         mutex_enter(&sf->sf_cmd_mutex);
2722         sf->sf_ncmds = ncmds;
2723         mutex_exit(&sf->sf_cmd_mutex);
2724 
2725         /* sync the response buffer so the kernel sees the device's reply */
2726         (void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0, (size_t)0,
2727             DDI_DMA_SYNC_FORKERNEL);
2728 
2729         /* was this an OK ACC msg ?? */
2730         if ((fpkt->fcal_pkt_status == FCAL_STATUS_OK) &&
2731             (ptr->ls_code == LA_ELS_ACC)) {
2732 
2733                 /*
2734                  * this was an OK ACC pkt
2735                  */
2736 
2737                 switch (privp->els_code) {
2738                 case LA_ELS_PLOGI:
2739                         /*
2740                          * was able to do an N_port login
2741                          */
2742                         SF_DEBUG(2, (sf, CE_CONT,
2743                             "!PLOGI to al_pa %x succeeded, wwn %x%x\n",
2744                             privp->dest_nport_id,
2745                             *((int *)&ptr->nport_ww_name.raw_wwn[0]),
2746                             *((int *)&ptr->nport_ww_name.raw_wwn[4])));
2747                         /* try to do a process login */
2748                         if (!sf_do_prli(sf, privp, ptr)) {
2749                                 free_pkt = FALSE;
2750                                 goto fail;      /* PRLI failed */
2751                         }
2752                         break;
2753                 case LA_ELS_PRLI:
2754                         /*
2755                          * was able to do a process login
2756                          */
2757                         SF_DEBUG(2, (sf, CE_CONT,
2758                             "!PRLI to al_pa %x succeeded\n",
2759                             privp->dest_nport_id));
2760                         /* try to do address discovery */
2761                         if (sf_do_adisc(sf, privp) != 1) {
2762                                 free_pkt = FALSE;
2763                                 goto fail;      /* ADISC failed */
2764                         }
2765                         break;
2766                 case LA_ELS_ADISC:
2767                         /*
2768                          * found a target via ADISC
2769                          */
2770 
2771                         SF_DEBUG(2, (sf, CE_CONT,
2772                             "!ADISC to al_pa %x succeeded\n",
2773                             privp->dest_nport_id));
2774 
2775                         /* create the target info */
2776                         if ((target = sf_create_target(sf, privp,
2777                             sf_alpa_to_switch[(uchar_t)adisc->hard_address],
2778                             (int64_t)0))
2779                             == NULL) {
2780                                 goto fail;      /* can't create target */
2781                         }
2782 
2783                         /*
2784                          * ensure address discovered matches what we thought
2785                          * it would be
2786                          */
2787                         if ((uchar_t)adisc->hard_address !=
2788                             privp->dest_nport_id) {
2789                                 sf_log(sf, CE_WARN,
2790                                     "target 0x%x, AL-PA 0x%x and "
2791                                     "hard address 0x%x don't match\n",
2792                                     sf_alpa_to_switch[
2793                                     (uchar_t)privp->dest_nport_id],
2794                                     privp->dest_nport_id,
2795                                     (uchar_t)adisc->hard_address);
2796                                 mutex_enter(&sf->sf_mutex);
2797                                 sf_offline_target(sf, target);
2798                                 mutex_exit(&sf->sf_mutex);
2799                                 goto fail;      /* addr doesn't match */
2800                         }
2801                         /*
2802                          * get the list of LUNs from the target
2803                          */
2804                         if (!sf_do_reportlun(sf, privp, target)) {
2805                                 mutex_enter(&sf->sf_mutex);
2806                                 sf_offline_target(sf, target);
2807                                 mutex_exit(&sf->sf_mutex);
2808                                 free_pkt = FALSE;
2809                                 goto fail;      /* REPORT_LUNS failed */
2810                         }
2811                         break;
2812                 default:
2813                         SF_DEBUG(2, (sf, CE_CONT,
2814                             "!ELS %x to al_pa %x succeeded\n",
2815                             privp->els_code, privp->dest_nport_id));
2816                         sf_els_free(fpkt);
2817                         break;
2818                 }
2819 
2820         } else {
2821 
2822                 /*
2823                  * oh oh -- this was not an OK ACC packet
2824                  */
2825 
2826                 /* get target ID from dest loop address */
2827                 tgt_id = sf_alpa_to_switch[(uchar_t)privp->dest_nport_id];
2828 
2829                 /* keep track of failures */
2830                 sf->sf_stats.tstats[tgt_id].els_failures++;
2831                 if (++(privp->retries) < sf_els_retries &&
2832                     fpkt->fcal_pkt_status != FCAL_STATUS_OPEN_FAIL) {
2833                         if (fpkt->fcal_pkt_status ==
2834                             FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
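                                     /*
                                      * exchange resources on the loop are
                                      * exhausted; ask the sibling sf
                                      * instance (if any) to throttle back
                                      * as well
                                      */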
2835                                 tsf = sf->sf_sibling;
2836                                 if (tsf != NULL) {
2837                                         mutex_enter(&tsf->sf_cmd_mutex);
2838                                         tsf->sf_flag = 1;
2839                                         tsf->sf_throttle = SF_DECR_DELTA;
2840                                         mutex_exit(&tsf->sf_cmd_mutex);
2841                                 }
2842                         }
2843                         privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2844                         privp->prev = NULL;
2845 
2846                         mutex_enter(&sf->sf_mutex);
2847 
2848                         if (privp->lip_cnt == sf->sf_lip_cnt) {
2849                                 SF_DEBUG(1, (sf, CE_WARN,
2850                                     "!ELS %x to al_pa %x failed, retrying",
2851                                     privp->els_code, privp->dest_nport_id));
2852                                 privp->next = sf->sf_els_list;
2853                                 if (sf->sf_els_list != NULL) {
2854                                         sf->sf_els_list->prev = privp;
2855                                 }
2856 
2857                                 sf->sf_els_list = privp;
2858 
2859                                 mutex_exit(&sf->sf_mutex);
2860                                 /* device busy?  wait a bit ... */
2861                                 if (fpkt->fcal_pkt_status ==
2862                                     FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
2863                                         privp->delayed_retry = 1;
2864                                         return;
2865                                 }
2866                                 /* call the transport to send a pkt */
2867                                 if (soc_transport(sf->sf_sochandle, fpkt,
2868                                     FCAL_NOSLEEP, CQ_REQUEST_1) !=
2869                                     FCAL_TRANSPORT_SUCCESS) {
2870                                         mutex_enter(&sf->sf_mutex);
2871                                         if (privp->prev != NULL) {
2872                                                 privp->prev->next =
2873                                                     privp->next;
2874                                         }
2875                                         if (privp->next != NULL) {
2876                                                 privp->next->prev =
2877                                                     privp->prev;
2878                                         }
2879                                         if (sf->sf_els_list == privp) {
2880                                                 sf->sf_els_list = privp->next;
2881                                         }
2882                                         mutex_exit(&sf->sf_mutex);
2883                                         goto fail;
2884                                 } else
2885                                         return;
2886                         } else {
2887                                 mutex_exit(&sf->sf_mutex);
2888                                 goto fail;
2889                         }
2890                 } else {
2891 #ifdef  DEBUG
2892                         if (fpkt->fcal_pkt_status != 0x36 || sfdebug > 4) {
2893                         SF_DEBUG(2, (sf, CE_NOTE, "ELS %x to al_pa %x failed",
2894                             privp->els_code, privp->dest_nport_id));
2895                         if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
2896                                 SF_DEBUG(2, (sf, CE_NOTE,
2897                                     "els reply code = %x", ptr->ls_code));
2898                                 if (ptr->ls_code == LA_ELS_RJT)
2899                                         SF_DEBUG(1, (sf, CE_CONT,
2900                                             "LS_RJT reason = %x\n",
2901                                             *(((uint_t *)ptr) + 1)));
2902                         } else
2903                                 SF_DEBUG(2, (sf, CE_NOTE,
2904                                     "fc packet status = %x",
2905                                     fpkt->fcal_pkt_status));
2906                         }
2907 #endif
2908                         goto fail;
2909                 }
2910         }
2911         return;                                 /* success */
2912 fail:
2913         mutex_enter(&sf->sf_mutex);
2914         if (sf->sf_lip_cnt == privp->lip_cnt) {
2915                 sf->sf_device_count--;
2916                 ASSERT(sf->sf_device_count >= 0);
2917                 if (sf->sf_device_count == 0) {
2918                         sf_finish_init(sf, privp->lip_cnt);
2919                 }
2920         }
2921         mutex_exit(&sf->sf_mutex);
2922         if (free_pkt) {
2923                 sf_els_free(fpkt);
2924         }
2925 }
2926 
2927 
2928 /*
2929  * send a PRLI (process login) ELS IU via the transport,
2930  * returning TRUE upon success, else returning FALSE
2931  */
2932 static int
2933 sf_do_prli(struct sf *sf, struct sf_els_hdr *privp, struct la_els_logi *ptr)
2934 {
2935         struct la_els_prli      *prli = (struct la_els_prli *)privp->cmd;
2936         struct fcp_prli         *fprli;
2937         struct  fcal_packet     *fpkt = privp->fpkt;
2938 
2939 
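             /* build the PRLI payload: a single FCP service-parameter page */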
2940         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2941             sizeof (struct la_els_prli);
2942         privp->els_code = LA_ELS_PRLI;
2943         fprli = (struct fcp_prli *)prli->service_params;
2944         prli->ls_code = LA_ELS_PRLI;
2945         prli->page_length = 0x10;
2946         prli->payload_length = sizeof (struct la_els_prli);
2947         fprli->type = 0x08;     /* FC-4 type for SCSI-FCP (no define) */
2948         fprli->resvd1 = 0;
2949         fprli->orig_process_assoc_valid = 0;
2950         fprli->resp_process_assoc_valid = 0;
2951         fprli->establish_image_pair = 1;
2952         fprli->resvd2 = 0;
2953         fprli->resvd3 = 0;
2954         fprli->data_overlay_allowed = 0;
2955         fprli->initiator_fn = 1;
2956         fprli->target_fn = 0;
2957         fprli->cmd_data_mixed = 0;
2958         fprli->data_resp_mixed = 0;
2959         fprli->read_xfer_rdy_disabled = 1;
2960         fprli->write_xfer_rdy_disabled = 0;
2961 
2962         bcopy((caddr_t)&ptr->nport_ww_name, (caddr_t)&privp->port_wwn,
2963             sizeof (privp->port_wwn));
2964         bcopy((caddr_t)&ptr->node_ww_name, (caddr_t)&privp->node_wwn,
2965             sizeof (privp->node_wwn));
2966 
2967         privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2968         return (sf_els_transport(sf, privp));
2969 }
2970 
2971 
2972 /*
2973  * send an ADISC (address discovery) ELS IU via the transport,
2974  * returning TRUE upon success, else returning FALSE
2975  */
2976 static int
2977 sf_do_adisc(struct sf *sf, struct sf_els_hdr *privp)
2978 {
2979         struct la_els_adisc     *adisc = (struct la_els_adisc *)privp->cmd;
2980         struct  fcal_packet     *fpkt = privp->fpkt;
2981 
2982         privp->els_code = LA_ELS_ADISC;
2983         adisc->ls_code = LA_ELS_ADISC;
2984         adisc->mbz[0] = 0;
2985         adisc->mbz[1] = 0;
2986         adisc->mbz[2] = 0;
2987         adisc->hard_address = 0; /* ??? */
2988         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2989             sizeof (struct la_els_adisc);
2990         bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2991             (caddr_t)&adisc->port_wwn, sizeof (adisc->port_wwn));
2992         bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2993             (caddr_t)&adisc->node_wwn, sizeof (adisc->node_wwn));
2994         adisc->nport_id = sf->sf_al_pa;
2995 
2996         privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2997         return (sf_els_transport(sf, privp));
2998 }
2999 
3000 
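     /*
      * allocate and set up an ELS request: the fcal_packet, the private
      * sf_els_hdr, and DMA-mapped command and response buffers; returns
      * the fcal_packet (or NULL on failure) and fills in *rprivp and
      * *cmd_buf for the caller
      */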
3001 static struct fcal_packet *
3002 sf_els_alloc(struct sf *sf, uchar_t dest_id, int priv_size, int cmd_size,
3003     int rsp_size, caddr_t *rprivp, caddr_t *cmd_buf)
3004 {
3005         struct  fcal_packet     *fpkt;
3006         ddi_dma_cookie_t        pcookie;
3007         ddi_dma_cookie_t        rcookie;
3008         struct  sf_els_hdr      *privp;
3009         ddi_dma_handle_t        cmd_dma_handle = NULL;
3010         ddi_dma_handle_t        rsp_dma_handle = NULL;
3011         ddi_acc_handle_t        cmd_acc_handle = NULL;
3012         ddi_acc_handle_t        rsp_acc_handle = NULL;
3013         size_t                  real_size;
3014         uint_t                  ccount;
3015         fc_frame_header_t       *hp;
3016         int                     cmd_bound = FALSE, rsp_bound = FALSE;
3017         caddr_t                 cmd = NULL;
3018         caddr_t                 rsp = NULL;
3019 
3020         if ((fpkt = (struct fcal_packet *)kmem_zalloc(
3021             sizeof (struct fcal_packet), KM_NOSLEEP)) == NULL) {
3022                 SF_DEBUG(1, (sf, CE_WARN,
3023                         "Could not allocate fcal_packet for ELS\n"));
3024                 return (NULL);
3025         }
3026 
3027         if ((privp = (struct sf_els_hdr *)kmem_zalloc(priv_size,
3028             KM_NOSLEEP)) == NULL) {
3029                 SF_DEBUG(1, (sf, CE_WARN,
3030                     "Could not allocate sf_els_hdr for ELS\n"));
3031                 goto fail;
3032         }
3033 
3034         privp->size = priv_size;
3035         fpkt->fcal_pkt_private = (caddr_t)privp;
3036 
3037         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3038             DDI_DMA_DONTWAIT, NULL, &cmd_dma_handle) != DDI_SUCCESS) {
3039                 SF_DEBUG(1, (sf, CE_WARN,
3040                     "Could not allocate DMA handle for ELS\n"));
3041                 goto fail;
3042         }
3043 
3044         if (ddi_dma_mem_alloc(cmd_dma_handle, cmd_size,
3045             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3046             DDI_DMA_DONTWAIT, NULL, &cmd,
3047             &real_size, &cmd_acc_handle) != DDI_SUCCESS) {
3048                 SF_DEBUG(1, (sf, CE_WARN,
3049                     "Could not allocate DMA memory for ELS\n"));
3050                 goto fail;
3051         }
3052 
3053         if (real_size < cmd_size) {
3054                 SF_DEBUG(1, (sf, CE_WARN,
3055                     "DMA memory too small for ELS\n"));
3056                 goto fail;
3057         }
3058 
3059         if (ddi_dma_addr_bind_handle(cmd_dma_handle, NULL,
3060             cmd, real_size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
3061             DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3062                 SF_DEBUG(1, (sf, CE_WARN,
3063                     "Could not bind DMA memory for ELS\n"));
3064                 goto fail;
3065         }
3066         cmd_bound = TRUE;
3067 
3068         if (ccount != 1) {
3069                 SF_DEBUG(1, (sf, CE_WARN,
3070                     "Wrong cookie count for ELS\n"));
3071                 goto fail;
3072         }
3073 
3074         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3075             DDI_DMA_DONTWAIT, NULL, &rsp_dma_handle) != DDI_SUCCESS) {
3076                 SF_DEBUG(1, (sf, CE_WARN,
3077                     "Could not allocate DMA handle for ELS rsp\n"));
3078                 goto fail;
3079         }
3080         if (ddi_dma_mem_alloc(rsp_dma_handle, rsp_size,
3081             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3082             DDI_DMA_DONTWAIT, NULL, &rsp,
3083             &real_size, &rsp_acc_handle) != DDI_SUCCESS) {
3084                 SF_DEBUG(1, (sf, CE_WARN,
3085                     "Could not allocate DMA memory for ELS rsp\n"));
3086                 goto fail;
3087         }
3088 
3089         if (real_size < rsp_size) {
3090                 SF_DEBUG(1, (sf, CE_WARN,
3091                     "DMA memory too small for ELS rsp\n"));
3092                 goto fail;
3093         }
3094 
3095         if (ddi_dma_addr_bind_handle(rsp_dma_handle, NULL,
3096             rsp, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3097             DDI_DMA_DONTWAIT, NULL, &rcookie, &ccount) != DDI_DMA_MAPPED) {
3098                 SF_DEBUG(1, (sf, CE_WARN,
3099                     "Could not bind DMA memory for ELS rsp\n"));
3100                 goto fail;
3101         }
3102         rsp_bound = TRUE;
3103 
3104         if (ccount != 1) {
3105                 SF_DEBUG(1, (sf, CE_WARN,
3106                     "Wrong cookie count for ELS rsp\n"));
3107                 goto fail;
3108         }
3109 
3110         privp->cmd = cmd;
3111         privp->sf = sf;
3112         privp->cmd_dma_handle = cmd_dma_handle;
3113         privp->cmd_acc_handle = cmd_acc_handle;
3114         privp->rsp = rsp;
3115         privp->rsp_dma_handle = rsp_dma_handle;
3116         privp->rsp_acc_handle = rsp_acc_handle;
3117         privp->dest_nport_id = dest_id;
3118         privp->fpkt = fpkt;
3119 
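             /* build the SOC+ request: class-3 frame, cmd and rsp data segments */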
3120         fpkt->fcal_pkt_cookie = sf->sf_socp;
3121         fpkt->fcal_pkt_comp = sf_els_callback;
3122         fpkt->fcal_magic = FCALP_MAGIC;
3123         fpkt->fcal_pkt_flags = 0;
3124         fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
3125             (ushort_t)(SOC_FC_HEADER | sf->sf_sochandle->fcal_portno);
3126         fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
3127         fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
3128         fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = cmd_size;
3129         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
3130         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
3131         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
3132         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
3133         fpkt->fcal_socal_request.sr_dataseg[0].fc_base = (uint32_t)
3134             pcookie.dmac_address;
3135         fpkt->fcal_socal_request.sr_dataseg[0].fc_count = cmd_size;
3136         fpkt->fcal_socal_request.sr_dataseg[1].fc_base = (uint32_t)
3137             rcookie.dmac_address;
3138         fpkt->fcal_socal_request.sr_dataseg[1].fc_count = rsp_size;
3139 
3140         /* Fill in the Fibre Channel frame header */
3141         hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3142         hp->r_ctl = R_CTL_ELS_REQ;
3143         hp->d_id = dest_id;
3144         hp->s_id = sf->sf_al_pa;
3145         hp->type = TYPE_EXTENDED_LS;
3146         hp->reserved1 = 0;
3147         hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3148         hp->seq_id = 0;
3149         hp->df_ctl  = 0;
3150         hp->seq_cnt = 0;
3151         hp->ox_id = 0xffff;
3152         hp->rx_id = 0xffff;
3153         hp->ro = 0;
3154 
3155         *rprivp = (caddr_t)privp;
3156         *cmd_buf = cmd;
3157         return (fpkt);
3158 
3159 fail:
3160         if (cmd_dma_handle != NULL) {
3161                 if (cmd_bound) {
3162                         (void) ddi_dma_unbind_handle(cmd_dma_handle);
3163                 }
3164                 ddi_dma_free_handle(&cmd_dma_handle);
3165                 privp->cmd_dma_handle = NULL;
3166         }
3167         if (rsp_dma_handle != NULL) {
3168                 if (rsp_bound) {
3169                         (void) ddi_dma_unbind_handle(rsp_dma_handle);
3170                 }
3171                 ddi_dma_free_handle(&rsp_dma_handle);
3172                 privp->rsp_dma_handle = NULL;
3173         }
3174         sf_els_free(fpkt);
3175         return (NULL);
3176 }
3177 
3178 
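     /*
      * free an ELS packet, unbinding and freeing any DMA resources that
      * were allocated for it (command, response, and optional data buffers)
      */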
3179 static void
3180 sf_els_free(struct fcal_packet *fpkt)
3181 {
3182         struct  sf_els_hdr      *privp = fpkt->fcal_pkt_private;
3183 
3184         if (privp != NULL) {
3185                 if (privp->cmd_dma_handle != NULL) {
3186                         (void) ddi_dma_unbind_handle(privp->cmd_dma_handle);
3187                         ddi_dma_free_handle(&privp->cmd_dma_handle);
3188                 }
3189                 if (privp->cmd != NULL) {
3190                         ddi_dma_mem_free(&privp->cmd_acc_handle);
3191                 }
3192 
3193                 if (privp->rsp_dma_handle != NULL) {
3194                         (void) ddi_dma_unbind_handle(privp->rsp_dma_handle);
3195                         ddi_dma_free_handle(&privp->rsp_dma_handle);
3196                 }
3197 
3198                 if (privp->rsp != NULL) {
3199                         ddi_dma_mem_free(&privp->rsp_acc_handle);
3200                 }
3201                 if (privp->data_dma_handle) {
3202                         (void) ddi_dma_unbind_handle(privp->data_dma_handle);
3203                         ddi_dma_free_handle(&privp->data_dma_handle);
3204                 }
3205                 if (privp->data_buf) {
3206                         ddi_dma_mem_free(&privp->data_acc_handle);
3207                 }
3208                 kmem_free(privp, privp->size);
3209         }
3210         kmem_free(fpkt, sizeof (struct fcal_packet));
3211 }
3212 
3213 
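     /*
      * create (or revive) the sf_target structure for the given target
      * number and LUN: new targets are linked into the WWN hash list and
      * either sf_targets[] (LUN 0) or the LUN chain off LUN 0; returns
      * the target, or NULL on failure
      */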
3214 static struct sf_target *
3215 sf_create_target(struct sf *sf, struct sf_els_hdr *privp, int tnum, int64_t lun)
3216 {
3217         struct sf_target *target, *ntarget, *otarget, *ptarget;
3218         int hash;
3219 #ifdef RAID_LUNS
3220         int64_t orig_lun = lun;
3221 
3222         /* XXXX Work around SCSA limitations. */
3223         lun = *((short *)&lun);
3224 #endif
3225         ntarget = kmem_zalloc(sizeof (struct sf_target), KM_NOSLEEP);
3226         mutex_enter(&sf->sf_mutex);
3227         if (sf->sf_lip_cnt != privp->lip_cnt) {
3228                 mutex_exit(&sf->sf_mutex);
3229                 if (ntarget != NULL)
3230                         kmem_free(ntarget, sizeof (struct sf_target));
3231                 return (NULL);
3232         }
3233 
3234         target = sf_lookup_target(sf, privp->port_wwn, lun);
3235         if (lun != 0) {
3236                 /*
3237                  * Since LUNs != 0 are queued up after LUN == 0, find LUN == 0
3238                  * and enqueue the new LUN.
3239                  */
3240                 if ((ptarget = sf_lookup_target(sf, privp->port_wwn,
3241                     (int64_t)0)) == NULL) {
3242                         /*
3243                          * Yeep -- no LUN 0?
3244                          */
3245                         mutex_exit(&sf->sf_mutex);
3246                         sf_log(sf, CE_WARN, "target 0x%x "
3247                             "lun %" PRIx64 ": No LUN 0\n", tnum, lun);
3248                         if (ntarget != NULL)
3249                                 kmem_free(ntarget, sizeof (struct sf_target));
3250                         return (NULL);
3251                 }
3252                 mutex_enter(&ptarget->sft_mutex);
3253                 if (target != NULL && ptarget->sft_lip_cnt == sf->sf_lip_cnt &&
3254                     ptarget->sft_state&SF_TARGET_OFFLINE) {
3255                         /* LUN 0 already finished, duplicate its state */
3256                         mutex_exit(&ptarget->sft_mutex);
3257                         sf_offline_target(sf, target);
3258                         mutex_exit(&sf->sf_mutex);
3259                         if (ntarget != NULL)
3260                                 kmem_free(ntarget, sizeof (struct sf_target));
3261                         return (target);
3262                 } else if (target != NULL) {
3263                         /*
3264                          * LUN 0 online or not examined yet.
3265                          * Try to bring the LUN back online
3266                          */
3267                         mutex_exit(&ptarget->sft_mutex);
3268                         mutex_enter(&target->sft_mutex);
3269                         target->sft_lip_cnt = privp->lip_cnt;
3270                         target->sft_state |= SF_TARGET_BUSY;
3271                         target->sft_state &= ~(SF_TARGET_OFFLINE|
3272                             SF_TARGET_MARK);
3273                         target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3274                         target->sft_hard_address = sf_switch_to_alpa[tnum];
3275                         mutex_exit(&target->sft_mutex);
3276                         mutex_exit(&sf->sf_mutex);
3277                         if (ntarget != NULL)
3278                                 kmem_free(ntarget, sizeof (struct sf_target));
3279                         return (target);
3280                 }
3281                 mutex_exit(&ptarget->sft_mutex);
3282                 if (ntarget == NULL) {
3283                         mutex_exit(&sf->sf_mutex);
3284                         return (NULL);
3285                 }
3286                 /* Initialize new target structure */
3287                 bcopy((caddr_t)&privp->node_wwn,
3288                     (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3289                 bcopy((caddr_t)&privp->port_wwn,
3290                     (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3291                 ntarget->sft_lun.l = lun;
3292 #ifdef RAID_LUNS
3293                 ntarget->sft_lun.l = orig_lun;
3294                 ntarget->sft_raid_lun = (uint_t)lun;
3295 #endif
3296                 mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3297                 mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3298                 /* Don't let anyone use this until we finish initialization. */
3299                 mutex_enter(&ntarget->sft_mutex);
3300                 mutex_enter(&ntarget->sft_pkt_mutex);
3301 
3302                 hash = SF_HASH(privp->port_wwn, lun);
3303                 ntarget->sft_next = sf->sf_wwn_lists[hash];
3304                 sf->sf_wwn_lists[hash] = ntarget;
3305 
3306                 ntarget->sft_lip_cnt = privp->lip_cnt;
3307                 ntarget->sft_al_pa = (uchar_t)privp->dest_nport_id;
3308                 ntarget->sft_hard_address = sf_switch_to_alpa[tnum];
3309                 ntarget->sft_device_type = DTYPE_UNKNOWN;
3310                 ntarget->sft_state = SF_TARGET_BUSY;
3311                 ntarget->sft_pkt_head = (struct sf_pkt *)&ntarget->
3312                     sft_pkt_head;
3313                 ntarget->sft_pkt_tail = (struct sf_pkt *)&ntarget->
3314                     sft_pkt_head;
3315 
3316                 mutex_enter(&ptarget->sft_mutex);
3317                 /* Walk to the end of this port's LUN chain */
3318                 for (target = ptarget; target->sft_next_lun;
3319                     target = target->sft_next_lun) {
3320                         otarget = target->sft_next_lun;
3321                 }
3322                 ntarget->sft_next_lun = target->sft_next_lun;
3323                 target->sft_next_lun = ntarget;
3324                 mutex_exit(&ptarget->sft_mutex);
3325                 mutex_exit(&ntarget->sft_pkt_mutex);
3326                 mutex_exit(&ntarget->sft_mutex);
3327                 mutex_exit(&sf->sf_mutex);
3328                 return (ntarget);
3329 
3330         }
3331         if (target != NULL && target->sft_lip_cnt == sf->sf_lip_cnt) {
3332                 /* It's been touched this LIP -- duplicate WWNs */
3333                 sf_offline_target(sf, target); /* And all the baby targets */
3334                 mutex_exit(&sf->sf_mutex);
3335                 sf_log(sf, CE_WARN, "target 0x%x, duplicate port wwns\n",
3336                     tnum);
3337                 if (ntarget != NULL) {
3338                         kmem_free(ntarget, sizeof (struct sf_target));
3339                 }
3340                 return (NULL);
3341         }
3342 
3343         if ((otarget = sf->sf_targets[tnum]) != NULL) {
3344                 /* Someone else is in our slot */
3345                 mutex_enter(&otarget->sft_mutex);
3346                 if (otarget->sft_lip_cnt == sf->sf_lip_cnt) {
3347                         mutex_exit(&otarget->sft_mutex);
3348                         sf_offline_target(sf, otarget);
3349                         if (target != NULL)
3350                                 sf_offline_target(sf, target);
3351                         mutex_exit(&sf->sf_mutex);
3352                         sf_log(sf, CE_WARN,
3353                             "target 0x%x, duplicate switch settings\n", tnum);
3354                         if (ntarget != NULL)
3355                                 kmem_free(ntarget, sizeof (struct sf_target));
3356                         return (NULL);
3357                 }
3358                 mutex_exit(&otarget->sft_mutex);
3359                 if (bcmp((caddr_t)&privp->port_wwn, (caddr_t)&otarget->
3360                     sft_port_wwn, sizeof (privp->port_wwn))) {
3361                         sf_offline_target(sf, otarget);
3362                         mutex_exit(&sf->sf_mutex);
3363                         sf_log(sf, CE_WARN, "wwn changed on target 0x%x\n",
3364                             tnum);
3365                         bzero((caddr_t)&sf->sf_stats.tstats[tnum],
3366                             sizeof (struct sf_target_stats));
3367                         mutex_enter(&sf->sf_mutex);
3368                 }
3369         }
3370 
3371         sf->sf_targets[tnum] = target;
3372         if ((target = sf->sf_targets[tnum]) == NULL) {
3373                 if (ntarget == NULL) {
3374                         mutex_exit(&sf->sf_mutex);
3375                         return (NULL);
3376                 }
3377                 bcopy((caddr_t)&privp->node_wwn,
3378                     (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3379                 bcopy((caddr_t)&privp->port_wwn,
3380                     (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3381                 ntarget->sft_lun.l = lun;
3382 #ifdef RAID_LUNS
3383                 ntarget->sft_lun.l = orig_lun;
3384                 ntarget->sft_raid_lun = (uint_t)lun;
3385 #endif
3386                 mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3387                 mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3388                 mutex_enter(&ntarget->sft_mutex);
3389                 mutex_enter(&ntarget->sft_pkt_mutex);
3390                 hash = SF_HASH(privp->port_wwn, lun); /* lun 0 */
3391                 ntarget->sft_next = sf->sf_wwn_lists[hash];
3392                 sf->sf_wwn_lists[hash] = ntarget;
3393 
3394                 target = ntarget;
3395                 target->sft_lip_cnt = privp->lip_cnt;
3396                 target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3397                 target->sft_hard_address = sf_switch_to_alpa[tnum];
3398                 target->sft_device_type = DTYPE_UNKNOWN;
3399                 target->sft_state = SF_TARGET_BUSY;
3400                 target->sft_pkt_head = (struct sf_pkt *)&target->
3401                     sft_pkt_head;
3402                 target->sft_pkt_tail = (struct sf_pkt *)&target->
3403                     sft_pkt_head;
3404                 sf->sf_targets[tnum] = target;
3405                 mutex_exit(&ntarget->sft_mutex);
3406                 mutex_exit(&ntarget->sft_pkt_mutex);
3407                 mutex_exit(&sf->sf_mutex);
3408         } else {
3409                 mutex_enter(&target->sft_mutex);
3410                 target->sft_lip_cnt = privp->lip_cnt;
3411                 target->sft_state |= SF_TARGET_BUSY;
3412                 target->sft_state &= ~(SF_TARGET_OFFLINE|SF_TARGET_MARK);
3413                 target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3414                 target->sft_hard_address = sf_switch_to_alpa[tnum];
3415                 mutex_exit(&target->sft_mutex);
3416                 mutex_exit(&sf->sf_mutex);
3417                 if (ntarget != NULL)
3418                         kmem_free(ntarget, sizeof (struct sf_target));
3419         }
3420         return (target);
3421 }
3422 
3423 
3424 /*
3425  * find the target for a given sf instance
3426  */
3427 /* ARGSUSED */
3428 static struct sf_target *
3429 #ifdef RAID_LUNS
3430 sf_lookup_target(struct sf *sf, uchar_t *wwn, int lun)
3431 #else
3432 sf_lookup_target(struct sf *sf, uchar_t *wwn, int64_t lun)
3433 #endif
3434 {
3435         int hash;
3436         struct sf_target *target;
3437 
3438         ASSERT(mutex_owned(&sf->sf_mutex));
3439         hash = SF_HASH(wwn, lun);
3440 
3441         target = sf->sf_wwn_lists[hash];
3442         while (target != NULL) {
3443 
3444 #ifndef RAID_LUNS
3445                 if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3446                     sizeof (target->sft_port_wwn)) == 0 &&
3447                         target->sft_lun.l == lun)
3448                         break;
3449 #else
3450                 if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3451                     sizeof (target->sft_port_wwn)) == 0 &&
3452                         target->sft_raid_lun == lun)
3453                         break;
3454 #endif
3455                 target = target->sft_next;
3456         }
3457 
3458         return (target);
3459 }
3460 
3461 
3462 /*
3463  * Send out a REPORT_LUNS command.
3464  */
3465 static int
3466 sf_do_reportlun(struct sf *sf, struct sf_els_hdr *privp,
3467     struct sf_target *target)
3468 {
3469         struct  fcal_packet     *fpkt = privp->fpkt;
3470         ddi_dma_cookie_t        pcookie;
3471         ddi_dma_handle_t        lun_dma_handle = NULL;
3472         ddi_acc_handle_t        lun_acc_handle;
3473         uint_t                  ccount;
3474         size_t                  real_size;
3475         caddr_t                 lun_buf = NULL;
3476         int                     handle_bound = 0;
3477         fc_frame_header_t       *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3478         struct fcp_cmd          *reportlun = (struct fcp_cmd *)privp->cmd;
3479         char                    *msg = "Transport";
3480 
3481         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3482             DDI_DMA_DONTWAIT, NULL, &lun_dma_handle) != DDI_SUCCESS) {
3483                 msg = "ddi_dma_alloc_handle()";
3484                 goto fail;
3485         }
3486 
3487         if (ddi_dma_mem_alloc(lun_dma_handle, REPORT_LUNS_SIZE,
3488             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3489             DDI_DMA_DONTWAIT, NULL, &lun_buf,
3490             &real_size, &lun_acc_handle) != DDI_SUCCESS) {
3491                 msg = "ddi_dma_mem_alloc()";
3492                 goto fail;
3493         }
3494 
3495         if (real_size < REPORT_LUNS_SIZE) {
3496                 msg = "DMA mem < REPORT_LUNS_SIZE";
3497                 goto fail;
3498         }
3499 
3500         if (ddi_dma_addr_bind_handle(lun_dma_handle, NULL,
3501             lun_buf, real_size, DDI_DMA_READ |
3502             DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
3503             NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3504                 msg = "ddi_dma_addr_bind_handle()";
3505                 goto fail;
3506         }
3507         handle_bound = 1;
3508 
3509         if (ccount != 1) {
3510                 msg = "ccount != 1";
3511                 goto fail;
3512         }
3513         privp->els_code = 0;                 /* not an ELS command */
3514         privp->target = target;
3515         privp->data_dma_handle = lun_dma_handle;
3516         privp->data_acc_handle = lun_acc_handle;
3517         privp->data_buf = lun_buf;
3518 
3519         fpkt->fcal_pkt_comp = sf_reportlun_callback;
3520         fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3521         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3522         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3523             sizeof (struct fcp_cmd);
3524         fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3525             (uint32_t)pcookie.dmac_address;
3526         fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3527         fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3528         hp->r_ctl = R_CTL_COMMAND;
3529         hp->type = TYPE_SCSI_FCP;
3530         bzero((caddr_t)reportlun, sizeof (struct fcp_cmd));
3531         ((union scsi_cdb *)reportlun->fcp_cdb)->scc_cmd = SCMD_REPORT_LUNS;
3532         /* Now set the buffer size.  If DDI gave us extra, that's O.K. */
3533         ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count0 =
3534             (real_size&0x0ff);
3535         ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count1 =
3536             (real_size>>8)&0x0ff;
3537         ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count2 =
3538             (real_size>>16)&0x0ff;
3539         ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count3 =
3540             (real_size>>24)&0x0ff;
3541         reportlun->fcp_cntl.cntl_read_data = 1;
3542         reportlun->fcp_cntl.cntl_write_data = 0;
3543         reportlun->fcp_data_len = pcookie.dmac_size;
3544         reportlun->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3545 
3546         (void) ddi_dma_sync(lun_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
3547         /* We know the target is there, so this should be fast */
3548         privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3549         if (sf_els_transport(sf, privp) == 1)
3550                 return (1);
3551 
3552 fail:
3553         sf_log(sf, CE_WARN,
3554             "%s failure for REPORTLUN to target 0x%x\n",
3555             msg, sf_alpa_to_switch[privp->dest_nport_id]);
3556         sf_els_free(fpkt);
3557         if (lun_dma_handle != NULL) {
3558                 if (handle_bound)
3559                         (void) ddi_dma_unbind_handle(lun_dma_handle);
3560                 ddi_dma_free_handle(&lun_dma_handle);
3561         }
3562         if (lun_buf != NULL) {
3563                 ddi_dma_mem_free(&lun_acc_handle);
3564         }
3565         return (0);
3566 }
3567 
3568 /*
3569  * Handle the results of a REPORT_LUNS command:
3570  *      Create additional targets if necessary
3571  *      Initiate INQUIRYs on all LUNs.
3572  */
3573 static void
3574 sf_reportlun_callback(struct fcal_packet *fpkt)
3575 {
3576         struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3577             fcal_pkt_private;
3578         struct scsi_report_luns *ptr =
3579             (struct scsi_report_luns *)privp->data_buf;
3580         struct sf *sf = privp->sf;
3581         struct sf_target *target = privp->target;
3582         struct fcp_rsp *rsp = NULL;
3583         int delayed_retry = 0;
3584         int tid = sf_alpa_to_switch[target->sft_hard_address];
3585         int i, free_pkt = 1;
3586         short   ncmds;
3587 
3588         mutex_enter(&sf->sf_mutex);
3589         /* use as temporary state variable */
3590         if (privp->timeout == SF_INVALID_TIMEOUT) {
3591                 mutex_exit(&sf->sf_mutex);
3592                 return;
3593         }
3594         if (privp->prev)
3595                 privp->prev->next = privp->next;
3596         if (privp->next)
3597                 privp->next->prev = privp->prev;
3598         if (sf->sf_els_list == privp)
3599                 sf->sf_els_list = privp->next;
3600         privp->prev = privp->next = NULL;
3601         mutex_exit(&sf->sf_mutex);
3602         ncmds = fpkt->fcal_ncmds;
3603         ASSERT(ncmds >= 0);
3604         mutex_enter(&sf->sf_cmd_mutex);
3605         sf->sf_ncmds = ncmds;
3606         mutex_exit(&sf->sf_cmd_mutex);
3607 
3608         if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3609                 (void) ddi_dma_sync(privp->rsp_dma_handle, 0,
3610                     0, DDI_DMA_SYNC_FORKERNEL);
3611 
3612                 rsp = (struct fcp_rsp *)privp->rsp;
3613         }
3614         SF_DEBUG(1, (sf, CE_CONT,
3615             "!REPORTLUN to al_pa %x pkt status %x scsi status %x\n",
3616             privp->dest_nport_id,
3617             fpkt->fcal_pkt_status,
3618             rsp ? rsp->fcp_u.fcp_status.scsi_status : 0));
3619 
3620         /* See if target simply does not support REPORT_LUNS. */
3621         if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK &&
3622             rsp->fcp_u.fcp_status.sense_len_set &&
3623             rsp->fcp_sense_len >=
3624             offsetof(struct scsi_extended_sense, es_qual_code)) {
3625                 struct scsi_extended_sense *sense;
3626                 sense = (struct scsi_extended_sense *)
3627                     ((caddr_t)rsp + sizeof (struct fcp_rsp)
3628                     + rsp->fcp_response_len);
3629                 if (sense->es_key == KEY_ILLEGAL_REQUEST) {
3630                         if (sense->es_add_code == 0x20) {
3631                                 /* Fake LUN 0 */
3632                                 SF_DEBUG(1, (sf, CE_CONT,
3633                                     "!REPORTLUN Faking good "
3634                                     "completion for alpa %x\n",
3635                                     privp->dest_nport_id));
3636                                 ptr->lun_list_len = FCP_LUN_SIZE;
3637                                 ptr->lun[0] = 0;
3638                                 rsp->fcp_u.fcp_status.scsi_status =
3639                                     STATUS_GOOD;
3640                         } else if (sense->es_add_code == 0x25) {
3641                                 SF_DEBUG(1, (sf, CE_CONT,
3642                                     "!REPORTLUN device alpa %x "
3643                                     "key %x code %x\n",
3644                                     privp->dest_nport_id,
3645                                     sense->es_key, sense->es_add_code));
3646                                 goto fail;
3647                         }
3648                 } else if (sense->es_key ==
3649                     KEY_UNIT_ATTENTION &&
3650                     sense->es_add_code == 0x29) {
3651                         SF_DEBUG(1, (sf, CE_CONT,
3652                             "!REPORTLUN device alpa %x was reset\n",
3653                             privp->dest_nport_id));
3654                 } else {
3655                         SF_DEBUG(1, (sf, CE_CONT,
3656                             "!REPORTLUN device alpa %x "
3657                             "key %x code %x\n",
3658                             privp->dest_nport_id,
3659                             sense->es_key, sense->es_add_code));
3660 /* XXXXXX The following is here to handle broken targets -- remove it later */
3661                         if (sf_reportlun_forever &&
3662                             sense->es_key == KEY_UNIT_ATTENTION)
3663                                 goto retry;
3664 /* XXXXXX */
3665                         if (sense->es_key == KEY_NOT_READY)
3666                                 delayed_retry = 1;
3667                 }
3668         }
3669 
3670         if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) {
3671                 struct fcp_rsp_info *bep;
3672 
3673                 bep = (struct fcp_rsp_info *)(&rsp->
3674                     fcp_response_len + 1);
3675                 if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3676                     bep->rsp_code == FCP_NO_FAILURE) {
3677                         (void) ddi_dma_sync(privp->data_dma_handle,
3678                             0, 0, DDI_DMA_SYNC_FORKERNEL);
3679 
3680                         /* Convert lun_list_len from bytes to 8-byte LUN entries */
3681                         ptr->lun_list_len = ptr->lun_list_len >> 3;
3682                         SF_DEBUG(2, (sf, CE_CONT,
3683                             "!REPORTLUN to al_pa %x succeeded: %d LUNs\n",
3684                             privp->dest_nport_id, ptr->lun_list_len));
3685                         if (!ptr->lun_list_len) {
3686                                 /* No LUNs? Ya gotta be kidding... */
3687                                 sf_log(sf, CE_WARN,
3688                                     "SCSI violation -- "
3689                                     "target 0x%x reports no LUNs\n",
3690                                     sf_alpa_to_switch[
3691                                     privp->dest_nport_id]);
3692                                 ptr->lun_list_len = 1;
3693                                 ptr->lun[0] = 0;
3694                         }
3695 
3696                         mutex_enter(&sf->sf_mutex);
3697                         if (sf->sf_lip_cnt == privp->lip_cnt) {
3698                                 sf->sf_device_count += ptr->lun_list_len - 1;
3699                         }
3700 
3701                         mutex_exit(&sf->sf_mutex);
3702                         for (i = 0; i < ptr->lun_list_len && privp->lip_cnt ==
3703                             sf->sf_lip_cnt; i++) {
3704                                 struct sf_els_hdr *nprivp;
3705                                 struct fcal_packet *nfpkt;
3706 
3707                                 /* LUN 0 is already in `target' */
3708                                 if (ptr->lun[i] != 0) {
3709                                         target = sf_create_target(sf,
3710                                             privp, tid, ptr->lun[i]);
3711                                 }
3712                                 nprivp = NULL;
3713                                 nfpkt = NULL;
3714                                 if (target) {
3715                                         nfpkt = sf_els_alloc(sf,
3716                                             target->sft_al_pa,
3717                                             sizeof (struct sf_els_hdr),
3718                                             sizeof (union sf_els_cmd),
3719                                             sizeof (union sf_els_rsp),
3720                                             (caddr_t *)&nprivp,
3721                                             (caddr_t *)&rsp);
3722                                         if (nprivp)
3723                                                 nprivp->lip_cnt =
3724                                                     privp->lip_cnt;
3725                                 }
3726                                 if (nfpkt && nprivp &&
3727                                     (sf_do_inquiry(sf, nprivp, target) ==
3728                                     0)) {
3729                                         mutex_enter(&sf->sf_mutex);
3730                                         if (sf->sf_lip_cnt == privp->
3731                                             lip_cnt) {
3732                                                 sf->sf_device_count --;
3733                                         }
3734                                         sf_offline_target(sf, target);
3735                                         mutex_exit(&sf->sf_mutex);
3736                                 }
3737                         }
3738                         sf_els_free(fpkt);
3739                         return;
3740                 } else {
3741                         SF_DEBUG(1, (sf, CE_CONT,
3742                             "!REPORTLUN al_pa %x fcp failure, "
3743                             "fcp_rsp_code %x scsi status %x\n",
3744                             privp->dest_nport_id, bep->rsp_code,
3745                             rsp ? rsp->fcp_u.fcp_status.scsi_status : 0));
3746                         goto fail;
3747                 }
3748         }
3749         if (rsp && ((rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) ||
3750             (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL))) {
3751                 delayed_retry = 1;
3752         }
3753 
3754         if (++(privp->retries) < sf_els_retries ||
3755             (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
3756 /* XXXXXX The following is here to handle broken targets -- remove it later */
3757 retry:
3758 /* XXXXXX */
3759                 if (delayed_retry) {
3760                         privp->retries--;
3761                         privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
3762                         privp->delayed_retry = 1;
3763                 } else {
3764                         privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3765                 }
3766 
3767                 privp->prev = NULL;
3768                 mutex_enter(&sf->sf_mutex);
3769                 if (privp->lip_cnt == sf->sf_lip_cnt) {
3770                         if (!delayed_retry)
3771                                 SF_DEBUG(1, (sf, CE_WARN,
3772                                     "!REPORTLUN to al_pa %x failed, retrying\n",
3773                                     privp->dest_nport_id));
3774                         privp->next = sf->sf_els_list;
3775                         if (sf->sf_els_list != NULL)
3776                                 sf->sf_els_list->prev = privp;
3777                         sf->sf_els_list = privp;
3778                         mutex_exit(&sf->sf_mutex);
3779                         if (!delayed_retry && soc_transport(sf->sf_sochandle,
3780                             fpkt, FCAL_NOSLEEP, CQ_REQUEST_1) !=
3781                             FCAL_TRANSPORT_SUCCESS) {
3782                                 mutex_enter(&sf->sf_mutex);
3783                                 if (privp->prev)
3784                                         privp->prev->next = privp->next;
3785                                 if (privp->next)
3786                                         privp->next->prev = privp->prev;
3787                                 if (sf->sf_els_list == privp)
3788                                         sf->sf_els_list = privp->next;
3789                                 mutex_exit(&sf->sf_mutex);
3790                                 goto fail;
3791                         } else
3792                                 return;
3793                 } else {
3794                         mutex_exit(&sf->sf_mutex);
3795                 }
3796         } else {
3797 fail:
3798 
3799                 /* REPORT_LUN failed -- try inquiry */
3800                 if (sf_do_inquiry(sf, privp, target) != 0) {
3801                         return;
3802                 } else {
3803                         free_pkt = 0;
3804                 }
3805                 mutex_enter(&sf->sf_mutex);
3806                 if (sf->sf_lip_cnt == privp->lip_cnt) {
3807                         sf_log(sf, CE_WARN,
3808                             "!REPORTLUN to target 0x%x failed\n",
3809                             sf_alpa_to_switch[privp->dest_nport_id]);
3810                         sf_offline_target(sf, target);
3811                         sf->sf_device_count--;
3812                         ASSERT(sf->sf_device_count >= 0);
3813                         if (sf->sf_device_count == 0)
3814                                 sf_finish_init(sf, privp->lip_cnt);
3815                 }
3816                 mutex_exit(&sf->sf_mutex);
3817         }
3818         if (free_pkt) {
3819                 sf_els_free(fpkt);
3820         }
3821 }
3822 
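     /*
      * allocate a DMA buffer and send a SCSI INQUIRY command to the given
      * target, using sf_inq_callback as the completion routine
      */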
3823 static int
3824 sf_do_inquiry(struct sf *sf, struct sf_els_hdr *privp,
3825     struct sf_target *target)
3826 {
3827         struct  fcal_packet     *fpkt = privp->fpkt;
3828         ddi_dma_cookie_t        pcookie;
3829         ddi_dma_handle_t        inq_dma_handle = NULL;
3830         ddi_acc_handle_t        inq_acc_handle;
3831         uint_t                  ccount;
3832         size_t                  real_size;
3833         caddr_t                 inq_buf = NULL;
3834         int                     handle_bound = FALSE;
3835         fc_frame_header_t *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3836         struct fcp_cmd          *inq = (struct fcp_cmd *)privp->cmd;
3837         char                    *msg = "Transport";
3838 
3839 
3840         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3841             DDI_DMA_DONTWAIT, NULL, &inq_dma_handle) != DDI_SUCCESS) {
3842                 msg = "ddi_dma_alloc_handle()";
3843                 goto fail;
3844         }
3845 
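             /* allocate DMA-able memory for the inquiry data */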
3846         if (ddi_dma_mem_alloc(inq_dma_handle, SUN_INQSIZE,
3847             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3848             DDI_DMA_DONTWAIT, NULL, &inq_buf,
3849             &real_size, &inq_acc_handle) != DDI_SUCCESS) {
3850                 msg = "ddi_dma_mem_alloc()";
3851                 goto fail;
3852         }
3853 
3854         if (real_size < SUN_INQSIZE) {
3855                 msg = "DMA mem < inquiry size";
3856                 goto fail;
3857         }
3858 
3859         if (ddi_dma_addr_bind_handle(inq_dma_handle, NULL,
3860             inq_buf, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3861             DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3862                 msg = "ddi_dma_addr_bind_handle()";
3863                 goto fail;
3864         }
3865         handle_bound = TRUE;
3866 
3867         if (ccount != 1) {
3868                 msg = "ccount != 1";
3869                 goto fail;
3870         }
3871         privp->els_code = 0;                 /* not an ELS command */
3872         privp->target = target;
3873         privp->data_dma_handle = inq_dma_handle;
3874         privp->data_acc_handle = inq_acc_handle;
3875         privp->data_buf = inq_buf;
3876         fpkt->fcal_pkt_comp = sf_inq_callback;
3877         fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3878         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3879         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3880             sizeof (struct fcp_cmd);
3881         fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3882             (uint32_t)pcookie.dmac_address;
3883         fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3884         fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
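             /* build the FC frame header and the FCP INQUIRY command */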
3885         hp->r_ctl = R_CTL_COMMAND;
3886         hp->type = TYPE_SCSI_FCP;
3887         bzero((caddr_t)inq, sizeof (struct fcp_cmd));
3888         ((union scsi_cdb *)inq->fcp_cdb)->scc_cmd = SCMD_INQUIRY;
3889         ((union scsi_cdb *)inq->fcp_cdb)->g0_count0 = SUN_INQSIZE;
3890         bcopy((caddr_t)&target->sft_lun.b, (caddr_t)&inq->fcp_ent_addr,
3891             FCP_LUN_SIZE);
3892         inq->fcp_cntl.cntl_read_data = 1;
3893         inq->fcp_cntl.cntl_write_data = 0;
3894         inq->fcp_data_len = pcookie.dmac_size;
3895         inq->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3896 
3897         (void) ddi_dma_sync(inq_dma_handle, (off_t)0, (size_t)0,
3898             DDI_DMA_SYNC_FORDEV);
3899         privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3900         SF_DEBUG(5, (sf, CE_WARN,
3901             "!Sending INQUIRY to al_pa %x lun %" PRIx64 "\n",
3902             privp->dest_nport_id,
3903             SCSA_LUN(target)));
3904         return (sf_els_transport(sf, privp));
3905 
3906 fail:
3907         sf_log(sf, CE_WARN,
3908             "%s failure for INQUIRY to target 0x%x\n",
3909             msg, sf_alpa_to_switch[privp->dest_nport_id]);
3910         sf_els_free(fpkt);
3911         if (inq_dma_handle != NULL) {
3912                 if (handle_bound) {
3913                         (void) ddi_dma_unbind_handle(inq_dma_handle);
3914                 }
3915                 ddi_dma_free_handle(&inq_dma_handle);
3916         }
3917         if (inq_buf != NULL) {
3918                 ddi_dma_mem_free(&inq_acc_handle);
3919         }
3920         return (FALSE);
3921 }
3922 
3923 
3924 /*
3925  * called as the pkt_comp routine for INQ packets
3926  */
3927 static void
3928 sf_inq_callback(struct fcal_packet *fpkt)
3929 {
3930         struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3931             fcal_pkt_private;
3932         struct scsi_inquiry *prt = (struct scsi_inquiry *)privp->data_buf;
3933         struct sf *sf = privp->sf;
3934         struct sf *tsf;
3935         struct sf_target *target = privp->target;
3936         struct fcp_rsp *rsp;
3937         int delayed_retry = FALSE;
3938         short   ncmds;
3939 
3940 
3941         mutex_enter(&sf->sf_mutex);
3942         /* SF_INVALID_TIMEOUT means this packet is already being handled */
3943         if (privp->timeout == SF_INVALID_TIMEOUT) {
3944                 mutex_exit(&sf->sf_mutex);
3945                 return;
3946         }
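             /* unlink this packet from the outstanding ELS list */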
3947         if (privp->prev != NULL) {
3948                 privp->prev->next = privp->next;
3949         }
3950         if (privp->next != NULL) {
3951                 privp->next->prev = privp->prev;
3952         }
3953         if (sf->sf_els_list == privp) {
3954                 sf->sf_els_list = privp->next;
3955         }
3956         privp->prev = privp->next = NULL;
3957         mutex_exit(&sf->sf_mutex);
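             /* update our outstanding-command count from the transport */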
3958         ncmds = fpkt->fcal_ncmds;
3959         ASSERT(ncmds >= 0);
3960         mutex_enter(&sf->sf_cmd_mutex);
3961         sf->sf_ncmds = ncmds;
3962         mutex_exit(&sf->sf_cmd_mutex);
3963 
3964         if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3965 
3966                 (void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0,
3967                     (size_t)0, DDI_DMA_SYNC_FORKERNEL);
3968 
3969                 rsp = (struct fcp_rsp *)privp->rsp;
3970                 SF_DEBUG(2, (sf, CE_CONT,
3971                     "!INQUIRY to al_pa %x scsi status %x",
3972                     privp->dest_nport_id, rsp->fcp_u.fcp_status.scsi_status));
3973 
3974                 if ((rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) &&
3975                     !rsp->fcp_u.fcp_status.resid_over &&
3976                     (!rsp->fcp_u.fcp_status.resid_under ||
3977                     ((SUN_INQSIZE - rsp->fcp_resid) >= SUN_MIN_INQLEN))) {
3978                         struct fcp_rsp_info *bep;
3979 
3980                         bep = (struct fcp_rsp_info *)(&rsp->
3981                             fcp_response_len + 1);
3982 
3983                         if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3984                             (bep->rsp_code == FCP_NO_FAILURE)) {
3985 
3986                                 SF_DEBUG(2, (sf, CE_CONT,
3987                                     "!INQUIRY to al_pa %x lun %" PRIx64
3988                                     " succeeded\n",
3989                                     privp->dest_nport_id, SCSA_LUN(target)));
3990 
3991                                 (void) ddi_dma_sync(privp->data_dma_handle,
3992                                     (off_t)0, (size_t)0,
3993                                     DDI_DMA_SYNC_FORKERNEL);
3994 
3995                                 mutex_enter(&sf->sf_mutex);
3996 
3997                                 if (sf->sf_lip_cnt == privp->lip_cnt) {
3998                                         mutex_enter(&target->sft_mutex);
3999                                         target->sft_device_type =
4000                                             prt->inq_dtype;
4001                                         bcopy(prt, &target->sft_inq,
4002                                             sizeof (*prt));
4003                                         mutex_exit(&target->sft_mutex);
4004                                         sf->sf_device_count--;
4005                                         ASSERT(sf->sf_device_count >= 0);
4006                                         if (sf->sf_device_count == 0) {
4007                                                 sf_finish_init(sf,
4008                                                     privp->lip_cnt);
4009                                         }
4010                                 }
4011                                 mutex_exit(&sf->sf_mutex);
4012                                 sf_els_free(fpkt);
4013                                 return;
4014                         }
4015                 } else if ((rsp->fcp_u.fcp_status.scsi_status ==
4016                     STATUS_BUSY) ||
4017                     (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL) ||
4018                     (rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK)) {
4019                         delayed_retry = TRUE;
4020                 }
4021         } else {
4022                 SF_DEBUG(2, (sf, CE_CONT, "!INQUIRY to al_pa %x fc status %x",
4023                     privp->dest_nport_id, fpkt->fcal_pkt_status));
4024         }
4025 
4026         if (++(privp->retries) < sf_els_retries ||
4027             (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
4028                 if (fpkt->fcal_pkt_status == FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
4029                         tsf = sf->sf_sibling;
4030                         if (tsf != NULL) {
4031                                 mutex_enter(&tsf->sf_cmd_mutex);
4032                                 tsf->sf_flag = 1;
4033                                 tsf->sf_throttle = SF_DECR_DELTA;
4034                                 mutex_exit(&tsf->sf_cmd_mutex);
4035                         }
4036                         delayed_retry = TRUE;
4037                 }
4038                 if (delayed_retry) {
4039                         privp->retries--;
4040                         privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
4041                         privp->delayed_retry = TRUE;
4042                 } else {
4043                         privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
4044                 }
4045 
4046                 privp->prev = NULL;
4047                 mutex_enter(&sf->sf_mutex);
4048                 if (privp->lip_cnt == sf->sf_lip_cnt) {
4049                         if (!delayed_retry) {
4050                                 SF_DEBUG(1, (sf, CE_WARN,
4051                                     "INQUIRY to al_pa %x failed, retrying",
4052                                     privp->dest_nport_id));
4053                         }
4054                         privp->next = sf->sf_els_list;
4055                         if (sf->sf_els_list != NULL) {
4056                                 sf->sf_els_list->prev = privp;
4057                         }
4058                         sf->sf_els_list = privp;
4059                         mutex_exit(&sf->sf_mutex);
4060                         /* if not delayed, call transport to send a pkt */
4061                         if (!delayed_retry &&
4062                             (soc_transport(sf->sf_sochandle, fpkt,
4063                             FCAL_NOSLEEP, CQ_REQUEST_1) !=
4064                             FCAL_TRANSPORT_SUCCESS)) {
4065                                 mutex_enter(&sf->sf_mutex);
4066                                 if (privp->prev != NULL) {
4067                                         privp->prev->next = privp->next;
4068                                 }
4069                                 if (privp->next != NULL) {
4070                                         privp->next->prev = privp->prev;
4071                                 }
4072                                 if (sf->sf_els_list == privp) {
4073                                         sf->sf_els_list = privp->next;
4074                                 }
4075                                 mutex_exit(&sf->sf_mutex);
4076                                 goto fail;
4077                         }
4078                         return;
4079                 }
4080                 mutex_exit(&sf->sf_mutex);
4081         } else {
4082 fail:
4083                 mutex_enter(&sf->sf_mutex);
4084                 if (sf->sf_lip_cnt == privp->lip_cnt) {
4085                         sf_offline_target(sf, target);
4086                         sf_log(sf, CE_NOTE,
4087                             "INQUIRY to target 0x%x lun %" PRIx64 " failed. "
4088                             "Retry Count: %d\n",
4089                             sf_alpa_to_switch[privp->dest_nport_id],
4090                             SCSA_LUN(target),
4091                             privp->retries);
4092                         sf->sf_device_count--;
4093                         ASSERT(sf->sf_device_count >= 0);
4094                         if (sf->sf_device_count == 0) {
4095                                 sf_finish_init(sf, privp->lip_cnt);
4096                         }
4097                 }
4098                 mutex_exit(&sf->sf_mutex);
4099         }
4100         sf_els_free(fpkt);
4101 }
4102 
4103 
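     /*
      * called, with sf_mutex held, once all devices have been accounted
      * for after a LIP: walk the target lists, online or offline each
      * target as appropriate, and mark the HBA online
      */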
4104 static void
4105 sf_finish_init(struct sf *sf, int lip_cnt)
4106 {
4107         int                     i;              /* loop index */
4108         int                     cflag;
4109         struct sf_target        *target;        /* current target */
4110         dev_info_t              *dip;
4111         struct sf_hp_elem       *elem;          /* hotplug element created */
4112 
4113         SF_DEBUG(1, (sf, CE_WARN, "!sf_finish_init\n"));
4114         ASSERT(mutex_owned(&sf->sf_mutex));
4115 
4116         /* scan all hash queues */
4117         for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
4118                 target = sf->sf_wwn_lists[i];
4119                 while (target != NULL) {
4120                         mutex_enter(&target->sft_mutex);
4121 
4122                         /* skip this target if it is already offline */
4123                         if ((target->sft_state & SF_TARGET_OFFLINE)) {
4124                                 /*
4125                                  * target already offline
4126                                  */
4127                                 mutex_exit(&target->sft_mutex);
4128                                 goto next_entry;
4129                         }
4130 
4131                         /*
4132                          * target is not already offline -- see if it has
4133                          * already been marked as ready to go offline
4134                          */
4135                         if (target->sft_state & SF_TARGET_MARK) {
4136                                 /*
4137                                  * target already marked, so take it offline
4138                                  */
4139                                 mutex_exit(&target->sft_mutex);
4140                                 sf_offline_target(sf, target);
4141                                 goto next_entry;
4142                         }
4143 
4144                         /* clear target busy flag */
4145                         target->sft_state &= ~SF_TARGET_BUSY;
4146 
4147                         /* is target init not yet done? */
4148                         cflag = !(target->sft_state & SF_TARGET_INIT_DONE);
4149 
4150                         /* get pointer to target dip */
4151                         dip = target->sft_dip;
4152 
4153                         mutex_exit(&target->sft_mutex);
4154                         mutex_exit(&sf->sf_mutex);
4155 
4156                         if (cflag && (dip == NULL)) {
4157                                 /*
4158                                  * target init not yet done &&
4159                                  * devinfo not yet created
4160                                  */
4161                                 sf_create_devinfo(sf, target, lip_cnt);
4162                                 mutex_enter(&sf->sf_mutex);
4163                                 goto next_entry;
4164                         }
4165 
4166                         /*
4167                          * target init already done || devinfo already created
4168                          */
4169                         ASSERT(dip != NULL);
4170                         if (!sf_create_props(dip, target, lip_cnt)) {
4171                                 /* a problem creating properties */
4172                                 mutex_enter(&sf->sf_mutex);
4173                                 goto next_entry;
4174                         }
4175 
4176                         /* create a new element for the hotplug list */
4177                         if ((elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4178                             KM_NOSLEEP)) != NULL) {
4179 
4180                                 /* fill in the new element */
4181                                 elem->dip = dip;
4182                                 elem->target = target;
4183                                 elem->what = SF_ONLINE;
4184 
4185                                 /* add the new element into the hotplug list */
4186                                 mutex_enter(&sf->sf_hp_daemon_mutex);
4187                                 if (sf->sf_hp_elem_tail != NULL) {
4188                                         sf->sf_hp_elem_tail->next = elem;
4189                                         sf->sf_hp_elem_tail = elem;
4190                                 } else {
4191                                         /* this is the first element in list */
4192                                         sf->sf_hp_elem_head =
4193                                             sf->sf_hp_elem_tail =
4194                                             elem;
4195                                 }
4196                                 cv_signal(&sf->sf_hp_daemon_cv);
4197                                 mutex_exit(&sf->sf_hp_daemon_mutex);
4198                         } else {
4199                                 /* no memory for element -- online directly */
4200                                 (void) ndi_devi_online_async(dip, 0);
4201                         }
4202 
4203                         mutex_enter(&sf->sf_mutex);
4204 
4205 next_entry:
4206                         /* ensure no new LIPs have occurred */
4207                         if (sf->sf_lip_cnt != lip_cnt) {
4208                                 return;
4209                         }
4210                         target = target->sft_next;
4211                 }
4212 
4213                 /* done scanning all targets in this queue */
4214         }
4215 
4216         /* done with all hash queues */
4217 
4218         sf->sf_state = SF_STATE_ONLINE;
4219         sf->sf_online_timer = 0;
4220 }
4221 
4222 
4223 /*
4224  * create devinfo node
4225  */
4226 static void
4227 sf_create_devinfo(struct sf *sf, struct sf_target *target, int lip_cnt)
4228 {
4229         dev_info_t              *cdip = NULL;
4230         char                    *nname = NULL;
4231         char                    **compatible = NULL;
4232         int                     ncompatible;
4233         struct scsi_inquiry     *inq = &target->sft_inq;
4234         char                    *scsi_binding_set;
4235 
4236         /* get the 'scsi-binding-set' property */
4237         if (ddi_prop_lookup_string(DDI_DEV_T_ANY, sf->sf_dip,
4238             DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
4239             &scsi_binding_set) != DDI_PROP_SUCCESS)
4240                 scsi_binding_set = NULL;
4241 
4242         /* determine the node name and compatible */
4243         scsi_hba_nodename_compatible_get(inq, scsi_binding_set,
4244             inq->inq_dtype, NULL, &nname, &compatible, &ncompatible);
4245         if (scsi_binding_set)
4246                 ddi_prop_free(scsi_binding_set);
4247 
4248         /* if nodename can't be determined then print a message and skip it */
4249         if (nname == NULL) {
4250 #ifndef RAID_LUNS
4251                 sf_log(sf, CE_WARN, "%s%d: no driver for device "
4252                     "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4253                     "    compatible: %s",
4254                     ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4255                     target->sft_port_wwn[0], target->sft_port_wwn[1],
4256                     target->sft_port_wwn[2], target->sft_port_wwn[3],
4257                     target->sft_port_wwn[4], target->sft_port_wwn[5],
4258                     target->sft_port_wwn[6], target->sft_port_wwn[7],
4259                     target->sft_lun.l, *compatible);
4260 #else
4261                 sf_log(sf, CE_WARN, "%s%d: no driver for device "
4262                     "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4263                     "    compatible: %s",
4264                     ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4265                     target->sft_port_wwn[0], target->sft_port_wwn[1],
4266                     target->sft_port_wwn[2], target->sft_port_wwn[3],
4267                     target->sft_port_wwn[4], target->sft_port_wwn[5],
4268                     target->sft_port_wwn[6], target->sft_port_wwn[7],
4269                     target->sft_raid_lun, *compatible);
4270 #endif
4271                 goto fail;
4272         }
4273 
4274         /* allocate the node */
4275         if (ndi_devi_alloc(sf->sf_dip, nname,
4276             DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
4277                 goto fail;
4278         }
4279 
4280         /* decorate the node with compatible */
4281         if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
4282             "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
4283                 goto fail;
4284         }
4285 
4286         /* add addressing properties to the node */
4287         if (sf_create_props(cdip, target, lip_cnt) != 1) {
4288                 goto fail;
4289         }
4290 
4291         mutex_enter(&target->sft_mutex);
4292         if (target->sft_dip != NULL) {
4293                 mutex_exit(&target->sft_mutex);
4294                 goto fail;
4295         }
4296         target->sft_dip = cdip;
4297         mutex_exit(&target->sft_mutex);
4298 
4299         if (ndi_devi_online_async(cdip, 0) != DDI_SUCCESS) {
4300                 goto fail;
4301         }
4302 
4303         scsi_hba_nodename_compatible_free(nname, compatible);
4304         return;
4305 
4306 fail:
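             /* clean up any properties and free the partially created node */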
4307         scsi_hba_nodename_compatible_free(nname, compatible);
4308         if (cdip != NULL) {
4309                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP);
4310                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP);
4311                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LIP_CNT_PROP);
4312                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, TARGET_PROP);
4313                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LUN_PROP);
4314                 if (ndi_devi_free(cdip) != NDI_SUCCESS) {
4315                         sf_log(sf, CE_WARN, "ndi_devi_free failed\n");
4316                 } else {
4317                         mutex_enter(&target->sft_mutex);
4318                         if (cdip == target->sft_dip) {
4319                                 target->sft_dip = NULL;
4320                         }
4321                         mutex_exit(&target->sft_mutex);
4322                 }
4323         }
4324 }
4325 
4326 /*
4327  * create required properties, returning TRUE iff we succeed, else
4328  * returning FALSE
4329  */
4330 static int
4331 sf_create_props(dev_info_t *cdip, struct sf_target *target, int lip_cnt)
4332 {
4333         int tgt_id = sf_alpa_to_switch[target->sft_al_pa];
4334 
4335 
4336         if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4337             cdip, NODE_WWN_PROP, target->sft_node_wwn, FC_WWN_SIZE) !=
4338             DDI_PROP_SUCCESS) {
4339                 return (FALSE);
4340         }
4341 
4342         if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4343             cdip, PORT_WWN_PROP, target->sft_port_wwn, FC_WWN_SIZE) !=
4344             DDI_PROP_SUCCESS) {
4345                 return (FALSE);
4346         }
4347 
4348         if (ndi_prop_update_int(DDI_DEV_T_NONE,
4349             cdip, LIP_CNT_PROP, lip_cnt) != DDI_PROP_SUCCESS) {
4350                 return (FALSE);
4351         }
4352 
4353         if (ndi_prop_update_int(DDI_DEV_T_NONE,
4354             cdip, TARGET_PROP, tgt_id) != DDI_PROP_SUCCESS) {
4355                 return (FALSE);
4356         }
4357 
4358 #ifndef RAID_LUNS
4359         if (ndi_prop_update_int(DDI_DEV_T_NONE,
4360             cdip, LUN_PROP, target->sft_lun.l) != DDI_PROP_SUCCESS) {
4361                 return (FALSE);
4362         }
4363 #else
4364         if (ndi_prop_update_int(DDI_DEV_T_NONE,
4365             cdip, LUN_PROP, target->sft_raid_lun) != DDI_PROP_SUCCESS) {
4366                 return (FALSE);
4367         }
4368 #endif
4369 
4370         return (TRUE);
4371 }
4372 
4373 
4374 /*
4375  * called by the transport to offline a target
4376  */
4377 /* ARGSUSED */
4378 static void
4379 sf_offline_target(struct sf *sf, struct sf_target *target)
4380 {
4381         dev_info_t *dip;
4382         struct sf_target *next_target = NULL;
4383         struct sf_hp_elem       *elem;
4384 
4385         ASSERT(mutex_owned(&sf->sf_mutex));
4386 
4387         if (sf_core && (sf_core & SF_CORE_OFFLINE_TARGET)) {
4388                 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
4389                 sf_core = 0;
4390         }
4391 
4392         while (target != NULL) {
4393                 sf_log(sf, CE_NOTE,
4394                     "!target 0x%x al_pa 0x%x lun %" PRIx64 " offlined\n",
4395                     sf_alpa_to_switch[target->sft_al_pa],
4396                     target->sft_al_pa, SCSA_LUN(target));
4397                 mutex_enter(&target->sft_mutex);
4398                 target->sft_state &= ~(SF_TARGET_BUSY|SF_TARGET_MARK);
4399                 target->sft_state |= SF_TARGET_OFFLINE;
4400                 mutex_exit(&target->sft_mutex);
4401                 mutex_exit(&sf->sf_mutex);
4402 
4403                 /* XXXX if this is LUN 0, offline all other LUNs */
4404                 if (next_target || target->sft_lun.l == 0)
4405                         next_target = target->sft_next_lun;
4406 
4407                 /* abort all cmds for this target */
4408                 sf_abort_all(sf, target, FALSE, sf->sf_lip_cnt, FALSE);
4409 
4410                 mutex_enter(&sf->sf_mutex);
4411                 mutex_enter(&target->sft_mutex);
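                     /* if tgt_init was done, queue the node for hotplug offline */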
4412                 if (target->sft_state & SF_TARGET_INIT_DONE) {
4413                         dip = target->sft_dip;
4414                         mutex_exit(&target->sft_mutex);
4415                         mutex_exit(&sf->sf_mutex);
4416                         (void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
4417                             TARGET_PROP);
4418                         (void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
4419                             dip, FCAL_REMOVE_EVENT, &sf_remove_eid,
4420                             NDI_EVENT_NOPASS);
4421                         (void) ndi_event_run_callbacks(sf->sf_event_hdl,
4422                             target->sft_dip, sf_remove_eid, NULL);
4423 
4424                         elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4425                             KM_NOSLEEP);
4426                         if (elem != NULL) {
4427                                 elem->dip = dip;
4428                                 elem->target = target;
4429                                 elem->what = SF_OFFLINE;
4430                                 mutex_enter(&sf->sf_hp_daemon_mutex);
4431                                 if (sf->sf_hp_elem_tail != NULL) {
4432                                         sf->sf_hp_elem_tail->next = elem;
4433                                         sf->sf_hp_elem_tail = elem;
4434                                 } else {
4435                                         sf->sf_hp_elem_head =
4436                                             sf->sf_hp_elem_tail =
4437                                             elem;
4438                                 }
4439                                 cv_signal(&sf->sf_hp_daemon_cv);
4440                                 mutex_exit(&sf->sf_hp_daemon_mutex);
4441                         } else {
4442                                 /* don't do NDI_DEVI_REMOVE for now */
4443                                 if (ndi_devi_offline(dip, 0) != NDI_SUCCESS) {
4444                                         SF_DEBUG(1, (sf, CE_WARN,
4445                                             "target %x lun %" PRIx64 ", "
4446                                             "device offline failed",
4447                                             sf_alpa_to_switch[target->
4448                                             sft_al_pa],
4449                                             SCSA_LUN(target)));
4450                                 } else {
4451                                         SF_DEBUG(1, (sf, CE_NOTE,
4452                                             "target %x, lun %" PRIx64 ", "
4453                                             "device offline succeeded\n",
4454                                             sf_alpa_to_switch[target->
4455                                             sft_al_pa],
4456                                             SCSA_LUN(target)));
4457                                 }
4458                         }
4459                         mutex_enter(&sf->sf_mutex);
4460                 } else {
4461                         mutex_exit(&target->sft_mutex);
4462                 }
4463                 target = next_target;
4464         }
4465 }
4466 
4467 
4468 /*
4469  * routine to get/set a capability
4470  *
4471  * returning:
4472  *      1 (TRUE)        boolean capability is true (on get)
4473  *      0 (FALSE)       invalid capability, can't set capability (on set),
4474  *                      or boolean capability is false (on get)
4475  *      -1 (UNDEFINED)  can't find capability (SCSA) or unsupported capability
4476  *      3               when getting SCSI version number
4477  *      AL_PA           when getting port initiator ID
4478  */
4479 static int
4480 sf_commoncap(struct scsi_address *ap, char *cap,
4481     int val, int tgtonly, int doset)
4482 {
4483         struct sf *sf = ADDR2SF(ap);
4484         int cidx;
4485         int rval = FALSE;
4486 
4487 
4488         if (cap == NULL) {
4489                 SF_DEBUG(3, (sf, CE_WARN, "sf_commoncap: invalid arg"));
4490                 return (rval);
4491         }
4492 
4493         /* get index of capability string */
4494         if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
4495                 /* can't find capability */
4496                 return (UNDEFINED);
4497         }
4498 
4499         if (doset) {
4500                 /*
4501                  * Process setcap request.
4502                  */
4503 
4504                 /*
4505                  * At present, we can only set binary (0/1) values
4506                  */
4507                 switch (cidx) {
4508                 case SCSI_CAP_ARQ:      /* can't set this capability */
4509                         break;
4510                 default:
4511                         SF_DEBUG(3, (sf, CE_WARN,
4512                             "sf_setcap: unsupported %d", cidx));
4513                         rval = UNDEFINED;
4514                         break;
4515                 }
4516 
4517                 SF_DEBUG(4, (sf, CE_NOTE,
4518                     "set cap: cap=%s,val=0x%x,tgtonly=0x%x"
4519                     ",doset=0x%x,rval=%d\n",
4520                     cap, val, tgtonly, doset, rval));
4521 
4522         } else {
4523                 /*
4524                  * Process getcap request.
4525                  */
4526                 switch (cidx) {
4527                 case SCSI_CAP_DMA_MAX:
4528                         break;          /* don't have this capability */
4529                 case SCSI_CAP_INITIATOR_ID:
4530                         rval = sf->sf_al_pa;
4531                         break;
4532                 case SCSI_CAP_ARQ:
4533                         rval = TRUE;    /* do have this capability */
4534                         break;
4535                 case SCSI_CAP_RESET_NOTIFICATION:
4536                 case SCSI_CAP_TAGGED_QING:
4537                         rval = TRUE;    /* do have this capability */
4538                         break;
4539                 case SCSI_CAP_SCSI_VERSION:
4540                         rval = 3;
4541                         break;
4542                 case SCSI_CAP_INTERCONNECT_TYPE:
4543                         rval = INTERCONNECT_FIBRE;
4544                         break;
4545                 default:
4546                         SF_DEBUG(4, (sf, CE_WARN,
4547                             "sf_scsi_getcap: unsupported"));
4548                         rval = UNDEFINED;
4549                         break;
4550                 }
4551                 SF_DEBUG(4, (sf, CE_NOTE,
4552                     "get cap: cap=%s,val=0x%x,tgtonly=0x%x,"
4553                     "doset=0x%x,rval=%d\n",
4554                     cap, val, tgtonly, doset, rval));
4555         }
4556 
4557         return (rval);
4558 }
4559 
4560 
4561 /*
4562  * called by the transport to get a capability
4563  */
4564 static int
4565 sf_getcap(struct scsi_address *ap, char *cap, int whom)
4566 {
4567         return (sf_commoncap(ap, cap, 0, whom, FALSE));
4568 }
4569 
4570 
4571 /*
4572  * called by the transport to set a capability
4573  */
4574 static int
4575 sf_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4576 {
4577         return (sf_commoncap(ap, cap, value, whom, TRUE));
4578 }
4579 
4580 
4581 /*
4582  * called by the transport to abort a command (or all commands for a target)
4583  */
4584 static int
4585 sf_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4586 {
4587         struct sf *sf = ADDR2SF(ap);
4588         struct sf_target *target = ADDR2TARGET(ap);
4589         struct sf_pkt *cmd, *ncmd, *pcmd;
4590         struct fcal_packet *fpkt;
4591         int     rval = 0, t, my_rval = FALSE;
4592         int     old_target_state;
4593         int     lip_cnt;
4594         int     tgt_id;
4595         fc_frame_header_t       *hp;
4596         int     deferred_destroy;
4597 
4598         deferred_destroy = 0;
4599 
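             /*
              * if a packet was passed in, abort just that command; otherwise
              * send an Abort Task Set to abort all commands for the target
              */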
4600         if (pkt != NULL) {
4601                 cmd = PKT2CMD(pkt);
4602                 fpkt = cmd->cmd_fp_pkt;
4603                 SF_DEBUG(2, (sf, CE_NOTE, "sf_abort packet %p\n",
4604                     (void *)fpkt));
4605                 pcmd = NULL;
4606                 mutex_enter(&sf->sf_cmd_mutex);
4607                 ncmd = sf->sf_pkt_head;
4608                 while (ncmd != NULL) {
4609                         if (ncmd == cmd) {
4610                                 if (pcmd != NULL) {
4611                                         pcmd->cmd_next = cmd->cmd_next;
4612                                 } else {
4613                                         sf->sf_pkt_head = cmd->cmd_next;
4614                                 }
4615                                 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
4616                                 cmd->cmd_state = SF_STATE_IDLE;
4617                                 pkt->pkt_reason = CMD_ABORTED;
4618                                 pkt->pkt_statistics |= STAT_ABORTED;
4619                                 my_rval = TRUE;
4620                                 break;
4621                         } else {
4622                                 pcmd = ncmd;
4623                                 ncmd = ncmd->cmd_next;
4624                         }
4625                 }
4626                 mutex_exit(&sf->sf_cmd_mutex);
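                     /* not on the overflow queue -- abort it at the transport */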
4627                 if (ncmd == NULL) {
4628                         mutex_enter(&cmd->cmd_abort_mutex);
4629                         if (cmd->cmd_state == SF_STATE_ISSUED) {
4630                                 cmd->cmd_state = SF_STATE_ABORTING;
4631                                 cmd->cmd_timeout = sf_watchdog_time + 20;
4632                                 mutex_exit(&cmd->cmd_abort_mutex);
4633                                 /* call transport to abort command */
4634                                 if (((rval = soc_abort(sf->sf_sochandle,
4635                                     sf->sf_socp, sf->sf_sochandle->fcal_portno,
4636                                     fpkt, 1)) == FCAL_ABORTED) ||
4637                                     (rval == FCAL_ABORT_FAILED)) {
4638                                         my_rval = TRUE;
4639                                         pkt->pkt_reason = CMD_ABORTED;
4640                                         pkt->pkt_statistics |= STAT_ABORTED;
4641                                         cmd->cmd_state = SF_STATE_IDLE;
4642                                 } else if (rval == FCAL_BAD_ABORT) {
4643                                         cmd->cmd_timeout = sf_watchdog_time
4644                                             + 20;
4645                                         my_rval = FALSE;
4646                                 } else {
4647                                         SF_DEBUG(1, (sf, CE_NOTE,
4648                                             "Command Abort failed\n"));
4649                                 }
4650                         } else {
4651                                 mutex_exit(&cmd->cmd_abort_mutex);
4652                         }
4653                 }
4654         } else {
4655                 SF_DEBUG(2, (sf, CE_NOTE, "sf_abort target\n"));
4656                 mutex_enter(&sf->sf_mutex);
4657                 lip_cnt = sf->sf_lip_cnt;
4658                 mutex_enter(&target->sft_mutex);
4659                 if (target->sft_state & (SF_TARGET_BUSY |
4660                     SF_TARGET_OFFLINE)) {
4661                         mutex_exit(&target->sft_mutex);
4662                         return (rval);
4663                 }
4664                 old_target_state = target->sft_state;
4665                 target->sft_state |= SF_TARGET_BUSY;
4666                 mutex_exit(&target->sft_mutex);
4667                 mutex_exit(&sf->sf_mutex);
4668 
4669                 if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4670                     0, 0, 0, NULL, 0)) != NULL) {
4671 
4672                         cmd = PKT2CMD(pkt);
4673                         cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 1;
4674                         cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4675                         cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4676 
4677                         /* prepare the packet for transport */
4678                         if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4679 
4680                                 cmd->cmd_state = SF_STATE_ISSUED;
4681                                 /*
4682                                  * call transport to send a pkt polled
4683                                  *
4684                                  * if that fails call the transport to abort it
4685                                  */
4686                                 if (soc_transport_poll(sf->sf_sochandle,
4687                                     cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4688                                     CQ_REQUEST_1) == FCAL_TRANSPORT_SUCCESS) {
4689                                         (void) ddi_dma_sync(
4690                                             cmd->cmd_cr_pool->rsp_dma_handle,
4691                                             (off_t)
4692                                             ((caddr_t)cmd->cmd_rsp_block -
4693                                             cmd->cmd_cr_pool->rsp_base),
4694                                             FCP_MAX_RSP_IU_SIZE,
4695                                             DDI_DMA_SYNC_FORKERNEL);
4696                                         if (((struct fcp_rsp_info *)
4697                                             (&cmd->cmd_rsp_block->
4698                                             fcp_response_len + 1))->
4699                                             rsp_code == FCP_NO_FAILURE) {
4700                                                 /* abort cmds for this targ */
4701                                                 sf_abort_all(sf, target, TRUE,
4702                                                     lip_cnt, TRUE);
4703                                         } else {
4704                                                 hp = &cmd->cmd_fp_pkt->
4705                                                     fcal_socal_request.
4706                                                     sr_fc_frame_hdr;
4707                                                 tgt_id = sf_alpa_to_switch[
4708                                                     (uchar_t)hp->d_id];
4709                                                 sf->sf_stats.tstats[tgt_id].
4710                                                     task_mgmt_failures++;
4711                                                 SF_DEBUG(1, (sf, CE_NOTE,
4712                                                     "Target %d Abort Task "
4713                                                     "Set failed\n", hp->d_id));
4714                                         }
4715                                 } else {
4716                                         mutex_enter(&cmd->cmd_abort_mutex);
4717                                         if (cmd->cmd_state == SF_STATE_ISSUED) {
4718                                         cmd->cmd_state = SF_STATE_ABORTING;
4719                                         cmd->cmd_timeout = sf_watchdog_time
4720                                             + 20;
4721                                         mutex_exit(&cmd->cmd_abort_mutex);
4722                                         if ((t = soc_abort(sf->sf_sochandle,
4723                                             sf->sf_socp, sf->sf_sochandle->
4724                                             fcal_portno, cmd->cmd_fp_pkt, 1)) !=
4725                                             FCAL_ABORTED &&
4726                                             (t != FCAL_ABORT_FAILED)) {
4727                                                 sf_log(sf, CE_NOTE,
4728                                                     "sf_abort failed, "
4729                                                     "initiating LIP\n");
4730                                                 sf_force_lip(sf);
4731                                                 deferred_destroy = 1;
4732                                         }
4733                                         } else {
4734                                         mutex_exit(&cmd->cmd_abort_mutex);
4735                                         }
4736                                 }
4737                         }
4738                         if (!deferred_destroy) {
4739                                 cmd->cmd_fp_pkt->fcal_pkt_comp =
4740                                     sf_cmd_callback;
4741                                 cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 0;
4742                                 sf_scsi_destroy_pkt(ap, pkt);
4743                                 my_rval = TRUE;
4744                         }
4745                 }
4746                 mutex_enter(&sf->sf_mutex);
4747                 if (lip_cnt == sf->sf_lip_cnt) {
4748                         mutex_enter(&target->sft_mutex);
4749                         target->sft_state = old_target_state;
4750                         mutex_exit(&target->sft_mutex);
4751                 }
4752                 mutex_exit(&sf->sf_mutex);
4753         }
4754         return (my_rval);
4755 }
4756 
4757 
4758 /*
4759  * called by the transport and internally to reset a target
4760  */
4761 static int
4762 sf_reset(struct scsi_address *ap, int level)
4763 {
4764         struct scsi_pkt *pkt;
4765         struct fcal_packet *fpkt;
4766         struct sf *sf = ADDR2SF(ap);
4767         struct sf_target *target = ADDR2TARGET(ap), *ntarget;
4768         struct sf_pkt *cmd;
4769         int     rval = FALSE, t;
4770         int     lip_cnt;
4771         int     tgt_id, ret;
4772         fc_frame_header_t       *hp;
4773         int     deferred_destroy;
4774 
4775         /* We don't support RESET_LUN yet. */
4776         if (level == RESET_TARGET) {
4777                 struct sf_reset_list *p;
4778 
4779                 if ((p = kmem_alloc(sizeof (struct sf_reset_list), KM_NOSLEEP))
4780                     == NULL)
4781                         return (rval);
4782 
4783                 SF_DEBUG(2, (sf, CE_NOTE, "sf_reset target\n"));
4784                 mutex_enter(&sf->sf_mutex);
4785                 /* All target resets go to LUN 0 */
4786                 if (target->sft_lun.l) {
4787                         target = sf_lookup_target(sf, target->sft_port_wwn, 0);
4788                 }
4789                 mutex_enter(&target->sft_mutex);
4790                 if (target->sft_state & (SF_TARGET_BUSY |
4791                     SF_TARGET_OFFLINE)) {
4792                         mutex_exit(&target->sft_mutex);
4793                         mutex_exit(&sf->sf_mutex);
4794                         kmem_free(p, sizeof (struct sf_reset_list));
4795                         return (rval);
4796                 }
4797                 lip_cnt = sf->sf_lip_cnt;
4798                 target->sft_state |= SF_TARGET_BUSY;
4799                 for (ntarget = target->sft_next_lun;
4800                     ntarget;
4801                     ntarget = ntarget->sft_next_lun) {
4802                         mutex_enter(&ntarget->sft_mutex);
4803                         /*
4804                          * XXXX If we supported RESET_LUN we should check here
4805                          * to see if any LUN were being reset and somehow fail
4806                          * that operation.
4807                          */
4808                         ntarget->sft_state |= SF_TARGET_BUSY;
4809                         mutex_exit(&ntarget->sft_mutex);
4810                 }
4811                 mutex_exit(&target->sft_mutex);
4812                 mutex_exit(&sf->sf_mutex);
4813 
4814                 deferred_destroy = 0;
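                     /* send a polled FCP command with the reset flag set */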
4815                 if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4816                     0, 0, 0, NULL, 0)) != NULL) {
4817                         cmd = PKT2CMD(pkt);
4818                         cmd->cmd_block->fcp_cntl.cntl_reset = 1;
4819                         cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4820                         cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4821 
4822                         /* prepare the packet for transport */
4823                         if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4824                                 /* call transport to send a pkt polled */
4825                                 cmd->cmd_state = SF_STATE_ISSUED;
4826                                 if ((ret = soc_transport_poll(sf->sf_sochandle,
4827                                     cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4828                                     CQ_REQUEST_1)) == FCAL_TRANSPORT_SUCCESS) {
4829                                         (void) ddi_dma_sync(cmd->cmd_cr_pool->
4830                                             rsp_dma_handle, (caddr_t)cmd->
4831                                             cmd_rsp_block - cmd->cmd_cr_pool->
4832                                             rsp_base, FCP_MAX_RSP_IU_SIZE,
4833                                             DDI_DMA_SYNC_FORKERNEL);
4834                                         fpkt = cmd->cmd_fp_pkt;
4835                                         if ((fpkt->fcal_pkt_status ==
4836                                             FCAL_STATUS_OK) &&
4837                                             (((struct fcp_rsp_info *)
4838                                             (&cmd->cmd_rsp_block->
4839                                             fcp_response_len + 1))->
4840                                             rsp_code == FCP_NO_FAILURE)) {
4841                                                 sf_log(sf, CE_NOTE,
4842                                                     "!sf%d: Target 0x%x Reset "
4843                                                     "successful\n",
4844                                                     ddi_get_instance(
4845                                                     sf->sf_dip),
4846                                                     sf_alpa_to_switch[
4847                                                     target->sft_al_pa]);
4848                                                 rval = TRUE;
4849                                         } else {
4850                                                 hp = &cmd->cmd_fp_pkt->
4851                                                     fcal_socal_request.
4852                                                     sr_fc_frame_hdr;
4853                                                 tgt_id = sf_alpa_to_switch[
4854                                                     (uchar_t)hp->d_id];
4855                                                 sf->sf_stats.tstats[tgt_id].
4856                                                     task_mgmt_failures++;
4857                                                 sf_log(sf, CE_NOTE,
4858                                                     "!sf%d: Target 0x%x "
4859                                                     "Reset failed. "
4860                                                     "Status code 0x%x "
4861                                                     "Resp code 0x%x\n",
4862                                                     ddi_get_instance(
4863                                                     sf->sf_dip),
4864                                                     tgt_id,
4865                                                     fpkt->fcal_pkt_status,
4866                                                     ((struct fcp_rsp_info *)
4867                                                     (&cmd->cmd_rsp_block->
4868                                                     fcp_response_len + 1))->
4869                                                     rsp_code);
4870                                         }
4871                                 } else {
4872                                         sf_log(sf, CE_NOTE, "!sf%d: Target "
4873                                             "0x%x Reset Failed. Ret=%x\n",
4874                                             ddi_get_instance(sf->sf_dip),
4875                                             sf_alpa_to_switch[
4876                                             target->sft_al_pa], ret);
4877                                         mutex_enter(&cmd->cmd_abort_mutex);
4878                                         if (cmd->cmd_state == SF_STATE_ISSUED) {
4879                                         /* call the transport to abort a cmd */
4880                                         cmd->cmd_timeout = sf_watchdog_time
4881                                             + 20;
4882                                         cmd->cmd_state = SF_STATE_ABORTING;
4883                                         mutex_exit(&cmd->cmd_abort_mutex);
4884                                         if (((t = soc_abort(sf->sf_sochandle,
4885                                             sf->sf_socp,
4886                                             sf->sf_sochandle->fcal_portno,
4887                                             cmd->cmd_fp_pkt, 1)) !=
4888                                             FCAL_ABORTED) &&
4889                                             (t != FCAL_ABORT_FAILED)) {
4890                                                 sf_log(sf, CE_NOTE,
4891                                                     "!sf%d: Target 0x%x Reset "
4892                                                     "failed. Abort Failed, "
4893                                                     "forcing LIP\n",
4894                                                     ddi_get_instance(
4895                                                     sf->sf_dip),
4896                                                     sf_alpa_to_switch[
4897                                                     target->sft_al_pa]);
4898                                                 sf_force_lip(sf);
4899                                                 rval = TRUE;
4900                                                 deferred_destroy = 1;
4901                                         }
4902                                         } else {
4903                                                 mutex_exit
4904                                                     (&cmd->cmd_abort_mutex);
4905                                         }
4906                                 }
4907                         }
4908                         /*
4909                          * Defer releasing the packet if the abort returned
4910                          * FCAL_BAD_ABORT or timed out, because there is a
4911                          * possibility that the ucode might return it.
4912                          * We wait for at least 20s and let it be released
4913                          * by the sf_watch thread
4914                          */
4915                         if (!deferred_destroy) {
4916                                 cmd->cmd_block->fcp_cntl.cntl_reset = 0;
4917                                 cmd->cmd_fp_pkt->fcal_pkt_comp =
4918                                     sf_cmd_callback;
4919                                 cmd->cmd_state = SF_STATE_IDLE;
4920                                 /* for cache */
4921                                 sf_scsi_destroy_pkt(ap, pkt);
4922                         }
4923                 } else {
4924                         cmn_err(CE_WARN, "!sf%d: Target 0x%x Reset Failed. "
4925                             "Resource allocation error.\n",
4926                             ddi_get_instance(sf->sf_dip),
4927                             sf_alpa_to_switch[target->sft_al_pa]);
4928                 }
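                     /*
                      * on success, put the target on the reset-delay list and
                      * schedule the delay timeout
                      */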
4929                 mutex_enter(&sf->sf_mutex);
4930                 if ((rval == TRUE) && (lip_cnt == sf->sf_lip_cnt)) {
4931                         p->target = target;
4932                         p->lip_cnt = lip_cnt;
4933                         p->timeout = ddi_get_lbolt() +
4934                             drv_usectohz(SF_TARGET_RESET_DELAY);
4935                         p->next = sf->sf_reset_list;
4936                         sf->sf_reset_list = p;
4937                         mutex_exit(&sf->sf_mutex);
4938                         mutex_enter(&sf_global_mutex);
4939                         if (sf_reset_timeout_id == 0) {
4940                                 sf_reset_timeout_id = timeout(
4941                                     sf_check_reset_delay, NULL,
4942                                     drv_usectohz(SF_TARGET_RESET_DELAY));
4943                         }
4944                         mutex_exit(&sf_global_mutex);
4945                 } else {
4946                         if (lip_cnt == sf->sf_lip_cnt) {
4947                                 mutex_enter(&target->sft_mutex);
4948                                 target->sft_state &= ~SF_TARGET_BUSY;
4949                                 for (ntarget = target->sft_next_lun;
4950                                     ntarget;
4951                                     ntarget = ntarget->sft_next_lun) {
4952                                         mutex_enter(&ntarget->sft_mutex);
4953                                         ntarget->sft_state &= ~SF_TARGET_BUSY;
4954                                         mutex_exit(&ntarget->sft_mutex);
4955                                 }
4956                                 mutex_exit(&target->sft_mutex);
4957                         }
4958                         mutex_exit(&sf->sf_mutex);
4959                         kmem_free(p, sizeof (struct sf_reset_list));
4960                 }
4961         } else {
4962                 mutex_enter(&sf->sf_mutex);
4963                 if ((sf->sf_state == SF_STATE_OFFLINE) &&
4964                     (sf_watchdog_time < sf->sf_timer)) {
4965                         /*
4966                          * We are currently in a lip, so let this one
4967                          * finish before forcing another one.
4968                          */
4969                         mutex_exit(&sf->sf_mutex);
4970                         return (TRUE);
4971                 }
4972                 mutex_exit(&sf->sf_mutex);
4973                 sf_log(sf, CE_NOTE, "!sf:Target driver initiated lip\n");
4974                 sf_force_lip(sf);
4975                 rval = TRUE;
4976         }
4977         return (rval);
4978 }
4979 
4980 
4981 /*
4982  * abort all commands for a target
4983  *
4984  * if try_abort is set then send an abort
4985  * if abort is set then this is an abort, else this is a reset
4986  */
4987 static void
4988 sf_abort_all(struct sf *sf, struct sf_target *target, int abort, int
4989     lip_cnt, int try_abort)
4990 {
4991         struct sf_target *ntarget;
4992         struct sf_pkt *cmd, *head = NULL, *tail = NULL, *pcmd = NULL, *tcmd;
4993         struct fcal_packet *fpkt;
4994         struct scsi_pkt *pkt;
4995         int rval = FCAL_ABORTED;
4996 
4997         /*
4998          * First pull all commands for all LUNs on this target out of the
4999          * overflow list.  We can tell it's the same target by comparing
5000          * the node WWN.
5001          */
5002         mutex_enter(&sf->sf_mutex);
5003         if (lip_cnt == sf->sf_lip_cnt) {
5004                 mutex_enter(&sf->sf_cmd_mutex);
5005                 cmd = sf->sf_pkt_head;
5006                 while (cmd != NULL) {
5007                         ntarget = ADDR2TARGET(&cmd->cmd_pkt->
5008                             pkt_address);
5009                         if (ntarget == target) {
5010                                 if (pcmd != NULL)
5011                                         pcmd->cmd_next = cmd->cmd_next;
5012                                 else
5013                                         sf->sf_pkt_head = cmd->cmd_next;
5014                                 if (sf->sf_pkt_tail == cmd) {
5015                                         sf->sf_pkt_tail = pcmd;
5016                                         if (pcmd != NULL)
5017                                                 pcmd->cmd_next = NULL;
5018                                 }
5019                                 tcmd = cmd->cmd_next;
5020                                 if (head == NULL) {
5021                                         head = cmd;
5022                                         tail = cmd;
5023                                 } else {
5024                                         tail->cmd_next = cmd;
5025                                         tail = cmd;
5026                                 }
5027                                 cmd->cmd_next = NULL;
5028                                 cmd = tcmd;
5029                         } else {
5030                                 pcmd = cmd;
5031                                 cmd = cmd->cmd_next;
5032                         }
5033                 }
5034                 mutex_exit(&sf->sf_cmd_mutex);
5035         }
5036         mutex_exit(&sf->sf_mutex);
5037 
5038         /*
5039          * Now complete all the commands on our list.  In the process,
5040          * the completion routine may take the commands off the target
5041          * lists.
5042          */
5043         cmd = head;
5044         while (cmd != NULL) {
5045                 pkt = cmd->cmd_pkt;
5046                 if (abort) {
5047                         pkt->pkt_reason = CMD_ABORTED;
5048                         pkt->pkt_statistics |= STAT_ABORTED;
5049                 } else {
5050                         pkt->pkt_reason = CMD_RESET;
5051                         pkt->pkt_statistics |= STAT_DEV_RESET;
5052                 }
5053                 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5054                 cmd->cmd_state = SF_STATE_IDLE;
5055                 cmd = cmd->cmd_next;
5056                 /*
5057                  * call the packet completion routine only for
5058                  * non-polled commands. Ignore the polled commands, as
5059                  * they time out and will be handled differently.
5060                  */
5061                 if ((pkt->pkt_comp) && !(pkt->pkt_flags & FLAG_NOINTR))
5062                         (*pkt->pkt_comp)(pkt);
5063 
5064         }
5065 
5066         /*
5067          * Finally get all outstanding commands for each LUN, and abort them if
5068          * they've been issued, and call the completion routine.
5069          * For the case where sf_offline_target is called from sf_watch
5070          * due to an Offline Timeout, it is quite possible that the soc+
5071          * ucode is hosed and therefore cannot return the commands.
5072          * Clean up all the issued commands as well.
5073          * Try_abort will be false only if sf_abort_all is coming from
5074          * sf_offline_target.
5075          */
5076 
5077         if (try_abort || sf->sf_state == SF_STATE_OFFLINE) {
5078                 mutex_enter(&target->sft_pkt_mutex);
5079                 cmd = tcmd = target->sft_pkt_head;
5080                 while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
5081                         fpkt = cmd->cmd_fp_pkt;
5082                         pkt = cmd->cmd_pkt;
5083                         mutex_enter(&cmd->cmd_abort_mutex);
5084                         if ((cmd->cmd_state == SF_STATE_ISSUED) &&
5085                             (fpkt->fcal_cmd_state &
5086                             FCAL_CMD_IN_TRANSPORT) &&
5087                             ((fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE) ==
5088                             0) && !(pkt->pkt_flags & FLAG_NOINTR)) {
5089                                 cmd->cmd_state = SF_STATE_ABORTING;
5090                                 cmd->cmd_timeout = sf_watchdog_time +
5091                                     cmd->cmd_pkt->pkt_time + 20;
5092                                 mutex_exit(&cmd->cmd_abort_mutex);
5093                                 mutex_exit(&target->sft_pkt_mutex);
5094                                 if (try_abort) {
5095                                         /* call the transport to abort a pkt */
5096                                         rval = soc_abort(sf->sf_sochandle,
5097                                             sf->sf_socp,
5098                                             sf->sf_sochandle->fcal_portno,
5099                                             fpkt, 1);
5100                                 }
5101                                 if ((rval == FCAL_ABORTED) ||
5102                                     (rval == FCAL_ABORT_FAILED)) {
5103                                         if (abort) {
5104                                                 pkt->pkt_reason = CMD_ABORTED;
5105                                                 pkt->pkt_statistics |=
5106                                                     STAT_ABORTED;
5107                                         } else {
5108                                                 pkt->pkt_reason = CMD_RESET;
5109                                                 pkt->pkt_statistics |=
5110                                                     STAT_DEV_RESET;
5111                                         }
5112                                         cmd->cmd_state = SF_STATE_IDLE;
5113                                         if (pkt->pkt_comp)
5114                                                 (*pkt->pkt_comp)(pkt);
5115                                 }
5116                                 mutex_enter(&sf->sf_mutex);
5117                                 if (lip_cnt != sf->sf_lip_cnt) {
5118                                         mutex_exit(&sf->sf_mutex);
5119                                         return;
5120                                 }
5121                                 mutex_exit(&sf->sf_mutex);
5122                                 mutex_enter(&target->sft_pkt_mutex);
5123                                 cmd = target->sft_pkt_head;
5124                         } else {
5125                                 mutex_exit(&cmd->cmd_abort_mutex);
5126                                 cmd = cmd->cmd_forw;
5127                         }
5128                 }
5129                 mutex_exit(&target->sft_pkt_mutex);
5130         }
5131 }
5132 
5133 
5134 /*
5135  * called by the transport to start a packet
5136  */
5137 static int
5138 sf_start(struct scsi_address *ap, struct scsi_pkt *pkt)
5139 {
5140         struct sf *sf = ADDR2SF(ap);
5141         struct sf_target *target = ADDR2TARGET(ap);
5142         struct sf_pkt *cmd = PKT2CMD(pkt);
5143         int rval;
5144 
5145 
5146         SF_DEBUG(6, (sf, CE_NOTE, "sf_start\n"));
5147 
5148         if (cmd->cmd_state == SF_STATE_ISSUED) {
5149                 cmn_err(CE_PANIC, "sf: issuing packet twice 0x%p\n",
5150                     (void *)cmd);
5151         }
5152 
5153         /* prepare the packet for transport */
5154         if ((rval = sf_prepare_pkt(sf, cmd, target)) != TRAN_ACCEPT) {
5155                 return (rval);
5156         }
5157 
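             /*
              * the target is busy or offline: fail the command if the target
              * is offline, reject polled commands, and queue the rest until
              * the target is ready
              */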
5158         if (target->sft_state & (SF_TARGET_BUSY|SF_TARGET_OFFLINE)) {
5159                 if (target->sft_state & SF_TARGET_OFFLINE) {
5160                         return (TRAN_FATAL_ERROR);
5161                 }
5162                 if (pkt->pkt_flags & FLAG_NOINTR) {
5163                         return (TRAN_BUSY);
5164                 }
5165                 mutex_enter(&sf->sf_cmd_mutex);
5166                 sf->sf_use_lock = TRUE;
5167                 goto enque;
5168         }
5169 
5170 
5171         /* if no interrupts then do polled I/O */
5172         if (pkt->pkt_flags & FLAG_NOINTR) {
5173                 return (sf_dopoll(sf, cmd));
5174         }
5175 
5176         /* regular interrupt-driven I/O */
5177 
5178         if (!sf->sf_use_lock) {
5179 
5180                 /* locking not needed */
5181 
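                     /* a pkt_time of zero means no timeout */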
5182                 cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
5183                     sf_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
5184                 cmd->cmd_state = SF_STATE_ISSUED;
5185 
5186                 /* call the transport to send a pkt */
5187                 if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt,
5188                     FCAL_NOSLEEP, CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5189                         cmd->cmd_state = SF_STATE_IDLE;
5190                         return (TRAN_BADPKT);
5191                 }
5192                 return (TRAN_ACCEPT);
5193         }
5194 
5195         /* regular I/O using locking */
5196 
5197         mutex_enter(&sf->sf_cmd_mutex);
5198         if ((sf->sf_ncmds >= sf->sf_throttle) ||
5199             (sf->sf_pkt_head != NULL)) {
5200 enque:
5201                 /*
5202                  * either we're throttling back or there are already commands
5203                  * on the queue, so enqueue this one for later
5204                  */
5205                 cmd->cmd_flags |= CFLAG_IN_QUEUE;
5206                 if (sf->sf_pkt_head != NULL) {
5207                         /* add to the queue */
5208                         sf->sf_pkt_tail->cmd_next = cmd;
5209                         cmd->cmd_next = NULL;
5210                         sf->sf_pkt_tail = cmd;
5211                 } else {
5212                         /* this is the first entry in the queue */
5213                         sf->sf_pkt_head = sf->sf_pkt_tail = cmd;
5214                         cmd->cmd_next = NULL;
5215                 }
5216                 mutex_exit(&sf->sf_cmd_mutex);
5217                 return (TRAN_ACCEPT);
5218         }
5219 
5220         /*
5221          * start this packet now
5222          */
5223 
5224         /* still have cmd mutex */
5225         return (sf_start_internal(sf, cmd));
5226 }
5227 
5228 
5229 /*
5230  * internal routine to start a packet from the queue now
5231  *
5232  * enter with cmd mutex held and leave with it released
5233  */
5234 static int
5235 sf_start_internal(struct sf *sf, struct sf_pkt *cmd)
5236 {
5237         /* we have the cmd mutex */
5238         sf->sf_ncmds++;
5239         mutex_exit(&sf->sf_cmd_mutex);
5240 
5241         ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5242         SF_DEBUG(6, (sf, CE_NOTE, "sf_start_internal\n"));
5243 
5244         cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ? sf_watchdog_time +
5245             cmd->cmd_pkt->pkt_time : 0;
5246         cmd->cmd_state = SF_STATE_ISSUED;
5247 
5248         /* call transport to send the pkt */
5249         if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt, FCAL_NOSLEEP,
5250             CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5251                 cmd->cmd_state = SF_STATE_IDLE;
5252                 mutex_enter(&sf->sf_cmd_mutex);
5253                 sf->sf_ncmds--;
5254                 mutex_exit(&sf->sf_cmd_mutex);
5255                 return (TRAN_BADPKT);
5256         }
5257         return (TRAN_ACCEPT);
5258 }
5259 
5260 
5261 /*
5262  * prepare a packet for transport
5263  */
5264 static int
5265 sf_prepare_pkt(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5266 {
5267         struct fcp_cmd *fcmd = cmd->cmd_block;
5268 
5269 /* XXXX Need to set the LUN ? */
5270         bcopy((caddr_t)&target->sft_lun.b,
5271             (caddr_t)&fcmd->fcp_ent_addr,
5272             FCP_LUN_SIZE);
5273         cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
5274         cmd->cmd_pkt->pkt_state = 0;
5275         cmd->cmd_pkt->pkt_statistics = 0;
5276 
5277 
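             /* reject non-polled commands that have no completion callback */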
5278         if ((cmd->cmd_pkt->pkt_comp == NULL) &&
5279             ((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0)) {
5280                 return (TRAN_BADPKT);
5281         }
5282 
5283         /* invalidate imp field(s) of rsp block */
5284         cmd->cmd_rsp_block->fcp_u.i_fcp_status = SF_BAD_DMA_MAGIC;
5285 
5286         /* set up the amount of I/O to do */
5287         if (cmd->cmd_flags & CFLAG_DMAVALID) {
5288                 cmd->cmd_pkt->pkt_resid = cmd->cmd_dmacount;
5289                 if (cmd->cmd_flags & CFLAG_CMDIOPB) {
5290                         (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
5291                             DDI_DMA_SYNC_FORDEV);
5292                 }
5293         } else {
5294                 cmd->cmd_pkt->pkt_resid = 0;
5295         }
5296 
5297         /* set up the Tagged Queuing type */
5298         if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
5299                 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
5300         } else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
5301                 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
5302         }
5303 
5304         /*
5305          * Sync the cmd segment
5306          */
5307         (void) ddi_dma_sync(cmd->cmd_cr_pool->cmd_dma_handle,
5308             (caddr_t)fcmd - cmd->cmd_cr_pool->cmd_base,
5309             sizeof (struct fcp_cmd), DDI_DMA_SYNC_FORDEV);
5310 
5311         sf_fill_ids(sf, cmd, target);
5312         return (TRAN_ACCEPT);
5313 }
5314 
5315 
5316 /*
5317  * fill in packet hdr source and destination IDs and hdr byte count
5318  */
5319 static void
5320 sf_fill_ids(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5321 {
5322         struct fcal_packet *fpkt = cmd->cmd_fp_pkt;
5323         fc_frame_header_t       *hp;
5324 
5325 
5326         hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
5327         hp->d_id = target->sft_al_pa;
5328         hp->s_id = sf->sf_al_pa;
5329         fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
5330             cmd->cmd_dmacookie.dmac_size;
5331 }
5332 
5333 
5334 /*
5335  * do polled I/O using transport
5336  */
5337 static int
5338 sf_dopoll(struct sf *sf, struct sf_pkt *cmd)
5339 {
5340         int timeout;
5341         int rval;
5342 
5343 
5344         mutex_enter(&sf->sf_cmd_mutex);
5345         sf->sf_ncmds++;
5346         mutex_exit(&sf->sf_cmd_mutex);
5347 
5348         timeout = cmd->cmd_pkt->pkt_time ? cmd->cmd_pkt->pkt_time
5349             : SF_POLL_TIMEOUT;
5350         cmd->cmd_timeout = 0;
5351         cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
5352         cmd->cmd_state = SF_STATE_ISSUED;
5353 
5354         /* call transport to send a pkt polled */
5355         rval = soc_transport_poll(sf->sf_sochandle, cmd->cmd_fp_pkt,
5356             timeout*1000000, CQ_REQUEST_1);
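             /*
              * the poll has returned: restore the completion callback and
              * clean up if the transport failed or timed out
              */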
5357         mutex_enter(&cmd->cmd_abort_mutex);
5358         cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5359         if (rval != FCAL_TRANSPORT_SUCCESS) {
5360                 if (rval == FCAL_TRANSPORT_TIMEOUT) {
5361                         cmd->cmd_state = SF_STATE_ABORTING;
5362                         mutex_exit(&cmd->cmd_abort_mutex);
5363                         (void) sf_target_timeout(sf, cmd);
5364                 } else {
5365                         mutex_exit(&cmd->cmd_abort_mutex);
5366                 }
5367                 cmd->cmd_state = SF_STATE_IDLE;
5368                 cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5369                 mutex_enter(&sf->sf_cmd_mutex);
5370                 sf->sf_ncmds--;
5371                 mutex_exit(&sf->sf_cmd_mutex);
5372                 return (TRAN_BADPKT);
5373         }
5374         mutex_exit(&cmd->cmd_abort_mutex);
5375         cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5376         sf_cmd_callback(cmd->cmd_fp_pkt);
5377         return (TRAN_ACCEPT);
5378 }
5379 
5380 
5381 /* a shortcut for defining debug messages below */
5382 #ifdef  DEBUG
5383 #define SF_DMSG1(s)             msg1 = s
5384 #else
5385 #define SF_DMSG1(s)             /* do nothing */
5386 #endif
5387 
5388 
5389 /*
5390  * the pkt_comp callback for command packets
5391  */
5392 static void
5393 sf_cmd_callback(struct fcal_packet *fpkt)
5394 {
5395         struct sf_pkt *cmd = (struct sf_pkt *)fpkt->fcal_pkt_private;
5396         struct scsi_pkt *pkt = cmd->cmd_pkt;
5397         struct sf *sf = ADDR2SF(&pkt->pkt_address);
5398         struct sf_target *target = ADDR2TARGET(&pkt->pkt_address);
5399         struct fcp_rsp *rsp;
5400         char *msg1 = NULL;
5401         char *msg2 = NULL;
5402         short ncmds;
5403         int tgt_id;
5404         int good_scsi_status = TRUE;
5405 
5406 
5407 
5408         if (cmd->cmd_state == SF_STATE_IDLE) {
5409                 cmn_err(CE_PANIC, "sf: completing idle packet 0x%p\n",
5410                     (void *)cmd);
5411         }
5412 
5413         mutex_enter(&cmd->cmd_abort_mutex);
5414         if (cmd->cmd_state == SF_STATE_ABORTING) {
5415                 /* cmd already being aborted -- nothing to do */
5416                 mutex_exit(&cmd->cmd_abort_mutex);
5417                 return;
5418         }
5419 
5420         cmd->cmd_state = SF_STATE_IDLE;
5421         mutex_exit(&cmd->cmd_abort_mutex);
5422 
5423         if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
5424 
5425                 (void) ddi_dma_sync(cmd->cmd_cr_pool->rsp_dma_handle,
5426                     (caddr_t)cmd->cmd_rsp_block - cmd->cmd_cr_pool->rsp_base,
5427                     FCP_MAX_RSP_IU_SIZE, DDI_DMA_SYNC_FORKERNEL);
5428 
5429                 rsp = (struct fcp_rsp *)cmd->cmd_rsp_block;
5430 
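                     /*
                      * if the magic value written in sf_prepare_pkt is still
                      * present, the response was never DMAed into the host
                      */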
5431                 if (rsp->fcp_u.i_fcp_status == SF_BAD_DMA_MAGIC) {
5432 
5433                         if (sf_core && (sf_core & SF_CORE_BAD_DMA)) {
5434                                 sf_token = (int *)(uintptr_t)
5435                                     fpkt->fcal_socal_request.\
5436                                     sr_soc_hdr.sh_request_token;
5437                                 (void) soc_take_core(sf->sf_sochandle,
5438                                     sf->sf_socp);
5439                         }
5440 
5441                         pkt->pkt_reason = CMD_INCOMPLETE;
5442                         pkt->pkt_state = STATE_GOT_BUS;
5443                         pkt->pkt_statistics |= STAT_ABORTED;
5444 
5445                 } else {
5446 
5447                         pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
5448                             STATE_SENT_CMD | STATE_GOT_STATUS;
5449                         pkt->pkt_resid = 0;
5450                         if (cmd->cmd_flags & CFLAG_DMAVALID) {
5451                                 pkt->pkt_state |= STATE_XFERRED_DATA;
5452                         }
5453 
5454                         if ((pkt->pkt_scbp != NULL) &&
5455                             ((*(pkt->pkt_scbp) =
5456                             rsp->fcp_u.fcp_status.scsi_status)
5457                             != STATUS_GOOD)) {
5458                                 good_scsi_status = FALSE;
5459                         /*
5460                          * These two flag checks make sure that if the
5461                          * command came back with a check condition but
5462                          * neither a valid response nor sense data, the
5463                          * command will be retried.
5464                          */
5465                                 if (!rsp->fcp_u.fcp_status.rsp_len_set &&
5466                                     !rsp->fcp_u.fcp_status.sense_len_set) {
5467                                         pkt->pkt_state &= ~STATE_XFERRED_DATA;
5468                                         pkt->pkt_resid = cmd->cmd_dmacount;
5469                                 }
5470                         }
5471 
5472                         if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
5473                             (pkt->pkt_state & STATE_XFERRED_DATA)) {
5474                                 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0,
5475                                     (uint_t)0, DDI_DMA_SYNC_FORCPU);
5476                         }
5477                         /*
5478                          * Update the transfer resid, if appropriate
5479                          */
5480                         if (rsp->fcp_u.fcp_status.resid_over ||
5481                             rsp->fcp_u.fcp_status.resid_under)
5482                                 pkt->pkt_resid = rsp->fcp_resid;
5483 
5484                         /*
5485                          * Check to see if the SCSI command failed.
5486                          *
5487                          */
5488 
5489                         /*
5490                          * First see if we got an FCP protocol error.
5491                          */
5492                         if (rsp->fcp_u.fcp_status.rsp_len_set) {
5493                                 struct fcp_rsp_info *bep;
5494 
5495                                 bep = (struct fcp_rsp_info *)
5496                                     (&rsp->fcp_response_len + 1);
5497                                 if (bep->rsp_code != FCP_NO_FAILURE) {
5498                                         pkt->pkt_reason = CMD_TRAN_ERR;
5499                                         tgt_id = pkt->pkt_address.a_target;
5500                                         switch (bep->rsp_code) {
5501                                         case FCP_CMND_INVALID:
5502                                                 SF_DMSG1("FCP_RSP FCP_CMND "
5503                                                     "fields invalid");
5504                                                 break;
5505                                         case FCP_TASK_MGMT_NOT_SUPPTD:
5506                                                 SF_DMSG1("FCP_RSP Task"
5507                                                     "Management Function"
5508                                                     "Not Supported");
5509                                                 break;
5510                                         case FCP_TASK_MGMT_FAILED:
5511                                                 SF_DMSG1("FCP_RSP Task "
5512                                                     "Management Function"
5513                                                     "Failed");
5514                                                 sf->sf_stats.tstats[tgt_id].
5515                                                     task_mgmt_failures++;
5516                                                 break;
5517                                         case FCP_DATA_RO_MISMATCH:
5518                                                 SF_DMSG1("FCP_RSP FCP_DATA RO "
5519                                                     "mismatch with "
5520                                                     "FCP_XFER_RDY DATA_RO");
5521                                                 sf->sf_stats.tstats[tgt_id].
5522                                                     data_ro_mismatches++;
5523                                                 break;
5524                                         case FCP_DL_LEN_MISMATCH:
5525                                                 SF_DMSG1("FCP_RSP FCP_DATA "
5526                                                     "length "
5527                                                     "different than BURST_LEN");
5528                                                 sf->sf_stats.tstats[tgt_id].
5529                                                     dl_len_mismatches++;
5530                                                 break;
5531                                         default:
5532                                                 SF_DMSG1("FCP_RSP invalid "
5533                                                     "RSP_CODE");
5534                                                 break;
5535                                         }
5536                                 }
5537                         }
5538 
5539                         /*
5540                          * See if we got a SCSI error with sense data
5541                          */
5542                         if (rsp->fcp_u.fcp_status.sense_len_set) {
5543                                 uchar_t rqlen = min(rsp->fcp_sense_len,
5544                                     sizeof (struct scsi_extended_sense));
5545                                 caddr_t sense = (caddr_t)rsp +
5546                                     sizeof (struct fcp_rsp) +
5547                                     rsp->fcp_response_len;
5548                                 struct scsi_arq_status *arq;
5549                                 struct scsi_extended_sense *sensep =
5550                                     (struct scsi_extended_sense *)sense;
5551 
5552                                 if (rsp->fcp_u.fcp_status.scsi_status !=
5553                                     STATUS_GOOD) {
5554                                 if (rsp->fcp_u.fcp_status.scsi_status
5555                                     == STATUS_CHECK) {
5556                                         if (sensep->es_key ==
5557                                             KEY_RECOVERABLE_ERROR)
5558                                                 good_scsi_status = 1;
5559                                         if (sensep->es_key ==
5560                                             KEY_UNIT_ATTENTION &&
5561                                             sensep->es_add_code == 0x3f &&
5562                                             sensep->es_qual_code == 0x0e) {
5563                                                 /* REPORT_LUNS_HAS_CHANGED */
5564                                                 sf_log(sf, CE_NOTE,
5565                                                 "!REPORT_LUNS_HAS_CHANGED\n");
5566                                                 sf_force_lip(sf);
5567                                         }
5568                                 }
5569                                 }
5570 
5571                                 if ((pkt->pkt_scbp != NULL) &&
5572                                     (cmd->cmd_scblen >=
5573                                         sizeof (struct scsi_arq_status))) {
5574 
5575                                 pkt->pkt_state |= STATE_ARQ_DONE;
5576 
5577                                 arq = (struct scsi_arq_status *)pkt->pkt_scbp;
5578                                 /*
5579                                  * copy out sense information
5580                                  */
5581                                 bcopy(sense, (caddr_t)&arq->sts_sensedata,
5582                                     rqlen);
5583                                 arq->sts_rqpkt_resid =
5584                                     sizeof (struct scsi_extended_sense) -
5585                                         rqlen;
5586                                 *((uchar_t *)&arq->sts_rqpkt_status) =
5587                                     STATUS_GOOD;
5588                                 arq->sts_rqpkt_reason = 0;
5589                                 arq->sts_rqpkt_statistics = 0;
5590                                 arq->sts_rqpkt_state = STATE_GOT_BUS |
5591                                     STATE_GOT_TARGET | STATE_SENT_CMD |
5592                                     STATE_GOT_STATUS | STATE_ARQ_DONE |
5593                                     STATE_XFERRED_DATA;
5594                             }
5595                                 target->sft_alive = TRUE;
5596                         }
5597 
5598                         /*
5599                          * The firmware returns the number of bytes actually
5600                          * transferred into/out of the host.  If it differs
5601                          * from what we asked for, we may have lost frames.
5602                          */
5603                         if ((pkt->pkt_reason == 0) && (pkt->pkt_resid == 0) &&
5604                             (good_scsi_status) &&
5605                             (pkt->pkt_state & STATE_XFERRED_DATA) &&
5606                             (!(cmd->cmd_flags & CFLAG_CMDIOPB)) &&
5607                             (target->sft_device_type != DTYPE_ESI)) {
5608                                 int byte_cnt =
5609                                     fpkt->fcal_socal_request.
5610                                     sr_soc_hdr.sh_byte_cnt;
5611                                 if (cmd->cmd_flags & CFLAG_DMASEND) {
5612                                         if (byte_cnt != 0) {
5613                                         sf_log(sf, CE_NOTE,
5614                                             "!sf_cmd_callback: Lost Frame: "
5615                                             "(write) received 0x%x expected"
5616                                             " 0x%x target 0x%x\n",
5617                                             byte_cnt, cmd->cmd_dmacount,
5618                                             sf_alpa_to_switch[
5619                                             target->sft_al_pa]);
5620                                         pkt->pkt_reason = CMD_INCOMPLETE;
5621                                         pkt->pkt_statistics |= STAT_ABORTED;
5622                                         }
5623                                 } else if (byte_cnt < cmd->cmd_dmacount) {
5624                                         sf_log(sf, CE_NOTE,
5625                                             "!sf_cmd_callback: "
5626                                             "Lost Frame: (read) "
5627                                             "received 0x%x expected 0x%x "
5628                                             "target 0x%x\n", byte_cnt,
5629                                             cmd->cmd_dmacount,
5630                                             sf_alpa_to_switch[
5631                                             target->sft_al_pa]);
5632                                         pkt->pkt_reason = CMD_INCOMPLETE;
5633                                         pkt->pkt_statistics |= STAT_ABORTED;
5634                                 }
5635                         }
5636                 }
5637 
5638         } else {
5639 
5640                 /* pkt status was not ok */
5641 
5642                 switch (fpkt->fcal_pkt_status) {
5643 
5644                 case FCAL_STATUS_ERR_OFFLINE:
5645                         SF_DMSG1("Fibre Channel Offline");
5646                         mutex_enter(&target->sft_mutex);
5647                         if (!(target->sft_state & SF_TARGET_OFFLINE)) {
5648                                 target->sft_state |= (SF_TARGET_BUSY
5649                                     | SF_TARGET_MARK);
5650                         }
5651                         mutex_exit(&target->sft_mutex);
5652                         (void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
5653                             target->sft_dip, FCAL_REMOVE_EVENT,
5654                             &sf_remove_eid, NDI_EVENT_NOPASS);
5655                         (void) ndi_event_run_callbacks(sf->sf_event_hdl,
5656                             target->sft_dip, sf_remove_eid, NULL);
5657                         pkt->pkt_reason = CMD_TRAN_ERR;
5658                         pkt->pkt_statistics |= STAT_BUS_RESET;
5659                         break;
5660 
5661                 case FCAL_STATUS_MAX_XCHG_EXCEEDED:
5662                         sf_throttle(sf);
5663                         sf->sf_use_lock = TRUE;
5664                         pkt->pkt_reason = CMD_TRAN_ERR;
5665                         pkt->pkt_state = STATE_GOT_BUS;
5666                         pkt->pkt_statistics |= STAT_ABORTED;
5667                         break;
5668 
5669                 case FCAL_STATUS_TIMEOUT:
5670                         SF_DMSG1("Fibre Channel Timeout");
5671                         pkt->pkt_reason = CMD_TIMEOUT;
5672                         break;
5673 
5674                 case FCAL_STATUS_ERR_OVERRUN:
5675                         SF_DMSG1("CMD_DATA_OVR");
5676                         pkt->pkt_reason = CMD_DATA_OVR;
5677                         break;
5678 
5679                 case FCAL_STATUS_UNKNOWN_CQ_TYPE:
5680                         SF_DMSG1("Unknown CQ type");
5681                         pkt->pkt_reason = CMD_TRAN_ERR;
5682                         break;
5683 
5684                 case FCAL_STATUS_BAD_SEG_CNT:
5685                         SF_DMSG1("Bad SEG CNT");
5686                         pkt->pkt_reason = CMD_TRAN_ERR;
5687                         break;
5688 
5689                 case FCAL_STATUS_BAD_XID:
5690                         SF_DMSG1("Fibre Channel Invalid X_ID");
5691                         pkt->pkt_reason = CMD_TRAN_ERR;
5692                         break;
5693 
5694                 case FCAL_STATUS_XCHG_BUSY:
5695                         SF_DMSG1("Fibre Channel Exchange Busy");
5696                         pkt->pkt_reason = CMD_TRAN_ERR;
5697                         break;
5698 
5699                 case FCAL_STATUS_INSUFFICIENT_CQES:
5700                         SF_DMSG1("Insufficient CQEs");
5701                         pkt->pkt_reason = CMD_TRAN_ERR;
5702                         break;
5703 
5704                 case FCAL_STATUS_ALLOC_FAIL:
5705                         SF_DMSG1("ALLOC FAIL");
5706                         pkt->pkt_reason = CMD_TRAN_ERR;
5707                         break;
5708 
5709                 case FCAL_STATUS_BAD_SID:
5710                         SF_DMSG1("Fibre Channel Invalid S_ID");
5711                         pkt->pkt_reason = CMD_TRAN_ERR;
5712                         break;
5713 
5714                 case FCAL_STATUS_INCOMPLETE_DMA_ERR:
5715                         if (sf_core && (sf_core & SF_CORE_INCOMPLETE_DMA)) {
5716                                 sf_token = (int *)(uintptr_t)
5717                                     fpkt->fcal_socal_request.\
5718                                     sr_soc_hdr.sh_request_token;
5719                                 (void) soc_take_core(sf->sf_sochandle,
5720                                     sf->sf_socp);
5721                                 sf_core = 0;
5722                         }
5723                         msg2 =
5724                         "INCOMPLETE DMA XFER due to bad SOC+ card, replace HBA";
5725                         pkt->pkt_reason = CMD_INCOMPLETE;
5726                         pkt->pkt_state = STATE_GOT_BUS;
5727                         pkt->pkt_statistics |= STAT_ABORTED;
5728                         break;
5729 
5730                 case FCAL_STATUS_CRC_ERR:
5731                         msg2 = "Fibre Channel CRC Error on frames";
5732                         pkt->pkt_reason = CMD_INCOMPLETE;
5733                         pkt->pkt_state = STATE_GOT_BUS;
5734                         pkt->pkt_statistics |= STAT_ABORTED;
5735                         break;
5736 
5737                 case FCAL_STATUS_NO_SEQ_INIT:
5738                         SF_DMSG1("Fibre Channel Seq Init Error");
5739                         pkt->pkt_reason = CMD_TRAN_ERR;
5740                         break;
5741 
5742                 case  FCAL_STATUS_OPEN_FAIL:
5743                         pkt->pkt_reason = CMD_TRAN_ERR;
5744                         SF_DMSG1("Fibre Channel Open Failure");
5745                         if ((target->sft_state & (SF_TARGET_BUSY |
5746                             SF_TARGET_MARK | SF_TARGET_OFFLINE)) == 0) {
5747                                 sf_log(sf, CE_NOTE,
5748                                     "!Open failure to target 0x%x "
5749                                     "forcing LIP\n",
5750                                     sf_alpa_to_switch[target->sft_al_pa]);
5751                                 sf_force_lip(sf);
5752                         }
5753                         break;
5754 
5755 
5756                 case FCAL_STATUS_ONLINE_TIMEOUT:
5757                         SF_DMSG1("Fibre Channel Online Timeout");
5758                         pkt->pkt_reason = CMD_TRAN_ERR;
5759                         break;
5760 
5761                 default:
5762                         SF_DMSG1("Unknown FC Status");
5763                         pkt->pkt_reason = CMD_TRAN_ERR;
5764                         break;
5765                 }
5766         }
5767 
5768 #ifdef  DEBUG
5769         /*
5770          * msg1 will be non-NULL if we've detected some sort of error
5771          */
5772         if (msg1 != NULL && sfdebug >= 4) {
5773                 sf_log(sf, CE_WARN,
5774                     "!Transport error on cmd=0x%p target=0x%x:  %s\n",
5775                     (void *)fpkt, pkt->pkt_address.a_target, msg1);
5776         }
5777 #endif
5778 
5779         if (msg2 != NULL) {
5780                 sf_log(sf, CE_WARN, "!Transport error on target=0x%x:  %s\n",
5781                     pkt->pkt_address.a_target, msg2);
5782         }
5783 
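             /*
              * if the number of outstanding commands is near the throttle,
              * start sending new commands through the locked overflow path
              */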
5784         ncmds = fpkt->fcal_ncmds;
5785         ASSERT(ncmds >= 0);
5786         if (ncmds >= (sf->sf_throttle - SF_HI_CMD_DELTA)) {
5787 #ifdef DEBUG
5788                 if (!sf->sf_use_lock) {
5789                         SF_DEBUG(4, (sf, CE_NOTE, "use lock flag on\n"));
5790                 }
5791 #endif
5792                 sf->sf_use_lock = TRUE;
5793         }
5794 
5795         mutex_enter(&sf->sf_cmd_mutex);
5796         sf->sf_ncmds = ncmds;
5797         sf_throttle_start(sf);
5798         mutex_exit(&sf->sf_cmd_mutex);
5799 
5800         if (!msg1 && !msg2)
5801                 SF_DEBUG(6, (sf, CE_NOTE, "Completing pkt 0x%p\n",
5802                     (void *)pkt));
5803         if (pkt->pkt_comp != NULL) {
5804                 (*pkt->pkt_comp)(pkt);
5805         }
5806 }
5807 
5808 #undef  SF_DMSG1
5809 
5810 
5811 
5812 /*
5813  * start any queued commands for this instance, up to the throttle limit
5814  */
5815 static void
5816 sf_throttle_start(struct sf *sf)
5817 {
5818         struct sf_pkt *cmd, *prev_cmd = NULL;
5819         struct scsi_pkt *pkt;
5820         struct sf_target *target;
5821 
5822 
5823         ASSERT(mutex_owned(&sf->sf_cmd_mutex));
5824 
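             /* drain the overflow queue while online and below the throttle */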
5825         cmd = sf->sf_pkt_head;
5826         while ((cmd != NULL) &&
5827             (sf->sf_state == SF_STATE_ONLINE) &&
5828             (sf->sf_ncmds < sf->sf_throttle)) {
5829 
5830                 pkt = CMD2PKT(cmd);
5831 
5832                 target = ADDR2TARGET(&pkt->pkt_address);
5833                 if (target->sft_state & SF_TARGET_BUSY) {
5834                         /* this command's target is busy -- go to next */
5835                         ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5836                         prev_cmd = cmd;
5837                         cmd = cmd->cmd_next;
5838                         continue;
5839                 }
5840 
5841                 ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5842 
5843                 /* this cmd not busy and not issued */
5844 
5845                 /* remove this packet from the queue */
5846                 if (sf->sf_pkt_head == cmd) {
5847                         /* this was the first packet */
5848                         sf->sf_pkt_head = cmd->cmd_next;
5849                 } else if (sf->sf_pkt_tail == cmd) {
5850                         /* this was the last packet */
5851                         sf->sf_pkt_tail = prev_cmd;
5852                         if (prev_cmd != NULL) {
5853                                 prev_cmd->cmd_next = NULL;
5854                         }
5855                 } else {
5856                         /* some packet in the middle of the queue */
5857                         ASSERT(prev_cmd != NULL);
5858                         prev_cmd->cmd_next = cmd->cmd_next;
5859                 }
5860                 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5861 
5862                 if (target->sft_state & SF_TARGET_OFFLINE) {
5863                         mutex_exit(&sf->sf_cmd_mutex);
5864                         pkt->pkt_reason = CMD_TRAN_ERR;
5865                         if (pkt->pkt_comp != NULL) {
5866                                 (*pkt->pkt_comp)(cmd->cmd_pkt);
5867                         }
5868                 } else {
5869                         sf_fill_ids(sf, cmd, target);
5870                         if (sf_start_internal(sf, cmd) != TRAN_ACCEPT) {
5871                                 pkt->pkt_reason = CMD_TRAN_ERR;
5872                                 if (pkt->pkt_comp != NULL) {
5873                                         (*pkt->pkt_comp)(cmd->cmd_pkt);
5874                                 }
5875                         }
5876                 }
5877                 mutex_enter(&sf->sf_cmd_mutex);
5878                 cmd = sf->sf_pkt_head;
5879                 prev_cmd = NULL;
5880         }
5881 }
5882 
5883 
5884 /*
5885  * called when the max exchange value is exceeded to throttle back commands
5886  */
5887 static void
5888 sf_throttle(struct sf *sf)
5889 {
5890         int cmdmax = sf->sf_sochandle->fcal_cmdmax;
5891 
5892 
5893         mutex_enter(&sf->sf_cmd_mutex);
5894 
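             /*
              * flag the throttle-back so that sf_watch does not immediately
              * raise the throttle again on its next pass
              */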
5895         sf->sf_flag = TRUE;
5896 
5897         if (sf->sf_ncmds > (cmdmax / 2)) {
5898                 sf->sf_throttle = cmdmax / 2;
5899         } else {
5900                 if (sf->sf_ncmds > SF_DECR_DELTA) {
5901                         sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5902                 } else {
5903                         /*
5904                          * This case is just a safeguard; it should not really
5905                          * happen (ncmds < SF_DECR_DELTA and MAX_EXCHG exceeded)
5906                          */
5907                         sf->sf_throttle = SF_DECR_DELTA;
5908                 }
5909         }
5910         mutex_exit(&sf->sf_cmd_mutex);
5911 
5912         sf = sf->sf_sibling;
5913         if (sf != NULL) {
5914                 mutex_enter(&sf->sf_cmd_mutex);
5915                 sf->sf_flag = TRUE;
5916                 if (sf->sf_ncmds >= (cmdmax / 2)) {
5917                         sf->sf_throttle = cmdmax / 2;
5918                 } else {
5919                         if (sf->sf_ncmds > SF_DECR_DELTA) {
5920                                 sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5921                         } else {
5922                                 sf->sf_throttle = SF_DECR_DELTA;
5923                         }
5924                 }
5925 
5926                 mutex_exit(&sf->sf_cmd_mutex);
5927         }
5928 }
5929 
5930 
5931 /*
5932  * sf watchdog routine, called for a timeout
5933  */
5934 /*ARGSUSED*/
5935 static void
5936 sf_watch(void *arg)
5937 {
5938         struct sf *sf;
5939         struct sf_els_hdr       *privp;
5940         static int count = 0, pscan_count = 0;
5941         int cmdmax, i, mescount = 0;
5942         struct sf_target *target;
5943 
5944 
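             /* advance the watchdog clock and the periodic scan counters */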
5945         sf_watchdog_time += sf_watchdog_timeout;
5946         count++;
5947         pscan_count++;
5948 
5949         mutex_enter(&sf_global_mutex);
5950         sf_watch_running = 1;
5951         for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
5952 
5953                 mutex_exit(&sf_global_mutex);
5954 
5955                 /* disable throttling while we're suspended */
5956                 mutex_enter(&sf->sf_mutex);
5957                 if (sf->sf_state & SF_STATE_SUSPENDED) {
5958                         mutex_exit(&sf->sf_mutex);
5959                         SF_DEBUG(1, (sf, CE_CONT,
5960                             "sf_watch, sf%d:throttle disabled "
5961                             "due to DDI_SUSPEND\n",
5962                             ddi_get_instance(sf->sf_dip)));
5963                         mutex_enter(&sf_global_mutex);
5964                         continue;
5965                 }
5966                 mutex_exit(&sf->sf_mutex);
5967 
5968                 cmdmax = sf->sf_sochandle->fcal_cmdmax;
5969 
5970                 if (sf->sf_take_core) {
5971                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
5972                 }
5973 
5974                 mutex_enter(&sf->sf_cmd_mutex);
5975 
5976                 if (!sf->sf_flag) {
5977                         if (sf->sf_throttle < (cmdmax / 2)) {
5978                                 sf->sf_throttle = cmdmax / 2;
5979                         } else if ((sf->sf_throttle += SF_INCR_DELTA) >
5980                             cmdmax) {
5981                                 sf->sf_throttle = cmdmax;
5982                         }
5983                 } else {
5984                         sf->sf_flag = FALSE;
5985                 }
5986 
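                     /* update the running average of outstanding commands */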
5987                 sf->sf_ncmds_exp_avg = (sf->sf_ncmds + sf->sf_ncmds_exp_avg)
5988                     >> 2;
5989                 if ((sf->sf_ncmds <= (sf->sf_throttle - SF_LO_CMD_DELTA)) &&
5990                     (sf->sf_pkt_head == NULL)) {
5991 #ifdef DEBUG
5992                         if (sf->sf_use_lock) {
5993                                 SF_DEBUG(4, (sf, CE_NOTE,
5994                                     "use lock flag off\n"));
5995                         }
5996 #endif
5997                         sf->sf_use_lock = FALSE;
5998                 }
5999 
6000                 if (sf->sf_state == SF_STATE_ONLINE && sf->sf_pkt_head &&
6001                     sf->sf_ncmds < sf->sf_throttle) {
6002                         sf_throttle_start(sf);
6003                 }
6004 
6005                 mutex_exit(&sf->sf_cmd_mutex);
6006 
6007                 if (pscan_count >= sf_pool_scan_cnt) {
6008                         if (sf->sf_ncmds_exp_avg < (sf->sf_cr_pool_cnt <<
6009                             SF_LOG2_ELEMS_IN_POOL) - SF_FREE_CR_EPSILON) {
6010                                 sf_crpool_free(sf);
6011                         }
6012                 }
6013                 mutex_enter(&sf->sf_mutex);
6014 
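                     /* scan the ELS list for timed-out or stale entries */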
6015                 privp = sf->sf_els_list;
6016                 while (privp != NULL) {
6017                         if (privp->timeout < sf_watchdog_time) {
6018                                 /* timeout this command */
6019                                 privp = sf_els_timeout(sf, privp);
6020                         } else if ((privp->timeout == SF_INVALID_TIMEOUT) &&
6021                             (privp->lip_cnt != sf->sf_lip_cnt)) {
6022                                 if (privp->prev != NULL) {
6023                                         privp->prev->next = privp->next;
6024                                 }
6025                                 if (sf->sf_els_list == privp) {
6026                                         sf->sf_els_list = privp->next;
6027                                 }
6028                                 if (privp->next != NULL) {
6029                                         privp->next->prev = privp->prev;
6030                                 }
6031                                 mutex_exit(&sf->sf_mutex);
6032                                 sf_els_free(privp->fpkt);
6033                                 mutex_enter(&sf->sf_mutex);
6034                                 privp = sf->sf_els_list;
6035                         } else {
6036                                 privp = privp->next;
6037                         }
6038                 }
6039 
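                     /*
                      * the online timer has expired: mark all targets and
                      * finish initialization rather than wait any longer
                      */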
6040                 if (sf->sf_online_timer && sf->sf_online_timer <
6041                     sf_watchdog_time) {
6042                         for (i = 0; i < sf_max_targets; i++) {
6043                                 target = sf->sf_targets[i];
6044                                 if (target != NULL) {
6045                                         if (!mescount && target->sft_state &
6046                                             SF_TARGET_BUSY) {
6047                                                 sf_log(sf, CE_WARN, "!Loop "
6048                                                     "Unstable: Failed to bring "
6049                                                     "Loop Online\n");
6050                                                 mescount = 1;
6051                                         }
6052                                         target->sft_state |= SF_TARGET_MARK;
6053                                 }
6054                         }
6055                         sf_finish_init(sf, sf->sf_lip_cnt);
6056                         sf->sf_state = SF_STATE_INIT;
6057                         sf->sf_online_timer = 0;
6058                 }
6059 
6060                 if (sf->sf_state == SF_STATE_ONLINE) {
6061                         mutex_exit(&sf->sf_mutex);
6062                         if (count >= sf_pkt_scan_cnt) {
6063                                 sf_check_targets(sf);
6064                         }
6065                 } else if ((sf->sf_state == SF_STATE_OFFLINE) &&
6066                     (sf->sf_timer < sf_watchdog_time)) {
6067                         for (i = 0; i < sf_max_targets; i++) {
6068                                 target = sf->sf_targets[i];
6069                                 if ((target != NULL) &&
6070                                     (target->sft_state &
6071                                     SF_TARGET_BUSY)) {
6072                                         sf_log(sf, CE_WARN,
6073                                             "!Offline Timeout\n");
6074                                         if (sf_core && (sf_core &
6075                                             SF_CORE_OFFLINE_TIMEOUT)) {
6076                                                 (void) soc_take_core(
6077                                                     sf->sf_sochandle,
6078                                                     sf->sf_socp);
6079                                                 sf_core = 0;
6080                                         }
6081                                         break;
6082                                 }
6083                         }
6084                         sf_finish_init(sf, sf->sf_lip_cnt);
6085                         sf->sf_state = SF_STATE_INIT;
6086                         mutex_exit(&sf->sf_mutex);
6087                 } else {
6088                         mutex_exit(&sf->sf_mutex);
6089                 }
6090                 mutex_enter(&sf_global_mutex);
6091         }
6092         mutex_exit(&sf_global_mutex);
6093         if (count >= sf_pkt_scan_cnt) {
6094                 count = 0;
6095         }
6096         if (pscan_count >= sf_pool_scan_cnt) {
6097                 pscan_count = 0;
6098         }
6099 
6100         /* reset timeout */
6101         sf_watchdog_id = timeout(sf_watch, (caddr_t)0, sf_watchdog_tick);
6102 
6103         /* signal waiting thread */
6104         mutex_enter(&sf_global_mutex);
6105         sf_watch_running = 0;
6106         cv_broadcast(&sf_watch_cv);
6107         mutex_exit(&sf_global_mutex);
6108 }
6109 
6110 
6111 /*
6112  * called during a timeout to check targets
6113  */
6114 static void
6115 sf_check_targets(struct sf *sf)
6116 {
6117         struct sf_target *target;
6118         int i;
6119         struct sf_pkt *cmd;
6120         struct scsi_pkt *pkt;
6121         int lip_cnt;
6122 
6123         mutex_enter(&sf->sf_mutex);
6124         lip_cnt = sf->sf_lip_cnt;
6125         mutex_exit(&sf->sf_mutex);
6126 
6127         /* scan all possible targets */
6128         for (i = 0; i < sf_max_targets; i++) {
6129                 target = sf->sf_targets[i];
6130                 while (target != NULL) {
6131                         mutex_enter(&target->sft_pkt_mutex);
6132                         if (target->sft_alive && target->sft_scan_count !=
6133                             sf_target_scan_cnt) {
6134                                 target->sft_alive = 0;
6135                                 target->sft_scan_count++;
6136                                 mutex_exit(&target->sft_pkt_mutex);
6137                                 return;
6138                         }
6139                         target->sft_alive = 0;
6140                         target->sft_scan_count = 0;
6141                         cmd = target->sft_pkt_head;
6142                         while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
6143                                 mutex_enter(&cmd->cmd_abort_mutex);
6144                                 if (cmd->cmd_state == SF_STATE_ISSUED &&
6145                                     ((cmd->cmd_timeout && sf_watchdog_time >
6146 #ifdef  DEBUG
6147                                     cmd->cmd_timeout) || sf_abort_flag)) {
6148                                         sf_abort_flag = 0;
6149 #else
6150                                         cmd->cmd_timeout))) {
6151 #endif
6152                                         cmd->cmd_timeout = 0;
6153         /* prevent reset from getting at this packet */
6154                                         cmd->cmd_state = SF_STATE_ABORTING;
6155                                         mutex_exit(&cmd->cmd_abort_mutex);
6156                                         mutex_exit(&target->sft_pkt_mutex);
6157                                         sf->sf_stats.tstats[i].timeouts++;
6158                                         if (sf_target_timeout(sf, cmd))
6159                                                 return;
6160                                         else {
6161                                                 if (lip_cnt != sf->sf_lip_cnt) {
6162                                                         return;
6163                                                 } else {
6164                                                         mutex_enter(&target->
6165                                                             sft_pkt_mutex);
6166                                                         cmd = target->
6167                                                             sft_pkt_head;
6168                                                 }
6169                                         }
6170         /*
6171          * if the abort and lip fail, a reset will be carried out.
6172          * But the reset will ignore this packet. We have waited at least
6173          * 20 seconds after the initial timeout. Now, complete it here.
6174          * This also takes care of spurious bad aborts.
6175          */
6176                                 } else if ((cmd->cmd_state ==
6177                                     SF_STATE_ABORTING) && (cmd->cmd_timeout
6178                                     <= sf_watchdog_time)) {
6179                                         cmd->cmd_state = SF_STATE_IDLE;
6180                                         mutex_exit(&cmd->cmd_abort_mutex);
6181                                         mutex_exit(&target->sft_pkt_mutex);
6182                                         SF_DEBUG(1, (sf, CE_NOTE,
6183                                             "Command 0x%p to sft 0x%p"
6184                                             " delayed release\n",
6185                                             (void *)cmd, (void *)target));
6186                                         pkt = cmd->cmd_pkt;
6187                                         pkt->pkt_statistics |=
6188                                             (STAT_TIMEOUT|STAT_ABORTED);
6189                                         pkt->pkt_reason = CMD_TIMEOUT;
6190                                         if (pkt->pkt_comp) {
6191                                                 scsi_hba_pkt_comp(pkt);
6192                                         /* handle deferred_destroy case */
6193                                         } else {
6194                                                 if ((cmd->cmd_block->fcp_cntl.
6195                                                     cntl_reset == 1) ||
6196                                                     (cmd->cmd_block->
6197                                                     fcp_cntl.cntl_abort_tsk ==
6198                                                     1)) {
6199                                                         cmd->cmd_block->
6200                                                             fcp_cntl.
6201                                                             cntl_reset = 0;
6202                                                         cmd->cmd_block->
6203                                                             fcp_cntl.
6204                                                             cntl_abort_tsk = 0;
6205                                                         cmd->cmd_fp_pkt->
6206                                                             fcal_pkt_comp =
6207                                                             sf_cmd_callback;
6208                                                         /* for cache */
6209                                                         sf_scsi_destroy_pkt
6210                                                             (&pkt->pkt_address,
6211                                                             pkt);
6212                                                 }
6213                                         }
6214                                         mutex_enter(&target->sft_pkt_mutex);
6215                                         cmd = target->sft_pkt_head;
6216                                 } else {
6217                                         mutex_exit(&cmd->cmd_abort_mutex);
6218                                         cmd = cmd->cmd_forw;
6219                                 }
6220                         }
6221                         mutex_exit(&target->sft_pkt_mutex);
6222                         target = target->sft_next_lun;
6223                 }
6224         }
6225 }
6226 
6227 
6228 /*
6229  * a command to a target has timed out
6230  * return TRUE if the command abort failed or timed out, else FALSE
6231  */
6232 static int
6233 sf_target_timeout(struct sf *sf, struct sf_pkt *cmd)
6234 {
6235         int rval;
6236         struct scsi_pkt *pkt;
6237         struct fcal_packet *fpkt;
6238         int tgt_id;
6239         int retval = FALSE;
6240 
6241 
6242         SF_DEBUG(1, (sf, CE_NOTE, "Command 0x%p to target %x timed out\n",
6243             (void *)cmd->cmd_fp_pkt, cmd->cmd_pkt->pkt_address.a_target));
6244 
6245         fpkt = cmd->cmd_fp_pkt;
6246 
6247         if (sf_core && (sf_core & SF_CORE_CMD_TIMEOUT)) {
6248                 sf_token = (int *)(uintptr_t)
6249                     fpkt->fcal_socal_request.sr_soc_hdr.\
6250                     sh_request_token;
6251                 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6252                 sf_core = 0;
6253         }
6254 
6255         /* call the transport to abort a command */
6256         rval = soc_abort(sf->sf_sochandle, sf->sf_socp,
6257             sf->sf_sochandle->fcal_portno, fpkt, 1);
6258 
6259         switch (rval) {
6260         case FCAL_ABORTED:
6261                 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort succeeded\n"));
6262                 pkt = cmd->cmd_pkt;
6263                 cmd->cmd_state = SF_STATE_IDLE;
6264                 pkt->pkt_statistics |= (STAT_TIMEOUT|STAT_ABORTED);
6265                 pkt->pkt_reason = CMD_TIMEOUT;
6266                 if (pkt->pkt_comp != NULL) {
6267                         (*pkt->pkt_comp)(pkt);
6268                 }
6269                 break;                          /* success */
6270 
6271         case FCAL_ABORT_FAILED:
6272                 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort failed at target\n"));
6273                 pkt = cmd->cmd_pkt;
6274                 cmd->cmd_state = SF_STATE_IDLE;
6275                 pkt->pkt_reason = CMD_TIMEOUT;
6276                 pkt->pkt_statistics |= STAT_TIMEOUT;
6277                 tgt_id = pkt->pkt_address.a_target;
6278                 sf->sf_stats.tstats[tgt_id].abts_failures++;
6279                 if (pkt->pkt_comp != NULL) {
6280                         (*pkt->pkt_comp)(pkt);
6281                 }
6282                 break;
6283 
6284         case FCAL_BAD_ABORT:
6285                 if (sf_core && (sf_core & SF_CORE_BAD_ABORT)) {
6286                         sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6287                             sr_soc_hdr.sh_request_token;
6288                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6289                         sf_core = 0;
6290                 }
6291                 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort bad abort\n"));
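                     /*
                      * leave the command in ABORTING state; the watchdog's
                      * delayed-release path will complete it once this new
                      * timeout expires
                      */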
6292                 cmd->cmd_timeout = sf_watchdog_time + cmd->cmd_pkt->pkt_time
6293                     + 20;
6294                 break;
6295 
6296         case FCAL_TIMEOUT:
6297                 retval = TRUE;
6298                 break;
6299 
6300         default:
6301                 pkt = cmd->cmd_pkt;
6302                 tgt_id = pkt->pkt_address.a_target;
6303                 sf_log(sf, CE_WARN,
6304                 "Command Abort failed target 0x%x, forcing a LIP\n", tgt_id);
6305                 if (sf_core && (sf_core & SF_CORE_ABORT_TIMEOUT)) {
6306                         sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6307                             sr_soc_hdr.sh_request_token;
6308                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6309                         sf_core = 0;
6310                 }
6311                 sf_force_lip(sf);
6312                 retval = TRUE;
6313                 break;
6314         }
6315 
6316         return (retval);
6317 }
6318 
6319 
6320 /*
6321  * an ELS command has timed out
6322  * return the head of the ELS list (to restart the scan), or NULL
6323  */
6324 static struct sf_els_hdr *
6325 sf_els_timeout(struct sf *sf, struct sf_els_hdr *privp)
6326 {
6327         struct fcal_packet *fpkt;
6328         int rval, dflag, timeout = SF_ELS_TIMEOUT;
6329         uint_t lip_cnt = privp->lip_cnt;
6330         uchar_t els_code = privp->els_code;
6331         struct sf_target *target = privp->target;
6332         char what[64];
6333 
6334         fpkt = privp->fpkt;
6335         dflag = privp->delayed_retry;
6336         /* use as temporary state variable */
6337         privp->timeout = SF_INVALID_TIMEOUT;
6338         mutex_exit(&sf->sf_mutex);
6339 
6340         if (privp->fpkt->fcal_pkt_comp == sf_els_callback) {
6341                 /*
6342                  * take socal core if required. Timeouts for IB and hosts
6343                  * are not very interesting, so we take socal core only
6344                  * if the timeout is *not* for an IB or host.
6345                  */
6346                 if (sf_core && (sf_core & SF_CORE_ELS_TIMEOUT) &&
6347                     ((sf_alpa_to_switch[privp->dest_nport_id] &
6348                     0x0d) != 0x0d) && ((privp->dest_nport_id != 1) &&
6349                     (privp->dest_nport_id != 2) &&
6350                     (privp->dest_nport_id != 4) &&
6351                     (privp->dest_nport_id != 8) &&
6352                     (privp->dest_nport_id != 0xf))) {
6353                         sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6354                             sr_soc_hdr.sh_request_token;
6355                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6356                         sf_core = 0;
6357                 }
6358                 (void) sprintf(what, "ELS 0x%x", privp->els_code);
6359         } else if (privp->fpkt->fcal_pkt_comp == sf_reportlun_callback) {
6360                 if (sf_core && (sf_core & SF_CORE_REPORTLUN_TIMEOUT)) {
6361                         sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6362                             sr_soc_hdr.sh_request_token;
6363                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6364                         sf_core = 0;
6365                 }
6366                 timeout = SF_FCP_TIMEOUT;
6367                 (void) sprintf(what, "REPORT_LUNS");
6368         } else if (privp->fpkt->fcal_pkt_comp == sf_inq_callback) {
6369                 if (sf_core && (sf_core & SF_CORE_INQUIRY_TIMEOUT)) {
6370                         sf_token = (int *)(uintptr_t)
6371                             fpkt->fcal_socal_request.\
6372                             sr_soc_hdr.sh_request_token;
6373                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6374                         sf_core = 0;
6375                 }
6376                 timeout = SF_FCP_TIMEOUT;
6377                 (void) sprintf(what, "INQUIRY to LUN 0x%lx",
6378                     (long)SCSA_LUN(target));
6379         } else {
6380                 (void) sprintf(what, "UNKNOWN OPERATION");
6381         }
6382 
6383         if (dflag) {
6384                 /* delayed retry */
6385                 SF_DEBUG(2, (sf, CE_CONT,
6386                     "!sf%d: %s to target %x delayed retry\n",
6387                     ddi_get_instance(sf->sf_dip), what,
6388                     sf_alpa_to_switch[privp->dest_nport_id]));
6389                 privp->delayed_retry = FALSE;
6390                 goto try_again;
6391         }
6392 
6393         sf_log(sf, CE_NOTE, "!%s to target 0x%x alpa 0x%x timed out\n",
6394             what, sf_alpa_to_switch[privp->dest_nport_id],
6395             privp->dest_nport_id);
6396 
6397         rval = soc_abort(sf->sf_sochandle, sf->sf_socp, sf->sf_sochandle
6398             ->fcal_portno, fpkt, 1);
6399         if (rval == FCAL_ABORTED || rval == FCAL_ABORT_FAILED) {
6400                 SF_DEBUG(1, (sf, CE_NOTE, "!%s abort to al_pa %x succeeded\n",
6401                     what, privp->dest_nport_id));
6402 try_again:
6403 
6404                 mutex_enter(&sf->sf_mutex);
6405                 if (privp->prev != NULL) {
6406                         privp->prev->next = privp->next;
6407                 }
6408                 if (sf->sf_els_list == privp) {
6409                         sf->sf_els_list = privp->next;
6410                 }
6411                 if (privp->next != NULL) {
6412                         privp->next->prev = privp->prev;
6413                 }
6414                 privp->prev = privp->next = NULL;
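                     /*
                      * the entry is now unlinked from sf_els_list; retry it
                      * only if no new LIP has occurred since it was issued
                      */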
6415                 if (lip_cnt == sf->sf_lip_cnt) {
6416                         privp->timeout = sf_watchdog_time + timeout;
6417                         if ((++(privp->retries) < sf_els_retries) ||
6418                             (dflag && (privp->retries < SF_BSY_RETRIES))) {
6419                                 mutex_exit(&sf->sf_mutex);
6420                                 sf_log(sf, CE_NOTE,
6421                                     "!%s to target 0x%x retrying\n",
6422                                     what,
6423                                     sf_alpa_to_switch[privp->dest_nport_id]);
6424                                 if (sf_els_transport(sf, privp) == 1) {
6425                                         mutex_enter(&sf->sf_mutex);
6426                                         return (sf->sf_els_list); /* success */
6427                                 }
6428                                 mutex_enter(&sf->sf_mutex);
6429                                 fpkt = NULL;
6430                         }
6431                         if ((lip_cnt == sf->sf_lip_cnt) &&
6432                             (els_code != LA_ELS_LOGO)) {
6433                                 if (target != NULL) {
6434                                         sf_offline_target(sf, target);
6435                                 }
6436                                 if (sf->sf_lip_cnt == lip_cnt) {
6437                                         sf->sf_device_count--;
6438                                         ASSERT(sf->sf_device_count >= 0);
6439                                         if (sf->sf_device_count == 0) {
6440                                                 sf_finish_init(sf,
6441                                                     sf->sf_lip_cnt);
6442                                         }
6443                                 }
6444                         }
6445                         privp = sf->sf_els_list;
6446                         mutex_exit(&sf->sf_mutex);
6447                         if (fpkt != NULL) {
6448                                 sf_els_free(fpkt);
6449                         }
6450                 } else {
6451                         mutex_exit(&sf->sf_mutex);
6452                         sf_els_free(privp->fpkt);
6453                         privp = NULL;
6454                 }
6455         } else {
6456                 if (sf_core && (sf_core & SF_CORE_ELS_FAILED)) {
6457                         sf_token = (int *)(uintptr_t)
6458                             fpkt->fcal_socal_request.\
6459                             sr_soc_hdr.sh_request_token;
6460                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6461                         sf_core = 0;
6462                 }
6463                 sf_log(sf, CE_NOTE, "%s abort to target 0x%x failed. "
6464                     "status=0x%x, forcing LIP\n", what,
6465                     sf_alpa_to_switch[privp->dest_nport_id], rval);
6466                 privp = NULL;
6467                 if (sf->sf_lip_cnt == lip_cnt) {
6468                         sf_force_lip(sf);
6469                 }
6470         }
6471 
6472         mutex_enter(&sf->sf_mutex);
6473         return (privp);
6474 }
6475 
6476 
6477 /*
6478  * called via timeout() to process target reset delays that have expired
6479  */
6480 /*ARGSUSED*/
6481 static void
6482 sf_check_reset_delay(void *arg)
6483 {
6484         struct sf *sf;
6485         struct sf_target *target;
6486         struct sf_reset_list *rp, *tp;
6487         uint_t lip_cnt, reset_timeout_flag = FALSE;
6488         clock_t lb;
6489 
6490         lb = ddi_get_lbolt();
6491 
6492         mutex_enter(&sf_global_mutex);
6493 
6494         sf_reset_timeout_id = 0;
6495 
6496         for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
6497 
6498                 mutex_exit(&sf_global_mutex);
6499                 mutex_enter(&sf->sf_mutex);
6500 
6501                 /* is this type cast needed? */
6502                 tp = (struct sf_reset_list *)&sf->sf_reset_list;
6503 
6504                 rp = sf->sf_reset_list;
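                     /*
                      * for each entry: if its delay has expired abort the
                      * target's commands and clear the busy state; if it is
                      * from an old LIP just discard it; otherwise leave it
                      * and re-arm the timeout below
                      */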
6505                 while (rp != NULL) {
6506                         if (((rp->timeout - lb) < 0) &&
6507                             (rp->lip_cnt == sf->sf_lip_cnt)) {
6508                                 tp->next = rp->next;
6509                                 mutex_exit(&sf->sf_mutex);
6510                                 target = rp->target;
6511                                 lip_cnt = rp->lip_cnt;
6512                                 kmem_free(rp, sizeof (struct sf_reset_list));
6513                                 /* abort all cmds for this target */
6514                                 while (target) {
6515                                         sf_abort_all(sf, target, FALSE,
6516                                             lip_cnt, TRUE);
6517                                         mutex_enter(&target->sft_mutex);
6518                                         if (lip_cnt == sf->sf_lip_cnt) {
6519                                                 target->sft_state &=
6520                                                     ~SF_TARGET_BUSY;
6521                                         }
6522                                         mutex_exit(&target->sft_mutex);
6523                                         target = target->sft_next_lun;
6524                                 }
6525                                 mutex_enter(&sf->sf_mutex);
6526                                 tp = (struct sf_reset_list *)
6527                                     &sf->sf_reset_list;
6528                                 rp = sf->sf_reset_list;
6529                                 lb = ddi_get_lbolt();
6530                         } else if (rp->lip_cnt != sf->sf_lip_cnt) {
6531                                 tp->next = rp->next;
6532                                 kmem_free(rp, sizeof (struct sf_reset_list));
6533                                 rp = tp->next;
6534                         } else {
6535                                 reset_timeout_flag = TRUE;
6536                                 tp = rp;
6537                                 rp = rp->next;
6538                         }
6539                 }
6540                 mutex_exit(&sf->sf_mutex);
6541                 mutex_enter(&sf_global_mutex);
6542         }
6543 
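             /* re-arm the timer if any entries are still pending */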
6544         if (reset_timeout_flag && (sf_reset_timeout_id == 0)) {
6545                 sf_reset_timeout_id = timeout(sf_check_reset_delay,
6546                     NULL, drv_usectohz(SF_TARGET_RESET_DELAY));
6547         }
6548 
6549         mutex_exit(&sf_global_mutex);
6550 }
6551 
6552 
6553 /*
6554  * called to "reset the bus", i.e. force loop initialization (and address
6555  * re-negotiation)
6556  */
6557 static void
6558 sf_force_lip(struct sf *sf)
6559 {
6560         int i;
6561         struct sf_target *target;
6562 
6563 
6564         /* disable restart of lip if we're suspended */
6565         mutex_enter(&sf->sf_mutex);
6566         if (sf->sf_state & SF_STATE_SUSPENDED) {
6567                 mutex_exit(&sf->sf_mutex);
6568                 SF_DEBUG(1, (sf, CE_CONT,
6569                     "sf_force_lip, sf%d: lip restart disabled "
6570                     "due to DDI_SUSPEND\n",
6571                     ddi_get_instance(sf->sf_dip)));
6572                 return;
6573         }
6574 
6575         sf_log(sf, CE_NOTE, "Forcing lip\n");
6576 
6577         for (i = 0; i < sf_max_targets; i++) {
6578                 target = sf->sf_targets[i];
6579                 while (target != NULL) {
6580                         mutex_enter(&target->sft_mutex);
6581                         if (!(target->sft_state & SF_TARGET_OFFLINE))
6582                                 target->sft_state |= SF_TARGET_BUSY;
6583                         mutex_exit(&target->sft_mutex);
6584                         target = target->sft_next_lun;
6585                 }
6586         }
6587 
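             /*
              * bump the LIP count, set the offline timeout, and mark the
              * port offline while the loop re-initializes
              */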
6588         sf->sf_lip_cnt++;
6589         sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
6590         sf->sf_state = SF_STATE_OFFLINE;
6591         mutex_exit(&sf->sf_mutex);
6592         sf->sf_stats.lip_count++;            /* no mutex for this? */
6593 
6594 #ifdef DEBUG
6595         /* are we allowing LIPs ?? */
6596         if (sf_lip_flag != 0) {
6597 #endif
6598                 /* call the transport to force loop initialization */
6599                 if (((i = soc_force_lip(sf->sf_sochandle, sf->sf_socp,
6600                     sf->sf_sochandle->fcal_portno, 1,
6601                     FCAL_FORCE_LIP)) != FCAL_SUCCESS) &&
6602                     (i != FCAL_TIMEOUT)) {
6603                         /* force LIP failed */
6604                         if (sf_core && (sf_core & SF_CORE_LIP_FAILED)) {
6605                                 (void) soc_take_core(sf->sf_sochandle,
6606                                     sf->sf_socp);
6607                                 sf_core = 0;
6608                         }
6609 #ifdef DEBUG
6610                         /* are we allowing reset after LIP failed ?? */
6611                         if (sf_reset_flag != 0) {
6612 #endif
6613                                 /* restart socal after resetting it */
6614                                 sf_log(sf, CE_NOTE,
6615                                     "!Force lip failed Status code 0x%x."
6616                                     " Resetting\n", i);
6617                                 /* call transport to force a reset */
6618                                 soc_force_reset(sf->sf_sochandle, sf->sf_socp,
6619                                     sf->sf_sochandle->fcal_portno, 1);
6620 #ifdef  DEBUG
6621                         }
6622 #endif
6623                 }
6624 #ifdef  DEBUG
6625         }
6626 #endif
6627 }
6628 
6629 
6630 /*
6631  * called by the transport when an unsolicited ELS is received
6632  */
6633 static void
6634 sf_unsol_els_callback(void *arg, soc_response_t *srp, caddr_t payload)
6635 {
6636         struct sf *sf = (struct sf *)arg;
6637         els_payload_t   *els = (els_payload_t *)payload;
6638         struct la_els_rjt *rsp;
6639         int     i, tgt_id;
6640         uchar_t dest_id;
6641         struct fcal_packet *fpkt;
6642         fc_frame_header_t *hp;
6643         struct sf_els_hdr *privp;
6644 
6645 
6646         if ((els == NULL) || ((i = srp->sr_soc_hdr.sh_byte_cnt) == 0)) {
6647                 return;
6648         }
6649 
6650         if (i > SOC_CQE_PAYLOAD) {
6651                 i = SOC_CQE_PAYLOAD;
6652         }
6653 
6654         dest_id = (uchar_t)srp->sr_fc_frame_hdr.s_id;
6655         tgt_id = sf_alpa_to_switch[dest_id];
6656 
6657         switch (els->els_cmd.c.ls_command) {
6658 
6659         case LA_ELS_LOGO:
6660                 /*
6661                  * logout received -- log the fact
6662                  */
6663                 sf->sf_stats.tstats[tgt_id].logouts_recvd++;
6664                 sf_log(sf, CE_NOTE, "!LOGO recvd from target %x, %s\n",
6665                     tgt_id,
6666                     sf_lip_on_plogo ? "Forcing LIP...." : "");
6667                 if (sf_lip_on_plogo) {
6668                         sf_force_lip(sf);
6669                 }
6670                 break;
6671 
6672         default:  /* includes LA_ELS_PLOGI */
6673                 /*
6674                  * something besides a logout was received -- we don't
6675                  * handle this, so send back a reject saying it's unsupported
6676                  */
6677 
6678                 sf_log(sf, CE_NOTE, "!ELS 0x%x recvd from target 0x%x\n",
6679                     els->els_cmd.c.ls_command, tgt_id);
6680 
6681 
6682                 /* allocate room for a response */
6683                 if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
6684                     sizeof (struct la_els_rjt), sizeof (union sf_els_rsp),
6685                     (caddr_t *)&privp, (caddr_t *)&rsp) == NULL) {
6686                         break;
6687                 }
6688 
6689                 fpkt = privp->fpkt;
6690 
6691                 /* fill in pkt header */
6692                 hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
6693                 hp->r_ctl = R_CTL_ELS_RSP;
6694                 hp->f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
6695                 hp->ox_id = srp->sr_fc_frame_hdr.ox_id;
6696                 hp->rx_id = srp->sr_fc_frame_hdr.rx_id;
6697                 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
6698                     CQ_TYPE_OUTBOUND;
6699 
6700                 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 1;
6701 
6702                 /* fill in response */
6703                 rsp->ls_code = LA_ELS_RJT;   /* reject this ELS */
6704                 rsp->mbz[0] = 0;
6705                 rsp->mbz[1] = 0;
6706                 rsp->mbz[2] = 0;
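                     /*
                      * note: the saved response code is pre-set to ACC here,
                      * presumably so the completion callback treats this
                      * exchange as done
                      */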
6707                 ((struct la_els_logi *)privp->rsp)->ls_code = LA_ELS_ACC;
6708                 *((int *)&rsp->reserved) = 0;
6709                 rsp->reason_code = RJT_UNSUPPORTED;
6710                 privp->retries = sf_els_retries;
6711                 privp->els_code = LA_ELS_RJT;
6712                 privp->timeout = (unsigned)0xffffffff;
6713                 (void) sf_els_transport(sf, privp);
6714                 break;
6715         }
6716 }
6717 
6718 
6719 /*
6720  * Error logging, printing, and debug print routines
6721  */
6722 
6723 /*PRINTFLIKE3*/
6724 static void
6725 sf_log(struct sf *sf, int level, const char *fmt, ...)
6726 {
6727         char buf[256];
6728         dev_info_t *dip;
6729         va_list ap;
6730 
6731         if (sf != NULL) {
6732                 dip = sf->sf_dip;
6733         } else {
6734                 dip = NULL;
6735         }
6736 
6737         va_start(ap, fmt);
6738         (void) vsnprintf(buf, sizeof (buf), fmt, ap);
6739         va_end(ap);
6740         scsi_log(dip, "sf", level, buf);
6741 }
6742 
6743 
6744 /*
6745  * called to get some sf kstats -- return 0 on success else return errno
6746  */
6747 static int
6748 sf_kstat_update(kstat_t *ksp, int rw)
6749 {
6750         struct sf *sf;
6751 
6752         if (rw == KSTAT_WRITE) {
6753                 /* can't write */
6754                 return (EACCES);
6755         }
6756 
6757         sf = ksp->ks_private;
6758         sf->sf_stats.ncmds = sf->sf_ncmds;
6759         sf->sf_stats.throttle_limit = sf->sf_throttle;
6760         sf->sf_stats.cr_pool_size = sf->sf_cr_pool_cnt;
6761 
6762         return (0);                             /* success */
6763 }
6764 
6765 
6766 /*
6767  * Unix Entry Points
6768  */
6769 
6770 /*
6771  * driver entry point for opens on control device
6772  */
6773 /* ARGSUSED */
6774 static int
6775 sf_open(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
6776 {
6777         dev_t dev = *dev_p;
6778         struct sf *sf;
6779 
6780 
6781         /* just ensure soft state exists for this device */
6782         sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6783         if (sf == NULL) {
6784                 return (ENXIO);
6785         }
6786 
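             /* count this open so sf_close() can reject a close with no open */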
6787         ++(sf->sf_check_n_close);
6788 
6789         return (0);
6790 }
6791 
6792 
6793 /*
6794  * driver entry point for last close on control device
6795  */
6796 /* ARGSUSED */
6797 static int
6798 sf_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
6799 {
6800         struct sf *sf;
6801 
6802         sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6803         if (sf == NULL) {
6804                 return (ENXIO);
6805         }
6806 
6807         if (!sf->sf_check_n_close) { /* no open outstanding */
6808                 cmn_err(CE_WARN, "sf%d: trying to close unopened instance",
6809                     SF_MINOR2INST(getminor(dev)));
6810                 return (ENODEV);
6811         } else {
6812                 --(sf->sf_check_n_close);
6813         }
6814         return (0);
6815 }
6816 
6817 
6818 /*
6819  * driver entry point for sf ioctl commands
6820  */
6821 /* ARGSUSED */
6822 static int
6823 sf_ioctl(dev_t dev,
6824     int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
6825 {
6826         struct sf *sf;
6827         struct sf_target *target;
6828         uchar_t al_pa;
6829         struct sf_al_map map;
6830         int cnt, i;
6831         int     retval;                         /* return value */
6832         struct devctl_iocdata *dcp;
6833         dev_info_t *cdip;
6834         struct scsi_address ap;
6835         scsi_hba_tran_t *tran;
6836 
6837 
6838         sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6839         if (sf == NULL) {
6840                 return (ENXIO);
6841         }
6842 
6843         /* handle all ioctls */
6844         switch (cmd) {
6845 
6846         /*
6847          * We can use the generic implementation for these ioctls
6848          */
6849         case DEVCTL_DEVICE_GETSTATE:
6850         case DEVCTL_DEVICE_ONLINE:
6851         case DEVCTL_DEVICE_OFFLINE:
6852         case DEVCTL_BUS_GETSTATE:
6853                 return (ndi_devctl_ioctl(sf->sf_dip, cmd, arg, mode, 0));
6854 
6855         /*
6856          * return FC map
6857          */
6858         case SFIOCGMAP:
6859                 if ((sf->sf_lilp_map->lilp_magic != FCAL_LILP_MAGIC &&
6860                     sf->sf_lilp_map->lilp_magic != FCAL_BADLILP_MAGIC) ||
6861                     sf->sf_state != SF_STATE_ONLINE) {
6862                         retval = ENOENT;
6863                         goto dun;
6864                 }
6865                 mutex_enter(&sf->sf_mutex);
6866                 if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
6867                         int i, j = 0;
6868 
6869                         /* Need to generate a fake lilp map */
6870                         for (i = 0; i < sf_max_targets; i++) {
6871                                 if (sf->sf_targets[i])
6872                                         sf->sf_lilp_map->lilp_alpalist[j++] =
6873                                             sf->sf_targets[i]->
6874                                             sft_hard_address;
6875                         }
6876                         sf->sf_lilp_map->lilp_length = (uchar_t)j;
6877                 }
6878                 cnt = sf->sf_lilp_map->lilp_length;
6879                 map.sf_count = (short)cnt;
6880                 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
6881                     (caddr_t)&map.sf_hba_addr.sf_node_wwn,
6882                     sizeof (la_wwn_t));
6883                 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
6884                     (caddr_t)&map.sf_hba_addr.sf_port_wwn,
6885                     sizeof (la_wwn_t));
6886                 map.sf_hba_addr.sf_al_pa = sf->sf_al_pa;
6887                 map.sf_hba_addr.sf_hard_address = 0;
6888                 map.sf_hba_addr.sf_inq_dtype = DTYPE_UNKNOWN;
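                     /* fill in one address pair per AL_PA in the LILP map */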
6889                 for (i = 0; i < cnt; i++) {
6890                         al_pa = sf->sf_lilp_map->lilp_alpalist[i];
6891                         map.sf_addr_pair[i].sf_al_pa = al_pa;
6892                         if (al_pa == sf->sf_al_pa) {
6893                                 (void) bcopy((caddr_t)&sf->sf_sochandle
6894                                     ->fcal_n_wwn, (caddr_t)&map.
6895                                     sf_addr_pair[i].sf_node_wwn,
6896                                     sizeof (la_wwn_t));
6897                                 (void) bcopy((caddr_t)&sf->sf_sochandle
6898                                     ->fcal_p_wwn, (caddr_t)&map.
6899                                     sf_addr_pair[i].sf_port_wwn,
6900                                     sizeof (la_wwn_t));
6901                                 map.sf_addr_pair[i].sf_hard_address =
6902                                     al_pa;
6903                                 map.sf_addr_pair[i].sf_inq_dtype =
6904                                     DTYPE_PROCESSOR;
6905                                 continue;
6906                         }
6907                         target = sf->sf_targets[sf_alpa_to_switch[
6908                             al_pa]];
6909                         if (target != NULL) {
6910                                 mutex_enter(&target->sft_mutex);
6911                                 if (!(target->sft_state &
6912                                     (SF_TARGET_OFFLINE |
6913                                     SF_TARGET_BUSY))) {
6914                                         bcopy((caddr_t)&target->
6915                                             sft_node_wwn,
6916                                             (caddr_t)&map.sf_addr_pair
6917                                             [i].sf_node_wwn,
6918                                             sizeof (la_wwn_t));
6919                                         bcopy((caddr_t)&target->
6920                                             sft_port_wwn,
6921                                             (caddr_t)&map.sf_addr_pair
6922                                             [i].sf_port_wwn,
6923                                             sizeof (la_wwn_t));
6924                                         map.sf_addr_pair[i].
6925                                             sf_hard_address
6926                                             = target->sft_hard_address;
6927                                         map.sf_addr_pair[i].
6928                                             sf_inq_dtype
6929                                             = target->sft_device_type;
6930                                         mutex_exit(&target->sft_mutex);
6931                                         continue;
6932                                 }
6933                                 mutex_exit(&target->sft_mutex);
6934                         }
6935                         bzero((caddr_t)&map.sf_addr_pair[i].
6936                             sf_node_wwn, sizeof (la_wwn_t));
6937                         bzero((caddr_t)&map.sf_addr_pair[i].
6938                             sf_port_wwn, sizeof (la_wwn_t));
6939                         map.sf_addr_pair[i].sf_inq_dtype =
6940                             DTYPE_UNKNOWN;
6941                 }
6942                 mutex_exit(&sf->sf_mutex);
6943                 if (ddi_copyout((caddr_t)&map, (caddr_t)arg,
6944                     sizeof (struct sf_al_map), mode) != 0) {
6945                         retval = EFAULT;
6946                         goto dun;
6947                 }
6948                 break;
6949 
6950         /*
6951          * handle device control ioctls
6952          */
6953         case DEVCTL_DEVICE_RESET:
6954                 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) {
6955                         retval = EFAULT;
6956                         goto dun;
6957                 }
6958                 if ((ndi_dc_getname(dcp) == NULL) ||
6959                     (ndi_dc_getaddr(dcp) == NULL)) {
6960                         ndi_dc_freehdl(dcp);
6961                         retval = EINVAL;
6962                         goto dun;
6963                 }
6964                 cdip = ndi_devi_find(sf->sf_dip,
6965                     ndi_dc_getname(dcp), ndi_dc_getaddr(dcp));
6966                 ndi_dc_freehdl(dcp);
6967 
6968                 if (cdip == NULL) {
6969                         retval = ENXIO;
6970                         goto dun;
6971                 }
6972 
6973                 if ((target = sf_get_target_from_dip(sf, cdip)) == NULL) {
6974                         retval = ENXIO;
6975                         goto dun;
6976                 }
6977                 mutex_enter(&target->sft_mutex);
6978                 if (!(target->sft_state & SF_TARGET_INIT_DONE)) {
6979                         mutex_exit(&target->sft_mutex);
6980                         retval = ENXIO;
6981                         goto dun;
6982                 }
6983 
6984                 /* This is ugly: fake an address so we can call sf_reset() */
6985                 tran = kmem_zalloc(scsi_hba_tran_size(), KM_SLEEP);
6986                 bcopy(target->sft_tran, tran, scsi_hba_tran_size());
6987                 mutex_exit(&target->sft_mutex);
6988                 ap.a_hba_tran = tran;
6989                 ap.a_target = sf_alpa_to_switch[target->sft_al_pa];
6990                 if (sf_reset(&ap, RESET_TARGET) == FALSE) {
6991                         retval = EIO;
6992                 } else {
6993                         retval = 0;
6994                 }
6995                 kmem_free(tran, scsi_hba_tran_size());
6996                 goto dun;
6997 
6998         case DEVCTL_BUS_QUIESCE:
6999         case DEVCTL_BUS_UNQUIESCE:
7000                 retval = ENOTSUP;
7001                 goto dun;
7002 
7003         case DEVCTL_BUS_RESET:
7004         case DEVCTL_BUS_RESETALL:
7005                 sf_force_lip(sf);
7006                 break;
7007 
7008         default:
7009                 retval = ENOTTY;
7010                 goto dun;
7011         }
7012 
7013         retval = 0;                             /* success */
7014 
7015 dun:
7016         return (retval);
7017 }
7018 
7019 
7020 /*
7021  * get the target given a DIP
7022  */
7023 static struct sf_target *
7024 sf_get_target_from_dip(struct sf *sf, dev_info_t *dip)
7025 {
7026         int i;
7027         struct sf_target *target;
7028 
7029 
7030         /* scan each hash queue for the DIP in question */
7031         for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
7032                 target = sf->sf_wwn_lists[i];
7033                 while (target != NULL) {
7034                         if (target->sft_dip == dip) {
7035                                 return (target); /* success: target found */
7036                         }
7037                         target = target->sft_next;
7038                 }
7039         }
7040         return (NULL);                          /* failure: target not found */
7041 }
7042 
7043 
7044 /*
7045  * called by the transport to get an event cookie
7046  */
7047 static int
7048 sf_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
7049     ddi_eventcookie_t *event_cookiep)
7050 {
7051         struct sf *sf;
7052 
7053         sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7054         if (sf == NULL) {
7055                 /* can't find instance for this device */
7056                 return (DDI_FAILURE);
7057         }
7058 
7059         return (ndi_event_retrieve_cookie(sf->sf_event_hdl, rdip, name,
7060             event_cookiep, NDI_EVENT_NOPASS));
7061 
7062 }
7063 
7064 
7065 /*
7066  * called by the transport to add an event callback
7067  */
7068 static int
7069 sf_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
7070     ddi_eventcookie_t eventid, void (*callback)(dev_info_t *dip,
7071     ddi_eventcookie_t event, void *arg, void *impl_data), void *arg,
7072     ddi_callback_id_t *cb_id)
7073 {
7074         struct sf *sf;
7075 
7076         sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7077         if (sf == NULL) {
7078                 /* can't find instance for this device */
7079                 return (DDI_FAILURE);
7080         }
7081 
7082         return (ndi_event_add_callback(sf->sf_event_hdl, rdip,
7083             eventid, callback, arg, NDI_SLEEP, cb_id));
7084 
7085 }
7086 
7087 
7088 /*
7089  * called by the transport to remove an event callback
7090  */
7091 static int
7092 sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id)
7093 {
7094         struct sf *sf;
7095 
7096         sf = ddi_get_soft_state(sf_state, ddi_get_instance(devi));
7097         if (sf == NULL) {
7098                 /* can't find instance for this device */
7099                 return (DDI_FAILURE);
7100         }
7101 
7102         return (ndi_event_remove_callback(sf->sf_event_hdl, cb_id));
7103 }
7104 
7105 
7106 /*
7107  * called by the transport to post an event
7108  */
7109 static int
7110 sf_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
7111     ddi_eventcookie_t eventid, void *impldata)
7112 {
7113         ddi_eventcookie_t remove_cookie, cookie;
7114 
7115         struct sf *sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7116 
7117         if (sf == NULL) {
7118                 /* no sf instance for this device */
7119                 return (NDI_FAILURE);
7120         }
7121 
7122         /* is this a remove event ?? */
7123         remove_cookie = ndi_event_tag_to_cookie(sf->sf_event_hdl,
7124             SF_EVENT_TAG_REMOVE);
7125 
7126         if (remove_cookie == eventid) {
7127                 /* handle remove event */
7128                 struct sf_target *target;
7129 
7130                 /* get the target for this event */
7131                 if ((target = sf_get_target_from_dip(sf, rdip)) != NULL) {
7132                         /*
7133                          * clear device info for this target and mark as
7134                          * not done
7135                          */
7136                         mutex_enter(&target->sft_mutex);
7137                         target->sft_dip = NULL;
7138                         target->sft_state &= ~SF_TARGET_INIT_DONE;
7139                         mutex_exit(&target->sft_mutex);
7140                         return (NDI_SUCCESS); /* event handled */
7141                 }
7142 
7143                 /* no target for this event */
7144                 return (NDI_FAILURE);
7145         }
7146 
7147         /* an insertion event */
7148         if (ndi_busop_get_eventcookie(dip, rdip, FCAL_INSERT_EVENT, &cookie)
7149             != NDI_SUCCESS) {
7150                 return (NDI_FAILURE);
7151         }
7152 
7153         return (ndi_post_event(dip, rdip, cookie, impldata));
7154 }
7155 
7156 
7157 /*
7158  * the sf hotplug daemon, one thread per sf instance
7159  */
7160 static void
7161 sf_hp_daemon(void *arg)
7162 {
7163         struct sf *sf = (struct sf *)arg;
7164         struct sf_hp_elem *elem;
7165         struct sf_target *target;
7166         int tgt_id;
7167         callb_cpr_t cprinfo;
7168 
7169         CALLB_CPR_INIT(&cprinfo, &sf->sf_hp_daemon_mutex,
7170             callb_generic_cpr, "sf_hp_daemon");
7171 
7172         mutex_enter(&sf->sf_hp_daemon_mutex);
7173 
7174         do {
7175                 while (sf->sf_hp_elem_head != NULL) {
7176 
7177                         /* save ptr to head of list */
7178                         elem = sf->sf_hp_elem_head;
7179 
7180                         /* take element off of list */
7181                         if (sf->sf_hp_elem_head == sf->sf_hp_elem_tail) {
7182                                 /* element only one in list -- list now empty */
7183                                 sf->sf_hp_elem_head = NULL;
7184                                 sf->sf_hp_elem_tail = NULL;
7185                         } else {
7186                                 /* remove element from head of list */
7187                                 sf->sf_hp_elem_head = sf->sf_hp_elem_head->next;
7188                         }
7189 
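                             /* drop the daemon mutex while handling the element */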
7190                         mutex_exit(&sf->sf_hp_daemon_mutex);
7191 
7192                         switch (elem->what) {
7193                         case SF_ONLINE:
7194                                 /* online this target */
7195                                 target = elem->target;
7196                                 (void) ndi_devi_online(elem->dip, 0);
7197                                 (void) ndi_event_retrieve_cookie(
7198                                     sf->sf_event_hdl,
7199                                     target->sft_dip, FCAL_INSERT_EVENT,
7200                                     &sf_insert_eid, NDI_EVENT_NOPASS);
7201                                 (void) ndi_event_run_callbacks(sf->sf_event_hdl,
7202                                     target->sft_dip, sf_insert_eid, NULL);
7203                                 break;
7204                         case SF_OFFLINE:
7205                                 /* offline this target */
7206                                 target = elem->target;
7207                                 tgt_id = sf_alpa_to_switch[target->sft_al_pa];
7208                                 /* don't do NDI_DEVI_REMOVE for now */
7209                                 if (ndi_devi_offline(elem->dip, 0) !=
7210                                     NDI_SUCCESS) {
7211                                         SF_DEBUG(1, (sf, CE_WARN, "target %x, "
7212                                             "device offline failed", tgt_id));
7213                                 } else {
7214                                         SF_DEBUG(1, (sf, CE_NOTE, "target %x, "
7215                                             "device offline succeeded\n",
7216                                             tgt_id));
7217                                 }
7218                                 break;
7219                         }
7220                         kmem_free(elem, sizeof (struct sf_hp_elem));
7221                         mutex_enter(&sf->sf_hp_daemon_mutex);
7222                 }
7223 
7224                 /* if exit is not already signaled */
7225                 if (sf->sf_hp_exit == 0) {
7226                         /* wait to be signaled by work or exit */
7227                         CALLB_CPR_SAFE_BEGIN(&cprinfo);
7228                         cv_wait(&sf->sf_hp_daemon_cv, &sf->sf_hp_daemon_mutex);
7229                         CALLB_CPR_SAFE_END(&cprinfo, &sf->sf_hp_daemon_mutex);
7230                 }
7231         } while (sf->sf_hp_exit == 0);
7232 
7233         /* sf_hp_daemon_mutex is dropped by CALLB_CPR_EXIT */
7234         CALLB_CPR_EXIT(&cprinfo);
7235         thread_exit();                  /* no more hotplug thread */
7236         /* NOTREACHED */
7237 }