2077 if (pppt_modload() == STMF_FAILURE) {
2078 ret = EIO;
2079 goto err;
2080 }
2081 if (alua_state->alua_node != 0) {
2082 /* reset existing rtpids to new base */
2083 stmf_rtpid_counter = 255;
2084 }
2085 stmf_state.stmf_alua_node = alua_state->alua_node;
2086 stmf_state.stmf_alua_state = 1;
2087 /* register existing local ports with ppp */
2088 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2089 ilport = ilport->ilport_next) {
2090 /* skip standby ports and non-alua participants */
2091 if (ilport->ilport_standby == 1 ||
2092 ilport->ilport_alua == 0) {
2093 continue;
2094 }
2095 if (alua_state->alua_node != 0) {
2096 ilport->ilport_rtpid =
2097 atomic_add_16_nv(&stmf_rtpid_counter, 1);
2098 }
2099 lport = ilport->ilport_lport;
2100 ic_reg_port = ic_reg_port_msg_alloc(
2101 lport->lport_id, ilport->ilport_rtpid,
2102 0, NULL, stmf_proxy_msg_id);
2103 if (ic_reg_port) {
2104 ic_ret = ic_tx_msg(ic_reg_port);
2105 if (ic_ret == STMF_IC_MSG_SUCCESS) {
2106 ilport->ilport_reg_msgid =
2107 stmf_proxy_msg_id++;
2108 } else {
2109 cmn_err(CE_WARN,
2110 "error on port registration "
2111 "port - %s",
2112 ilport->ilport_kstat_tgt_name);
2113 }
2114 }
2115 }
2116 /* register existing logical units */
2117 for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
3236 ilport->ilport_next = stmf_state.stmf_ilportlist;
3237 ilport->ilport_prev = NULL;
3238 if (ilport->ilport_next)
3239 ilport->ilport_next->ilport_prev = ilport;
3240 stmf_state.stmf_ilportlist = ilport;
3241 stmf_state.stmf_nlports++;
3242 if (lport->lport_pp) {
3243 ((stmf_i_port_provider_t *)
3244 (lport->lport_pp->pp_stmf_private))->ipp_npps++;
3245 }
3246 ilport->ilport_tg =
3247 stmf_lookup_group_for_target(lport->lport_id->ident,
3248 lport->lport_id->ident_length);
3249
3250 /*
3251 * rtpid will/must be set if this is a standby port
3252 * only register ports that are not standby (proxy) ports
3253 * and ports that are alua participants (ilport_alua == 1)
3254 */
3255 if (ilport->ilport_standby == 0) {
3256 ilport->ilport_rtpid = atomic_add_16_nv(&stmf_rtpid_counter, 1);
3257 }
3258
3259 if (stmf_state.stmf_alua_state == 1 &&
3260 ilport->ilport_standby == 0 &&
3261 ilport->ilport_alua == 1) {
3262 stmf_ic_msg_t *ic_reg_port;
3263 stmf_ic_msg_status_t ic_ret;
3264 stmf_local_port_t *lport;
3265 lport = ilport->ilport_lport;
3266 ic_reg_port = ic_reg_port_msg_alloc(
3267 lport->lport_id, ilport->ilport_rtpid,
3268 0, NULL, stmf_proxy_msg_id);
3269 if (ic_reg_port) {
3270 ic_ret = ic_tx_msg(ic_reg_port);
3271 if (ic_ret == STMF_IC_MSG_SUCCESS) {
3272 ilport->ilport_reg_msgid = stmf_proxy_msg_id++;
3273 } else {
3274 cmn_err(CE_WARN, "error on port registration "
3275 "port - %s", ilport->ilport_kstat_tgt_name);
3276 }
3578 }
3579
3580 /* sessions use the ilport_lock. No separate lock is required */
3581 iss->iss_lockp = &ilport->ilport_lock;
3582
3583 if (iss->iss_sm != NULL)
3584 cmn_err(CE_PANIC, "create lun map called with non NULL map");
3585 iss->iss_sm = (stmf_lun_map_t *)kmem_zalloc(sizeof (stmf_lun_map_t),
3586 KM_SLEEP);
3587
3588 mutex_enter(&stmf_state.stmf_lock);
3589 rw_enter(&ilport->ilport_lock, RW_WRITER);
3590 (void) stmf_session_create_lun_map(ilport, iss);
3591 ilport->ilport_nsessions++;
3592 iss->iss_next = ilport->ilport_ss_list;
3593 ilport->ilport_ss_list = iss;
3594 rw_exit(&ilport->ilport_lock);
3595 mutex_exit(&stmf_state.stmf_lock);
3596
3597 iss->iss_creation_time = ddi_get_time();
3598 ss->ss_session_id = atomic_add_64_nv(&stmf_session_counter, 1);
3599 iss->iss_flags &= ~ISS_BEING_CREATED;
3600 /* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
3601 iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
3602 DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
3603 stmf_scsi_session_t *, ss);
3604 return (STMF_SUCCESS);
3605 }
3606
3607 void
3608 stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
3609 {
3610 stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
3611 lport->lport_stmf_private;
3612 stmf_i_scsi_session_t *iss, **ppss;
3613 int found = 0;
3614 stmf_ic_msg_t *ic_session_dereg;
3615 stmf_status_t ic_ret = STMF_FAILURE;
3616
3617 DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
3618 stmf_scsi_session_t *, ss);
3777 mutex_exit(&stmf_state.stmf_lock);
3778
3779 return (STMF_SUCCESS);
3780 }
3781
3782 void
3783 stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
3784 {
3785 uint8_t old, new;
3786
3787 do {
3788 old = new = itl->itl_flags;
3789 if (old & STMF_ITL_BEING_TERMINATED)
3790 return;
3791 new |= STMF_ITL_BEING_TERMINATED;
3792 } while (atomic_cas_8(&itl->itl_flags, old, new) != old);
3793 itl->itl_hdlrm_reason = hdlrm_reason;
3794
3795 ASSERT(itl->itl_counter);
3796
3797 if (atomic_add_32_nv(&itl->itl_counter, -1))
3798 return;
3799
3800 stmf_release_itl_handle(lu, itl);
3801 }
3802
3803 stmf_status_t
3804 stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu)
3805 {
3806 stmf_i_lu_t *ilu;
3807 stmf_i_local_port_t *ilport;
3808 stmf_i_scsi_session_t *iss;
3809 stmf_lun_map_t *lm;
3810 stmf_lun_map_ent_t *ent;
3811 uint32_t nmaps, nu;
3812 stmf_itl_data_t **itl_list;
3813 int i;
3814
3815 ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
3816
3817 dereg_itl_start:;
4135 stmf_free(task);
4136 return (NULL);
4137 }
4138 mutex_enter(&ilu->ilu_task_lock);
4139 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
4140 mutex_exit(&ilu->ilu_task_lock);
4141 rw_exit(iss->iss_lockp);
4142 stmf_free(task);
4143 return (NULL);
4144 }
4145 itask->itask_lu_next = ilu->ilu_tasks;
4146 if (ilu->ilu_tasks)
4147 ilu->ilu_tasks->itask_lu_prev = itask;
4148 ilu->ilu_tasks = itask;
4149 /* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
4150 ilu->ilu_ntasks++;
4151 mutex_exit(&ilu->ilu_task_lock);
4152 }
4153
4154 itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
4155 atomic_add_32(itask->itask_ilu_task_cntr, 1);
4156 itask->itask_start_time = ddi_get_lbolt();
4157
4158 if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
4159 lun_map_ent->ent_itl_datap) != NULL)) {
4160 atomic_add_32(&itask->itask_itl_datap->itl_counter, 1);
4161 task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
4162 } else {
4163 itask->itask_itl_datap = NULL;
4164 task->task_lu_itl_handle = NULL;
4165 }
4166
4167 rw_exit(iss->iss_lockp);
4168 return (task);
4169 }
4170
4171 static void
4172 stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
4173 {
4174 stmf_i_scsi_task_t *itask =
4175 (stmf_i_scsi_task_t *)task->task_stmf_private;
4176 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
4177
4178 ASSERT(rw_lock_held(iss->iss_lockp));
4179 itask->itask_flags = ITASK_IN_FREE_LIST;
4180 itask->itask_proxy_msg_id = 0;
4181 mutex_enter(&ilu->ilu_task_lock);
4182 itask->itask_lu_free_next = ilu->ilu_free_tasks;
4183 ilu->ilu_free_tasks = itask;
4184 ilu->ilu_ntasks_free++;
4185 if (ilu->ilu_ntasks == ilu->ilu_ntasks_free)
4186 cv_signal(&ilu->ilu_offline_pending_cv);
4187 mutex_exit(&ilu->ilu_task_lock);
4188 atomic_add_32(itask->itask_ilu_task_cntr, -1);
4189 }
4190
4191 void
4192 stmf_task_lu_check_freelist(stmf_i_lu_t *ilu)
4193 {
4194 uint32_t num_to_release, ndx;
4195 stmf_i_scsi_task_t *itask;
4196 stmf_lu_t *lu = ilu->ilu_lu;
4197
4198 ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free);
4199
4200 /* free half of the minimal free of the free tasks */
4201 num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2;
4202 if (!num_to_release) {
4203 return;
4204 }
4205 for (ndx = 0; ndx < num_to_release; ndx++) {
4206 mutex_enter(&ilu->ilu_task_lock);
4207 itask = ilu->ilu_free_tasks;
4208 if (itask == NULL) {
4391 itask->itask_allocated_buf_map = 0;
4392 }
4393
4394 void
4395 stmf_task_free(scsi_task_t *task)
4396 {
4397 stmf_local_port_t *lport = task->task_lport;
4398 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
4399 task->task_stmf_private;
4400 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
4401 task->task_session->ss_stmf_private;
4402
4403 stmf_task_audit(itask, TE_TASK_FREE, CMD_OR_IOF_NA, NULL);
4404
4405 stmf_free_task_bufs(itask, lport);
4406 stmf_itl_task_done(itask);
4407 DTRACE_PROBE2(stmf__task__end, scsi_task_t *, task,
4408 hrtime_t,
4409 itask->itask_done_timestamp - itask->itask_start_timestamp);
4410 if (itask->itask_itl_datap) {
4411 if (atomic_add_32_nv(&itask->itask_itl_datap->itl_counter,
4412 -1) == 0) {
4413 stmf_release_itl_handle(task->task_lu,
4414 itask->itask_itl_datap);
4415 }
4416 }
4417
4418 rw_enter(iss->iss_lockp, RW_READER);
4419 lport->lport_task_free(task);
4420 if (itask->itask_worker) {
4421 atomic_add_32(&stmf_cur_ntasks, -1);
4422 atomic_add_32(&itask->itask_worker->worker_ref_count, -1);
4423 }
4424 /*
4425 * After calling stmf_task_lu_free, the task pointer can no longer
4426 * be trusted.
4427 */
4428 stmf_task_lu_free(task, iss);
4429 rw_exit(iss->iss_lockp);
4430 }
4431
4432 void
4433 stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
4434 {
4435 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
4436 task->task_stmf_private;
4437 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
4438 int nv;
4439 uint32_t old, new;
4440 uint32_t ct;
4441 stmf_worker_t *w, *w1;
4442 uint8_t tm;
4443
4444 if (task->task_max_nbufs > 4)
4445 task->task_max_nbufs = 4;
4446 task->task_cur_nbufs = 0;
4447 /* Latest value of currently running tasks */
4448 ct = atomic_add_32_nv(&stmf_cur_ntasks, 1);
4449
4450 /* Select the next worker using round robin */
4451 nv = (int)atomic_add_32_nv((uint32_t *)&stmf_worker_sel_counter, 1);
4452 if (nv >= stmf_nworkers_accepting_cmds) {
4453 int s = nv;
4454 do {
4455 nv -= stmf_nworkers_accepting_cmds;
4456 } while (nv >= stmf_nworkers_accepting_cmds);
4457 if (nv < 0)
4458 nv = 0;
4459 /* Its ok if this cas fails */
4460 (void) atomic_cas_32((uint32_t *)&stmf_worker_sel_counter,
4461 s, nv);
4462 }
4463 w = &stmf_workers[nv];
4464
4465 /*
4466 * A worker can be pinned by interrupt. So select the next one
4467 * if it has lower load.
4468 */
4469 if ((nv + 1) >= stmf_nworkers_accepting_cmds) {
4470 w1 = stmf_workers;
4471 } else {
4508 } else if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
4509 new |= ITASK_DEFAULT_HANDLING;
4510 }
4511 new &= ~ITASK_IN_TRANSITION;
4512 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
4513
4514 stmf_itl_task_start(itask);
4515
4516 itask->itask_worker_next = NULL;
4517 if (w->worker_task_tail) {
4518 w->worker_task_tail->itask_worker_next = itask;
4519 } else {
4520 w->worker_task_head = itask;
4521 }
4522 w->worker_task_tail = itask;
4523 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
4524 w->worker_max_qdepth_pu = w->worker_queue_depth;
4525 }
4526 /* Measure task waitq time */
4527 itask->itask_waitq_enter_timestamp = gethrtime();
4528 atomic_add_32(&w->worker_ref_count, 1);
4529 itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
4530 itask->itask_ncmds = 1;
4531 stmf_task_audit(itask, TE_TASK_START, CMD_OR_IOF_NA, dbuf);
4532 if (dbuf) {
4533 itask->itask_allocated_buf_map = 1;
4534 itask->itask_dbufs[0] = dbuf;
4535 dbuf->db_handle = 0;
4536 } else {
4537 itask->itask_allocated_buf_map = 0;
4538 itask->itask_dbufs[0] = NULL;
4539 }
4540
4541 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) {
4542 w->worker_signal_timestamp = gethrtime();
4543 DTRACE_PROBE2(worker__signal, stmf_worker_t *, w,
4544 scsi_task_t *, task);
4545 cv_signal(&w->worker_cv);
4546 }
4547 mutex_exit(&w->worker_lock);
4548
4596 stmf_status_t ret = STMF_SUCCESS;
4597
4598 stmf_i_scsi_task_t *itask =
4599 (stmf_i_scsi_task_t *)task->task_stmf_private;
4600
4601 stmf_task_audit(itask, TE_XFER_START, ioflags, dbuf);
4602
4603 if (ioflags & STMF_IOF_LU_DONE) {
4604 uint32_t new, old;
4605 do {
4606 new = old = itask->itask_flags;
4607 if (new & ITASK_BEING_ABORTED)
4608 return (STMF_ABORTED);
4609 new &= ~ITASK_KNOWN_TO_LU;
4610 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
4611 }
4612 if (itask->itask_flags & ITASK_BEING_ABORTED)
4613 return (STMF_ABORTED);
4614 #ifdef DEBUG
4615 if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) {
4616 if (atomic_add_32_nv((uint32_t *)&stmf_drop_buf_counter, -1) ==
4617 1)
4618 return (STMF_SUCCESS);
4619 }
4620 #endif
4621
4622 stmf_update_kstat_lu_io(task, dbuf);
4623 stmf_update_kstat_lport_io(task, dbuf);
4624 stmf_lport_xfer_start(itask, dbuf);
4625 if (ioflags & STMF_IOF_STATS_ONLY) {
4626 stmf_lport_xfer_done(itask, dbuf);
4627 return (STMF_SUCCESS);
4628 }
4629
4630 dbuf->db_flags |= DB_LPORT_XFER_ACTIVE;
4631 ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags);
4632
4633 /*
4634 * Port provider may have already called the buffer callback in
4635 * which case dbuf->db_xfer_start_timestamp will be 0.
4636 */
5693 stmf_status_t
5694 stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id,
5695 scsi_devid_desc_t *lu_id)
5696 {
5697 uint8_t *p;
5698 struct timeval32 timestamp32;
5699 uint32_t *t = (uint32_t *)×tamp32;
5700 struct ether_addr mac;
5701 uint8_t *e = (uint8_t *)&mac;
5702 int hid = (int)host_id;
5703 uint16_t gen_number;
5704
5705 if (company_id == COMPANY_ID_NONE)
5706 company_id = COMPANY_ID_SUN;
5707
5708 if (lu_id->ident_length != 0x10)
5709 return (STMF_INVALID_ARG);
5710
5711 p = (uint8_t *)lu_id;
5712
5713 gen_number = atomic_add_16_nv(&stmf_lu_id_gen_number, 1);
5714
5715 p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
5716 p[4] = ((company_id >> 20) & 0xf) | 0x60;
5717 p[5] = (company_id >> 12) & 0xff;
5718 p[6] = (company_id >> 4) & 0xff;
5719 p[7] = (company_id << 4) & 0xf0;
5720 if (hid == 0 && !localetheraddr((struct ether_addr *)NULL, &mac)) {
5721 hid = BE_32((int)zone_get_hostid(NULL));
5722 }
5723 if (hid != 0) {
5724 e[0] = (hid >> 24) & 0xff;
5725 e[1] = (hid >> 16) & 0xff;
5726 e[2] = (hid >> 8) & 0xff;
5727 e[3] = hid & 0xff;
5728 e[4] = e[5] = 0;
5729 }
5730 bcopy(e, p+8, 6);
5731 uniqtime32(×tamp32);
5732 *t = BE_32(*t);
5733 bcopy(t, p+14, 4);
6325 /* We made it here means we are going to call LU */
6326 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0)
6327 lu = task->task_lu;
6328 else
6329 lu = dlun0;
6330 dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)];
6331 mutex_exit(&w->worker_lock);
6332 curcmd &= ITASK_CMD_MASK;
6333 stmf_task_audit(itask, TE_PROCESS_CMD, curcmd, dbuf);
6334 switch (curcmd) {
6335 case ITASK_CMD_NEW_TASK:
6336 iss = (stmf_i_scsi_session_t *)
6337 task->task_session->ss_stmf_private;
6338 stmf_itl_lu_new_task(itask);
6339 if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) {
6340 if (stmf_handle_cmd_during_ic(itask))
6341 break;
6342 }
6343 #ifdef DEBUG
6344 if (stmf_drop_task_counter > 0) {
6345 if (atomic_add_32_nv(
6346 (uint32_t *)&stmf_drop_task_counter,
6347 -1) == 1) {
6348 break;
6349 }
6350 }
6351 #endif
6352 DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task);
6353 lu->lu_new_task(task, dbuf);
6354 break;
6355 case ITASK_CMD_DATA_XFER_DONE:
6356 lu->lu_dbuf_xfer_done(task, dbuf);
6357 break;
6358 case ITASK_CMD_STATUS_DONE:
6359 lu->lu_send_status_done(task);
6360 break;
6361 case ITASK_CMD_ABORT:
6362 if (abort_free) {
6363 stmf_task_free(task);
6364 } else {
6365 stmf_do_task_abort(task);
6366 }
6367 break;
|
2077 if (pppt_modload() == STMF_FAILURE) {
2078 ret = EIO;
2079 goto err;
2080 }
2081 if (alua_state->alua_node != 0) {
2082 /* reset existing rtpids to new base */
2083 stmf_rtpid_counter = 255;
2084 }
2085 stmf_state.stmf_alua_node = alua_state->alua_node;
2086 stmf_state.stmf_alua_state = 1;
2087 /* register existing local ports with ppp */
2088 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2089 ilport = ilport->ilport_next) {
2090 /* skip standby ports and non-alua participants */
2091 if (ilport->ilport_standby == 1 ||
2092 ilport->ilport_alua == 0) {
2093 continue;
2094 }
2095 if (alua_state->alua_node != 0) {
2096 ilport->ilport_rtpid =
2097 atomic_inc_16_nv(&stmf_rtpid_counter);
2098 }
2099 lport = ilport->ilport_lport;
2100 ic_reg_port = ic_reg_port_msg_alloc(
2101 lport->lport_id, ilport->ilport_rtpid,
2102 0, NULL, stmf_proxy_msg_id);
2103 if (ic_reg_port) {
2104 ic_ret = ic_tx_msg(ic_reg_port);
2105 if (ic_ret == STMF_IC_MSG_SUCCESS) {
2106 ilport->ilport_reg_msgid =
2107 stmf_proxy_msg_id++;
2108 } else {
2109 cmn_err(CE_WARN,
2110 "error on port registration "
2111 "port - %s",
2112 ilport->ilport_kstat_tgt_name);
2113 }
2114 }
2115 }
2116 /* register existing logical units */
2117 for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
3236 ilport->ilport_next = stmf_state.stmf_ilportlist;
3237 ilport->ilport_prev = NULL;
3238 if (ilport->ilport_next)
3239 ilport->ilport_next->ilport_prev = ilport;
3240 stmf_state.stmf_ilportlist = ilport;
3241 stmf_state.stmf_nlports++;
3242 if (lport->lport_pp) {
3243 ((stmf_i_port_provider_t *)
3244 (lport->lport_pp->pp_stmf_private))->ipp_npps++;
3245 }
3246 ilport->ilport_tg =
3247 stmf_lookup_group_for_target(lport->lport_id->ident,
3248 lport->lport_id->ident_length);
3249
3250 /*
3251 * rtpid will/must be set if this is a standby port
3252 * only register ports that are not standby (proxy) ports
3253 * and ports that are alua participants (ilport_alua == 1)
3254 */
3255 if (ilport->ilport_standby == 0) {
3256 ilport->ilport_rtpid = atomic_inc_16_nv(&stmf_rtpid_counter);
3257 }
3258
3259 if (stmf_state.stmf_alua_state == 1 &&
3260 ilport->ilport_standby == 0 &&
3261 ilport->ilport_alua == 1) {
3262 stmf_ic_msg_t *ic_reg_port;
3263 stmf_ic_msg_status_t ic_ret;
3264 stmf_local_port_t *lport;
3265 lport = ilport->ilport_lport;
3266 ic_reg_port = ic_reg_port_msg_alloc(
3267 lport->lport_id, ilport->ilport_rtpid,
3268 0, NULL, stmf_proxy_msg_id);
3269 if (ic_reg_port) {
3270 ic_ret = ic_tx_msg(ic_reg_port);
3271 if (ic_ret == STMF_IC_MSG_SUCCESS) {
3272 ilport->ilport_reg_msgid = stmf_proxy_msg_id++;
3273 } else {
3274 cmn_err(CE_WARN, "error on port registration "
3275 "port - %s", ilport->ilport_kstat_tgt_name);
3276 }
3578 }
3579
3580 /* sessions use the ilport_lock. No separate lock is required */
3581 iss->iss_lockp = &ilport->ilport_lock;
3582
3583 if (iss->iss_sm != NULL)
3584 cmn_err(CE_PANIC, "create lun map called with non NULL map");
3585 iss->iss_sm = (stmf_lun_map_t *)kmem_zalloc(sizeof (stmf_lun_map_t),
3586 KM_SLEEP);
3587
3588 mutex_enter(&stmf_state.stmf_lock);
3589 rw_enter(&ilport->ilport_lock, RW_WRITER);
3590 (void) stmf_session_create_lun_map(ilport, iss);
3591 ilport->ilport_nsessions++;
3592 iss->iss_next = ilport->ilport_ss_list;
3593 ilport->ilport_ss_list = iss;
3594 rw_exit(&ilport->ilport_lock);
3595 mutex_exit(&stmf_state.stmf_lock);
3596
3597 iss->iss_creation_time = ddi_get_time();
3598 ss->ss_session_id = atomic_inc_64_nv(&stmf_session_counter);
3599 iss->iss_flags &= ~ISS_BEING_CREATED;
3600 /* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
3601 iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
3602 DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
3603 stmf_scsi_session_t *, ss);
3604 return (STMF_SUCCESS);
3605 }
3606
3607 void
3608 stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
3609 {
3610 stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
3611 lport->lport_stmf_private;
3612 stmf_i_scsi_session_t *iss, **ppss;
3613 int found = 0;
3614 stmf_ic_msg_t *ic_session_dereg;
3615 stmf_status_t ic_ret = STMF_FAILURE;
3616
3617 DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
3618 stmf_scsi_session_t *, ss);
3777 mutex_exit(&stmf_state.stmf_lock);
3778
3779 return (STMF_SUCCESS);
3780 }
3781
/*
 * Start teardown of an ITL (Initiator-Target-LUN) nexus handle and drop
 * the caller's reference.  Only the first caller to set
 * STMF_ITL_BEING_TERMINATED proceeds; concurrent callers return early.
 * The final reference released frees the handle.
 */
void
stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
{
	uint8_t old, new;

	/* CAS loop: set STMF_ITL_BEING_TERMINATED exactly once. */
	do {
		old = new = itl->itl_flags;
		if (old & STMF_ITL_BEING_TERMINATED)
			return;	/* teardown already in progress elsewhere */
		new |= STMF_ITL_BEING_TERMINATED;
	} while (atomic_cas_8(&itl->itl_flags, old, new) != old);
	itl->itl_hdlrm_reason = hdlrm_reason;

	ASSERT(itl->itl_counter);

	/* Drop our reference; nonzero means others still hold the ITL. */
	if (atomic_dec_32_nv(&itl->itl_counter))
		return;

	/* We released the last reference: free the handle. */
	stmf_release_itl_handle(lu, itl);
}
3802
3803 stmf_status_t
3804 stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu)
3805 {
3806 stmf_i_lu_t *ilu;
3807 stmf_i_local_port_t *ilport;
3808 stmf_i_scsi_session_t *iss;
3809 stmf_lun_map_t *lm;
3810 stmf_lun_map_ent_t *ent;
3811 uint32_t nmaps, nu;
3812 stmf_itl_data_t **itl_list;
3813 int i;
3814
3815 ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
3816
3817 dereg_itl_start:;
4135 stmf_free(task);
4136 return (NULL);
4137 }
4138 mutex_enter(&ilu->ilu_task_lock);
4139 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
4140 mutex_exit(&ilu->ilu_task_lock);
4141 rw_exit(iss->iss_lockp);
4142 stmf_free(task);
4143 return (NULL);
4144 }
4145 itask->itask_lu_next = ilu->ilu_tasks;
4146 if (ilu->ilu_tasks)
4147 ilu->ilu_tasks->itask_lu_prev = itask;
4148 ilu->ilu_tasks = itask;
4149 /* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
4150 ilu->ilu_ntasks++;
4151 mutex_exit(&ilu->ilu_task_lock);
4152 }
4153
4154 itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
4155 atomic_inc_32(itask->itask_ilu_task_cntr);
4156 itask->itask_start_time = ddi_get_lbolt();
4157
4158 if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
4159 lun_map_ent->ent_itl_datap) != NULL)) {
4160 atomic_inc_32(&itask->itask_itl_datap->itl_counter);
4161 task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
4162 } else {
4163 itask->itask_itl_datap = NULL;
4164 task->task_lu_itl_handle = NULL;
4165 }
4166
4167 rw_exit(iss->iss_lockp);
4168 return (task);
4169 }
4170
/*
 * Put a task back on its LU's free list.  Caller must hold the session's
 * iss_lockp.  When the free count catches up with the total task count,
 * signal ilu_offline_pending_cv so a pending LU offline can complete.
 */
static void
stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	ASSERT(rw_lock_held(iss->iss_lockp));
	/* Mark free and clear any stale proxy id before listing the task. */
	itask->itask_flags = ITASK_IN_FREE_LIST;
	itask->itask_proxy_msg_id = 0;
	mutex_enter(&ilu->ilu_task_lock);
	itask->itask_lu_free_next = ilu->ilu_free_tasks;
	ilu->ilu_free_tasks = itask;
	ilu->ilu_ntasks_free++;
	/* Every task is now free: wake an offline waiter, if any. */
	if (ilu->ilu_ntasks == ilu->ilu_ntasks_free)
		cv_signal(&ilu->ilu_offline_pending_cv);
	mutex_exit(&ilu->ilu_task_lock);
	atomic_dec_32(itask->itask_ilu_task_cntr);
}
4190
4191 void
4192 stmf_task_lu_check_freelist(stmf_i_lu_t *ilu)
4193 {
4194 uint32_t num_to_release, ndx;
4195 stmf_i_scsi_task_t *itask;
4196 stmf_lu_t *lu = ilu->ilu_lu;
4197
4198 ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free);
4199
4200 /* free half of the minimal free of the free tasks */
4201 num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2;
4202 if (!num_to_release) {
4203 return;
4204 }
4205 for (ndx = 0; ndx < num_to_release; ndx++) {
4206 mutex_enter(&ilu->ilu_task_lock);
4207 itask = ilu->ilu_free_tasks;
4208 if (itask == NULL) {
4391 itask->itask_allocated_buf_map = 0;
4392 }
4393
/*
 * Free a completed SCSI task: release data buffers, drop the ITL handle
 * reference taken at allocation, let the local port reclaim its per-task
 * state, and return the task to the LU free list.  The session lock is
 * held (reader) across the port callback and the free-list insertion.
 */
void
stmf_task_free(scsi_task_t *task)
{
	stmf_local_port_t *lport = task->task_lport;
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;

	stmf_task_audit(itask, TE_TASK_FREE, CMD_OR_IOF_NA, NULL);

	stmf_free_task_bufs(itask, lport);
	stmf_itl_task_done(itask);
	DTRACE_PROBE2(stmf__task__end, scsi_task_t *, task,
	    hrtime_t,
	    itask->itask_done_timestamp - itask->itask_start_timestamp);
	/* Drop the ITL hold; releasing the last reference frees the handle. */
	if (itask->itask_itl_datap) {
		if (atomic_dec_32_nv(&itask->itask_itl_datap->itl_counter) ==
		    0) {
			stmf_release_itl_handle(task->task_lu,
			    itask->itask_itl_datap);
		}
	}

	rw_enter(iss->iss_lockp, RW_READER);
	lport->lport_task_free(task);
	/* Only tasks that were queued to a worker hold these counts. */
	if (itask->itask_worker) {
		atomic_dec_32(&stmf_cur_ntasks);
		atomic_dec_32(&itask->itask_worker->worker_ref_count);
	}
	/*
	 * After calling stmf_task_lu_free, the task pointer can no longer
	 * be trusted.
	 */
	stmf_task_lu_free(task, iss);
	rw_exit(iss->iss_lockp);
}
4431
4432 void
4433 stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
4434 {
4435 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
4436 task->task_stmf_private;
4437 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
4438 int nv;
4439 uint32_t old, new;
4440 uint32_t ct;
4441 stmf_worker_t *w, *w1;
4442 uint8_t tm;
4443
4444 if (task->task_max_nbufs > 4)
4445 task->task_max_nbufs = 4;
4446 task->task_cur_nbufs = 0;
4447 /* Latest value of currently running tasks */
4448 ct = atomic_inc_32_nv(&stmf_cur_ntasks);
4449
4450 /* Select the next worker using round robin */
4451 nv = (int)atomic_inc_32_nv((uint32_t *)&stmf_worker_sel_counter);
4452 if (nv >= stmf_nworkers_accepting_cmds) {
4453 int s = nv;
4454 do {
4455 nv -= stmf_nworkers_accepting_cmds;
4456 } while (nv >= stmf_nworkers_accepting_cmds);
4457 if (nv < 0)
4458 nv = 0;
4459 /* Its ok if this cas fails */
4460 (void) atomic_cas_32((uint32_t *)&stmf_worker_sel_counter,
4461 s, nv);
4462 }
4463 w = &stmf_workers[nv];
4464
4465 /*
4466 * A worker can be pinned by interrupt. So select the next one
4467 * if it has lower load.
4468 */
4469 if ((nv + 1) >= stmf_nworkers_accepting_cmds) {
4470 w1 = stmf_workers;
4471 } else {
4508 } else if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
4509 new |= ITASK_DEFAULT_HANDLING;
4510 }
4511 new &= ~ITASK_IN_TRANSITION;
4512 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
4513
4514 stmf_itl_task_start(itask);
4515
4516 itask->itask_worker_next = NULL;
4517 if (w->worker_task_tail) {
4518 w->worker_task_tail->itask_worker_next = itask;
4519 } else {
4520 w->worker_task_head = itask;
4521 }
4522 w->worker_task_tail = itask;
4523 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
4524 w->worker_max_qdepth_pu = w->worker_queue_depth;
4525 }
4526 /* Measure task waitq time */
4527 itask->itask_waitq_enter_timestamp = gethrtime();
4528 atomic_inc_32(&w->worker_ref_count);
4529 itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
4530 itask->itask_ncmds = 1;
4531 stmf_task_audit(itask, TE_TASK_START, CMD_OR_IOF_NA, dbuf);
4532 if (dbuf) {
4533 itask->itask_allocated_buf_map = 1;
4534 itask->itask_dbufs[0] = dbuf;
4535 dbuf->db_handle = 0;
4536 } else {
4537 itask->itask_allocated_buf_map = 0;
4538 itask->itask_dbufs[0] = NULL;
4539 }
4540
4541 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) {
4542 w->worker_signal_timestamp = gethrtime();
4543 DTRACE_PROBE2(worker__signal, stmf_worker_t *, w,
4544 scsi_task_t *, task);
4545 cv_signal(&w->worker_cv);
4546 }
4547 mutex_exit(&w->worker_lock);
4548
4596 stmf_status_t ret = STMF_SUCCESS;
4597
4598 stmf_i_scsi_task_t *itask =
4599 (stmf_i_scsi_task_t *)task->task_stmf_private;
4600
4601 stmf_task_audit(itask, TE_XFER_START, ioflags, dbuf);
4602
4603 if (ioflags & STMF_IOF_LU_DONE) {
4604 uint32_t new, old;
4605 do {
4606 new = old = itask->itask_flags;
4607 if (new & ITASK_BEING_ABORTED)
4608 return (STMF_ABORTED);
4609 new &= ~ITASK_KNOWN_TO_LU;
4610 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
4611 }
4612 if (itask->itask_flags & ITASK_BEING_ABORTED)
4613 return (STMF_ABORTED);
4614 #ifdef DEBUG
4615 if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) {
4616 if (atomic_dec_32_nv((uint32_t *)&stmf_drop_buf_counter) ==
4617 1)
4618 return (STMF_SUCCESS);
4619 }
4620 #endif
4621
4622 stmf_update_kstat_lu_io(task, dbuf);
4623 stmf_update_kstat_lport_io(task, dbuf);
4624 stmf_lport_xfer_start(itask, dbuf);
4625 if (ioflags & STMF_IOF_STATS_ONLY) {
4626 stmf_lport_xfer_done(itask, dbuf);
4627 return (STMF_SUCCESS);
4628 }
4629
4630 dbuf->db_flags |= DB_LPORT_XFER_ACTIVE;
4631 ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags);
4632
4633 /*
4634 * Port provider may have already called the buffer callback in
4635 * which case dbuf->db_xfer_start_timestamp will be 0.
4636 */
5693 stmf_status_t
5694 stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id,
5695 scsi_devid_desc_t *lu_id)
5696 {
5697 uint8_t *p;
5698 struct timeval32 timestamp32;
5699 uint32_t *t = (uint32_t *)×tamp32;
5700 struct ether_addr mac;
5701 uint8_t *e = (uint8_t *)&mac;
5702 int hid = (int)host_id;
5703 uint16_t gen_number;
5704
5705 if (company_id == COMPANY_ID_NONE)
5706 company_id = COMPANY_ID_SUN;
5707
5708 if (lu_id->ident_length != 0x10)
5709 return (STMF_INVALID_ARG);
5710
5711 p = (uint8_t *)lu_id;
5712
5713 gen_number = atomic_inc_16_nv(&stmf_lu_id_gen_number);
5714
5715 p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
5716 p[4] = ((company_id >> 20) & 0xf) | 0x60;
5717 p[5] = (company_id >> 12) & 0xff;
5718 p[6] = (company_id >> 4) & 0xff;
5719 p[7] = (company_id << 4) & 0xf0;
5720 if (hid == 0 && !localetheraddr((struct ether_addr *)NULL, &mac)) {
5721 hid = BE_32((int)zone_get_hostid(NULL));
5722 }
5723 if (hid != 0) {
5724 e[0] = (hid >> 24) & 0xff;
5725 e[1] = (hid >> 16) & 0xff;
5726 e[2] = (hid >> 8) & 0xff;
5727 e[3] = hid & 0xff;
5728 e[4] = e[5] = 0;
5729 }
5730 bcopy(e, p+8, 6);
5731 uniqtime32(×tamp32);
5732 *t = BE_32(*t);
5733 bcopy(t, p+14, 4);
6325 /* We made it here means we are going to call LU */
6326 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0)
6327 lu = task->task_lu;
6328 else
6329 lu = dlun0;
6330 dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)];
6331 mutex_exit(&w->worker_lock);
6332 curcmd &= ITASK_CMD_MASK;
6333 stmf_task_audit(itask, TE_PROCESS_CMD, curcmd, dbuf);
6334 switch (curcmd) {
6335 case ITASK_CMD_NEW_TASK:
6336 iss = (stmf_i_scsi_session_t *)
6337 task->task_session->ss_stmf_private;
6338 stmf_itl_lu_new_task(itask);
6339 if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) {
6340 if (stmf_handle_cmd_during_ic(itask))
6341 break;
6342 }
6343 #ifdef DEBUG
6344 if (stmf_drop_task_counter > 0) {
6345 if (atomic_dec_32_nv((uint32_t *)&stmf_drop_task_counter) ==
6346 1) {
6347 break;
6348 }
6349 }
6350 #endif
6351 DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task);
6352 lu->lu_new_task(task, dbuf);
6353 break;
6354 case ITASK_CMD_DATA_XFER_DONE:
6355 lu->lu_dbuf_xfer_done(task, dbuf);
6356 break;
6357 case ITASK_CMD_STATUS_DONE:
6358 lu->lu_send_status_done(task);
6359 break;
6360 case ITASK_CMD_ABORT:
6361 if (abort_free) {
6362 stmf_task_free(task);
6363 } else {
6364 stmf_do_task_abort(task);
6365 }
6366 break;
|