921 rw_enter(&iport->iport_lock, RW_WRITER);
922 /* Make sure nobody created the struct except us */
923 if (fct_portid_to_portptr(iport, cmd->cmd_rportid)) {
924 /* Oh well, free it */
925 fct_free(rp);
926 } else {
927 fct_queue_rp(iport, irp);
928 }
929 rw_downgrade(&iport->iport_lock);
930 /* Start over because we dropped the lock */
931 goto start_els_posting;
932 }
933
934 /* A PLOGI is by default a logout of previous session */
935 irp->irp_deregister_timer = ddi_get_lbolt() +
936 drv_usectohz(USEC_DEREG_RP_TIMEOUT);
937 irp->irp_dereg_count = 0;
938 fct_post_to_discovery_queue(iport, irp, NULL);
939
940 /* A PLOGI also invalidates any RSCNs related to this rp */
941 atomic_add_32(&irp->irp_rscn_counter, 1);
942 } else {
943 /*
944 * For everything else, we must have (or be able to look up) a
945 * valid port pointer.
946 */
947 if (irp == NULL) {
948 rw_exit(&iport->iport_lock);
949 if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
950 /* XXX Throw a logout to the initiator */
951 stmf_trace(iport->iport_alias, "ELS %x "
952 "received from %x without a session",
953 els->els_req_payload[0], cmd->cmd_rportid);
954 } else {
955 stmf_trace(iport->iport_alias, "Sending ELS %x "
956 "to %x without a session",
957 els->els_req_payload[0], cmd->cmd_rportid);
958 }
959 fct_queue_cmd_for_termination(cmd, FCT_NOT_LOGGED_IN);
960 return;
961 }
966 * Lets get a slot for this els
967 */
968 if (!(icmd->icmd_flags & ICMD_IMPLICIT)) {
969 cmd_slot = fct_alloc_cmd_slot(iport, cmd);
970 if (cmd_slot == FCT_SLOT_EOL) {
971 /* This should not have happened */
972 rw_exit(&iport->iport_lock);
973 stmf_trace(iport->iport_alias,
974 "ran out of xchg resources");
975 fct_queue_cmd_for_termination(cmd,
976 FCT_NO_XCHG_RESOURCE);
977 return;
978 }
979 } else {
980 /*
981 * Tell the framework that fct_cmd_free() can decrement the
982 * irp_nonfcp_xchg_count variable.
983 */
984 atomic_or_32(&icmd->icmd_flags, ICMD_IMPLICIT_CMD_HAS_RESOURCE);
985 }
986 atomic_add_16(&irp->irp_nonfcp_xchg_count, 1);
987
988 /*
989 * Grab the remote port lock while we modify the port state.
990 * we should not drop the fca port lock (as a reader) until we
991 * modify the remote port state.
992 */
993 rw_enter(&irp->irp_lock, RW_WRITER);
994 if ((op == ELS_OP_PLOGI) || (op == ELS_OP_PRLI) ||
995 (op == ELS_OP_LOGO) || (op == ELS_OP_PRLO) ||
996 (op == ELS_OP_TPRLO)) {
997 uint32_t rf = IRP_PRLI_DONE;
998 if ((op == ELS_OP_PLOGI) || (op == ELS_OP_LOGO)) {
999 rf |= IRP_PLOGI_DONE;
1000 if (irp->irp_flags & IRP_PLOGI_DONE)
1001 atomic_add_32(&iport->iport_nrps_login, -1);
1002 }
1003 atomic_add_16(&irp->irp_sa_elses_count, 1);
1004 atomic_and_32(&irp->irp_flags, ~rf);
1005 atomic_or_32(&icmd->icmd_flags, ICMD_SESSION_AFFECTING);
1006 } else {
1007 atomic_add_16(&irp->irp_nsa_elses_count, 1);
1008 }
1009
1010 fct_post_to_discovery_queue(iport, irp, icmd);
1011
1012 rw_exit(&irp->irp_lock);
1013 rw_exit(&iport->iport_lock);
1014 }
1015
1016 /*
1017 * Cleanup I/Os for a rport. ttc is a bit Mask of cmd types to clean.
1018 * No locks held.
1019 */
1020 int
1021 fct_trigger_rport_cleanup(fct_i_remote_port_t *irp, int ttc)
1022 {
1023 fct_remote_port_t *rp = irp->irp_rp;
1024 fct_local_port_t *port = rp->rp_port;
1025 fct_i_local_port_t *iport =
1026 (fct_i_local_port_t *)port->port_fct_private;
1027 fct_cmd_t *cmd;
1143 for (i = 0; i < port->port_max_logins; i++) {
1144 if (iport->iport_rp_slots[i] == NULL) {
1145 break;
1146 }
1147 }
1148 if (i == port->port_max_logins) {
1149 /* This is really pushing it. */
1150 (void) snprintf(info, sizeof (info),
1151 "fct_register_remote_port "
1152 "Cannot register portid %x because all the "
1153 "handles are used up", rp->rp_id);
1154 goto hba_fatal_err;
1155 }
1156 rp->rp_handle = i;
1157 }
1158 /* By this time rport_handle is valid */
1159 if ((irp->irp_flags & IRP_HANDLE_OPENED) == 0) {
1160 iport->iport_rp_slots[rp->rp_handle] = irp;
1161 atomic_or_32(&irp->irp_flags, IRP_HANDLE_OPENED);
1162 }
1163 (void) atomic_add_64_nv(&iport->iport_last_change, 1);
1164 fct_log_remote_port_event(port, ESC_SUNFC_TARGET_ADD,
1165 rp->rp_pwwn, rp->rp_id);
1166
1167 register_rp_done:;
1168 rw_exit(&irp->irp_lock);
1169 rw_exit(&iport->iport_lock);
1170 return (FCT_SUCCESS);
1171
1172 hba_fatal_err:;
1173 rw_exit(&irp->irp_lock);
1174 rw_exit(&iport->iport_lock);
1175 /*
1176 * XXX Throw HBA fatal error event
1177 */
1178 (void) fct_port_shutdown(iport->iport_port,
1179 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
1180 return (FCT_FAILURE);
1181 }
1182
1183 fct_status_t
1188 fct_i_remote_port_t *irp = RP_TO_IRP(rp);
1189
1190 if (irp->irp_snn) {
1191 kmem_free(irp->irp_snn, strlen(irp->irp_snn) + 1);
1192 irp->irp_snn = NULL;
1193 }
1194 if (irp->irp_spn) {
1195 kmem_free(irp->irp_spn, strlen(irp->irp_spn) + 1);
1196 irp->irp_spn = NULL;
1197 }
1198
1199 if ((ret = port->port_deregister_remote_port(port, rp)) !=
1200 FCT_SUCCESS) {
1201 return (ret);
1202 }
1203
1204 if (irp->irp_flags & IRP_HANDLE_OPENED) {
1205 atomic_and_32(&irp->irp_flags, ~IRP_HANDLE_OPENED);
1206 iport->iport_rp_slots[rp->rp_handle] = NULL;
1207 }
1208 (void) atomic_add_64_nv(&iport->iport_last_change, 1);
1209 fct_log_remote_port_event(port, ESC_SUNFC_TARGET_REMOVE,
1210 rp->rp_pwwn, rp->rp_id);
1211
1212 return (FCT_SUCCESS);
1213 }
1214
1215 fct_status_t
1216 fct_send_accrjt(fct_cmd_t *cmd, uint8_t accrjt, uint8_t reason, uint8_t expl)
1217 {
1218 fct_local_port_t *port = (fct_local_port_t *)cmd->cmd_port;
1219 fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
1220
1221 els->els_resp_size = els->els_resp_alloc_size = 8;
1222 els->els_resp_payload = (uint8_t *)kmem_zalloc(8, KM_SLEEP);
1223 els->els_resp_payload[0] = accrjt;
1224 if (accrjt == 1) {
1225 els->els_resp_payload[5] = reason;
1226 els->els_resp_payload[6] = expl;
1227 } else {
1228 els->els_resp_size = 4;
1494 } else {
1495 /*
1496 * The reason we set this flag is to prevent
1497 * killing a PRLI while we have not yet processed
1498 * a response to PLOGI. Because the initiator
1499 * will send a PRLI as soon as it responds to PLOGI.
1500 * Check fct_process_els() for more info.
1501 */
1502 atomic_or_32(&irp->irp_flags,
1503 IRP_SOL_PLOGI_IN_PROGRESS);
1504 atomic_or_32(&icmd->icmd_flags, ICMD_KNOWN_TO_FCA);
1505 ret = port->port_send_cmd(cmd);
1506 if (ret != FCT_SUCCESS) {
1507 atomic_and_32(&icmd->icmd_flags,
1508 ~ICMD_KNOWN_TO_FCA);
1509 atomic_and_32(&irp->irp_flags,
1510 ~IRP_SOL_PLOGI_IN_PROGRESS);
1511 }
1512 }
1513 }
1514 atomic_add_16(&irp->irp_sa_elses_count, -1);
1515
1516 if (ret == FCT_SUCCESS) {
1517 if (cmd_type == FCT_CMD_RCVD_ELS) {
1518 atomic_or_32(&irp->irp_flags, IRP_PLOGI_DONE);
1519 atomic_add_32(&iport->iport_nrps_login, 1);
1520 if (irp->irp_deregister_timer)
1521 irp->irp_deregister_timer = 0;
1522 }
1523 if (icmd_flags & ICMD_IMPLICIT) {
1524 DTRACE_FC_5(rport__login__end,
1525 fct_cmd_t, cmd,
1526 fct_local_port_t, port,
1527 fct_i_remote_port_t, irp,
1528 int, (cmd_type != FCT_CMD_RCVD_ELS),
1529 int, FCT_SUCCESS);
1530
1531 p = els->els_resp_payload;
1532 p[0] = ELS_OP_ACC;
1533 cmd->cmd_comp_status = FCT_SUCCESS;
1534 fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
1535 }
1536 } else {
1537 DTRACE_FC_5(rport__login__end,
1538 fct_cmd_t, cmd,
1539 fct_local_port_t, port,
1580 * has responded to PLOGI already.
1581 */
1582 /* XXX: Probably need a timeout here */
1583 return (DISC_ACTION_DELAY_RESCAN);
1584 }
1585 /* The caller has made sure that login is done */
1586
1587 /* Make sure the process is fcp in this case */
1588 if ((els->els_req_size != 20) || (bcmp(els->els_req_payload,
1589 fct_prli_temp, 16))) {
1590 if (els->els_req_payload[4] != 0x08)
1591 stmf_trace(iport->iport_alias, "PRLI received from"
1592 " %x for unknown FC-4 type %x", cmd->cmd_rportid,
1593 els->els_req_payload[4]);
1594 else
1595 stmf_trace(iport->iport_alias, "Rejecting PRLI from %x "
1596 " pld sz %d, prli_flags %x", cmd->cmd_rportid,
1597 els->els_req_size, els->els_req_payload[6]);
1598
1599 fct_dequeue_els(irp);
1600 atomic_add_16(&irp->irp_sa_elses_count, -1);
1601 ret = fct_send_accrjt(cmd, ELS_OP_LSRJT, 3, 0x2c);
1602 goto prli_end;
1603 }
1604
1605 if (irp->irp_fcp_xchg_count) {
1606 /* Trigger cleanup if necessary */
1607 if ((irp->irp_flags & IRP_FCP_CLEANUP) == 0) {
1608 stmf_trace(iport->iport_alias, "handling PRLI from"
1609 " %x. Triggering cleanup", cmd->cmd_rportid);
1610 if (fct_trigger_rport_cleanup(irp, FCT_CMD_FCP_XCHG)) {
1611 atomic_or_32(&irp->irp_flags, IRP_FCP_CLEANUP);
1612 } else {
1613 /* XXX: handle this */
1614 /* EMPTY */
1615 }
1616 }
1617
1618 end_time = icmd->icmd_start_time +
1619 drv_usectohz(USEC_ELS_TIMEOUT);
1620 if (ddi_get_lbolt() > end_time) {
1654 ses->ss_lport = port->port_lport;
1655 if (stmf_register_scsi_session(port->port_lport, ses) !=
1656 STMF_SUCCESS) {
1657 stmf_free(ses);
1658 ses = NULL;
1659 } else {
1660 irp->irp_session = ses;
1661 irp->irp_session->ss_rport_alias = irp->irp_snn;
1662
1663 /*
1664 * The reason IRP_SCSI_SESSION_STARTED is different
1665 * from IRP_PRLI_DONE is that we clear IRP_PRLI_DONE
1666 * inside interrupt context. We dont want to deregister
1667 * the session from an interrupt.
1668 */
1669 atomic_or_32(&irp->irp_flags, IRP_SCSI_SESSION_STARTED);
1670 }
1671 }
1672
1673 fct_dequeue_els(irp);
1674 atomic_add_16(&irp->irp_sa_elses_count, -1);
1675 if (ses == NULL) {
1676 /* fail PRLI */
1677 ret = fct_send_accrjt(cmd, ELS_OP_LSRJT, 3, 0);
1678 } else {
1679 /* accept PRLI */
1680 els->els_resp_payload = (uint8_t *)kmem_zalloc(20, KM_SLEEP);
1681 bcopy(fct_prli_temp, els->els_resp_payload, 20);
1682 els->els_resp_payload[0] = 2;
1683 els->els_resp_payload[6] = 0x21;
1684
1685 /* XXX the two bytes below need to set as per capabilities */
1686 els->els_resp_payload[18] = 0;
1687 els->els_resp_payload[19] = 0x12;
1688
1689 els->els_resp_size = els->els_resp_alloc_size = 20;
1690 if ((ret = port->port_send_cmd_response(cmd, 0)) !=
1691 FCT_SUCCESS) {
1692 stmf_deregister_scsi_session(port->port_lport, ses);
1693 stmf_free(irp->irp_session);
1694 irp->irp_session = NULL;
1758
1759 if ((ddi_get_lbolt() & 0x7f) == 0) {
1760 stmf_trace(iport->iport_alias, "handling"
1761 " LOGO rp_id %x, waiting for cmds to"
1762 " drain", cmd->cmd_rportid);
1763 }
1764 return (DISC_ACTION_DELAY_RESCAN);
1765 }
1766 atomic_and_32(&irp->irp_flags, ~IRP_SESSION_CLEANUP);
1767
1768 /* Session can only be terminated after all the I/Os have drained */
1769 if (irp->irp_flags & IRP_SCSI_SESSION_STARTED) {
1770 stmf_deregister_scsi_session(iport->iport_port->port_lport,
1771 irp->irp_session);
1772 stmf_free(irp->irp_session);
1773 irp->irp_session = NULL;
1774 atomic_and_32(&irp->irp_flags, ~IRP_SCSI_SESSION_STARTED);
1775 }
1776
1777 fct_dequeue_els(irp);
1778 atomic_add_16(&irp->irp_sa_elses_count, -1);
1779
1780 /* don't send response if this is an implicit logout cmd */
1781 if (!(icmd->icmd_flags & ICMD_IMPLICIT)) {
1782 if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
1783 ret = fct_send_accrjt(cmd, ELS_OP_ACC, 0, 0);
1784 } else {
1785 atomic_or_32(&icmd->icmd_flags, ICMD_KNOWN_TO_FCA);
1786 ret = port->port_send_cmd(cmd);
1787 if (ret != FCT_SUCCESS) {
1788 atomic_and_32(&icmd->icmd_flags,
1789 ~ICMD_KNOWN_TO_FCA);
1790 }
1791 }
1792
1793 if (ret != FCT_SUCCESS) {
1794 fct_queue_cmd_for_termination(cmd, ret);
1795 }
1796
1797 DTRACE_FC_4(rport__logout__end,
1798 fct_cmd_t, cmd,
1869
1870 if ((ddi_get_lbolt() & 0x7f) == 0) {
1871 stmf_trace(iport->iport_alias, "handling"
1872 " PRLO from %x, waiting for cmds to"
1873 " drain", cmd->cmd_rportid);
1874 }
1875 return (DISC_ACTION_DELAY_RESCAN);
1876 }
1877 atomic_and_32(&irp->irp_flags, ~IRP_FCP_CLEANUP);
1878
1879 /* Session can only be terminated after all the I/Os have drained */
1880 if (irp->irp_flags & IRP_SCSI_SESSION_STARTED) {
1881 stmf_deregister_scsi_session(iport->iport_port->port_lport,
1882 irp->irp_session);
1883 stmf_free(irp->irp_session);
1884 irp->irp_session = NULL;
1885 atomic_and_32(&irp->irp_flags, ~IRP_SCSI_SESSION_STARTED);
1886 }
1887
1888 fct_dequeue_els(irp);
1889 atomic_add_16(&irp->irp_sa_elses_count, -1);
1890 ret = fct_send_accrjt(cmd, ELS_OP_ACC, 0, 0);
1891 if (ret != FCT_SUCCESS)
1892 fct_queue_cmd_for_termination(cmd, ret);
1893
1894 return (DISC_ACTION_RESCAN);
1895 }
1896
1897 disc_action_t
1898 fct_process_rcvd_adisc(fct_i_cmd_t *icmd)
1899 {
1900 fct_cmd_t *cmd = icmd->icmd_cmd;
1901 fct_remote_port_t *rp = cmd->cmd_rp;
1902 fct_local_port_t *port = cmd->cmd_port;
1903 fct_i_local_port_t *iport = (fct_i_local_port_t *)
1904 port->port_fct_private;
1905 fct_els_t *els = (fct_els_t *)
1906 cmd->cmd_specific;
1907 fct_i_remote_port_t *irp = (fct_i_remote_port_t *)
1908 rp->rp_fct_private;
1909 uint8_t *p;
1910 uint32_t *q;
1911 fct_status_t ret;
1912
1913 fct_dequeue_els(irp);
1914 atomic_add_16(&irp->irp_nsa_elses_count, -1);
1915
1916 /* Validate the adisc request */
1917 p = els->els_req_payload;
1918 q = (uint32_t *)p;
1919 if ((els->els_req_size != 28) || (bcmp(rp->rp_pwwn, p + 8, 8)) ||
1920 (bcmp(rp->rp_nwwn, p + 16, 8))) {
1921 ret = fct_send_accrjt(cmd, ELS_OP_LSRJT, 3, 0);
1922 } else {
1923 rp->rp_hard_address = BE_32(q[1]);
1924 els->els_resp_size = els->els_resp_alloc_size = 28;
1925 els->els_resp_payload = (uint8_t *)kmem_zalloc(28, KM_SLEEP);
1926 bcopy(p, els->els_resp_payload, 28);
1927 p = els->els_resp_payload;
1928 q = (uint32_t *)p;
1929 p[0] = ELS_OP_ACC;
1930 q[1] = BE_32(port->port_hard_address);
1931 bcopy(port->port_pwwn, p + 8, 8);
1932 bcopy(port->port_nwwn, p + 16, 8);
1933 q[6] = BE_32(iport->iport_link_info.portid);
1934 ret = port->port_send_cmd_response(cmd, 0);
1935 }
1936 if (ret != FCT_SUCCESS) {
1937 fct_queue_cmd_for_termination(cmd, ret);
1938 }
1939
1940 return (DISC_ACTION_RESCAN);
1941 }
1942
1943 disc_action_t
1944 fct_process_unknown_els(fct_i_cmd_t *icmd)
1945 {
1946 fct_i_local_port_t *iport = ICMD_TO_IPORT(icmd);
1947 fct_status_t ret = FCT_FAILURE;
1948 uint8_t op = 0;
1949
1950 ASSERT(icmd->icmd_cmd->cmd_type == FCT_CMD_RCVD_ELS);
1951 fct_dequeue_els(ICMD_TO_IRP(icmd));
1952 atomic_add_16(&ICMD_TO_IRP(icmd)->irp_nsa_elses_count, -1);
1953 op = ICMD_TO_ELS(icmd)->els_req_payload[0];
1954 stmf_trace(iport->iport_alias, "Rejecting unknown unsol els %x (%s)",
1955 op, FCT_ELS_NAME(op));
1956 ret = fct_send_accrjt(icmd->icmd_cmd, ELS_OP_LSRJT, 1, 0);
1957 if (ret != FCT_SUCCESS) {
1958 fct_queue_cmd_for_termination(icmd->icmd_cmd, ret);
1959 }
1960
1961 return (DISC_ACTION_RESCAN);
1962 }
1963
1964 disc_action_t
1965 fct_process_rscn(fct_i_cmd_t *icmd)
1966 {
1967 fct_i_local_port_t *iport = ICMD_TO_IPORT(icmd);
1968 fct_status_t ret = FCT_FAILURE;
1969 uint8_t op = 0;
1970 uint8_t *rscn_req_payload;
1971 uint32_t rscn_req_size;
1972
1973 fct_dequeue_els(ICMD_TO_IRP(icmd));
1974 atomic_add_16(&ICMD_TO_IRP(icmd)->irp_nsa_elses_count, -1);
1975 if (icmd->icmd_cmd->cmd_type == FCT_CMD_RCVD_ELS) {
1976 op = ICMD_TO_ELS(icmd)->els_req_payload[0];
1977 stmf_trace(iport->iport_alias, "Accepting RSCN %x (%s)",
1978 op, FCT_ELS_NAME(op));
1979 rscn_req_size = ICMD_TO_ELS(icmd)->els_req_size;
1980 rscn_req_payload = kmem_alloc(rscn_req_size, KM_SLEEP);
1981 bcopy(ICMD_TO_ELS(icmd)->els_req_payload, rscn_req_payload,
1982 rscn_req_size);
1983 ret = fct_send_accrjt(icmd->icmd_cmd, ELS_OP_ACC, 1, 0);
1984 if (ret != FCT_SUCCESS) {
1985 fct_queue_cmd_for_termination(icmd->icmd_cmd, ret);
1986 } else {
1987 if (fct_rscn_options & RSCN_OPTION_VERIFY) {
1988 fct_rscn_verify(iport, rscn_req_payload,
1989 rscn_req_size);
1990 }
1991 }
1992
1993 kmem_free(rscn_req_payload, rscn_req_size);
1994 } else {
2051 stmf_trace(iport->iport_alias, "Killing ELS %x cond 1",
2052 els->els_req_payload[0]);
2053 } else if (irp->irp_sa_elses_count &&
2054 (((*ppcmd)->icmd_flags & ICMD_SESSION_AFFECTING) == 0)) {
2055 stmf_trace(iport->iport_alias, "Killing ELS %x cond 2",
2056 els->els_req_payload[0]);
2057 dq = 1;
2058 } else if (((irp->irp_flags & IRP_PLOGI_DONE) == 0) &&
2059 (els->els_req_payload[0] != ELS_OP_PLOGI) &&
2060 (els->els_req_payload[0] != ELS_OP_LOGO) &&
2061 (special_prli_cond == 0)) {
2062 stmf_trace(iport->iport_alias, "Killing ELS %x cond 3",
2063 els->els_req_payload[0]);
2064 dq = 1;
2065 }
2066
2067 if (dq) {
2068 fct_i_cmd_t *c = (*ppcmd)->icmd_next;
2069
2070 if ((*ppcmd)->icmd_flags & ICMD_SESSION_AFFECTING)
2071 atomic_add_16(&irp->irp_sa_elses_count, -1);
2072 else
2073 atomic_add_16(&irp->irp_nsa_elses_count, -1);
2074 (*ppcmd)->icmd_next = cmd_to_abort;
2075 cmd_to_abort = *ppcmd;
2076 *ppcmd = c;
2077 } else {
2078 ppcmd = &((*ppcmd)->icmd_next);
2079 }
2080 }
2081 rw_exit(&irp->irp_lock);
2082
2083 while (cmd_to_abort) {
2084 fct_i_cmd_t *c = cmd_to_abort->icmd_next;
2085
2086 atomic_and_32(&cmd_to_abort->icmd_flags, ~ICMD_IN_IRP_QUEUE);
2087 fct_queue_cmd_for_termination(cmd_to_abort->icmd_cmd,
2088 FCT_ABORTED);
2089 cmd_to_abort = c;
2090 }
2091
2092 /*
2093 * pick from the top of the queue
2108 if ((icmd->icmd_flags & ICMD_ELS_PROCESSING_STARTED) == 0) {
2109 stmf_trace(iport->iport_alias, "Processing %ssol ELS %x (%s) "
2110 "rp_id=%x", (cmd->cmd_type == FCT_CMD_RCVD_ELS) ? "un" : "",
2111 op, FCT_ELS_NAME(op), cmd->cmd_rportid);
2112 atomic_or_32(&icmd->icmd_flags, ICMD_ELS_PROCESSING_STARTED);
2113 }
2114
2115 if (op == ELS_OP_PLOGI) {
2116 ret |= fct_process_plogi(icmd);
2117 } else if (op == ELS_OP_PRLI) {
2118 ret |= fct_process_prli(icmd);
2119 } else if (op == ELS_OP_LOGO) {
2120 ret |= fct_process_logo(icmd);
2121 } else if ((op == ELS_OP_PRLO) || (op == ELS_OP_TPRLO)) {
2122 ret |= fct_process_prlo(icmd);
2123 } else if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
2124 fct_status_t s;
2125 fct_local_port_t *port = iport->iport_port;
2126
2127 fct_dequeue_els(irp);
2128 atomic_add_16(&irp->irp_nsa_elses_count, -1);
2129 atomic_or_32(&icmd->icmd_flags, ICMD_KNOWN_TO_FCA);
2130 if ((s = port->port_send_cmd(cmd)) != FCT_SUCCESS) {
2131 atomic_and_32(&icmd->icmd_flags, ~ICMD_KNOWN_TO_FCA);
2132 fct_queue_cmd_for_termination(cmd, s);
2133 stmf_trace(iport->iport_alias, "Solicited els "
2134 "transport failed, ret = %llx", s);
2135 }
2136 } else if (op == ELS_OP_ADISC) {
2137 ret |= fct_process_rcvd_adisc(icmd);
2138 } else if (op == ELS_OP_RSCN) {
2139 (void) fct_process_rscn(icmd);
2140 } else {
2141 (void) fct_process_unknown_els(icmd);
2142 }
2143
2144 /*
2145 * This if condition will be false if a sa ELS trigged a cleanup
2146 * and set the ret = DISC_ACTION_DELAY_RESCAN. In that case we should
2147 * keep it that way.
2148 */
2162
2163 void
2164 fct_handle_sol_els_completion(fct_i_local_port_t *iport, fct_i_cmd_t *icmd)
2165 {
2166 fct_i_remote_port_t *irp = NULL;
2167 fct_els_t *els = ICMD_TO_ELS(icmd);
2168 uint8_t op = els->els_req_payload[0];
2169
2170 if (icmd->icmd_cmd->cmd_rp) {
2171 irp = ICMD_TO_IRP(icmd);
2172 }
2173 if (icmd->icmd_cmd->cmd_rp &&
2174 (icmd->icmd_cmd->cmd_comp_status == FCT_SUCCESS) &&
2175 (els->els_req_payload[0] == ELS_OP_PLOGI)) {
2176 bcopy(els->els_resp_payload + 20, irp->irp_rp->rp_pwwn, 8);
2177 bcopy(els->els_resp_payload + 28, irp->irp_rp->rp_nwwn, 8);
2178
2179 stmf_wwn_to_devid_desc((scsi_devid_desc_t *)irp->irp_id,
2180 irp->irp_rp->rp_pwwn, PROTOCOL_FIBRE_CHANNEL);
2181 atomic_or_32(&irp->irp_flags, IRP_PLOGI_DONE);
2182 atomic_add_32(&iport->iport_nrps_login, 1);
2183 if (irp->irp_deregister_timer) {
2184 irp->irp_deregister_timer = 0;
2185 irp->irp_dereg_count = 0;
2186 }
2187 }
2188
2189 if (irp && (els->els_req_payload[0] == ELS_OP_PLOGI)) {
2190 atomic_and_32(&irp->irp_flags, ~IRP_SOL_PLOGI_IN_PROGRESS);
2191 }
2192 atomic_or_32(&icmd->icmd_flags, ICMD_CMD_COMPLETE);
2193 stmf_trace(iport->iport_alias, "Sol ELS %x (%s) completed with "
2194 "status %llx, did/%x", op, FCT_ELS_NAME(op),
2195 icmd->icmd_cmd->cmd_comp_status, icmd->icmd_cmd->cmd_rportid);
2196 }
2197
2198 static disc_action_t
2199 fct_check_cmdlist(fct_i_local_port_t *iport)
2200 {
2201 int num_to_release, ndx;
2202 fct_i_cmd_t *icmd;
2208 max_active = iport->iport_max_active_ncmds;
2209
2210 if (total <= max_active)
2211 return (DISC_ACTION_NO_WORK);
2212 /*
2213 * Every time, we release half of the difference
2214 */
2215 num_to_release = (total + 1 - max_active) / 2;
2216
2217 mutex_exit(&iport->iport_worker_lock);
2218 for (ndx = 0; ndx < num_to_release; ndx++) {
2219 mutex_enter(&iport->iport_cached_cmd_lock);
2220 icmd = iport->iport_cached_cmdlist;
2221 if (icmd == NULL) {
2222 mutex_exit(&iport->iport_cached_cmd_lock);
2223 break;
2224 }
2225 iport->iport_cached_cmdlist = icmd->icmd_next;
2226 iport->iport_cached_ncmds--;
2227 mutex_exit(&iport->iport_cached_cmd_lock);
2228 atomic_add_32(&iport->iport_total_alloced_ncmds, -1);
2229 fct_free(icmd->icmd_cmd);
2230 }
2231 mutex_enter(&iport->iport_worker_lock);
2232 return (DISC_ACTION_RESCAN);
2233 }
2234
2235 /*
2236 * The efficiency of handling solicited commands is very low here. But
2237 * fortunately, we seldom send solicited commands. So it will not hurt
2238 * the system performance much.
2239 */
2240 static disc_action_t
2241 fct_check_solcmd_queue(fct_i_local_port_t *iport)
2242 {
2243 fct_i_cmd_t *icmd = NULL;
2244 fct_i_cmd_t *prev_icmd = NULL;
2245 fct_i_cmd_t *next_icmd = NULL;
2246
2247 ASSERT(mutex_owned(&iport->iport_worker_lock));
2248 for (icmd = iport->iport_solcmd_queue; icmd; icmd = next_icmd) {
2357
2358 stmf_trace(iport->iport_alias, "fct_transport_solcmd: "
2359 "ran out of xchg resources - cmd-%p", cmd);
2360 fct_queue_cmd_for_termination(cmd, FCT_NO_XCHG_RESOURCE);
2361 return;
2362 }
2363
2364 if (fct_netbuf_to_value(ICMD_TO_CT(icmd)->ct_req_payload + 8, 2) ==
2365 NS_GID_PN) {
2366 fct_i_remote_port_t *query_irp = NULL;
2367
2368 query_irp = fct_lookup_irp_by_portwwn(iport,
2369 ICMD_TO_CT(icmd)->ct_req_payload + 16);
2370 if (query_irp) {
2371 atomic_and_32(&query_irp->irp_flags, ~IRP_RSCN_QUEUED);
2372 }
2373 }
2374 rw_exit(&irp->irp_lock);
2375 rw_exit(&iport->iport_lock);
2376
2377 atomic_add_16(&irp->irp_nonfcp_xchg_count, 1);
2378 atomic_or_32(&icmd->icmd_flags, ICMD_KNOWN_TO_FCA);
2379 icmd->icmd_start_time = ddi_get_lbolt();
2380 ret = iport->iport_port->port_send_cmd(cmd);
2381 if (ret != FCT_SUCCESS) {
2382 atomic_and_32(&icmd->icmd_flags, ~ICMD_KNOWN_TO_FCA);
2383 fct_queue_cmd_for_termination(cmd, ret);
2384 }
2385 }
2386
2387 void
2388 fct_logo_cb(fct_i_cmd_t *icmd)
2389 {
2390 ASSERT(!(icmd->icmd_flags & ICMD_IMPLICIT));
2391 if (!FCT_IS_ELS_ACC(icmd)) {
2392 stmf_trace(ICMD_TO_IPORT(icmd)->iport_alias, "fct_logo_cb: "
2393 "solicited LOGO is not accepted - icmd/%p", icmd);
2394 }
2395 }
2396
2397 void
2746 if (bcmp(irp->irp_rp->rp_pwwn, portwwn, FC_WWN_LEN)) {
2747 continue;
2748 } else {
2749 return (irp);
2750 }
2751 }
2752 }
2753
2754 return (NULL);
2755 }
2756
#ifdef lint
#define	FCT_VERIFY_RSCN() _NOTE(EMPTY)
#else
/*
 * Queue a solicited GID_PN name-server query to verify a port hit by an
 * RSCN.  Relies on ct_cmd, port and irp being in scope at the expansion
 * site.  A snapshot of irp_rscn_counter rides along in icmd_cb_private so
 * fct_gid_cb can detect that a newer RSCN has superseded this query.
 * irp_flags is set with atomic_or_32 to match the atomic clears done
 * elsewhere (e.g. the ~IRP_RSCN_QUEUED path in fct_check_solcmd_queue).
 */
#define	FCT_VERIFY_RSCN() \
do { \
	ct_cmd = fct_create_solct(port, irp->irp_rp, NS_GID_PN, \
	    fct_gid_cb); \
	if (ct_cmd) { \
		uint32_t cnt; \
		cnt = atomic_inc_32_nv(&irp->irp_rscn_counter); \
		CMD_TO_ICMD(ct_cmd)->icmd_cb_private = \
		    INT2PTR(cnt, void *); \
		atomic_or_32(&irp->irp_flags, IRP_RSCN_QUEUED); \
		fct_post_to_solcmd_queue(port, ct_cmd); \
	} \
} while (0)
#endif
2774
2775 /* ARGSUSED */
2776 static void
2777 fct_rscn_verify(fct_i_local_port_t *iport, uint8_t *rscn_req_payload,
2778 uint32_t rscn_req_size)
2779 {
2780 int idx = 0;
2781 uint8_t page_format = 0;
2782 uint32_t page_portid = 0;
2783 uint8_t *page_buf = NULL;
2784 uint8_t *last_page_buf = NULL;
2785 #ifndef lint
2786 fct_cmd_t *ct_cmd = NULL;
|
921 rw_enter(&iport->iport_lock, RW_WRITER);
922 /* Make sure nobody created the struct except us */
923 if (fct_portid_to_portptr(iport, cmd->cmd_rportid)) {
924 /* Oh well, free it */
925 fct_free(rp);
926 } else {
927 fct_queue_rp(iport, irp);
928 }
929 rw_downgrade(&iport->iport_lock);
930 /* Start over because we dropped the lock */
931 goto start_els_posting;
932 }
933
934 /* A PLOGI is by default a logout of previous session */
935 irp->irp_deregister_timer = ddi_get_lbolt() +
936 drv_usectohz(USEC_DEREG_RP_TIMEOUT);
937 irp->irp_dereg_count = 0;
938 fct_post_to_discovery_queue(iport, irp, NULL);
939
940 /* A PLOGI also invalidates any RSCNs related to this rp */
941 atomic_inc_32(&irp->irp_rscn_counter);
942 } else {
943 /*
944 * For everything else, we must have (or be able to look up) a
945 * valid port pointer.
946 */
947 if (irp == NULL) {
948 rw_exit(&iport->iport_lock);
949 if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
950 /* XXX Throw a logout to the initiator */
951 stmf_trace(iport->iport_alias, "ELS %x "
952 "received from %x without a session",
953 els->els_req_payload[0], cmd->cmd_rportid);
954 } else {
955 stmf_trace(iport->iport_alias, "Sending ELS %x "
956 "to %x without a session",
957 els->els_req_payload[0], cmd->cmd_rportid);
958 }
959 fct_queue_cmd_for_termination(cmd, FCT_NOT_LOGGED_IN);
960 return;
961 }
966 * Lets get a slot for this els
967 */
968 if (!(icmd->icmd_flags & ICMD_IMPLICIT)) {
969 cmd_slot = fct_alloc_cmd_slot(iport, cmd);
970 if (cmd_slot == FCT_SLOT_EOL) {
971 /* This should not have happened */
972 rw_exit(&iport->iport_lock);
973 stmf_trace(iport->iport_alias,
974 "ran out of xchg resources");
975 fct_queue_cmd_for_termination(cmd,
976 FCT_NO_XCHG_RESOURCE);
977 return;
978 }
979 } else {
980 /*
981 * Tell the framework that fct_cmd_free() can decrement the
982 * irp_nonfcp_xchg_count variable.
983 */
984 atomic_or_32(&icmd->icmd_flags, ICMD_IMPLICIT_CMD_HAS_RESOURCE);
985 }
986 atomic_inc_16(&irp->irp_nonfcp_xchg_count);
987
988 /*
989 * Grab the remote port lock while we modify the port state.
990 * we should not drop the fca port lock (as a reader) until we
991 * modify the remote port state.
992 */
993 rw_enter(&irp->irp_lock, RW_WRITER);
994 if ((op == ELS_OP_PLOGI) || (op == ELS_OP_PRLI) ||
995 (op == ELS_OP_LOGO) || (op == ELS_OP_PRLO) ||
996 (op == ELS_OP_TPRLO)) {
997 uint32_t rf = IRP_PRLI_DONE;
998 if ((op == ELS_OP_PLOGI) || (op == ELS_OP_LOGO)) {
999 rf |= IRP_PLOGI_DONE;
1000 if (irp->irp_flags & IRP_PLOGI_DONE)
1001 atomic_dec_32(&iport->iport_nrps_login);
1002 }
1003 atomic_inc_16(&irp->irp_sa_elses_count);
1004 atomic_and_32(&irp->irp_flags, ~rf);
1005 atomic_or_32(&icmd->icmd_flags, ICMD_SESSION_AFFECTING);
1006 } else {
1007 atomic_inc_16(&irp->irp_nsa_elses_count);
1008 }
1009
1010 fct_post_to_discovery_queue(iport, irp, icmd);
1011
1012 rw_exit(&irp->irp_lock);
1013 rw_exit(&iport->iport_lock);
1014 }
1015
1016 /*
1017 * Cleanup I/Os for a rport. ttc is a bit Mask of cmd types to clean.
1018 * No locks held.
1019 */
1020 int
1021 fct_trigger_rport_cleanup(fct_i_remote_port_t *irp, int ttc)
1022 {
1023 fct_remote_port_t *rp = irp->irp_rp;
1024 fct_local_port_t *port = rp->rp_port;
1025 fct_i_local_port_t *iport =
1026 (fct_i_local_port_t *)port->port_fct_private;
1027 fct_cmd_t *cmd;
1143 for (i = 0; i < port->port_max_logins; i++) {
1144 if (iport->iport_rp_slots[i] == NULL) {
1145 break;
1146 }
1147 }
1148 if (i == port->port_max_logins) {
1149 /* This is really pushing it. */
1150 (void) snprintf(info, sizeof (info),
1151 "fct_register_remote_port "
1152 "Cannot register portid %x because all the "
1153 "handles are used up", rp->rp_id);
1154 goto hba_fatal_err;
1155 }
1156 rp->rp_handle = i;
1157 }
1158 /* By this time rport_handle is valid */
1159 if ((irp->irp_flags & IRP_HANDLE_OPENED) == 0) {
1160 iport->iport_rp_slots[rp->rp_handle] = irp;
1161 atomic_or_32(&irp->irp_flags, IRP_HANDLE_OPENED);
1162 }
1163 (void) atomic_inc_64_nv(&iport->iport_last_change);
1164 fct_log_remote_port_event(port, ESC_SUNFC_TARGET_ADD,
1165 rp->rp_pwwn, rp->rp_id);
1166
1167 register_rp_done:;
1168 rw_exit(&irp->irp_lock);
1169 rw_exit(&iport->iport_lock);
1170 return (FCT_SUCCESS);
1171
1172 hba_fatal_err:;
1173 rw_exit(&irp->irp_lock);
1174 rw_exit(&iport->iport_lock);
1175 /*
1176 * XXX Throw HBA fatal error event
1177 */
1178 (void) fct_port_shutdown(iport->iport_port,
1179 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
1180 return (FCT_FAILURE);
1181 }
1182
1183 fct_status_t
1188 fct_i_remote_port_t *irp = RP_TO_IRP(rp);
1189
1190 if (irp->irp_snn) {
1191 kmem_free(irp->irp_snn, strlen(irp->irp_snn) + 1);
1192 irp->irp_snn = NULL;
1193 }
1194 if (irp->irp_spn) {
1195 kmem_free(irp->irp_spn, strlen(irp->irp_spn) + 1);
1196 irp->irp_spn = NULL;
1197 }
1198
1199 if ((ret = port->port_deregister_remote_port(port, rp)) !=
1200 FCT_SUCCESS) {
1201 return (ret);
1202 }
1203
1204 if (irp->irp_flags & IRP_HANDLE_OPENED) {
1205 atomic_and_32(&irp->irp_flags, ~IRP_HANDLE_OPENED);
1206 iport->iport_rp_slots[rp->rp_handle] = NULL;
1207 }
1208 (void) atomic_inc_64_nv(&iport->iport_last_change);
1209 fct_log_remote_port_event(port, ESC_SUNFC_TARGET_REMOVE,
1210 rp->rp_pwwn, rp->rp_id);
1211
1212 return (FCT_SUCCESS);
1213 }
1214
1215 fct_status_t
1216 fct_send_accrjt(fct_cmd_t *cmd, uint8_t accrjt, uint8_t reason, uint8_t expl)
1217 {
1218 fct_local_port_t *port = (fct_local_port_t *)cmd->cmd_port;
1219 fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
1220
1221 els->els_resp_size = els->els_resp_alloc_size = 8;
1222 els->els_resp_payload = (uint8_t *)kmem_zalloc(8, KM_SLEEP);
1223 els->els_resp_payload[0] = accrjt;
1224 if (accrjt == 1) {
1225 els->els_resp_payload[5] = reason;
1226 els->els_resp_payload[6] = expl;
1227 } else {
1228 els->els_resp_size = 4;
1494 } else {
1495 /*
1496 * The reason we set this flag is to prevent
1497 * killing a PRLI while we have not yet processed
1498 * a response to PLOGI. Because the initiator
1499 * will send a PRLI as soon as it responds to PLOGI.
1500 * Check fct_process_els() for more info.
1501 */
1502 atomic_or_32(&irp->irp_flags,
1503 IRP_SOL_PLOGI_IN_PROGRESS);
1504 atomic_or_32(&icmd->icmd_flags, ICMD_KNOWN_TO_FCA);
1505 ret = port->port_send_cmd(cmd);
1506 if (ret != FCT_SUCCESS) {
1507 atomic_and_32(&icmd->icmd_flags,
1508 ~ICMD_KNOWN_TO_FCA);
1509 atomic_and_32(&irp->irp_flags,
1510 ~IRP_SOL_PLOGI_IN_PROGRESS);
1511 }
1512 }
1513 }
1514 atomic_dec_16(&irp->irp_sa_elses_count);
1515
1516 if (ret == FCT_SUCCESS) {
1517 if (cmd_type == FCT_CMD_RCVD_ELS) {
1518 atomic_or_32(&irp->irp_flags, IRP_PLOGI_DONE);
1519 atomic_inc_32(&iport->iport_nrps_login);
1520 if (irp->irp_deregister_timer)
1521 irp->irp_deregister_timer = 0;
1522 }
1523 if (icmd_flags & ICMD_IMPLICIT) {
1524 DTRACE_FC_5(rport__login__end,
1525 fct_cmd_t, cmd,
1526 fct_local_port_t, port,
1527 fct_i_remote_port_t, irp,
1528 int, (cmd_type != FCT_CMD_RCVD_ELS),
1529 int, FCT_SUCCESS);
1530
1531 p = els->els_resp_payload;
1532 p[0] = ELS_OP_ACC;
1533 cmd->cmd_comp_status = FCT_SUCCESS;
1534 fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
1535 }
1536 } else {
1537 DTRACE_FC_5(rport__login__end,
1538 fct_cmd_t, cmd,
1539 fct_local_port_t, port,
1580 * has responded to PLOGI already.
1581 */
1582 /* XXX: Probably need a timeout here */
1583 return (DISC_ACTION_DELAY_RESCAN);
1584 }
1585 /* The caller has made sure that login is done */
1586
1587 /* Make sure the process is fcp in this case */
1588 if ((els->els_req_size != 20) || (bcmp(els->els_req_payload,
1589 fct_prli_temp, 16))) {
1590 if (els->els_req_payload[4] != 0x08)
1591 stmf_trace(iport->iport_alias, "PRLI received from"
1592 " %x for unknown FC-4 type %x", cmd->cmd_rportid,
1593 els->els_req_payload[4]);
1594 else
1595 stmf_trace(iport->iport_alias, "Rejecting PRLI from %x "
1596 " pld sz %d, prli_flags %x", cmd->cmd_rportid,
1597 els->els_req_size, els->els_req_payload[6]);
1598
1599 fct_dequeue_els(irp);
1600 atomic_dec_16(&irp->irp_sa_elses_count);
1601 ret = fct_send_accrjt(cmd, ELS_OP_LSRJT, 3, 0x2c);
1602 goto prli_end;
1603 }
1604
1605 if (irp->irp_fcp_xchg_count) {
1606 /* Trigger cleanup if necessary */
1607 if ((irp->irp_flags & IRP_FCP_CLEANUP) == 0) {
1608 stmf_trace(iport->iport_alias, "handling PRLI from"
1609 " %x. Triggering cleanup", cmd->cmd_rportid);
1610 if (fct_trigger_rport_cleanup(irp, FCT_CMD_FCP_XCHG)) {
1611 atomic_or_32(&irp->irp_flags, IRP_FCP_CLEANUP);
1612 } else {
1613 /* XXX: handle this */
1614 /* EMPTY */
1615 }
1616 }
1617
1618 end_time = icmd->icmd_start_time +
1619 drv_usectohz(USEC_ELS_TIMEOUT);
1620 if (ddi_get_lbolt() > end_time) {
1654 ses->ss_lport = port->port_lport;
1655 if (stmf_register_scsi_session(port->port_lport, ses) !=
1656 STMF_SUCCESS) {
1657 stmf_free(ses);
1658 ses = NULL;
1659 } else {
1660 irp->irp_session = ses;
1661 irp->irp_session->ss_rport_alias = irp->irp_snn;
1662
1663 /*
1664 * The reason IRP_SCSI_SESSION_STARTED is different
1665 * from IRP_PRLI_DONE is that we clear IRP_PRLI_DONE
1666 * inside interrupt context. We dont want to deregister
1667 * the session from an interrupt.
1668 */
1669 atomic_or_32(&irp->irp_flags, IRP_SCSI_SESSION_STARTED);
1670 }
1671 }
1672
1673 fct_dequeue_els(irp);
1674 atomic_dec_16(&irp->irp_sa_elses_count);
1675 if (ses == NULL) {
1676 /* fail PRLI */
1677 ret = fct_send_accrjt(cmd, ELS_OP_LSRJT, 3, 0);
1678 } else {
1679 /* accept PRLI */
1680 els->els_resp_payload = (uint8_t *)kmem_zalloc(20, KM_SLEEP);
1681 bcopy(fct_prli_temp, els->els_resp_payload, 20);
1682 els->els_resp_payload[0] = 2;
1683 els->els_resp_payload[6] = 0x21;
1684
1685 /* XXX the two bytes below need to set as per capabilities */
1686 els->els_resp_payload[18] = 0;
1687 els->els_resp_payload[19] = 0x12;
1688
1689 els->els_resp_size = els->els_resp_alloc_size = 20;
1690 if ((ret = port->port_send_cmd_response(cmd, 0)) !=
1691 FCT_SUCCESS) {
1692 stmf_deregister_scsi_session(port->port_lport, ses);
1693 stmf_free(irp->irp_session);
1694 irp->irp_session = NULL;
1758
1759 if ((ddi_get_lbolt() & 0x7f) == 0) {
1760 stmf_trace(iport->iport_alias, "handling"
1761 " LOGO rp_id %x, waiting for cmds to"
1762 " drain", cmd->cmd_rportid);
1763 }
1764 return (DISC_ACTION_DELAY_RESCAN);
1765 }
1766 atomic_and_32(&irp->irp_flags, ~IRP_SESSION_CLEANUP);
1767
1768 /* Session can only be terminated after all the I/Os have drained */
1769 if (irp->irp_flags & IRP_SCSI_SESSION_STARTED) {
1770 stmf_deregister_scsi_session(iport->iport_port->port_lport,
1771 irp->irp_session);
1772 stmf_free(irp->irp_session);
1773 irp->irp_session = NULL;
1774 atomic_and_32(&irp->irp_flags, ~IRP_SCSI_SESSION_STARTED);
1775 }
1776
1777 fct_dequeue_els(irp);
1778 atomic_dec_16(&irp->irp_sa_elses_count);
1779
1780 /* don't send response if this is an implicit logout cmd */
1781 if (!(icmd->icmd_flags & ICMD_IMPLICIT)) {
1782 if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
1783 ret = fct_send_accrjt(cmd, ELS_OP_ACC, 0, 0);
1784 } else {
1785 atomic_or_32(&icmd->icmd_flags, ICMD_KNOWN_TO_FCA);
1786 ret = port->port_send_cmd(cmd);
1787 if (ret != FCT_SUCCESS) {
1788 atomic_and_32(&icmd->icmd_flags,
1789 ~ICMD_KNOWN_TO_FCA);
1790 }
1791 }
1792
1793 if (ret != FCT_SUCCESS) {
1794 fct_queue_cmd_for_termination(cmd, ret);
1795 }
1796
1797 DTRACE_FC_4(rport__logout__end,
1798 fct_cmd_t, cmd,
1869
1870 if ((ddi_get_lbolt() & 0x7f) == 0) {
1871 stmf_trace(iport->iport_alias, "handling"
1872 " PRLO from %x, waiting for cmds to"
1873 " drain", cmd->cmd_rportid);
1874 }
1875 return (DISC_ACTION_DELAY_RESCAN);
1876 }
1877 atomic_and_32(&irp->irp_flags, ~IRP_FCP_CLEANUP);
1878
1879 /* Session can only be terminated after all the I/Os have drained */
1880 if (irp->irp_flags & IRP_SCSI_SESSION_STARTED) {
1881 stmf_deregister_scsi_session(iport->iport_port->port_lport,
1882 irp->irp_session);
1883 stmf_free(irp->irp_session);
1884 irp->irp_session = NULL;
1885 atomic_and_32(&irp->irp_flags, ~IRP_SCSI_SESSION_STARTED);
1886 }
1887
1888 fct_dequeue_els(irp);
1889 atomic_dec_16(&irp->irp_sa_elses_count);
1890 ret = fct_send_accrjt(cmd, ELS_OP_ACC, 0, 0);
1891 if (ret != FCT_SUCCESS)
1892 fct_queue_cmd_for_termination(cmd, ret);
1893
1894 return (DISC_ACTION_RESCAN);
1895 }
1896
1897 disc_action_t
1898 fct_process_rcvd_adisc(fct_i_cmd_t *icmd)
1899 {
1900 fct_cmd_t *cmd = icmd->icmd_cmd;
1901 fct_remote_port_t *rp = cmd->cmd_rp;
1902 fct_local_port_t *port = cmd->cmd_port;
1903 fct_i_local_port_t *iport = (fct_i_local_port_t *)
1904 port->port_fct_private;
1905 fct_els_t *els = (fct_els_t *)
1906 cmd->cmd_specific;
1907 fct_i_remote_port_t *irp = (fct_i_remote_port_t *)
1908 rp->rp_fct_private;
1909 uint8_t *p;
1910 uint32_t *q;
1911 fct_status_t ret;
1912
1913 fct_dequeue_els(irp);
1914 atomic_dec_16(&irp->irp_nsa_elses_count);
1915
1916 /* Validate the adisc request */
1917 p = els->els_req_payload;
1918 q = (uint32_t *)p;
1919 if ((els->els_req_size != 28) || (bcmp(rp->rp_pwwn, p + 8, 8)) ||
1920 (bcmp(rp->rp_nwwn, p + 16, 8))) {
1921 ret = fct_send_accrjt(cmd, ELS_OP_LSRJT, 3, 0);
1922 } else {
1923 rp->rp_hard_address = BE_32(q[1]);
1924 els->els_resp_size = els->els_resp_alloc_size = 28;
1925 els->els_resp_payload = (uint8_t *)kmem_zalloc(28, KM_SLEEP);
1926 bcopy(p, els->els_resp_payload, 28);
1927 p = els->els_resp_payload;
1928 q = (uint32_t *)p;
1929 p[0] = ELS_OP_ACC;
1930 q[1] = BE_32(port->port_hard_address);
1931 bcopy(port->port_pwwn, p + 8, 8);
1932 bcopy(port->port_nwwn, p + 16, 8);
1933 q[6] = BE_32(iport->iport_link_info.portid);
1934 ret = port->port_send_cmd_response(cmd, 0);
1935 }
1936 if (ret != FCT_SUCCESS) {
1937 fct_queue_cmd_for_termination(cmd, ret);
1938 }
1939
1940 return (DISC_ACTION_RESCAN);
1941 }
1942
1943 disc_action_t
1944 fct_process_unknown_els(fct_i_cmd_t *icmd)
1945 {
1946 fct_i_local_port_t *iport = ICMD_TO_IPORT(icmd);
1947 fct_status_t ret = FCT_FAILURE;
1948 uint8_t op = 0;
1949
1950 ASSERT(icmd->icmd_cmd->cmd_type == FCT_CMD_RCVD_ELS);
1951 fct_dequeue_els(ICMD_TO_IRP(icmd));
1952 atomic_dec_16(&ICMD_TO_IRP(icmd)->irp_nsa_elses_count);
1953 op = ICMD_TO_ELS(icmd)->els_req_payload[0];
1954 stmf_trace(iport->iport_alias, "Rejecting unknown unsol els %x (%s)",
1955 op, FCT_ELS_NAME(op));
1956 ret = fct_send_accrjt(icmd->icmd_cmd, ELS_OP_LSRJT, 1, 0);
1957 if (ret != FCT_SUCCESS) {
1958 fct_queue_cmd_for_termination(icmd->icmd_cmd, ret);
1959 }
1960
1961 return (DISC_ACTION_RESCAN);
1962 }
1963
1964 disc_action_t
1965 fct_process_rscn(fct_i_cmd_t *icmd)
1966 {
1967 fct_i_local_port_t *iport = ICMD_TO_IPORT(icmd);
1968 fct_status_t ret = FCT_FAILURE;
1969 uint8_t op = 0;
1970 uint8_t *rscn_req_payload;
1971 uint32_t rscn_req_size;
1972
1973 fct_dequeue_els(ICMD_TO_IRP(icmd));
1974 atomic_dec_16(&ICMD_TO_IRP(icmd)->irp_nsa_elses_count);
1975 if (icmd->icmd_cmd->cmd_type == FCT_CMD_RCVD_ELS) {
1976 op = ICMD_TO_ELS(icmd)->els_req_payload[0];
1977 stmf_trace(iport->iport_alias, "Accepting RSCN %x (%s)",
1978 op, FCT_ELS_NAME(op));
1979 rscn_req_size = ICMD_TO_ELS(icmd)->els_req_size;
1980 rscn_req_payload = kmem_alloc(rscn_req_size, KM_SLEEP);
1981 bcopy(ICMD_TO_ELS(icmd)->els_req_payload, rscn_req_payload,
1982 rscn_req_size);
1983 ret = fct_send_accrjt(icmd->icmd_cmd, ELS_OP_ACC, 1, 0);
1984 if (ret != FCT_SUCCESS) {
1985 fct_queue_cmd_for_termination(icmd->icmd_cmd, ret);
1986 } else {
1987 if (fct_rscn_options & RSCN_OPTION_VERIFY) {
1988 fct_rscn_verify(iport, rscn_req_payload,
1989 rscn_req_size);
1990 }
1991 }
1992
1993 kmem_free(rscn_req_payload, rscn_req_size);
1994 } else {
2051 stmf_trace(iport->iport_alias, "Killing ELS %x cond 1",
2052 els->els_req_payload[0]);
2053 } else if (irp->irp_sa_elses_count &&
2054 (((*ppcmd)->icmd_flags & ICMD_SESSION_AFFECTING) == 0)) {
2055 stmf_trace(iport->iport_alias, "Killing ELS %x cond 2",
2056 els->els_req_payload[0]);
2057 dq = 1;
2058 } else if (((irp->irp_flags & IRP_PLOGI_DONE) == 0) &&
2059 (els->els_req_payload[0] != ELS_OP_PLOGI) &&
2060 (els->els_req_payload[0] != ELS_OP_LOGO) &&
2061 (special_prli_cond == 0)) {
2062 stmf_trace(iport->iport_alias, "Killing ELS %x cond 3",
2063 els->els_req_payload[0]);
2064 dq = 1;
2065 }
2066
2067 if (dq) {
2068 fct_i_cmd_t *c = (*ppcmd)->icmd_next;
2069
2070 if ((*ppcmd)->icmd_flags & ICMD_SESSION_AFFECTING)
2071 atomic_dec_16(&irp->irp_sa_elses_count);
2072 else
2073 atomic_dec_16(&irp->irp_nsa_elses_count);
2074 (*ppcmd)->icmd_next = cmd_to_abort;
2075 cmd_to_abort = *ppcmd;
2076 *ppcmd = c;
2077 } else {
2078 ppcmd = &((*ppcmd)->icmd_next);
2079 }
2080 }
2081 rw_exit(&irp->irp_lock);
2082
2083 while (cmd_to_abort) {
2084 fct_i_cmd_t *c = cmd_to_abort->icmd_next;
2085
2086 atomic_and_32(&cmd_to_abort->icmd_flags, ~ICMD_IN_IRP_QUEUE);
2087 fct_queue_cmd_for_termination(cmd_to_abort->icmd_cmd,
2088 FCT_ABORTED);
2089 cmd_to_abort = c;
2090 }
2091
2092 /*
2093 * pick from the top of the queue
2108 if ((icmd->icmd_flags & ICMD_ELS_PROCESSING_STARTED) == 0) {
2109 stmf_trace(iport->iport_alias, "Processing %ssol ELS %x (%s) "
2110 "rp_id=%x", (cmd->cmd_type == FCT_CMD_RCVD_ELS) ? "un" : "",
2111 op, FCT_ELS_NAME(op), cmd->cmd_rportid);
2112 atomic_or_32(&icmd->icmd_flags, ICMD_ELS_PROCESSING_STARTED);
2113 }
2114
2115 if (op == ELS_OP_PLOGI) {
2116 ret |= fct_process_plogi(icmd);
2117 } else if (op == ELS_OP_PRLI) {
2118 ret |= fct_process_prli(icmd);
2119 } else if (op == ELS_OP_LOGO) {
2120 ret |= fct_process_logo(icmd);
2121 } else if ((op == ELS_OP_PRLO) || (op == ELS_OP_TPRLO)) {
2122 ret |= fct_process_prlo(icmd);
2123 } else if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
2124 fct_status_t s;
2125 fct_local_port_t *port = iport->iport_port;
2126
2127 fct_dequeue_els(irp);
2128 atomic_dec_16(&irp->irp_nsa_elses_count);
2129 atomic_or_32(&icmd->icmd_flags, ICMD_KNOWN_TO_FCA);
2130 if ((s = port->port_send_cmd(cmd)) != FCT_SUCCESS) {
2131 atomic_and_32(&icmd->icmd_flags, ~ICMD_KNOWN_TO_FCA);
2132 fct_queue_cmd_for_termination(cmd, s);
2133 stmf_trace(iport->iport_alias, "Solicited els "
2134 "transport failed, ret = %llx", s);
2135 }
2136 } else if (op == ELS_OP_ADISC) {
2137 ret |= fct_process_rcvd_adisc(icmd);
2138 } else if (op == ELS_OP_RSCN) {
2139 (void) fct_process_rscn(icmd);
2140 } else {
2141 (void) fct_process_unknown_els(icmd);
2142 }
2143
2144 /*
2145 * This if condition will be false if a sa ELS trigged a cleanup
2146 * and set the ret = DISC_ACTION_DELAY_RESCAN. In that case we should
2147 * keep it that way.
2148 */
2162
2163 void
2164 fct_handle_sol_els_completion(fct_i_local_port_t *iport, fct_i_cmd_t *icmd)
2165 {
2166 fct_i_remote_port_t *irp = NULL;
2167 fct_els_t *els = ICMD_TO_ELS(icmd);
2168 uint8_t op = els->els_req_payload[0];
2169
2170 if (icmd->icmd_cmd->cmd_rp) {
2171 irp = ICMD_TO_IRP(icmd);
2172 }
2173 if (icmd->icmd_cmd->cmd_rp &&
2174 (icmd->icmd_cmd->cmd_comp_status == FCT_SUCCESS) &&
2175 (els->els_req_payload[0] == ELS_OP_PLOGI)) {
2176 bcopy(els->els_resp_payload + 20, irp->irp_rp->rp_pwwn, 8);
2177 bcopy(els->els_resp_payload + 28, irp->irp_rp->rp_nwwn, 8);
2178
2179 stmf_wwn_to_devid_desc((scsi_devid_desc_t *)irp->irp_id,
2180 irp->irp_rp->rp_pwwn, PROTOCOL_FIBRE_CHANNEL);
2181 atomic_or_32(&irp->irp_flags, IRP_PLOGI_DONE);
2182 atomic_inc_32(&iport->iport_nrps_login);
2183 if (irp->irp_deregister_timer) {
2184 irp->irp_deregister_timer = 0;
2185 irp->irp_dereg_count = 0;
2186 }
2187 }
2188
2189 if (irp && (els->els_req_payload[0] == ELS_OP_PLOGI)) {
2190 atomic_and_32(&irp->irp_flags, ~IRP_SOL_PLOGI_IN_PROGRESS);
2191 }
2192 atomic_or_32(&icmd->icmd_flags, ICMD_CMD_COMPLETE);
2193 stmf_trace(iport->iport_alias, "Sol ELS %x (%s) completed with "
2194 "status %llx, did/%x", op, FCT_ELS_NAME(op),
2195 icmd->icmd_cmd->cmd_comp_status, icmd->icmd_cmd->cmd_rportid);
2196 }
2197
/*
 * Shrink the port's cache of pre-allocated commands.  When the total
 * allocated command count exceeds the port's max-active watermark, free
 * roughly half of the excess back to the system, one command at a time.
 * The worker lock is dropped around the frees and retaken before return.
 * NOTE(review): `total' and `max_active' are set up just above this point;
 * confirm their derivation against the full source.
 */
static disc_action_t
fct_check_cmdlist(fct_i_local_port_t *iport)
{
	int num_to_release, ndx;
	fct_i_cmd_t *icmd;
	max_active = iport->iport_max_active_ncmds;

	if (total <= max_active)
		return (DISC_ACTION_NO_WORK);
	/*
	 * Everytime, we release half of the difference
	 */
	num_to_release = (total + 1 - max_active) / 2;

	mutex_exit(&iport->iport_worker_lock);
	for (ndx = 0; ndx < num_to_release; ndx++) {
		/* Pop one cached command; stop early if the cache is empty */
		mutex_enter(&iport->iport_cached_cmd_lock);
		icmd = iport->iport_cached_cmdlist;
		if (icmd == NULL) {
			mutex_exit(&iport->iport_cached_cmd_lock);
			break;
		}
		iport->iport_cached_cmdlist = icmd->icmd_next;
		iport->iport_cached_ncmds--;
		mutex_exit(&iport->iport_cached_cmd_lock);
		atomic_dec_32(&iport->iport_total_alloced_ncmds);
		/* Free with the cache lock dropped */
		fct_free(icmd->icmd_cmd);
	}
	mutex_enter(&iport->iport_worker_lock);
	return (DISC_ACTION_RESCAN);
}
2234
2235 /*
2236 * The efficiency of handling solicited commands is very low here. But
2237 * fortunately, we seldom send solicited commands. So it will not hurt
2238 * the system performance much.
2239 */
2240 static disc_action_t
2241 fct_check_solcmd_queue(fct_i_local_port_t *iport)
2242 {
2243 fct_i_cmd_t *icmd = NULL;
2244 fct_i_cmd_t *prev_icmd = NULL;
2245 fct_i_cmd_t *next_icmd = NULL;
2246
2247 ASSERT(mutex_owned(&iport->iport_worker_lock));
2248 for (icmd = iport->iport_solcmd_queue; icmd; icmd = next_icmd) {
2357
2358 stmf_trace(iport->iport_alias, "fct_transport_solcmd: "
2359 "ran out of xchg resources - cmd-%p", cmd);
2360 fct_queue_cmd_for_termination(cmd, FCT_NO_XCHG_RESOURCE);
2361 return;
2362 }
2363
2364 if (fct_netbuf_to_value(ICMD_TO_CT(icmd)->ct_req_payload + 8, 2) ==
2365 NS_GID_PN) {
2366 fct_i_remote_port_t *query_irp = NULL;
2367
2368 query_irp = fct_lookup_irp_by_portwwn(iport,
2369 ICMD_TO_CT(icmd)->ct_req_payload + 16);
2370 if (query_irp) {
2371 atomic_and_32(&query_irp->irp_flags, ~IRP_RSCN_QUEUED);
2372 }
2373 }
2374 rw_exit(&irp->irp_lock);
2375 rw_exit(&iport->iport_lock);
2376
2377 atomic_inc_16(&irp->irp_nonfcp_xchg_count);
2378 atomic_or_32(&icmd->icmd_flags, ICMD_KNOWN_TO_FCA);
2379 icmd->icmd_start_time = ddi_get_lbolt();
2380 ret = iport->iport_port->port_send_cmd(cmd);
2381 if (ret != FCT_SUCCESS) {
2382 atomic_and_32(&icmd->icmd_flags, ~ICMD_KNOWN_TO_FCA);
2383 fct_queue_cmd_for_termination(cmd, ret);
2384 }
2385 }
2386
2387 void
2388 fct_logo_cb(fct_i_cmd_t *icmd)
2389 {
2390 ASSERT(!(icmd->icmd_flags & ICMD_IMPLICIT));
2391 if (!FCT_IS_ELS_ACC(icmd)) {
2392 stmf_trace(ICMD_TO_IPORT(icmd)->iport_alias, "fct_logo_cb: "
2393 "solicited LOGO is not accepted - icmd/%p", icmd);
2394 }
2395 }
2396
2397 void
2746 if (bcmp(irp->irp_rp->rp_pwwn, portwwn, FC_WWN_LEN)) {
2747 continue;
2748 } else {
2749 return (irp);
2750 }
2751 }
2752 }
2753
2754 return (NULL);
2755 }
2756
/*
 * FCT_VERIFY_RSCN -- helper macro for fct_rscn_verify() below.  It creates
 * a solicited GID_PN name-server query (completion via fct_gid_cb) for the
 * remote port held in the expansion site's `irp' and `port' locals, stashes
 * the post-increment value of irp_rscn_counter in the command's private
 * callback slot, flags the irp IRP_RSCN_QUEUED, and posts the command to
 * the solicited-command queue.  It relies on `ct_cmd', `port' and `irp'
 * being in scope where it is expanded.  Under lint it expands to nothing.
 */
#ifdef lint
#define	FCT_VERIFY_RSCN() _NOTE(EMPTY)
#else
#define	FCT_VERIFY_RSCN() \
do { \
	ct_cmd = fct_create_solct(port, irp->irp_rp, NS_GID_PN, \
	    fct_gid_cb); \
	if (ct_cmd) { \
		uint32_t cnt; \
		cnt = atomic_inc_32_nv(&irp->irp_rscn_counter); \
		CMD_TO_ICMD(ct_cmd)->icmd_cb_private = \
		    INT2PTR(cnt, void *); \
		irp->irp_flags |= IRP_RSCN_QUEUED; \
		fct_post_to_solcmd_queue(port, ct_cmd); \
	} \
} while (0)
#endif
2774
2775 /* ARGSUSED */
2776 static void
2777 fct_rscn_verify(fct_i_local_port_t *iport, uint8_t *rscn_req_payload,
2778 uint32_t rscn_req_size)
2779 {
2780 int idx = 0;
2781 uint8_t page_format = 0;
2782 uint32_t page_portid = 0;
2783 uint8_t *page_buf = NULL;
2784 uint8_t *last_page_buf = NULL;
2785 #ifndef lint
2786 fct_cmd_t *ct_cmd = NULL;
|