Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli3.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli3.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Emulex. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27
28 28 #include <emlxs.h>
29 29
30 30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 31 EMLXS_MSG_DEF(EMLXS_SLI3_C);
32 32
33 33 static void emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
34 34 static void emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
35 35 static void emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
36 36 uint32_t ha_copy);
37 37 #ifdef SFCT_SUPPORT
38 38 static uint32_t emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
39 39 #endif /* SFCT_SUPPORT */
40 40
41 41 static uint32_t emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
42 42
43 43 static uint32_t emlxs_disable_traffic_cop = 1;
44 44
45 45 static int emlxs_sli3_map_hdw(emlxs_hba_t *hba);
46 46
47 47 static void emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);
48 48
49 49 static int32_t emlxs_sli3_online(emlxs_hba_t *hba);
50 50
51 51 static void emlxs_sli3_offline(emlxs_hba_t *hba);
52 52
53 53 static uint32_t emlxs_sli3_hba_reset(emlxs_hba_t *hba,
54 54 uint32_t restart, uint32_t skip_post,
55 55 uint32_t quiesce);
56 56
57 57 static void emlxs_sli3_hba_kill(emlxs_hba_t *hba);
58 58 static void emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
59 59 static uint32_t emlxs_sli3_hba_init(emlxs_hba_t *hba);
60 60
61 61 static uint32_t emlxs_sli2_bde_setup(emlxs_port_t *port,
62 62 emlxs_buf_t *sbp);
63 63 static uint32_t emlxs_sli3_bde_setup(emlxs_port_t *port,
64 64 emlxs_buf_t *sbp);
65 65 static uint32_t emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
66 66 emlxs_buf_t *sbp);
67 67 static uint32_t emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
68 68 emlxs_buf_t *sbp);
69 69
70 70
71 71 static void emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
72 72 CHANNEL *rp, IOCBQ *iocb_cmd);
73 73
74 74
75 75 static uint32_t emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
76 76 MAILBOXQ *mbq, int32_t flg,
77 77 uint32_t tmo);
78 78
79 79
80 80 #ifdef SFCT_SUPPORT
81 81 static uint32_t emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
82 82 emlxs_buf_t *cmd_sbp, int channel);
83 83
84 84 #endif /* SFCT_SUPPORT */
85 85
86 86 static uint32_t emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
87 87 emlxs_buf_t *sbp, int ring);
88 88
89 89 static uint32_t emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
90 90 emlxs_buf_t *sbp);
91 91
92 92 static uint32_t emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
93 93 emlxs_buf_t *sbp);
94 94
95 95
96 96 static uint32_t emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
97 97 emlxs_buf_t *sbp);
98 98
99 99
100 100 static void emlxs_sli3_poll_intr(emlxs_hba_t *hba,
101 101 uint32_t att_bit);
102 102
103 103 static int32_t emlxs_sli3_intx_intr(char *arg);
104 104 #ifdef MSI_SUPPORT
105 105 static uint32_t emlxs_sli3_msi_intr(char *arg1, char *arg2);
106 106 #endif /* MSI_SUPPORT */
107 107
108 108 static void emlxs_sli3_enable_intr(emlxs_hba_t *hba);
109 109
110 110 static void emlxs_sli3_disable_intr(emlxs_hba_t *hba,
111 111 uint32_t att);
112 112
113 113
114 114 static void emlxs_handle_ff_error(emlxs_hba_t *hba);
115 115
116 116 static uint32_t emlxs_handle_mb_event(emlxs_hba_t *hba);
117 117
118 118 static void emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);
119 119
120 120 static uint32_t emlxs_mb_config_port(emlxs_hba_t *hba,
121 121 MAILBOXQ *mbq, uint32_t sli_mode,
122 122 uint32_t hbainit);
123 123 static void emlxs_enable_latt(emlxs_hba_t *hba);
124 124
125 125 static uint32_t emlxs_check_attention(emlxs_hba_t *hba);
126 126
127 127 static uint32_t emlxs_get_attention(emlxs_hba_t *hba,
128 128 int32_t msgid);
129 129 static void emlxs_proc_attention(emlxs_hba_t *hba,
130 130 uint32_t ha_copy);
131 131 /* static int emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
132 132 /* CHANNEL *cp, IOCBQ *iocbq); */
133 133 /* static void emlxs_update_HBQ_index(emlxs_hba_t *hba, */
134 134 /* uint32_t hbq_id); */
135 135 /* static void emlxs_hbq_free_all(emlxs_hba_t *hba, */
136 136 /* uint32_t hbq_id); */
137 137 static uint32_t emlxs_hbq_setup(emlxs_hba_t *hba,
138 138 uint32_t hbq_id);
139 139 extern void emlxs_sli3_timer(emlxs_hba_t *hba);
140 140
141 141 extern void emlxs_sli3_poll_erratt(emlxs_hba_t *hba);
142 142
143 143
144 144 /* Define SLI3 API functions */
145 145 emlxs_sli_api_t emlxs_sli3_api = {
146 146 emlxs_sli3_map_hdw,
147 147 emlxs_sli3_unmap_hdw,
148 148 emlxs_sli3_online,
149 149 emlxs_sli3_offline,
150 150 emlxs_sli3_hba_reset,
151 151 emlxs_sli3_hba_kill,
152 152 emlxs_sli3_issue_iocb_cmd,
153 153 emlxs_sli3_issue_mbox_cmd,
154 154 #ifdef SFCT_SUPPORT
155 155 emlxs_sli3_prep_fct_iocb,
156 156 #else
157 157 NULL,
158 158 #endif /* SFCT_SUPPORT */
159 159 emlxs_sli3_prep_fcp_iocb,
160 160 emlxs_sli3_prep_ip_iocb,
161 161 emlxs_sli3_prep_els_iocb,
162 162 emlxs_sli3_prep_ct_iocb,
163 163 emlxs_sli3_poll_intr,
164 164 emlxs_sli3_intx_intr,
165 165 emlxs_sli3_msi_intr,
166 166 emlxs_sli3_disable_intr,
167 167 emlxs_sli3_timer,
168 168 emlxs_sli3_poll_erratt
169 169 };
170 170
171 171
172 172 /*
173 173 * emlxs_sli3_online()
174 174 *
175 175 * This routine will start initialization of the SLI2/3 HBA.
176 176 */
177 177 static int32_t
178 178 emlxs_sli3_online(emlxs_hba_t *hba)
179 179 {
180 180 emlxs_port_t *port = &PPORT;
181 181 emlxs_config_t *cfg;
182 182 emlxs_vpd_t *vpd;
183 183 MAILBOX *mb = NULL;
184 184 MAILBOXQ *mbq = NULL;
185 185 RING *rp;
186 186 CHANNEL *cp;
187 187 MATCHMAP *mp = NULL;
188 188 MATCHMAP *mp1 = NULL;
189 189 uint8_t *inptr;
190 190 uint8_t *outptr;
191 191 uint32_t status;
192 192 uint16_t i;
193 193 uint32_t j;
194 194 uint32_t read_rev_reset;
195 195 uint32_t key = 0;
196 196 uint32_t fw_check;
197 197 uint32_t kern_update = 0;
198 198 uint32_t rval = 0;
199 199 uint32_t offset;
200 200 uint8_t vpd_data[DMP_VPD_SIZE];
201 201 uint32_t MaxRbusSize;
202 202 uint32_t MaxIbusSize;
203 203 uint32_t sli_mode;
204 204 uint32_t sli_mode_mask;
205 205
206 206 cfg = &CFG;
207 207 vpd = &VPD;
208 208 MaxRbusSize = 0;
209 209 MaxIbusSize = 0;
210 210 read_rev_reset = 0;
211 211 hba->chan_count = MAX_RINGS;
212 212
213 213 if (hba->bus_type == SBUS_FC) {
214 214 (void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
215 215 }
216 216
217 217 /* Set the fw_check flag */
218 218 fw_check = cfg[CFG_FW_CHECK].current;
219 219
220 220 if ((fw_check & 0x04) ||
221 221 (hba->fw_flag & FW_UPDATE_KERNEL)) {
222 222 kern_update = 1;
223 223 }
224 224
225 225 hba->mbox_queue_flag = 0;
226 226 hba->sli.sli3.hc_copy = 0;
227 227 hba->fc_edtov = FF_DEF_EDTOV;
228 228 hba->fc_ratov = FF_DEF_RATOV;
229 229 hba->fc_altov = FF_DEF_ALTOV;
230 230 hba->fc_arbtov = FF_DEF_ARBTOV;
231 231
232 232 /*
233 233 * Get a buffer which will be used repeatedly for mailbox commands
234 234 */
235 235 mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
236 236
237 237 mb = (MAILBOX *)mbq;
238 238
239 239 reset:
240 240 /* Initialize sli mode based on configuration parameter */
241 241 switch (cfg[CFG_SLI_MODE].current) {
242 242 case 2: /* SLI2 mode */
243 243 sli_mode = EMLXS_HBA_SLI2_MODE;
244 244 sli_mode_mask = EMLXS_SLI2_MASK;
245 245 break;
246 246
247 247 case 3: /* SLI3 mode */
248 248 sli_mode = EMLXS_HBA_SLI3_MODE;
249 249 sli_mode_mask = EMLXS_SLI3_MASK;
250 250 break;
251 251
252 252 case 0: /* Best available */
253 253 case 1: /* Best available */
254 254 default:
255 255 if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
256 256 sli_mode = EMLXS_HBA_SLI3_MODE;
257 257 sli_mode_mask = EMLXS_SLI3_MASK;
258 258 } else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
259 259 sli_mode = EMLXS_HBA_SLI2_MODE;
260 260 sli_mode_mask = EMLXS_SLI2_MASK;
261 261 }
262 262 }
263 263 /* SBUS adapters only available in SLI2 */
264 264 if (hba->bus_type == SBUS_FC) {
265 265 sli_mode = EMLXS_HBA_SLI2_MODE;
266 266 sli_mode_mask = EMLXS_SLI2_MASK;
267 267 }
268 268
269 269 /* Reset & Initialize the adapter */
270 270 if (emlxs_sli3_hba_init(hba)) {
271 271 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
272 272 "Unable to init hba.");
273 273
274 274 rval = EIO;
275 275 goto failed;
276 276 }
277 277
278 278 #ifdef FMA_SUPPORT
279 279 /* Access handle validation */
280 280 if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
281 281 != DDI_FM_OK) ||
282 282 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
283 283 != DDI_FM_OK) ||
284 284 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
285 285 != DDI_FM_OK)) {
286 286 EMLXS_MSGF(EMLXS_CONTEXT,
287 287 &emlxs_invalid_access_handle_msg, NULL);
288 288
289 289 rval = EIO;
290 290 goto failed;
291 291 }
292 292 #endif /* FMA_SUPPORT */
293 293
294 294 /* Check for the LP9802 (This is a special case) */
295 295 /* We need to check for dual channel adapter */
296 296 if (hba->model_info.device_id == PCI_DEVICE_ID_LP9802) {
297 297 /* Try to determine if this is a DC adapter */
298 298 if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
299 299 if (MaxRbusSize == REDUCED_SRAM_CFG) {
300 300 /* LP9802DC */
301 301 for (i = 1; i < emlxs_pci_model_count; i++) {
302 302 if (emlxs_pci_model[i].id == LP9802DC) {
303 303 bcopy(&emlxs_pci_model[i],
304 304 &hba->model_info,
305 305 sizeof (emlxs_model_t));
306 306 break;
307 307 }
308 308 }
309 309 } else if (hba->model_info.id != LP9802) {
310 310 /* LP9802 */
311 311 for (i = 1; i < emlxs_pci_model_count; i++) {
312 312 if (emlxs_pci_model[i].id == LP9802) {
313 313 bcopy(&emlxs_pci_model[i],
314 314 &hba->model_info,
315 315 sizeof (emlxs_model_t));
316 316 break;
317 317 }
318 318 }
319 319 }
320 320 }
321 321 }
322 322
323 323 /*
324 324 * Setup and issue mailbox READ REV command
325 325 */
326 326 vpd->opFwRev = 0;
327 327 vpd->postKernRev = 0;
328 328 vpd->sli1FwRev = 0;
329 329 vpd->sli2FwRev = 0;
330 330 vpd->sli3FwRev = 0;
331 331 vpd->sli4FwRev = 0;
332 332
333 333 vpd->postKernName[0] = 0;
334 334 vpd->opFwName[0] = 0;
335 335 vpd->sli1FwName[0] = 0;
336 336 vpd->sli2FwName[0] = 0;
337 337 vpd->sli3FwName[0] = 0;
338 338 vpd->sli4FwName[0] = 0;
339 339
340 340 vpd->opFwLabel[0] = 0;
341 341 vpd->sli1FwLabel[0] = 0;
342 342 vpd->sli2FwLabel[0] = 0;
343 343 vpd->sli3FwLabel[0] = 0;
344 344 vpd->sli4FwLabel[0] = 0;
345 345
346 346 /* Sanity check */
347 347 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
348 348 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
349 349 "Adapter / SLI mode mismatch mask:x%x",
350 350 hba->model_info.sli_mask);
351 351
352 352 rval = EIO;
353 353 goto failed;
354 354 }
355 355
356 356 EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
357 357 emlxs_mb_read_rev(hba, mbq, 0);
358 358 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
359 359 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
360 360 "Unable to read rev. Mailbox cmd=%x status=%x",
361 361 mb->mbxCommand, mb->mbxStatus);
362 362
363 363 rval = EIO;
364 364 goto failed;
365 365 }
366 366
367 367 if (mb->un.varRdRev.rr == 0) {
368 368 /* Old firmware */
369 369 if (read_rev_reset == 0) {
370 370 read_rev_reset = 1;
371 371
372 372 goto reset;
373 373 } else {
374 374 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
375 375 "Outdated firmware detected.");
376 376 }
377 377
378 378 vpd->rBit = 0;
379 379 } else {
380 380 if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
381 381 if (read_rev_reset == 0) {
382 382 read_rev_reset = 1;
383 383
384 384 goto reset;
385 385 } else {
386 386 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
387 387 "Non-operational firmware detected. "
388 388 "type=%x",
389 389 mb->un.varRdRev.un.b.ProgType);
390 390 }
391 391 }
392 392
393 393 vpd->rBit = 1;
394 394 vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
395 395 bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
396 396 16);
397 397 vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
398 398 bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
399 399 16);
400 400
401 401 /*
402 402 * Lets try to read the SLI3 version
403 403 * Setup and issue mailbox READ REV(v3) command
404 404 */
405 405 EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
406 406
407 407 /* Reuse mbq from previous mbox */
408 408 bzero(mbq, sizeof (MAILBOXQ));
409 409
410 410 emlxs_mb_read_rev(hba, mbq, 1);
411 411
412 412 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
413 413 MBX_SUCCESS) {
414 414 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
415 415 "Unable to read rev (v3). Mailbox cmd=%x status=%x",
416 416 mb->mbxCommand, mb->mbxStatus);
417 417
418 418 rval = EIO;
419 419 goto failed;
420 420 }
421 421
422 422 if (mb->un.varRdRev.rf3) {
423 423 /*
424 424 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
425 425 * Not needed
426 426 */
427 427 vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
428 428 bcopy((char *)mb->un.varRdRev.sliFwName2,
429 429 vpd->sli3FwLabel, 16);
430 430 }
431 431 }
432 432
433 433 if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
434 434 if (vpd->sli2FwRev) {
435 435 sli_mode = EMLXS_HBA_SLI2_MODE;
436 436 sli_mode_mask = EMLXS_SLI2_MASK;
437 437 } else {
438 438 sli_mode = 0;
439 439 sli_mode_mask = 0;
440 440 }
441 441 }
442 442
443 443 else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
444 444 if (vpd->sli3FwRev) {
445 445 sli_mode = EMLXS_HBA_SLI3_MODE;
446 446 sli_mode_mask = EMLXS_SLI3_MASK;
447 447 } else {
448 448 sli_mode = 0;
449 449 sli_mode_mask = 0;
450 450 }
451 451 }
452 452
453 453 if (!(hba->model_info.sli_mask & sli_mode_mask)) {
454 454 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
455 455 "Firmware not available. sli-mode=%d",
456 456 cfg[CFG_SLI_MODE].current);
457 457
458 458 rval = EIO;
459 459 goto failed;
460 460 }
461 461
462 462 /* Save information as VPD data */
463 463 vpd->postKernRev = mb->un.varRdRev.postKernRev;
464 464 vpd->opFwRev = mb->un.varRdRev.opFwRev;
465 465 bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
466 466 vpd->biuRev = mb->un.varRdRev.biuRev;
467 467 vpd->smRev = mb->un.varRdRev.smRev;
468 468 vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
469 469 vpd->endecRev = mb->un.varRdRev.endecRev;
470 470 vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
471 471 vpd->fcphLow = mb->un.varRdRev.fcphLow;
472 472 vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
473 473 vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;
474 474
475 475 /* Decode FW names */
476 476 emlxs_decode_version(vpd->postKernRev, vpd->postKernName);
477 477 emlxs_decode_version(vpd->opFwRev, vpd->opFwName);
478 478 emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName);
479 479 emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName);
480 480 emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName);
481 481 emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName);
482 482
483 483 /* Decode FW labels */
484 484 emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1);
485 485 emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1);
486 486 emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1);
487 487 emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1);
488 488 emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1);
489 489
490 490 /* Reuse mbq from previous mbox */
491 491 bzero(mbq, sizeof (MAILBOXQ));
492 492
493 493 key = emlxs_get_key(hba, mbq);
494 494
495 495 /* Get adapter VPD information */
496 496 offset = 0;
497 497 bzero(vpd_data, sizeof (vpd_data));
498 498 vpd->port_index = (uint32_t)-1;
499 499
500 500 while (offset < DMP_VPD_SIZE) {
501 501 /* Reuse mbq from previous mbox */
502 502 bzero(mbq, sizeof (MAILBOXQ));
503 503
504 504 emlxs_mb_dump_vpd(hba, mbq, offset);
505 505 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
506 506 MBX_SUCCESS) {
507 507 /*
508 508 * Let it go through even if failed.
509 509 * Not all adapter's have VPD info and thus will
510 510 * fail here. This is not a problem
511 511 */
512 512
513 513 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
514 514 "No VPD found. offset=%x status=%x", offset,
515 515 mb->mbxStatus);
516 516 break;
517 517 } else {
518 518 if (mb->un.varDmp.ra == 1) {
519 519 uint32_t *lp1, *lp2;
520 520 uint32_t bsize;
521 521 uint32_t wsize;
522 522
523 523 /*
524 524 * mb->un.varDmp.word_cnt is actually byte
525 525 * count for the dump reply
526 526 */
527 527 bsize = mb->un.varDmp.word_cnt;
528 528
529 529 /* Stop if no data was received */
530 530 if (bsize == 0) {
531 531 break;
532 532 }
533 533
534 534 /* Check limit on byte size */
535 535 bsize = (bsize >
536 536 (sizeof (vpd_data) - offset)) ?
537 537 (sizeof (vpd_data) - offset) : bsize;
538 538
539 539 /*
540 540 * Convert size from bytes to words with
541 541 * minimum of 1 word
542 542 */
543 543 wsize = (bsize > 4) ? (bsize >> 2) : 1;
544 544
545 545 /*
546 546 * Transfer data into vpd_data buffer one
547 547 * word at a time
548 548 */
549 549 lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
550 550 lp2 = (uint32_t *)&vpd_data[offset];
551 551
552 552 for (i = 0; i < wsize; i++) {
553 553 status = *lp1++;
554 554 *lp2++ = BE_SWAP32(status);
555 555 }
556 556
557 557 /* Increment total byte count saved */
558 558 offset += (wsize << 2);
559 559
560 560 /*
561 561 * Stop if less than a full transfer was
562 562 * received
563 563 */
564 564 if (wsize < DMP_VPD_DUMP_WCOUNT) {
565 565 break;
566 566 }
567 567
568 568 } else {
569 569 EMLXS_MSGF(EMLXS_CONTEXT,
570 570 &emlxs_init_debug_msg,
571 571 "No VPD acknowledgment. offset=%x",
572 572 offset);
573 573 break;
574 574 }
575 575 }
576 576
577 577 }
578 578
579 579 if (vpd_data[0]) {
580 580 (void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);
581 581
582 582 /*
583 583 * If there is a VPD part number, and it does not
584 584 * match the current default HBA model info,
585 585 * replace the default data with an entry that
586 586 * does match.
587 587 *
588 588 * After emlxs_parse_vpd model holds the VPD value
589 589 * for V2 and part_num hold the value for PN. These
590 590 * 2 values are NOT necessarily the same.
591 591 */
592 592
593 593 rval = 0;
594 594 if ((vpd->model[0] != 0) &&
595 595 (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
596 596
597 597 /* First scan for a V2 match */
598 598
599 599 for (i = 1; i < emlxs_pci_model_count; i++) {
600 600 if (strcmp(&vpd->model[0],
601 601 emlxs_pci_model[i].model) == 0) {
602 602 bcopy(&emlxs_pci_model[i],
603 603 &hba->model_info,
604 604 sizeof (emlxs_model_t));
605 605 rval = 1;
606 606 break;
607 607 }
608 608 }
609 609 }
610 610
611 611 if (!rval && (vpd->part_num[0] != 0) &&
612 612 (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
613 613
614 614 /* Next scan for a PN match */
615 615
616 616 for (i = 1; i < emlxs_pci_model_count; i++) {
617 617 if (strcmp(&vpd->part_num[0],
618 618 emlxs_pci_model[i].model) == 0) {
619 619 bcopy(&emlxs_pci_model[i],
620 620 &hba->model_info,
621 621 sizeof (emlxs_model_t));
622 622 break;
623 623 }
624 624 }
625 625 }
626 626
627 627 /*
628 628 * Now lets update hba->model_info with the real
629 629 * VPD data, if any.
630 630 */
631 631
632 632 /*
633 633 * Replace the default model description with vpd data
634 634 */
635 635 if (vpd->model_desc[0] != 0) {
636 636 (void) strcpy(hba->model_info.model_desc,
637 637 vpd->model_desc);
638 638 }
639 639
640 640 /* Replace the default model with vpd data */
641 641 if (vpd->model[0] != 0) {
642 642 (void) strcpy(hba->model_info.model, vpd->model);
643 643 }
644 644
645 645 /* Replace the default program types with vpd data */
646 646 if (vpd->prog_types[0] != 0) {
647 647 emlxs_parse_prog_types(hba, vpd->prog_types);
648 648 }
649 649 }
650 650
651 651 /*
652 652 * Since the adapter model may have changed with the vpd data
653 653 * lets double check if adapter is not supported
654 654 */
655 655 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
656 656 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
657 657 "Unsupported adapter found. "
658 658 "Id:%d Device id:0x%x SSDID:0x%x Model:%s",
659 659 hba->model_info.id, hba->model_info.device_id,
660 660 hba->model_info.ssdid, hba->model_info.model);
661 661
662 662 rval = EIO;
663 663 goto failed;
664 664 }
665 665
666 666 /* Read the adapter's wakeup parms */
667 667 (void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
668 668 emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
669 669 vpd->boot_version);
670 670
671 671 /* Get fcode version property */
672 672 emlxs_get_fcode_version(hba);
673 673
674 674 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
675 675 "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
676 676 vpd->opFwRev, vpd->sli1FwRev);
677 677
678 678 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
679 679 "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
680 680 vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
681 681
682 682 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
683 683 "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
684 684
685 685 /*
686 686 * If firmware checking is enabled and the adapter model indicates
687 687 * a firmware image, then perform firmware version check
688 688 */
689 689 hba->fw_flag = 0;
690 690 hba->fw_timer = 0;
691 691
692 692 if (((fw_check & 0x1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
693 693 hba->model_info.fwid) || ((fw_check & 0x2) &&
694 694 hba->model_info.fwid)) {
695 695 emlxs_firmware_t *fw;
696 696
697 697 /* Find firmware image indicated by adapter model */
698 698 fw = NULL;
699 699 for (i = 0; i < emlxs_fw_count; i++) {
700 700 if (emlxs_fw_table[i].id == hba->model_info.fwid) {
701 701 fw = &emlxs_fw_table[i];
702 702 break;
703 703 }
704 704 }
705 705
706 706 /*
707 707 * If the image was found, then verify current firmware
708 708 * versions of adapter
709 709 */
710 710 if (fw) {
711 711 if (!kern_update &&
712 712 ((fw->kern && (vpd->postKernRev != fw->kern)) ||
713 713 (fw->stub && (vpd->opFwRev != fw->stub)))) {
714 714
715 715 hba->fw_flag |= FW_UPDATE_NEEDED;
716 716
717 717 } else if ((fw->kern && (vpd->postKernRev !=
718 718 fw->kern)) ||
719 719 (fw->stub && (vpd->opFwRev != fw->stub)) ||
720 720 (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) ||
721 721 (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) ||
722 722 (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
723 723 (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
724 724 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
725 725 "Firmware update needed. "
726 726 "Updating. id=%d fw=%d",
727 727 hba->model_info.id, hba->model_info.fwid);
728 728
729 729 #ifdef MODFW_SUPPORT
730 730 /*
731 731 * Load the firmware image now
732 732 * If MODFW_SUPPORT is not defined, the
733 733 * firmware image will already be defined
734 734 * in the emlxs_fw_table
735 735 */
736 736 emlxs_fw_load(hba, fw);
737 737 #endif /* MODFW_SUPPORT */
738 738
739 739 if (fw->image && fw->size) {
740 740 if (emlxs_fw_download(hba,
741 741 (char *)fw->image, fw->size, 0)) {
742 742 EMLXS_MSGF(EMLXS_CONTEXT,
743 743 &emlxs_init_msg,
744 744 "Firmware update failed.");
745 745
746 746 hba->fw_flag |=
747 747 FW_UPDATE_NEEDED;
748 748 }
749 749 #ifdef MODFW_SUPPORT
750 750 /*
751 751 * Unload the firmware image from
752 752 * kernel memory
753 753 */
754 754 emlxs_fw_unload(hba, fw);
755 755 #endif /* MODFW_SUPPORT */
756 756
757 757 fw_check = 0;
758 758
759 759 goto reset;
760 760 }
761 761
762 762 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
763 763 "Firmware image unavailable.");
764 764 } else {
765 765 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
766 766 "Firmware update not needed.");
767 767 }
768 768 } else {
769 769 /* This should not happen */
770 770
771 771 /*
772 772 * This means either the adapter database is not
773 773 * correct or a firmware image is missing from the
774 774 * compile
775 775 */
776 776 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
777 777 "Firmware image unavailable. id=%d fw=%d",
778 778 hba->model_info.id, hba->model_info.fwid);
779 779 }
780 780 }
781 781
782 782 /*
783 783 * Add our interrupt routine to kernel's interrupt chain & enable it
784 784 * If MSI is enabled this will cause Solaris to program the MSI address
785 785 * and data registers in PCI config space
786 786 */
787 787 if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
788 788 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
789 789 "Unable to add interrupt(s).");
790 790
791 791 rval = EIO;
792 792 goto failed;
793 793 }
794 794
795 795 EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
796 796
797 797 /* Reuse mbq from previous mbox */
798 798 bzero(mbq, sizeof (MAILBOXQ));
799 799
800 800 (void) emlxs_mb_config_port(hba, mbq, sli_mode, key);
801 801 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
802 802 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
803 803 "Unable to configure port. "
804 804 "Mailbox cmd=%x status=%x slimode=%d key=%x",
805 805 mb->mbxCommand, mb->mbxStatus, sli_mode, key);
806 806
807 807 for (sli_mode--; sli_mode > 0; sli_mode--) {
808 808 /* Check if sli_mode is supported by this adapter */
809 809 if (hba->model_info.sli_mask &
810 810 EMLXS_SLI_MASK(sli_mode)) {
811 811 sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
812 812 break;
813 813 }
814 814 }
815 815
816 816 if (sli_mode) {
817 817 fw_check = 0;
818 818
819 819 goto reset;
820 820 }
821 821
822 822 hba->flag &= ~FC_SLIM2_MODE;
823 823
824 824 rval = EIO;
825 825 goto failed;
826 826 }
827 827
828 828 /* Check if SLI3 mode was achieved */
829 829 if (mb->un.varCfgPort.rMA &&
830 830 (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {
831 831
832 832 if (mb->un.varCfgPort.vpi_max > 1) {
833 833 hba->flag |= FC_NPIV_ENABLED;
834 834
835 835 if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
836 836 hba->vpi_max =
837 837 min(mb->un.varCfgPort.vpi_max,
838 838 MAX_VPORTS - 1);
839 839 } else {
840 840 hba->vpi_max =
841 841 min(mb->un.varCfgPort.vpi_max,
842 842 MAX_VPORTS_LIMITED - 1);
843 843 }
844 844 }
845 845
846 846 #if (EMLXS_MODREV >= EMLXS_MODREV5)
847 847 hba->fca_tran->fca_num_npivports =
848 848 (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
849 849 #endif /* >= EMLXS_MODREV5 */
850 850
851 851 if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
852 852 hba->flag |= FC_HBQ_ENABLED;
853 853 }
854 854
855 855 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
856 856 "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
857 857 } else {
858 858 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
859 859 "SLI2 mode: flag=%x", hba->flag);
860 860 sli_mode = EMLXS_HBA_SLI2_MODE;
861 861 sli_mode_mask = EMLXS_SLI2_MASK;
862 862 hba->sli_mode = sli_mode;
863 863 }
864 864
865 865 /* Get and save the current firmware version (based on sli_mode) */
866 866 emlxs_decode_firmware_rev(hba, vpd);
867 867
868 868 emlxs_pcix_mxr_update(hba, 0);
869 869
870 870 /* Reuse mbq from previous mbox */
871 871 bzero(mbq, sizeof (MAILBOXQ));
872 872
873 873 emlxs_mb_read_config(hba, mbq);
874 874 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
875 875 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
876 876 "Unable to read configuration. Mailbox cmd=%x status=%x",
877 877 mb->mbxCommand, mb->mbxStatus);
878 878
879 879 rval = EIO;
880 880 goto failed;
881 881 }
882 882
883 883 /* Save the link speed capabilities */
884 884 vpd->link_speed = (uint16_t)mb->un.varRdConfig.lmt;
885 885 emlxs_process_link_speed(hba);
886 886
887 887 /* Set the max node count */
888 888 if (cfg[CFG_NUM_NODES].current > 0) {
889 889 hba->max_nodes =
890 890 min(cfg[CFG_NUM_NODES].current,
891 891 mb->un.varRdConfig.max_rpi);
892 892 } else {
893 893 hba->max_nodes = mb->un.varRdConfig.max_rpi;
894 894 }
895 895
896 896 /* Set the io throttle */
897 897 hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;
898 898 hba->max_iotag = mb->un.varRdConfig.max_xri;
899 899
900 900 /*
901 901 * Allocate some memory for buffers
902 902 */
903 903 if (emlxs_mem_alloc_buffer(hba) == 0) {
904 904 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
905 905 "Unable to allocate memory buffers.");
906 906
907 907 EMLXS_STATE_CHANGE(hba, FC_ERROR);
908 908 return (ENOMEM);
909 909 }
910 910
911 911 /*
912 912 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
913 913 */
914 914 if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) ||
915 915 ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0)) {
916 916 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
917 917 "Unable to allocate diag buffers.");
918 918
919 919 rval = ENOMEM;
920 920 goto failed;
921 921 }
922 922
923 923 bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
924 924 MEM_ELSBUF_SIZE);
925 925 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
926 926 DDI_DMA_SYNC_FORDEV);
927 927
928 928 bzero(mp1->virt, MEM_ELSBUF_SIZE);
929 929 EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
930 930 DDI_DMA_SYNC_FORDEV);
931 931
932 932 /* Reuse mbq from previous mbox */
933 933 bzero(mbq, sizeof (MAILBOXQ));
934 934
935 935 (void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys);
936 936
937 937 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
938 938 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
939 939 "Unable to run BIU diag. Mailbox cmd=%x status=%x",
940 940 mb->mbxCommand, mb->mbxStatus);
941 941
942 942 rval = EIO;
943 943 goto failed;
944 944 }
945 945
946 946 EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
947 947 DDI_DMA_SYNC_FORKERNEL);
948 948
949 949 #ifdef FMA_SUPPORT
950 950 if (mp->dma_handle) {
951 951 if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
952 952 != DDI_FM_OK) {
953 953 EMLXS_MSGF(EMLXS_CONTEXT,
954 954 &emlxs_invalid_dma_handle_msg,
955 955 "emlxs_sli3_online: hdl=%p",
956 956 mp->dma_handle);
957 957 rval = EIO;
958 958 goto failed;
959 959 }
960 960 }
961 961
962 962 if (mp1->dma_handle) {
963 963 if (emlxs_fm_check_dma_handle(hba, mp1->dma_handle)
964 964 != DDI_FM_OK) {
965 965 EMLXS_MSGF(EMLXS_CONTEXT,
966 966 &emlxs_invalid_dma_handle_msg,
967 967 "emlxs_sli3_online: hdl=%p",
968 968 mp1->dma_handle);
969 969 rval = EIO;
970 970 goto failed;
971 971 }
972 972 }
973 973 #endif /* FMA_SUPPORT */
974 974
975 975 outptr = mp->virt;
976 976 inptr = mp1->virt;
977 977
978 978 for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
979 979 if (*outptr++ != *inptr++) {
980 980 outptr--;
981 981 inptr--;
982 982
983 983 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
984 984 "BIU diagnostic failed. "
985 985 "offset %x value %x should be %x.",
986 986 i, (uint32_t)*inptr, (uint32_t)*outptr);
987 987
988 988 rval = EIO;
989 989 goto failed;
990 990 }
991 991 }
992 992
993 993 /* Free the buffers since we were polling */
994 994 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
995 995 mp = NULL;
996 996 emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
997 997 mp1 = NULL;
998 998
999 999 hba->channel_fcp = FC_FCP_RING;
1000 1000 hba->channel_els = FC_ELS_RING;
1001 1001 hba->channel_ip = FC_IP_RING;
1002 1002 hba->channel_ct = FC_CT_RING;
1003 1003 hba->sli.sli3.ring_count = MAX_RINGS;
1004 1004
1005 1005 hba->channel_tx_count = 0;
1006 1006 hba->io_count = 0;
1007 1007 hba->fc_iotag = 1;
1008 1008
1009 1009 /*
1010 1010 * OutOfRange (oor) iotags are used for abort or
1011 1011 * close XRI commands
1012 1012 */
1013 1013 hba->fc_oor_iotag = hba->max_iotag;
1014 1014
1015 1015 for (i = 0; i < hba->chan_count; i++) {
1016 1016 cp = &hba->chan[i];
1017 1017
1018 1018 /* 1 to 1 mapping between ring and channel */
1019 1019 cp->iopath = (void *)&hba->sli.sli3.ring[i];
1020 1020
1021 1021 cp->hba = hba;
1022 1022 cp->channelno = i;
1023 1023 }
1024 1024
1025 1025 /*
1026 1026 * Setup and issue mailbox CONFIGURE RING command
1027 1027 */
1028 1028 for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) {
1029 1029 /*
1030 1030 * Initialize cmd/rsp ring pointers
1031 1031 */
1032 1032 rp = &hba->sli.sli3.ring[i];
1033 1033
1034 1034 /* 1 to 1 mapping between ring and channel */
1035 1035 rp->channelp = &hba->chan[i];
1036 1036
1037 1037 rp->hba = hba;
1038 1038 rp->ringno = (uint8_t)i;
1039 1039
1040 1040 rp->fc_cmdidx = 0;
1041 1041 rp->fc_rspidx = 0;
1042 1042 EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING);
1043 1043
1044 1044 /* Reuse mbq from previous mbox */
1045 1045 bzero(mbq, sizeof (MAILBOXQ));
1046 1046
1047 1047 emlxs_mb_config_ring(hba, i, mbq);
1048 1048 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1049 1049 MBX_SUCCESS) {
1050 1050 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1051 1051 "Unable to configure ring. "
1052 1052 "Mailbox cmd=%x status=%x",
1053 1053 mb->mbxCommand, mb->mbxStatus);
1054 1054
1055 1055 rval = EIO;
1056 1056 goto failed;
1057 1057 }
1058 1058 }
1059 1059
1060 1060 /*
1061 1061 * Setup link timers
1062 1062 */
1063 1063 EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1064 1064
1065 1065 /* Reuse mbq from previous mbox */
1066 1066 bzero(mbq, sizeof (MAILBOXQ));
1067 1067
1068 1068 emlxs_mb_config_link(hba, mbq);
1069 1069 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1070 1070 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1071 1071 "Unable to configure link. Mailbox cmd=%x status=%x",
1072 1072 mb->mbxCommand, mb->mbxStatus);
1073 1073
1074 1074 rval = EIO;
1075 1075 goto failed;
1076 1076 }
1077 1077
1078 1078 #ifdef MAX_RRDY_SUPPORT
1079 1079 /* Set MAX_RRDY if one is provided */
1080 1080 if (cfg[CFG_MAX_RRDY].current) {
1081 1081
1082 1082 /* Reuse mbq from previous mbox */
1083 1083 bzero(mbq, sizeof (MAILBOXQ));
1084 1084
1085 1085 emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412,
1086 1086 cfg[CFG_MAX_RRDY].current);
1087 1087
1088 1088 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1089 1089 MBX_SUCCESS) {
1090 1090 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1091 1091 "MAX_RRDY: Unable to set. status=%x " \
1092 1092 "value=%d",
1093 1093 mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
1094 1094 } else {
1095 1095 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1096 1096 "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
1097 1097 }
1098 1098 }
1099 1099 #endif /* MAX_RRDY_SUPPORT */
1100 1100
1101 1101 /* Reuse mbq from previous mbox */
1102 1102 bzero(mbq, sizeof (MAILBOXQ));
1103 1103
1104 1104 /*
1105 1105 * We need to get login parameters for NID
1106 1106 */
1107 1107 (void) emlxs_mb_read_sparam(hba, mbq);
1108 1108 mp = (MATCHMAP *)mbq->bp;
1109 1109 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1110 1110 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1111 1111 "Unable to read parameters. Mailbox cmd=%x status=%x",
1112 1112 mb->mbxCommand, mb->mbxStatus);
1113 1113
1114 1114 rval = EIO;
1115 1115 goto failed;
1116 1116 }
1117 1117
1118 1118 /* Free the buffer since we were polling */
1119 1119 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1120 1120 mp = NULL;
1121 1121
1122 1122 /* If no serial number in VPD data, then use the WWPN */
1123 1123 if (vpd->serial_num[0] == 0) {
1124 1124 outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1125 1125 for (i = 0; i < 12; i++) {
1126 1126 status = *outptr++;
1127 1127 j = ((status & 0xf0) >> 4);
1128 1128 if (j <= 9) {
1129 1129 vpd->serial_num[i] =
1130 1130 (char)((uint8_t)'0' + (uint8_t)j);
1131 1131 } else {
1132 1132 vpd->serial_num[i] =
1133 1133 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1134 1134 }
1135 1135
1136 1136 i++;
1137 1137 j = (status & 0xf);
1138 1138 if (j <= 9) {
1139 1139 vpd->serial_num[i] =
1140 1140 (char)((uint8_t)'0' + (uint8_t)j);
1141 1141 } else {
1142 1142 vpd->serial_num[i] =
1143 1143 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1144 1144 }
1145 1145 }
1146 1146
1147 1147 /*
1148 1148 * Set port number and port index to zero
1149 1149 * The WWN's are unique to each port and therefore port_num
1150 1150 * must equal zero. This effects the hba_fru_details structure
1151 1151 * in fca_bind_port()
1152 1152 */
1153 1153 vpd->port_num[0] = 0;
1154 1154 vpd->port_index = 0;
1155 1155 }
1156 1156
1157 1157 /*
1158 1158 * Make first attempt to set a port index
1159 1159 * Check if this is a multifunction adapter
1160 1160 */
1161 1161 if ((vpd->port_index == (uint32_t)-1) &&
1162 1162 (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
1163 1163 char *buffer;
1164 1164 int32_t i;
1165 1165
1166 1166 /*
1167 1167 * The port address looks like this:
1168 1168 * 1 - for port index 0
1169 1169 * 1,1 - for port index 1
1170 1170 * 1,2 - for port index 2
1171 1171 */
1172 1172 buffer = ddi_get_name_addr(hba->dip);
1173 1173
1174 1174 if (buffer) {
1175 1175 vpd->port_index = 0;
1176 1176
1177 1177 /* Reverse scan for a comma */
1178 1178 for (i = strlen(buffer) - 1; i > 0; i--) {
1179 1179 if (buffer[i] == ',') {
1180 1180 /* Comma found - set index now */
1181 1181 vpd->port_index =
1182 1182 emlxs_strtol(&buffer[i + 1], 10);
1183 1183 break;
1184 1184 }
1185 1185 }
1186 1186 }
1187 1187 }
1188 1188
1189 1189 /* Make final attempt to set a port index */
1190 1190 if (vpd->port_index == (uint32_t)-1) {
1191 1191 dev_info_t *p_dip;
1192 1192 dev_info_t *c_dip;
1193 1193
1194 1194 p_dip = ddi_get_parent(hba->dip);
1195 1195 c_dip = ddi_get_child(p_dip);
1196 1196
1197 1197 vpd->port_index = 0;
1198 1198 while (c_dip && (hba->dip != c_dip)) {
1199 1199 c_dip = ddi_get_next_sibling(c_dip);
1200 1200 vpd->port_index++;
1201 1201 }
1202 1202 }
1203 1203
1204 1204 if (vpd->port_num[0] == 0) {
1205 1205 if (hba->model_info.channels > 1) {
1206 1206 (void) sprintf(vpd->port_num, "%d", vpd->port_index);
1207 1207 }
1208 1208 }
1209 1209
1210 1210 if (vpd->id[0] == 0) {
1211 1211 (void) strcpy(vpd->id, hba->model_info.model_desc);
1212 1212 }
1213 1213
1214 1214 if (vpd->manufacturer[0] == 0) {
1215 1215 (void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
1216 1216 }
1217 1217
1218 1218 if (vpd->part_num[0] == 0) {
1219 1219 (void) strcpy(vpd->part_num, hba->model_info.model);
1220 1220 }
1221 1221
1222 1222 if (vpd->model_desc[0] == 0) {
1223 1223 (void) strcpy(vpd->model_desc, hba->model_info.model_desc);
1224 1224 }
1225 1225
1226 1226 if (vpd->model[0] == 0) {
1227 1227 (void) strcpy(vpd->model, hba->model_info.model);
1228 1228 }
1229 1229
1230 1230 if (vpd->prog_types[0] == 0) {
1231 1231 emlxs_build_prog_types(hba, vpd->prog_types);
1232 1232 }
1233 1233
1234 1234 /* Create the symbolic names */
1235 1235 (void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
1236 1236 hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1237 1237 (char *)utsname.nodename);
1238 1238
1239 1239 (void) sprintf(hba->spn,
1240 1240 "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1241 1241 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1242 1242 hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1243 1243 hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1244 1244
1245 1245 if (cfg[CFG_NETWORK_ON].current) {
1246 1246 if ((hba->sparam.portName.nameType != NAME_IEEE) ||
1247 1247 (hba->sparam.portName.IEEEextMsn != 0) ||
1248 1248 (hba->sparam.portName.IEEEextLsb != 0)) {
1249 1249
1250 1250 cfg[CFG_NETWORK_ON].current = 0;
1251 1251
1252 1252 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1253 1253 "WWPN doesn't conform to IP profile: nameType=%x",
1254 1254 hba->sparam.portName.nameType);
1255 1255 }
1256 1256
1257 1257 /* Reuse mbq from previous mbox */
1258 1258 bzero(mbq, sizeof (MAILBOXQ));
1259 1259
1260 1260 /* Issue CONFIG FARP */
1261 1261 emlxs_mb_config_farp(hba, mbq);
1262 1262 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1263 1263 MBX_SUCCESS) {
1264 1264 /*
1265 1265 * Let it go through even if failed.
1266 1266 */
1267 1267 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1268 1268 "Unable to configure FARP. "
1269 1269 "Mailbox cmd=%x status=%x",
1270 1270 mb->mbxCommand, mb->mbxStatus);
1271 1271 }
1272 1272 }
1273 1273 #ifdef MSI_SUPPORT
1274 1274 /* Configure MSI map if required */
1275 1275 if (hba->intr_count > 1) {
1276 1276
1277 1277 if (hba->intr_type == DDI_INTR_TYPE_MSIX) {
1278 1278 /* always start from 0 */
1279 1279 hba->last_msiid = 0;
1280 1280 }
1281 1281
1282 1282 /* Reuse mbq from previous mbox */
1283 1283 bzero(mbq, sizeof (MAILBOXQ));
1284 1284
1285 1285 emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count);
1286 1286
1287 1287 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1288 1288 MBX_SUCCESS) {
1289 1289 goto msi_configured;
1290 1290 }
1291 1291
1292 1292 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1293 1293 "Unable to config MSIX. Mailbox cmd=0x%x status=0x%x",
1294 1294 mb->mbxCommand, mb->mbxStatus);
1295 1295
1296 1296 /* Reuse mbq from previous mbox */
1297 1297 bzero(mbq, sizeof (MAILBOXQ));
1298 1298
1299 1299 emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count);
1300 1300
1301 1301 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1302 1302 MBX_SUCCESS) {
1303 1303 goto msi_configured;
1304 1304 }
1305 1305
1306 1306
1307 1307 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1308 1308 "Unable to config MSI. Mailbox cmd=0x%x status=0x%x",
1309 1309 mb->mbxCommand, mb->mbxStatus);
1310 1310
1311 1311 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1312 1312 "Attempting single interrupt mode...");
1313 1313
1314 1314 /* First cleanup old interrupts */
1315 1315 (void) emlxs_msi_remove(hba);
1316 1316 (void) emlxs_msi_uninit(hba);
1317 1317
1318 1318 status = emlxs_msi_init(hba, 1);
1319 1319
1320 1320 if (status != DDI_SUCCESS) {
1321 1321 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1322 1322 "Unable to initialize interrupt. status=%d",
1323 1323 status);
1324 1324
1325 1325 rval = EIO;
1326 1326 goto failed;
1327 1327 }
1328 1328
1329 1329 /*
1330 1330 * Reset adapter - The adapter needs to be reset because
1331 1331 * the bus cannot handle the MSI change without handshaking
1332 1332 * with the adapter again
1333 1333 */
1334 1334
1335 1335 (void) emlxs_mem_free_buffer(hba);
1336 1336 fw_check = 0;
1337 1337 goto reset;
1338 1338 }
1339 1339
1340 1340 msi_configured:
1341 1341
1342 1342
1343 1343 if ((hba->intr_count >= 1) &&
1344 1344 (hba->sli_mode == EMLXS_HBA_SLI3_MODE)) {
1345 1345 /* intr_count is a sequence of msi id */
1346 1346 /* Setup msi2chan[msi_id] */
1347 1347 for (i = 0; i < hba->intr_count; i ++) {
1348 1348 hba->msi2chan[i] = i;
1349 1349 if (i >= hba->chan_count)
1350 1350 hba->msi2chan[i] = (i - hba->chan_count);
1351 1351 }
1352 1352 }
1353 1353 #endif /* MSI_SUPPORT */
1354 1354
1355 1355 /*
1356 1356 * We always disable the firmware traffic cop feature
1357 1357 */
1358 1358 if (emlxs_disable_traffic_cop) {
1359 1359 /* Reuse mbq from previous mbox */
1360 1360 bzero(mbq, sizeof (MAILBOXQ));
1361 1361
1362 1362 emlxs_disable_tc(hba, mbq);
1363 1363 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1364 1364 MBX_SUCCESS) {
1365 1365 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1366 1366 "Unable to disable traffic cop. "
1367 1367 "Mailbox cmd=%x status=%x",
1368 1368 mb->mbxCommand, mb->mbxStatus);
1369 1369
1370 1370 rval = EIO;
1371 1371 goto failed;
1372 1372 }
1373 1373 }
1374 1374
1375 1375
1376 1376 /* Reuse mbq from previous mbox */
1377 1377 bzero(mbq, sizeof (MAILBOXQ));
1378 1378
1379 1379 /* Register for async events */
1380 1380 emlxs_mb_async_event(hba, mbq);
1381 1381 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1382 1382 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1383 1383 "Async events disabled. Mailbox status=%x",
1384 1384 mb->mbxStatus);
1385 1385 } else {
1386 1386 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1387 1387 "Async events enabled.");
1388 1388 hba->flag |= FC_ASYNC_EVENTS;
1389 1389 }
1390 1390
1391 1391 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1392 1392
1393 1393 emlxs_sli3_enable_intr(hba);
1394 1394
1395 1395 if (hba->flag & FC_HBQ_ENABLED) {
1396 1396 if (hba->tgt_mode) {
1397 1397 if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
1398 1398 EMLXS_MSGF(EMLXS_CONTEXT,
1399 1399 &emlxs_init_failed_msg,
1400 1400 "Unable to setup FCT HBQ.");
1401 1401
1402 1402 rval = ENOMEM;
1403 1403 goto failed;
1404 1404 }
1405 1405 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1406 1406 "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1407 1407 }
1408 1408
1409 1409 if (cfg[CFG_NETWORK_ON].current) {
1410 1410 if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
1411 1411 EMLXS_MSGF(EMLXS_CONTEXT,
1412 1412 &emlxs_init_failed_msg,
1413 1413 "Unable to setup IP HBQ.");
1414 1414
1415 1415 rval = ENOMEM;
1416 1416 goto failed;
1417 1417 }
1418 1418 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1419 1419 "IP Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1420 1420 }
1421 1421
1422 1422 if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
1423 1423 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1424 1424 "Unable to setup ELS HBQ.");
1425 1425 rval = ENOMEM;
1426 1426 goto failed;
1427 1427 }
1428 1428 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1429 1429 "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1430 1430
1431 1431 if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
1432 1432 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1433 1433 "Unable to setup CT HBQ.");
1434 1434
1435 1435 rval = ENOMEM;
1436 1436 goto failed;
1437 1437 }
1438 1438 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1439 1439 "CT Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1440 1440 } else {
1441 1441 if (hba->tgt_mode) {
1442 1442 /* Post the FCT unsol buffers */
1443 1443 rp = &hba->sli.sli3.ring[FC_FCT_RING];
1444 1444 for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
1445 1445 (void) emlxs_post_buffer(hba, rp, 2);
1446 1446 }
1447 1447 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1448 1448 "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1449 1449 }
1450 1450
1451 1451 if (cfg[CFG_NETWORK_ON].current) {
1452 1452 /* Post the IP unsol buffers */
1453 1453 rp = &hba->sli.sli3.ring[FC_IP_RING];
1454 1454 for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
1455 1455 (void) emlxs_post_buffer(hba, rp, 2);
1456 1456 }
1457 1457 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1458 1458 "IP Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1459 1459 }
1460 1460
1461 1461 /* Post the ELS unsol buffers */
1462 1462 rp = &hba->sli.sli3.ring[FC_ELS_RING];
1463 1463 for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
1464 1464 (void) emlxs_post_buffer(hba, rp, 2);
1465 1465 }
1466 1466 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1467 1467 "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1468 1468
1469 1469
1470 1470 /* Post the CT unsol buffers */
1471 1471 rp = &hba->sli.sli3.ring[FC_CT_RING];
1472 1472 for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
1473 1473 (void) emlxs_post_buffer(hba, rp, 2);
1474 1474 }
1475 1475 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1476 1476 "CT Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1477 1477 }
1478 1478
1479 1479 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1480 1480
1481 1481 /*
1482 1482 * Setup and issue mailbox INITIALIZE LINK command
1483 1483 * At this point, the interrupt will be generated by the HW
1484 1484 * Do this only if persist-linkdown is not set
1485 1485 */
1486 1486 if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
1487 1487 mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1);
1488 1488 if (mbq == NULL) {
1489 1489 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1490 1490 "Unable to allocate mailbox buffer.");
1491 1491
1492 1492 rval = EIO;
1493 1493 goto failed;
1494 1494 }
1495 1495
1496 1496 emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current,
1497 1497 cfg[CFG_LINK_SPEED].current);
1498 1498
1499 1499 rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1500 1500 if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1501 1501 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1502 1502 "Unable to initialize link. " \
1503 1503 "Mailbox cmd=%x status=%x",
1504 1504 mb->mbxCommand, mb->mbxStatus);
1505 1505
1506 1506 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
1507 1507 mbq = NULL;
1508 1508 rval = EIO;
1509 1509 goto failed;
1510 1510 }
1511 1511
1512 1512 /*
1513 1513 * Enable link attention interrupt
1514 1514 */
1515 1515 emlxs_enable_latt(hba);
1516 1516
1517 1517 /* Wait for link to come up */
1518 1518 i = cfg[CFG_LINKUP_DELAY].current;
1519 1519 while (i && (hba->state < FC_LINK_UP)) {
1520 1520 /* Check for hardware error */
1521 1521 if (hba->state == FC_ERROR) {
1522 1522 EMLXS_MSGF(EMLXS_CONTEXT,
1523 1523 &emlxs_init_failed_msg,
1524 1524 "Adapter error.");
1525 1525
1526 1526 mbq = NULL;
1527 1527 rval = EIO;
1528 1528 goto failed;
1529 1529 }
1530 1530
1531 1531 DELAYMS(1000);
1532 1532 i--;
1533 1533 }
1534 1534 } else {
1535 1535 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1536 1536 }
1537 1537
1538 1538 /*
1539 1539 * The leadvile driver will now handle the FLOGI at the driver level
1540 1540 */
1541 1541
1542 1542 return (0);
1543 1543
1544 1544 failed:
1545 1545
1546 1546 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1547 1547
1548 1548 if (hba->intr_flags & EMLXS_MSI_ADDED) {
1549 1549 (void) EMLXS_INTR_REMOVE(hba);
1550 1550 }
1551 1551
1552 1552 if (mp) {
1553 1553 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1554 1554 mp = NULL;
1555 1555 }
1556 1556
1557 1557 if (mp1) {
1558 1558 emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
1559 1559 mp1 = NULL;
1560 1560 }
1561 1561
1562 1562 (void) emlxs_mem_free_buffer(hba);
1563 1563
1564 1564 if (mbq) {
1565 1565 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1566 1566 mbq = NULL;
1567 1567 mb = NULL;
1568 1568 }
1569 1569
1570 1570 if (rval == 0) {
1571 1571 rval = EIO;
1572 1572 }
1573 1573
1574 1574 return (rval);
1575 1575
1576 1576 } /* emlxs_sli3_online() */
1577 1577
1578 1578
/*
 * Take the adapter offline.  This is the inverse of emlxs_sli3_online():
 * the hardware is stopped first, then the driver's shared memory pools
 * are released.  No return value; errors from buffer teardown are
 * deliberately ignored.
 */
static void
emlxs_sli3_offline(emlxs_hba_t *hba)
{
	/* Reverse emlxs_sli3_online */

	/* Kill the adapter first so it can no longer DMA into the */
	/* buffers we are about to free */
	emlxs_sli3_hba_kill(hba);

	/* Free driver shared memory */
	(void) emlxs_mem_free_buffer(hba);

} /* emlxs_sli3_offline() */
1591 1591
1592 1592
/*
 * Map the adapter's register spaces and allocate the host-side SLIM2
 * DMA buffer.
 *
 * SBUS adapters require five register mappings (SLIM, DFLY CSR, Fcode
 * flash, TITAN core and TITAN CSR); PCI adapters require only the SLIM
 * and the memory-mapped control registers.  Each mapping is skipped
 * when its access handle is already non-zero, so this routine may be
 * called more than once without remapping.
 *
 * Returns 0 on success.  On any failure every partial mapping is
 * released via emlxs_sli3_unmap_hdw() and ENOMEM is returned.
 */
static int
emlxs_sli3_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	dev_info_t *dip;
	ddi_device_acc_attr_t dev_attr;
	int status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {

		/* SLIM (shared memory) region */
		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup SLIM failed. "
				    "status=%x", status);
				goto failed;
			}
		}
		/* DFLY control/status registers */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup DFLY CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		/* Fcode flash region */
		if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
			    (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup Fcode Flash "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		/* TITAN core region */
		if (hba->sli.sli3.sbus_core_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CORE "
				    "failed. status=%x", status);
				goto failed;
			}
		}

		/* TITAN control/status registers */
		if (hba->sli.sli3.sbus_csr_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
	} else {	/* ****** PCI ****** */

		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup SLIM failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli3.slim_addr, &dev_attr,
				    &hba->sli.sli3.slim_acc_handle);
				goto failed;
			}
		}

		/*
		 * Map in control registers, using memory-mapped version of
		 * the registers rather than the I/O space-mapped registers.
		 */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup CSR failed. status=%x",
				    status);
				goto failed;
			}
		}
	}

	/*
	 * Allocate the SLIM2 host-memory area (one page aligned, 32-bit
	 * DMA, single SG segment) if it has not been allocated yet.
	 */
	if (hba->sli.sli3.slim2.virt == 0) {
		MBUF_INFO *buf_info;
		MBUF_INFO bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = SLI_SLIM2_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli3.slim2.virt = buf_info->virt;
		hba->sli.sli3.slim2.phys = buf_info->phys;
		hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
		hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
		hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
		bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
	}

	/* offset from beginning of register space */
	/* (word offsets; each register is one uint32_t into the CSR area) */
	hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HA_REG_OFFSET));
	hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * CA_REG_OFFSET));
	hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HS_REG_OFFSET));
	hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HC_REG_OFFSET));
	hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * BC_REG_OFFSET));

	if (hba->bus_type == SBUS_FC) {
		/* offset from beginning of register space */
		/* for TITAN registers */
		hba->sli.sli3.shc_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
		hba->sli.sli3.shs_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
		hba->sli.sli3.shu_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
	}
	hba->chan_count = MAX_RINGS;

	return (0);

failed:

	/* Release any mappings made before the failure */
	emlxs_sli3_unmap_hdw(hba);
	return (ENOMEM);

} /* emlxs_sli3_map_hdw() */
1765 1765
1766 1766
1767 1767 static void
1768 1768 emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
1769 1769 {
1770 1770 MBUF_INFO bufinfo;
1771 1771 MBUF_INFO *buf_info = &bufinfo;
1772 1772
1773 1773 if (hba->sli.sli3.csr_acc_handle) {
1774 1774 ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
1775 1775 hba->sli.sli3.csr_acc_handle = 0;
1776 1776 }
1777 1777
1778 1778 if (hba->sli.sli3.slim_acc_handle) {
1779 1779 ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
1780 1780 hba->sli.sli3.slim_acc_handle = 0;
1781 1781 }
1782 1782
1783 1783 if (hba->sli.sli3.sbus_flash_acc_handle) {
1784 1784 ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
1785 1785 hba->sli.sli3.sbus_flash_acc_handle = 0;
1786 1786 }
1787 1787
1788 1788 if (hba->sli.sli3.sbus_core_acc_handle) {
1789 1789 ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
1790 1790 hba->sli.sli3.sbus_core_acc_handle = 0;
1791 1791 }
1792 1792
1793 1793 if (hba->sli.sli3.sbus_csr_handle) {
1794 1794 ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
1795 1795 hba->sli.sli3.sbus_csr_handle = 0;
1796 1796 }
1797 1797
1798 1798 if (hba->sli.sli3.slim2.virt) {
1799 1799 bzero(buf_info, sizeof (MBUF_INFO));
1800 1800
1801 1801 if (hba->sli.sli3.slim2.phys) {
1802 1802 buf_info->phys = hba->sli.sli3.slim2.phys;
1803 1803 buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
1804 1804 buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
1805 1805 buf_info->flags = FC_MBUF_DMA;
1806 1806 }
1807 1807
1808 1808 buf_info->virt = hba->sli.sli3.slim2.virt;
1809 1809 buf_info->size = hba->sli.sli3.slim2.size;
1810 1810 emlxs_mem_free(hba, buf_info);
1811 1811
1812 1812 hba->sli.sli3.slim2.virt = NULL;
1813 1813 }
1814 1814
1815 1815
1816 1816 return;
1817 1817
1818 1818 } /* emlxs_sli3_unmap_hdw() */
1819 1819
1820 1820
1821 1821 static uint32_t
1822 1822 emlxs_sli3_hba_init(emlxs_hba_t *hba)
1823 1823 {
1824 1824 emlxs_port_t *port = &PPORT;
1825 1825 emlxs_port_t *vport;
1826 1826 emlxs_config_t *cfg;
1827 1827 uint16_t i;
1828 1828
1829 1829 cfg = &CFG;
1830 1830 i = 0;
1831 1831
1832 1832 /* Restart the adapter */
1833 1833 if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
1834 1834 return (1);
1835 1835 }
1836 1836
1837 1837 hba->channel_fcp = FC_FCP_RING;
1838 1838 hba->channel_els = FC_ELS_RING;
1839 1839 hba->channel_ip = FC_IP_RING;
1840 1840 hba->channel_ct = FC_CT_RING;
1841 1841 hba->chan_count = MAX_RINGS;
1842 1842 hba->sli.sli3.ring_count = MAX_RINGS;
1843 1843
1844 1844 /*
1845 1845 * WARNING: There is a max of 6 ring masks allowed
1846 1846 */
1847 1847 /* RING 0 - FCP */
1848 1848 if (hba->tgt_mode) {
1849 1849 hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
1850 1850 hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
1851 1851 hba->sli.sli3.ring_rmask[i] = 0;
1852 1852 hba->sli.sli3.ring_tval[i] = FC_FCP_DATA;
1853 1853 hba->sli.sli3.ring_tmask[i++] = 0xFF;
1854 1854 } else {
1855 1855 hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
1856 1856 }
1857 1857
1858 1858 hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
1859 1859 hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;
1860 1860
1861 1861 /* RING 1 - IP */
1862 1862 if (cfg[CFG_NETWORK_ON].current) {
1863 1863 hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
1864 1864 hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
1865 1865 hba->sli.sli3.ring_rmask[i] = 0xFF;
1866 1866 hba->sli.sli3.ring_tval[i] = FC_LLC_SNAP; /* LLC/SNAP */
1867 1867 hba->sli.sli3.ring_tmask[i++] = 0xFF;
1868 1868 } else {
1869 1869 hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
1870 1870 }
1871 1871
1872 1872 hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
1873 1873 hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;
1874 1874
1875 1875 /* RING 2 - ELS */
1876 1876 hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
1877 1877 hba->sli.sli3.ring_rval[i] = FC_ELS_REQ; /* ELS request/rsp */
1878 1878 hba->sli.sli3.ring_rmask[i] = 0xFE;
1879 1879 hba->sli.sli3.ring_tval[i] = FC_ELS_DATA; /* ELS */
1880 1880 hba->sli.sli3.ring_tmask[i++] = 0xFF;
1881 1881
1882 1882 hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
1883 1883 hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;
1884 1884
1885 1885 /* RING 3 - CT */
1886 1886 hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
1887 1887 hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL; /* CT request/rsp */
1888 1888 hba->sli.sli3.ring_rmask[i] = 0xFE;
1889 1889 hba->sli.sli3.ring_tval[i] = FC_CT_TYPE; /* CT */
1890 1890 hba->sli.sli3.ring_tmask[i++] = 0xFF;
1891 1891
1892 1892 hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
1893 1893 hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;
1894 1894
1895 1895 if (i > 6) {
1896 1896 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
1897 1897 "emlxs_hba_init: Too many ring masks defined. cnt=%d", i);
1898 1898 return (1);
1899 1899 }
1900 1900
1901 1901 /* Initialize all the port objects */
1902 1902 hba->vpi_base = 0;
1903 1903 hba->vpi_max = 0;
1904 1904 for (i = 0; i < MAX_VPORTS; i++) {
1905 1905 vport = &VPORT(i);
1906 1906 vport->hba = hba;
1907 1907 vport->vpi = i;
1908 1908 vport->VPIobj.index = i;
1909 1909 vport->VPIobj.VPI = i;
1910 1910 vport->VPIobj.port = vport;
1911 1911 vport->VPIobj.state = VPI_STATE_OFFLINE;
1912 1912 }
1913 1913
1914 1914 /*
1915 1915 * Initialize the max_node count to a default value if needed
1916 1916 * This determines how many node objects we preallocate in the pool
1917 1917 * The actual max_nodes will be set later based on adapter info
1918 1918 */
1919 1919 if (hba->max_nodes == 0) {
1920 1920 if (cfg[CFG_NUM_NODES].current > 0) {
1921 1921 hba->max_nodes = cfg[CFG_NUM_NODES].current;
1922 1922 } else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
1923 1923 hba->max_nodes = 4096;
1924 1924 } else {
1925 1925 hba->max_nodes = 512;
1926 1926 }
1927 1927 }
1928 1928
1929 1929 return (0);
1930 1930
1931 1931 } /* emlxs_sli3_hba_init() */
1932 1932
1933 1933
/*
 * emlxs_sli3_hba_reset() - Reset (or restart) the SLI3 adapter.
 *
 * restart:   0 = warm reset only; 1 = place an MBX_RESTART command in
 *            SLIM before the reset so the adapter restarts firmware.
 * skip_post: only honored on restart; skips adapter POST (valid only
 *            after emlxs_sli3_online() has completed once).
 * 0: quiesce indicates the call is not from quiesce routine.
 * 1: quiesce indicates the call is from quiesce routine.
 *
 * Returns 0 on success; 1 on failure (HBA state set to FC_ERROR).
 */
static uint32_t
emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
    uint32_t quiesce)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb;
	uint32_t word0;
	uint16_t cfg_value;
	uint32_t status;
	uint32_t status1;
	uint32_t status2;
	uint32_t i;
	uint32_t ready;
	emlxs_port_t *vport;
	RING *rp;
	emlxs_config_t *cfg = &CFG;

	/* 'i' counts elapsed seconds across the polling loop below */
	i = 0;

	/* Honor the administrative "reset disabled" configuration knob */
	if (!cfg[CFG_RESET_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Adapter reset disabled.");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		return (1);
	}

	/* Kill the adapter first */
	if (quiesce == 0) {
		emlxs_sli3_hba_kill(hba);
	} else {
		emlxs_sli3_hba_kill4quiesce(hba);
	}

	if (restart) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Restarting.");
		EMLXS_STATE_CHANGE(hba, FC_INIT_START);

		/* Restart completes when both firmware and mbox are ready */
		ready = (HS_FFRDY | HS_MBRDY);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Resetting.");
		EMLXS_STATE_CHANGE(hba, FC_WARM_START);

		/* Plain reset only waits for mailbox readiness */
		ready = HS_MBRDY;
	}

	hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);

	/*
	 * swpmb overlays the local word0 so mailbox bit-fields can be
	 * composed in host memory before the single SLIM write below.
	 */
	mb = FC_SLIM1_MAILBOX(hba);
	swpmb = (MAILBOX *)&word0;

reset:

	/* Save reset time */
	HBASTATS.ResetTime = hba->timer_tics;

	if (restart) {
		/* First put restart command in mailbox */
		word0 = 0;
		swpmb->mbxCommand = MBX_RESTART;
		swpmb->mbxHc = 1;
		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), word0);

		/* Only skip post after emlxs_sli3_online is completed */
		if (skip_post) {
			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
			    1);
		} else {
			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
			    0);
		}

	}

	/*
	 * Turn off SERR, PERR in PCI cmd register
	 */
	cfg_value = ddi_get16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));

	ddi_put16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
	    (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));

	/* Trigger the chip reset via the Host Control register */
	hba->sli.sli3.hc_copy = HC_INITFF;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);

	/* Wait 1 msec before restoring PCI config */
	DELAYMS(1);

	/* Restore PCI cmd register */
	ddi_put16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
	    (uint16_t)cfg_value);

	/* Wait 3 seconds before checking */
	DELAYMS(3000);
	i += 3;

	/* Wait for reset completion */
	while (i < 30) {
		/* Check status register to see what current state is */
		status = READ_CSR_REG(hba, FC_HS_REG(hba));

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/*
			 * Firmware error: SLIM offsets 0xa8/0xac appear to
			 * hold adapter error detail words -- logged for
			 * diagnosis only (NOTE(review): offsets taken
			 * as-is from the original code; confirm against
			 * the adapter programming manual).
			 */
			status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
			    hba->sli.sli3.slim_addr + 0xa8));
			status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
			    hba->sli.sli3.slim_addr + 0xac));

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
			    "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
			    status, status1, status2);

			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			return (1);
		}

		if ((status & ready) == ready) {
			/* Reset Done !! */
			goto done;
		}

		/*
		 * Check every 1 second for 15 seconds, then reset board
		 * again (w/post), then check every 1 second for 15 * seconds.
		 */
		DELAYMS(1000);
		i++;

		/* Reset again (w/post) at 15 seconds */
		if (i == 15) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Reset failed. Retrying...");

			goto reset;
		}
	}

#ifdef FMA_SUPPORT
reset_fail:
#endif	/* FMA_SUPPORT */

	/* Timeout occurred */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
	    "Timeout: status=0x%x", status);
	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	/* Log a dump event */
	emlxs_log_dump_event(port, NULL, 0);

	return (1);

done:

	/* Initialize hc_copy */
	hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));

#ifdef FMA_SUPPORT
	/* Access handle validation */
	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
	    != DDI_FM_OK) ||
	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
	    != DDI_FM_OK) ||
	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
	    != DDI_FM_OK)) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);
		goto reset_fail;
	}
#endif	/* FMA_SUPPORT */

	/* Reset the hba structure */
	hba->flag &= FC_RESET_MASK;
	hba->channel_tx_count = 0;
	hba->io_count = 0;
	hba->iodone_count = 0;
	hba->topology = 0;
	hba->linkspeed = 0;
	hba->heartbeat_active = 0;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;
	hba->loopback_tics = 0;


	/* Reset the ring objects */
	for (i = 0; i < MAX_RINGS; i++) {
		rp = &hba->sli.sli3.ring[i];
		rp->fc_mpon = 0;
		rp->fc_mpoff = 0;
	}

	/* Reset the port objects */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		vport->flag &= EMLXS_PORT_RESET_MASK;
		vport->did = 0;
		vport->prev_did = 0;
		vport->lip_type = 0;
		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));

		/* Rebuild the pseudo node used as list anchor for the port */
		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
		vport->node_base.nlp_Rpi = 0;
		vport->node_base.nlp_DID = 0xffffff;
		vport->node_base.nlp_list_next = NULL;
		vport->node_base.nlp_list_prev = NULL;
		vport->node_base.nlp_active = 1;
		vport->node_count = 0;

		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
		}
	}

	return (0);

} /* emlxs_sli3_hba_reset */
2161 2161
2162 2162
2163 2163 #define BPL_CMD 0
2164 2164 #define BPL_RESP 1
2165 2165 #define BPL_DATA 2
2166 2166
2167 2167 static ULP_BDE64 *
2168 2168 emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt, uint32_t bpl_type,
2169 2169 uint8_t bdeFlags)
2170 2170 {
2171 2171 ddi_dma_cookie_t *cp;
2172 2172 uint_t i;
2173 2173 int32_t size;
2174 2174 uint_t cookie_cnt;
2175 2175
2176 2176 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2177 2177 switch (bpl_type) {
2178 2178 case BPL_CMD:
2179 2179 cp = pkt->pkt_cmd_cookie;
2180 2180 cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2181 2181 size = (int32_t)pkt->pkt_cmdlen;
2182 2182 break;
2183 2183
2184 2184 case BPL_RESP:
2185 2185 cp = pkt->pkt_resp_cookie;
2186 2186 cookie_cnt = pkt->pkt_resp_cookie_cnt;
2187 2187 size = (int32_t)pkt->pkt_rsplen;
2188 2188 break;
2189 2189
2190 2190
2191 2191 case BPL_DATA:
2192 2192 cp = pkt->pkt_data_cookie;
2193 2193 cookie_cnt = pkt->pkt_data_cookie_cnt;
2194 2194 size = (int32_t)pkt->pkt_datalen;
2195 2195 break;
2196 2196 }
2197 2197
2198 2198 #else
2199 2199 switch (bpl_type) {
2200 2200 case BPL_CMD:
2201 2201 cp = &pkt->pkt_cmd_cookie;
2202 2202 cookie_cnt = 1;
2203 2203 size = (int32_t)pkt->pkt_cmdlen;
2204 2204 break;
2205 2205
2206 2206 case BPL_RESP:
2207 2207 cp = &pkt->pkt_resp_cookie;
2208 2208 cookie_cnt = 1;
2209 2209 size = (int32_t)pkt->pkt_rsplen;
2210 2210 break;
2211 2211
2212 2212
2213 2213 case BPL_DATA:
2214 2214 cp = &pkt->pkt_data_cookie;
2215 2215 cookie_cnt = 1;
2216 2216 size = (int32_t)pkt->pkt_datalen;
2217 2217 break;
2218 2218 }
2219 2219 #endif /* >= EMLXS_MODREV3 */
2220 2220
2221 2221 for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2222 2222 bpl->addrHigh =
2223 2223 BE_SWAP32(PADDR_HI(cp->dmac_laddress));
2224 2224 bpl->addrLow =
2225 2225 BE_SWAP32(PADDR_LO(cp->dmac_laddress));
2226 2226 bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
2227 2227 bpl->tus.f.bdeFlags = bdeFlags;
2228 2228 bpl->tus.w = BE_SWAP32(bpl->tus.w);
2229 2229
2230 2230 bpl++;
2231 2231 size -= cp->dmac_size;
2232 2232 }
2233 2233
2234 2234 return (bpl);
2235 2235
2236 2236 } /* emlxs_pkt_to_bpl */
2237 2237
2238 2238
/*
 * emlxs_sli2_bde_setup() - Build the SLI2-style scatter/gather list for
 * an outgoing packet.
 *
 * Allocates a BPL buffer, fills it with ULP_BDE64 entries for the
 * payloads appropriate to the target ring (FCP/IP/ELS/CT) and transfer
 * direction, then points the IOCB at the BPL through a single BDL
 * descriptor.  Which payloads are included:
 *   - CMD is always mapped;
 *   - RSP is mapped for inbound/bidirectional transfers (and always for
 *     Menlo-type CT frames);
 *   - DATA is mapped on the FCP path when pkt_datalen != 0.
 *
 * Returns 0 on success, 1 if no BPL buffer could be obtained.
 */
static uint32_t
emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	MATCHMAP *bmp;
	ULP_BDE64 *bpl;
	uint64_t bp;
	uint8_t bdeFlag;
	IOCB *iocb;
	IOCBQ *iocbq;
	CHANNEL *cp;
	uint32_t cmd_cookie_cnt;
	uint32_t resp_cookie_cnt;
	uint32_t data_cookie_cnt;
	uint32_t cookie_cnt;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);

#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL, 0);

#endif

	if (!bmp) {
		return (1);
	}

	/* Remember the BPL buffer on the I/O so completion can free it */
	sbp->bmp = bmp;
	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;
	cookie_cnt = 0;

	/* Pre-MODREV3 packets expose exactly one cookie per payload */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cmd_cookie_cnt  = pkt->pkt_cmd_cookie_cnt;
	resp_cookie_cnt = pkt->pkt_resp_cookie_cnt;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	cmd_cookie_cnt  = 1;
	resp_cookie_cnt = 1;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	/* FCP commands take the FCP layout regardless of the ring used */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* Receive flag only for FCP reads */
				bdeFlag =
				    (pkt->pkt_tran_type ==
				    FC_PKT_FCP_READ) ? BUFF_USE_RCV : 0;
				bpl =
				    emlxs_pkt_to_bpl(bpl, pkt, BPL_DATA,
				    bdeFlag);
				cookie_cnt += data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;


	case FC_CT_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		/* Menlo-type CT frames always expect a response buffer */
		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;

	}

	/* Point the IOCB's BDL at the BPL buffer just built */
	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.genreq64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.genreq64.bdl.bdeSize = cookie_cnt * sizeof (ULP_BDE64);

	/* SLI2 style: one BDL descriptor, list-end set */
	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;

	return (0);

} /* emlxs_sli2_bde_setup */
2381 2381
2382 2382
/*
 * emlxs_sli3_bde_setup() - Build SLI3-style inline BDEs for an outgoing
 * packet.
 *
 * Instead of a separate BPL buffer, SLI3 embeds the CMD descriptor in
 * the IOCB's BDL field and places RSP/DATA descriptors in the extended
 * IOCB area (ebde1, ebde2, ...).  If the packet's cookies don't fit the
 * inline layout (multi-cookie CMD/RSP, or more than SLI3_MAX_BDE total),
 * the function falls back to emlxs_sli2_bde_setup().
 *
 * Returns 0 on success, or the result of the SLI2 fallback.
 */
static uint32_t
emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	ddi_dma_cookie_t *cp_data;
	fc_packet_t *pkt;
	ULP_BDE64 *bde;
	int data_cookie_cnt;
	uint32_t i;
	IOCB *iocb;
	IOCBQ *iocbq;
	CHANNEL *cp;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* Fall back to a BPL buffer when inline BDEs can't hold the I/O */
	if ((pkt->pkt_cmd_cookie_cnt > 1) ||
	    (pkt->pkt_resp_cookie_cnt > 1) ||
	    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
	    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
		i = emlxs_sli2_bde_setup(port, sbp);
		return (i);
	}

#endif	/* >= EMLXS_MODREV3 */

	/* Pre-MODREV3 packets expose exactly one cookie per payload */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
	cp_data = pkt->pkt_data_cookie;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	cp_cmd = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
	cp_data = &pkt->pkt_data_cookie;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	iocb->unsli3.ext_iocb.ebde_count = 0;

	/* FCP commands take the FCP layout regardless of the ring used */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		iocb->un.fcpi64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.fcpi64.bdl.bdeFlags = 0;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
			iocb->unsli3.ext_iocb.ebde_count = 1;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* One extended BDE per data cookie */
				bde =
				    (ULP_BDE64 *)&iocb->unsli3.ext_iocb.
				    ebde2;
				for (i = 0; i < data_cookie_cnt; i++) {
					bde->addrHigh =
					    PADDR_HI(cp_data->
					    dmac_laddress);
					bde->addrLow =
					    PADDR_LO(cp_data->
					    dmac_laddress);
					bde->tus.f.bdeSize =
					    cp_data->dmac_size;
					bde->tus.f.bdeFlags = 0;
					cp_data++;
					bde++;
				}
				iocb->unsli3.ext_iocb.ebde_count +=
				    data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		iocb->un.xseq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.xseq64.bdl.bdeFlags = 0;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		iocb->un.elsreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.elsreq64.bdl.bdeFlags = 0;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;

	case FC_CT_RING:

		/* CMD payload */
		iocb->un.genreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.genreq64.bdl.bdeFlags = 0;

		/* Menlo-type CT frames always expect a response buffer */
		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;
	}

	/* SLI3 inline BDEs: no BDL descriptor, no list-end flag */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_bde_setup */
2552 2552
2553 2553
2554 2554 /* Only used for FCP Data xfers */
2555 2555 #ifdef SFCT_SUPPORT
/*ARGSUSED*/
/*
 * emlxs_sli2_fct_bde_setup() - Build the SLI2 BPL for an FCP target-mode
 * (SFCT) data transfer.
 *
 * Maps the COMSTAR buffer's scatter/gather list into a BPL buffer and
 * links it into the FCP target IOCB.  With no data buffer attached the
 * IOCB's BDL and length fields are zeroed (status-only response).
 *
 * Returns 0 on success, 1 if a BPL buffer could not be allocated.
 */
static uint32_t
emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	scsi_task_t *fct_task;
	MATCHMAP *bmp;
	ULP_BDE64 *bpl;
	uint64_t bp;
	uint8_t bdeFlags;
	IOCB *iocb;
	uint32_t resid;
	uint32_t count;
	uint32_t size;
	uint32_t sgllen;
	struct stmf_sglist_ent *sgl;
	emlxs_fct_dmem_bctl_t *bctl;


	iocb = (IOCB *)&sbp->iocbq;
	sbp->bmp = NULL;

	/* No data buffer: send a status-only IOCB with an empty BDL */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (0);
	}
#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL, 0);
#endif /* EMLXS_SPARC */

	if (!bmp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "emlxs_fct_sli2_bde_setup: Unable to BPL buffer. iotag=%x",
		    sbp->iotag);

		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (1);
	}

	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;


	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;

	/* WRITE (initiator->target) transfers receive into the buffer */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/*
	 * Init the buffer list
	 *
	 * NOTE(review): every entry uses bctl->bctl_dev_addr as its DMA
	 * address while sizes advance through the SGL -- presumably the
	 * bctl describes one contiguous DMA region; confirm against
	 * emlxs_fct_dmem_bctl_t's allocator.
	 */
	for (sgllen = 0; sgllen < count && resid > 0; sgllen++) {
		bpl->addrHigh =
		    BE_SWAP32(PADDR_HI(bctl->bctl_dev_addr));
		bpl->addrLow =
		    BE_SWAP32(PADDR_LO(bctl->bctl_dev_addr));
		bpl->tus.f.bdeSize = MIN(resid, sgl->seg_length);
		bpl->tus.f.bdeFlags = bdeFlags;
		bpl->tus.w = BE_SWAP32(bpl->tus.w);
		bpl++;

		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	/* Init the IOCB */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.fcpt64.bdl.bdeSize = sgllen * sizeof (ULP_BDE64);
	iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;

	/* fcpt_Length is only meaningful for inbound (WRITE) data */
	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;
	sbp->bmp = bmp;

	return (0);

} /* emlxs_sli2_fct_bde_setup */
2659 2659 #endif /* SFCT_SUPPORT */
2660 2660
2661 2661
2662 2662 #ifdef SFCT_SUPPORT
/*ARGSUSED*/
/*
 * emlxs_sli3_fct_bde_setup() - Build SLI3 inline BDEs for an FCP
 * target-mode (SFCT) data transfer.
 *
 * The first SGL segment goes into the IOCB's BDL field; remaining
 * segments fill the extended BDE area starting at ebde1.  With no data
 * buffer attached the IOCB's BDL and length fields are zeroed
 * (status-only response).
 *
 * Always returns 0.
 */
static uint32_t
emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	scsi_task_t *fct_task;
	ULP_BDE64 *bde;
	IOCB *iocb;
	uint32_t size;
	uint32_t count;
	uint32_t sgllen;
	int32_t resid;
	struct stmf_sglist_ent *sgl;
	uint32_t bdeFlags;
	emlxs_fct_dmem_bctl_t *bctl;

	iocb = (IOCB *)&sbp->iocbq;

	/* No data buffer: send a status-only IOCB with an empty BDL */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 0;
		iocb->unsli3.ext_iocb.ebde_count = 0;
		return (0);
	}

	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;

	/* WRITE (initiator->target) transfers receive into the buffer */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/* Init first BDE */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bctl->bctl_dev_addr);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bctl->bctl_dev_addr);
	iocb->un.fcpt64.bdl.bdeSize = MIN(resid, sgl->seg_length);
	iocb->un.fcpt64.bdl.bdeFlags = bdeFlags;
	resid -= MIN(resid, sgl->seg_length);
	sgl++;

	/*
	 * Init remaining BDE's
	 *
	 * NOTE(review): as in the SLI2 variant, every BDE uses
	 * bctl->bctl_dev_addr while sizes advance through the SGL --
	 * presumably the bctl maps one contiguous DMA region; confirm
	 * against emlxs_fct_dmem_bctl_t's allocator.
	 */
	bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde1;
	for (sgllen = 1; sgllen < count && resid > 0; sgllen++) {
		bde->addrHigh = PADDR_HI(bctl->bctl_dev_addr);
		bde->addrLow = PADDR_LO(bctl->bctl_dev_addr);
		bde->tus.f.bdeSize = MIN(resid, sgl->seg_length);
		bde->tus.f.bdeFlags = bdeFlags;
		bde++;

		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	/* First segment lives in the BDL, so ebde_count excludes it */
	iocb->unsli3.ext_iocb.ebde_count = sgllen - 1;
	/* fcpt_Length is only meaningful for inbound (WRITE) data */
	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_fct_bde_setup */
2735 2735 #endif /* SFCT_SUPPORT */
2736 2736
2737 2737
2738 2738 static void
2739 2739 emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
2740 2740 {
2741 2741 #ifdef FMA_SUPPORT
2742 2742 emlxs_port_t *port = &PPORT;
2743 2743 #endif /* FMA_SUPPORT */
2744 2744 PGP *pgp;
2745 2745 emlxs_buf_t *sbp;
2746 2746 SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
2747 2747 RING *rp;
2748 2748 uint32_t nextIdx;
2749 2749 uint32_t status;
2750 2750 void *ioa2;
2751 2751 off_t offset;
2752 2752 uint32_t count = 0;
2753 2753 uint32_t flag;
2754 2754 uint32_t channelno;
2755 2755 int32_t throttle;
2756 2756
2757 2757 channelno = cp->channelno;
2758 2758 rp = (RING *)cp->iopath;
2759 2759
2760 2760 throttle = 0;
2761 2761
2762 2762 /* Check if FCP ring and adapter is not ready */
2763 2763 /* We may use any ring for FCP_CMD */
2764 2764 if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
2765 2765 if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
2766 2766 !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
2767 2767 emlxs_tx_put(iocbq, 1);
2768 2768 return;
2769 2769 }
2770 2770 }
2771 2771
2772 2772 /* Attempt to acquire CMD_RING lock */
2773 2773 if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
2774 2774 /* Queue it for later */
2775 2775 if (iocbq) {
2776 2776 if ((hba->io_count -
2777 2777 hba->channel_tx_count) > 10) {
2778 2778 emlxs_tx_put(iocbq, 1);
2779 2779 return;
2780 2780 } else {
2781 2781
2782 2782 /*
2783 2783 * EMLXS_MSGF(EMLXS_CONTEXT,
2784 2784 * &emlxs_ring_watchdog_msg,
2785 2785 * "%s host=%d port=%d cnt=%d,%d RACE
2786 2786 * CONDITION3 DETECTED.",
2787 2787 * emlxs_ring_xlate(channelno),
2788 2788 * rp->fc_cmdidx, rp->fc_port_cmdidx,
2789 2789 * hba->channel_tx_count,
2790 2790 * hba->io_count);
2791 2791 */
2792 2792 mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
2793 2793 }
2794 2794 } else {
2795 2795 return;
2796 2796 }
2797 2797 }
2798 2798 /* CMD_RING_LOCK acquired */
2799 2799
2800 2800 /* Throttle check only applies to non special iocb */
2801 2801 if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
2802 2802 /* Check if HBA is full */
2803 2803 throttle = hba->io_throttle - hba->io_active;
2804 2804 if (throttle <= 0) {
2805 2805 /* Hitting adapter throttle limit */
2806 2806 /* Queue it for later */
2807 2807 if (iocbq) {
2808 2808 emlxs_tx_put(iocbq, 1);
2809 2809 }
2810 2810
2811 2811 goto busy;
2812 2812 }
2813 2813 }
2814 2814
2815 2815 /* Read adapter's get index */
2816 2816 pgp = (PGP *)
2817 2817 &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
2818 2818 offset =
2819 2819 (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
2820 2820 (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
2821 2821 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
2822 2822 DDI_DMA_SYNC_FORKERNEL);
2823 2823 rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
2824 2824
2825 2825 /* Calculate the next put index */
2826 2826 nextIdx =
2827 2827 (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;
2828 2828
2829 2829 /* Check if ring is full */
2830 2830 if (nextIdx == rp->fc_port_cmdidx) {
2831 2831 /* Try one more time */
2832 2832 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
2833 2833 DDI_DMA_SYNC_FORKERNEL);
2834 2834 rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
2835 2835
2836 2836 if (nextIdx == rp->fc_port_cmdidx) {
2837 2837 /* Queue it for later */
2838 2838 if (iocbq) {
2839 2839 emlxs_tx_put(iocbq, 1);
2840 2840 }
2841 2841
2842 2842 goto busy;
2843 2843 }
2844 2844 }
2845 2845
2846 2846 /*
2847 2847 * We have a command ring slot available
2848 2848 * Make sure we have an iocb to send
2849 2849 */
2850 2850 if (iocbq) {
2851 2851 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2852 2852
2853 2853 /* Check if the ring already has iocb's waiting */
2854 2854 if (cp->nodeq.q_first != NULL) {
2855 2855 /* Put the current iocbq on the tx queue */
2856 2856 emlxs_tx_put(iocbq, 0);
2857 2857
2858 2858 /*
2859 2859 * Attempt to replace it with the next iocbq
2860 2860 * in the tx queue
2861 2861 */
2862 2862 iocbq = emlxs_tx_get(cp, 0);
2863 2863 }
2864 2864
2865 2865 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2866 2866 } else {
2867 2867 /* Try to get the next iocb on the tx queue */
2868 2868 iocbq = emlxs_tx_get(cp, 1);
2869 2869 }
2870 2870
2871 2871 sendit:
2872 2872 count = 0;
2873 2873
2874 2874 /* Process each iocbq */
2875 2875 while (iocbq) {
2876 2876
2877 2877 sbp = iocbq->sbp;
2878 2878 if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
2879 2879 /*
2880 2880 * Update adapter if needed, since we are about to
2881 2881 * delay here
2882 2882 */
2883 2883 if (count) {
2884 2884 count = 0;
2885 2885
2886 2886 /* Update the adapter's cmd put index */
2887 2887 if (hba->bus_type == SBUS_FC) {
2888 2888 slim2p->mbx.us.s2.host[channelno].
2889 2889 cmdPutInx =
2890 2890 BE_SWAP32(rp->fc_cmdidx);
2891 2891
2892 2892 /* DMA sync the index for the adapter */
2893 2893 offset = (off_t)
2894 2894 ((uint64_t)
2895 2895 ((unsigned long)&(slim2p->mbx.us.
2896 2896 s2.host[channelno].cmdPutInx)) -
2897 2897 (uint64_t)((unsigned long)slim2p));
2898 2898 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
2899 2899 dma_handle, offset, 4,
2900 2900 DDI_DMA_SYNC_FORDEV);
2901 2901 } else {
2902 2902 ioa2 = (void *)
2903 2903 ((char *)hba->sli.sli3.slim_addr +
2904 2904 hba->sli.sli3.hgp_ring_offset +
2905 2905 ((channelno * 2) *
2906 2906 sizeof (uint32_t)));
2907 2907 WRITE_SLIM_ADDR(hba,
2908 2908 (volatile uint32_t *)ioa2,
2909 2909 rp->fc_cmdidx);
2910 2910 }
2911 2911
2912 2912 status = (CA_R0ATT << (channelno * 4));
2913 2913 WRITE_CSR_REG(hba, FC_CA_REG(hba),
2914 2914 (volatile uint32_t)status);
2915 2915
2916 2916 }
2917 2917 /* Perform delay */
2918 2918 if ((channelno == FC_ELS_RING) &&
2919 2919 !(iocbq->flag & IOCB_FCP_CMD)) {
2920 2920 drv_usecwait(100000);
2921 2921 } else {
2922 2922 drv_usecwait(20000);
2923 2923 }
2924 2924 }
2925 2925
2926 2926 /*
2927 2927 * At this point, we have a command ring slot available
2928 2928 * and an iocb to send
2929 2929 */
2930 2930 flag = iocbq->flag;
2931 2931
2932 2932 /* Send the iocb */
2933 2933 emlxs_sli3_issue_iocb(hba, rp, iocbq);
2934 2934 /*
2935 2935 * After this, the sbp / iocb should not be
2936 2936 * accessed in the xmit path.
2937 2937 */
2938 2938
2939 2939 count++;
2940 2940 if (iocbq && (!(flag & IOCB_SPECIAL))) {
2941 2941 /* Check if HBA is full */
2942 2942 throttle = hba->io_throttle - hba->io_active;
2943 2943 if (throttle <= 0) {
2944 2944 goto busy;
2945 2945 }
2946 2946 }
2947 2947
2948 2948 /* Calculate the next put index */
2949 2949 nextIdx =
2950 2950 (rp->fc_cmdidx + 1 >=
2951 2951 rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;
2952 2952
2953 2953 /* Check if ring is full */
2954 2954 if (nextIdx == rp->fc_port_cmdidx) {
2955 2955 /* Try one more time */
2956 2956 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
2957 2957 offset, 4, DDI_DMA_SYNC_FORKERNEL);
2958 2958 rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
2959 2959
2960 2960 if (nextIdx == rp->fc_port_cmdidx) {
2961 2961 goto busy;
2962 2962 }
2963 2963 }
2964 2964
2965 2965 /* Get the next iocb from the tx queue if there is one */
2966 2966 iocbq = emlxs_tx_get(cp, 1);
2967 2967 }
2968 2968
2969 2969 if (count) {
2970 2970 /* Update the adapter's cmd put index */
2971 2971 if (hba->bus_type == SBUS_FC) {
2972 2972 slim2p->mbx.us.s2.host[channelno].
2973 2973 cmdPutInx = BE_SWAP32(rp->fc_cmdidx);
2974 2974
2975 2975 /* DMA sync the index for the adapter */
2976 2976 offset = (off_t)
2977 2977 ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
2978 2978 host[channelno].cmdPutInx)) -
2979 2979 (uint64_t)((unsigned long)slim2p));
2980 2980 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
2981 2981 offset, 4, DDI_DMA_SYNC_FORDEV);
2982 2982 } else {
2983 2983 ioa2 =
2984 2984 (void *)((char *)hba->sli.sli3.slim_addr +
2985 2985 hba->sli.sli3.hgp_ring_offset +
2986 2986 ((channelno * 2) * sizeof (uint32_t)));
2987 2987 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
2988 2988 rp->fc_cmdidx);
2989 2989 }
2990 2990
2991 2991 status = (CA_R0ATT << (channelno * 4));
2992 2992 WRITE_CSR_REG(hba, FC_CA_REG(hba),
2993 2993 (volatile uint32_t)status);
2994 2994
2995 2995 /* Check tx queue one more time before releasing */
2996 2996 if ((iocbq = emlxs_tx_get(cp, 1))) {
2997 2997 /*
2998 2998 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
2999 2999 * "%s host=%d port=%d RACE CONDITION1
3000 3000 * DETECTED.", emlxs_ring_xlate(channelno),
3001 3001 * rp->fc_cmdidx, rp->fc_port_cmdidx);
3002 3002 */
3003 3003 goto sendit;
3004 3004 }
3005 3005 }
3006 3006
3007 3007 #ifdef FMA_SUPPORT
3008 3008 /* Access handle validation */
3009 3009 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
3010 3010 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
3011 3011 #endif /* FMA_SUPPORT */
3012 3012
3013 3013 mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
3014 3014
3015 3015 return;
3016 3016
3017 3017 busy:
3018 3018
3019 3019 /*
3020 3020 * Set ring to SET R0CE_REQ in Chip Att register.
3021 3021 * Chip will tell us when an entry is freed.
3022 3022 */
3023 3023 if (count) {
3024 3024 /* Update the adapter's cmd put index */
3025 3025 if (hba->bus_type == SBUS_FC) {
3026 3026 slim2p->mbx.us.s2.host[channelno].cmdPutInx =
3027 3027 BE_SWAP32(rp->fc_cmdidx);
3028 3028
3029 3029 /* DMA sync the index for the adapter */
3030 3030 offset = (off_t)
3031 3031 ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
3032 3032 host[channelno].cmdPutInx)) -
3033 3033 (uint64_t)((unsigned long)slim2p));
3034 3034 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3035 3035 offset, 4, DDI_DMA_SYNC_FORDEV);
3036 3036 } else {
3037 3037 ioa2 =
3038 3038 (void *)((char *)hba->sli.sli3.slim_addr +
3039 3039 hba->sli.sli3.hgp_ring_offset +
3040 3040 ((channelno * 2) * sizeof (uint32_t)));
3041 3041 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
3042 3042 rp->fc_cmdidx);
3043 3043 }
3044 3044 }
3045 3045
3046 3046 status = ((CA_R0ATT | CA_R0CE_REQ) << (channelno * 4));
3047 3047 WRITE_CSR_REG(hba, FC_CA_REG(hba), (volatile uint32_t)status);
3048 3048
3049 3049 if (throttle <= 0) {
3050 3050 HBASTATS.IocbThrottled++;
3051 3051 } else {
3052 3052 HBASTATS.IocbRingFull[channelno]++;
3053 3053 }
3054 3054
3055 3055 #ifdef FMA_SUPPORT
3056 3056 /* Access handle validation */
3057 3057 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
3058 3058 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
3059 3059 #endif /* FMA_SUPPORT */
3060 3060
3061 3061 mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
3062 3062
3063 3063 return;
3064 3064
3065 3065 } /* emlxs_sli3_issue_iocb_cmd() */
3066 3066
3067 3067
3068 3068 /* MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR */
3069 3069 /* MBX_WAIT - returns MBX_TIMEOUT or mailbox_status */
3070 3070 /* MBX_SLEEP - returns MBX_TIMEOUT or mailbox_status */
3071 3071 /* MBX_POLL - returns MBX_TIMEOUT or mailbox_status */
3072 3072
/*
 * emlxs_sli3_issue_mbox_cmd
 *
 * Issue a mailbox command to the adapter and, depending on 'flag',
 * either return immediately (MBX_NOWAIT), sleep until the interrupt
 * handler signals completion (MBX_SLEEP), or poll the mailbox owner
 * bit directly (MBX_POLL).  'tmo' is the caller's timeout in seconds;
 * it is raised to a command-specific minimum below.
 *
 * Returns MBX_BUSY / MBX_SUCCESS / MBX_TIMEOUT / MBX_HARDWARE_ERROR
 * or the adapter's mailbox completion status (see header comments
 * above for the per-flag return contract).
 */
static uint32_t
emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
    uint32_t tmo)
{
	emlxs_port_t *port;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	MAILBOX *mbox;
	MAILBOX *mb;
	volatile uint32_t word0;
	volatile uint32_t ldata;
	uint32_t ha_copy;
	off_t offset;
	MATCHMAP *mbox_bp;
	uint32_t tmo_local;
	MAILBOX *swpmb;

	/* Default to the physical port if the caller did not set one */
	if (!mbq->port) {
		mbq->port = &PPORT;
	}

	port = (emlxs_port_t *)mbq->port;

	mb = (MAILBOX *)mbq;
	/*
	 * swpmb overlays word0 so the polling loop below can test the
	 * mbxOwner bit of the first mailbox word without copying the
	 * whole mailbox.
	 */
	swpmb = (MAILBOX *)&word0;

	mb->mbxStatus = MBX_SUCCESS;

	/* Check for minimum timeouts */
	switch (mb->mbxCommand) {
	/* Mailbox commands that erase/write flash */
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_WRITE_NV:
	case MBX_FLASH_WR_ULA:
	case MBX_DEL_LD_ENTRY:
	case MBX_LOAD_SM:
		/* Flash operations are slow; allow at least 5 minutes */
		if (tmo < 300) {
			tmo = 300;
		}
		break;

	default:
		if (tmo < 30) {
			tmo = 30;
		}
		break;
	}

	/* Convert tmo seconds to 10 millisecond tics */
	tmo_local = tmo * 100;

	/* Adjust wait flag */
	if (flag != MBX_NOWAIT) {
		/* If interrupt is enabled, use sleep, otherwise poll */
		if (hba->sli.sli3.hc_copy & HC_MBINT_ENA) {
			flag = MBX_SLEEP;
		} else {
			flag = MBX_POLL;
		}
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check for hardware error */
	if (hba->flag & FC_HARDWARE_ERROR) {
		mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ?
		    MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;

		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Hardware error reported. %s failed. status=%x mb=%p",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);

		return (MBX_HARDWARE_ERROR);
	}

	/* Another mailbox command is already outstanding */
	if (hba->mbox_queue_flag) {
		/* If we are not polling, then queue it for later */
		if (flag == MBX_NOWAIT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Busy. %s: mb=%p NoWait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

			emlxs_mb_put(hba, mbq);

			HBASTATS.MboxBusy++;

			mutex_exit(&EMLXS_PORT_LOCK);

			return (MBX_BUSY);
		}

		/*
		 * Waiting caller: spin (dropping the port lock each
		 * iteration) until the outstanding mailbox drains or
		 * the timeout expires.
		 */
		while (hba->mbox_queue_flag) {
			mutex_exit(&EMLXS_PORT_LOCK);

			if (tmo_local-- == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_event_msg,
				    "Timeout. %s: mb=%p tmo=%d Waiting.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    tmo);

				/* Non-lethalStatus mailbox timeout */
				/* Does not indicate a hardware error */
				mb->mbxStatus = MBX_TIMEOUT;
				return (MBX_TIMEOUT);
			}

			DELAYMS(10);
			mutex_enter(&EMLXS_PORT_LOCK);
		}
	}

	/* Initialize mailbox area */
	emlxs_mb_init(hba, mbq, flag, tmo);

	/* Trace the send (noisy commands are suppressed) */
	switch (flag) {
	case MBX_NOWAIT:

		if (mb->mbxCommand != MBX_HEARTBEAT) {
			if (mb->mbxCommand != MBX_DOWN_LOAD &&
			    mb->mbxCommand != MBX_DUMP_MEMORY) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Sending. %s: mb=%p NoWait.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
			}
		}

		break;

	case MBX_SLEEP:
		if (mb->mbxCommand != MBX_DOWN_LOAD &&
		    mb->mbxCommand != MBX_DUMP_MEMORY) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending. %s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
		}

		break;

	case MBX_POLL:
		if (mb->mbxCommand != MBX_DOWN_LOAD &&
		    mb->mbxCommand != MBX_DUMP_MEMORY) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending. %s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
		}
		break;
	}

	/* Hand ownership of the mailbox to the adapter */
	mb->mbxOwner = OWN_CHIP;

	/* Clear the attention bit */
	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);

	/*
	 * Copy the command into the mailbox area the adapter reads:
	 * host-memory SLIM2 (DMA), or on-board SLIM1 (PIO), with a
	 * special transitional path for MBX_CONFIG_PORT below.
	 */
	if (hba->flag & FC_SLIM2_MODE) {
		/* First copy command data */
		mbox = FC_SLIM2_MAILBOX(hba);
		offset =
		    (off_t)((uint64_t)((unsigned long)mbox)
		    - (uint64_t)((unsigned long)slim2p));

#ifdef MBOX_EXT_SUPPORT
		if (mbq->extbuf) {
			uint32_t *mbox_ext =
			    (uint32_t *)((uint8_t *)mbox +
			    MBOX_EXTENSION_OFFSET);
			off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;

			BE_SWAP32_BCOPY((uint8_t *)mbq->extbuf,
			    (uint8_t *)mbox_ext, mbq->extsize);

			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset_ext, mbq->extsize,
			    DDI_DMA_SYNC_FORDEV);
		}
#endif /* MBOX_EXT_SUPPORT */

		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
		    MAILBOX_CMD_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
	}
	/* Check for config port command */
	else if (mb->mbxCommand == MBX_CONFIG_PORT) {
		/* copy command data into host mbox for cmpl */
		mbox = FC_SLIM2_MAILBOX(hba);
		offset = (off_t)((uint64_t)((unsigned long)mbox)
		    - (uint64_t)((unsigned long)slim2p));

		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
		    MAILBOX_CMD_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);

		/* First copy command data */
		mbox = FC_SLIM1_MAILBOX(hba);
		/* Words 1..N first; word 0 last so mbxOwner flips last */
		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
		    (MAILBOX_CMD_WSIZE - 1));

		/* copy over last word, with mbxOwner set */
		ldata = *((volatile uint32_t *)mb);
		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);

		/* switch over to host mailbox */
		hba->flag |= FC_SLIM2_MODE;
	} else {	/* SLIM 1 */

		mbox = FC_SLIM1_MAILBOX(hba);

#ifdef MBOX_EXT_SUPPORT
		if (mbq->extbuf) {
			uint32_t *mbox_ext =
			    (uint32_t *)((uint8_t *)mbox +
			    MBOX_EXTENSION_OFFSET);
			WRITE_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
			    mbox_ext, (mbq->extsize / 4));
		}
#endif /* MBOX_EXT_SUPPORT */

		/* First copy command data */
		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
		    (MAILBOX_CMD_WSIZE - 1));

		/* copy over last word, with mbxOwner set */
		ldata = *((volatile uint32_t *)mb);
		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
	}

	/* Interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	mutex_exit(&EMLXS_PORT_LOCK);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	if ((emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
	    != DDI_FM_OK) ||
	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
	    != DDI_FM_OK)) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);
		return (MBX_HARDWARE_ERROR);
	}
#endif  /* FMA_SUPPORT */

	/* Now wait for (or skip) completion, per the adjusted flag */
	switch (flag) {
	case MBX_NOWAIT:
		return (MBX_SUCCESS);

	case MBX_SLEEP:

		/* Wait for completion */
		/* The driver clock is timing the mailbox. */
		/* emlxs_mb_fini() will be called externally. */

		mutex_enter(&EMLXS_MBOX_LOCK);
		while (!(mbq->flag & MBQ_COMPLETED)) {
			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		if (mb->mbxStatus == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
			    "Timeout. %s: mb=%p tmo=%d. Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD &&
			    mb->mbxCommand != MBX_DUMP_MEMORY) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed. %s: mb=%p status=%x Sleep.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    mb->mbxStatus);
			}
		}

		break;

	case MBX_POLL:

		/* Convert tmo seconds to 500 usec tics */
		tmo_local = tmo * 2000;

		/*
		 * Once the adapter is initialized, first poll the
		 * host-attention register for the mailbox attention bit.
		 */
		if (hba->state >= FC_INIT_START) {
			ha_copy =
			    READ_CSR_REG(hba, FC_HA_REG(hba));

			/* Wait for command to complete */
			while (!(ha_copy & HA_MBATT) &&
			    !(mbq->flag & MBQ_COMPLETED)) {
				/* Self-time only if the driver timer is off */
				if (!hba->timer_id && (tmo_local-- == 0)) {
					/* self time */
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_mbox_timeout_msg,
					    "%s: mb=%p Polled.",
					    emlxs_mb_cmd_xlate(mb->
					    mbxCommand), mb);

					hba->flag |= FC_MBOX_TIMEOUT;
					EMLXS_STATE_CHANGE(hba, FC_ERROR);
					emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

					break;
				}

				DELAYUS(500);
				ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
			}

			if (mb->mbxStatus == MBX_TIMEOUT) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_event_msg,
				    "Timeout. %s: mb=%p tmo=%d. Polled.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    tmo);

				break;
			}
		}

		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			mbox = FC_SLIM2_MAILBOX(hba);
			offset = (off_t)((uint64_t)((unsigned long)mbox) -
			    (uint64_t)((unsigned long)slim2p));

			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			word0 = *((volatile uint32_t *)mbox);
			word0 = BE_SWAP32(word0);
		} else {
			mbox = FC_SLIM1_MAILBOX(hba);
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
		}

		/* Wait for command to complete */
		/* (swpmb aliases word0; see declaration above) */
		while ((swpmb->mbxOwner == OWN_CHIP) &&
		    !(mbq->flag & MBQ_COMPLETED)) {
			if (!hba->timer_id && (tmo_local-- == 0)) {
				/* self time */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_timeout_msg,
				    "%s: mb=%p Polled.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

				hba->flag |= FC_MBOX_TIMEOUT;
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

				break;
			}

			DELAYUS(500);

			/* Get first word of mailbox */
			if (hba->flag & FC_SLIM2_MODE) {
				EMLXS_MPDATA_SYNC(
				    hba->sli.sli3.slim2.dma_handle, offset,
				    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
				word0 = *((volatile uint32_t *)mbox);
				word0 = BE_SWAP32(word0);
			} else {
				word0 =
				    READ_SLIM_ADDR(hba,
				    ((volatile uint32_t *)mbox));
			}

		}	/* while */

		if (mb->mbxStatus == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
			    "Timeout. %s: mb=%p tmo=%d. Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);

			break;
		}

		/* copy results back to user */
		if (hba->flag & FC_SLIM2_MODE) {
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);

			BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
			    MAILBOX_CMD_BSIZE);
		} else {
			READ_SLIM_COPY(hba, (uint32_t *)mb,
			    (uint32_t *)mbox, MAILBOX_CMD_WSIZE);
		}

#ifdef MBOX_EXT_SUPPORT
		/* Copy back the mailbox extension data, if any */
		if (mbq->extbuf) {
			uint32_t *mbox_ext =
			    (uint32_t *)((uint8_t *)mbox +
			    MBOX_EXTENSION_OFFSET);
			off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;

			if (hba->flag & FC_SLIM2_MODE) {
				EMLXS_MPDATA_SYNC(
				    hba->sli.sli3.slim2.dma_handle, offset_ext,
				    mbq->extsize, DDI_DMA_SYNC_FORKERNEL);

				BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
				    (uint8_t *)mbq->extbuf, mbq->extsize);
			} else {
				READ_SLIM_COPY(hba,
				    (uint32_t *)mbq->extbuf, mbox_ext,
				    (mbq->extsize / 4));
			}
		}
#endif /* MBOX_EXT_SUPPORT */

		/* Sync the memory buffer */
		if (mbq->bp) {
			mbox_bp = (MATCHMAP *)mbq->bp;
			EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0,
			    mbox_bp->size, DDI_DMA_SYNC_FORKERNEL);
		}

		if (mb->mbxCommand != MBX_DOWN_LOAD &&
		    mb->mbxCommand != MBX_DUMP_MEMORY) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Completed. %s: mb=%p status=%x Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    mb->mbxStatus);
		}

		/* Process the result */
		if (!(mbq->flag & MBQ_PASSTHRU)) {
			if (mbq->mbox_cmpl) {
				(void) (mbq->mbox_cmpl)(hba, mbq);
			}
		}

		/* Clear the attention bit */
		WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);

		/* Clean up the mailbox area */
		emlxs_mb_fini(hba, NULL, mb->mbxStatus);

		break;

	}	/* switch (flag) */

	return (mb->mbxStatus);

} /* emlxs_sli3_issue_mbox_cmd() */
3527 3527
3528 3528
3529 3529 #ifdef SFCT_SUPPORT
3530 3530 static uint32_t
3531 3531 emlxs_sli3_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp,
3532 3532 int channel)
3533 3533 {
3534 3534 emlxs_hba_t *hba = HBA;
3535 3535 emlxs_config_t *cfg = &CFG;
3536 3536 fct_cmd_t *fct_cmd;
3537 3537 stmf_data_buf_t *dbuf;
3538 3538 scsi_task_t *fct_task;
3539 3539 uint32_t did;
3540 3540 IOCBQ *iocbq;
3541 3541 IOCB *iocb;
3542 3542 uint32_t timeout;
3543 3543 uint32_t iotag;
3544 3544 emlxs_node_t *ndlp;
3545 3545 CHANNEL *cp;
3546 3546
3547 3547 dbuf = cmd_sbp->fct_buf;
3548 3548 fct_cmd = cmd_sbp->fct_cmd;
3549 3549 fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
3550 3550 ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
3551 3551 did = fct_cmd->cmd_rportid;
3552 3552
3553 3553 cp = (CHANNEL *)cmd_sbp->channel;
3554 3554
3555 3555 channel = channel;
3556 3556 iocbq = &cmd_sbp->iocbq;
3557 3557 iocb = &iocbq->iocb;
3558 3558
3559 3559 if (cfg[CFG_TIMEOUT_ENABLE].current) {
3560 3560 timeout =
3561 3561 ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
3562 3562 } else {
3563 3563 timeout = 0x80000000;
3564 3564 }
3565 3565
3566 3566 #ifdef FCT_API_TRACE
3567 3567 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_api_msg,
3568 3568 "emlxs_fct_send_fcp_data %p: flgs=%x ioflags=%x dl=%d,%d,%d,%d,%d",
3569 3569 fct_cmd, dbuf->db_flags, ioflags, fct_task->task_cmd_xfer_length,
3570 3570 fct_task->task_nbytes_transferred, dbuf->db_data_size,
3571 3571 fct_task->task_expected_xfer_length, channel);
3572 3572 #endif /* FCT_API_TRACE */
3573 3573
3574 3574
3575 3575 /* Get the iotag by registering the packet */
3576 3576 iotag = emlxs_register_pkt(cp, cmd_sbp);
3577 3577
3578 3578 if (!iotag) {
3579 3579 /* No more command slots available, retry later */
3580 3580 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3581 3581 "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
3582 3582
3583 3583 return (IOERR_NO_RESOURCES);
3584 3584 }
3585 3585
3586 3586 cmd_sbp->ticks =
3587 3587 hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);
3588 3588
3589 3589 /* Initalize iocbq */
3590 3590 iocbq->port = (void *)port;
3591 3591 iocbq->node = (void *)ndlp;
3592 3592
3593 3593
3594 3594 iocbq->channel = (void *)cmd_sbp->channel;
3595 3595
3596 3596 if (emlxs_fct_bde_setup(port, cmd_sbp)) {
3597 3597 /* Unregister the packet */
3598 3598 (void) emlxs_unregister_pkt(cmd_sbp->channel, iotag, 0);
3599 3599
3600 3600 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3601 3601 "Adapter Busy. Unable to setup buffer list. did=%x", did);
3602 3602
3603 3603 return (IOERR_INTERNAL_ERROR);
3604 3604 }
3605 3605 /* Point of no return */
3606 3606
3607 3607 /* Initalize iocb */
3608 3608 iocb->ULPCONTEXT = (uint16_t)fct_cmd->cmd_rxid;
3609 3609 iocb->ULPIOTAG = (uint16_t)iotag;
3610 3610 iocb->ULPRSVDBYTE = ((timeout > 0xff) ? 0 : timeout);
3611 3611 iocb->ULPOWNER = OWN_CHIP;
3612 3612 iocb->ULPCLASS = cmd_sbp->class;
3613 3613
3614 3614 iocb->ULPPU = 1; /* Wd4 is relative offset */
3615 3615 iocb->un.fcpt64.fcpt_Offset = dbuf->db_relative_offset;
3616 3616
3617 3617 if (fct_task->task_flags & TF_WRITE_DATA) {
3618 3618 iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
3619 3619 } else { /* TF_READ_DATA */
3620 3620
3621 3621 iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;
3622 3622
3623 3623 if ((hba->sli_mode == EMLXS_HBA_SLI3_MODE) &&
3624 3624 (dbuf->db_data_size ==
3625 3625 fct_task->task_expected_xfer_length)) {
3626 3626 iocb->ULPCT = 0x1;
3627 3627 /* enable auto-rsp AP feature */
3628 3628 }
3629 3629 }
3630 3630
3631 3631 return (IOERR_SUCCESS);
3632 3632
3633 3633 } /* emlxs_sli3_prep_fct_iocb() */
3634 3634 #endif /* SFCT_SUPPORT */
3635 3635
3636 3636 /* ARGSUSED */
3637 3637 static uint32_t
3638 3638 emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
3639 3639 {
3640 3640 emlxs_hba_t *hba = HBA;
3641 3641 fc_packet_t *pkt;
3642 3642 CHANNEL *cp;
3643 3643 IOCBQ *iocbq;
3644 3644 IOCB *iocb;
3645 3645 NODELIST *ndlp;
3646 3646 uint16_t iotag;
3647 3647 uint32_t did;
3648 3648
3649 3649 pkt = PRIV2PKT(sbp);
3650 3650 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3651 3651 cp = &hba->chan[FC_FCP_RING];
3652 3652
3653 3653 iocbq = &sbp->iocbq;
3654 3654 iocb = &iocbq->iocb;
3655 3655
3656 3656 /* Find target node object */
3657 3657 ndlp = (NODELIST *)iocbq->node;
3658 3658
3659 3659 /* Get the iotag by registering the packet */
3660 3660 iotag = emlxs_register_pkt(cp, sbp);
3661 3661
3662 3662 if (!iotag) {
3663 3663 /*
3664 3664 * No more command slots available, retry later
3665 3665 */
3666 3666 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3667 3667 "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3668 3668
3669 3669 return (FC_TRAN_BUSY);
3670 3670 }
3671 3671
3672 3672 /* Initalize iocbq */
3673 3673 iocbq->port = (void *) port;
3674 3674 iocbq->channel = (void *) cp;
3675 3675
3676 3676 /* Indicate this is a FCP cmd */
3677 3677 iocbq->flag |= IOCB_FCP_CMD;
3678 3678
3679 3679 if (emlxs_bde_setup(port, sbp)) {
3680 3680 /* Unregister the packet */
3681 3681 (void) emlxs_unregister_pkt(cp, iotag, 0);
3682 3682
3683 3683 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3684 3684 "Adapter Busy. Unable to setup buffer list. did=%x", did);
3685 3685
3686 3686 return (FC_TRAN_BUSY);
3687 3687 }
3688 3688 /* Point of no return */
3689 3689
3690 3690 /* Initalize iocb */
3691 3691 iocb->ULPCONTEXT = ndlp->nlp_Rpi;
3692 3692 iocb->ULPIOTAG = iotag;
3693 3693 iocb->ULPRSVDBYTE =
3694 3694 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3695 3695 iocb->ULPOWNER = OWN_CHIP;
3696 3696
3697 3697 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3698 3698 case FC_TRAN_CLASS1:
3699 3699 iocb->ULPCLASS = CLASS1;
3700 3700 break;
3701 3701 case FC_TRAN_CLASS2:
3702 3702 iocb->ULPCLASS = CLASS2;
3703 3703 /* iocb->ULPCLASS = CLASS3; */
3704 3704 break;
3705 3705 case FC_TRAN_CLASS3:
3706 3706 default:
3707 3707 iocb->ULPCLASS = CLASS3;
3708 3708 break;
3709 3709 }
3710 3710
3711 3711 /* if device is FCP-2 device, set the following bit */
3712 3712 /* that says to run the FC-TAPE protocol. */
3713 3713 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
3714 3714 iocb->ULPFCP2RCVY = 1;
3715 3715 }
3716 3716
3717 3717 if (pkt->pkt_datalen == 0) {
3718 3718 iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
3719 3719 } else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
3720 3720 iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
3721 3721 iocb->ULPPU = PARM_READ_CHECK;
3722 3722 iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
3723 3723 } else {
3724 3724 iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
3725 3725 }
3726 3726
3727 3727 return (FC_SUCCESS);
3728 3728
3729 3729 } /* emlxs_sli3_prep_fcp_iocb() */
3730 3730
3731 3731
3732 3732 static uint32_t
3733 3733 emlxs_sli3_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3734 3734 {
3735 3735 emlxs_hba_t *hba = HBA;
3736 3736 fc_packet_t *pkt;
3737 3737 IOCBQ *iocbq;
3738 3738 IOCB *iocb;
3739 3739 CHANNEL *cp;
3740 3740 NODELIST *ndlp;
3741 3741 uint16_t iotag;
3742 3742 uint32_t did;
3743 3743
3744 3744 pkt = PRIV2PKT(sbp);
3745 3745 cp = &hba->chan[FC_IP_RING];
3746 3746 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3747 3747
3748 3748 iocbq = &sbp->iocbq;
3749 3749 iocb = &iocbq->iocb;
3750 3750 ndlp = (NODELIST *)iocbq->node;
3751 3751
3752 3752 /* Get the iotag by registering the packet */
3753 3753 iotag = emlxs_register_pkt(cp, sbp);
3754 3754
3755 3755 if (!iotag) {
3756 3756 /*
3757 3757 * No more command slots available, retry later
3758 3758 */
3759 3759 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3760 3760 "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3761 3761
3762 3762 return (FC_TRAN_BUSY);
3763 3763 }
3764 3764
3765 3765 /* Initalize iocbq */
3766 3766 iocbq->port = (void *) port;
3767 3767 iocbq->channel = (void *) cp;
3768 3768
3769 3769 if (emlxs_bde_setup(port, sbp)) {
3770 3770 /* Unregister the packet */
3771 3771 (void) emlxs_unregister_pkt(cp, iotag, 0);
3772 3772
3773 3773 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3774 3774 "Adapter Busy. Unable to setup buffer list. did=%x", did);
3775 3775
3776 3776 return (FC_TRAN_BUSY);
3777 3777 }
3778 3778 /* Point of no return */
3779 3779
3780 3780 /* Initalize iocb */
3781 3781 iocb->un.xseq64.w5.hcsw.Fctl = 0;
3782 3782
3783 3783 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
3784 3784 iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
3785 3785 }
3786 3786 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3787 3787 iocb->un.xseq64.w5.hcsw.Fctl |= SI;
3788 3788 }
3789 3789
3790 3790 /* network headers */
3791 3791 iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
3792 3792 iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
3793 3793 iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
3794 3794
3795 3795 iocb->ULPIOTAG = iotag;
3796 3796 iocb->ULPRSVDBYTE =
3797 3797 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3798 3798 iocb->ULPOWNER = OWN_CHIP;
3799 3799
3800 3800 if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
3801 3801 HBASTATS.IpBcastIssued++;
3802 3802
3803 3803 iocb->ULPCOMMAND = CMD_XMIT_BCAST64_CN;
3804 3804 iocb->ULPCONTEXT = 0;
3805 3805
3806 3806 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
3807 3807 if (hba->topology != TOPOLOGY_LOOP) {
3808 3808 iocb->ULPCT = 0x1;
3809 3809 }
3810 3810 iocb->ULPCONTEXT = port->vpi;
3811 3811 }
3812 3812 } else {
3813 3813 HBASTATS.IpSeqIssued++;
3814 3814
3815 3815 iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3816 3816 iocb->ULPCONTEXT = ndlp->nlp_Xri;
3817 3817 }
3818 3818
3819 3819 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3820 3820 case FC_TRAN_CLASS1:
3821 3821 iocb->ULPCLASS = CLASS1;
3822 3822 break;
3823 3823 case FC_TRAN_CLASS2:
3824 3824 iocb->ULPCLASS = CLASS2;
3825 3825 break;
3826 3826 case FC_TRAN_CLASS3:
3827 3827 default:
3828 3828 iocb->ULPCLASS = CLASS3;
3829 3829 break;
3830 3830 }
3831 3831
3832 3832 return (FC_SUCCESS);
3833 3833
3834 3834 } /* emlxs_sli3_prep_ip_iocb() */
3835 3835
3836 3836
3837 3837 static uint32_t
3838 3838 emlxs_sli3_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3839 3839 {
3840 3840 emlxs_hba_t *hba = HBA;
3841 3841 fc_packet_t *pkt;
3842 3842 IOCBQ *iocbq;
3843 3843 IOCB *iocb;
3844 3844 CHANNEL *cp;
3845 3845 uint16_t iotag;
3846 3846 uint32_t did;
3847 3847 uint32_t cmd;
3848 3848
3849 3849 pkt = PRIV2PKT(sbp);
3850 3850 cp = &hba->chan[FC_ELS_RING];
3851 3851 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3852 3852
3853 3853 iocbq = &sbp->iocbq;
3854 3854 iocb = &iocbq->iocb;
3855 3855
3856 3856
3857 3857 /* Get the iotag by registering the packet */
3858 3858 iotag = emlxs_register_pkt(cp, sbp);
3859 3859
3860 3860 if (!iotag) {
3861 3861 /*
3862 3862 * No more command slots available, retry later
3863 3863 */
3864 3864 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3865 3865 "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
3866 3866
3867 3867 return (FC_TRAN_BUSY);
3868 3868 }
3869 3869 /* Initalize iocbq */
3870 3870 iocbq->port = (void *) port;
3871 3871 iocbq->channel = (void *) cp;
3872 3872
3873 3873 if (emlxs_bde_setup(port, sbp)) {
3874 3874 /* Unregister the packet */
3875 3875 (void) emlxs_unregister_pkt(cp, iotag, 0);
3876 3876
3877 3877 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3878 3878 "Adapter Busy. Unable to setup buffer list. did=%x", did);
3879 3879
3880 3880 return (FC_TRAN_BUSY);
3881 3881 }
3882 3882 /* Point of no return */
3883 3883
3884 3884 /* Initalize iocb */
3885 3885 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3886 3886 /* ELS Response */
3887 3887 iocb->ULPCONTEXT = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
3888 3888 iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
3889 3889 } else {
3890 3890 /* ELS Request */
3891 3891 iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
3892 3892 iocb->ULPCONTEXT =
3893 3893 (did == BCAST_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
3894 3894 iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
3895 3895
3896 3896 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
3897 3897 if (hba->topology != TOPOLOGY_LOOP) {
3898 3898 cmd = *((uint32_t *)pkt->pkt_cmd);
3899 3899 cmd &= ELS_CMD_MASK;
3900 3900
3901 3901 if ((cmd == ELS_CMD_FLOGI) ||
3902 3902 (cmd == ELS_CMD_FDISC)) {
3903 3903 iocb->ULPCT = 0x2;
3904 3904 } else {
3905 3905 iocb->ULPCT = 0x1;
3906 3906 }
3907 3907 }
3908 3908 iocb->ULPCONTEXT = port->vpi;
3909 3909 }
3910 3910 }
3911 3911 iocb->ULPIOTAG = iotag;
3912 3912 iocb->ULPRSVDBYTE =
3913 3913 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3914 3914 iocb->ULPOWNER = OWN_CHIP;
3915 3915
3916 3916 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3917 3917 case FC_TRAN_CLASS1:
3918 3918 iocb->ULPCLASS = CLASS1;
3919 3919 break;
3920 3920 case FC_TRAN_CLASS2:
3921 3921 iocb->ULPCLASS = CLASS2;
3922 3922 break;
3923 3923 case FC_TRAN_CLASS3:
3924 3924 default:
3925 3925 iocb->ULPCLASS = CLASS3;
3926 3926 break;
3927 3927 }
3928 3928 sbp->class = iocb->ULPCLASS;
3929 3929
3930 3930 return (FC_SUCCESS);
3931 3931
3932 3932 } /* emlxs_sli3_prep_els_iocb() */
3933 3933
3934 3934
/*
 * emlxs_sli3_prep_ct_iocb()
 *
 * Prepare a CT (Common Transport / generic services) IOCB on the CT ring.
 * The packet is first registered to obtain an iotag, then its buffer
 * descriptor list is built, and finally the IOCB command word, frame
 * control bits and class of service are filled in from the packet's
 * FC header.
 *
 * Returns FC_SUCCESS on success, or FC_TRAN_BUSY when no iotag or
 * buffer resources are available (the caller may retry later).
 */
static uint32_t
emlxs_sli3_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	NODELIST *ndlp;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	cp = &hba->chan[FC_CT_RING];

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	ndlp = (NODELIST *)iocbq->node;

	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, sbp);

	if (!iotag) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	if (emlxs_bde_setup(port, sbp)) {
		/* BDE setup failed; release the iotag before bailing out */
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cp, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (FC_TRAN_BUSY);
	}

	/* Point of no return */

	/* Initialize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	/* Fill in rest of iocb */
	iocb->un.genreq64.w5.hcsw.Fctl = LA;

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
	}
	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
	}

	/*
	 * Initialize iocb: an OUTBOUND transfer here means we are sending
	 * a CT response on an existing exchange (rx_id); otherwise we are
	 * originating a CT request addressed by the node's RPI.
	 */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* CT Response */
		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
		iocb->un.genreq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
	} else {
		/* CT Request */
		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
		iocb->un.genreq64.w5.hcsw.Dfctl = 0;
		iocb->ULPCONTEXT = ndlp->nlp_Rpi;
	}

	iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
	iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;

	iocb->ULPIOTAG = iotag;
	/* Timeouts that do not fit in one byte are treated as "no timeout" */
	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	/* Hand ownership of this IOCB to the adapter */
	iocb->ULPOWNER = OWN_CHIP;

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS1:
		iocb->ULPCLASS = CLASS1;
		break;
	case FC_TRAN_CLASS2:
		iocb->ULPCLASS = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		iocb->ULPCLASS = CLASS3;
		break;
	}

	return (FC_SUCCESS);

} /* emlxs_sli3_prep_ct_iocb() */
4031 4031
4032 4032
#ifdef SFCT_SUPPORT
/*
 * emlxs_fct_bde_setup()
 *
 * Build the buffer descriptor list for a target-mode (FCT) I/O.
 * First verifies that the command's scatter/gather list is large enough
 * to cover the full data size, counting how many entries are actually
 * needed, then dispatches to the SLI-2 or SLI-3 specific setup routine.
 *
 * Returns 0 on success, 1 on failure (insufficient scatter/gather
 * buffers or a failure in the mode-specific setup).
 */
static uint32_t
emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	uint32_t sgllen = 1;
	uint32_t rval;
	uint32_t size;
	uint32_t count;
	uint32_t resid;
	struct stmf_sglist_ent *sgl;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/* Walk the sglist until the data size is covered or entries run out */
	for (sgllen = 0; sgllen < count && resid > 0; sgllen++) {
		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	if (resid > 0) {
		/* The sglist cannot hold the whole transfer; reject the I/O */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "emlxs_fct_bde_setup: Not enough scatter gather buffers "
		    " size=%d resid=%d count=%d",
		    size, resid, count);
		return (1);
	}

	/*
	 * Use the SLI-2 path when the adapter is not in SLI-3 mode or when
	 * the entry count exceeds what a SLI-3 BDE list can carry.
	 */
	if ((hba->sli_mode < EMLXS_HBA_SLI3_MODE) ||
	    (sgllen > SLI3_MAX_BDE)) {
		rval = emlxs_sli2_fct_bde_setup(port, sbp);
	} else {
		rval = emlxs_sli3_fct_bde_setup(port, sbp);
	}

	return (rval);

} /* emlxs_fct_bde_setup() */
#endif /* SFCT_SUPPORT */
4074 4074
4075 4075 static uint32_t
4076 4076 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
4077 4077 {
4078 4078 uint32_t rval;
4079 4079 emlxs_hba_t *hba = HBA;
4080 4080
4081 4081 if (hba->sli_mode < EMLXS_HBA_SLI3_MODE) {
4082 4082 rval = emlxs_sli2_bde_setup(port, sbp);
4083 4083 } else {
4084 4084 rval = emlxs_sli3_bde_setup(port, sbp);
4085 4085 }
4086 4086
4087 4087 return (rval);
4088 4088
4089 4089 } /* emlxs_bde_setup() */
4090 4090
4091 4091
/*
 * emlxs_sli3_poll_intr()
 *
 * Polled-mode interrupt service: busy-wait (no delay, no timeout) until
 * the requested attention bit is seen in the host attention register,
 * then fetch and clear the full attention state under EMLXS_PORT_LOCK
 * and process it.
 *
 * NOTE(review): the wait loop spins without bound; the caller is
 * presumably guaranteeing that att_bit will eventually assert.
 */
static void
emlxs_sli3_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
{
	uint32_t ha_copy;

	/*
	 * Polling a specific attention bit.
	 */
	for (;;) {
		ha_copy = emlxs_check_attention(hba);

		if (ha_copy & att_bit) {
			break;
		}

	}

	/* Re-read (and clear) all attention conditions under the port lock */
	mutex_enter(&EMLXS_PORT_LOCK);
	ha_copy = emlxs_get_attention(hba, -1);
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Process the attentions */
	emlxs_proc_attention(hba, ha_copy);

	return;

} /* emlxs_sli3_poll_intr() */
4119 4119
#ifdef MSI_SUPPORT
/*
 * emlxs_sli3_msi_intr()
 *
 * MSI/MSI-X (and fallback fixed) interrupt handler. arg1 is the HBA,
 * arg2 encodes the MSI message id. For fixed interrupts the attention
 * state is fetched and processed directly; for MSI the handler
 * serializes on the per-message interrupt lock, optionally masks the
 * adapter's interrupt enables while servicing message 0 on Zephyr
 * chips, then processes the attention bits for that message.
 *
 * Returns DDI_INTR_CLAIMED / DDI_INTR_UNCLAIMED.
 */
static uint32_t
emlxs_sli3_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint16_t msgid;
	uint32_t hc_copy;
	uint32_t ha_copy;
	uint32_t restore = 0;

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	 * "emlxs_sli3_msi_intr: arg1=%p arg2=%p", arg1, arg2);
	 */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (hba->flag & FC_OFFLINE_MODE) {
			mutex_exit(&EMLXS_PORT_LOCK);

			/* SBUS interrupts cannot be left unclaimed */
			if (hba->bus_type == SBUS_FC) {
				return (DDI_INTR_CLAIMED);
			} else {
				return (DDI_INTR_UNCLAIMED);
			}
		}

		/* Get host attention bits */
		ha_copy = emlxs_get_attention(hba, -1);

		if (ha_copy == 0) {
			/*
			 * Only report UNCLAIMED after two consecutive
			 * empty-attention interrupts.
			 */
			if (hba->intr_unclaimed) {
				mutex_exit(&EMLXS_PORT_LOCK);
				return (DDI_INTR_UNCLAIMED);
			}

			hba->intr_unclaimed = 1;
		} else {
			hba->intr_unclaimed = 0;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Process the interrupt */
		emlxs_proc_attention(hba, ha_copy);

		return (DDI_INTR_CLAIMED);
	}

	/* DDI_INTR_TYPE_MSI  */
	/* DDI_INTR_TYPE_MSIX */

	/* Get MSI message id */
	msgid = (uint16_t)((unsigned long)arg2);

	/* Validate the message id; out-of-range ids fall back to message 0 */
	if (msgid >= hba->intr_count) {
		msgid = 0;
	}

	mutex_enter(&EMLXS_INTR_LOCK(msgid));

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check if adapter is offline */
	if (hba->flag & FC_OFFLINE_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);
		mutex_exit(&EMLXS_INTR_LOCK(msgid));

		/* Always claim an MSI interrupt */
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * Disable interrupts associated with this msgid while servicing.
	 * Only done for message 0 on Zephyr chips.
	 */
	if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
		hc_copy = hba->sli.sli3.hc_copy & ~hba->intr_mask;
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hc_copy);
		restore = 1;
	}

	/* Get host attention bits */
	ha_copy = emlxs_get_attention(hba, msgid);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Process the interrupt */
	emlxs_proc_attention(hba, ha_copy);

	/* Restore interrupts */
	if (restore) {
		mutex_enter(&EMLXS_PORT_LOCK);
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_INTR_LOCK(msgid));

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli3_msi_intr() */
#endif /* MSI_SUPPORT */
4230 4230
4231 4231
4232 4232 static int
4233 4233 emlxs_sli3_intx_intr(char *arg)
4234 4234 {
4235 4235 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4236 4236 uint32_t ha_copy = 0;
4237 4237
4238 4238 mutex_enter(&EMLXS_PORT_LOCK);
4239 4239
4240 4240 if (hba->flag & FC_OFFLINE_MODE) {
4241 4241 mutex_exit(&EMLXS_PORT_LOCK);
4242 4242
4243 4243 if (hba->bus_type == SBUS_FC) {
4244 4244 return (DDI_INTR_CLAIMED);
4245 4245 } else {
4246 4246 return (DDI_INTR_UNCLAIMED);
4247 4247 }
4248 4248 }
4249 4249
4250 4250 /* Get host attention bits */
4251 4251 ha_copy = emlxs_get_attention(hba, -1);
4252 4252
4253 4253 if (ha_copy == 0) {
4254 4254 if (hba->intr_unclaimed) {
4255 4255 mutex_exit(&EMLXS_PORT_LOCK);
4256 4256 return (DDI_INTR_UNCLAIMED);
4257 4257 }
4258 4258
4259 4259 hba->intr_unclaimed = 1;
4260 4260 } else {
4261 4261 hba->intr_unclaimed = 0;
4262 4262 }
4263 4263
4264 4264 mutex_exit(&EMLXS_PORT_LOCK);
4265 4265
4266 4266 /* Process the interrupt */
4267 4267 emlxs_proc_attention(hba, ha_copy);
4268 4268
4269 4269 return (DDI_INTR_CLAIMED);
4270 4270
4271 4271 } /* emlxs_sli3_intx_intr() */
4272 4272
4273 4273
/* EMLXS_PORT_LOCK must be held when call this routine */
/*
 * emlxs_get_attention()
 *
 * Read the host attention (HA) state for the given interrupt source,
 * filter out attention bits whose corresponding enables are masked off
 * in the cached host control (HC) register copy, acknowledge the
 * serviced bits back to the adapter, and return the accumulated
 * attention bits to the caller.
 *
 * msgid semantics:
 *    0  - default MSI message (HA register read, MSI non-default
 *         attention bits filtered out)
 *   -1  - polled or fixed interrupt (raw HA register read)
 *  other - mapped MSI message (attention bits taken from intr_map[])
 */
static uint32_t
emlxs_get_attention(emlxs_hba_t *hba, int32_t msgid)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint32_t ha_copy = 0;
	uint32_t ha_copy2;
	uint32_t mask = hba->sli.sli3.hc_copy;

#ifdef MSI_SUPPORT

read_ha_register:

	/* Check for default MSI interrupt */
	if (msgid == 0) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

		/* Filter out MSI non-default attention bits */
		ha_copy2 &= ~(hba->intr_cond);
	}

	/* Check for polled or fixed type interrupt */
	else if (msgid == -1) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
	}

	/* Otherwise, assume a mapped MSI interrupt */
	else {
		/* Convert MSI msgid to mapped attention bits */
		ha_copy2 = hba->intr_map[msgid];
	}

#else /* !MSI_SUPPORT */

	/* Read host attention register to determine interrupt source */
	ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

#endif /* MSI_SUPPORT */

	/* Check if Hardware error interrupt is enabled */
	if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
		ha_copy2 &= ~HA_ERATT;
	}

	/* Check if link interrupt is enabled */
	if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
		ha_copy2 &= ~HA_LATT;
	}

	/* Check if Mailbox interrupt is enabled */
	if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
		ha_copy2 &= ~HA_MBATT;
	}

	/* Check if ring0 interrupt is enabled */
	if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
		ha_copy2 &= ~HA_R0ATT;
	}

	/* Check if ring1 interrupt is enabled */
	if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
		ha_copy2 &= ~HA_R1ATT;
	}

	/* Check if ring2 interrupt is enabled */
	if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
		ha_copy2 &= ~HA_R2ATT;
	}

	/* Check if ring3 interrupt is enabled */
	if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
		ha_copy2 &= ~HA_R3ATT;
	}

	/* Accumulate attention bits */
	ha_copy |= ha_copy2;

	/*
	 * Clear attentions except for error, link, and autoclear(MSIX).
	 * HA_ERATT/HA_LATT are acknowledged later by their specific
	 * handlers.
	 */
	ha_copy2 &= ~(HA_ERATT | HA_LATT);	/* | hba->intr_autoClear */

	if (ha_copy2) {
		WRITE_CSR_REG(hba, FC_HA_REG(hba), ha_copy2);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	return (ha_copy);

} /* emlxs_get_attention() */
4370 4370
4371 4371
/*
 * emlxs_proc_attention()
 *
 * Dispatch a set of (pre-filtered) host attention bits to their
 * handlers: adapter error, mailbox completion, link attention, and the
 * four ring-event attentions. An adapter error (HA_ERATT) aborts
 * further processing. Does nothing before the adapter reaches
 * FC_WARM_START or when no attention bits are set.
 */
static void
emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* ha_copy should be pre-filtered */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "emlxs_proc_attention: ha_copy=%x", ha_copy);
	 */

	if (hba->state < FC_WARM_START) {
		return;
	}

	if (!ha_copy) {
		return;
	}

	if (hba->bus_type == SBUS_FC) {
		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
	}

	/* Adapter error */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
		return;
	}

	/* Mailbox interrupt */
	if (ha_copy & HA_MBATT) {
		HBASTATS.IntrEvent[5]++;
		(void) emlxs_handle_mb_event(hba);
	}

	/* Link Attention interrupt */
	if (ha_copy & HA_LATT) {
		HBASTATS.IntrEvent[4]++;
		emlxs_sli3_handle_link_event(hba);
	}

	/* event on ring 0 - FCP Ring */
	if (ha_copy & HA_R0ATT) {
		HBASTATS.IntrEvent[0]++;
		emlxs_sli3_handle_ring_event(hba, 0, ha_copy);
	}

	/* event on ring 1 - IP Ring */
	if (ha_copy & HA_R1ATT) {
		HBASTATS.IntrEvent[1]++;
		emlxs_sli3_handle_ring_event(hba, 1, ha_copy);
	}

	/* event on ring 2 - ELS Ring */
	if (ha_copy & HA_R2ATT) {
		HBASTATS.IntrEvent[2]++;
		emlxs_sli3_handle_ring_event(hba, 2, ha_copy);
	}

	/* event on ring 3 - CT Ring */
	if (ha_copy & HA_R3ATT) {
		HBASTATS.IntrEvent[3]++;
		emlxs_sli3_handle_ring_event(hba, 3, ha_copy);
	}

	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), SBUS_STAT_IP);
	}

	/* Set heartbeat flag to show activity */
	hba->heartbeat_flag = 1;

#ifdef FMA_SUPPORT
	if (hba->bus_type == SBUS_FC) {
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
	}
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_proc_attention() */
4458 4458
4459 4459
/*
 * emlxs_handle_ff_error()
 *
 * Description: Processes a FireFly error
 * Runs at Interrupt level
 *
 * Reads the host status register, acknowledges the chip error, and:
 *  - on HS_FFER1, takes the adapter offline and spins up to ~5 minutes
 *    (300 x 1 s) waiting for the bit to clear; if it never clears, the
 *    adapter is shut down;
 *  - on HS_OVERTEMP, records the temperature and shuts the adapter down;
 *  - otherwise logs the detailed status words and either restarts
 *    (HS_FFER6) or shuts down the adapter.
 */
static void
emlxs_handle_ff_error(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t status1;
	uint32_t status2;
	int i = 0;

	/* do what needs to be done, get error from STATUS REGISTER */
	status = READ_CSR_REG(hba, FC_HS_REG(hba));

	/* Clear Chip error bit */
	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_ERATT);

	/* If HS_FFER1 is set, then wait until the HS_FFER1 bit clears */
	if (status & HS_FFER1) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "HS_FFER1 received");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);
		(void) emlxs_offline(hba);
		while ((status & HS_FFER1) && (i < 300)) {
			status =
			    READ_CSR_REG(hba, FC_HS_REG(hba));
			DELAYMS(1000);
			i++;
		}
	}

	if (i == 300) {
		/* 5 minutes is up, shutdown HBA */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "HS_FFER1 clear timeout");

		EMLXS_STATE_CHANGE(hba, FC_ERROR);
		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);

		goto done;
	}

	/*
	 * NOTE(review): this message is logged even when HS_FFER1 was never
	 * set (i stays 0 and the wait loop above is skipped) -- confirm
	 * whether that is intentional.
	 */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
	    "HS_FFER1 cleared");

	if (status & HS_OVERTEMP) {
		status1 =
		    READ_SLIM_ADDR(hba,
		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xb0));

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Maximum adapter temperature exceeded (%d °C).", status1);

		hba->temperature = status1;
		hba->flag |= FC_OVERTEMP_EVENT;

		EMLXS_STATE_CHANGE(hba, FC_ERROR);
		emlxs_thread_spawn(hba, emlxs_shutdown_thread,
		    NULL, NULL);

	} else {
		/* Read the extended error status words from SLIM */
		status1 =
		    READ_SLIM_ADDR(hba,
		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xa8));
		status2 =
		    READ_SLIM_ADDR(hba,
		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xac));

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Host Error Attention: "
		    "status=0x%x status1=0x%x status2=0x%x",
		    status, status1, status2);

		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		/* HS_FFER6 is recoverable via restart; others shut down */
		if (status & HS_FFER6) {
			emlxs_thread_spawn(hba, emlxs_restart_thread,
			    NULL, NULL);
		} else {
			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
			    NULL, NULL);
		}
	}

done:
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_handle_ff_error() */
4559 4559
4560 4560
/*
 * emlxs_sli3_handle_link_event()
 *
 * Description: Process a Link Attention.
 *
 * Declares the link down, then issues a READ_LA mailbox command
 * (MBX_NOWAIT) to retrieve the link attention details and acknowledges
 * HA_LATT in the host attention register. The mailbox buffer is
 * returned to the pool on any path where the command was not accepted
 * by the mailbox layer.
 */
static void
emlxs_sli3_handle_link_event(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOXQ *mbq;
	int rc;

	HBASTATS.LinkEvent++;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg, "event=%x",
	    HBASTATS.LinkEvent);

	/* Make sure link is declared down */
	emlxs_linkdown(hba);


	/* Get a buffer which will be used for mailbox commands */
	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
		/* Get link attention message */
		if (emlxs_mb_read_la(hba, mbq) == 0) {
			rc = emlxs_sli3_issue_mbox_cmd(hba, mbq,
			    MBX_NOWAIT, 0);
			/*
			 * MBX_BUSY/MBX_SUCCESS mean the mailbox layer now
			 * owns mbq; otherwise return it to the pool.
			 */
			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
				emlxs_mem_put(hba, MEM_MBOX,
				    (void *)mbq);
			}

			mutex_enter(&EMLXS_PORT_LOCK);


			/*
			 * Clear Link Attention in HA REG
			 */
			WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_LATT);

#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

			mutex_exit(&EMLXS_PORT_LOCK);
		} else {
			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		}
	}

} /* emlxs_sli3_handle_link_event() */
4613 4613
4614 4614
/*
 * emlxs_sli3_handle_ring_event()
 *
 * Description: Process a Ring Attention.
 *
 * Drains the response ring for ring_no: for each host-owned response
 * IOCB, matches it (by iotag) to a registered packet, completes it
 * either inline (polled/driver-allocated packets, unsolicited entries)
 * or by queueing it to the channel's interrupt thread, then updates the
 * adapter's response get index and, if needed, acknowledges the ring
 * attention and kicks the command ring.
 */
static void
emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
    uint32_t ha_copy)
{
	emlxs_port_t *port = &PPORT;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	CHANNEL *cp;
	RING *rp;
	IOCB *entry;
	IOCBQ *iocbq;
	IOCBQ local_iocbq;
	PGP *pgp;
	uint32_t count;
	volatile uint32_t chipatt;
	void *ioa2;
	uint32_t reg;
	uint32_t channel_no;
	off_t offset;
	IOCBQ *rsp_head = NULL;
	IOCBQ *rsp_tail = NULL;
	emlxs_buf_t *sbp = NULL;

	count = 0;
	rp = &hba->sli.sli3.ring[ring_no];
	cp = rp->channelp;
	channel_no = cp->channelno;

	/*
	 * Isolate this ring's host attention bits
	 * This makes all ring attention bits equal
	 * to Ring0 attention bits
	 */
	reg = (ha_copy >> (ring_no * 4)) & 0x0f;

	/*
	 * Gather iocb entries off response ring.
	 * Ensure entry is owned by the host.
	 */
	pgp = (PGP *)&slim2p->mbx.us.s2.port[ring_no];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx)) -
	    (uint64_t)((unsigned long)slim2p));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);

	/* While ring is not empty */
	while (rp->fc_rspidx != rp->fc_port_rspidx) {
		HBASTATS.IocbReceived[channel_no]++;

		/* Get the next response ring iocb */
		entry =
		    (IOCB *)(((char *)rp->fc_rspringaddr +
		    (rp->fc_rspidx * hba->sli.sli3.iocb_rsp_size)));

		/* DMA sync the response ring iocb for the adapter */
		offset = (off_t)((uint64_t)((unsigned long)entry)
		    - (uint64_t)((unsigned long)slim2p));
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
		    hba->sli.sli3.iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);

		count++;

		/* Copy word6 and word7 to local iocb for now */
		iocbq = &local_iocbq;

		BE_SWAP32_BCOPY((uint8_t *)entry + (sizeof (uint32_t) * 6),
		    (uint8_t *)iocbq + (sizeof (uint32_t) * 6),
		    (sizeof (uint32_t) * 2));

		/* when LE is not set, entire Command has not been received */
		if (!iocbq->iocb.ULPLE) {
			/* This should never happen */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
			    "ulpLE is not set. "
			    "ring=%d iotag=%x cmd=%x status=%x",
			    channel_no, iocbq->iocb.ULPIOTAG,
			    iocbq->iocb.ULPCOMMAND, iocbq->iocb.ULPSTATUS);

			goto next;
		}

		/*
		 * Only completions for registered (iotag-bearing) command
		 * types are looked up; anything else is treated as an
		 * unsolicited/unmatched entry (sbp = NULL).
		 */
		switch (iocbq->iocb.ULPCOMMAND) {
#ifdef SFCT_SUPPORT
		case CMD_CLOSE_XRI_CX:
		case CMD_CLOSE_XRI_CN:
		case CMD_ABORT_XRI_CX:
			if (!port->tgt_mode) {
				sbp = NULL;
				break;
			}

			sbp =
			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
			break;
#endif /* SFCT_SUPPORT */

			/* Ring 0 registered commands */
		case CMD_FCP_ICMND_CR:
		case CMD_FCP_ICMND_CX:
		case CMD_FCP_IREAD_CR:
		case CMD_FCP_IREAD_CX:
		case CMD_FCP_IWRITE_CR:
		case CMD_FCP_IWRITE_CX:
		case CMD_FCP_ICMND64_CR:
		case CMD_FCP_ICMND64_CX:
		case CMD_FCP_IREAD64_CR:
		case CMD_FCP_IREAD64_CX:
		case CMD_FCP_IWRITE64_CR:
		case CMD_FCP_IWRITE64_CX:
#ifdef SFCT_SUPPORT
		case CMD_FCP_TSEND_CX:
		case CMD_FCP_TSEND64_CX:
		case CMD_FCP_TRECEIVE_CX:
		case CMD_FCP_TRECEIVE64_CX:
		case CMD_FCP_TRSP_CX:
		case CMD_FCP_TRSP64_CX:
#endif /* SFCT_SUPPORT */

			/* Ring 1 registered commands */
		case CMD_XMIT_BCAST_CN:
		case CMD_XMIT_BCAST_CX:
		case CMD_XMIT_SEQUENCE_CX:
		case CMD_XMIT_SEQUENCE_CR:
		case CMD_XMIT_BCAST64_CN:
		case CMD_XMIT_BCAST64_CX:
		case CMD_XMIT_SEQUENCE64_CX:
		case CMD_XMIT_SEQUENCE64_CR:
		case CMD_CREATE_XRI_CR:
		case CMD_CREATE_XRI_CX:

			/* Ring 2 registered commands */
		case CMD_ELS_REQUEST_CR:
		case CMD_ELS_REQUEST_CX:
		case CMD_XMIT_ELS_RSP_CX:
		case CMD_ELS_REQUEST64_CR:
		case CMD_ELS_REQUEST64_CX:
		case CMD_XMIT_ELS_RSP64_CX:

			/* Ring 3 registered commands */
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:

			sbp =
			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
			break;

		default:
			sbp = NULL;
		}

		/* If packet is stale, then drop it. */
		if (sbp == STALE_PACKET) {
			cp->hbaCmplCmd_sbp++;
			/* Copy entry to the local iocbq */
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
			    "channelno=%d iocb=%p cmd=%x status=%x "
			    "error=%x iotag=%x context=%x info=%x",
			    channel_no, iocbq, (uint8_t)iocbq->iocb.ULPCOMMAND,
			    iocbq->iocb.ULPSTATUS,
			    (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
			    (uint16_t)iocbq->iocb.ULPIOTAG,
			    (uint16_t)iocbq->iocb.ULPCONTEXT,
			    (uint8_t)iocbq->iocb.ULPRSVDBYTE);

			goto next;
		}

		/*
		 * If a packet was found, then queue the packet's
		 * iocb for deferred processing
		 */
		else if (sbp) {
#ifdef SFCT_SUPPORT
			fct_cmd_t *fct_cmd;
			emlxs_buf_t *cmd_sbp;

			fct_cmd = sbp->fct_cmd;
			if (fct_cmd) {
				cmd_sbp =
				    (emlxs_buf_t *)fct_cmd->cmd_fca_private;
				mutex_enter(&cmd_sbp->fct_mtx);
				EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp,
				    EMLXS_FCT_IOCB_COMPLETE);
				mutex_exit(&cmd_sbp->fct_mtx);
			}
#endif /* SFCT_SUPPORT */
			cp->hbaCmplCmd_sbp++;
			atomic_dec_32(&hba->io_active);

			/* Copy entry to sbp's iocbq */
			iocbq = &sbp->iocbq;
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			iocbq->next = NULL;

			/*
			 * If this is NOT a polled command completion
			 * or a driver allocated pkt, then defer pkt
			 * completion.
			 */
			if (!(sbp->pkt_flags &
			    (PACKET_POLLED | PACKET_ALLOCATED))) {
				/* Add the IOCB to the local list */
				if (!rsp_head) {
					rsp_head = iocbq;
				} else {
					rsp_tail->next = iocbq;
				}

				rsp_tail = iocbq;

				goto next;
			}
		} else {
			cp->hbaCmplCmd++;
			/* Copy entry to the local iocbq */
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			iocbq->next = NULL;
			iocbq->bp = NULL;
			iocbq->port = &PPORT;
			iocbq->channel = cp;
			iocbq->node = NULL;
			iocbq->sbp = NULL;
			iocbq->flag = 0;
		}

		/* process the channel event now */
		emlxs_proc_channel_event(hba, cp, iocbq);

next:
		/* Increment the driver's local response get index */
		if (++rp->fc_rspidx >= rp->fc_numRiocb) {
			rp->fc_rspidx = 0;
		}

	}	/* while (TRUE) */

	/* Hand any deferred completions to the channel's interrupt thread */
	if (rsp_head) {
		mutex_enter(&cp->rsp_lock);
		if (cp->rsp_head == NULL) {
			cp->rsp_head = rsp_head;
			cp->rsp_tail = rsp_tail;
		} else {
			cp->rsp_tail->next = rsp_head;
			cp->rsp_tail = rsp_tail;
		}
		mutex_exit(&cp->rsp_lock);

		emlxs_thread_trigger2(&cp->intr_thread, emlxs_proc_channel, cp);
	}

	/* Check if at least one response entry was processed */
	if (count) {
		/* Update response get index for the adapter */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channel_no].rspGetInx
			    = BE_SWAP32(rp->fc_rspidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channel_no].rspGetInx))
			    - (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset + (((channel_no * 2) +
			    1) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_rspidx);
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */
		}

		if (reg & HA_R0RE_REQ) {
			/* HBASTATS.chipRingFree++; */

			mutex_enter(&EMLXS_PORT_LOCK);

			/* Tell the adapter we serviced the ring */
			chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
			    (channel_no * 4));
			WRITE_CSR_REG(hba, FC_CA_REG(hba), chipatt);

#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

			mutex_exit(&EMLXS_PORT_LOCK);
		}
	}

	if ((reg & HA_R0CE_RSP) || hba->channel_tx_count) {
		/* HBASTATS.hostRingFree++; */

		/* Cmd ring may be available. Try sending more iocbs */
		emlxs_sli3_issue_iocb_cmd(hba, cp, 0);
	}

	/* HBASTATS.ringEvent++; */

	return;

} /* emlxs_sli3_handle_ring_event() */
4937 4937
4938 4938
4939 4939 extern int
4940 4940 emlxs_handle_rcv_seq(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
4941 4941 {
4942 4942 emlxs_port_t *port = &PPORT;
4943 4943 IOCB *iocb;
4944 4944 RING *rp;
4945 4945 MATCHMAP *mp = NULL;
4946 4946 uint64_t bdeAddr;
4947 4947 uint32_t vpi = 0;
4948 4948 uint32_t channelno;
4949 4949 uint32_t size = 0;
4950 4950 uint32_t *RcvError;
4951 4951 uint32_t *RcvDropped;
4952 4952 uint32_t *UbPosted;
4953 4953 emlxs_msg_t *dropped_msg;
4954 4954 char error_str[64];
4955 4955 uint32_t buf_type;
4956 4956 uint32_t *word;
4957 4957 uint32_t hbq_id;
4958 4958
4959 4959 channelno = cp->channelno;
4960 4960 rp = &hba->sli.sli3.ring[channelno];
4961 4961
4962 4962 iocb = &iocbq->iocb;
4963 4963 word = (uint32_t *)iocb;
4964 4964
4965 4965 switch (channelno) {
4966 4966 #ifdef SFCT_SUPPORT
4967 4967 case FC_FCT_RING:
4968 4968 HBASTATS.FctRingEvent++;
4969 4969 RcvError = &HBASTATS.FctRingError;
4970 4970 RcvDropped = &HBASTATS.FctRingDropped;
4971 4971 UbPosted = &HBASTATS.FctUbPosted;
4972 4972 dropped_msg = &emlxs_fct_detail_msg;
4973 4973 buf_type = MEM_FCTBUF;
4974 4974 break;
4975 4975 #endif /* SFCT_SUPPORT */
4976 4976
4977 4977 case FC_IP_RING:
4978 4978 HBASTATS.IpRcvEvent++;
4979 4979 RcvError = &HBASTATS.IpDropped;
4980 4980 RcvDropped = &HBASTATS.IpDropped;
4981 4981 UbPosted = &HBASTATS.IpUbPosted;
4982 4982 dropped_msg = &emlxs_unsol_ip_dropped_msg;
4983 4983 buf_type = MEM_IPBUF;
4984 4984 break;
4985 4985
4986 4986 case FC_ELS_RING:
4987 4987 HBASTATS.ElsRcvEvent++;
4988 4988 RcvError = &HBASTATS.ElsRcvError;
4989 4989 RcvDropped = &HBASTATS.ElsRcvDropped;
4990 4990 UbPosted = &HBASTATS.ElsUbPosted;
4991 4991 dropped_msg = &emlxs_unsol_els_dropped_msg;
4992 4992 buf_type = MEM_ELSBUF;
4993 4993 break;
4994 4994
4995 4995 case FC_CT_RING:
4996 4996 HBASTATS.CtRcvEvent++;
4997 4997 RcvError = &HBASTATS.CtRcvError;
4998 4998 RcvDropped = &HBASTATS.CtRcvDropped;
4999 4999 UbPosted = &HBASTATS.CtUbPosted;
5000 5000 dropped_msg = &emlxs_unsol_ct_dropped_msg;
5001 5001 buf_type = MEM_CTBUF;
5002 5002 break;
5003 5003
5004 5004 default:
5005 5005 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
5006 5006 "channel=%d cmd=%x %s %x %x %x %x",
5007 5007 channelno, iocb->ULPCOMMAND,
5008 5008 emlxs_state_xlate(iocb->ULPSTATUS), word[4], word[5],
5009 5009 word[6], word[7]);
5010 5010 return (1);
5011 5011 }
5012 5012
5013 5013 if (iocb->ULPSTATUS) {
5014 5014 if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
5015 5015 (iocb->un.grsp.perr.statLocalError ==
5016 5016 IOERR_RCV_BUFFER_TIMEOUT)) {
5017 5017 (void) strcpy(error_str, "Out of posted buffers:");
5018 5018 } else if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
5019 5019 (iocb->un.grsp.perr.statLocalError ==
5020 5020 IOERR_RCV_BUFFER_WAITING)) {
5021 5021 (void) strcpy(error_str, "Buffer waiting:");
5022 5022 goto done;
5023 5023 } else if (iocb->ULPSTATUS == IOSTAT_NEED_BUFF_ENTRY) {
5024 5024 (void) strcpy(error_str, "Need Buffer Entry:");
5025 5025 goto done;
5026 5026 } else {
5027 5027 (void) strcpy(error_str, "General error:");
5028 5028 }
5029 5029
5030 5030 goto failed;
5031 5031 }
5032 5032
5033 5033 if (hba->flag & FC_HBQ_ENABLED) {
5034 5034 HBQ_INIT_t *hbq;
5035 5035 HBQE_t *hbqE;
5036 5036 uint32_t hbqe_tag;
5037 5037
5038 5038 (*UbPosted)--;
5039 5039
5040 5040 hbqE = (HBQE_t *)iocb;
5041 5041 hbq_id = hbqE->unt.ext.HBQ_tag;
5042 5042 hbqe_tag = hbqE->unt.ext.HBQE_tag;
5043 5043
5044 5044 hbq = &hba->sli.sli3.hbq_table[hbq_id];
5045 5045
5046 5046 if (hbqe_tag >= hbq->HBQ_numEntries) {
5047 5047 (void) sprintf(error_str, "Invalid HBQE tag=%x:",
5048 5048 hbqe_tag);
5049 5049 goto dropped;
5050 5050 }
5051 5051
5052 5052 mp = hba->sli.sli3.hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];
5053 5053
5054 5054 size = iocb->unsli3.ext_rcv.seq_len;
5055 5055 } else {
5056 5056 bdeAddr =
5057 5057 PADDR(iocb->un.cont64[0].addrHigh,
5058 5058 iocb->un.cont64[0].addrLow);
5059 5059
5060 5060 /* Check for invalid buffer */
5061 5061 if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
5062 5062 (void) strcpy(error_str, "Invalid buffer:");
5063 5063 goto dropped;
5064 5064 }
5065 5065
5066 5066 mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);
5067 5067
5068 5068 size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
5069 5069 }
5070 5070
5071 5071 if (!mp) {
5072 5072 (void) strcpy(error_str, "Buffer not mapped:");
5073 5073 goto dropped;
5074 5074 }
5075 5075
5076 5076 #ifdef FMA_SUPPORT
5077 5077 if (mp->dma_handle) {
5078 5078 if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
5079 5079 != DDI_FM_OK) {
5080 5080 EMLXS_MSGF(EMLXS_CONTEXT,
5081 5081 &emlxs_invalid_dma_handle_msg,
5082 5082 "emlxs_handle_rcv_seq: hdl=%p",
5083 5083 mp->dma_handle);
5084 5084 goto dropped;
5085 5085 }
5086 5086 }
5087 5087 #endif /* FMA_SUPPORT */
5088 5088
5089 5089 if (!size) {
5090 5090 (void) strcpy(error_str, "Buffer empty:");
5091 5091 goto dropped;
5092 5092 }
5093 5093
5094 5094 /* To avoid we drop the broadcast packets */
5095 5095 if (channelno != FC_IP_RING) {
5096 5096 /* Get virtual port */
5097 5097 if (hba->flag & FC_NPIV_ENABLED) {
5098 5098 vpi = iocb->unsli3.ext_rcv.vpi;
5099 5099 if (vpi >= hba->vpi_max) {
5100 5100 (void) sprintf(error_str,
5101 5101 "Invalid VPI=%d:", vpi);
5102 5102 goto dropped;
5103 5103 }
5104 5104
5105 5105 port = &VPORT(vpi);
5106 5106 }
5107 5107 }
5108 5108
5109 5109 /* Process request */
5110 5110 switch (channelno) {
5111 5111 #ifdef SFCT_SUPPORT
5112 5112 case FC_FCT_RING:
5113 5113 (void) emlxs_fct_handle_unsol_req(port, cp, iocbq, mp, size);
5114 5114 break;
5115 5115 #endif /* SFCT_SUPPORT */
5116 5116
5117 5117 case FC_IP_RING:
5118 5118 (void) emlxs_ip_handle_unsol_req(port, cp, iocbq, mp, size);
5119 5119 break;
5120 5120
5121 5121 case FC_ELS_RING:
5122 5122 /* If this is a target port, then let fct handle this */
5123 5123 if (port->ini_mode) {
5124 5124 (void) emlxs_els_handle_unsol_req(port, cp, iocbq, mp,
5125 5125 size);
5126 5126 }
5127 5127 #ifdef SFCT_SUPPORT
5128 5128 else if (port->tgt_mode) {
5129 5129 (void) emlxs_fct_handle_unsol_els(port, cp, iocbq, mp,
5130 5130 size);
5131 5131 }
5132 5132 #endif /* SFCT_SUPPORT */
5133 5133 break;
5134 5134
5135 5135 case FC_CT_RING:
5136 5136 (void) emlxs_ct_handle_unsol_req(port, cp, iocbq, mp, size);
5137 5137 break;
5138 5138 }
5139 5139
5140 5140 goto done;
5141 5141
5142 5142 dropped:
5143 5143 (*RcvDropped)++;
5144 5144
5145 5145 EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5146 5146 "%s: cmd=%x %s %x %x %x %x",
5147 5147 error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5148 5148 word[4], word[5], word[6], word[7]);
5149 5149
5150 5150 if (channelno == FC_FCT_RING) {
5151 5151 uint32_t sid;
5152 5152
5153 5153 if (hba->sli_mode >= EMLXS_HBA_SLI3_MODE) {
5154 5154 emlxs_node_t *ndlp;
5155 5155 ndlp = emlxs_node_find_rpi(port, iocb->ULPIOTAG);
5156 5156 sid = ndlp->nlp_DID;
5157 5157 } else {
5158 5158 sid = iocb->un.ulpWord[4] & 0xFFFFFF;
5159 5159 }
5160 5160
5161 5161 emlxs_send_logo(port, sid);
5162 5162 }
5163 5163
5164 5164 goto done;
5165 5165
5166 5166 failed:
5167 5167 (*RcvError)++;
5168 5168
5169 5169 EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5170 5170 "%s: cmd=%x %s %x %x %x %x hba:%x %x",
5171 5171 error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5172 5172 word[4], word[5], word[6], word[7], hba->state, hba->flag);
5173 5173
5174 5174 done:
5175 5175
5176 5176 if (hba->flag & FC_HBQ_ENABLED) {
5177 5177 emlxs_update_HBQ_index(hba, hbq_id);
5178 5178 } else {
5179 5179 if (mp) {
5180 5180 emlxs_mem_put(hba, buf_type, (void *)mp);
5181 5181 }
5182 5182 (void) emlxs_post_buffer(hba, rp, 1);
5183 5183 }
5184 5184
5185 5185 return (0);
5186 5186
5187 5187 } /* emlxs_handle_rcv_seq() */
5188 5188
5189 5189
/* EMLXS_CMD_RING_LOCK must be held when calling this function */
/*
 * emlxs_sli3_issue_iocb
 *
 * Copy a single IOCB into the next free slot of the ring's DMA-able
 * command ring, sync that slot for the adapter, and advance the local
 * command index.  If the IOCB is tracked by an sbp (a ULP packet
 * request), the packet is marked PACKET_IN_CHIPQ and io_active is
 * bumped; otherwise the IOCBQ is returned to the IOCB pool here.
 */
static void
emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
{
	emlxs_port_t *port;
	IOCB *icmd;	/* host-side (local) IOCB image */
	IOCB *iocb;	/* destination slot in the DMA command ring */
	emlxs_buf_t *sbp;
	off_t offset;
	uint32_t ringno;

	ringno = rp->ringno;
	sbp = iocbq->sbp;
	icmd = &iocbq->iocb;
	port = iocbq->port;

	HBASTATS.IocbIssued[ringno]++;

	/* Check for ULP pkt request */
	if (sbp) {
		mutex_enter(&sbp->mtx);

		if (sbp->node == NULL) {
			/* Set node to base node by default */
			iocbq->node = (void *)&port->node_base;
			sbp->node = (void *)&port->node_base;
		}

		sbp->pkt_flags |= PACKET_IN_CHIPQ;
		mutex_exit(&sbp->mtx);

		atomic_inc_32(&hba->io_active);

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_ISSUED);
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    icmd->ULPCOMMAND);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		rp->channelp->hbaSendCmd_sbp++;
		iocbq->channel = rp->channelp;
	} else {
		rp->channelp->hbaSendCmd++;
	}

	/* get the next available command ring iocb */
	iocb =
	    (IOCB *)(((char *)rp->fc_cmdringaddr +
	    (rp->fc_cmdidx * hba->sli.sli3.iocb_cmd_size)));

	/* Copy the local iocb to the command ring iocb */
	BE_SWAP32_BCOPY((uint8_t *)icmd, (uint8_t *)iocb,
	    hba->sli.sli3.iocb_cmd_size);

	/* DMA sync the command ring iocb for the adapter */
	offset = (off_t)((uint64_t)((unsigned long)iocb)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
	    hba->sli.sli3.iocb_cmd_size, DDI_DMA_SYNC_FORDEV);

	/*
	 * After this, the sbp / iocb should not be
	 * accessed in the xmit path.
	 */

	/* Free the local iocb if there is no sbp tracking it */
	if (!sbp) {
		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
	}

	/* update local ring index to next available ring index */
	rp->fc_cmdidx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;


	return;

} /* emlxs_sli3_issue_iocb() */
5273 5273
5274 5274
/*
 * emlxs_sli3_hba_kill
 *
 * Stop the adapter via the MBX_KILL_BOARD interlock handshake.  Host
 * interrupts are disabled, then the kill request is issued through
 * the SLIM2 (host memory) mailbox if active, falling back to the
 * SLIM1 (on-board) mailbox on failure, with one retry.  On exit the
 * HBA state is FC_KILLED.  Holds EMLXS_PORT_LOCK for the duration.
 */
static void
emlxs_sli3_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;		/* stack overlay used to build/read word0 */
	MAILBOX *mb2;		/* SLIM2 (host memory) mailbox */
	MAILBOX *mb1;		/* SLIM1 (adapter SLIM) mailbox */
	uint32_t word0;
	uint32_t j;
	uint32_t interlock_failed;
	uint32_t ha_copy;
	uint32_t value;
	off_t offset;
	uint32_t size;

	/* Perform adapter interlock to kill adapter */
	interlock_failed = 0;

	mutex_enter(&EMLXS_PORT_LOCK);
	if (hba->flag & FC_INTERLOCKED) {
		/* Already interlocked; just record the kill */
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/* Wait (up to ~1s) for any active mailbox command to finish */
	j = 0;
	while (j++ < 10000) {
		if (hba->mbox_queue_flag == 0) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		DELAYUS(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	if (hba->mbox_queue_flag != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Mailbox busy.");
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	hba->flag |= FC_INTERLOCKED;
	hba->mbox_queue_flag = 1;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	swpmb = (MAILBOX *)&word0;

	if (!(hba->flag & FC_SLIM2_MODE)) {
		goto mode_B;
	}

mode_A:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM2 Interlock...");

interlock_A:

	/* 0x55555555 written to SLIM word 1 is echoed back as */
	/* 0xAAAAAAAA by the adapter when it accepts the kill request */
	value = 0x55555555;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */
	interlock_failed = 1;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Interlock failed.");

mode_B:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM1 Interlock...");

interlock_B:

	value = 0x55555555;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write KILL BOARD to mailbox */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb1), word0);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */

	/* If this is the first time then try again */
	if (interlock_failed == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Retrying...");

		/* Try again */
		interlock_failed = 1;
		goto interlock_B;
	}

	/*
	 * Now check for error attention to indicate the board has
	 * been killed
	 */
	j = 0;
	while (j++ < 10000) {
		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));

		if (ha_copy & HA_ERATT) {
			break;
		}

		DELAYUS(50);
	}

	if (ha_copy & HA_ERATT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board killed.");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board not killed.");
	}

done:

	hba->mbox_queue_flag = 0;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_sli3_hba_kill() */
5508 5508
5509 5509
/*
 * emlxs_sli3_hba_kill4quiesce
 *
 * Quiesce-time variant of emlxs_sli3_hba_kill(): issue MBX_KILL_BOARD
 * through the SLIM2 mailbox only, with no SLIM1 fallback and no retry.
 * NOTE(review): uses EMLXS_STATE_CHANGE_LOCKED, so EMLXS_PORT_LOCK is
 * presumably held by the caller — confirm.
 */
static void
emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;		/* stack overlay used to build/read word0 */
	MAILBOX *mb2;		/* SLIM2 (host memory) mailbox */
	MAILBOX *mb1;		/* SLIM1 (adapter SLIM) mailbox */
	uint32_t word0;
	off_t offset;
	uint32_t j;
	uint32_t value;
	uint32_t size;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	swpmb = (MAILBOX *)&word0;

	/* 0x55555555 is echoed back as 0xAAAAAAAA by the adapter */
	/* when it accepts the kill request */
	value = 0x55555555;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}
		DELAYUS(50);
	}
	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
			if (swpmb->mbxOwner == 0) {
				break;
			}
			DELAYUS(50);
		}
		/* NOTE(review): this goto is redundant — control falls */
		/* through to the done label either way */
		goto done;
	}

done:
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */
	return;

} /* emlxs_sli3_hba_kill4quiesce */
5593 5593
5594 5594
5595 5595
5596 5596
5597 5597 /*
5598 5598 * emlxs_handle_mb_event
5599 5599 *
5600 5600 * Description: Process a Mailbox Attention.
5601 5601 * Called from host_interrupt to process MBATT
5602 5602 *
5603 5603 * Returns:
5604 5604 *
5605 5605 */
5606 5606 static uint32_t
5607 5607 emlxs_handle_mb_event(emlxs_hba_t *hba)
5608 5608 {
5609 5609 emlxs_port_t *port = &PPORT;
5610 5610 MAILBOX *mb;
5611 5611 MAILBOX *swpmb;
5612 5612 MAILBOX *mbox;
5613 5613 MAILBOXQ *mbq = NULL;
5614 5614 volatile uint32_t word0;
5615 5615 MATCHMAP *mbox_bp;
5616 5616 off_t offset;
5617 5617 uint32_t i;
5618 5618 int rc;
5619 5619
5620 5620 swpmb = (MAILBOX *)&word0;
5621 5621
5622 5622 mutex_enter(&EMLXS_PORT_LOCK);
5623 5623 switch (hba->mbox_queue_flag) {
5624 5624 case 0:
5625 5625 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5626 5626 "No mailbox active.");
5627 5627
5628 5628 mutex_exit(&EMLXS_PORT_LOCK);
5629 5629 return (0);
5630 5630
5631 5631 case MBX_POLL:
5632 5632
5633 5633 /* Mark mailbox complete, this should wake up any polling */
5634 5634 /* threads. This can happen if interrupts are enabled while */
5635 5635 /* a polled mailbox command is outstanding. If we don't set */
5636 5636 /* MBQ_COMPLETED here, the polling thread may wait until */
5637 5637 /* timeout error occurs */
5638 5638
5639 5639 mutex_enter(&EMLXS_MBOX_LOCK);
5640 5640 mbq = (MAILBOXQ *)hba->mbox_mbq;
5641 5641 if (mbq) {
5642 5642 port = (emlxs_port_t *)mbq->port;
5643 5643 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5644 5644 "Mailbox event. Completing Polled command.");
5645 5645 mbq->flag |= MBQ_COMPLETED;
5646 5646 }
5647 5647 mutex_exit(&EMLXS_MBOX_LOCK);
5648 5648
5649 5649 mutex_exit(&EMLXS_PORT_LOCK);
5650 5650 return (0);
5651 5651
5652 5652 case MBX_SLEEP:
5653 5653 case MBX_NOWAIT:
5654 5654 /* Check mbox_timer, it acts as a service flag too */
5655 5655 /* The first to service the mbox queue will clear the timer */
5656 5656 if (hba->mbox_timer) {
5657 5657 hba->mbox_timer = 0;
5658 5658
5659 5659 mutex_enter(&EMLXS_MBOX_LOCK);
5660 5660 mbq = (MAILBOXQ *)hba->mbox_mbq;
5661 5661 mutex_exit(&EMLXS_MBOX_LOCK);
5662 5662 }
5663 5663
5664 5664 if (!mbq) {
5665 5665 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5666 5666 "Mailbox event. No service required.");
5667 5667 mutex_exit(&EMLXS_PORT_LOCK);
5668 5668 return (0);
5669 5669 }
5670 5670
5671 5671 mb = (MAILBOX *)mbq;
5672 5672 mutex_exit(&EMLXS_PORT_LOCK);
5673 5673 break;
5674 5674
5675 5675 default:
5676 5676 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
5677 5677 "Invalid Mailbox flag (%x).");
5678 5678
5679 5679 mutex_exit(&EMLXS_PORT_LOCK);
5680 5680 return (0);
5681 5681 }
5682 5682
5683 5683 /* Set port context */
5684 5684 port = (emlxs_port_t *)mbq->port;
5685 5685
5686 5686 /* Get first word of mailbox */
5687 5687 if (hba->flag & FC_SLIM2_MODE) {
5688 5688 mbox = FC_SLIM2_MAILBOX(hba);
5689 5689 offset = (off_t)((uint64_t)((unsigned long)mbox)
5690 5690 - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5691 5691
5692 5692 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5693 5693 sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5694 5694 word0 = *((volatile uint32_t *)mbox);
5695 5695 word0 = BE_SWAP32(word0);
5696 5696 } else {
5697 5697 mbox = FC_SLIM1_MAILBOX(hba);
5698 5698 word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5699 5699 }
5700 5700
5701 5701 i = 0;
5702 5702 while (swpmb->mbxOwner == OWN_CHIP) {
5703 5703 if (i++ > 10000) {
5704 5704 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5705 5705 "OWN_CHIP: %s: status=%x",
5706 5706 emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5707 5707 swpmb->mbxStatus);
5708 5708
5709 5709 return (1);
5710 5710 }
5711 5711
5712 5712 /* Get first word of mailbox */
5713 5713 if (hba->flag & FC_SLIM2_MODE) {
5714 5714 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5715 5715 offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5716 5716 word0 = *((volatile uint32_t *)mbox);
5717 5717 word0 = BE_SWAP32(word0);
5718 5718 } else {
5719 5719 word0 =
5720 5720 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5721 5721 }
5722 5722 }
5723 5723
5724 5724 /* Now that we are the owner, DMA Sync entire mailbox if needed */
5725 5725 if (hba->flag & FC_SLIM2_MODE) {
5726 5726 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5727 5727 MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
5728 5728
5729 5729 BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
5730 5730 MAILBOX_CMD_BSIZE);
5731 5731 } else {
5732 5732 READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox,
5733 5733 MAILBOX_CMD_WSIZE);
5734 5734 }
5735 5735
5736 5736 #ifdef MBOX_EXT_SUPPORT
5737 5737 if (mbq->extbuf) {
5738 5738 uint32_t *mbox_ext =
5739 5739 (uint32_t *)((uint8_t *)mbox + MBOX_EXTENSION_OFFSET);
5740 5740 off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;
5741 5741
5742 5742 if (hba->flag & FC_SLIM2_MODE) {
5743 5743 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5744 5744 offset_ext, mbq->extsize,
5745 5745 DDI_DMA_SYNC_FORKERNEL);
5746 5746 BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
5747 5747 (uint8_t *)mbq->extbuf, mbq->extsize);
5748 5748 } else {
5749 5749 READ_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
5750 5750 mbox_ext, (mbq->extsize / 4));
5751 5751 }
5752 5752 }
5753 5753 #endif /* MBOX_EXT_SUPPORT */
5754 5754
5755 5755 #ifdef FMA_SUPPORT
5756 5756 if (!(hba->flag & FC_SLIM2_MODE)) {
5757 5757 /* Access handle validation */
5758 5758 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
5759 5759 }
5760 5760 #endif /* FMA_SUPPORT */
5761 5761
5762 5762 /* Now sync the memory buffer if one was used */
5763 5763 if (mbq->bp) {
5764 5764 mbox_bp = (MATCHMAP *)mbq->bp;
5765 5765 EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
5766 5766 DDI_DMA_SYNC_FORKERNEL);
5767 5767 }
5768 5768
5769 5769 /* Mailbox has been completely received at this point */
5770 5770
5771 5771 if (mb->mbxCommand == MBX_HEARTBEAT) {
5772 5772 hba->heartbeat_active = 0;
5773 5773 goto done;
5774 5774 }
5775 5775
5776 5776 if (hba->mbox_queue_flag == MBX_SLEEP) {
5777 5777 if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5778 5778 swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5779 5779 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5780 5780 "Received. %s: status=%x Sleep.",
5781 5781 emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5782 5782 swpmb->mbxStatus);
5783 5783 }
5784 5784 } else {
5785 5785 if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5786 5786 swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5787 5787 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5788 5788 "Completed. %s: status=%x",
5789 5789 emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5790 5790 swpmb->mbxStatus);
5791 5791 }
5792 5792 }
5793 5793
5794 5794 /* Filter out passthru mailbox */
5795 5795 if (mbq->flag & MBQ_PASSTHRU) {
5796 5796 goto done;
5797 5797 }
5798 5798
5799 5799 if (mb->mbxStatus) {
5800 5800 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5801 5801 "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5802 5802 (uint32_t)mb->mbxStatus);
5803 5803 }
5804 5804
5805 5805 if (mbq->mbox_cmpl) {
5806 5806 rc = (mbq->mbox_cmpl)(hba, mbq);
5807 5807 /* If mbox was retried, return immediately */
5808 5808 if (rc) {
5809 5809 return (0);
5810 5810 }
5811 5811 }
5812 5812
5813 5813 done:
5814 5814
5815 5815 /* Clean up the mailbox area */
5816 5816 emlxs_mb_fini(hba, mb, mb->mbxStatus);
5817 5817
5818 5818 mbq = (MAILBOXQ *)emlxs_mb_get(hba);
5819 5819 if (mbq) {
5820 5820 /* Attempt to send pending mailboxes */
5821 5821 rc = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
5822 5822 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
5823 5823 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
5824 5824 }
5825 5825 }
5826 5826 return (0);
5827 5827
5828 5828 } /* emlxs_handle_mb_event() */
5829 5829
5830 5830
5831 5831 extern void
5832 5832 emlxs_sli3_timer(emlxs_hba_t *hba)
5833 5833 {
5834 5834 /* Perform SLI3 level timer checks */
5835 5835
5836 5836 emlxs_sli3_timer_check_mbox(hba);
5837 5837
5838 5838 } /* emlxs_sli3_timer() */
5839 5839
5840 5840
5841 5841 static void
5842 5842 emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba)
5843 5843 {
5844 5844 emlxs_port_t *port = &PPORT;
5845 5845 emlxs_config_t *cfg = &CFG;
5846 5846 MAILBOX *mb = NULL;
5847 5847 uint32_t word0;
5848 5848 uint32_t offset;
5849 5849 uint32_t ha_copy = 0;
5850 5850
5851 5851 if (!cfg[CFG_TIMEOUT_ENABLE].current) {
5852 5852 return;
5853 5853 }
5854 5854
5855 5855 mutex_enter(&EMLXS_PORT_LOCK);
5856 5856
5857 5857 /* Return if timer hasn't expired */
5858 5858 if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
5859 5859 mutex_exit(&EMLXS_PORT_LOCK);
5860 5860 return;
5861 5861 }
5862 5862
5863 5863 /* Mailbox timed out, first check for error attention */
5864 5864 ha_copy = emlxs_check_attention(hba);
5865 5865
5866 5866 if (ha_copy & HA_ERATT) {
5867 5867 hba->mbox_timer = 0;
5868 5868 mutex_exit(&EMLXS_PORT_LOCK);
5869 5869 emlxs_handle_ff_error(hba);
5870 5870 return;
5871 5871 }
5872 5872
5873 5873 if (hba->mbox_queue_flag) {
5874 5874 /* Get first word of mailbox */
5875 5875 if (hba->flag & FC_SLIM2_MODE) {
5876 5876 mb = FC_SLIM2_MAILBOX(hba);
5877 5877 offset =
5878 5878 (off_t)((uint64_t)((unsigned long)mb) - (uint64_t)
5879 5879 ((unsigned long)hba->sli.sli3.slim2.virt));
5880 5880
5881 5881 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5882 5882 offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5883 5883 word0 = *((volatile uint32_t *)mb);
5884 5884 word0 = BE_SWAP32(word0);
5885 5885 } else {
5886 5886 mb = FC_SLIM1_MAILBOX(hba);
5887 5887 word0 =
5888 5888 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
5889 5889 #ifdef FMA_SUPPORT
5890 5890 /* Access handle validation */
5891 5891 EMLXS_CHK_ACC_HANDLE(hba,
5892 5892 hba->sli.sli3.slim_acc_handle);
5893 5893 #endif /* FMA_SUPPORT */
5894 5894 }
5895 5895
5896 5896 mb = (MAILBOX *)&word0;
5897 5897
5898 5898 /* Check if mailbox has actually completed */
5899 5899 if (mb->mbxOwner == OWN_HOST) {
5900 5900 /* Read host attention register to determine */
5901 5901 /* interrupt source */
5902 5902 uint32_t ha_copy = emlxs_check_attention(hba);
5903 5903
5904 5904 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5905 5905 "Mailbox attention missed: %s. Forcing event. "
5906 5906 "hc=%x ha=%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5907 5907 hba->sli.sli3.hc_copy, ha_copy);
5908 5908
5909 5909 mutex_exit(&EMLXS_PORT_LOCK);
5910 5910
5911 5911 (void) emlxs_handle_mb_event(hba);
5912 5912
5913 5913 return;
5914 5914 }
5915 5915
5916 5916 /* The first to service the mbox queue will clear the timer */
5917 5917 /* We will service the mailbox here */
5918 5918 hba->mbox_timer = 0;
5919 5919
5920 5920 mutex_enter(&EMLXS_MBOX_LOCK);
5921 5921 mb = (MAILBOX *)hba->mbox_mbq;
5922 5922 mutex_exit(&EMLXS_MBOX_LOCK);
5923 5923 }
5924 5924
5925 5925 if (mb) {
5926 5926 switch (hba->mbox_queue_flag) {
5927 5927 case MBX_NOWAIT:
5928 5928 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
5929 5929 "%s: Nowait.",
5930 5930 emlxs_mb_cmd_xlate(mb->mbxCommand));
5931 5931 break;
5932 5932
5933 5933 case MBX_SLEEP:
5934 5934 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
5935 5935 "%s: mb=%p Sleep.",
5936 5936 emlxs_mb_cmd_xlate(mb->mbxCommand),
5937 5937 mb);
5938 5938 break;
5939 5939
5940 5940 case MBX_POLL:
5941 5941 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
5942 5942 "%s: mb=%p Polled.",
5943 5943 emlxs_mb_cmd_xlate(mb->mbxCommand),
5944 5944 mb);
5945 5945 break;
5946 5946
5947 5947 default:
5948 5948 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
5949 5949 "%s: mb=%p (%d).",
5950 5950 emlxs_mb_cmd_xlate(mb->mbxCommand),
5951 5951 mb, hba->mbox_queue_flag);
5952 5952 break;
5953 5953 }
5954 5954 } else {
5955 5955 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
5956 5956 }
5957 5957
5958 5958 hba->flag |= FC_MBOX_TIMEOUT;
5959 5959 EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);
5960 5960
5961 5961 mutex_exit(&EMLXS_PORT_LOCK);
5962 5962
5963 5963 /* Perform mailbox cleanup */
5964 5964 /* This will wake any sleeping or polling threads */
5965 5965 emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
5966 5966
5967 5967 /* Trigger adapter shutdown */
5968 5968 emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
5969 5969
5970 5970 return;
5971 5971
5972 5972 } /* emlxs_sli3_timer_check_mbox() */
5973 5973
5974 5974
/*
 * emlxs_mb_config_port  Issue a CONFIG_PORT mailbox command
 *
 * Builds the CONFIG_PORT mailbox command in *mbq and initializes the
 * PCB (Port Control Block) inside the host SLIM2 DMA area: mailbox
 * address, host/port group pointer addresses, and the per-ring
 * command/response IOCB descriptors.  The PCB is then byte-swapped
 * in place and DMA-synced for the device.  Always returns 0.
 *
 *   hba      - adapter instance
 *   mbq      - mailbox queue entry to fill in (cast to MAILBOX)
 *   sli_mode - requested SLI mode (EMLXS_HBA_SLI2_MODE/SLI3_MODE)
 *   hbainit  - value placed in varCfgPort.hbainit[0]
 */
static uint32_t
emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode,
    uint32_t hbainit)
{
	MAILBOX *mb = (MAILBOX *)mbq;
	emlxs_vpd_t *vpd = &VPD;
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	RING *rp;
	uint64_t pcb;
	uint64_t mbx;
	uint64_t hgp;
	uint64_t pgp;
	uint64_t rgp;
	MAILBOX *mbox;
	SLIM2 *slim;
	SLI2_RDSC *rdsc;
	uint64_t offset;
	uint32_t Laddr;
	uint32_t i;

	cfg = &CFG;
	bzero((void *)mb, MAILBOX_CMD_BSIZE);
	/*
	 * NOTE: mbox and slim stay NULL for the whole routine.  They are
	 * only used inside "&(slim->member)" / "&(mbox->member)"
	 * expressions to compute structure member offsets (an
	 * offsetof-style idiom); the offsets are added to the physical
	 * base address of the slim2 DMA area.  They are never
	 * dereferenced.
	 */
	mbox = NULL;
	slim = NULL;

	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL;

	mb->un.varCfgPort.pcbLen = sizeof (PCB);
	mb->un.varCfgPort.hbainit[0] = hbainit;

	/* Physical address of the PCB within the slim2 DMA buffer */
	pcb = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(slim->pcb));
	mb->un.varCfgPort.pcbLow = PADDR_LO(pcb);
	mb->un.varCfgPort.pcbHigh = PADDR_HI(pcb);

	/* Set Host pointers in SLIM flag */
	mb->un.varCfgPort.hps = 1;

	/* Initialize hba structure for assumed default SLI2 mode */
	/* If config port succeeds, then we will update it then */
	hba->sli_mode = sli_mode;
	hba->vpi_max = 0;
	hba->flag &= ~FC_NPIV_ENABLED;

	if (sli_mode == EMLXS_HBA_SLI3_MODE) {
		mb->un.varCfgPort.sli_mode = EMLXS_HBA_SLI3_MODE;
		/* cerbm enables ERBM/HBQ operation (see HGP layout below) */
		mb->un.varCfgPort.cerbm = 1;
		mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;

		/* NPIV needs SLI3 plus a sufficient firmware feature level */
		if (cfg[CFG_NPIV_ENABLE].current) {
			if (vpd->feaLevelHigh >= 0x09) {
				if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS - 1;
				} else {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS_LIMITED - 1;
				}

				mb->un.varCfgPort.cmv = 1;
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_debug_msg,
				    "CFGPORT: Firmware does not support NPIV. "
				    "level=%d", vpd->feaLevelHigh);
			}

		}
	}

	/*
	 * Now setup pcb
	 */
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.maxRing =
	    (hba->sli.sli3.ring_count - 1);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mailBoxSize =
	    sizeof (MAILBOX) + MBOX_EXTENSION_SIZE;

	/* Physical address of the mailbox within the slim2 DMA buffer */
	mbx = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(slim->mbx));
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrHigh = PADDR_HI(mbx);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrLow = PADDR_LO(mbx);


	/*
	 * Set up HGP - Port Memory
	 *
	 * CR0Put   - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
	 * RR0Get                     0xc4              0x84
	 * CR1Put                     0xc8              0x88
	 * RR1Get                     0xcc              0x8c
	 * CR2Put                     0xd0              0x90
	 * RR2Get                     0xd4              0x94
	 * CR3Put                     0xd8              0x98
	 * RR3Get                     0xdc              0x9c
	 *
	 * Reserved                   0xa0-0xbf
	 *
	 * If HBQs configured:
	 * HBQ 0 Put ptr  0xc0
	 * HBQ 1 Put ptr  0xc4
	 * HBQ 2 Put ptr  0xc8
	 * ...
	 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
	 */

	if (sli_mode >= EMLXS_HBA_SLI3_MODE) {
		/* ERBM is enabled */
		hba->sli.sli3.hgp_ring_offset = 0x80;
		hba->sli.sli3.hgp_hbq_offset = 0xC0;

		hba->sli.sli3.iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		hba->sli.sli3.iocb_rsp_size = SLI3_IOCB_RSP_SIZE;

	} else { /* SLI2 */
		/* ERBM is disabled */
		hba->sli.sli3.hgp_ring_offset = 0xC0;
		hba->sli.sli3.hgp_hbq_offset = 0;

		hba->sli.sli3.iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		hba->sli.sli3.iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
	}

	/* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
	if (hba->bus_type == SBUS_FC) {
		/* Host group pointers live in the host-side slim2 buffer */
		hgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(mbox->us.s2.host));
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
		    PADDR_HI(hgp);
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
		    PADDR_LO(hgp);
	} else {
		/* Host group pointers live in adapter SLIM (PCI BAR space) */
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
		    (uint32_t)ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));

		Laddr =
		    ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
		/* Mask bit 0x4 out of the BAR value — presumably a BAR */
		/* attribute flag bit, not part of the address; verify */
		Laddr &= ~0x4;
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
		    (uint32_t)(Laddr + hba->sli.sli3.hgp_ring_offset);

#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

	}

	/* Port group pointers always live in the host slim2 buffer */
	pgp = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(mbox->us.s2.port));
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrHigh =
	    PADDR_HI(pgp);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrLow =
	    PADDR_LO(pgp);

	/* Lay out each ring's command and response IOCB areas */
	/* back-to-back within slim2's IOCB array */
	offset = 0;
	for (i = 0; i < 4; i++) {
		rp = &hba->sli.sli3.ring[i];
		rdsc = &((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.rdsc[i];

		/* Setup command ring */
		rgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
		rdsc->cmdAddrHigh = PADDR_HI(rgp);
		rdsc->cmdAddrLow = PADDR_LO(rgp);
		rdsc->cmdEntries = rp->fc_numCiocb;

		rp->fc_cmdringaddr =
		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
		offset += rdsc->cmdEntries * hba->sli.sli3.iocb_cmd_size;

		/* Setup response ring */
		rgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
		rdsc->rspAddrHigh = PADDR_HI(rgp);
		rdsc->rspAddrLow = PADDR_LO(rgp);
		rdsc->rspEntries = rp->fc_numRiocb;

		rp->fc_rspringaddr =
		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
		offset += rdsc->rspEntries * hba->sli.sli3.iocb_rsp_size;
	}

	/* Byte-swap the PCB in place (host to adapter endianness) */
	BE_SWAP32_BCOPY((uint8_t *)
	    (&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
	    (uint8_t *)(&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
	    sizeof (PCB));

	/* DMA-sync just the PCB portion of slim2 out to the device */
	offset = ((uint64_t)((unsigned long)
	    &(((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, (off_t)offset,
	    sizeof (PCB), DDI_DMA_SYNC_FORDEV);

	return (0);

} /* emlxs_mb_config_port() */
6182 6182
6183 6183
6184 6184 static uint32_t
6185 6185 emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
6186 6186 {
6187 6187 emlxs_port_t *port = &PPORT;
6188 6188 HBQ_INIT_t *hbq;
6189 6189 MATCHMAP *mp;
6190 6190 HBQE_t *hbqE;
6191 6191 MAILBOX *mb;
6192 6192 MAILBOXQ *mbq;
6193 6193 void *ioa2;
6194 6194 uint32_t j;
6195 6195 uint32_t count;
6196 6196 uint32_t size;
6197 6197 uint32_t ringno;
6198 6198 uint32_t seg;
6199 6199
6200 6200 switch (hbq_id) {
6201 6201 case EMLXS_ELS_HBQ_ID:
6202 6202 count = MEM_ELSBUF_COUNT;
6203 6203 size = MEM_ELSBUF_SIZE;
6204 6204 ringno = FC_ELS_RING;
6205 6205 seg = MEM_ELSBUF;
6206 6206 HBASTATS.ElsUbPosted = count;
6207 6207 break;
6208 6208
6209 6209 case EMLXS_IP_HBQ_ID:
6210 6210 count = MEM_IPBUF_COUNT;
6211 6211 size = MEM_IPBUF_SIZE;
6212 6212 ringno = FC_IP_RING;
6213 6213 seg = MEM_IPBUF;
6214 6214 HBASTATS.IpUbPosted = count;
6215 6215 break;
6216 6216
6217 6217 case EMLXS_CT_HBQ_ID:
6218 6218 count = MEM_CTBUF_COUNT;
6219 6219 size = MEM_CTBUF_SIZE;
6220 6220 ringno = FC_CT_RING;
6221 6221 seg = MEM_CTBUF;
6222 6222 HBASTATS.CtUbPosted = count;
6223 6223 break;
6224 6224
6225 6225 #ifdef SFCT_SUPPORT
6226 6226 case EMLXS_FCT_HBQ_ID:
6227 6227 count = MEM_FCTBUF_COUNT;
6228 6228 size = MEM_FCTBUF_SIZE;
6229 6229 ringno = FC_FCT_RING;
6230 6230 seg = MEM_FCTBUF;
6231 6231 HBASTATS.FctUbPosted = count;
6232 6232 break;
6233 6233 #endif /* SFCT_SUPPORT */
6234 6234
6235 6235 default:
6236 6236 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6237 6237 "emlxs_hbq_setup: Invalid HBQ id. (%x)", hbq_id);
6238 6238 return (1);
6239 6239 }
6240 6240
6241 6241 /* Configure HBQ */
6242 6242 hbq = &hba->sli.sli3.hbq_table[hbq_id];
6243 6243 hbq->HBQ_numEntries = count;
6244 6244
6245 6245 /* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */
6246 6246 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
6247 6247 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6248 6248 "emlxs_hbq_setup: Unable to get mailbox.");
6249 6249 return (1);
6250 6250 }
6251 6251 mb = (MAILBOX *)mbq;
6252 6252
6253 6253 /* Allocate HBQ Host buffer and Initialize the HBQEs */
6254 6254 if (emlxs_hbq_alloc(hba, hbq_id)) {
6255 6255 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6256 6256 "emlxs_hbq_setup: Unable to allocate HBQ.");
6257 6257 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6258 6258 return (1);
6259 6259 }
6260 6260
6261 6261 hbq->HBQ_recvNotify = 1;
6262 6262 hbq->HBQ_num_mask = 0; /* Bind to ring */
6263 6263 hbq->HBQ_profile = 0; /* Selection profile */
6264 6264 /* 0=all, 7=logentry */
6265 6265 hbq->HBQ_ringMask = 1 << ringno; /* b0100 * ringno - Binds */
6266 6266 /* HBQ to a ring */
6267 6267 /* Ring0=b0001, Ring1=b0010, */
6268 6268 /* Ring2=b0100 */
6269 6269 hbq->HBQ_headerLen = 0; /* 0 if not profile 4 or 5 */
6270 6270 hbq->HBQ_logEntry = 0; /* Set to 1 if this HBQ will */
6271 6271 /* be used for */
6272 6272 hbq->HBQ_id = hbq_id;
6273 6273 hbq->HBQ_PutIdx_next = 0;
6274 6274 hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
6275 6275 hbq->HBQ_GetIdx = 0;
6276 6276 hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
6277 6277 bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));
6278 6278
6279 6279 /* Fill in POST BUFFERs in HBQE */
6280 6280 hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
6281 6281 for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
6282 6282 /* Allocate buffer to post */
6283 6283 if ((mp = (MATCHMAP *)emlxs_mem_get(hba,
6284 6284 seg, 1)) == 0) {
6285 6285 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6286 6286 "emlxs_hbq_setup: Unable to allocate HBQ buffer. "
6287 6287 "cnt=%d", j);
6288 6288 emlxs_hbq_free_all(hba, hbq_id);
6289 6289 return (1);
6290 6290 }
6291 6291
6292 6292 hbq->HBQ_PostBufs[j] = mp;
6293 6293
6294 6294 hbqE->unt.ext.HBQ_tag = hbq_id;
6295 6295 hbqE->unt.ext.HBQE_tag = j;
6296 6296 hbqE->bde.tus.f.bdeSize = size;
6297 6297 hbqE->bde.tus.f.bdeFlags = 0;
6298 6298 hbqE->unt.w = BE_SWAP32(hbqE->unt.w);
6299 6299 hbqE->bde.tus.w = BE_SWAP32(hbqE->bde.tus.w);
6300 6300 hbqE->bde.addrLow =
6301 6301 BE_SWAP32(PADDR_LO(mp->phys));
6302 6302 hbqE->bde.addrHigh =
6303 6303 BE_SWAP32(PADDR_HI(mp->phys));
6304 6304 }
6305 6305
6306 6306 /* Issue CONFIG_HBQ */
6307 6307 emlxs_mb_config_hbq(hba, mbq, hbq_id);
6308 6308 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
6309 6309 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6310 6310 "emlxs_hbq_setup: Unable to config HBQ. cmd=%x status=%x",
6311 6311 mb->mbxCommand, mb->mbxStatus);
6312 6312
6313 6313 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6314 6314 emlxs_hbq_free_all(hba, hbq_id);
6315 6315 return (1);
6316 6316 }
6317 6317
6318 6318 /* Setup HBQ Get/Put indexes */
6319 6319 ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6320 6320 (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6321 6321 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, hbq->HBQ_PutIdx);
6322 6322
6323 6323 hba->sli.sli3.hbq_count++;
6324 6324
6325 6325 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6326 6326
6327 6327 #ifdef FMA_SUPPORT
6328 6328 /* Access handle validation */
6329 6329 if (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
6330 6330 != DDI_FM_OK) {
6331 6331 EMLXS_MSGF(EMLXS_CONTEXT,
6332 6332 &emlxs_invalid_access_handle_msg, NULL);
6333 6333 emlxs_hbq_free_all(hba, hbq_id);
6334 6334 return (1);
6335 6335 }
6336 6336 #endif /* FMA_SUPPORT */
6337 6337
6338 6338 return (0);
6339 6339
6340 6340 } /* emlxs_hbq_setup() */
6341 6341
6342 6342
6343 6343 extern void
6344 6344 emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
6345 6345 {
6346 6346 HBQ_INIT_t *hbq;
6347 6347 MBUF_INFO *buf_info;
6348 6348 MBUF_INFO bufinfo;
6349 6349 uint32_t seg;
6350 6350 uint32_t j;
6351 6351
6352 6352 switch (hbq_id) {
6353 6353 case EMLXS_ELS_HBQ_ID:
6354 6354 seg = MEM_ELSBUF;
6355 6355 HBASTATS.ElsUbPosted = 0;
6356 6356 break;
6357 6357
6358 6358 case EMLXS_IP_HBQ_ID:
6359 6359 seg = MEM_IPBUF;
6360 6360 HBASTATS.IpUbPosted = 0;
6361 6361 break;
6362 6362
6363 6363 case EMLXS_CT_HBQ_ID:
6364 6364 seg = MEM_CTBUF;
6365 6365 HBASTATS.CtUbPosted = 0;
6366 6366 break;
6367 6367
6368 6368 #ifdef SFCT_SUPPORT
6369 6369 case EMLXS_FCT_HBQ_ID:
6370 6370 seg = MEM_FCTBUF;
6371 6371 HBASTATS.FctUbPosted = 0;
6372 6372 break;
6373 6373 #endif /* SFCT_SUPPORT */
6374 6374
6375 6375 default:
6376 6376 return;
6377 6377 }
6378 6378
6379 6379
6380 6380 hbq = &hba->sli.sli3.hbq_table[hbq_id];
6381 6381
6382 6382 if (hbq->HBQ_host_buf.virt != 0) {
6383 6383 for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
6384 6384 emlxs_mem_put(hba, seg,
6385 6385 (void *)hbq->HBQ_PostBufs[j]);
6386 6386 hbq->HBQ_PostBufs[j] = NULL;
6387 6387 }
6388 6388 hbq->HBQ_PostBufCnt = 0;
6389 6389
6390 6390 buf_info = &bufinfo;
6391 6391 bzero(buf_info, sizeof (MBUF_INFO));
6392 6392
6393 6393 buf_info->size = hbq->HBQ_host_buf.size;
6394 6394 buf_info->virt = hbq->HBQ_host_buf.virt;
6395 6395 buf_info->phys = hbq->HBQ_host_buf.phys;
6396 6396 buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
6397 6397 buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
6398 6398 buf_info->flags = FC_MBUF_DMA;
6399 6399
6400 6400 emlxs_mem_free(hba, buf_info);
6401 6401
6402 6402 hbq->HBQ_host_buf.virt = NULL;
6403 6403 }
6404 6404
6405 6405 return;
6406 6406
6407 6407 } /* emlxs_hbq_free_all() */
6408 6408
6409 6409
6410 6410 extern void
6411 6411 emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
6412 6412 {
6413 6413 #ifdef FMA_SUPPORT
6414 6414 emlxs_port_t *port = &PPORT;
6415 6415 #endif /* FMA_SUPPORT */
6416 6416 void *ioa2;
6417 6417 uint32_t status;
6418 6418 uint32_t HBQ_PortGetIdx;
6419 6419 HBQ_INIT_t *hbq;
6420 6420
6421 6421 switch (hbq_id) {
6422 6422 case EMLXS_ELS_HBQ_ID:
6423 6423 HBASTATS.ElsUbPosted++;
6424 6424 break;
6425 6425
6426 6426 case EMLXS_IP_HBQ_ID:
6427 6427 HBASTATS.IpUbPosted++;
6428 6428 break;
6429 6429
6430 6430 case EMLXS_CT_HBQ_ID:
6431 6431 HBASTATS.CtUbPosted++;
6432 6432 break;
6433 6433
6434 6434 #ifdef SFCT_SUPPORT
6435 6435 case EMLXS_FCT_HBQ_ID:
6436 6436 HBASTATS.FctUbPosted++;
6437 6437 break;
6438 6438 #endif /* SFCT_SUPPORT */
6439 6439
6440 6440 default:
6441 6441 return;
6442 6442 }
6443 6443
6444 6444 hbq = &hba->sli.sli3.hbq_table[hbq_id];
6445 6445
6446 6446 hbq->HBQ_PutIdx =
6447 6447 (hbq->HBQ_PutIdx + 1 >=
6448 6448 hbq->HBQ_numEntries) ? 0 : hbq->HBQ_PutIdx + 1;
6449 6449
6450 6450 if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
6451 6451 HBQ_PortGetIdx =
6452 6452 BE_SWAP32(((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.
6453 6453 HBQ_PortGetIdx[hbq_id]);
6454 6454
6455 6455 hbq->HBQ_GetIdx = HBQ_PortGetIdx;
6456 6456
6457 6457 if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
6458 6458 return;
6459 6459 }
6460 6460 }
6461 6461
6462 6462 ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6463 6463 (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6464 6464 status = hbq->HBQ_PutIdx;
6465 6465 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, status);
6466 6466
6467 6467 #ifdef FMA_SUPPORT
6468 6468 /* Access handle validation */
6469 6469 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
6470 6470 #endif /* FMA_SUPPORT */
6471 6471
6472 6472 return;
6473 6473
6474 6474 } /* emlxs_update_HBQ_index() */
6475 6475
6476 6476
6477 6477 static void
6478 6478 emlxs_sli3_enable_intr(emlxs_hba_t *hba)
6479 6479 {
6480 6480 #ifdef FMA_SUPPORT
6481 6481 emlxs_port_t *port = &PPORT;
6482 6482 #endif /* FMA_SUPPORT */
6483 6483 uint32_t status;
6484 6484
6485 6485 /* Enable mailbox, error attention interrupts */
6486 6486 status = (uint32_t)(HC_MBINT_ENA);
6487 6487
6488 6488 /* Enable ring interrupts */
6489 6489 if (hba->sli.sli3.ring_count >= 4) {
6490 6490 status |=
6491 6491 (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
6492 6492 HC_R0INT_ENA);
6493 6493 } else if (hba->sli.sli3.ring_count == 3) {
6494 6494 status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
6495 6495 } else if (hba->sli.sli3.ring_count == 2) {
6496 6496 status |= (HC_R1INT_ENA | HC_R0INT_ENA);
6497 6497 } else if (hba->sli.sli3.ring_count == 1) {
6498 6498 status |= (HC_R0INT_ENA);
6499 6499 }
6500 6500
6501 6501 hba->sli.sli3.hc_copy = status;
6502 6502 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6503 6503
6504 6504 #ifdef FMA_SUPPORT
6505 6505 /* Access handle validation */
6506 6506 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6507 6507 #endif /* FMA_SUPPORT */
6508 6508
6509 6509 } /* emlxs_sli3_enable_intr() */
6510 6510
6511 6511
/*
 * emlxs_enable_latt
 *
 * Enable the link attention interrupt by OR-ing HC_LAINT_ENA into the
 * cached Host Control value (hc_copy) and writing it back to the HC
 * register.  EMLXS_PORT_LOCK serializes the read-modify-write of
 * hc_copy against other users.
 */
static void
emlxs_enable_latt(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	mutex_enter(&EMLXS_PORT_LOCK);
	/* Add the link attention enable bit and push the updated mask */
	hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	mutex_exit(&EMLXS_PORT_LOCK);

} /* emlxs_enable_latt() */
6529 6529
6530 6530
/*
 * emlxs_sli3_disable_intr
 *
 * Replace the Host Control interrupt mask with 'att' (typically 0 to
 * disable all adapter interrupts), updating the cached copy and
 * writing it to the HC register.
 */
static void
emlxs_sli3_disable_intr(emlxs_hba_t *hba, uint32_t att)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* Disable all adapter interrupts */
	hba->sli.sli3.hc_copy = att;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_sli3_disable_intr() */
6547 6547
6548 6548
/*
 * emlxs_check_attention
 *
 * Read and return the current Host Attention (HA) register value.
 * No state is modified other than the FMA access-handle check.
 */
static uint32_t
emlxs_check_attention(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint32_t ha_copy;

	/* Snapshot the host attention status bits */
	ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	return (ha_copy);

} /* emlxs_check_attention() */
6565 6565
6566 6566 void
6567 6567 emlxs_sli3_poll_erratt(emlxs_hba_t *hba)
6568 6568 {
6569 6569 uint32_t ha_copy;
6570 6570
6571 6571 ha_copy = emlxs_check_attention(hba);
6572 6572
6573 6573 /* Adapter error */
6574 6574 if (ha_copy & HA_ERATT) {
6575 6575 HBASTATS.IntrEvent[6]++;
6576 6576 emlxs_handle_ff_error(hba);
6577 6577 }
6578 6578 }
↓ open down ↓ |
1347 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX