Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli4.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli4.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Emulex. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27
28 28 #include <emlxs.h>
29 29
30 30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 31 EMLXS_MSG_DEF(EMLXS_SLI4_C);
32 32
33 33 static int emlxs_sli4_create_queues(emlxs_hba_t *hba,
34 34 MAILBOXQ *mbq);
35 35 static int emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
36 36 MAILBOXQ *mbq);
37 37 static int emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
38 38 MAILBOXQ *mbq);
39 39
40 40 static int emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
41 41
42 42 extern void emlxs_parse_prog_types(emlxs_hba_t *hba, char *types);
43 43
44 44 extern int32_t emlxs_parse_vpd(emlxs_hba_t *hba, uint8_t *vpd,
45 45 uint32_t size);
46 46 extern void emlxs_decode_label(char *label, char *buffer, int bige);
47 47
48 48 extern void emlxs_build_prog_types(emlxs_hba_t *hba,
49 49 char *prog_types);
50 50
51 51 extern int emlxs_pci_model_count;
52 52
53 53 extern emlxs_model_t emlxs_pci_model[];
54 54
55 55 static int emlxs_sli4_map_hdw(emlxs_hba_t *hba);
56 56
57 57 static void emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
58 58
59 59 static int32_t emlxs_sli4_online(emlxs_hba_t *hba);
60 60
61 61 static void emlxs_sli4_offline(emlxs_hba_t *hba);
62 62
63 63 static uint32_t emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
64 64 uint32_t skip_post, uint32_t quiesce);
65 65 static void emlxs_sli4_hba_kill(emlxs_hba_t *hba);
66 66
67 67 static uint32_t emlxs_sli4_hba_init(emlxs_hba_t *hba);
68 68
69 69 static uint32_t emlxs_sli4_bde_setup(emlxs_port_t *port,
70 70 emlxs_buf_t *sbp);
71 71
72 72
73 73 static void emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
74 74 CHANNEL *cp, IOCBQ *iocb_cmd);
75 75 static uint32_t emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
76 76 MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
77 77 static uint32_t emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
78 78 MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
79 79 #ifdef SFCT_SUPPORT
80 80 static uint32_t emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
81 81 emlxs_buf_t *cmd_sbp, int channel);
82 82 #endif /* SFCT_SUPPORT */
83 83
84 84 static uint32_t emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
85 85 emlxs_buf_t *sbp, int ring);
86 86 static uint32_t emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
87 87 emlxs_buf_t *sbp);
88 88 static uint32_t emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
89 89 emlxs_buf_t *sbp);
90 90 static uint32_t emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
91 91 emlxs_buf_t *sbp);
92 92 static void emlxs_sli4_poll_intr(emlxs_hba_t *hba,
93 93 uint32_t att_bit);
94 94 static int32_t emlxs_sli4_intx_intr(char *arg);
95 95
96 96 #ifdef MSI_SUPPORT
97 97 static uint32_t emlxs_sli4_msi_intr(char *arg1, char *arg2);
98 98 #endif /* MSI_SUPPORT */
99 99
100 100 static void emlxs_sli4_resource_free(emlxs_hba_t *hba);
101 101
102 102 static int emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
103 103
104 104 static XRIobj_t *emlxs_sli4_alloc_xri(emlxs_hba_t *hba,
105 105 emlxs_buf_t *sbp, RPIobj_t *rpip);
106 106 static void emlxs_sli4_enable_intr(emlxs_hba_t *hba);
107 107
108 108 static void emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
109 109
110 110 extern void emlxs_sli4_timer(emlxs_hba_t *hba);
111 111
112 112 static void emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
113 113
114 114 static void emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
115 115
116 116 static XRIobj_t *emlxs_sli4_register_xri(emlxs_hba_t *hba,
117 117 emlxs_buf_t *sbp, uint16_t xri);
118 118
119 119 static XRIobj_t *emlxs_sli4_reserve_xri(emlxs_hba_t *hba,
120 120 RPIobj_t *rpip);
121 121 static int emlxs_check_hdw_ready(emlxs_hba_t *);
122 122
123 123
124 124 /* Define SLI4 API functions */
125 125 emlxs_sli_api_t emlxs_sli4_api = {
126 126 	emlxs_sli4_map_hdw,
127 127 	emlxs_sli4_unmap_hdw,
128 128 	emlxs_sli4_online,
129 129 	emlxs_sli4_offline,
130 130 	emlxs_sli4_hba_reset,
131 131 	emlxs_sli4_hba_kill,
132 132 	emlxs_sli4_issue_iocb_cmd,
133 133 	emlxs_sli4_issue_mbox_cmd,
134 134 #ifdef SFCT_SUPPORT
135 135 	emlxs_sli4_prep_fct_iocb,	/* target-mode prep, only with SFCT_SUPPORT */
136 136 #else
137 137 	NULL,				/* slot unused when SFCT_SUPPORT is off */
138 138 #endif /* SFCT_SUPPORT */
139 139 	emlxs_sli4_prep_fcp_iocb,
140 140 	emlxs_sli4_prep_ip_iocb,
141 141 	emlxs_sli4_prep_els_iocb,
142 142 	emlxs_sli4_prep_ct_iocb,
143 143 	emlxs_sli4_poll_intr,
144 144 	emlxs_sli4_intx_intr,
145 145 	emlxs_sli4_msi_intr,	/* NOTE(review): unconditional here, though its
146 146 				 * prototype above is under MSI_SUPPORT --
147 147 				 * confirm emlxs_sli_api_t layout matches */
148 148 	emlxs_sli4_disable_intr,
149 149 	emlxs_sli4_timer,
150 150 	emlxs_sli4_poll_erratt
151 151 };
150 150
151 151
152 152 /* ************************************************************************** */
153 153
154 154
155 155 /*
156 156 * emlxs_sli4_online()
157 157 *
158 158 * This routine will start initialization of the SLI4 HBA.
159 159 */
160 160 static int32_t
161 161 emlxs_sli4_online(emlxs_hba_t *hba)
162 162 {
163 163 emlxs_port_t *port = &PPORT;
164 164 emlxs_config_t *cfg;
165 165 emlxs_vpd_t *vpd;
166 166 MAILBOXQ *mbq = NULL;
167 167 MAILBOX4 *mb = NULL;
168 168 MATCHMAP *mp = NULL;
169 169 uint32_t i;
170 170 uint32_t j;
171 171 uint32_t rval = 0;
172 172 uint8_t *vpd_data;
173 173 uint32_t sli_mode;
174 174 uint8_t *outptr;
175 175 uint32_t status;
176 176 uint32_t fw_check;
177 177 uint32_t kern_update = 0;
178 178 emlxs_firmware_t hba_fw;
179 179 emlxs_firmware_t *fw;
180 180 uint16_t ssvid;
181 181
182 182 cfg = &CFG;
183 183 vpd = &VPD;
184 184
185 185 sli_mode = EMLXS_HBA_SLI4_MODE;
186 186 hba->sli_mode = sli_mode;
187 187
188 188 /* Set the fw_check flag */
189 189 fw_check = cfg[CFG_FW_CHECK].current;
190 190
191 191 if ((fw_check & 0x04) ||
192 192 (hba->fw_flag & FW_UPDATE_KERNEL)) {
193 193 kern_update = 1;
194 194 }
195 195
196 196 hba->mbox_queue_flag = 0;
197 197 hba->fc_edtov = FF_DEF_EDTOV;
198 198 hba->fc_ratov = FF_DEF_RATOV;
199 199 hba->fc_altov = FF_DEF_ALTOV;
200 200 hba->fc_arbtov = FF_DEF_ARBTOV;
201 201
202 202 /* Target mode not supported */
203 203 if (hba->tgt_mode) {
204 204 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
205 205 "Target mode not supported in SLI4.");
206 206
207 207 return (ENOMEM);
208 208 }
209 209
210 210 /* Networking not supported */
211 211 if (cfg[CFG_NETWORK_ON].current) {
212 212 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
213 213 "Networking not supported in SLI4, turning it off");
214 214 cfg[CFG_NETWORK_ON].current = 0;
215 215 }
216 216
217 217 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
218 218 if (hba->chan_count > MAX_CHANNEL) {
219 219 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
220 220 "Max channels exceeded, dropping num-wq from %d to 1",
221 221 cfg[CFG_NUM_WQ].current);
222 222 cfg[CFG_NUM_WQ].current = 1;
223 223 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
224 224 }
225 225 hba->channel_fcp = 0; /* First channel */
226 226
227 227 /* Default channel for everything else is the last channel */
228 228 hba->channel_ip = hba->chan_count - 1;
229 229 hba->channel_els = hba->chan_count - 1;
230 230 hba->channel_ct = hba->chan_count - 1;
231 231
232 232 hba->fc_iotag = 1;
233 233 hba->io_count = 0;
234 234 hba->channel_tx_count = 0;
235 235
236 236 /* Initialize the local dump region buffer */
237 237 bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
238 238 hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
239 239 hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG
240 240 | FC_MBUF_DMA32;
241 241 hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
242 242
243 243 (void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
244 244
245 245 if (hba->sli.sli4.dump_region.virt == NULL) {
246 246 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
247 247 "Unable to allocate dump region buffer.");
248 248
249 249 return (ENOMEM);
250 250 }
251 251
252 252 /*
253 253 * Get a buffer which will be used repeatedly for mailbox commands
254 254 */
255 255 mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
256 256
257 257 mb = (MAILBOX4 *)mbq;
258 258
259 259 reset:
260 260 /* Reset & Initialize the adapter */
261 261 if (emlxs_sli4_hba_init(hba)) {
262 262 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
263 263 "Unable to init hba.");
264 264
265 265 rval = EIO;
266 266 goto failed1;
267 267 }
268 268
269 269 #ifdef FMA_SUPPORT
270 270 /* Access handle validation */
271 271 if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
272 272 != DDI_FM_OK) ||
273 273 (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar1_acc_handle)
274 274 != DDI_FM_OK) ||
275 275 (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar2_acc_handle)
276 276 != DDI_FM_OK)) {
277 277 EMLXS_MSGF(EMLXS_CONTEXT,
278 278 &emlxs_invalid_access_handle_msg, NULL);
279 279
280 280 rval = EIO;
281 281 goto failed1;
282 282 }
283 283 #endif /* FMA_SUPPORT */
284 284
285 285 /*
286 286 * Setup and issue mailbox READ REV command
287 287 */
288 288 vpd->opFwRev = 0;
289 289 vpd->postKernRev = 0;
290 290 vpd->sli1FwRev = 0;
291 291 vpd->sli2FwRev = 0;
292 292 vpd->sli3FwRev = 0;
293 293 vpd->sli4FwRev = 0;
294 294
295 295 vpd->postKernName[0] = 0;
296 296 vpd->opFwName[0] = 0;
297 297 vpd->sli1FwName[0] = 0;
298 298 vpd->sli2FwName[0] = 0;
299 299 vpd->sli3FwName[0] = 0;
300 300 vpd->sli4FwName[0] = 0;
301 301
302 302 vpd->opFwLabel[0] = 0;
303 303 vpd->sli1FwLabel[0] = 0;
304 304 vpd->sli2FwLabel[0] = 0;
305 305 vpd->sli3FwLabel[0] = 0;
306 306 vpd->sli4FwLabel[0] = 0;
307 307
308 308 EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
309 309
310 310 emlxs_mb_read_rev(hba, mbq, 0);
311 311 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
312 312 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
313 313 "Unable to read rev. Mailbox cmd=%x status=%x",
314 314 mb->mbxCommand, mb->mbxStatus);
315 315
316 316 rval = EIO;
317 317 goto failed1;
318 318
319 319 }
320 320
321 321 emlxs_data_dump(port, "RD_REV", (uint32_t *)mb, 18, 0);
322 322 if (mb->un.varRdRev4.sliLevel != 4) {
323 323 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
324 324 "Invalid read rev Version for SLI4: 0x%x",
325 325 mb->un.varRdRev4.sliLevel);
326 326
327 327 rval = EIO;
328 328 goto failed1;
329 329 }
330 330
331 331 switch (mb->un.varRdRev4.dcbxMode) {
332 332 case EMLXS_DCBX_MODE_CIN: /* Mapped to nonFIP mode */
333 333 hba->flag &= ~FC_FIP_SUPPORTED;
334 334 break;
335 335
336 336 case EMLXS_DCBX_MODE_CEE: /* Mapped to FIP mode */
337 337 hba->flag |= FC_FIP_SUPPORTED;
338 338 break;
339 339
340 340 default:
341 341 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
342 342 "Invalid read rev dcbx mode for SLI4: 0x%x",
343 343 mb->un.varRdRev4.dcbxMode);
344 344
345 345 rval = EIO;
346 346 goto failed1;
347 347 }
348 348
349 349
350 350 /* Save information as VPD data */
351 351 vpd->rBit = 1;
352 352
353 353 vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
354 354 bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
355 355
356 356 vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
357 357 bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
358 358
359 359 vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
360 360 bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
361 361
362 362 vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
363 363 vpd->fcphLow = mb->un.varRdRev4.fcphLow;
364 364 vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
365 365 vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
366 366
367 367 /* Decode FW labels */
368 368 emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0);
369 369 emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0);
370 370 emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0);
371 371
372 372 if (hba->model_info.chip == EMLXS_BE2_CHIP) {
373 373 (void) strcpy(vpd->sli4FwLabel, "be2.ufi");
374 374 } else if (hba->model_info.chip == EMLXS_BE3_CHIP) {
375 375 (void) strcpy(vpd->sli4FwLabel, "be3.ufi");
376 376 } else {
377 377 (void) strcpy(vpd->sli4FwLabel, "sli4.fw");
378 378 }
379 379
380 380 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
381 381 "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
382 382 vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
383 383 vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
384 384 mb->un.varRdRev4.dcbxMode);
385 385
386 386 /* No key information is needed for SLI4 products */
387 387
388 388 /* Get adapter VPD information */
389 389 vpd->port_index = (uint32_t)-1;
390 390
391 391 /* Reuse mbq from previous mbox */
392 392 bzero(mbq, sizeof (MAILBOXQ));
393 393
394 394 emlxs_mb_dump_vpd(hba, mbq, 0);
395 395 vpd_data = hba->sli.sli4.dump_region.virt;
396 396
397 397 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
398 398 MBX_SUCCESS) {
399 399 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
400 400 "No VPD found. status=%x", mb->mbxStatus);
401 401 } else {
402 402 EMLXS_MSGF(EMLXS_CONTEXT,
403 403 &emlxs_init_debug_msg,
404 404 "VPD dumped. rsp_cnt=%d status=%x",
405 405 mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
406 406
407 407 if (mb->un.varDmp4.rsp_cnt) {
408 408 EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
409 409 0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
410 410
411 411 #ifdef FMA_SUPPORT
412 412 if (hba->sli.sli4.dump_region.dma_handle) {
413 413 if (emlxs_fm_check_dma_handle(hba,
414 414 hba->sli.sli4.dump_region.dma_handle)
415 415 != DDI_FM_OK) {
416 416 EMLXS_MSGF(EMLXS_CONTEXT,
417 417 &emlxs_invalid_dma_handle_msg,
418 418 "emlxs_sli4_online: hdl=%p",
419 419 hba->sli.sli4.dump_region.
420 420 dma_handle);
421 421 rval = EIO;
422 422 goto failed1;
423 423 }
424 424 }
425 425 #endif /* FMA_SUPPORT */
426 426
427 427 }
428 428 }
429 429
430 430 if (vpd_data[0]) {
431 431 (void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
432 432 mb->un.varDmp4.rsp_cnt);
433 433
434 434 /*
435 435 * If there is a VPD part number, and it does not
436 436 * match the current default HBA model info,
437 437 * replace the default data with an entry that
438 438 * does match.
439 439 *
440 440 * After emlxs_parse_vpd model holds the VPD value
441 441 * for V2 and part_num hold the value for PN. These
442 442 * 2 values are NOT necessarily the same.
443 443 */
444 444
445 445 rval = 0;
446 446 if ((vpd->model[0] != 0) &&
447 447 (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
448 448
449 449 /* First scan for a V2 match */
450 450
451 451 for (i = 1; i < emlxs_pci_model_count; i++) {
452 452 if (strcmp(&vpd->model[0],
453 453 emlxs_pci_model[i].model) == 0) {
454 454 bcopy(&emlxs_pci_model[i],
455 455 &hba->model_info,
456 456 sizeof (emlxs_model_t));
457 457 rval = 1;
458 458 break;
459 459 }
460 460 }
461 461 }
462 462
463 463 if (!rval && (vpd->part_num[0] != 0) &&
464 464 (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
465 465
466 466 /* Next scan for a PN match */
467 467
468 468 for (i = 1; i < emlxs_pci_model_count; i++) {
469 469 if (strcmp(&vpd->part_num[0],
470 470 emlxs_pci_model[i].model) == 0) {
471 471 bcopy(&emlxs_pci_model[i],
472 472 &hba->model_info,
473 473 sizeof (emlxs_model_t));
474 474 break;
475 475 }
476 476 }
477 477 }
478 478
479 479 /* HP CNA port indices start at 1 instead of 0 */
480 480 if ((hba->model_info.chip == EMLXS_BE2_CHIP) ||
481 481 (hba->model_info.chip == EMLXS_BE3_CHIP)) {
482 482
483 483 ssvid = ddi_get16(hba->pci_acc_handle,
484 484 (uint16_t *)(hba->pci_addr + PCI_SSVID_REGISTER));
485 485
486 486 if ((ssvid == PCI_SSVID_HP) && (vpd->port_index > 0)) {
487 487 vpd->port_index--;
488 488 }
489 489 }
490 490
491 491 /*
492 492 * Now lets update hba->model_info with the real
493 493 * VPD data, if any.
494 494 */
495 495
496 496 /*
497 497 * Replace the default model description with vpd data
498 498 */
499 499 if (vpd->model_desc[0] != 0) {
500 500 (void) strcpy(hba->model_info.model_desc,
501 501 vpd->model_desc);
502 502 }
503 503
504 504 /* Replace the default model with vpd data */
505 505 if (vpd->model[0] != 0) {
506 506 (void) strcpy(hba->model_info.model, vpd->model);
507 507 }
508 508
509 509 /* Replace the default program types with vpd data */
510 510 if (vpd->prog_types[0] != 0) {
511 511 emlxs_parse_prog_types(hba, vpd->prog_types);
512 512 }
513 513 }
514 514
515 515 /*
516 516 * Since the adapter model may have changed with the vpd data
517 517 * lets double check if adapter is not supported
518 518 */
519 519 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
520 520 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
521 521 "Unsupported adapter found. "
522 522 "Id:%d Device id:0x%x SSDID:0x%x Model:%s",
523 523 hba->model_info.id, hba->model_info.device_id,
524 524 hba->model_info.ssdid, hba->model_info.model);
525 525
526 526 rval = EIO;
527 527 goto failed1;
528 528 }
529 529
530 530 (void) strcpy(vpd->boot_version, vpd->sli4FwName);
531 531
532 532 /* Get fcode version property */
533 533 emlxs_get_fcode_version(hba);
534 534
535 535 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
536 536 "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
537 537 vpd->opFwRev, vpd->sli1FwRev);
538 538
539 539 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
540 540 "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
541 541 vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
542 542
543 543 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
544 544 "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
545 545
546 546 /*
547 547 * If firmware checking is enabled and the adapter model indicates
548 548 * a firmware image, then perform firmware version check
549 549 */
550 550 hba->fw_flag = 0;
551 551 hba->fw_timer = 0;
552 552
553 553 if (((fw_check & 0x1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
554 554 hba->model_info.fwid) || ((fw_check & 0x2) &&
555 555 hba->model_info.fwid)) {
556 556
557 557 /* Find firmware image indicated by adapter model */
558 558 fw = NULL;
559 559 for (i = 0; i < emlxs_fw_count; i++) {
560 560 if (emlxs_fw_table[i].id == hba->model_info.fwid) {
561 561 fw = &emlxs_fw_table[i];
562 562 break;
563 563 }
564 564 }
565 565
566 566 /*
567 567 * If the image was found, then verify current firmware
568 568 * versions of adapter
569 569 */
570 570 if (fw) {
571 571
572 572 /* Obtain current firmware version info */
573 573 if ((hba->model_info.chip == EMLXS_BE2_CHIP) ||
574 574 (hba->model_info.chip == EMLXS_BE3_CHIP)) {
575 575 (void) emlxs_sli4_read_fw_version(hba, &hba_fw);
576 576 } else {
577 577 hba_fw.kern = vpd->postKernRev;
578 578 hba_fw.stub = vpd->opFwRev;
579 579 hba_fw.sli1 = vpd->sli1FwRev;
580 580 hba_fw.sli2 = vpd->sli2FwRev;
581 581 hba_fw.sli3 = vpd->sli3FwRev;
582 582 hba_fw.sli4 = vpd->sli4FwRev;
583 583 }
584 584
585 585 if (!kern_update &&
586 586 ((fw->kern && (hba_fw.kern != fw->kern)) ||
587 587 (fw->stub && (hba_fw.stub != fw->stub)))) {
588 588
589 589 hba->fw_flag |= FW_UPDATE_NEEDED;
590 590
591 591 } else if ((fw->kern && (hba_fw.kern != fw->kern)) ||
592 592 (fw->stub && (hba_fw.stub != fw->stub)) ||
593 593 (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
594 594 (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
595 595 (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
596 596 (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
597 597
598 598 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
599 599 "Firmware update needed. "
600 600 "Updating. id=%d fw=%d",
601 601 hba->model_info.id, hba->model_info.fwid);
602 602
603 603 #ifdef MODFW_SUPPORT
604 604 /*
605 605 * Load the firmware image now
606 606 * If MODFW_SUPPORT is not defined, the
607 607 * firmware image will already be defined
608 608 * in the emlxs_fw_table
609 609 */
610 610 emlxs_fw_load(hba, fw);
611 611 #endif /* MODFW_SUPPORT */
612 612
613 613 if (fw->image && fw->size) {
614 614 if (emlxs_fw_download(hba,
615 615 (char *)fw->image, fw->size, 0)) {
616 616 EMLXS_MSGF(EMLXS_CONTEXT,
617 617 &emlxs_init_msg,
618 618 "Firmware update failed.");
619 619
620 620 hba->fw_flag |=
621 621 FW_UPDATE_NEEDED;
622 622 }
623 623 #ifdef MODFW_SUPPORT
624 624 /*
625 625 * Unload the firmware image from
626 626 * kernel memory
627 627 */
628 628 emlxs_fw_unload(hba, fw);
629 629 #endif /* MODFW_SUPPORT */
630 630
631 631 fw_check = 0;
632 632
633 633 goto reset;
634 634 }
635 635
636 636 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
637 637 "Firmware image unavailable.");
638 638 } else {
639 639 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
640 640 "Firmware update not needed.");
641 641 }
642 642 } else {
643 643 /*
644 644 * This means either the adapter database is not
645 645 * correct or a firmware image is missing from the
646 646 * compile
647 647 */
648 648 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
649 649 "Firmware image unavailable. id=%d fw=%d",
650 650 hba->model_info.id, hba->model_info.fwid);
651 651 }
652 652 }
653 653
654 654 /* Reuse mbq from previous mbox */
655 655 bzero(mbq, sizeof (MAILBOXQ));
656 656
657 657 emlxs_mb_dump_fcoe(hba, mbq, 0);
658 658
659 659 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
660 660 MBX_SUCCESS) {
661 661 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
662 662 "No FCOE info found. status=%x", mb->mbxStatus);
663 663 } else {
664 664 EMLXS_MSGF(EMLXS_CONTEXT,
665 665 &emlxs_init_debug_msg,
666 666 "FCOE info dumped. rsp_cnt=%d status=%x",
667 667 mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
668 668 (void) emlxs_parse_fcoe(hba,
669 669 (uint8_t *)hba->sli.sli4.dump_region.virt,
670 670 mb->un.varDmp4.rsp_cnt);
671 671 }
672 672
673 673 /* Reuse mbq from previous mbox */
674 674 bzero(mbq, sizeof (MAILBOXQ));
675 675
676 676 emlxs_mb_request_features(hba, mbq);
677 677
678 678 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
679 679 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
680 680 "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
681 681 mb->mbxCommand, mb->mbxStatus);
682 682
683 683 rval = EIO;
684 684 goto failed1;
685 685 }
686 686 emlxs_data_dump(port, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
687 687
688 688 /* Make sure we get the features we requested */
689 689 if (mb->un.varReqFeatures.featuresRequested !=
690 690 mb->un.varReqFeatures.featuresEnabled) {
691 691
692 692 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
693 693 "Unable to get REQUESTed_FEATURES. want:x%x got:x%x",
694 694 mb->un.varReqFeatures.featuresRequested,
695 695 mb->un.varReqFeatures.featuresEnabled);
696 696
697 697 rval = EIO;
698 698 goto failed1;
699 699 }
700 700
701 701 if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
702 702 hba->flag |= FC_NPIV_ENABLED;
703 703 }
704 704
705 705 /* Check enable-npiv driver parameter for now */
706 706 if (cfg[CFG_NPIV_ENABLE].current) {
707 707 hba->flag |= FC_NPIV_ENABLED;
708 708 }
709 709
710 710 /* Reuse mbq from previous mbox */
711 711 bzero(mbq, sizeof (MAILBOXQ));
712 712
713 713 emlxs_mb_read_config(hba, mbq);
714 714 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
715 715 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
716 716 "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
717 717 mb->mbxCommand, mb->mbxStatus);
718 718
719 719 rval = EIO;
720 720 goto failed1;
721 721 }
722 722 emlxs_data_dump(port, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
723 723
724 724 hba->sli.sli4.XRICount = (mb->un.varRdConfig4.XRICount);
725 725 hba->sli.sli4.XRIBase = (mb->un.varRdConfig4.XRIBase);
726 726 hba->sli.sli4.RPICount = (mb->un.varRdConfig4.RPICount);
727 727 hba->sli.sli4.RPIBase = (mb->un.varRdConfig4.RPIBase);
728 728 hba->sli.sli4.VPICount = (mb->un.varRdConfig4.VPICount);
729 729 hba->sli.sli4.VPIBase = (mb->un.varRdConfig4.VPIBase);
730 730 hba->sli.sli4.VFICount = (mb->un.varRdConfig4.VFICount);
731 731 hba->sli.sli4.VFIBase = (mb->un.varRdConfig4.VFIBase);
732 732 hba->sli.sli4.FCFICount = (mb->un.varRdConfig4.FCFICount);
733 733
734 734 if (hba->sli.sli4.VPICount) {
735 735 hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
736 736 }
737 737 hba->vpi_base = mb->un.varRdConfig4.VPIBase;
738 738
739 739 /* Set the max node count */
740 740 if (cfg[CFG_NUM_NODES].current > 0) {
741 741 hba->max_nodes =
742 742 min(cfg[CFG_NUM_NODES].current,
743 743 hba->sli.sli4.RPICount);
744 744 } else {
745 745 hba->max_nodes = hba->sli.sli4.RPICount;
746 746 }
747 747
748 748 /* Set the io throttle */
749 749 hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
750 750 hba->max_iotag = hba->sli.sli4.XRICount;
751 751
752 752 /* Save the link speed capabilities */
753 753 vpd->link_speed = (uint16_t)mb->un.varRdConfig4.lmt;
754 754 emlxs_process_link_speed(hba);
755 755
756 756 /*
757 757 * Allocate some memory for buffers
758 758 */
759 759 if (emlxs_mem_alloc_buffer(hba) == 0) {
760 760 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
761 761 "Unable to allocate memory buffers.");
762 762
763 763 rval = ENOMEM;
764 764 goto failed1;
765 765 }
766 766
767 767 /*
768 768 * OutOfRange (oor) iotags are used for abort or close
769 769 * XRI commands or any WQE that does not require a SGL
770 770 */
771 771 hba->fc_oor_iotag = hba->max_iotag;
772 772
773 773 if (emlxs_sli4_resource_alloc(hba)) {
774 774 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
775 775 "Unable to allocate resources.");
776 776
777 777 rval = ENOMEM;
778 778 goto failed2;
779 779 }
780 780 emlxs_data_dump(port, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
781 781
782 782 #if (EMLXS_MODREV >= EMLXS_MODREV5)
783 783 if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
784 784 hba->fca_tran->fca_num_npivports = hba->vpi_max;
785 785 }
786 786 #endif /* >= EMLXS_MODREV5 */
787 787
788 788 /* Reuse mbq from previous mbox */
789 789 bzero(mbq, sizeof (MAILBOXQ));
790 790
791 791 if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
792 792 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
793 793 "Unable to post sgl pages.");
794 794
795 795 rval = EIO;
796 796 goto failed3;
797 797 }
798 798
799 799 /* Reuse mbq from previous mbox */
800 800 bzero(mbq, sizeof (MAILBOXQ));
801 801
802 802 if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
803 803 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
804 804 "Unable to post header templates.");
805 805
806 806 rval = EIO;
807 807 goto failed3;
808 808 }
809 809
810 810 /*
811 811 * Add our interrupt routine to kernel's interrupt chain & enable it
812 812 * If MSI is enabled this will cause Solaris to program the MSI address
813 813 * and data registers in PCI config space
814 814 */
815 815 if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
816 816 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
817 817 "Unable to add interrupt(s).");
818 818
819 819 rval = EIO;
820 820 goto failed3;
821 821 }
822 822
823 823 /* Reuse mbq from previous mbox */
824 824 bzero(mbq, sizeof (MAILBOXQ));
825 825
826 826 /* This MUST be done after EMLXS_INTR_ADD */
827 827 if (emlxs_sli4_create_queues(hba, mbq)) {
828 828 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
829 829 "Unable to create queues.");
830 830
831 831 rval = EIO;
832 832 goto failed3;
833 833 }
834 834
835 835 EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
836 836
837 837 /* Get and save the current firmware version (based on sli_mode) */
838 838 emlxs_decode_firmware_rev(hba, vpd);
839 839
840 840
841 841 EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
842 842
843 843 /* Reuse mbq from previous mbox */
844 844 bzero(mbq, sizeof (MAILBOXQ));
845 845
846 846 /*
847 847 * We need to get login parameters for NID
848 848 */
849 849 (void) emlxs_mb_read_sparam(hba, mbq);
850 850 mp = (MATCHMAP *)mbq->bp;
851 851 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
852 852 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
853 853 "Unable to read parameters. Mailbox cmd=%x status=%x",
854 854 mb->mbxCommand, mb->mbxStatus);
855 855
856 856 rval = EIO;
857 857 goto failed3;
858 858 }
859 859
860 860 /* Free the buffer since we were polling */
861 861 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
862 862 mp = NULL;
863 863
864 864 /* If no serial number in VPD data, then use the WWPN */
865 865 if (vpd->serial_num[0] == 0) {
866 866 outptr = (uint8_t *)&hba->wwpn.IEEE[0];
867 867 for (i = 0; i < 12; i++) {
868 868 status = *outptr++;
869 869 j = ((status & 0xf0) >> 4);
870 870 if (j <= 9) {
871 871 vpd->serial_num[i] =
872 872 (char)((uint8_t)'0' + (uint8_t)j);
873 873 } else {
874 874 vpd->serial_num[i] =
875 875 (char)((uint8_t)'A' + (uint8_t)(j - 10));
876 876 }
877 877
878 878 i++;
879 879 j = (status & 0xf);
880 880 if (j <= 9) {
881 881 vpd->serial_num[i] =
882 882 (char)((uint8_t)'0' + (uint8_t)j);
883 883 } else {
884 884 vpd->serial_num[i] =
885 885 (char)((uint8_t)'A' + (uint8_t)(j - 10));
886 886 }
887 887 }
888 888
889 889 /*
890 890 * Set port number and port index to zero
891 891 * The WWN's are unique to each port and therefore port_num
892 892 * must equal zero. This effects the hba_fru_details structure
893 893 * in fca_bind_port()
894 894 */
895 895 vpd->port_num[0] = 0;
896 896 vpd->port_index = 0;
897 897 }
898 898
899 899 /* Make attempt to set a port index */
900 900 if (vpd->port_index == (uint32_t)-1) {
901 901 dev_info_t *p_dip;
902 902 dev_info_t *c_dip;
903 903
904 904 p_dip = ddi_get_parent(hba->dip);
905 905 c_dip = ddi_get_child(p_dip);
906 906
907 907 vpd->port_index = 0;
908 908 while (c_dip && (hba->dip != c_dip)) {
909 909 c_dip = ddi_get_next_sibling(c_dip);
910 910
911 911 if (strcmp(ddi_get_name(c_dip), "ethernet")) {
912 912 vpd->port_index++;
913 913 }
914 914 }
915 915 }
916 916
917 917 if (vpd->port_num[0] == 0) {
918 918 if (hba->model_info.channels > 1) {
919 919 (void) sprintf(vpd->port_num, "%d", vpd->port_index);
920 920 }
921 921 }
922 922
923 923 if (vpd->id[0] == 0) {
924 924 (void) sprintf(vpd->id, "%s %d",
925 925 hba->model_info.model_desc, vpd->port_index);
926 926
927 927 }
928 928
929 929 if (vpd->manufacturer[0] == 0) {
930 930 (void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
931 931 }
932 932
933 933 if (vpd->part_num[0] == 0) {
934 934 (void) strcpy(vpd->part_num, hba->model_info.model);
935 935 }
936 936
937 937 if (vpd->model_desc[0] == 0) {
938 938 (void) sprintf(vpd->model_desc, "%s %d",
939 939 hba->model_info.model_desc, vpd->port_index);
940 940 }
941 941
942 942 if (vpd->model[0] == 0) {
943 943 (void) strcpy(vpd->model, hba->model_info.model);
944 944 }
945 945
946 946 if (vpd->prog_types[0] == 0) {
947 947 emlxs_build_prog_types(hba, vpd->prog_types);
948 948 }
949 949
950 950 /* Create the symbolic names */
951 951 (void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
952 952 hba->model_info.model, hba->vpd.fw_version, emlxs_version,
953 953 (char *)utsname.nodename);
954 954
955 955 (void) sprintf(hba->spn,
956 956 "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
957 957 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
958 958 hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
959 959 hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
960 960
961 961
962 962 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
963 963 emlxs_sli4_enable_intr(hba);
964 964
965 965 /* Reuse mbq from previous mbox */
966 966 bzero(mbq, sizeof (MAILBOXQ));
967 967
968 968 /*
969 969 * Setup and issue mailbox INITIALIZE LINK command
970 970 * At this point, the interrupt will be generated by the HW
971 971 * Do this only if persist-linkdown is not set
972 972 */
973 973 if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
974 974 emlxs_mb_init_link(hba, mbq,
975 975 cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
976 976
977 977 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0)
978 978 != MBX_SUCCESS) {
979 979 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
980 980 "Unable to initialize link. "
981 981 "Mailbox cmd=%x status=%x",
982 982 mb->mbxCommand, mb->mbxStatus);
983 983
984 984 rval = EIO;
985 985 goto failed3;
986 986 }
987 987
988 988 /* Wait for link to come up */
989 989 i = cfg[CFG_LINKUP_DELAY].current;
990 990 while (i && (hba->state < FC_LINK_UP)) {
991 991 /* Check for hardware error */
992 992 if (hba->state == FC_ERROR) {
993 993 EMLXS_MSGF(EMLXS_CONTEXT,
994 994 &emlxs_init_failed_msg,
995 995 "Adapter error.", mb->mbxCommand,
996 996 mb->mbxStatus);
997 997
998 998 rval = EIO;
999 999 goto failed3;
1000 1000 }
1001 1001
1002 1002 DELAYMS(1000);
1003 1003 i--;
1004 1004 }
1005 1005 } else {
1006 1006 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1007 1007 }
1008 1008
1009 1009 /*
1010 1010 * The leadvile driver will now handle the FLOGI at the driver level
1011 1011 */
1012 1012
1013 1013 if (mbq) {
1014 1014 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1015 1015 mbq = NULL;
1016 1016 mb = NULL;
1017 1017 }
1018 1018 return (0);
1019 1019
1020 1020 failed3:
1021 1021 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1022 1022
1023 1023 if (mp) {
1024 1024 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1025 1025 mp = NULL;
1026 1026 }
1027 1027
1028 1028
1029 1029 if (hba->intr_flags & EMLXS_MSI_ADDED) {
1030 1030 (void) EMLXS_INTR_REMOVE(hba);
1031 1031 }
1032 1032
1033 1033 emlxs_sli4_resource_free(hba);
1034 1034
1035 1035 failed2:
1036 1036 (void) emlxs_mem_free_buffer(hba);
1037 1037
1038 1038 failed1:
1039 1039 if (mbq) {
1040 1040 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1041 1041 mbq = NULL;
1042 1042 mb = NULL;
1043 1043 }
1044 1044
1045 1045 if (hba->sli.sli4.dump_region.virt) {
1046 1046 (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1047 1047 }
1048 1048
1049 1049 if (rval == 0) {
1050 1050 rval = EIO;
1051 1051 }
1052 1052
1053 1053 return (rval);
1054 1054
1055 1055 } /* emlxs_sli4_online() */
1056 1056
1057 1057
1058 1058 static void
1059 1059 emlxs_sli4_offline(emlxs_hba_t *hba)
1060 1060 {
1061 1061 emlxs_port_t *port = &PPORT;
1062 1062 MAILBOXQ mboxq;
1063 1063
1064 1064 /* Reverse emlxs_sli4_online */
1065 1065
1066 1066 mutex_enter(&EMLXS_PORT_LOCK);
1067 1067 if (!(hba->flag & FC_INTERLOCKED)) {
1068 1068 mutex_exit(&EMLXS_PORT_LOCK);
1069 1069
1070 1070 /* This is the only way to disable interupts */
1071 1071 bzero((void *)&mboxq, sizeof (MAILBOXQ));
1072 1072 emlxs_mb_resetport(hba, &mboxq);
1073 1073 if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1074 1074 MBX_WAIT, 0) != MBX_SUCCESS) {
1075 1075 /* Timeout occurred */
1076 1076 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1077 1077 "Timeout: Offline RESET");
1078 1078 }
1079 1079 (void) emlxs_check_hdw_ready(hba);
1080 1080 } else {
1081 1081 mutex_exit(&EMLXS_PORT_LOCK);
1082 1082 }
1083 1083
1084 1084 /* Shutdown the adapter interface */
1085 1085 emlxs_sli4_hba_kill(hba);
1086 1086
1087 1087 /* Free SLI shared memory */
1088 1088 emlxs_sli4_resource_free(hba);
1089 1089
1090 1090 /* Free driver shared memory */
1091 1091 (void) emlxs_mem_free_buffer(hba);
1092 1092
1093 1093 /* Free the host dump region buffer */
1094 1094 (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1095 1095
1096 1096 } /* emlxs_sli4_offline() */
1097 1097
1098 1098
/*
 * Map the PCI BAR register spaces and allocate the bootstrap mailbox
 * DMA buffer used to talk to the SLI4 adapter.
 *
 * Each step is guarded by a "not already set up" check, so the routine
 * is safe to call more than once.  On any failure every mapping made so
 * far is torn down via emlxs_sli4_unmap_hdw() and ENOMEM is returned
 * (note: ENOMEM is used even for register-mapping failures, not just
 * allocation failures).
 */
/*ARGSUSED*/
static int
emlxs_sli4_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	dev_info_t *dip;
	ddi_device_acc_attr_t dev_attr;
	int status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	/*
	 * Map in Hardware BAR pages that will be used for
	 * communication with HBA.
	 */
	if (hba->sli.sli4.bar1_acc_handle == 0) {
		/* BAR1 holds the MPU endpoint semaphore register */
		status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
		    (caddr_t *)&hba->sli.sli4.bar1_addr,
		    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
		if (status != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_attach_failed_msg,
			    "(PCI) ddi_regs_map_setup BAR1 failed. "
			    "stat=%d mem=%p attr=%p hdl=%p",
			    status, &hba->sli.sli4.bar1_addr, &dev_attr,
			    &hba->sli.sli4.bar1_acc_handle);
			goto failed;
		}
	}

	if (hba->sli.sli4.bar2_acc_handle == 0) {
		/* BAR2 holds the queue doorbell registers */
		status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
		    (caddr_t *)&hba->sli.sli4.bar2_addr,
		    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
		if (status != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_attach_failed_msg,
			    "ddi_regs_map_setup BAR2 failed. status=%x",
			    status);
			goto failed;
		}
	}

	if (hba->sli.sli4.bootstrapmb.virt == 0) {
		MBUF_INFO *buf_info;
		MBUF_INFO bufinfo;

		buf_info = &bufinfo;

		/*
		 * Allocate a page-aligned, 32-bit addressable, single
		 * segment DMA buffer for the bootstrap mailbox plus its
		 * extension area.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		/* Cache the buffer handles for later use and teardown */
		hba->sli.sli4.bootstrapmb.virt = buf_info->virt;
		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
		    MBOX_EXTENSION_SIZE;
		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
		    EMLXS_BOOTSTRAP_MB_SIZE);
	}

	/* offset from beginning of register space */
	hba->sli.sli4.MPUEPSemaphore_reg_addr =
	    (uint32_t *)(hba->sli.sli4.bar1_addr + CSR_MPU_EP_SEMAPHORE_OFFSET);
	hba->sli.sli4.MBDB_reg_addr =
	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
	hba->sli.sli4.CQDB_reg_addr =
	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
	hba->sli.sli4.MQDB_reg_addr =
	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
	hba->sli.sli4.WQDB_reg_addr =
	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
	hba->sli.sli4.RQDB_reg_addr =
	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);
	hba->chan_count = MAX_CHANNEL;

	return (0);

failed:

	/* Unwind any partial mappings/allocations made above */
	emlxs_sli4_unmap_hdw(hba);
	return (ENOMEM);


} /* emlxs_sli4_map_hdw() */
1195 1195
1196 1196
1197 1197 /*ARGSUSED*/
1198 1198 static void
1199 1199 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1200 1200 {
1201 1201 MBUF_INFO bufinfo;
1202 1202 MBUF_INFO *buf_info = &bufinfo;
1203 1203
1204 1204 /*
1205 1205 * Free map for Hardware BAR pages that were used for
1206 1206 * communication with HBA.
1207 1207 */
1208 1208 if (hba->sli.sli4.bar1_acc_handle) {
1209 1209 ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1210 1210 hba->sli.sli4.bar1_acc_handle = 0;
1211 1211 }
1212 1212
1213 1213 if (hba->sli.sli4.bar2_acc_handle) {
1214 1214 ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1215 1215 hba->sli.sli4.bar2_acc_handle = 0;
1216 1216 }
1217 1217 if (hba->sli.sli4.bootstrapmb.virt) {
1218 1218 bzero(buf_info, sizeof (MBUF_INFO));
1219 1219
1220 1220 if (hba->sli.sli4.bootstrapmb.phys) {
1221 1221 buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1222 1222 buf_info->data_handle =
1223 1223 hba->sli.sli4.bootstrapmb.data_handle;
1224 1224 buf_info->dma_handle =
1225 1225 hba->sli.sli4.bootstrapmb.dma_handle;
1226 1226 buf_info->flags = FC_MBUF_DMA;
1227 1227 }
1228 1228
1229 1229 buf_info->virt = hba->sli.sli4.bootstrapmb.virt;
1230 1230 buf_info->size = hba->sli.sli4.bootstrapmb.size;
1231 1231 emlxs_mem_free(hba, buf_info);
1232 1232
1233 1233 hba->sli.sli4.bootstrapmb.virt = NULL;
1234 1234 }
1235 1235
1236 1236 return;
1237 1237
1238 1238 } /* emlxs_sli4_unmap_hdw() */
1239 1239
1240 1240
1241 1241 static int
1242 1242 emlxs_check_hdw_ready(emlxs_hba_t *hba)
1243 1243 {
1244 1244 emlxs_port_t *port = &PPORT;
1245 1245 uint32_t status;
1246 1246 uint32_t i = 0;
1247 1247
1248 1248 /* Wait for reset completion */
1249 1249 while (i < 30) {
1250 1250 /* Check Semaphore register to see what the ARM state is */
1251 1251 status = READ_BAR1_REG(hba, FC_SEMA_REG(hba));
1252 1252
1253 1253 /* Check to see if any errors occurred during init */
1254 1254 if (status & ARM_POST_FATAL) {
1255 1255 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1256 1256 "SEMA Error: status=0x%x", status);
1257 1257
1258 1258 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1259 1259 #ifdef FMA_SUPPORT
1260 1260 /* Access handle validation */
1261 1261 EMLXS_CHK_ACC_HANDLE(hba,
1262 1262 hba->sli.sli4.bar1_acc_handle);
1263 1263 #endif /* FMA_SUPPORT */
1264 1264 return (1);
1265 1265 }
1266 1266 if ((status & ARM_POST_MASK) == ARM_POST_READY) {
1267 1267 /* ARM Ready !! */
1268 1268 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1269 1269 "ARM Ready: status=0x%x", status);
1270 1270 #ifdef FMA_SUPPORT
1271 1271 /* Access handle validation */
1272 1272 EMLXS_CHK_ACC_HANDLE(hba,
1273 1273 hba->sli.sli4.bar1_acc_handle);
1274 1274 #endif /* FMA_SUPPORT */
1275 1275 return (0);
1276 1276 }
1277 1277
1278 1278 DELAYMS(1000);
1279 1279 i++;
1280 1280 }
1281 1281
1282 1282 /* Timeout occurred */
1283 1283 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1284 1284 "Timeout waiting for READY: status=0x%x", status);
1285 1285
1286 1286 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1287 1287
1288 1288 #ifdef FMA_SUPPORT
1289 1289 /* Access handle validation */
1290 1290 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1291 1291 #endif /* FMA_SUPPORT */
1292 1292
1293 1293 /* Log a dump event - not supported */
1294 1294
1295 1295 return (2);
1296 1296
1297 1297 } /* emlxs_check_hdw_ready() */
1298 1298
1299 1299
1300 1300 static uint32_t
1301 1301 emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
1302 1302 {
1303 1303 emlxs_port_t *port = &PPORT;
1304 1304 uint32_t status;
1305 1305
1306 1306 /* Wait for reset completion, tmo is in 10ms ticks */
1307 1307 while (tmo) {
1308 1308 /* Check Semaphore register to see what the ARM state is */
1309 1309 status = READ_BAR2_REG(hba, FC_MBDB_REG(hba));
1310 1310
1311 1311 /* Check to see if any errors occurred during init */
1312 1312 if (status & BMBX_READY) {
1313 1313 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1314 1314 "BMBX Ready: status=0x%x", status);
1315 1315 #ifdef FMA_SUPPORT
1316 1316 /* Access handle validation */
1317 1317 EMLXS_CHK_ACC_HANDLE(hba,
1318 1318 hba->sli.sli4.bar2_acc_handle);
1319 1319 #endif /* FMA_SUPPORT */
1320 1320 return (tmo);
1321 1321 }
1322 1322
1323 1323 DELAYMS(10);
1324 1324 tmo--;
1325 1325 }
1326 1326
1327 1327 /* Timeout occurred */
1328 1328 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1329 1329 "Timeout waiting for BMailbox: status=0x%x", status);
1330 1330
1331 1331 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1332 1332
1333 1333 #ifdef FMA_SUPPORT
1334 1334 /* Access handle validation */
1335 1335 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1336 1336 #endif /* FMA_SUPPORT */
1337 1337
1338 1338 /* Log a dump event - not supported */
1339 1339
1340 1340 return (0);
1341 1341
1342 1342 } /* emlxs_check_bootstrap_ready() */
1343 1343
1344 1344
/*
 * Kick off the mailbox command that has already been staged in the
 * bootstrap mailbox buffer, by writing the buffer's physical address
 * to the bootstrap doorbell in two halves (high 30 bits, then low
 * 30 bits), waiting for BMBX_READY after each write.  The write order
 * is mandated by the hardware and must not be changed.
 *
 * tmo is the remaining timeout budget in 10ms ticks.  Returns the
 * remaining ticks on success, or 0 on timeout.
 */
static uint32_t
emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;
	uint32_t *iptr;
	uint32_t addr30;

	/*
	 * This routine assumes the bootstrap mbox is loaded
	 * with the mailbox command to be executed.
	 *
	 * First, load the high 30 bits of bootstrap mailbox
	 */
	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>32) & 0xfffffffc);
	addr30 |= BMBX_ADDR_HI;
	WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);

	/* Wait for the adapter to latch the high address half */
	tmo = emlxs_check_bootstrap_ready(hba, tmo);
	if (tmo == 0) {
		return (0);
	}

	/* Load the low 30 bits of bootstrap mailbox */
	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>2) & 0xfffffffc);
	WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);

	/* The low-half write starts execution; wait for completion */
	tmo = emlxs_check_bootstrap_ready(hba, tmo);
	if (tmo == 0) {
		return (0);
	}

	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;

	/* Trace the first three words of the completed mailbox */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
	    "BootstrapMB: %p Completed %08x %08x %08x",
	    hba->sli.sli4.bootstrapmb.virt,
	    *iptr, *(iptr+1), *(iptr+2));

	return (tmo);

} /* emlxs_issue_bootstrap_mb() */
1386 1386
1387 1387
1388 1388 static int
1389 1389 emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
1390 1390 {
1391 1391 #ifdef FMA_SUPPORT
1392 1392 emlxs_port_t *port = &PPORT;
1393 1393 #endif /* FMA_SUPPORT */
1394 1394 uint32_t *iptr;
1395 1395 uint32_t tmo;
1396 1396
1397 1397 if (emlxs_check_hdw_ready(hba)) {
1398 1398 return (1);
1399 1399 }
1400 1400
1401 1401 if (hba->flag & FC_BOOTSTRAPMB_INIT) {
1402 1402 return (0); /* Already initialized */
1403 1403 }
1404 1404
1405 1405 /* NOTE: tmo is in 10ms ticks */
1406 1406 tmo = emlxs_check_bootstrap_ready(hba, 3000);
1407 1407 if (tmo == 0) {
1408 1408 return (1);
1409 1409 }
1410 1410
1411 1411 /* Special words to initialize bootstrap mbox MUST be little endian */
1412 1412 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1413 1413 *iptr++ = LE_SWAP32(MQE_SPECIAL_WORD0);
1414 1414 *iptr = LE_SWAP32(MQE_SPECIAL_WORD1);
1415 1415
1416 1416 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1417 1417 MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
1418 1418
1419 1419 emlxs_data_dump(port, "EndianIN", (uint32_t *)iptr, 6, 0);
1420 1420 if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
1421 1421 return (1);
1422 1422 }
1423 1423 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1424 1424 MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
1425 1425 emlxs_data_dump(port, "EndianOUT", (uint32_t *)iptr, 6, 0);
1426 1426
1427 1427 #ifdef FMA_SUPPORT
1428 1428 if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
1429 1429 != DDI_FM_OK) {
1430 1430 EMLXS_MSGF(EMLXS_CONTEXT,
1431 1431 &emlxs_invalid_dma_handle_msg,
1432 1432 "emlxs_init_bootstrap_mb: hdl=%p",
1433 1433 hba->sli.sli4.bootstrapmb.dma_handle);
1434 1434 return (1);
1435 1435 }
1436 1436 #endif
1437 1437 hba->flag |= FC_BOOTSTRAPMB_INIT;
1438 1438 return (0);
1439 1439
1440 1440 } /* emlxs_init_bootstrap_mb() */
1441 1441
1442 1442
1443 1443 static uint32_t
1444 1444 emlxs_sli4_hba_init(emlxs_hba_t *hba)
1445 1445 {
1446 1446 int rc;
1447 1447 uint16_t i;
1448 1448 emlxs_port_t *vport;
1449 1449 emlxs_config_t *cfg = &CFG;
1450 1450 CHANNEL *cp;
1451 1451
1452 1452 /* Restart the adapter */
1453 1453 if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
1454 1454 return (1);
1455 1455 }
1456 1456
1457 1457 for (i = 0; i < hba->chan_count; i++) {
1458 1458 cp = &hba->chan[i];
1459 1459 cp->iopath = (void *)&hba->sli.sli4.wq[i];
1460 1460 }
1461 1461
1462 1462 /* Initialize all the port objects */
1463 1463 hba->vpi_base = 0;
1464 1464 hba->vpi_max = 0;
1465 1465 for (i = 0; i < MAX_VPORTS; i++) {
1466 1466 vport = &VPORT(i);
1467 1467 vport->hba = hba;
1468 1468 vport->vpi = i;
1469 1469
1470 1470 vport->VPIobj.index = i;
1471 1471 vport->VPIobj.VPI = i;
1472 1472 vport->VPIobj.port = vport;
1473 1473 }
1474 1474
1475 1475 /* Set the max node count */
1476 1476 if (hba->max_nodes == 0) {
1477 1477 if (cfg[CFG_NUM_NODES].current > 0) {
1478 1478 hba->max_nodes = cfg[CFG_NUM_NODES].current;
1479 1479 } else {
1480 1480 hba->max_nodes = 4096;
1481 1481 }
1482 1482 }
1483 1483
1484 1484 rc = emlxs_init_bootstrap_mb(hba);
1485 1485 if (rc) {
1486 1486 return (rc);
1487 1487 }
1488 1488
1489 1489 hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
1490 1490 hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
1491 1491 hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;
1492 1492
1493 1493 /* Cache the UE MASK registers value for UE error detection */
1494 1494 hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
1495 1495 (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
1496 1496 hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
1497 1497 (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
1498 1498
1499 1499 return (0);
1500 1500
1501 1501 } /* emlxs_sli4_hba_init() */
1502 1502
1503 1503
/*
 * Reset the SLI4 adapter and reinitialize the driver's per-HBA and
 * per-vport soft state to post-reset defaults.
 *
 * quiesce != 0 selects the quiesce variant of the reset mailbox path
 * (no hba_kill / bootstrap reinit beforehand).  The 'restart' and
 * 'skip_post' parameters are unused here (see /*ARGSUSED*/) —
 * presumably kept for interface parity with the SLI3 reset routine.
 *
 * Returns 0 on success, non-zero on failure (HBA state is set to
 * FC_ERROR on failure paths).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
	uint32_t quiesce)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	CHANNEL *cp;
	emlxs_config_t *cfg = &CFG;
	MAILBOXQ mboxq;
	uint32_t i;
	uint32_t rc;
	uint16_t channelno;

	/* Honor the user's "no reset" configuration setting */
	if (!cfg[CFG_RESET_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Adapter reset disabled.");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		return (1);
	}

	if (quiesce == 0) {
		emlxs_sli4_hba_kill(hba);

		/*
		 * Initialize hardware that will be used to bring
		 * SLI4 online.
		 */
		rc = emlxs_init_bootstrap_mb(hba);
		if (rc) {
			return (rc);
		}
	}

	bzero((void *)&mboxq, sizeof (MAILBOXQ));
	emlxs_mb_resetport(hba, &mboxq);

	if (quiesce == 0) {
		if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
		    MBX_POLL, 0) != MBX_SUCCESS) {
			/* Timeout occurred */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
			    "Timeout: RESET");
			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			/* Log a dump event - not supported */
			return (1);
		}
	} else {
		/* Quiesce path uses the restricted mailbox issue routine */
		if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
		    MBX_POLL, 0) != MBX_SUCCESS) {
			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			/* Log a dump event - not supported */
			return (1);
		}
	}
	emlxs_data_dump(port, "resetPort", (uint32_t *)&mboxq, 12, 0);

	/* Reset the hba structure */
	hba->flag &= FC_RESET_MASK;

	/* Re-establish each channel's back pointer and index */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];
		cp->hba = hba;
		cp->channelno = channelno;
	}

	/* Clear all link/IO accounting back to post-reset defaults */
	hba->channel_tx_count = 0;
	hba->io_count = 0;
	hba->iodone_count = 0;
	hba->topology = 0;
	hba->linkspeed = 0;
	hba->heartbeat_active = 0;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;
	hba->loopback_tics = 0;

	/* Reset the port objects */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		vport->flag &= EMLXS_PORT_RESET_MASK;
		vport->did = 0;
		vport->prev_did = 0;
		vport->lip_type = 0;
		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));

		/* Rebuild the base node (DID 0xffffff = unassigned) */
		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
		vport->node_base.nlp_Rpi = 0;
		vport->node_base.nlp_DID = 0xffffff;
		vport->node_base.nlp_list_next = NULL;
		vport->node_base.nlp_list_prev = NULL;
		vport->node_base.nlp_active = 1;
		vport->node_count = 0;

		/* Keep the unsolicited buffer token base at its minimum */
		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
		}
	}

	/* Confirm the hardware came back up after the reset */
	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	return (0);

} /* emlxs_sli4_hba_reset */
1612 1612
1613 1613
/*
 * SGE list types passed to emlxs_pkt_to_sgl().  SGL_LAST may be OR'd
 * into the type to mark the final SGE of the scatter/gather list.
 */
#define	SGL_CMD		0
#define	SGL_RESP	1
#define	SGL_DATA	2
#define	SGL_LAST	0x80
1618 1618
1619 1619 /*ARGSUSED*/
1620 1620 ULP_SGE64 *
1621 1621 emlxs_pkt_to_sgl(emlxs_port_t *port, ULP_SGE64 *sge, fc_packet_t *pkt,
1622 1622 uint32_t sgl_type, uint32_t *pcnt)
1623 1623 {
1624 1624 #ifdef DEBUG_SGE
1625 1625 emlxs_hba_t *hba = HBA;
1626 1626 #endif
1627 1627 ddi_dma_cookie_t *cp;
1628 1628 uint_t i;
1629 1629 uint_t last;
1630 1630 int32_t size;
1631 1631 int32_t sge_size;
1632 1632 uint64_t sge_addr;
1633 1633 int32_t len;
1634 1634 uint32_t cnt;
1635 1635 uint_t cookie_cnt;
1636 1636 ULP_SGE64 stage_sge;
1637 1637
1638 1638 last = sgl_type & SGL_LAST;
1639 1639 sgl_type &= ~SGL_LAST;
1640 1640
1641 1641 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1642 1642 switch (sgl_type) {
1643 1643 case SGL_CMD:
1644 1644 cp = pkt->pkt_cmd_cookie;
1645 1645 cookie_cnt = pkt->pkt_cmd_cookie_cnt;
1646 1646 size = (int32_t)pkt->pkt_cmdlen;
1647 1647 break;
1648 1648
1649 1649 case SGL_RESP:
1650 1650 cp = pkt->pkt_resp_cookie;
1651 1651 cookie_cnt = pkt->pkt_resp_cookie_cnt;
1652 1652 size = (int32_t)pkt->pkt_rsplen;
1653 1653 break;
1654 1654
1655 1655
1656 1656 case SGL_DATA:
1657 1657 cp = pkt->pkt_data_cookie;
1658 1658 cookie_cnt = pkt->pkt_data_cookie_cnt;
1659 1659 size = (int32_t)pkt->pkt_datalen;
1660 1660 break;
1661 1661 }
1662 1662
1663 1663 #else
1664 1664 switch (sgl_type) {
1665 1665 case SGL_CMD:
1666 1666 cp = &pkt->pkt_cmd_cookie;
1667 1667 cookie_cnt = 1;
1668 1668 size = (int32_t)pkt->pkt_cmdlen;
1669 1669 break;
1670 1670
1671 1671 case SGL_RESP:
1672 1672 cp = &pkt->pkt_resp_cookie;
1673 1673 cookie_cnt = 1;
1674 1674 size = (int32_t)pkt->pkt_rsplen;
1675 1675 break;
1676 1676
1677 1677
1678 1678 case SGL_DATA:
1679 1679 cp = &pkt->pkt_data_cookie;
1680 1680 cookie_cnt = 1;
1681 1681 size = (int32_t)pkt->pkt_datalen;
1682 1682 break;
1683 1683 }
1684 1684 #endif /* >= EMLXS_MODREV3 */
1685 1685
1686 1686 stage_sge.offset = 0;
1687 1687 stage_sge.reserved = 0;
1688 1688 stage_sge.last = 0;
1689 1689 cnt = 0;
1690 1690 for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
1691 1691
1692 1692 sge_size = cp->dmac_size;
1693 1693 sge_addr = cp->dmac_laddress;
1694 1694 while (sge_size && size) {
1695 1695 if (cnt) {
1696 1696 /* Copy staged SGE before we build next one */
1697 1697 BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
1698 1698 (uint8_t *)sge, sizeof (ULP_SGE64));
1699 1699 sge++;
1700 1700 }
1701 1701 len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
1702 1702 len = MIN(size, len);
1703 1703
1704 1704 stage_sge.addrHigh =
1705 1705 PADDR_HI(sge_addr);
1706 1706 stage_sge.addrLow =
1707 1707 PADDR_LO(sge_addr);
1708 1708 stage_sge.length = len;
1709 1709 if (sgl_type == SGL_DATA) {
1710 1710 stage_sge.offset = cnt;
1711 1711 }
1712 1712 #ifdef DEBUG_SGE
1713 1713 emlxs_data_dump(port, "SGE", (uint32_t *)&stage_sge,
1714 1714 4, 0);
1715 1715 #endif
1716 1716 sge_addr += len;
1717 1717 sge_size -= len;
1718 1718
1719 1719 cnt += len;
1720 1720 size -= len;
1721 1721 }
1722 1722 }
1723 1723
1724 1724 if (last) {
1725 1725 stage_sge.last = 1;
1726 1726 }
1727 1727 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
1728 1728 sizeof (ULP_SGE64));
1729 1729
1730 1730 sge++;
1731 1731
1732 1732 *pcnt = cnt;
1733 1733 return (sge);
1734 1734
1735 1735 } /* emlxs_pkt_to_sgl */
1736 1736
1737 1737
1738 1738 /*ARGSUSED*/
1739 1739 uint32_t
1740 1740 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
1741 1741 {
1742 1742 fc_packet_t *pkt;
1743 1743 XRIobj_t *xrip;
1744 1744 ULP_SGE64 *sge;
1745 1745 emlxs_wqe_t *wqe;
1746 1746 IOCBQ *iocbq;
1747 1747 ddi_dma_cookie_t *cp_cmd;
1748 1748 uint32_t cmd_cnt;
1749 1749 uint32_t resp_cnt;
1750 1750 uint32_t cnt;
1751 1751
1752 1752 iocbq = (IOCBQ *) &sbp->iocbq;
1753 1753 wqe = &iocbq->wqe;
1754 1754 pkt = PRIV2PKT(sbp);
1755 1755 xrip = sbp->xrip;
1756 1756 sge = xrip->SGList.virt;
1757 1757
1758 1758 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1759 1759 cp_cmd = pkt->pkt_cmd_cookie;
1760 1760 #else
1761 1761 cp_cmd = &pkt->pkt_cmd_cookie;
1762 1762 #endif /* >= EMLXS_MODREV3 */
1763 1763
1764 1764 iocbq = &sbp->iocbq;
1765 1765 if (iocbq->flag & IOCB_FCP_CMD) {
1766 1766
1767 1767 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1768 1768 return (1);
1769 1769 }
1770 1770
1771 1771 /* CMD payload */
1772 1772 sge = emlxs_pkt_to_sgl(port, sge, pkt, SGL_CMD, &cmd_cnt);
1773 1773
1774 1774 /* DATA payload */
1775 1775 if (pkt->pkt_datalen != 0) {
1776 1776 /* RSP payload */
1777 1777 sge = emlxs_pkt_to_sgl(port, sge, pkt,
1778 1778 SGL_RESP, &resp_cnt);
1779 1779
1780 1780 /* Data portion */
1781 1781 sge = emlxs_pkt_to_sgl(port, sge, pkt,
1782 1782 SGL_DATA | SGL_LAST, &cnt);
1783 1783 } else {
1784 1784 /* RSP payload */
1785 1785 sge = emlxs_pkt_to_sgl(port, sge, pkt,
1786 1786 SGL_RESP | SGL_LAST, &resp_cnt);
1787 1787 }
1788 1788
1789 1789 wqe->un.FcpCmd.Payload.addrHigh =
1790 1790 PADDR_HI(cp_cmd->dmac_laddress);
1791 1791 wqe->un.FcpCmd.Payload.addrLow =
1792 1792 PADDR_LO(cp_cmd->dmac_laddress);
1793 1793 wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
1794 1794 wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
1795 1795
1796 1796 } else {
1797 1797
1798 1798 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1799 1799 /* CMD payload */
1800 1800 sge = emlxs_pkt_to_sgl(port, sge, pkt,
1801 1801 SGL_CMD | SGL_LAST, &cmd_cnt);
1802 1802 } else {
1803 1803 /* CMD payload */
1804 1804 sge = emlxs_pkt_to_sgl(port, sge, pkt,
1805 1805 SGL_CMD, &cmd_cnt);
1806 1806
1807 1807 /* RSP payload */
1808 1808 sge = emlxs_pkt_to_sgl(port, sge, pkt,
1809 1809 SGL_RESP | SGL_LAST, &resp_cnt);
1810 1810 wqe->un.GenReq.PayloadLength = cmd_cnt;
1811 1811 }
1812 1812
1813 1813 wqe->un.GenReq.Payload.addrHigh =
1814 1814 PADDR_HI(cp_cmd->dmac_laddress);
1815 1815 wqe->un.GenReq.Payload.addrLow =
1816 1816 PADDR_LO(cp_cmd->dmac_laddress);
1817 1817 wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
1818 1818 }
1819 1819 return (0);
1820 1820 } /* emlxs_sli4_bde_setup */
1821 1821
1822 1822
1823 1823
1824 1824
1825 1825 static void
1826 1826 emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
1827 1827 {
1828 1828 emlxs_port_t *port = &PPORT;
1829 1829 emlxs_buf_t *sbp;
1830 1830 uint32_t channelno;
1831 1831 int32_t throttle;
1832 1832 emlxs_wqe_t *wqe;
1833 1833 emlxs_wqe_t *wqeslot;
1834 1834 WQ_DESC_t *wq;
1835 1835 uint32_t flag;
1836 1836 uint32_t wqdb;
1837 1837 uint16_t next_wqe;
1838 1838 off_t offset;
1839 1839
1840 1840
1841 1841 channelno = cp->channelno;
1842 1842 wq = (WQ_DESC_t *)cp->iopath;
1843 1843
1844 1844 #ifdef SLI4_FASTPATH_DEBUG
1845 1845 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1846 1846 "ISSUE WQE channel: %x %p", channelno, wq);
1847 1847 #endif
1848 1848
1849 1849 throttle = 0;
1850 1850
1851 1851 /* Check if FCP ring and adapter is not ready */
1852 1852 /* We may use any ring for FCP_CMD */
1853 1853 if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
1854 1854 if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
1855 1855 !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
1856 1856 emlxs_tx_put(iocbq, 1);
1857 1857 return;
1858 1858 }
1859 1859 }
1860 1860
1861 1861 /* Attempt to acquire CMD_RING lock */
1862 1862 if (mutex_tryenter(&EMLXS_QUE_LOCK(channelno)) == 0) {
1863 1863 /* Queue it for later */
1864 1864 if (iocbq) {
1865 1865 if ((hba->io_count -
1866 1866 hba->channel_tx_count) > 10) {
1867 1867 emlxs_tx_put(iocbq, 1);
1868 1868 return;
1869 1869 } else {
1870 1870
1871 1871 mutex_enter(&EMLXS_QUE_LOCK(channelno));
1872 1872 }
1873 1873 } else {
1874 1874 return;
1875 1875 }
1876 1876 }
1877 1877 /* EMLXS_QUE_LOCK acquired */
1878 1878
1879 1879 /* Throttle check only applies to non special iocb */
1880 1880 if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
1881 1881 /* Check if HBA is full */
1882 1882 throttle = hba->io_throttle - hba->io_active;
1883 1883 if (throttle <= 0) {
1884 1884 /* Hitting adapter throttle limit */
1885 1885 /* Queue it for later */
1886 1886 if (iocbq) {
1887 1887 emlxs_tx_put(iocbq, 1);
1888 1888 }
1889 1889
1890 1890 goto busy;
1891 1891 }
1892 1892 }
1893 1893
1894 1894 /* Check to see if we have room for this WQE */
1895 1895 next_wqe = wq->host_index + 1;
1896 1896 if (next_wqe >= wq->max_index) {
1897 1897 next_wqe = 0;
1898 1898 }
1899 1899
1900 1900 if (next_wqe == wq->port_index) {
1901 1901 /* Queue it for later */
1902 1902 if (iocbq) {
1903 1903 emlxs_tx_put(iocbq, 1);
1904 1904 }
1905 1905 goto busy;
1906 1906 }
1907 1907
1908 1908 /*
1909 1909 * We have a command ring slot available
1910 1910 * Make sure we have an iocb to send
1911 1911 */
1912 1912 if (iocbq) {
1913 1913 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
1914 1914
1915 1915 /* Check if the ring already has iocb's waiting */
1916 1916 if (cp->nodeq.q_first != NULL) {
1917 1917 /* Put the current iocbq on the tx queue */
1918 1918 emlxs_tx_put(iocbq, 0);
1919 1919
1920 1920 /*
1921 1921 * Attempt to replace it with the next iocbq
1922 1922 * in the tx queue
1923 1923 */
1924 1924 iocbq = emlxs_tx_get(cp, 0);
1925 1925 }
1926 1926
1927 1927 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
1928 1928 } else {
1929 1929 iocbq = emlxs_tx_get(cp, 1);
1930 1930 }
1931 1931
1932 1932 sendit:
1933 1933 /* Process each iocbq */
1934 1934 while (iocbq) {
1935 1935
1936 1936 wqe = &iocbq->wqe;
1937 1937 #ifdef SLI4_FASTPATH_DEBUG
1938 1938 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1939 1939 "ISSUE QID %d WQE iotag: %x xri: %x", wq->qid,
1940 1940 wqe->RequestTag, wqe->XRITag);
1941 1941 #endif
1942 1942
1943 1943 sbp = iocbq->sbp;
1944 1944 if (sbp) {
1945 1945 /* If exchange removed after wqe was prep'ed, drop it */
1946 1946 if (!(sbp->xrip)) {
1947 1947 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1948 1948 "Xmit WQE iotag: %x xri: %x aborted",
1949 1949 wqe->RequestTag, wqe->XRITag);
1950 1950
1951 1951 /* Get next iocb from the tx queue */
1952 1952 iocbq = emlxs_tx_get(cp, 1);
1953 1953 continue;
1954 1954 }
1955 1955
1956 1956 if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {
1957 1957
1958 1958 /* Perform delay */
1959 1959 if ((channelno == hba->channel_els) &&
1960 1960 !(iocbq->flag & IOCB_FCP_CMD)) {
1961 1961 drv_usecwait(100000);
1962 1962 } else {
1963 1963 drv_usecwait(20000);
1964 1964 }
1965 1965 }
1966 1966 }
1967 1967
1968 1968 /*
1969 1969 * At this point, we have a command ring slot available
1970 1970 * and an iocb to send
1971 1971 */
1972 1972 wq->release_depth--;
1973 1973 if (wq->release_depth == 0) {
1974 1974 wq->release_depth = WQE_RELEASE_DEPTH;
1975 1975 wqe->WQEC = 1;
1976 1976 }
1977 1977
1978 1978
1979 1979 HBASTATS.IocbIssued[channelno]++;
1980 1980
1981 1981 /* Check for ULP pkt request */
1982 1982 if (sbp) {
1983 1983 mutex_enter(&sbp->mtx);
↓ open down ↓ |
1983 lines elided |
↑ open up ↑ |
1984 1984
1985 1985 if (sbp->node == NULL) {
1986 1986 /* Set node to base node by default */
1987 1987 iocbq->node = (void *)&port->node_base;
1988 1988 sbp->node = (void *)&port->node_base;
1989 1989 }
1990 1990
1991 1991 sbp->pkt_flags |= PACKET_IN_CHIPQ;
1992 1992 mutex_exit(&sbp->mtx);
1993 1993
1994 - atomic_add_32(&hba->io_active, 1);
1994 + atomic_inc_32(&hba->io_active);
1995 1995 sbp->xrip->flag |= EMLXS_XRI_PENDING_IO;
1996 1996 }
1997 1997
1998 1998
1999 1999 /* Free the local iocb if there is no sbp tracking it */
2000 2000 if (sbp) {
2001 2001 #ifdef SFCT_SUPPORT
2002 2002 #ifdef FCT_IO_TRACE
2003 2003 if (sbp->fct_cmd) {
2004 2004 emlxs_fct_io_trace(port, sbp->fct_cmd,
2005 2005 EMLXS_FCT_IOCB_ISSUED);
2006 2006 emlxs_fct_io_trace(port, sbp->fct_cmd,
2007 2007 icmd->ULPCOMMAND);
2008 2008 }
2009 2009 #endif /* FCT_IO_TRACE */
2010 2010 #endif /* SFCT_SUPPORT */
2011 2011 cp->hbaSendCmd_sbp++;
2012 2012 iocbq->channel = cp;
2013 2013 } else {
2014 2014 cp->hbaSendCmd++;
2015 2015 }
2016 2016
2017 2017 flag = iocbq->flag;
2018 2018
2019 2019 /* Send the iocb */
2020 2020 wqeslot = (emlxs_wqe_t *)wq->addr.virt;
2021 2021 wqeslot += wq->host_index;
2022 2022
2023 2023 wqe->CQId = wq->cqid;
2024 2024 BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
2025 2025 sizeof (emlxs_wqe_t));
2026 2026 #ifdef DEBUG_WQE
2027 2027 emlxs_data_dump(port, "WQE", (uint32_t *)wqe, 18, 0);
2028 2028 #endif
2029 2029 offset = (off_t)((uint64_t)((unsigned long)
2030 2030 wq->addr.virt) -
2031 2031 (uint64_t)((unsigned long)
2032 2032 hba->sli.sli4.slim2.virt));
2033 2033
2034 2034 EMLXS_MPDATA_SYNC(wq->addr.dma_handle, offset,
2035 2035 4096, DDI_DMA_SYNC_FORDEV);
2036 2036
2037 2037 /* Ring the WQ Doorbell */
2038 2038 wqdb = wq->qid;
2039 2039 wqdb |= ((1 << 24) | (wq->host_index << 16));
2040 2040
2041 2041
2042 2042 WRITE_BAR2_REG(hba, FC_WQDB_REG(hba), wqdb);
2043 2043 wq->host_index = next_wqe;
2044 2044
2045 2045 #ifdef SLI4_FASTPATH_DEBUG
2046 2046 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2047 2047 "WQ RING: %08x", wqdb);
2048 2048 #endif
2049 2049
2050 2050 /*
2051 2051 * After this, the sbp / iocb / wqe should not be
2052 2052 * accessed in the xmit path.
2053 2053 */
2054 2054
2055 2055 if (!sbp) {
2056 2056 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
2057 2057 }
2058 2058
2059 2059 if (iocbq && (!(flag & IOCB_SPECIAL))) {
2060 2060 /* Check if HBA is full */
2061 2061 throttle = hba->io_throttle - hba->io_active;
2062 2062 if (throttle <= 0) {
2063 2063 goto busy;
2064 2064 }
2065 2065 }
2066 2066
2067 2067 /* Check to see if we have room for another WQE */
2068 2068 next_wqe++;
2069 2069 if (next_wqe >= wq->max_index) {
2070 2070 next_wqe = 0;
2071 2071 }
2072 2072
2073 2073 if (next_wqe == wq->port_index) {
2074 2074 /* Queue it for later */
2075 2075 goto busy;
2076 2076 }
2077 2077
2078 2078
2079 2079 /* Get the next iocb from the tx queue if there is one */
2080 2080 iocbq = emlxs_tx_get(cp, 1);
2081 2081 }
2082 2082
2083 2083 mutex_exit(&EMLXS_QUE_LOCK(channelno));
2084 2084
2085 2085 return;
2086 2086
2087 2087 busy:
2088 2088 if (throttle <= 0) {
2089 2089 HBASTATS.IocbThrottled++;
2090 2090 } else {
2091 2091 HBASTATS.IocbRingFull[channelno]++;
2092 2092 }
2093 2093
2094 2094 mutex_exit(&EMLXS_QUE_LOCK(channelno));
2095 2095
2096 2096 return;
2097 2097
2098 2098 } /* emlxs_sli4_issue_iocb_cmd() */
2099 2099
2100 2100
/*
 * Post one mailbox command to the SLI4 Mailbox Queue (MQ) and ring the
 * MQ doorbell.  'mqe' points at the next free MQ entry (chosen by the
 * caller); 'mb' is the driver-format mailbox command to send.  This
 * routine does not wait for completion; the command completes
 * asynchronously via the MQ completion path.  Always returns
 * MBX_SUCCESS.  'tmo' is unused here (see ARGSUSED).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mq(emlxs_port_t *port, MAILBOX4 *mqe, MAILBOX *mb,
    uint32_t tmo)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ *mbq;
	MAILBOX4 *mb4;
	MATCHMAP *mp;
	uint32_t *iptr;
	uint32_t mqdb;
	off_t offset;

	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;

	/* Record the active MQE so the completion handler can locate it */
	hba->mbox_mqe = (void *)mqe;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the mailbox area.
		 */
		/* Swap to hardware byte order while copying into the MQE */
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		/* Flush the MQ entry to the device before the doorbell */
		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
		    4096, DDI_DMA_SYNC_FORDEV);

		/* Heartbeats are too frequent to be worth dumping */
		if (mb->mbxCommand != MBX_HEARTBEAT) {
			emlxs_data_dump(port, "MBOX CMD", (uint32_t *)mqe,
			    18, 0);
		}
	} else {
		/* SLI_CONFIG and non-embedded */

		/*
		 * If this is not embedded, the MQ area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		/* Build the single SGE in-place: addr-lo, addr-hi, length */
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		/* Swap the external payload buffer to hardware byte order */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		/* Offset of the MQ within the shared slim2 DMA region */
		offset = (off_t)((uint64_t)((unsigned long)
		    hba->sli.sli4.mq.addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", (uint32_t *)mqe, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}

	/* Ring the MQ Doorbell */
	mqdb = hba->sli.sli4.mq.qid;
	mqdb |= ((1 << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK);

	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MQ RING: %08x", mqdb);
	}

	WRITE_BAR2_REG(hba, FC_MQDB_REG(hba), mqdb);
	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_mq() */
2187 2187
2188 2188
/*
 * Issue a mailbox command through the SLI4 bootstrap mailbox and poll
 * for its completion.  Used before (or instead of) the regular MQ path.
 * 'tmo' is the poll timeout in 10ms ticks (consumed by
 * emlxs_issue_bootstrap_mb()).
 *
 * Returns MBX_SUCCESS, MBX_TIMEOUT, or (with FMA_SUPPORT)
 * MBXERR_DMA_ERROR if a DMA handle check fails.  On success the
 * completed, byte-swapped response is copied back into 'mb' (and the
 * non-embedded payload back into mbq->nonembed, if present).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;
	MAILBOXQ *mbq;
	MAILBOX4 *mb4;
	MATCHMAP *mp = NULL;
	uint32_t *iptr;
	int nonembed = 0;	/* set if a non-embedded payload was used */

	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;

	/* Completion handling reads the response from here */
	hba->mbox_mqe = hba->sli.sli4.bootstrapmb.virt;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the bootstrap mailbox area.
		 */
		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		/* Swap to hardware byte order while copying */
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
		emlxs_data_dump(port, "MBOX CMD", iptr, 18, 0);
	} else {
		/*
		 * If this is not embedded, the bootstrap mailbox area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		nonembed = 1;
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		/* Build the single SGE in-place: addr-lo, addr-hi, length */
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", iptr, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys,
		    (uint32_t *)((uint8_t *)mp->virt));
		iptr = (uint32_t *)((uint8_t *)mp->virt);
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}


	/* NOTE: tmo is in 10ms ticks */
	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
		return (MBX_TIMEOUT);
	}

	/* Command completed: sync the response back and un-swap it */
	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
		    MAILBOX_CMD_SLI4_BSIZE);

		emlxs_data_dump(port, "MBOX CMP", iptr, 18, 0);

	} else {
		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
		    DDI_DMA_SYNC_FORKERNEL);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORKERNEL);

		/* Un-swap the external payload for kernel consumption */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
		    MAILBOX_CMD_SLI4_BSIZE);

		emlxs_data_dump(port, "MBOX CMP", iptr, 12, 0);
		iptr = (uint32_t *)((uint8_t *)mp->virt);
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
	}

#ifdef FMA_SUPPORT
	/* Fault-management: verify the DMA handles used above are healthy */
	if (nonembed && mp) {
		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "emlxs_sli4_issue_bootstrap: mp_hdl=%p",
			    mp->dma_handle);
			return (MBXERR_DMA_ERROR);
		}
	}

	if (emlxs_fm_check_dma_handle(hba,
	    hba->sli.sli4.bootstrapmb.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "emlxs_sli4_issue_bootstrap: hdl=%p",
		    hba->sli.sli4.bootstrapmb.dma_handle);
		return (MBXERR_DMA_ERROR);
	}
#endif

	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_bootstrap() */
2317 2317
2318 2318
/*
 * Top-level SLI4 mailbox dispatch routine.
 *
 * Validates state, enforces minimum timeouts, serializes against any
 * mailbox already in flight, then issues 'mbq' in one of three modes:
 *
 *   MBX_NOWAIT - post to the MQ and return immediately (requires
 *                interrupts enabled; completion handled asynchronously).
 *   MBX_SLEEP  - post to the MQ and cv_wait for completion.
 *   MBX_POLL   - issue via the bootstrap mailbox and poll.
 *
 * A caller-requested wait mode is downgraded to MBX_POLL when
 * interrupts are not yet enabled.  'tmo' is in seconds (minimums
 * applied below); returns an MBX_* status code.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
    uint32_t tmo)
{
	emlxs_port_t *port;
	MAILBOX4 *mb4;
	MAILBOX *mb;
	mbox_rsp_hdr_t *hdr_rsp;
	MATCHMAP *mp;
	uint32_t *iptr;
	uint32_t rc;
	uint32_t i;
	uint32_t tmo_local;	/* timeout converted to 10ms ticks */

	/* Default to the physical port if the caller didn't specify one */
	if (!mbq->port) {
		mbq->port = &PPORT;
	}

	port = (emlxs_port_t *)mbq->port;

	mb4 = (MAILBOX4 *)mbq;
	mb = (MAILBOX *)mbq;

	mb->mbxStatus = MBX_SUCCESS;
	rc = MBX_SUCCESS;

	/* Check for minimum timeouts */
	switch (mb->mbxCommand) {
	/* Mailbox commands that erase/write flash */
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_WRITE_NV:
	case MBX_FLASH_WR_ULA:
	case MBX_DEL_LD_ENTRY:
	case MBX_LOAD_SM:
		/* Flash operations are slow: at least 300 seconds */
		if (tmo < 300) {
			tmo = 300;
		}
		break;

	default:
		/* All other commands: at least 30 seconds */
		if (tmo < 30) {
			tmo = 30;
		}
		break;
	}

	/* Convert tmo seconds to 10 millisecond tics */
	tmo_local = tmo * 100;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Adjust wait flag */
	if (flag != MBX_NOWAIT) {
		/* Cannot sleep on a completion interrupt that won't fire */
		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
			flag = MBX_SLEEP;
		} else {
			flag = MBX_POLL;
		}
	} else {
		/* Must have interrupts enabled to perform MBX_NOWAIT */
		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {

			mb->mbxStatus = MBX_HARDWARE_ERROR;
			mutex_exit(&EMLXS_PORT_LOCK);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Interrupts disabled. %s failed.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));

			return (MBX_HARDWARE_ERROR);
		}
	}

	/* Check for hardware error ; special case SLI_CONFIG */
	/* (a COMMON_OPCODE_RESET is still allowed through on error) */
	if ((hba->flag & FC_HARDWARE_ERROR) &&
	    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
	    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
	    COMMON_OPCODE_RESET))) {
		mb->mbxStatus = MBX_HARDWARE_ERROR;

		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Hardware error reported. %s failed. status=%x mb=%p",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);

		return (MBX_HARDWARE_ERROR);
	}

	/* Only one mailbox may be outstanding at a time */
	if (hba->mbox_queue_flag) {
		/* If we are not polling, then queue it for later */
		if (flag == MBX_NOWAIT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Busy. %s: mb=%p NoWait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

			emlxs_mb_put(hba, mbq);

			HBASTATS.MboxBusy++;

			mutex_exit(&EMLXS_PORT_LOCK);

			return (MBX_BUSY);
		}

		/*
		 * Spin-wait (10ms per tick) for the in-flight mailbox to
		 * drain.  The port lock is dropped across each delay, so
		 * the timeout return below executes with the lock released.
		 */
		while (hba->mbox_queue_flag) {
			mutex_exit(&EMLXS_PORT_LOCK);

			if (tmo_local-- == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_event_msg,
				    "Timeout. %s: mb=%p tmo=%d Waiting.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    tmo);

				/* Non-lethalStatus mailbox timeout */
				/* Does not indicate a hardware error */
				mb->mbxStatus = MBX_TIMEOUT;
				return (MBX_TIMEOUT);
			}

			DELAYMS(10);
			mutex_enter(&EMLXS_PORT_LOCK);
		}
	}

	/* Initialize mailbox area */
	emlxs_mb_init(hba, mbq, flag, tmo);

	if (mb->mbxCommand == MBX_DOWN_LINK) {
		hba->sli.sli4.flag |= EMLXS_SLI4_DOWN_LINK;
	}

	mutex_exit(&EMLXS_PORT_LOCK);
	switch (flag) {

	case MBX_NOWAIT:
		/* Post to the MQ and return; completion is asynchronous */
		if (mb->mbxCommand != MBX_HEARTBEAT) {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Sending. %s: mb=%p NoWait. embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}

		/* Claim the next MQ entry and advance (wrap) host_index */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		if (mbq->bp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "BDE virt %p phys %p size x%x",
			    ((MATCHMAP *)mbq->bp)->virt,
			    ((MATCHMAP *)mbq->bp)->phys,
			    ((MATCHMAP *)mbq->bp)->size);
			emlxs_data_dump(port, "DATA",
			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
		}
		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);
		break;

	case MBX_POLL:
		/* Issue via the bootstrap mailbox and poll for completion */
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending. %s: mb=%p Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);

		/* Clean up the mailbox area */
		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout. %s: mb=%p tmo=%x Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));

			/* A polled timeout is treated as a lethal error */
			hba->flag |= FC_MBOX_TIMEOUT;
			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed. %s: mb=%p status=%x Poll. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}

			/* Process the result */
			if (!(mbq->flag & MBQ_PASSTHRU)) {
				if (mbq->mbox_cmpl) {
					(void) (mbq->mbox_cmpl)(hba, mbq);
				}
			}

			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
		}

		/* Surface any error reported in the non-embedded response */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		/* Attempt to send pending mailboxes */
		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
		if (mbq) {
			/* Attempt to send pending mailboxes */
			i = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
				emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
			}
		}
		break;

	case MBX_SLEEP:
		/* Post to the MQ, then block until the completion wakes us */
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending. %s: mb=%p Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		/* Claim the next MQ entry and advance (wrap) host_index */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);

		if (rc != MBX_SUCCESS) {
			break;
		}

		/* Wait for completion */
		/* The driver clock is timing the mailbox. */

		mutex_enter(&EMLXS_MBOX_LOCK);
		while (!(mbq->flag & MBQ_COMPLETED)) {
			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		/* Surface any error reported in the non-embedded response */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout. %s: mb=%p tmo=%x Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed. %s: mb=%p status=%x Sleep. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}
		break;
	}

	return (rc);

} /* emlxs_sli4_issue_mbox_cmd() */
2634 2634
2635 2635
2636 2636
/*
 * Quiesce-time variant of emlxs_sli4_issue_mbox_cmd().
 *
 * Always forces MBX_POLL through the bootstrap mailbox (the caller's
 * 'flag' is overridden), since the normal interrupt-driven MQ path is
 * unavailable while quiescing.  Performs no mailbox-queue serialization
 * and takes no locks.  'tmo' is in seconds (minimum 30); returns an
 * MBX_* status code.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
    uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *mb;
	mbox_rsp_hdr_t *hdr_rsp;
	MATCHMAP *mp;
	uint32_t rc;
	uint32_t tmo_local;	/* timeout converted to 10ms ticks */

	mb = (MAILBOX *)mbq;

	mb->mbxStatus = MBX_SUCCESS;
	rc = MBX_SUCCESS;

	/* Enforce a 30 second minimum timeout */
	if (tmo < 30) {
		tmo = 30;
	}

	/* Convert tmo seconds to 10 millisecond tics */
	tmo_local = tmo * 100;

	/* Polling is the only mode supported during quiesce */
	flag = MBX_POLL;

	/* Check for hardware error */
	if (hba->flag & FC_HARDWARE_ERROR) {
		mb->mbxStatus = MBX_HARDWARE_ERROR;
		return (MBX_HARDWARE_ERROR);
	}

	/* Initialize mailbox area */
	emlxs_mb_init(hba, mbq, flag, tmo);

	switch (flag) {

	case MBX_POLL:

		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);

		/* Clean up the mailbox area */
		if (rc == MBX_TIMEOUT) {
			/* A polled timeout is treated as a lethal error */
			hba->flag |= FC_MBOX_TIMEOUT;
			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

		} else {
			/* Process the result */
			if (!(mbq->flag & MBQ_PASSTHRU)) {
				if (mbq->mbox_cmpl) {
					(void) (mbq->mbox_cmpl)(hba, mbq);
				}
			}

			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
		}

		/* Surface any error reported in the non-embedded response */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		break;
	}

	return (rc);

} /* emlxs_sli4_issue_mbox_cmd4quiesce() */
2716 2716
2717 2717
2718 2718
#ifdef SFCT_SUPPORT
/*
 * Stub: FC target-mode (SFCT) IOCB preparation for SLI4.  Always
 * returns IOERR_NO_RESOURCES, so target-mode commands are never
 * issued via this path.  All parameters unused.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
{
	return (IOERR_NO_RESOURCES);

} /* emlxs_sli4_prep_fct_iocb() */
#endif /* SFCT_SUPPORT */
2728 2728
2729 2729
/*
 * Prepare an SLI4 Work Queue Entry (WQE) for an initiator-mode FCP
 * command described by 'sbp' on the given channel.
 *
 * Steps: resolve the target node to an RPI, allocate an XRI (exchange),
 * build the SGL via emlxs_sli4_bde_setup(), sync the SGL for the
 * device, then fill in the WQE command/context fields.
 *
 * Returns FC_SUCCESS on success, FC_TRAN_BUSY when an XRI or SGL
 * cannot be set up (caller may retry), or 0xff when no RPI exists for
 * the destination (pkt state set to IOSTAT_LOCAL_REJECT /
 * IOERR_INVALID_RPI).
 */
/*ARGSUSED*/
extern uint32_t
emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	emlxs_wqe_t *wqe;
	IOCBQ *iocbq;
	NODELIST *node;
	uint16_t iotag;
	uint32_t did;
	off_t offset;

	pkt = PRIV2PKT(sbp);
	/* Destination is stored little-endian in the FC header */
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	cp = &hba->chan[channel];

	iocbq = &sbp->iocbq;
	iocbq->channel = (void *) cp;
	iocbq->port = (void *) port;

	wqe = &iocbq->wqe;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));

	/* Find target node object */
	node = (NODELIST *)iocbq->node;
	rpip = EMLXS_NODE_TO_RPI(port, node);

	if (!rpip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}

	sbp->channel = cp;
	/* Next allocate an Exchange for this command */
	xrip = emlxs_sli4_alloc_xri(hba, sbp, rpip);

	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	sbp->bmp = NULL;
	iotag = sbp->iotag;

#ifdef SLI4_FASTPATH_DEBUG
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg, /* DEBUG */
	    "Prep FCP iotag: %x xri: %x", iotag, xrip->XRI);
#endif

	/* Indicate this is a FCP cmd */
	iocbq->flag |= IOCB_FCP_CMD;

	/* Build the scatter/gather list; free the XRI on failure */
	if (emlxs_sli4_bde_setup(port, sbp)) {
		emlxs_sli4_free_xri(hba, sbp, xrip, 1);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}


	/* DEBUG */
#ifdef DEBUG_FCP
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "SGLaddr virt %p phys %p size %d", xrip->SGList.virt,
	    xrip->SGList.phys, pkt->pkt_datalen);
	emlxs_data_dump(port, "SGL", (uint32_t *)xrip->SGList.virt, 20, 0);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CMD virt %p len %d:%d:%d",
	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
#endif

	/* SGL offset within the shared slim2 DMA region */
	offset = (off_t)((uint64_t)((unsigned long)
	    xrip->SGList.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Flush the SGL to the device before the WQE references it */
	EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
	    xrip->SGList.size, DDI_DMA_SYNC_FORDEV);

	/* if device is FCP-2 device, set the following bit */
	/* that says to run the FC-TAPE protocol. */
	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		wqe->ERP = 1;
	}

	/* Select the WQE command by transfer direction */
	if (pkt->pkt_datalen == 0) {
		wqe->Command = CMD_FCP_ICMND64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
		wqe->Command = CMD_FCP_IREAD64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
		wqe->PU = PARM_READ_CHECK;
	} else {
		wqe->Command = CMD_FCP_IWRITE64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
	}
	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;

	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	/* Timeouts larger than 0xff are encoded as 0 */
	wqe->Timer =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	/* Propagate the CS_CTL priority value for chained sequences */
	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = iotag;
	wqe->CQId = 0x3ff; /* default CQ for response */
	return (FC_SUCCESS);
} /* emlxs_sli4_prep_fcp_iocb() */
2864 2864
2865 2865
/*
 * Stub: IP-over-FC IOCB preparation for SLI4.  Always returns
 * FC_TRAN_BUSY, so IP traffic is never issued via this path.
 * All parameters unused.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	return (FC_TRAN_BUSY);

} /* emlxs_sli4_prep_ip_iocb() */
2873 2873
2874 2874
2875 2875 /*ARGSUSED*/
2876 2876 static uint32_t
2877 2877 emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2878 2878 {
2879 2879 emlxs_hba_t *hba = HBA;
2880 2880 fc_packet_t *pkt;
2881 2881 IOCBQ *iocbq;
2882 2882 IOCB *iocb;
2883 2883 emlxs_wqe_t *wqe;
2884 2884 FCFIobj_t *fcfp;
2885 2885 RPIobj_t *rpip = NULL;
2886 2886 XRIobj_t *xrip;
2887 2887 CHANNEL *cp;
2888 2888 uint32_t did;
2889 2889 uint32_t cmd;
2890 2890 ULP_SGE64 stage_sge;
2891 2891 ULP_SGE64 *sge;
2892 2892 ddi_dma_cookie_t *cp_cmd;
2893 2893 ddi_dma_cookie_t *cp_resp;
2894 2894 emlxs_node_t *node;
2895 2895 off_t offset;
2896 2896
2897 2897 pkt = PRIV2PKT(sbp);
2898 2898 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2899 2899
2900 2900 iocbq = &sbp->iocbq;
2901 2901 wqe = &iocbq->wqe;
2902 2902 iocb = &iocbq->iocb;
2903 2903 bzero((void *)wqe, sizeof (emlxs_wqe_t));
2904 2904 bzero((void *)iocb, sizeof (IOCB));
2905 2905 cp = &hba->chan[hba->channel_els];
2906 2906
2907 2907 /* Initalize iocbq */
2908 2908 iocbq->port = (void *) port;
2909 2909 iocbq->channel = (void *) cp;
2910 2910
2911 2911 sbp->channel = cp;
2912 2912 sbp->bmp = NULL;
2913 2913
2914 2914 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2915 2915 cp_cmd = pkt->pkt_cmd_cookie;
2916 2916 cp_resp = pkt->pkt_resp_cookie;
2917 2917 #else
2918 2918 cp_cmd = &pkt->pkt_cmd_cookie;
2919 2919 cp_resp = &pkt->pkt_resp_cookie;
2920 2920 #endif /* >= EMLXS_MODREV3 */
2921 2921
2922 2922 /* CMD payload */
2923 2923 sge = &stage_sge;
2924 2924 sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
2925 2925 sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
2926 2926 sge->length = pkt->pkt_cmdlen;
2927 2927 sge->offset = 0;
2928 2928 sge->reserved = 0;
2929 2929
2930 2930 /* Initalize iocb */
2931 2931 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2932 2932 /* ELS Response */
2933 2933
2934 2934 xrip = emlxs_sli4_register_xri(hba, sbp,
2935 2935 pkt->pkt_cmd_fhdr.rx_id);
2936 2936
2937 2937 if (!xrip) {
2938 2938 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2939 2939 "Unable to find XRI. rxid=%x",
2940 2940 pkt->pkt_cmd_fhdr.rx_id);
2941 2941
2942 2942 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2943 2943 IOERR_NO_XRI, 0);
2944 2944 return (0xff);
2945 2945 }
2946 2946
2947 2947 rpip = xrip->rpip;
2948 2948
2949 2949 if (!rpip) {
2950 2950 /* This means that we had a node registered */
2951 2951 /* when the unsol request came in but the node */
2952 2952 /* has since been unregistered. */
2953 2953 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2954 2954 "Unable to find RPI. rxid=%x",
2955 2955 pkt->pkt_cmd_fhdr.rx_id);
2956 2956
2957 2957 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2958 2958 IOERR_INVALID_RPI, 0);
2959 2959 return (0xff);
2960 2960 }
2961 2961
2962 2962 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2963 2963 "Prep ELS XRI: xri=%x iotag=%x oxid=%x rpi=%x",
2964 2964 xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
2965 2965
2966 2966 wqe->Command = CMD_XMIT_ELS_RSP64_CX;
2967 2967 wqe->CmdType = WQE_TYPE_GEN;
2968 2968
2969 2969 wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
2970 2970 wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
2971 2971 wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
2972 2972
2973 2973 wqe->un.ElsRsp.RemoteId = did;
2974 2974 wqe->PU = 0x3;
2975 2975
2976 2976 sge->last = 1;
2977 2977 /* Now sge is fully staged */
2978 2978
2979 2979 sge = xrip->SGList.virt;
2980 2980 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2981 2981 sizeof (ULP_SGE64));
2982 2982
2983 2983 wqe->ContextTag = port->VPIobj.VPI;
2984 2984 wqe->ContextType = WQE_VPI_CONTEXT;
2985 2985 wqe->OXId = xrip->rx_id;
2986 2986
2987 2987 } else {
2988 2988 /* ELS Request */
2989 2989
2990 2990 node = (emlxs_node_t *)iocbq->node;
2991 2991 rpip = EMLXS_NODE_TO_RPI(port, node);
2992 2992 fcfp = port->VPIobj.vfip->fcfp;
2993 2993
2994 2994 if (!rpip) {
2995 2995 rpip = port->VPIobj.rpip;
2996 2996 }
2997 2997
2998 2998 /* Next allocate an Exchange for this command */
2999 2999 xrip = emlxs_sli4_alloc_xri(hba, sbp, rpip);
3000 3000
3001 3001 if (!xrip) {
3002 3002 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3003 3003 "Adapter Busy. Unable to allocate exchange. "
3004 3004 "did=0x%x", did);
3005 3005
3006 3006 return (FC_TRAN_BUSY);
3007 3007 }
3008 3008
3009 3009 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3010 3010 "Prep ELS XRI: xri=%x iotag=%x rpi=%x", xrip->XRI,
3011 3011 xrip->iotag, rpip->RPI);
3012 3012
3013 3013 wqe->Command = CMD_ELS_REQUEST64_CR;
3014 3014 wqe->CmdType = WQE_TYPE_ELS;
3015 3015
3016 3016 wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
3017 3017 wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
3018 3018 wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
3019 3019
3020 3020 /* setup for rsp */
3021 3021 iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
3022 3022 iocb->ULPPU = 1; /* Wd4 is relative offset */
3023 3023
3024 3024 sge->last = 0;
3025 3025
3026 3026 sge = xrip->SGList.virt;
3027 3027 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
3028 3028 sizeof (ULP_SGE64));
3029 3029
3030 3030 wqe->un.ElsCmd.PayloadLength =
3031 3031 pkt->pkt_cmdlen; /* Byte offset of rsp data */
3032 3032
3033 3033 /* RSP payload */
3034 3034 sge = &stage_sge;
3035 3035 sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
3036 3036 sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
3037 3037 sge->length = pkt->pkt_rsplen;
3038 3038 sge->offset = 0;
3039 3039 sge->last = 1;
3040 3040 /* Now sge is fully staged */
3041 3041
3042 3042 sge = xrip->SGList.virt;
3043 3043 sge++;
3044 3044 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
3045 3045 sizeof (ULP_SGE64));
3046 3046 #ifdef DEBUG_ELS
3047 3047 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3048 3048 "SGLaddr virt %p phys %p",
3049 3049 xrip->SGList.virt, xrip->SGList.phys);
3050 3050 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3051 3051 "PAYLOAD virt %p phys %p",
3052 3052 pkt->pkt_cmd, cp_cmd->dmac_laddress);
3053 3053 emlxs_data_dump(port, "SGL", (uint32_t *)xrip->SGList.virt,
3054 3054 12, 0);
3055 3055 #endif
3056 3056
3057 3057 cmd = *((uint32_t *)pkt->pkt_cmd);
3058 3058 cmd &= ELS_CMD_MASK;
3059 3059
3060 3060 switch (cmd) {
3061 3061 case ELS_CMD_FLOGI:
3062 3062 wqe->un.ElsCmd.SP = 1;
3063 3063 wqe->ContextTag = fcfp->FCFI;
3064 3064 wqe->ContextType = WQE_FCFI_CONTEXT;
3065 3065 if (hba->flag & FC_FIP_SUPPORTED) {
3066 3066 wqe->CmdType |= WQE_TYPE_MASK_FIP;
3067 3067 wqe->ELSId |= WQE_ELSID_FLOGI;
3068 3068 }
3069 3069 break;
3070 3070 case ELS_CMD_FDISC:
3071 3071 wqe->un.ElsCmd.SP = 1;
3072 3072 wqe->ContextTag = port->VPIobj.VPI;
3073 3073 wqe->ContextType = WQE_VPI_CONTEXT;
3074 3074 if (hba->flag & FC_FIP_SUPPORTED) {
3075 3075 wqe->CmdType |= WQE_TYPE_MASK_FIP;
3076 3076 wqe->ELSId |= WQE_ELSID_FDISC;
3077 3077 }
3078 3078 break;
3079 3079 case ELS_CMD_LOGO:
3080 3080 if (did == FABRIC_DID) {
3081 3081 wqe->ContextTag = fcfp->FCFI;
3082 3082 wqe->ContextType = WQE_FCFI_CONTEXT;
3083 3083 if (hba->flag & FC_FIP_SUPPORTED) {
3084 3084 wqe->CmdType |= WQE_TYPE_MASK_FIP;
3085 3085 wqe->ELSId |= WQE_ELSID_LOGO;
3086 3086 }
3087 3087 } else {
3088 3088 wqe->ContextTag = port->VPIobj.VPI;
3089 3089 wqe->ContextType = WQE_VPI_CONTEXT;
3090 3090 }
3091 3091 break;
3092 3092
3093 3093 case ELS_CMD_SCR:
3094 3094 case ELS_CMD_PLOGI:
3095 3095 case ELS_CMD_PRLI:
3096 3096 default:
3097 3097 wqe->ContextTag = port->VPIobj.VPI;
3098 3098 wqe->ContextType = WQE_VPI_CONTEXT;
3099 3099 break;
3100 3100 }
3101 3101 wqe->un.ElsCmd.RemoteId = did;
3102 3102 wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3103 3103 }
3104 3104
3105 3105 offset = (off_t)((uint64_t)((unsigned long)
3106 3106 xrip->SGList.virt) -
3107 3107 (uint64_t)((unsigned long)
3108 3108 hba->sli.sli4.slim2.virt));
3109 3109
3110 3110 EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
3111 3111 xrip->SGList.size, DDI_DMA_SYNC_FORDEV);
3112 3112
3113 3113 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
3114 3114 wqe->CCPE = 1;
3115 3115 wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
3116 3116 }
3117 3117
3118 3118 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3119 3119 case FC_TRAN_CLASS2:
3120 3120 wqe->Class = CLASS2;
3121 3121 break;
3122 3122 case FC_TRAN_CLASS3:
3123 3123 default:
3124 3124 wqe->Class = CLASS3;
3125 3125 break;
3126 3126 }
3127 3127 sbp->class = wqe->Class;
3128 3128 wqe->XRITag = xrip->XRI;
3129 3129 wqe->RequestTag = xrip->iotag;
3130 3130 wqe->CQId = 0x3ff;
3131 3131 return (FC_SUCCESS);
3132 3132
3133 3133 } /* emlxs_sli4_prep_els_iocb() */
3134 3134
3135 3135
/*ARGSUSED*/
/*
 * Prepare a CT (Common Transport) command or response for transmission
 * on the CT channel by building the SLI4 work-queue entry (wqe) and the
 * legacy IOCB fields kept for response processing.
 *
 * Returns:
 *	FC_SUCCESS    - WQE is staged and DMA-synced, ready to post.
 *	FC_TRAN_BUSY  - XRI/SGE resources unavailable; caller may retry.
 *	0xff          - hard failure; packet state already set via
 *	                emlxs_set_pkt_state() (missing XRI or RPI).
 */
static uint32_t
emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	NODELIST *node = NULL;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	uint32_t did;
	off_t offset;

	pkt = PRIV2PKT(sbp);
	/* Destination ID comes from the FC header (24-bit, byte-swapped) */
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	/* All CT traffic goes out on the dedicated CT channel */
	cp = &hba->chan[hba->channel_ct];

	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->bmp = NULL;
	sbp->channel = cp;

	/* Initalize wqe */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* CT Response */

		/* Reuse the exchange opened by the unsolicited request */
		xrip = emlxs_sli4_register_xri(hba, sbp,
		    pkt->pkt_cmd_fhdr.rx_id);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Prep CT XRI: xri=%x iotag=%x oxid=%x", xrip->XRI,
		    xrip->iotag, xrip->rx_id);

		/* Build the scatter/gather list for the payload */
		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		/* Responses are sent as an XMIT_SEQUENCE on the */
		/* existing exchange */
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_XMIT_SEQUENCE64_CR;
		wqe->un.XmitSeq.la = 1;

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
			wqe->un.XmitSeq.ls = 1;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
			wqe->un.XmitSeq.si = 1;
		}

		/* Frame header fields are echoed into the WQE */
		wqe->un.XmitSeq.DFctl = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.XmitSeq.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.XmitSeq.Type = pkt->pkt_cmd_fhdr.type;
		wqe->OXId = xrip->rx_id;
		wqe->XC = 0; /* xri_tag is a new exchange */
		wqe->CmdSpecific[0] = wqe->un.GenReq.Payload.tus.f.bdeSize;

	} else {
		/* CT Request */

		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find rpi. did=0x%x rpi=%x",
			    did, node->nlp_Rpi);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(hba, sbp, rpip);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Prep CT XRI: %x iotag %x", xrip->XRI, xrip->iotag);

		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			/* Release the exchange allocated above */
			emlxs_sli4_free_xri(hba, sbp, xrip, 1);
			return (FC_TRAN_BUSY);
		}

		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_GEN_REQUEST64_CR;
		wqe->un.GenReq.la = 1;
		wqe->un.GenReq.DFctl = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.GenReq.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.GenReq.Type = pkt->pkt_cmd_fhdr.type;
		/* Timer field is 8 bits; larger timeouts mean "no timeout" */
		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

#ifdef DEBUG_CT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "SGLaddr virt %p phys %p", xrip->SGList.virt,
		    xrip->SGList.phys);
		emlxs_data_dump(port, "SGL", (uint32_t *)xrip->SGList.virt,
		    12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CMD virt %p len %d:%d",
		    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
		emlxs_data_dump(port, "DATA", (uint32_t *)pkt->pkt_cmd, 20, 0);
#endif /* DEBUG_CT */
	}

	/* Setup for rsp */
	iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
	iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
	iocb->un.genreq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
	iocb->ULPPU = 1;	/* Wd4 is relative offset */

	/* Offset of this XRI's SGL within the slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    xrip->SGList.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Flush the SGL to the device before posting the WQE */
	EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
	    xrip->SGList.size, DDI_DMA_SYNC_FORDEV);

	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = 0x3ff;
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_ct_iocb() */
3328 3328
3329 3329
/*ARGSUSED*/
/*
 * Peek at the current event-queue entry for 'eq' and report whether it
 * is valid (i.e. the hardware has posted a new event).
 *
 * Returns 1 if the EQE at eq->host_index has its valid bit set,
 * 0 otherwise.  The entry is NOT consumed and host_index is NOT
 * advanced; this is a read-only poll.
 *
 * NOTE(review): an earlier comment here claimed EMLXS_PORT_LOCK must be
 * held on entry, but this routine acquires and releases that lock
 * itself below, so callers must enter WITHOUT it held — confirm against
 * all call sites.
 */
static int
emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
{
	uint32_t *ptr;
	EQE_u eqe;
	int rc = 0;
	off_t offset;

	/* Locate the next EQE to be serviced */
	ptr = eq->addr.virt;
	ptr += eq->host_index;

	/* Offset of this EQ within the slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    eq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Sync the EQ memory so we see the device's latest writes */
	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	mutex_enter(&EMLXS_PORT_LOCK);

	/* EQEs are little-endian on the wire; swap before testing */
	eqe.word = *ptr;
	eqe.word = BE_SWAP32(eqe.word);

	if (eqe.word & EQE_VALID) {
		rc = 1;
	}

	mutex_exit(&EMLXS_PORT_LOCK);

	return (rc);

} /* emlxs_sli4_read_eq */
3365 3365
3366 3366
3367 3367 /*ARGSUSED*/
3368 3368 static void
3369 3369 emlxs_sli4_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
3370 3370 {
3371 3371 int rc = 0;
3372 3372 int i;
3373 3373 char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
3374 3374 char arg2;
3375 3375
3376 3376 /*
3377 3377 * Poll the eqe to see if the valid bit is set or not
3378 3378 */
3379 3379
3380 3380 for (;;) {
3381 3381 if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
3382 3382 /* only poll eqe0 */
3383 3383 rc = emlxs_sli4_read_eq(hba,
3384 3384 &hba->sli.sli4.eq[0]);
3385 3385 if (rc == 1) {
3386 3386 (void) bcopy((char *)&arg[0],
3387 3387 (char *)&arg2, sizeof (char));
3388 3388 break;
3389 3389 }
3390 3390 } else {
3391 3391 /* poll every msi vector */
3392 3392 for (i = 0; i < hba->intr_count; i++) {
3393 3393 rc = emlxs_sli4_read_eq(hba,
3394 3394 &hba->sli.sli4.eq[i]);
3395 3395
3396 3396 if (rc == 1) {
3397 3397 break;
3398 3398 }
3399 3399 }
3400 3400 if ((i != hba->intr_count) && (rc == 1)) {
3401 3401 (void) bcopy((char *)&arg[i],
3402 3402 (char *)&arg2, sizeof (char));
3403 3403 break;
3404 3404 }
3405 3405 }
3406 3406 }
3407 3407
3408 3408 /* process it here */
3409 3409 rc = emlxs_sli4_msi_intr((char *)hba, (char *)&arg2);
3410 3410
3411 3411 return;
3412 3412
3413 3413 } /* emlxs_sli4_poll_intr() */
3414 3414
3415 3415
/*ARGSUSED*/
/*
 * Decode and dispatch an asynchronous event CQE from the adapter.
 * Handles link-state changes, FCoE/FIP fabric events, DCBX (logged
 * only) and Group 5 QOS-speed events.
 */
static void
emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
{
	emlxs_port_t *port = &PPORT;

	/* Save the event tag */
	/* NOTE(review): the tag is taken from the link union member and */
	/* saved for every event code, not just LINK_STATE — confirm */
	/* this is intentional for non-link events. */
	hba->link_event_tag = cqe->un.link.event_tag;

	switch (cqe->event_code) {
	case ASYNC_EVENT_CODE_LINK_STATE:
		switch (cqe->un.link.link_status) {
		case ASYNC_EVENT_PHYS_LINK_UP:
			/* Physical link up is logged only; the logical */
			/* link-up event drives the state machine. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_UP. val=%d type=%x",
			    cqe->valid, cqe->event_type);
			break;

		case ASYNC_EVENT_PHYS_LINK_DOWN:
		case ASYNC_EVENT_LOGICAL_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LINK_DOWN. val=%d type=%x",
			    cqe->valid, cqe->event_type);

			/* Notify the FCF state machine of the link loss */
			(void) emlxs_fcf_linkdown_notify(port);

			mutex_enter(&EMLXS_PORT_LOCK);
			hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
			mutex_exit(&EMLXS_PORT_LOCK);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_UP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_UP. val=%d type=%x",
			    cqe->valid, cqe->event_type);

			/* Record link speed/topology before notifying */
			if (cqe->un.link.port_speed == PHY_1GHZ_LINK) {
				hba->linkspeed = LA_1GHZ_LINK;
			} else {
				hba->linkspeed = LA_10GHZ_LINK;
			}
			hba->topology = TOPOLOGY_PT_PT;
			hba->qos_linkspeed = cqe->un.link.qos_link_speed;

			(void) emlxs_fcf_linkup_notify(port);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_FCOE_FIP:
		/* FIP fabric events drive the FCF discovery machinery */
		switch (cqe->un.fcoe.evt_type) {
		case ASYNC_EVENT_NEW_FCF_DISC:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FCOE Async Event: FCF_FOUND %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_found_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_FCF_TABLE_FULL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FCOE Async Event: FCFTAB_FULL %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_full_notify(port);
			break;
		case ASYNC_EVENT_FCF_DEAD:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FCOE Async Event: FCF_LOST %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_lost_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_VIRT_LINK_CLEAR:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FCOE Async Event: CVL %d",
			    cqe->un.fcoe.ref_index);

			/* CVL ref_index is VPI-based; convert to a */
			/* zero-based VPI for the notify call */
			(void) emlxs_fcf_cvl_notify(port,
			    (cqe->un.fcoe.ref_index - hba->vpi_base));
			break;

		case ASYNC_EVENT_FCF_MODIFIED:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FCOE Async Event: FCF_CHANGED %d",
			    cqe->un.fcoe.ref_index);

			(void) emlxs_fcf_changed_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_DCBX:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "DCBX Async Event Code %d: Not supported",
		    cqe->event_code);
		break;
	case ASYNC_EVENT_CODE_GRP_5:
		/* Group 5 events only update the QOS link speed */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Group 5 Async Event type %d", cqe->event_type);
		if (cqe->event_type == ASYNC_EVENT_QOS_SPEED) {
			hba->qos_linkspeed = cqe->un.qos.qos_link_speed;
		}
		break;
	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unknown Async Event Code %d", cqe->event_code);
		break;
	}

} /* emlxs_sli4_process_async_event() */
3527 3527
3528 3528
/*ARGSUSED*/
/*
 * Handle a mailbox-completion CQE: validate the mailbox state, copy the
 * completed MQE back into the driver's MAILBOXQ, sync any external DMA
 * buffers, run the completion callback, and finally kick off the next
 * pending mailbox command (if any).
 *
 * Lock ordering: EMLXS_PORT_LOCK is taken first to examine
 * mbox_queue_flag; EMLXS_MBOX_LOCK is taken nested within it to access
 * hba->mbox_mbq.
 */
static void
emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX4 *mb;
	MATCHMAP *mbox_bp;
	MATCHMAP *mbox_nonembed;
	MAILBOXQ *mbq = NULL;
	uint32_t size;
	uint32_t *iptr;
	int rc;
	off_t offset;

	/* A consumed-but-not-completed entry carries no result yet */
	if (cqe->consumed && !cqe->completed) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Mbox event. Entry consumed but not completed");
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);
	switch (hba->mbox_queue_flag) {
	case 0:
		/* No mailbox outstanding — stray completion */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
		    "CQ ENTRY: Mbox event. No mailbox active.");

		mutex_exit(&EMLXS_PORT_LOCK);
		return;

	case MBX_POLL:

		/* Mark mailbox complete, this should wake up any polling */
		/* threads. This can happen if interrupts are enabled while */
		/* a polled mailbox command is outstanding. If we don't set */
		/* MBQ_COMPLETED here, the polling thread may wait until */
		/* timeout error occurs */

		mutex_enter(&EMLXS_MBOX_LOCK);
		mbq = (MAILBOXQ *)hba->mbox_mbq;
		if (mbq) {
			port = (emlxs_port_t *)mbq->port;
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "CQ ENTRY: Mbox event. Completing Polled command.");
			mbq->flag |= MBQ_COMPLETED;
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		/* The polling thread owns the rest of the processing */
		mutex_exit(&EMLXS_PORT_LOCK);
		return;

	case MBX_SLEEP:
	case MBX_NOWAIT:
		/* Check mbox_timer, it acts as a service flag too */
		/* The first to service the mbox queue will clear the timer */
		if (hba->mbox_timer) {
			hba->mbox_timer = 0;

			mutex_enter(&EMLXS_MBOX_LOCK);
			mbq = (MAILBOXQ *)hba->mbox_mbq;
			mutex_exit(&EMLXS_MBOX_LOCK);
		}

		if (!mbq) {
			/* Another thread already claimed the completion */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox event. No service required.");
			mutex_exit(&EMLXS_PORT_LOCK);
			return;
		}

		/* The MAILBOX4 image lives at the head of the MAILBOXQ */
		mb = (MAILBOX4 *)mbq;
		mutex_exit(&EMLXS_PORT_LOCK);
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
		    "CQ ENTRY: Mbox event. Invalid Mailbox flag (%x).",
		    hba->mbox_queue_flag);

		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Set port context */
	port = (emlxs_port_t *)mbq->port;

	/* Offset of the MQ within the slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    hba->sli.sli4.mq.addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Now that we are the owner, DMA Sync entire MQ if needed */
	/* NOTE(review): syncs FORDEV although the MQE is read back just */
	/* below — confirm the intended sync direction. */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORDEV);

	/* Copy the completed MQE (byte-swapped) into the driver mailbox */
	BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
	    MAILBOX_CMD_SLI4_BSIZE);

	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Mbox event. Mbox complete. status=%x cmd=%x",
		    mb->mbxStatus, mb->mbxCommand);

		emlxs_data_dump(port, "MBOX CMP", (uint32_t *)hba->mbox_mqe,
		    12, 0);
	}

	if (mb->mbxCommand == MBX_SLI_CONFIG) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Mbox sge_cnt: %d length: %d embed: %d",
		    mb->un.varSLIConfig.be.sge_cnt,
		    mb->un.varSLIConfig.be.payload_length,
		    mb->un.varSLIConfig.be.embedded);
	}

	/* Now sync the memory buffer if one was used */
	if (mbq->bp) {
		mbox_bp = (MATCHMAP *)mbq->bp;
		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
		    DDI_DMA_SYNC_FORKERNEL);
#ifdef FMA_SUPPORT
		if (emlxs_fm_check_dma_handle(hba, mbox_bp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "emlxs_sli4_process_mbox_event: hdl=%p",
			    mbox_bp->dma_handle);

			mb->mbxStatus = MBXERR_DMA_ERROR;
		}
#endif
	}

	/* Now sync the memory buffer if one was used */
	if (mbq->nonembed) {
		mbox_nonembed = (MATCHMAP *)mbq->nonembed;
		size = mbox_nonembed->size;
		EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Non-embedded payloads are byte-swapped in place */
		iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);

#ifdef FMA_SUPPORT
		if (emlxs_fm_check_dma_handle(hba,
		    mbox_nonembed->dma_handle) != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "emlxs_sli4_process_mbox_event: hdl=%p",
			    mbox_nonembed->dma_handle);

			mb->mbxStatus = MBXERR_DMA_ERROR;
		}
#endif
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
	}

	/* Mailbox has been completely received at this point */

	if (mb->mbxCommand == MBX_HEARTBEAT) {
		/* Heartbeats need no further processing */
		hba->heartbeat_active = 0;
		goto done;
	}

	if (hba->mbox_queue_flag == MBX_SLEEP) {
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Received. %s: status=%x Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb->mbxStatus);
		}
	} else {
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Completed. %s: status=%x",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb->mbxStatus);
		}
	}

	/* Filter out passthru mailbox */
	if (mbq->flag & MBQ_PASSTHRU) {
		goto done;
	}

	if (mb->mbxStatus) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
		    (uint32_t)mb->mbxStatus);
	}

	if (mbq->mbox_cmpl) {
		rc = (mbq->mbox_cmpl)(hba, mbq);

		/* If mbox was retried, return immediately */
		if (rc) {
			return;
		}
	}

done:

	/* Clean up the mailbox area */
	emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);

	/* Attempt to send pending mailboxes */
	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
	if (mbq) {
		/* Attempt to send pending mailboxes */
		rc = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		}
	}
	return;

} /* emlxs_sli4_process_mbox_event() */
3746 3746
3747 3747
/*ARGSUSED*/
/*
 * Translate a SLI4 work-queue completion entry (CQE) into the legacy
 * IOCB completion format stored in sbp->iocbq, so the common (SLI3-era)
 * completion paths can process it.  The original WQE (still in the
 * iocbq) supplies the command code and tags; the CQE supplies status
 * and counts.
 */
static void
emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
{
#ifdef SLI4_FASTPATH_DEBUG
	emlxs_port_t *port = &PPORT;
#endif
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t *iptr;
	fc_packet_t *pkt;
	emlxs_wqe_t *wqe;

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;

#ifdef SLI4_FASTPATH_DEBUG
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQE to IOCB: cmd:x%x tag:x%x xri:x%x", wqe->Command,
	    wqe->RequestTag, wqe->XRITag);
#endif

	/* Common completion fields */
	iocb->ULPSTATUS = cqe->Status;
	iocb->un.ulpWord[4] = cqe->Parameter;
	iocb->ULPIOTAG = cqe->RequestTag;
	iocb->ULPCONTEXT = wqe->XRITag;

	/* Map each WQE command to its IOCB completion equivalent */
	switch (wqe->Command) {

	case CMD_FCP_ICMND64_CR:
		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
		break;

	case CMD_FCP_IREAD64_CR:
		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
		iocb->ULPPU = PARM_READ_CHECK;
		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
			/* Residual = requested - actually transferred */
			iocb->un.fcpi64.fcpi_parm =
			    wqe->un.FcpCmd.TotalTransferCount -
			    cqe->CmdSpecific;
		}
		break;

	case CMD_FCP_IWRITE64_CR:
		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
		break;

	case CMD_ELS_REQUEST64_CR:
		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
		if (iocb->ULPSTATUS == 0) {
			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
		}
		if (iocb->ULPSTATUS == IOSTAT_LS_RJT) {
			/* For LS_RJT, the driver populates the rsp buffer */
			pkt = PRIV2PKT(sbp);
			iptr = (uint32_t *)pkt->pkt_resp;
			*iptr++ = ELS_CMD_LS_RJT;
			*iptr = cqe->Parameter;
		}
		break;

	case CMD_GEN_REQUEST64_CR:
		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
		break;

	case CMD_XMIT_SEQUENCE64_CR:
		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
		break;

	default:
		/* Unrecognized commands pass through unchanged */
		iocb->ULPCOMMAND = wqe->Command;

	}

} /* emlxs_CQE_to_IOCB() */
3826 3826
3827 3827
3828 3828 /*ARGSUSED*/
3829 3829 static void
3830 3830 emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
3831 3831 {
3832 3832 #ifdef SFCT_SUPPORT
3833 3833 #ifdef FCT_IO_TRACE
3834 3834 emlxs_port_t *port = &PPORT;
3835 3835 #endif /* FCT_IO_TRACE */
3836 3836 #endif /* SFCT_SUPPORT */
3837 3837 CHANNEL *cp;
3838 3838 emlxs_buf_t *sbp;
3839 3839 IOCBQ *iocbq;
3840 3840 uint16_t i;
3841 3841 uint32_t trigger;
3842 3842 CQE_CmplWQ_t cqe;
3843 3843
3844 3844 mutex_enter(&EMLXS_FCTAB_LOCK);
3845 3845 for (i = 0; i < hba->max_iotag; i++) {
3846 3846 sbp = hba->fc_table[i];
3847 3847 if (sbp == NULL || sbp == STALE_PACKET) {
3848 3848 continue;
3849 3849 }
3850 3850 hba->fc_table[i] = STALE_PACKET;
3851 3851 hba->io_count--;
3852 3852 sbp->iotag = 0;
3853 3853 mutex_exit(&EMLXS_FCTAB_LOCK);
3854 3854
3855 3855 cp = sbp->channel;
3856 3856 bzero(&cqe, sizeof (CQE_CmplWQ_t));
3857 3857 cqe.RequestTag = i;
3858 3858 cqe.Status = IOSTAT_LOCAL_REJECT;
3859 3859 cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;
3860 3860
3861 3861 cp->hbaCmplCmd_sbp++;
↓ open down ↓ |
1857 lines elided |
↑ open up ↑ |
3862 3862
3863 3863 #ifdef SFCT_SUPPORT
3864 3864 #ifdef FCT_IO_TRACE
3865 3865 if (sbp->fct_cmd) {
3866 3866 emlxs_fct_io_trace(port, sbp->fct_cmd,
3867 3867 EMLXS_FCT_IOCB_COMPLETE);
3868 3868 }
3869 3869 #endif /* FCT_IO_TRACE */
3870 3870 #endif /* SFCT_SUPPORT */
3871 3871
3872 - atomic_add_32(&hba->io_active, -1);
3872 + atomic_dec_32(&hba->io_active);
3873 3873
3874 3874 /* Copy entry to sbp's iocbq */
3875 3875 iocbq = &sbp->iocbq;
3876 3876 emlxs_CQE_to_IOCB(hba, &cqe, sbp);
3877 3877
3878 3878 iocbq->next = NULL;
3879 3879
3880 3880 /* Exchange is no longer busy on-chip, free it */
3881 3881 emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
3882 3882
3883 3883 if (!(sbp->pkt_flags &
3884 3884 (PACKET_POLLED | PACKET_ALLOCATED))) {
3885 3885 /* Add the IOCB to the channel list */
3886 3886 mutex_enter(&cp->rsp_lock);
3887 3887 if (cp->rsp_head == NULL) {
3888 3888 cp->rsp_head = iocbq;
3889 3889 cp->rsp_tail = iocbq;
3890 3890 } else {
3891 3891 cp->rsp_tail->next = iocbq;
3892 3892 cp->rsp_tail = iocbq;
3893 3893 }
3894 3894 mutex_exit(&cp->rsp_lock);
3895 3895 trigger = 1;
3896 3896 } else {
3897 3897 emlxs_proc_channel_event(hba, cp, iocbq);
3898 3898 }
3899 3899 mutex_enter(&EMLXS_FCTAB_LOCK);
3900 3900 }
3901 3901 mutex_exit(&EMLXS_FCTAB_LOCK);
3902 3902
3903 3903 if (trigger) {
3904 3904 for (i = 0; i < hba->chan_count; i++) {
3905 3905 cp = &hba->chan[i];
3906 3906 if (cp->rsp_head != NULL) {
3907 3907 emlxs_thread_trigger2(&cp->intr_thread,
3908 3908 emlxs_proc_channel, cp);
3909 3909 }
3910 3910 }
3911 3911 }
3912 3912
3913 3913 } /* emlxs_sli4_hba_flush_chipq() */
3914 3914
3915 3915
3916 3916 /*ARGSUSED*/
3917 3917 static void
3918 3918 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
3919 3919 CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3920 3920 {
3921 3921 emlxs_port_t *port = &PPORT;
3922 3922 CHANNEL *cp;
3923 3923 uint16_t request_tag;
3924 3924 CQE_u *cq_entry;
3925 3925
3926 3926 request_tag = cqe->RequestTag;
3927 3927
3928 3928 cq_entry = (CQE_u *)cqe;
3929 3929
3930 3930 /* 1 to 1 mapping between CQ and channel */
3931 3931 cp = cq->channelp;
3932 3932
3933 3933 cp->hbaCmplCmd++;
3934 3934
3935 3935 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3936 3936 "CQ ENTRY: OOR Cmpl: tag=%x", request_tag);
3937 3937
3938 3938 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3939 3939 "CQ ENTRY: %08x %08x %08x %08x", cq_entry->word[0],
3940 3940 cq_entry->word[1], cq_entry->word[2], cq_entry->word[3]);
3941 3941
3942 3942 } /* emlxs_sli4_process_oor_wqe_cmpl() */
3943 3943
3944 3944
/*
 * Process a single WQ completion CQE for a normal (in-range) request tag.
 *
 * Looks up the originating command buffer (sbp) in fc_table by the CQE's
 * RequestTag, releases its XRI (or marks it abort-in-progress when the
 * XB bit is set), and then either queues the iocbq for deferred
 * completion on the channel's response list or completes it inline for
 * polled/driver-allocated packets.
 *
 * Locking: EMLXS_FCTAB_LOCK is held from the fc_table lookup through the
 * XRI release so the sbp/xrip pairing cannot change underneath us.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	CHANNEL *cp;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	uint16_t request_tag;
#ifdef SFCT_SUPPORT
	fct_cmd_t *fct_cmd;
	emlxs_buf_t *cmd_sbp;
#endif /* SFCT_SUPPORT */

	request_tag = cqe->RequestTag;

	/* 1 to 1 mapping between CQ and channel */
	cp = cq->channelp;

	mutex_enter(&EMLXS_FCTAB_LOCK);
	sbp = hba->fc_table[request_tag];
	/*
	 * One outstanding I/O retired; decremented even for stale/NULL
	 * entries below since the chip has completed the slot either way.
	 */
	atomic_dec_32(&hba->io_active);

	if (sbp == STALE_PACKET) {
		cp->hbaCmplCmd_sbp++;
		mutex_exit(&EMLXS_FCTAB_LOCK);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Stale sbp. tag=%x. Dropping...", request_tag);
		return;
	}

	if (!sbp || !(sbp->xrip)) {
		cp->hbaCmplCmd++;
		mutex_exit(&EMLXS_FCTAB_LOCK);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: NULL sbp %p. tag=%x. Dropping...",
		    sbp, request_tag);
		return;
	}

#ifdef SLI4_FASTPATH_DEBUG
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQ ENTRY: process wqe compl");
#endif

	cp->hbaCmplCmd_sbp++;

	/* Copy entry to sbp's iocbq */
	iocbq = &sbp->iocbq;
	emlxs_CQE_to_IOCB(hba, cqe, sbp);

	iocbq->next = NULL;

	if (cqe->XB) {
		/* Mark exchange as ABORT in progress */
		sbp->xrip->flag &= ~EMLXS_XRI_PENDING_IO;
		sbp->xrip->flag |= EMLXS_XRI_ABORT_INP;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: ABORT INP: tag=%x xri=%x", request_tag,
		    sbp->xrip->XRI);

		/* XRI stays reserved until the abort CQE arrives */
		emlxs_sli4_free_xri(hba, sbp, 0, 0);
	} else {
		/* Exchange is no longer busy on-chip, free it */
		emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 0);
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

#ifdef SFCT_SUPPORT
	/* Notify the FC target framework of the IOCB completion */
	fct_cmd = sbp->fct_cmd;
	if (fct_cmd) {
		cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
		mutex_enter(&cmd_sbp->fct_mtx);
		EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
		mutex_exit(&cmd_sbp->fct_mtx);
	}
#endif /* SFCT_SUPPORT */

	/*
	 * If this is NOT a polled command completion
	 * or a driver allocated pkt, then defer pkt
	 * completion.
	 */
	if (!(sbp->pkt_flags &
	    (PACKET_POLLED | PACKET_ALLOCATED))) {
		/* Add the IOCB to the channel list */
		mutex_enter(&cp->rsp_lock);
		if (cp->rsp_head == NULL) {
			cp->rsp_head = iocbq;
			cp->rsp_tail = iocbq;
		} else {
			cp->rsp_tail->next = iocbq;
			cp->rsp_tail = iocbq;
		}
		mutex_exit(&cp->rsp_lock);

		/* Delay triggering thread till end of ISR */
		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
	} else {
		emlxs_proc_channel_event(hba, cp, iocbq);
	}

} /* emlxs_sli4_process_wqe_cmpl() */
4050 4050
4051 4051
4052 4052 /*ARGSUSED*/
4053 4053 static void
4054 4054 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
4055 4055 CQE_RelWQ_t *cqe)
4056 4056 {
4057 4057 #ifdef SLI4_FASTPATH_DEBUG
4058 4058 emlxs_port_t *port = &PPORT;
4059 4059 #endif
4060 4060 WQ_DESC_t *wq;
4061 4061 CHANNEL *cp;
4062 4062 uint32_t i;
4063 4063
4064 4064 i = cqe->WQid;
4065 4065 wq = &hba->sli.sli4.wq[hba->sli.sli4.wq_map[i]];
4066 4066
4067 4067 #ifdef SLI4_FASTPATH_DEBUG
4068 4068 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4069 4069 "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
4070 4070 cqe->WQindex);
4071 4071 #endif
4072 4072
4073 4073 wq->port_index = cqe->WQindex;
4074 4074
4075 4075 /* Cmd ring may be available. Try sending more iocbs */
4076 4076 for (i = 0; i < hba->chan_count; i++) {
4077 4077 cp = &hba->chan[i];
4078 4078 if (wq == (WQ_DESC_t *)cp->iopath) {
4079 4079 emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
4080 4080 }
4081 4081 }
4082 4082
4083 4083 } /* emlxs_sli4_process_release_wqe() */
4084 4084
4085 4085
4086 4086 /*ARGSUSED*/
4087 4087 emlxs_iocbq_t *
4088 4088 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
4089 4089 {
4090 4090 emlxs_queue_t *q;
4091 4091 emlxs_iocbq_t *iocbq;
4092 4092 emlxs_iocbq_t *prev;
4093 4093 fc_frame_hdr_t *fchdr2;
4094 4094 RXQ_DESC_t *rxq;
4095 4095
4096 4096 switch (fchdr->type) {
4097 4097 case 1: /* ELS */
4098 4098 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
4099 4099 break;
4100 4100 case 0x20: /* CT */
4101 4101 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
4102 4102 break;
4103 4103 default:
4104 4104 return (NULL);
4105 4105 }
4106 4106
4107 4107 mutex_enter(&rxq->lock);
4108 4108
4109 4109 q = &rxq->active;
4110 4110 iocbq = (emlxs_iocbq_t *)q->q_first;
4111 4111 prev = NULL;
4112 4112
4113 4113 while (iocbq) {
4114 4114
4115 4115 fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
4116 4116
4117 4117 if ((fchdr2->s_id == fchdr->s_id) &&
4118 4118 (fchdr2->ox_id == fchdr->ox_id) &&
4119 4119 (fchdr2->seq_id == fchdr->seq_id)) {
4120 4120 /* Remove iocbq */
4121 4121 if (prev) {
4122 4122 prev->next = iocbq->next;
4123 4123 }
4124 4124 if (q->q_first == (uint8_t *)iocbq) {
4125 4125 q->q_first = (uint8_t *)iocbq->next;
4126 4126 }
4127 4127 if (q->q_last == (uint8_t *)iocbq) {
4128 4128 q->q_last = (uint8_t *)prev;
4129 4129 }
4130 4130 q->q_cnt--;
4131 4131
4132 4132 break;
4133 4133 }
4134 4134
4135 4135 prev = iocbq;
4136 4136 iocbq = iocbq->next;
4137 4137 }
4138 4138
4139 4139 mutex_exit(&rxq->lock);
4140 4140
4141 4141 return (iocbq);
4142 4142
4143 4143 } /* emlxs_sli4_rxq_get() */
4144 4144
4145 4145
4146 4146 /*ARGSUSED*/
4147 4147 void
4148 4148 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
4149 4149 {
4150 4150 emlxs_queue_t *q;
4151 4151 fc_frame_hdr_t *fchdr;
4152 4152 RXQ_DESC_t *rxq;
4153 4153
4154 4154 fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
4155 4155
4156 4156 switch (fchdr->type) {
4157 4157 case 1: /* ELS */
4158 4158 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
4159 4159 break;
4160 4160 case 0x20: /* CT */
4161 4161 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
4162 4162 break;
4163 4163 default:
4164 4164 return;
4165 4165 }
4166 4166
4167 4167 mutex_enter(&rxq->lock);
4168 4168
4169 4169 q = &rxq->active;
4170 4170
4171 4171 if (q->q_last) {
4172 4172 ((emlxs_iocbq_t *)q->q_last)->next = iocbq;
4173 4173 q->q_cnt++;
4174 4174 } else {
4175 4175 q->q_first = (uint8_t *)iocbq;
4176 4176 q->q_cnt = 1;
4177 4177 }
4178 4178
4179 4179 q->q_last = (uint8_t *)iocbq;
4180 4180 iocbq->next = NULL;
4181 4181
4182 4182 mutex_exit(&rxq->lock);
4183 4183
4184 4184 return;
4185 4185
4186 4186 } /* emlxs_sli4_rxq_put() */
4187 4187
4188 4188
4189 4189 static void
4190 4190 emlxs_sli4_rq_post(emlxs_port_t *port, uint16_t rqid)
4191 4191 {
4192 4192 emlxs_hba_t *hba = HBA;
4193 4193 emlxs_rqdbu_t rqdb;
4194 4194
4195 4195 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4196 4196 "RQ POST: rqid=%d count=1", rqid);
4197 4197
4198 4198 /* Ring the RQ doorbell once to repost the RQ buffer */
4199 4199 rqdb.word = 0;
4200 4200 rqdb.db.Qid = rqid;
4201 4201 rqdb.db.NumPosted = 1;
4202 4202
4203 4203 WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
4204 4204
4205 4205 } /* emlxs_sli4_rq_post() */
4206 4206
4207 4207
/*
 * Process an Unsolicited Receive CQE.
 *
 * The adapter has deposited a frame into a header/data RQ pair.  This
 * routine validates the CQE and FC header, accumulates multi-frame
 * sequences on the rxq staging list, and on end-of-sequence builds a
 * receive IOCB and hands it to the BLS/ELS/CT handler.  The consumed RQ
 * buffer is reposted via emlxs_sli4_rq_post() exactly once on every path.
 *
 * NOTE(review): 'abort' is set while a sequence is unhandled, but the
 * ABTS path below is explicitly unimplemented; dropped sequences are
 * only cleaned up locally.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
    CQE_UnsolRcv_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	RQ_DESC_t *hdr_rq;
	RQ_DESC_t *data_rq;
	MBUF_INFO *hdr_mp;
	MBUF_INFO *data_mp;
	MATCHMAP *seq_mp;
	uint32_t *data;
	fc_frame_hdr_t fchdr;
	uint32_t hdr_rqi;
	uint32_t host_index;
	emlxs_iocbq_t *iocbq = NULL;
	emlxs_iocb_t *iocb;
	emlxs_node_t *node;
	uint32_t i;
	uint32_t seq_len;	/* bytes accumulated for this sequence */
	uint32_t seq_cnt;	/* frames accumulated for this sequence */
	uint32_t buf_type;
	char label[32];
	emlxs_wqe_t *wqe;
	CHANNEL *cp;
	uint16_t iotag;
	XRIobj_t *xrip;
	RPIobj_t *rpip = NULL;
	uint32_t cmd;
	uint32_t posted = 0;	/* RQ buffer already reposted? */
	uint32_t abort = 1;	/* assume abort until sequence handled */
	off_t offset;

	/* The data RQ is paired immediately after its header RQ */
	hdr_rqi = hba->sli.sli4.rq_map[cqe->RQid];
	hdr_rq = &hba->sli.sli4.rq[hdr_rqi];
	data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQ ENTRY: Unsol Rcv: RQid=%d,%d index=%d status=%x "
	    "hdr_size=%d data_size=%d",
	    cqe->RQid, hdr_rqi, hdr_rq->host_index, cqe->Status, cqe->hdr_size,
	    cqe->data_size);

	/* Validate the CQE */

	/* Check status */
	switch (cqe->Status) {
	case RQ_STATUS_SUCCESS: /* 0x10 */
		break;

	case RQ_STATUS_BUFLEN_EXCEEDED:  /* 0x11 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "CQ ENTRY: Unsol Rcv: Payload truncated.");
		break;

	case RQ_STATUS_NEED_BUFFER: /* 0x12 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
		return;

	case RQ_STATUS_FRAME_DISCARDED:  /* 0x13 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
		return;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
		    cqe->Status);
		break;
	}

	/* Make sure there is a frame header */
	if (cqe->hdr_size < sizeof (fc_frame_hdr_t)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
		return;
	}

	/* Update host index (data RQ tracks the header RQ in lockstep) */
	mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
	host_index = hdr_rq->host_index;
	hdr_rq->host_index++;
	if (hdr_rq->host_index >= hdr_rq->max_index) {
		hdr_rq->host_index = 0;
	}
	data_rq->host_index = hdr_rq->host_index;
	mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);

	/* Get the next header rqb */
	hdr_mp = &hdr_rq->rqb[host_index];

	offset = (off_t)((uint64_t)((unsigned long)hdr_mp->virt) -
	    (uint64_t)((unsigned long)hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, offset,
	    sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);

	/* Copy header to a local, endian-corrected struct */
	LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
	    sizeof (fc_frame_hdr_t));

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "RQ HDR[%d]: rctl:%x type:%x "
	    "sid:%x did:%x oxid:%x rxid:%x",
	    host_index, fchdr.r_ctl, fchdr.type,
	    fchdr.s_id, fchdr.d_id, fchdr.ox_id, fchdr.rx_id);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
	    host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
	    fchdr.df_ctl, fchdr.ro);

	/* Verify fc header type */
	switch (fchdr.type) {
	case 0: /* BLS */
		/* Only ABTS (r_ctl 0x81) is accepted for BLS */
		if (fchdr.r_ctl != 0x81) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: Unexpected FC rctl (0x%x) "
			    "received. Dropping...",
			    fchdr.r_ctl);

			goto done;
		}

		/* Make sure there is no payload */
		if (cqe->data_size != 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: ABTS payload provided. Dropping...");

			goto done;
		}

		buf_type = 0xFFFFFFFF;
		(void) strcpy(label, "ABTS");
		cp = &hba->chan[hba->channel_els];
		break;

	case 0x01: /* ELS */
		/* Make sure there is a payload */
		if (cqe->data_size == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: Unsol Rcv: No ELS payload provided. "
			    "Dropping...");

			goto done;
		}

		buf_type = MEM_ELSBUF;
		(void) strcpy(label, "Unsol ELS");
		cp = &hba->chan[hba->channel_els];
		break;

	case 0x20: /* CT */
		/* Make sure there is a payload */
		if (cqe->data_size == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: Unsol Rcv: No CT payload provided. "
			    "Dropping...");

			goto done;
		}

		buf_type = MEM_CTBUF;
		(void) strcpy(label, "Unsol CT");
		cp = &hba->chan[hba->channel_ct];
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
		    fchdr.type);

		goto done;
	}
	/* Fc Header is valid */

	/* Check if this is an active sequence */
	iocbq = emlxs_sli4_rxq_get(hba, &fchdr);

	if (!iocbq) {
		/* New sequence: first frame must open it properly */
		if (fchdr.type != 0) {
			if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ ENTRY: %s: First of sequence not"
				    " set.  Dropping...",
				    label);

				goto done;
			}
		}

		if (fchdr.seq_cnt != 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Sequence count not zero (%d).  "
			    "Dropping...",
			    label, fchdr.seq_cnt);

			goto done;
		}

		/* Find vport (defaults to physical port) */
		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);

			if (vport->did == fchdr.d_id) {
				port = vport;
				break;
			}
		}

		/* Allocate an IOCBQ */
		iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba,
		    MEM_IOCB, 1);

		if (!iocbq) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of IOCB "
			    "resources.  Dropping...",
			    label);

			goto done;
		}

		seq_mp = NULL;
		if (fchdr.type != 0) {
			/* Allocate a buffer */
			seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type, 1);

			if (!seq_mp) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ ENTRY: %s: Out of buffer "
				    "resources.  Dropping...",
				    label);

				goto done;
			}

			iocbq->bp = (uint8_t *)seq_mp;
		}

		node = (void *)emlxs_node_find_did(port, fchdr.s_id);
		if (node == NULL) {
			/* Not fatal here; some handlers tolerate no node */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Node not found. sid=%x",
			    label, fchdr.s_id);
		}

		/* Initialize the iocbq */
		iocbq->port = port;
		iocbq->channel = cp;
		iocbq->node = node;

		iocb = &iocbq->iocb;
		iocb->RXSEQCNT = 0;
		iocb->RXSEQLEN = 0;

		seq_len = 0;
		seq_cnt = 0;

	} else {
		/* Continuation of a staged sequence */

		iocb = &iocbq->iocb;
		port = iocbq->port;
		node = (emlxs_node_t *)iocbq->node;

		seq_mp = (MATCHMAP *)iocbq->bp;
		seq_len = iocb->RXSEQLEN;
		seq_cnt = iocb->RXSEQCNT;

		/* Check sequence order */
		if (fchdr.seq_cnt != seq_cnt) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of order frame received "
			    "(%d != %d).  Dropping...",
			    label, fchdr.seq_cnt, seq_cnt);

			goto done;
		}
	}

	/* We now have an iocbq */

	if (!port->VPIobj.vfip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ ENTRY: %s: No fabric connection. "
		    "Dropping...",
		    label);

		goto done;
	}

	/* Save the frame data to our seq buffer */
	if (cqe->data_size && seq_mp) {
		/* Get the next data rqb */
		data_mp = &data_rq->rqb[host_index];

		offset = (off_t)((uint64_t)((unsigned long)
		    data_mp->virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(data_mp->dma_handle, offset,
		    cqe->data_size, DDI_DMA_SYNC_FORKERNEL);

		data = (uint32_t *)data_mp->virt;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
		    host_index, data[0], data[1], data[2], data[3],
		    data[4], data[5]);

		/* Check sequence length */
		if ((seq_len + cqe->data_size) > seq_mp->size) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: %s: Sequence buffer overflow. "
			    "(%d > %d). Dropping...",
			    label, (seq_len + cqe->data_size), seq_mp->size);

			goto done;
		}

		/* Copy data to local receive buffer */
		bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
		    seq_len), cqe->data_size);

		seq_len += cqe->data_size;
	}

	/* If this is not the last frame of sequence, queue it. */
	if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
		/* Save sequence header */
		if (seq_cnt == 0) {
			bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
			    sizeof (fc_frame_hdr_t));
		}

		/* Update sequence info in iocb */
		iocb->RXSEQCNT = seq_cnt + 1;
		iocb->RXSEQLEN = seq_len;

		/* Queue iocbq for next frame */
		emlxs_sli4_rxq_put(hba, iocbq);

		/* Don't free resources */
		iocbq = NULL;

		/* No need to abort */
		abort = 0;

		goto done;
	}

	emlxs_sli4_rq_post(port, hdr_rq->qid);
	posted = 1;

	/* End of sequence found. Process request now. */

	if (seq_cnt > 0) {
		/* Retrieve first frame of sequence */
		bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
		    sizeof (fc_frame_hdr_t));

		bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
	}

	/* Build rcv iocb and process it */
	switch (fchdr.type) {
	case 0: /* BLS */

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ ENTRY: %s: xid:%x sid:%x. Sending BLS ACC...",
		    label, fchdr.ox_id, fchdr.s_id);

		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

		/* Set up an iotag using special Abort iotags */
		mutex_enter(&EMLXS_FCTAB_LOCK);
		if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
			hba->fc_oor_iotag = hba->max_iotag;
		}
		iotag = hba->fc_oor_iotag++;
		mutex_exit(&EMLXS_FCTAB_LOCK);

		/* BLS ACC Response */
		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		wqe->Command = CMD_XMIT_BLS_RSP64_CX;
		wqe->CmdType = WQE_TYPE_GEN;

		wqe->un.BlsRsp.Payload0 = 0x80;
		wqe->un.BlsRsp.Payload1 = fchdr.seq_id;

		wqe->un.BlsRsp.OXId = fchdr.ox_id;
		wqe->un.BlsRsp.RXId = fchdr.rx_id;

		wqe->un.BlsRsp.SeqCntLow = 0;
		wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;

		wqe->un.BlsRsp.XO = 0;
		wqe->un.BlsRsp.AR = 0;
		wqe->un.BlsRsp.PT = 1;
		wqe->un.BlsRsp.RemoteId = fchdr.s_id;

		wqe->PU = 0x3;
		wqe->ContextTag = port->VPIobj.VPI;
		wqe->ContextType = WQE_VPI_CONTEXT;
		wqe->OXId = (volatile uint16_t) fchdr.ox_id;
		wqe->XRITag = 0xffff;

		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
			wqe->CCPE = 1;
			wqe->CCP = fchdr.rsvd;
		}

		wqe->Class = CLASS3;
		wqe->RequestTag = iotag;
		wqe->CQId = 0x3ff;

		emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);

		break;

	case 1: /* ELS */
		if (!(port->VPIobj.flag & EMLXS_VPI_PORT_ENABLED)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Port not yet enabled. "
			    "Dropping...",
			    label);

			goto done;
		}

		cmd = *((uint32_t *)seq_mp->virt);
		cmd &= ELS_CMD_MASK;
		rpip = NULL;

		/* LOGO must not resolve to a node RPI */
		if (cmd != ELS_CMD_LOGO) {
			rpip = EMLXS_NODE_TO_RPI(port, node);
		}

		if (!rpip) {
			rpip = port->VPIobj.rpip;
		}

		/* Reserve an exchange for the response */
		xrip = emlxs_sli4_reserve_xri(hba, rpip);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of exchange "
			    "resources.  Dropping...",
			    label);

			goto done;
		}

		xrip->rx_id = fchdr.ox_id;

		/* Build CMD_RCV_ELS64_CX */
		iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
		iocb->un.rcvels64.elsReq.tus.f.bdeSize  = seq_len;
		iocb->un.rcvels64.elsReq.addrLow  = PADDR_LO(seq_mp->phys);
		iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
		iocb->ULPBDECOUNT = 1;

		iocb->un.rcvels64.remoteID = fchdr.s_id;
		iocb->un.rcvels64.parmRo = fchdr.d_id;

		iocb->ULPPU = 0x3;
		iocb->ULPCONTEXT = xrip->XRI;
		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
		iocb->ULPCLASS = CLASS3;
		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;

		iocb->unsli3.ext_rcv.seq_len = seq_len;
		iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;

		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
			iocb->unsli3.ext_rcv.ccpe = 1;
			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
		}

		(void) emlxs_els_handle_unsol_req(port, iocbq->channel,
		    iocbq, seq_mp, seq_len);

		break;

	case 0x20: /* CT */
		if (!(port->VPIobj.flag & EMLXS_VPI_PORT_ENABLED)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Port not yet enabled. "
			    "Dropping...",
			    label);

			goto done;
		}

		if (!node) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Node not found (did=%x).  "
			    "Dropping...",
			    label, fchdr.d_id);

			goto done;
		}

		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: RPI not found (did=%x rpi=%x).  "
			    "Dropping...",
			    label, fchdr.d_id, node->nlp_Rpi);

			goto done;
		}

		/* Reserve an exchange for the response */
		xrip = emlxs_sli4_reserve_xri(hba, rpip);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of exchange "
			    "resources.  Dropping...",
			    label);

			goto done;
		}

		xrip->rx_id = fchdr.ox_id;

		/* Build CMD_RCV_SEQ64_CX */
		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
		iocb->ULPBDECOUNT = 1;

		iocb->un.rcvseq64.xrsqRo = 0;
		iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
		iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
		iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
		iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;

		iocb->ULPPU = 0x3;
		iocb->ULPCONTEXT = xrip->XRI;
		iocb->ULPIOTAG = rpip->RPI;
		iocb->ULPCLASS = CLASS3;
		iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;

		iocb->unsli3.ext_rcv.seq_len = seq_len;
		iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;

		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
			iocb->unsli3.ext_rcv.ccpe = 1;
			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
		}

		(void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
		    iocbq, seq_mp, seq_len);

		break;
	}

	/* Sequence handled, no need to abort */
	abort = 0;

done:

	/* Repost the RQ buffer if not already done on the success path */
	if (!posted) {
		emlxs_sli4_rq_post(port, hdr_rq->qid);
	}

	if (abort) {
		/* Send ABTS for this exchange */
		/* !!! Currently, we have no implementation for this !!! */
		abort = 0;
	}

	/* Return memory resources to pools */
	if (iocbq) {
		if (iocbq->bp) {
			emlxs_mem_put(hba, buf_type, (void *)iocbq->bp);
		}

		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
	}

#ifdef FMA_SUPPORT
	if (emlxs_fm_check_dma_handle(hba,
	    hba->sli.sli4.slim2.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "emlxs_sli4_process_unsol_rcv: hdl=%p",
		    hba->sli.sli4.slim2.dma_handle);

		emlxs_thread_spawn(hba, emlxs_restart_thread,
		    0, 0);
	}
#endif
	return;

} /* emlxs_sli4_process_unsol_rcv() */
4812 4812
4813 4813
4814 4814 /*ARGSUSED*/
4815 4815 static void
4816 4816 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
4817 4817 CQE_XRI_Abort_t *cqe)
4818 4818 {
4819 4819 emlxs_port_t *port = &PPORT;
4820 4820 XRIobj_t *xrip;
4821 4821
4822 4822 mutex_enter(&EMLXS_FCTAB_LOCK);
4823 4823
4824 4824 xrip = emlxs_sli4_find_xri(hba, cqe->XRI);
4825 4825 if (xrip == NULL) {
4826 4826 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4827 4827 "CQ ENTRY: process xri aborted ignored");
4828 4828
4829 4829 mutex_exit(&EMLXS_FCTAB_LOCK);
4830 4830 return;
4831 4831 }
4832 4832
4833 4833 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4834 4834 "CQ ENTRY: process xri x%x aborted: IA %d EO %d BR %d",
4835 4835 cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
4836 4836
4837 4837 if (!(xrip->flag & EMLXS_XRI_ABORT_INP)) {
4838 4838 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4839 4839 "XRI Aborted: Bad state: x%x xri x%x",
4840 4840 xrip->flag, xrip->XRI);
4841 4841
4842 4842 mutex_exit(&EMLXS_FCTAB_LOCK);
4843 4843 return;
4844 4844 }
4845 4845
4846 4846 /* Exchange is no longer busy on-chip, free it */
4847 4847 emlxs_sli4_free_xri(hba, 0, xrip, 0);
4848 4848
4849 4849 mutex_exit(&EMLXS_FCTAB_LOCK);
4850 4850
4851 4851 return;
4852 4852
4853 4853 } /* emlxs_sli4_process_xri_aborted () */
4854 4854
4855 4855
/*
 * Drain all valid entries from one completion queue and dispatch each
 * one to its type-specific handler, then ring the CQ doorbell to pop the
 * consumed entries and re-arm the queue.
 *
 * Locking: caller holds EMLXS_PORT_LOCK; it is dropped around each
 * handler call (handlers may take other locks or sleep-averse paths)
 * and re-acquired before the next entry is examined.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
{
	emlxs_port_t *port = &PPORT;
	CQE_u *cqe;
	CQE_u cq_entry;
	uint32_t cqdb;
	int num_entries = 0;
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	cqe = (CQE_u *)cq->addr.virt;
	cqe += cq->host_index;

	offset = (off_t)((uint64_t)((unsigned long)
	    cq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		/* Word 3 carries the valid bit; stop at first invalid */
		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
		if (!(cq_entry.word[3] & CQE_VALID))
			break;

		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);

#ifdef SLI4_FASTPATH_DEBUG
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: %08x %08x %08x %08x", cq_entry.word[0],
		    cq_entry.word[1], cq_entry.word[2], cq_entry.word[3]);
#endif

		num_entries++;
		/* Clear the valid word so the slot reads invalid next pass */
		cqe->word[3] = 0;

		cq->host_index++;
		if (cq->host_index >= cq->max_index) {
			cq->host_index = 0;
			cqe = (CQE_u *)cq->addr.virt;
		} else {
			cqe++;
		}
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Now handle specific cq type */
		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
			/* Group 1: async events and mailbox completions */
			if (cq_entry.cqAsyncEntry.async_evt) {
				emlxs_sli4_process_async_event(hba,
				    (CQE_ASYNC_t *)&cq_entry);
			} else {
				emlxs_sli4_process_mbox_event(hba,
				    (CQE_MBOX_t *)&cq_entry);
			}
		} else { /* EMLXS_CQ_TYPE_GROUP2 */
			switch (cq_entry.cqCmplEntry.Code) {
			case CQE_TYPE_WQ_COMPLETION:
				/* Out-of-range tags get the OOR handler */
				if (cq_entry.cqCmplEntry.RequestTag <
				    hba->max_iotag) {
					emlxs_sli4_process_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				} else {
					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				}
				break;
			case CQE_TYPE_RELEASE_WQE:
				emlxs_sli4_process_release_wqe(hba, cq,
				    (CQE_RelWQ_t *)&cq_entry);
				break;
			case CQE_TYPE_UNSOL_RCV:
				emlxs_sli4_process_unsol_rcv(hba, cq,
				    (CQE_UnsolRcv_t *)&cq_entry);
				break;
			case CQE_TYPE_XRI_ABORTED:
				emlxs_sli4_process_xri_aborted(hba, cq,
				    (CQE_XRI_Abort_t *)&cq_entry);
				break;
			default:
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "Invalid CQ entry %d: %08x %08x %08x %08x",
				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
				    cq_entry.word[1], cq_entry.word[2],
				    cq_entry.word[3]);
				break;
			}
		}

		mutex_enter(&EMLXS_PORT_LOCK);
	}

	/* Pop the consumed entries and re-arm the CQ via the doorbell */
	cqdb = cq->qid;
	cqdb |= CQ_DB_REARM;
	if (num_entries != 0) {
		cqdb |= ((num_entries << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK);
	}

#ifdef SLI4_FASTPATH_DEBUG
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQ CLEAR: %08x: pops:x%x", cqdb, num_entries);
#endif

	WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), cqdb);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

} /* emlxs_sli4_process_cq() */
4969 4969
4970 4970
/*
 * Drain one Event Queue (EQ).
 *
 * Consumes every valid entry in the EQ ring starting at eq->host_index.
 * Each event entry carries the hardware id of a Completion Queue with
 * work pending; that CQ is drained in turn via emlxs_sli4_process_cq().
 * When the ring is empty the EQ doorbell is written to pop the consumed
 * entries and rearm the EQ for the next interrupt.
 *
 * EMLXS_PORT_LOCK must be held on entry and is still held on exit
 * (emlxs_sli4_process_cq() may drop and reacquire it internally).
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
{
#ifdef SLI4_FASTPATH_DEBUG
	emlxs_port_t *port = &PPORT;
#endif
	uint32_t eqdb;
	uint32_t *ptr;
	CHANNEL *cp;
	EQE_u eqe;
	uint32_t i;
	uint32_t value;
	int num_entries = 0;
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	/* Point at the next unconsumed entry in the ring */
	ptr = eq->addr.virt;
	ptr += eq->host_index;

	/* Offset of this EQ within the slim2 DMA area, for the sync below */
	offset = (off_t)((uint64_t)((unsigned long)
	    eq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Pull the hardware's latest EQ writes into the host's view */
	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		eqe.word = *ptr;
		eqe.word = BE_SWAP32(eqe.word);

		/* Valid bit clear means we have caught up with hardware */
		if (!(eqe.word & EQE_VALID))
			break;

#ifdef SLI4_FASTPATH_DEBUG
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQ ENTRY: %08x", eqe.word);
#endif

		/* Clear the entry so it is not re-processed after wrap */
		*ptr = 0;
		num_entries++;
		eq->host_index++;
		if (eq->host_index >= eq->max_index) {
			/* Wrap back to the start of the ring */
			eq->host_index = 0;
			ptr = eq->addr.virt;
		} else {
			ptr++;
		}

		/* Translate the hardware CQ id into our cq[] ordinal */
		value = hba->sli.sli4.cq_map[eqe.entry.CQId];

#ifdef SLI4_FASTPATH_DEBUG
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQ ENTRY: CQIndex:x%x: cqid:x%x", value, eqe.entry.CQId);
#endif

		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[value]);
	}

	/* Build the doorbell value: clear + rearm + pop count (if any) */
	eqdb = eq->qid;
	eqdb |= (EQ_DB_CLEAR | EQ_DB_EVENT | EQ_DB_REARM);

#ifdef SLI4_FASTPATH_DEBUG
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "EQ CLEAR: %08x: pops:x%x", eqdb, num_entries);
#endif

	if (num_entries != 0) {
		eqdb |= ((num_entries << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK);
		/*
		 * Kick any channels whose completions were deferred to
		 * thread context during CQ processing.
		 */
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

	WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), eqdb);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

} /* emlxs_sli4_process_eq() */
5057 5057
5058 5058
#ifdef MSI_SUPPORT
/*
 * MSI/MSI-X interrupt handler.
 *
 * arg1 is the emlxs_hba_t; arg2 carries the MSI message id, which maps
 * one-to-one onto the EQ ordinal (one EQ per interrupt vector).  If the
 * driver is actually configured for fixed interrupts, control is handed
 * to the legacy INTx handler instead.
 *
 * Returns DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef SLI4_FASTPATH_DEBUG
	emlxs_port_t *port = &PPORT;
#endif
	uint16_t msgid;
	int rc;

#ifdef SLI4_FASTPATH_DEBUG
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "msiINTR arg1:%p arg2:%p", arg1, arg2);
#endif

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		rc = emlxs_sli4_intx_intr(arg1);
		return (rc);
	}

	/* Get MSI message id */
	msgid = (uint16_t)((unsigned long)arg2);

	/* Validate the message id; out-of-range ids fall back to vector 0 */
	if (msgid >= hba->intr_count) {
		msgid = 0;
	}
	mutex_enter(&EMLXS_PORT_LOCK);

	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
		/* Adapter dead or offline: not our interrupt to claim */
		mutex_exit(&EMLXS_PORT_LOCK);
		return (DDI_INTR_UNCLAIMED);
	}

	/* The eq[] index == the MSI vector number */
	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[msgid]);

	mutex_exit(&EMLXS_PORT_LOCK);
	return (DDI_INTR_CLAIMED);

} /* emlxs_sli4_msi_intr() */
#endif /* MSI_SUPPORT */
5104 5104
5105 5105
5106 5106 /*ARGSUSED*/
5107 5107 static int
5108 5108 emlxs_sli4_intx_intr(char *arg)
5109 5109 {
5110 5110 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
5111 5111 #ifdef SLI4_FASTPATH_DEBUG
5112 5112 emlxs_port_t *port = &PPORT;
5113 5113 #endif
5114 5114
5115 5115 #ifdef SLI4_FASTPATH_DEBUG
5116 5116 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5117 5117 "intxINTR arg:%p", arg);
5118 5118 #endif
5119 5119
5120 5120 mutex_enter(&EMLXS_PORT_LOCK);
5121 5121
5122 5122 if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
5123 5123 mutex_exit(&EMLXS_PORT_LOCK);
5124 5124 return (DDI_INTR_UNCLAIMED);
5125 5125 }
5126 5126
5127 5127 emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
5128 5128
5129 5129 mutex_exit(&EMLXS_PORT_LOCK);
5130 5130 return (DDI_INTR_CLAIMED);
5131 5131 } /* emlxs_sli4_intx_intr() */
5132 5132
5133 5133
/*
 * Bring the adapter down hard.
 *
 * Waits up to ~1 second (10000 polls of 100us) for any outstanding
 * mailbox command to complete, then sets FC_INTERLOCKED and moves the
 * HBA state to FC_KILLED.  If the adapter is already interlocked, only
 * the state change is performed.  If the mailbox never goes idle the
 * kill is abandoned with a debug message.
 */
static void
emlxs_sli4_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t j;

	mutex_enter(&EMLXS_PORT_LOCK);
	if (hba->flag & FC_INTERLOCKED) {
		/* Already interlocked; just record the kill state */
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/* Poll for the mailbox to go idle */
	j = 0;
	while (j++ < 10000) {
		if (hba->mbox_queue_flag == 0) {
			break;
		}

		/* Drop the lock while sleeping so the mailbox can finish */
		mutex_exit(&EMLXS_PORT_LOCK);
		DELAYUS(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	if (hba->mbox_queue_flag != 0) {
		/* Mailbox still busy after the full timeout; give up */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Board kill failed. Mailbox busy.");
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	hba->flag |= FC_INTERLOCKED;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

	mutex_exit(&EMLXS_PORT_LOCK);

} /* emlxs_sli4_hba_kill() */
5174 5174
5175 5175
5176 5176 static void
5177 5177 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
5178 5178 {
5179 5179 emlxs_config_t *cfg = &CFG;
5180 5180 int i;
5181 5181 int num_cq;
5182 5182 uint32_t data;
5183 5183
5184 5184 hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
5185 5185
5186 5186 num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
5187 5187 EMLXS_CQ_OFFSET_WQ;
5188 5188
5189 5189 /* ARM EQ / CQs */
5190 5190 for (i = 0; i < num_cq; i++) {
5191 5191 data = hba->sli.sli4.cq[i].qid;
5192 5192 data |= CQ_DB_REARM;
5193 5193 WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
5194 5194 }
5195 5195 for (i = 0; i < hba->intr_count; i++) {
5196 5196 data = hba->sli.sli4.eq[i].qid;
5197 5197 data |= (EQ_DB_REARM | EQ_DB_EVENT);
5198 5198 WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
5199 5199 }
5200 5200 } /* emlxs_sli4_enable_intr() */
5201 5201
5202 5202
5203 5203 static void
5204 5204 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
5205 5205 {
5206 5206 if (att) {
5207 5207 return;
5208 5208 }
5209 5209
5210 5210 hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
5211 5211
5212 5212 /* Short of reset, we cannot disable interrupts */
5213 5213 } /* emlxs_sli4_disable_intr() */
5214 5214
5215 5215
/*
 * Release all SLI4 queue/XRI resources.
 *
 * Counterpart to emlxs_sli4_resource_alloc().  All queue memory was
 * carved out of the single slim2 DMA allocation, so only the queue
 * descriptors are zeroed here; the backing memory is released in one
 * emlxs_mem_free() of slim2.  The XRI object array (separately
 * kmem_zalloc'd) is freed explicitly.
 */
static void
emlxs_sli4_resource_free(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MBUF_INFO *buf_info;
	uint32_t i;

	emlxs_fcf_fini(hba);

	/* Header templates live inside slim2; just clear the descriptor */
	buf_info = &hba->sli.sli4.HeaderTmplate;
	if (buf_info->virt) {
		bzero(buf_info, sizeof (MBUF_INFO));
	}

	if (hba->sli.sli4.XRIp) {
		/* Warn (but continue) if the inuse list is not empty */
		if ((hba->sli.sli4.XRIinuse_f !=
		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
		    (hba->sli.sli4.XRIinuse_b !=
		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "XRIs inuse during free!: %p %p != %p\n",
			    hba->sli.sli4.XRIinuse_f,
			    hba->sli.sli4.XRIinuse_b,
			    &hba->sli.sli4.XRIinuse_f);
		}
		kmem_free(hba->sli.sli4.XRIp,
		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
		hba->sli.sli4.XRIp = NULL;

		/* Reset the free list to empty (self-referencing head) */
		hba->sli.sli4.XRIfree_f =
		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b =
		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.xrif_count = 0;
	}

	/* Destroy per-queue locks and clear all queue descriptors */
	for (i = 0; i < EMLXS_MAX_EQS; i++) {
		mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
	}
	for (i = 0; i < EMLXS_MAX_CQS; i++) {
		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
	}
	for (i = 0; i < EMLXS_MAX_WQS; i++) {
		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
	}
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		mutex_destroy(&hba->sli.sli4.rq[i].lock);
		mutex_destroy(&hba->sli.sli4.rxq[i].lock);
		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
	}

	/* Free the MQ */
	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));

	/* Release the single backing DMA allocation for everything above */
	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt) {
		buf_info->flags = FC_MBUF_DMA;
		emlxs_mem_free(hba, buf_info);
		bzero(buf_info, sizeof (MBUF_INFO));
	}

	/* Cleanup queue ordinal mapping */
	for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
		hba->sli.sli4.eq_map[i] = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
		hba->sli.sli4.cq_map[i] = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
		hba->sli.sli4.wq_map[i] = 0xffff;
	}

} /* emlxs_sli4_resource_free() */
5291 5291
5292 5292
5293 5293 static int
5294 5294 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
5295 5295 {
5296 5296 emlxs_port_t *port = &PPORT;
5297 5297 emlxs_config_t *cfg = &CFG;
5298 5298 MBUF_INFO *buf_info;
5299 5299 uint16_t index;
5300 5300 int num_eq;
5301 5301 int num_wq;
5302 5302 uint16_t i;
5303 5303 uint32_t j;
5304 5304 uint32_t k;
5305 5305 uint32_t word;
5306 5306 XRIobj_t *xrip;
5307 5307 char buf[64];
5308 5308 RQE_t *rqe;
5309 5309 MBUF_INFO *rqb;
5310 5310 uint64_t phys;
5311 5311 uint64_t tmp_phys;
5312 5312 char *virt;
5313 5313 char *tmp_virt;
5314 5314 void *data_handle;
5315 5315 void *dma_handle;
5316 5316 int32_t size;
5317 5317 off_t offset;
5318 5318 uint32_t count = 0;
5319 5319
5320 5320 emlxs_fcf_init(hba);
5321 5321
5322 5322 /* EQs - 1 per Interrupt vector */
5323 5323 num_eq = hba->intr_count;
5324 5324 /* CQs - number of WQs + 1 for RQs + 1 for mbox/async events */
5325 5325 num_wq = cfg[CFG_NUM_WQ].current * num_eq;
5326 5326
5327 5327 /* Calculate total dmable memory we need */
5328 5328 /* EQ */
5329 5329 count += num_eq * 4096;
5330 5330 /* CQ */
5331 5331 count += (num_wq + EMLXS_CQ_OFFSET_WQ) * 4096;
5332 5332 /* WQ */
5333 5333 count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);
5334 5334 /* MQ */
5335 5335 count += EMLXS_MAX_MQS * 4096;
5336 5336 /* RQ */
5337 5337 count += EMLXS_MAX_RQS * 4096;
5338 5338 /* RQB/E */
5339 5339 count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
5340 5340 /* SGL */
5341 5341 count += hba->sli.sli4.XRICount * hba->sli.sli4.mem_sgl_size;
5342 5342 /* RPI Head Template */
5343 5343 count += hba->sli.sli4.RPICount * sizeof (RPIHdrTmplate_t);
5344 5344
5345 5345 /* Allocate slim2 for SLI4 */
5346 5346 buf_info = &hba->sli.sli4.slim2;
5347 5347 buf_info->size = count;
5348 5348 buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5349 5349 buf_info->align = ddi_ptob(hba->dip, 1L);
5350 5350
5351 5351 (void) emlxs_mem_alloc(hba, buf_info);
5352 5352
5353 5353 if (buf_info->virt == NULL) {
5354 5354 EMLXS_MSGF(EMLXS_CONTEXT,
5355 5355 &emlxs_init_failed_msg,
5356 5356 "Unable to allocate internal memory for SLI4: %d",
5357 5357 count);
5358 5358 goto failed;
5359 5359 }
5360 5360 bzero(buf_info->virt, buf_info->size);
5361 5361 EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
5362 5362 buf_info->size, DDI_DMA_SYNC_FORDEV);
5363 5363
5364 5364 /* Assign memory to SGL, Head Template, EQ, CQ, WQ, RQ and MQ */
5365 5365 data_handle = buf_info->data_handle;
5366 5366 dma_handle = buf_info->dma_handle;
5367 5367 phys = buf_info->phys;
5368 5368 virt = (char *)buf_info->virt;
5369 5369
5370 5370 /* Allocate space for queues */
5371 5371 size = 4096;
5372 5372 for (i = 0; i < num_eq; i++) {
5373 5373 buf_info = &hba->sli.sli4.eq[i].addr;
5374 5374 if (buf_info->virt == NULL) {
5375 5375 bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
5376 5376 buf_info->size = size;
5377 5377 buf_info->flags =
5378 5378 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5379 5379 buf_info->align = ddi_ptob(hba->dip, 1L);
5380 5380 buf_info->phys = phys;
5381 5381 buf_info->virt = (void *)virt;
5382 5382 buf_info->data_handle = data_handle;
5383 5383 buf_info->dma_handle = dma_handle;
5384 5384
5385 5385 phys += size;
5386 5386 virt += size;
5387 5387
5388 5388 hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
5389 5389 }
5390 5390
5391 5391 (void) sprintf(buf, "%s_eq%d_lastwq_lock mutex",
5392 5392 DRIVER_NAME, i);
5393 5393 mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, buf,
5394 5394 MUTEX_DRIVER, NULL);
5395 5395 }
5396 5396
5397 5397 size = 4096;
5398 5398 for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
5399 5399 buf_info = &hba->sli.sli4.cq[i].addr;
5400 5400 if (buf_info->virt == NULL) {
5401 5401 bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5402 5402 buf_info->size = size;
5403 5403 buf_info->flags =
5404 5404 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5405 5405 buf_info->align = ddi_ptob(hba->dip, 1L);
5406 5406 buf_info->phys = phys;
5407 5407 buf_info->virt = (void *)virt;
5408 5408 buf_info->data_handle = data_handle;
5409 5409 buf_info->dma_handle = dma_handle;
5410 5410
5411 5411 phys += size;
5412 5412 virt += size;
5413 5413
5414 5414 hba->sli.sli4.cq[i].max_index = CQ_DEPTH;
5415 5415 }
5416 5416 }
5417 5417
5418 5418 /* WQs - NUM_WQ config parameter * number of EQs */
5419 5419 size = 4096 * EMLXS_NUM_WQ_PAGES;
5420 5420 for (i = 0; i < num_wq; i++) {
5421 5421 buf_info = &hba->sli.sli4.wq[i].addr;
5422 5422 if (buf_info->virt == NULL) {
5423 5423 bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5424 5424 buf_info->size = size;
5425 5425 buf_info->flags =
5426 5426 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5427 5427 buf_info->align = ddi_ptob(hba->dip, 1L);
5428 5428 buf_info->phys = phys;
5429 5429 buf_info->virt = (void *)virt;
5430 5430 buf_info->data_handle = data_handle;
5431 5431 buf_info->dma_handle = dma_handle;
5432 5432
5433 5433 phys += size;
5434 5434 virt += size;
5435 5435
5436 5436 hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
5437 5437 hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
5438 5438 }
5439 5439 }
5440 5440
5441 5441 /* MQ */
5442 5442 size = 4096;
5443 5443 buf_info = &hba->sli.sli4.mq.addr;
5444 5444 if (!buf_info->virt) {
5445 5445 bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5446 5446 buf_info->size = size;
5447 5447 buf_info->flags =
5448 5448 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5449 5449 buf_info->align = ddi_ptob(hba->dip, 1L);
5450 5450 buf_info->phys = phys;
5451 5451 buf_info->virt = (void *)virt;
5452 5452 buf_info->data_handle = data_handle;
5453 5453 buf_info->dma_handle = dma_handle;
5454 5454
5455 5455 phys += size;
5456 5456 virt += size;
5457 5457
5458 5458 hba->sli.sli4.mq.max_index = MQ_DEPTH;
5459 5459 }
5460 5460
5461 5461 /* RXQs */
5462 5462 for (i = 0; i < EMLXS_MAX_RXQS; i++) {
5463 5463 bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5464 5464
5465 5465 (void) sprintf(buf, "%s_rxq%d_lock mutex", DRIVER_NAME, i);
5466 5466 mutex_init(&hba->sli.sli4.rxq[i].lock, buf, MUTEX_DRIVER, NULL);
5467 5467 }
5468 5468
5469 5469 /* RQs */
5470 5470 size = 4096;
5471 5471 for (i = 0; i < EMLXS_MAX_RQS; i++) {
5472 5472 buf_info = &hba->sli.sli4.rq[i].addr;
5473 5473 if (buf_info->virt) {
5474 5474 continue;
5475 5475 }
5476 5476
5477 5477 bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5478 5478 buf_info->size = size;
5479 5479 buf_info->flags =
5480 5480 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5481 5481 buf_info->align = ddi_ptob(hba->dip, 1L);
5482 5482 buf_info->phys = phys;
5483 5483 buf_info->virt = (void *)virt;
5484 5484 buf_info->data_handle = data_handle;
5485 5485 buf_info->dma_handle = dma_handle;
5486 5486
5487 5487 phys += size;
5488 5488 virt += size;
5489 5489
5490 5490 hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
5491 5491
5492 5492 (void) sprintf(buf, "%s_rq%d_lock mutex", DRIVER_NAME, i);
5493 5493 mutex_init(&hba->sli.sli4.rq[i].lock, buf, MUTEX_DRIVER, NULL);
5494 5494 }
5495 5495
5496 5496 /* Setup RQE */
5497 5497 for (i = 0; i < EMLXS_MAX_RQS; i++) {
5498 5498 size = (i & 0x1) ? RQB_DATA_SIZE : RQB_HEADER_SIZE;
5499 5499 tmp_phys = phys;
5500 5500 tmp_virt = virt;
5501 5501
5502 5502 /* Initialize the RQEs */
5503 5503 rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
5504 5504 for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
5505 5505 phys = tmp_phys;
5506 5506 virt = tmp_virt;
5507 5507 for (k = 0; k < RQB_COUNT; k++) {
5508 5508 word = PADDR_HI(phys);
5509 5509 rqe->AddrHi = BE_SWAP32(word);
5510 5510
5511 5511 word = PADDR_LO(phys);
5512 5512 rqe->AddrLo = BE_SWAP32(word);
5513 5513
5514 5514 rqb = &hba->sli.sli4.rq[i].
5515 5515 rqb[k + (j * RQB_COUNT)];
5516 5516 rqb->size = size;
5517 5517 rqb->flags = FC_MBUF_DMA |
5518 5518 FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5519 5519 rqb->align = ddi_ptob(hba->dip, 1L);
5520 5520 rqb->phys = phys;
5521 5521 rqb->virt = (void *)virt;
5522 5522 rqb->data_handle = data_handle;
5523 5523 rqb->dma_handle = dma_handle;
5524 5524
5525 5525 phys += size;
5526 5526 virt += size;
5527 5527 #ifdef RQ_DEBUG
5528 5528 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5529 5529 "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p tag=%08x",
5530 5530 i, j, k, mp, mp->tag);
5531 5531 #endif
5532 5532
5533 5533 rqe++;
5534 5534 }
5535 5535 }
5536 5536
5537 5537 offset = (off_t)((uint64_t)((unsigned long)
5538 5538 hba->sli.sli4.rq[i].addr.virt) -
5539 5539 (uint64_t)((unsigned long)
5540 5540 hba->sli.sli4.slim2.virt));
5541 5541
5542 5542 /* Sync the RQ buffer list */
5543 5543 EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
5544 5544 hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
5545 5545 }
5546 5546
5547 5547 if ((!hba->sli.sli4.XRIp) && (hba->sli.sli4.XRICount)) {
5548 5548 /* Initialize double linked lists */
5549 5549 hba->sli.sli4.XRIinuse_f =
5550 5550 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5551 5551 hba->sli.sli4.XRIinuse_b =
5552 5552 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5553 5553 hba->sli.sli4.xria_count = 0;
5554 5554
5555 5555 hba->sli.sli4.XRIfree_f =
5556 5556 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5557 5557 hba->sli.sli4.XRIfree_b =
5558 5558 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5559 5559 hba->sli.sli4.xria_count = 0;
5560 5560
5561 5561 hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
5562 5562 (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
5563 5563
5564 5564 xrip = hba->sli.sli4.XRIp;
5565 5565 index = hba->sli.sli4.XRIBase;
5566 5566 size = hba->sli.sli4.mem_sgl_size;
5567 5567 for (i = 0; i < hba->sli.sli4.XRICount; i++) {
5568 5568 xrip->sge_count =
5569 5569 (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
5570 5570 xrip->XRI = index;
5571 5571 xrip->iotag = i;
5572 5572 if ((xrip->XRI == 0) || (xrip->iotag == 0)) {
5573 5573 index++; /* Skip XRI 0 or IOTag 0 */
5574 5574 xrip++;
5575 5575 continue;
5576 5576 }
5577 5577 /* Add xrip to end of free list */
5578 5578 xrip->_b = hba->sli.sli4.XRIfree_b;
5579 5579 hba->sli.sli4.XRIfree_b->_f = xrip;
5580 5580 xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5581 5581 hba->sli.sli4.XRIfree_b = xrip;
5582 5582 hba->sli.sli4.xrif_count++;
5583 5583
5584 5584 /* Allocate SGL for this xrip */
5585 5585 buf_info = &xrip->SGList;
5586 5586 buf_info->size = size;
5587 5587 buf_info->flags =
5588 5588 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5589 5589 buf_info->align = size;
5590 5590 buf_info->phys = phys;
5591 5591 buf_info->virt = (void *)virt;
5592 5592 buf_info->data_handle = data_handle;
5593 5593 buf_info->dma_handle = dma_handle;
5594 5594
5595 5595 phys += size;
5596 5596 virt += size;
5597 5597
5598 5598 xrip++;
5599 5599 index++;
5600 5600 }
5601 5601 }
5602 5602
5603 5603 size = sizeof (RPIHdrTmplate_t) * hba->sli.sli4.RPICount;
5604 5604 buf_info = &hba->sli.sli4.HeaderTmplate;
5605 5605 if ((buf_info->virt == NULL) && (hba->sli.sli4.RPICount)) {
5606 5606 bzero(buf_info, sizeof (MBUF_INFO));
5607 5607 buf_info->size = size;
5608 5608 buf_info->flags = FC_MBUF_DMA | FC_MBUF_DMA32;
5609 5609 buf_info->align = ddi_ptob(hba->dip, 1L);
5610 5610 buf_info->phys = phys;
5611 5611 buf_info->virt = (void *)virt;
5612 5612 buf_info->data_handle = data_handle;
5613 5613 buf_info->dma_handle = dma_handle;
5614 5614 }
5615 5615
5616 5616 #ifdef FMA_SUPPORT
5617 5617 if (hba->sli.sli4.slim2.dma_handle) {
5618 5618 if (emlxs_fm_check_dma_handle(hba,
5619 5619 hba->sli.sli4.slim2.dma_handle)
5620 5620 != DDI_FM_OK) {
5621 5621 EMLXS_MSGF(EMLXS_CONTEXT,
5622 5622 &emlxs_invalid_dma_handle_msg,
5623 5623 "emlxs_sli4_resource_alloc: hdl=%p",
5624 5624 hba->sli.sli4.slim2.dma_handle);
5625 5625 goto failed;
5626 5626 }
5627 5627 }
5628 5628 #endif
5629 5629
5630 5630 return (0);
5631 5631
5632 5632 failed:
5633 5633
5634 5634 (void) emlxs_sli4_resource_free(hba);
5635 5635 return (ENOMEM);
5636 5636
5637 5637 } /* emlxs_sli4_resource_alloc */
5638 5638
5639 5639
/*
 * Reserve an XRI (exchange resource) without binding an I/O to it.
 *
 * Pops the head of the XRI free list, marks it ALLOCATED+RESERVED, and
 * moves it to the tail of the inuse list.  The object's iotag slot in
 * fc_table must be empty (or stale); otherwise the reservation is
 * refused.  Registration of an sbp happens later via
 * emlxs_sli4_register_xri().
 *
 * Returns the reserved XRI object, or NULL if none are available.
 */
static XRIobj_t *
emlxs_sli4_reserve_xri(emlxs_hba_t *hba, RPIobj_t *rpip)
{
	emlxs_port_t *port = &PPORT;
	XRIobj_t *xrip;
	uint16_t iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = hba->sli.sli4.XRIfree_f;

	/* A self-referencing head means the free list is empty */
	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "Unable to reserve XRI");

		return (NULL);
	}

	iotag = xrip->iotag;

	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to reserve iotag");

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	xrip->state = XRI_STATE_ALLOCATED;
	xrip->flag = EMLXS_XRI_RESERVED;
	xrip->rpip = rpip;
	xrip->sbp = NULL;

	/* Account for this XRI on the remote port, if one was given */
	if (rpip) {
		rpip->xri_count++;
	}

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);
	return (xrip);

} /* emlxs_sli4_reserve_xri() */
5702 5702
5703 5703
5704 5704 extern uint32_t
5705 5705 emlxs_sli4_unreserve_xri(emlxs_hba_t *hba, uint16_t xri, uint32_t lock)
5706 5706 {
5707 5707 emlxs_port_t *port = &PPORT;
5708 5708 XRIobj_t *xrip;
5709 5709
5710 5710 if (lock) {
5711 5711 mutex_enter(&EMLXS_FCTAB_LOCK);
5712 5712 }
5713 5713
5714 5714 xrip = emlxs_sli4_find_xri(hba, xri);
5715 5715
5716 5716 if (!xrip || xrip->state == XRI_STATE_FREE) {
5717 5717 if (lock) {
5718 5718 mutex_exit(&EMLXS_FCTAB_LOCK);
5719 5719 }
5720 5720
5721 5721 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5722 5722 "emlxs_sli4_unreserve_xri: xri=%x already freed.",
5723 5723 xrip->XRI);
5724 5724 return (0);
5725 5725 }
5726 5726
5727 5727 if (!(xrip->flag & EMLXS_XRI_RESERVED)) {
5728 5728 if (lock) {
5729 5729 mutex_exit(&EMLXS_FCTAB_LOCK);
5730 5730 }
5731 5731
5732 5732 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5733 5733 "emlxs_sli4_unreserve_xri: xri=%x in use.", xrip->XRI);
5734 5734 return (1);
5735 5735 }
5736 5736
5737 5737 if (xrip->iotag &&
5738 5738 (hba->fc_table[xrip->iotag] != NULL) &&
5739 5739 (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
5740 5740 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5741 5741 "emlxs_sli4_unreserve_xri:%x sbp dropped:%p",
5742 5742 xrip->XRI, hba->fc_table[xrip->iotag]);
5743 5743
5744 5744 hba->fc_table[xrip->iotag] = NULL;
5745 5745 hba->io_count--;
5746 5746 }
5747 5747
5748 5748 xrip->state = XRI_STATE_FREE;
5749 5749
5750 5750 if (xrip->rpip) {
5751 5751 xrip->rpip->xri_count--;
5752 5752 xrip->rpip = NULL;
5753 5753 }
5754 5754
5755 5755 /* Take it off inuse list */
5756 5756 (xrip->_b)->_f = xrip->_f;
5757 5757 (xrip->_f)->_b = xrip->_b;
5758 5758 xrip->_f = NULL;
5759 5759 xrip->_b = NULL;
5760 5760 hba->sli.sli4.xria_count--;
5761 5761
5762 5762 /* Add it to end of free list */
5763 5763 xrip->_b = hba->sli.sli4.XRIfree_b;
5764 5764 hba->sli.sli4.XRIfree_b->_f = xrip;
5765 5765 xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5766 5766 hba->sli.sli4.XRIfree_b = xrip;
5767 5767 hba->sli.sli4.xrif_count++;
5768 5768
5769 5769 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5770 5770 "emlxs_sli4_unreserve_xri: xri=%x unreserved.", xrip->XRI);
5771 5771
5772 5772 if (lock) {
5773 5773 mutex_exit(&EMLXS_FCTAB_LOCK);
5774 5774 }
5775 5775
5776 5776 return (0);
5777 5777
5778 5778 } /* emlxs_sli4_unreserve_xri() */
5779 5779
5780 5780
/*
 * Bind an I/O buffer (sbp) to a previously reserved XRI.
 *
 * Completes the reserve/register pair: validates that the XRI exists,
 * is allocated and still marked RESERVED, and that its iotag's
 * fc_table slot is empty or stale; then installs the sbp in fc_table,
 * links sbp and xrip to each other, and clears the RESERVED flag.
 *
 * Returns the XRI object, or NULL on any validation failure.
 */
static XRIobj_t *
emlxs_sli4_register_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, uint16_t xri)
{
	emlxs_port_t *port = &PPORT;
	uint16_t iotag;
	XRIobj_t *xrip;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = emlxs_sli4_find_xri(hba, xri);

	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "emlxs_sli4_register_xri: XRI not found.");


		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Must be allocated and still in the reserved state */
	if ((xrip->state == XRI_STATE_FREE) ||
	    !(xrip->flag & EMLXS_XRI_RESERVED)) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "emlxs_sli4_register_xri: Invalid XRI. xrip=%p "
		    "state=%x flag=%x",
		    xrip, xrip->state, xrip->flag);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	iotag = xrip->iotag;

	/* The iotag's command slot must be available */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "emlxs_sli4_register_xri: Invalid fc_table entry. "
		    "iotag=%x entry=%p",
		    iotag, hba->fc_table[iotag]);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Install the sbp and cross-link it with the XRI */
	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->flag &= ~EMLXS_XRI_RESERVED;
	xrip->sbp = sbp;

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_register_xri() */
5842 5842
5843 5843
/* Performs both reserve and register functions for XRI */
/*
 * One-shot equivalent of emlxs_sli4_reserve_xri() followed by
 * emlxs_sli4_register_xri(): pops the head of the XRI free list,
 * installs the sbp in fc_table at the XRI's iotag, cross-links sbp and
 * xrip, and moves the object to the inuse list (flag 0, not RESERVED).
 *
 * Returns the XRI object, or NULL if no free XRI or iotag slot is
 * available.
 */
static XRIobj_t *
emlxs_sli4_alloc_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, RPIobj_t *rpip)
{
	emlxs_port_t *port = &PPORT;
	XRIobj_t *xrip;
	uint16_t iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = hba->sli.sli4.XRIfree_f;

	/* A self-referencing head means the free list is empty */
	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		return (NULL);
	}

	/* Get the iotag by registering the packet */
	iotag = xrip->iotag;

	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to alloc iotag:(0x%x)(%p)",
		    iotag, hba->fc_table[iotag]);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Install the sbp and cross-link it with the XRI */
	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->state = XRI_STATE_ALLOCATED;
	xrip->flag = 0;
	xrip->rpip = rpip;
	xrip->sbp = sbp;

	/* Account for this XRI on the remote port, if one was given */
	if (rpip) {
		rpip->xri_count++;
	}

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_alloc_xri() */
5913 5913
5914 5914
5915 5915 /* EMLXS_FCTAB_LOCK must be held to enter */
5916 5916 extern XRIobj_t *
5917 5917 emlxs_sli4_find_xri(emlxs_hba_t *hba, uint16_t xri)
5918 5918 {
5919 5919 emlxs_port_t *port = &PPORT;
5920 5920 XRIobj_t *xrip;
5921 5921
5922 5922 xrip = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
5923 5923 while (xrip != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
5924 5924 if ((xrip->state >= XRI_STATE_ALLOCATED) &&
5925 5925 (xrip->XRI == xri)) {
5926 5926 return (xrip);
5927 5927 }
5928 5928 xrip = xrip->_f;
5929 5929 }
5930 5930
5931 5931 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5932 5932 "Unable to find XRI x%x", xri);
5933 5933
5934 5934 return (NULL);
5935 5935
5936 5936 } /* emlxs_sli4_find_xri() */
5937 5937
5938 5938
5939 5939
5940 5940
/*
 * Free an XRI and/or clean up its associated I/O buffer.
 *
 * Either or both of xrip and sbp may be supplied.  For the xrip: the
 * fc_table slot is vacated, the object is marked FREE, the remote-port
 * reference is dropped, and the object moves from the inuse list back
 * to the free list.  For the sbp: its iotag/xrip links are severed and
 * its TXQ/CHIPQ flags cleared.  'lock' nonzero means this routine
 * acquires/releases EMLXS_FCTAB_LOCK itself.
 */
extern void
emlxs_sli4_free_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, XRIobj_t *xrip,
    uint8_t lock)
{
	emlxs_port_t *port = &PPORT;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	if (xrip) {
		/* Guard against a double free */
		if (xrip->state == XRI_STATE_FREE) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI:%x, Already freed", xrip->XRI);
			return;
		}

		/* Vacate this iotag's fc_table slot */
		if (xrip->iotag &&
		    (hba->fc_table[xrip->iotag] != NULL) &&
		    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
			hba->fc_table[xrip->iotag] = NULL;
			hba->io_count--;
		}

		xrip->state = XRI_STATE_FREE;
		xrip->flag = 0;

		/* Release the remote-port reference */
		if (xrip->rpip) {
			xrip->rpip->xri_count--;
			xrip->rpip = NULL;
		}

		/* Take it off inuse list */
		(xrip->_b)->_f = xrip->_f;
		(xrip->_f)->_b = xrip->_b;
		xrip->_f = NULL;
		xrip->_b = NULL;
		hba->sli.sli4.xria_count--;

		/* Add it to end of free list */
		xrip->_b = hba->sli.sli4.XRIfree_b;
		hba->sli.sli4.XRIfree_b->_f = xrip;
		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b = xrip;
		hba->sli.sli4.xrif_count++;
	}

	if (sbp) {
		/* Do not touch an sbp the ULP owns or that has completed */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags &
		    (PACKET_ULP_OWNED|PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI: sbp invalid. sbp=%p flags=%x xri=%x",
			    sbp, sbp->pkt_flags, ((xrip)? xrip->XRI:0));
			return;
		}

		sbp->xrip = 0;

		/* Sanity check: sbp and xrip should agree on the iotag */
		if (xrip && (xrip->iotag != sbp->iotag)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "sbp / iotag mismatch %p iotag:%d %d", sbp,
			    sbp->iotag, xrip->iotag);
		}

		/* Vacate the sbp's own fc_table slot, if it still holds it */
		if (sbp->iotag) {
			if (sbp == hba->fc_table[sbp->iotag]) {
				hba->fc_table[sbp->iotag] = NULL;
				hba->io_count--;
			}
			sbp->iotag = 0;
		}

		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		/* Clean up the sbp */
		mutex_enter(&sbp->mtx);

		if (sbp->pkt_flags & PACKET_IN_TXQ) {
			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->channel_tx_count--;
		}

		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
		}

		mutex_exit(&sbp->mtx);
	} else {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}
	}

} /* emlxs_sli4_free_xri() */
6044 6044
6045 6045
/*
 * Post the SGL page addresses for every XRI to the adapter.
 *
 * Builds non-embedded FCOE_CFG_POST_SGL_PAGES mailbox commands, packing
 * as many XRI SGL page entries per command as fit in the non-embedded
 * payload buffer, and issues each command synchronously (MBX_WAIT).
 *
 * Returns 0 on success, EIO on buffer allocation or mailbox failure.
 */
static int
emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4 *mb = (MAILBOX4 *)mbq;
	emlxs_port_t *port = &PPORT;
	XRIobj_t *xrip;
	MATCHMAP *mp;
	mbox_req_hdr_t *hdr_req;
	uint32_t i, cnt, xri_cnt;
	uint32_t size;
	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;

	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
	mbq->bp = NULL;
	mbq->mbox_cmpl = NULL;

	/* DMA buffer that carries the non-embedded command payload */
	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unable to POST_SGL. Mailbox cmd=%x ",
		    mb->mbxCommand);
		return (EIO);
	}
	mbq->nonembed = (void *)mp;

	/*
	 * Signifies a non embedded command
	 */
	mb->un.varSLIConfig.be.embedded = 0;
	mb->mbxCommand = MBX_SLI_CONFIG;
	mb->mbxOwner = OWN_HOST;

	/* Payload layout: IOCTL request header, then the SGL page list */
	hdr_req = (mbox_req_hdr_t *)mp->virt;
	post_sgl =
	    (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);


	/* Walk the XRI array, issuing one mailbox command per full batch */
	xrip = hba->sli.sli4.XRIp;
	cnt = hba->sli.sli4.XRICount;
	while (cnt) {
		/* Reset the payload for this batch */
		bzero((void *) hdr_req, mp->size);
		size = mp->size - IOCTL_HEADER_SZ;

		mb->un.varSLIConfig.be.payload_length =
		    mp->size;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
		    IOCTL_SUBSYSTEM_FCOE;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
		    FCOE_OPCODE_CFG_POST_SGL_PAGES;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;

		hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
		hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
		hdr_req->timeout = 0;
		hdr_req->req_length = size;

		/* Batch starts at the current XRI; count filled in below */
		post_sgl->params.request.xri_count = 0;
		post_sgl->params.request.xri_start = xrip->XRI;
		xri_cnt = (size - sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
		    sizeof (FCOE_SGL_PAGES);
		for (i = 0; i < xri_cnt; i++) {

			post_sgl->params.request.xri_count++;
			post_sgl->params.request.pages[i].sgl_page0.addrLow =
			    PADDR_LO(xrip->SGList.phys);
			post_sgl->params.request.pages[i].sgl_page0.addrHigh =
			    PADDR_HI(xrip->SGList.phys);
			cnt--;
			xrip++;
			if (cnt == 0) {
				break;
			}
		}
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Unable to POST_SGL. Mailbox cmd=%x status=%x "
			    "XRI cnt:%d start:%d",
			    mb->mbxCommand, mb->mbxStatus,
			    post_sgl->params.request.xri_count,
			    post_sgl->params.request.xri_start);
			emlxs_mem_buf_free(hba, mp);
			mbq->nonembed = NULL;
			return (EIO);
		}
	}
	emlxs_mem_buf_free(hba, mp);
	mbq->nonembed = NULL;
	return (0);

} /* emlxs_sli4_post_sgl_pages() */
6137 6137
6138 6138
/*
 * Post the RPI header template pages to the adapter.
 *
 * Builds a single embedded FCOE_POST_HDR_TEMPLATES mailbox command that
 * lists every 4KB page of the pre-allocated HeaderTmplate DMA region,
 * then issues it synchronously (MBX_WAIT).
 *
 * Returns 0 on success, EIO on mailbox failure.
 */
static int
emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4 *mb = (MAILBOX4 *)mbq;
	emlxs_port_t *port = &PPORT;
	int i, cnt;
	uint64_t addr;
	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;

	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
	mbq->bp = NULL;
	mbq->mbox_cmpl = NULL;

	/*
	 * Signifies an embedded command
	 */
	mb->un.varSLIConfig.be.embedded = 1;

	mb->mbxCommand = MBX_SLI_CONFIG;
	mb->mbxOwner = OWN_HOST;
	mb->un.varSLIConfig.be.payload_length =
	    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
	mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
	    IOCTL_SUBSYSTEM_FCOE;
	mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
	    FCOE_OPCODE_POST_HDR_TEMPLATES;
	mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
	mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
	    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);
	post_hdr =
	    (IOCTL_FCOE_POST_HDR_TEMPLATES *)&mb->un.varSLIConfig.payload;
	addr = hba->sli.sli4.HeaderTmplate.phys;
	post_hdr->params.request.num_pages = 0;
	i = 0;
	cnt = hba->sli.sli4.HeaderTmplate.size;
	/* Enumerate the template region in 4KB pages */
	while (cnt > 0) {
		post_hdr->params.request.num_pages++;
		post_hdr->params.request.pages[i].addrLow = PADDR_LO(addr);
		post_hdr->params.request.pages[i].addrHigh = PADDR_HI(addr);
		i++;
		addr += 4096;
		cnt -= 4096;
	}
	post_hdr->params.request.starting_rpi_index = hba->sli.sli4.RPIBase;

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
	    MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x status=%x ",
		    mb->mbxCommand, mb->mbxStatus);
		return (EIO);
	}
	emlxs_data_dump(port, "POST_HDR", (uint32_t *)mb, 18, 0);
	return (0);

} /* emlxs_sli4_post_hdr_tmplates() */
6195 6195
6196 6196
/*
 * Create the full SLI4 queue set: one EQ per interrupt vector, the CQs
 * and WQs bound to each EQ, the RQ pairs for unsolicited receive, and
 * finally the mailbox queue (MCC_EXT preferred, plain MQ as fallback).
 * Queue IDs returned by the adapter are recorded in the driver's
 * qid->ordinal map tables.
 *
 * Returns 0 on success, EIO on the first mailbox failure (no teardown
 * of already-created queues is attempted here).
 */
static int
emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4 *mb = (MAILBOX4 *)mbq;
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	IOCTL_COMMON_EQ_CREATE *eq;
	IOCTL_COMMON_CQ_CREATE *cq;
	IOCTL_FCOE_WQ_CREATE *wq;
	IOCTL_FCOE_RQ_CREATE *rq;
	IOCTL_COMMON_MQ_CREATE *mq;
	IOCTL_COMMON_MCC_CREATE_EXT *mcc_ext;
	emlxs_rqdbu_t rqdb;
	uint16_t i, j;
	uint16_t num_cq, total_cq;
	uint16_t num_wq, total_wq;

	/*
	 * The first CQ is reserved for ASYNC events,
	 * the second is reserved for unsol rcv, the rest
	 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
	 */

	/* First initialize queue ordinal mapping */
	/* (0xffff marks "no queue with this hardware id") */
	for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
		hba->sli.sli4.eq_map[i] = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
		hba->sli.sli4.cq_map[i] = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
		hba->sli.sli4.wq_map[i] = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_RQ_IDS; i++) {
		hba->sli.sli4.rq_map[i] = 0xffff;
	}

	total_cq = 0;
	total_wq = 0;

	/* Create EQ's */
	for (i = 0; i < hba->intr_count; i++) {
		emlxs_mb_eq_create(hba, mbq, i);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
		hba->sli.sli4.eq_map[eq->params.response.EQId] = i;
		hba->sli.sli4.eq[i].lastwq = total_wq;

		emlxs_data_dump(port, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
		num_wq = cfg[CFG_NUM_WQ].current;
		num_cq = num_wq;
		if (i == 0) {
			/* One for RQ handling, one for mbox/event handling */
			num_cq += EMLXS_CQ_OFFSET_WQ;
		}

		/* Create the CQs attached to this EQ */
		for (j = 0; j < num_cq; j++) {
			/* Reuse mbq from previous mbox */
			bzero(mbq, sizeof (MAILBOXQ));

			hba->sli.sli4.cq[total_cq].eqid =
			    hba->sli.sli4.eq[i].qid;

			emlxs_mb_cq_create(hba, mbq, total_cq);
			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg, "Unable to Create "
				    "CQ %d: Mailbox cmd=%x status=%x ",
				    total_cq, mb->mbxCommand, mb->mbxStatus);
				return (EIO);
			}
			cq = (IOCTL_COMMON_CQ_CREATE *)
			    &mb->un.varSLIConfig.payload;
			hba->sli.sli4.cq[total_cq].qid =
			    cq->params.response.CQId;
			hba->sli.sli4.cq_map[cq->params.response.CQId] =
			    total_cq;

			switch (total_cq) {
			case EMLXS_CQ_MBOX:
				/* First CQ is for async event handling */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP1;
				break;

			case EMLXS_CQ_RCV:
				/* Second CQ is for unsol receive handling */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP2;
				break;

			default:
				/* Setup CQ to channel mapping */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP2;
				hba->sli.sli4.cq[total_cq].channelp =
				    &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
				break;
			}
			emlxs_data_dump(port, "CQX_CREATE", (uint32_t *)mb, 18, 0);
			total_cq++;
		}

		/* Create the WQs attached to this EQ's CQs */
		for (j = 0; j < num_wq; j++) {
			/* Reuse mbq from previous mbox */
			bzero(mbq, sizeof (MAILBOXQ));

			hba->sli.sli4.wq[total_wq].cqid =
			    hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;

			emlxs_mb_wq_create(hba, mbq, total_wq);
			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg, "Unable to Create "
				    "WQ %d: Mailbox cmd=%x status=%x ",
				    total_wq, mb->mbxCommand, mb->mbxStatus);
				return (EIO);
			}
			wq = (IOCTL_FCOE_WQ_CREATE *)
			    &mb->un.varSLIConfig.payload;
			hba->sli.sli4.wq[total_wq].qid =
			    wq->params.response.WQId;
			hba->sli.sli4.wq_map[wq->params.response.WQId] =
			    total_wq;

			/* NOTE(review): cqid was already set before the */
			/* mailbox call; this re-assignment looks redundant */
			hba->sli.sli4.wq[total_wq].cqid =
			    hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
			emlxs_data_dump(port, "WQ_CREATE", (uint32_t *)mb, 18, 0);
			total_wq++;
		}
		hba->last_msiid = i;
	}

	/* We assume 1 RQ pair will handle ALL incoming data */
	/* Create RQs */
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		/* Personalize the RQ */
		switch (i) {
		case 0:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		case 1:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		default:
			hba->sli.sli4.rq[i].cqid = 0xffff;
		}

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_rq_create(hba, mbq, i);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
		hba->sli.sli4.rq_map[rq->params.response.RQId] = i;
		emlxs_data_dump(port, "RQ CREATE", (uint32_t *)mb, 18, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ CREATE: rq[%d].qid=%d cqid=%d",
		    i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);

		/* Initialize the host_index */
		hba->sli.sli4.rq[i].host_index = 0;

		/* If Data queue was just created, */
		/* then post buffers using the header qid */
		/* (RQs come in header/data pairs: even=header, odd=data) */
		if ((i & 0x1)) {
			/* Ring the RQ doorbell to post buffers */
			rqdb.word = 0;
			rqdb.db.Qid = hba->sli.sli4.rq[i-1].qid;
			rqdb.db.NumPosted = RQB_COUNT;

			WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ CREATE: Doorbell rang: qid=%d count=%d",
			    hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
		}
	}

	/* Create MQ */

	/* Personalize the MQ */
	hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;

	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	/* Try the extended MCC queue first; fall back to a plain MQ */
	emlxs_mb_mcc_create_ext(hba, mbq);
	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
	    MBX_SUCCESS) {
		/* NOTE(review): 'i' here is stale (left over from the RQ */
		/* loop above), so the %d in these messages is meaningless */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to Create MCC_EXT %d: Mailbox cmd=%x status=%x ",
		    i, mb->mbxCommand, mb->mbxStatus);

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_mq_create(hba, mbq);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create MQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}

		mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.mq.qid = mq->params.response.MQId;
		return (0);
	}

	mcc_ext = (IOCTL_COMMON_MCC_CREATE_EXT *)&mb->un.varSLIConfig.payload;
	hba->sli.sli4.mq.qid = mcc_ext->params.response.id;
	return (0);

} /* emlxs_sli4_create_queues() */
6432 6432
6433 6433
6434 6434 /*ARGSUSED*/
6435 6435 extern int
6436 6436 emlxs_sli4_check_fcf_config(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
6437 6437 {
6438 6438 int i;
6439 6439
6440 6440 if (!(hba->flag & FC_FIP_SUPPORTED)) {
6441 6441 if (!hba->sli.sli4.cfgFCOE.length) {
6442 6442 /* Nothing specified, so everything matches */
6443 6443 /* For nonFIP only use index 0 */
6444 6444 if (fcfrec->fcf_index == 0) {
6445 6445 return (1); /* success */
6446 6446 }
6447 6447 return (0);
6448 6448 }
6449 6449
6450 6450 /* Just check FCMap for now */
6451 6451 if (bcmp((char *)fcfrec->fc_map,
6452 6452 hba->sli.sli4.cfgFCOE.FCMap, 3) == 0) {
6453 6453 return (1); /* success */
6454 6454 }
6455 6455 return (0);
6456 6456 }
6457 6457
6458 6458 /* For FIP mode, the FCF record must match Config Region 23 */
6459 6459
6460 6460 if (!hba->sli.sli4.cfgFCF.length) {
6461 6461 /* Nothing specified, so everything matches */
6462 6462 return (1); /* success */
6463 6463 }
6464 6464
6465 6465 /* Just check FabricName for now */
6466 6466 for (i = 0; i < MAX_FCFCONNECTLIST_ENTRIES; i++) {
6467 6467 if ((hba->sli.sli4.cfgFCF.entry[i].FabricNameValid) &&
6468 6468 (bcmp((char *)fcfrec->fabric_name_identifier,
6469 6469 hba->sli.sli4.cfgFCF.entry[i].FabricName, 8) == 0)) {
6470 6470 return (1); /* success */
6471 6471 }
6472 6472 }
6473 6473 return (0);
6474 6474
6475 6475 } /* emlxs_sli4_check_fcf_config() */
6476 6476
6477 6477
6478 6478 extern void
6479 6479 emlxs_sli4_timer(emlxs_hba_t *hba)
6480 6480 {
6481 6481 /* Perform SLI4 level timer checks */
6482 6482
6483 6483 emlxs_fcf_timer_notify(hba);
6484 6484
6485 6485 emlxs_sli4_timer_check_mbox(hba);
6486 6486
6487 6487 return;
6488 6488
6489 6489 } /* emlxs_sli4_timer() */
6490 6490
6491 6491
/*
 * Check for a timed-out mailbox command and, if one is found, log it,
 * mark the HBA in error, complete the mailbox with MBX_TIMEOUT (waking
 * any sleeping/polling thread), and spawn an adapter shutdown.
 *
 * The mbox timer state is examined and cleared under EMLXS_PORT_LOCK;
 * the cleanup calls are deliberately made after the lock is dropped.
 * No-op when timeout checking is disabled via configuration.
 */
static void
emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* The first to service the mbox queue will clear the timer */
	hba->mbox_timer = 0;

	/* Snapshot the active mailbox, if any, for logging below */
	if (hba->mbox_queue_flag) {
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	if (mb) {
		/* Log the timeout with the issue mode of the command */
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	return;

} /* emlxs_sli4_timer_check_mbox() */
6568 6568
6569 6569
6570 6570 extern void
6571 6571 emlxs_data_dump(emlxs_port_t *port, char *str, uint32_t *iptr, int cnt, int err)
6572 6572 {
6573 6573 void *msg;
6574 6574
6575 6575 if (err) {
6576 6576 msg = &emlxs_sli_err_msg;
6577 6577 } else {
6578 6578 msg = &emlxs_sli_detail_msg;
6579 6579 }
6580 6580
6581 6581 if (cnt) {
6582 6582 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6583 6583 "%s00: %08x %08x %08x %08x %08x %08x", str, *iptr,
6584 6584 *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
6585 6585 }
6586 6586 if (cnt > 6) {
6587 6587 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6588 6588 "%s06: %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
6589 6589 *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
6590 6590 }
6591 6591 if (cnt > 12) {
6592 6592 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6593 6593 "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
6594 6594 *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
6595 6595 }
6596 6596 if (cnt > 18) {
6597 6597 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6598 6598 "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
6599 6599 *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
6600 6600 }
6601 6601 if (cnt > 24) {
6602 6602 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6603 6603 "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
6604 6604 *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
6605 6605 }
6606 6606 if (cnt > 30) {
6607 6607 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6608 6608 "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
6609 6609 *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
6610 6610 }
6611 6611 if (cnt > 36) {
6612 6612 EMLXS_MSGF(EMLXS_CONTEXT, msg,
6613 6613 "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
6614 6614 *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
6615 6615 }
6616 6616
6617 6617 } /* emlxs_data_dump() */
6618 6618
6619 6619
6620 6620 extern void
6621 6621 emlxs_ue_dump(emlxs_hba_t *hba, char *str)
6622 6622 {
6623 6623 emlxs_port_t *port = &PPORT;
6624 6624 uint32_t ue_h;
6625 6625 uint32_t ue_l;
6626 6626 uint32_t on1;
6627 6627 uint32_t on2;
6628 6628
6629 6629 ue_l = ddi_get32(hba->pci_acc_handle,
6630 6630 (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
6631 6631 ue_h = ddi_get32(hba->pci_acc_handle,
6632 6632 (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
6633 6633 on1 = ddi_get32(hba->pci_acc_handle,
6634 6634 (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
6635 6635 on2 = ddi_get32(hba->pci_acc_handle,
6636 6636 (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));
6637 6637
6638 6638 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6639 6639 "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
6640 6640 ue_l, ue_h, on1, on2);
6641 6641
6642 6642 #ifdef FMA_SUPPORT
6643 6643 /* Access handle validation */
6644 6644 EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
6645 6645 #endif /* FMA_SUPPORT */
6646 6646
6647 6647 } /* emlxs_ue_dump() */
6648 6648
6649 6649
/*
 * Poll the adapter's Unrecoverable Error registers for a fatal
 * hardware condition (called from the driver timer).
 *
 * If any UE status bit that is not masked off is set, or a prior SLI4
 * hardware error was flagged, the HBA is put in the error state, its
 * outstanding chip-queue I/O is flushed, and an asynchronous shutdown
 * thread is spawned.  No-op once FC_HARDWARE_ERROR is already set.
 */
static void
emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t ue_h;
	uint32_t ue_l;

	if (hba->flag & FC_HARDWARE_ERROR) {
		return;
	}

	ue_l = ddi_get32(hba->pci_acc_handle,
	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
	ue_h = ddi_get32(hba->pci_acc_handle,
	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));

	/* A status bit outside the mask indicates an unrecoverable error */
	if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
	    (~hba->sli.sli4.ue_mask_hi & ue_h) ||
	    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
		/* Unrecoverable error detected */
		/* Shut the HBA down */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
		    "maskHigh:%08x",
		    ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
		    hba->sli.sli4.ue_mask_hi);

		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);
	}

} /* emlxs_sli4_poll_erratt() */
6685 6685
6686 6686
6687 6687 extern uint32_t
6688 6688 emlxs_sli4_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
6689 6689 emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
6690 6690 {
6691 6691 emlxs_hba_t *hba = HBA;
6692 6692 NODELIST *node;
6693 6693 RPIobj_t *rpip;
6694 6694 uint32_t rval;
6695 6695
6696 6696 /* Check for invalid node ids to register */
6697 6697 if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
6698 6698 return (1);
6699 6699 }
6700 6700
6701 6701 if (did & 0xff000000) {
6702 6702 return (1);
6703 6703 }
6704 6704
6705 6705 if ((rval = emlxs_mb_check_sparm(hba, param))) {
6706 6706 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
6707 6707 "Invalid service parameters. did=%06x rval=%d", did,
6708 6708 rval);
6709 6709
6710 6710 return (1);
6711 6711 }
6712 6712
6713 6713 /* Check if the node limit has been reached */
6714 6714 if (port->node_count >= hba->max_nodes) {
6715 6715 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
6716 6716 "Limit reached. did=%06x count=%d", did,
6717 6717 port->node_count);
6718 6718
6719 6719 return (1);
6720 6720 }
6721 6721
6722 6722 node = emlxs_node_find_did(port, did);
6723 6723 rpip = EMLXS_NODE_TO_RPI(port, node);
6724 6724
6725 6725 rval = emlxs_rpi_online_notify(port, rpip, did, param, (void *)sbp,
6726 6726 (void *)ubp, (void *)iocbq);
6727 6727
6728 6728 return (rval);
6729 6729
6730 6730 } /* emlxs_sli4_reg_did() */
6731 6731
6732 6732
6733 6733 extern uint32_t
6734 6734 emlxs_sli4_unreg_node(emlxs_port_t *port, emlxs_node_t *node,
6735 6735 emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
6736 6736 {
6737 6737 RPIobj_t *rpip;
6738 6738 uint32_t rval;
6739 6739
6740 6740 if (!node) {
6741 6741 /* Unreg all nodes */
6742 6742 (void) emlxs_sli4_unreg_all_nodes(port);
6743 6743 return (1);
6744 6744 }
6745 6745
6746 6746 /* Check for base node */
6747 6747 if (node == &port->node_base) {
6748 6748 /* Just flush base node */
6749 6749 (void) emlxs_tx_node_flush(port, &port->node_base,
6750 6750 0, 0, 0);
6751 6751
6752 6752 (void) emlxs_chipq_node_flush(port, 0,
6753 6753 &port->node_base, 0);
6754 6754
6755 6755 port->did = 0;
6756 6756
6757 6757 /* Return now */
6758 6758 return (1);
6759 6759 }
6760 6760
6761 6761 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6762 6762 "unreg_node:%p did=%x rpi=%d",
6763 6763 node, node->nlp_DID, node->nlp_Rpi);
6764 6764
6765 6765 rpip = EMLXS_NODE_TO_RPI(port, node);
6766 6766
6767 6767 if (!rpip) {
6768 6768 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6769 6769 "unreg_node:%p did=%x rpi=%d. RPI not found.",
6770 6770 node, node->nlp_DID, node->nlp_Rpi);
6771 6771
6772 6772 emlxs_node_rm(port, node);
6773 6773 return (1);
6774 6774 }
6775 6775
6776 6776 rval = emlxs_rpi_offline_notify(port, rpip, (void *)sbp, (void *)ubp,
6777 6777 (void *)iocbq);
6778 6778
6779 6779 return (rval);
6780 6780
6781 6781 } /* emlxs_sli4_unreg_node() */
6782 6782
6783 6783
/*
 * Unregister every node on the port.
 *
 * Two-pass tag-and-sweep: first, every node in the hash table is
 * tagged under node_rwlock; then one tagged node at a time is located,
 * untagged, and unregistered with the lock dropped (unreg may block or
 * modify the table).  The tag guarantees each node is processed at
 * most once even though the lock is released between iterations.
 *
 * Always returns 0.
 */
extern uint32_t
emlxs_sli4_unreg_all_nodes(emlxs_port_t *port)
{
	NODELIST *nlp;
	int i;
	uint32_t found;

	/* Set the node tags */
	/* We will process all nodes with this tag */
	rw_enter(&port->node_rwlock, RW_READER);
	found = 0;
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			found = 1;
			nlp->nlp_tag = 1;
			nlp = nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);

	/* No nodes at all: nothing to do */
	if (!found) {
		return (0);
	}

	/* Sweep: pick off one tagged node per pass until none remain */
	for (;;) {
		rw_enter(&port->node_rwlock, RW_READER);
		found = 0;
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if (!nlp->nlp_tag) {
					nlp = nlp->nlp_list_next;
					continue;
				}
				nlp->nlp_tag = 0;
				found = 1;
				break;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		if (!found) {
			break;
		}

		/* Lock is dropped here; nlp was untagged above */
		(void) emlxs_sli4_unreg_node(port, nlp, 0, 0, 0);
	}

	return (0);

} /* emlxs_sli4_unreg_all_nodes() */
↓ open down ↓ |
2863 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX