Print this page
5255 uts shouldn't open-code ISP2
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ib/adapters/hermon/hermon_rsrc.c
+++ new/usr/src/uts/common/io/ib/adapters/hermon/hermon_rsrc.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
↓ open down ↓ |
24 lines elided |
↑ open up ↑ |
25 25
26 26 /*
27 27 * hermon_rsrc.c
28 28 * Hermon Resource Management Routines
29 29 *
30 30 * Implements all the routines necessary for setup, teardown, and
31 31 * alloc/free of all Hermon resources, including those that are managed
32 32 * by Hermon hardware or which live in Hermon's direct attached DDR memory.
33 33 */
34 34
35 +#include <sys/sysmacros.h>
35 36 #include <sys/types.h>
36 37 #include <sys/conf.h>
37 38 #include <sys/ddi.h>
38 39 #include <sys/sunddi.h>
39 40 #include <sys/modctl.h>
40 41 #include <sys/vmem.h>
41 42 #include <sys/bitmap.h>
42 43
43 44 #include <sys/ib/adapters/hermon/hermon.h>
44 45
45 46 int hermon_rsrc_verbose = 0;
46 47
47 48 /*
48 49 * The following routines are used for initializing and destroying
49 50 * the resource pools used by the Hermon resource allocation routines.
50 51 * They consist of four classes of object:
51 52 *
52 53 * Mailboxes: The "In" and "Out" mailbox types are used by the Hermon
53 54 * command interface routines. Mailboxes are used to pass information
54 55 * back and forth to the Hermon firmware. Either type of mailbox may
55 56 * be allocated from Hermon's direct attached DDR memory or from system
56 57 * memory (although currently all "In" mailboxes are in DDR and all "out"
57 58 * mailboxes come from system memory.
58 59 *
59 60 * HW entry objects: These objects represent resources required by the Hermon
60 61 * hardware. These objects include things like Queue Pair contexts (QPC),
61 62 * Completion Queue contexts (CQC), Event Queue contexts (EQC), RDB (for
62 63 * supporting RDMA Read/Atomic), Multicast Group entries (MCG), Memory
63 64 * Protection Table entries (MPT), Memory Translation Table entries (MTT).
64 65 *
65 66 * What these objects all have in common is that they are each required
66 67 * to come from ICM memory, they are always allocated from tables, and
67 68 * they are not to be directly accessed (read or written) by driver
68 69 * software (Mellanox FMR access to MPT is an exception).
69 70 * The other notable exceptions are the UAR pages (UAR_PG) which are
70 71 * allocated from the UAR address space rather than DDR, and the UD
71 72 * address vectors (UDAV) which are similar to the common object types
72 73 * with the major difference being that UDAVs _are_ directly read and
73 74 * written by driver software.
74 75 *
75 76 * SW handle objects: These objects represent resources required by Hermon
76 77 * driver software. They are primarily software tracking structures,
77 78 * which are allocated from system memory (using kmem_cache). Several of
78 79 * the objects have both a "constructor" and "destructor" method
79 80 * associated with them (see below).
80 81 *
81 82 * Protection Domain (PD) handle objects: These objects are very much like
82 83 * a SW handle object with the notable difference that all PD handle
83 84 * objects have an actual Protection Domain number (PD) associated with
84 85 * them (and the PD number is allocated/managed through a separate
85 86 * vmem_arena specifically set aside for this purpose.
86 87 */
87 88
88 89 static int hermon_rsrc_mbox_init(hermon_state_t *state,
89 90 hermon_rsrc_mbox_info_t *info);
90 91 static void hermon_rsrc_mbox_fini(hermon_state_t *state,
91 92 hermon_rsrc_mbox_info_t *info);
92 93
93 94 static int hermon_rsrc_sw_handles_init(hermon_state_t *state,
94 95 hermon_rsrc_sw_hdl_info_t *info);
95 96 static void hermon_rsrc_sw_handles_fini(hermon_state_t *state,
96 97 hermon_rsrc_sw_hdl_info_t *info);
97 98
98 99 static int hermon_rsrc_pd_handles_init(hermon_state_t *state,
99 100 hermon_rsrc_sw_hdl_info_t *info);
100 101 static void hermon_rsrc_pd_handles_fini(hermon_state_t *state,
101 102 hermon_rsrc_sw_hdl_info_t *info);
102 103
103 104 /*
104 105 * The following routines are used for allocating and freeing the specific
105 106 * types of objects described above from their associated resource pools.
106 107 */
107 108 static int hermon_rsrc_mbox_alloc(hermon_rsrc_pool_info_t *pool_info,
108 109 uint_t num, hermon_rsrc_t *hdl);
109 110 static void hermon_rsrc_mbox_free(hermon_rsrc_t *hdl);
110 111
111 112 static int hermon_rsrc_hw_entry_alloc(hermon_rsrc_pool_info_t *pool_info,
112 113 uint_t num, uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl);
113 114 static void hermon_rsrc_hw_entry_free(hermon_rsrc_pool_info_t *pool_info,
114 115 hermon_rsrc_t *hdl);
115 116 static int hermon_rsrc_hw_entry_reserve(hermon_rsrc_pool_info_t *pool_info,
116 117 uint_t num, uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl);
117 118
118 119 static int hermon_rsrc_hw_entry_icm_confirm(hermon_rsrc_pool_info_t *pool_info,
119 120 uint_t num, hermon_rsrc_t *hdl, int num_to_hdl);
120 121 static int hermon_rsrc_hw_entry_icm_free(hermon_rsrc_pool_info_t *pool_info,
121 122 hermon_rsrc_t *hdl, int num_to_hdl);
122 123
123 124 static int hermon_rsrc_swhdl_alloc(hermon_rsrc_pool_info_t *pool_info,
124 125 uint_t sleepflag, hermon_rsrc_t *hdl);
125 126 static void hermon_rsrc_swhdl_free(hermon_rsrc_pool_info_t *pool_info,
126 127 hermon_rsrc_t *hdl);
127 128
128 129 static int hermon_rsrc_pdhdl_alloc(hermon_rsrc_pool_info_t *pool_info,
129 130 uint_t sleepflag, hermon_rsrc_t *hdl);
130 131 static void hermon_rsrc_pdhdl_free(hermon_rsrc_pool_info_t *pool_info,
131 132 hermon_rsrc_t *hdl);
132 133
133 134 static int hermon_rsrc_fexch_alloc(hermon_state_t *state,
134 135 hermon_rsrc_type_t rsrc, uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl);
135 136 static void hermon_rsrc_fexch_free(hermon_state_t *state, hermon_rsrc_t *hdl);
136 137 static int hermon_rsrc_rfci_alloc(hermon_state_t *state,
137 138 hermon_rsrc_type_t rsrc, uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl);
138 139 static void hermon_rsrc_rfci_free(hermon_state_t *state, hermon_rsrc_t *hdl);
139 140
140 141 /*
141 142 * The following routines are the constructors and destructors for several
142 143 * of the SW handle type objects. For certain types of SW handles objects
143 144 * (all of which are implemented using kmem_cache), we need to do some
144 145 * special field initialization (specifically, mutex_init/destroy). These
145 146 * routines enable that init and teardown.
146 147 */
147 148 static int hermon_rsrc_pdhdl_constructor(void *pd, void *priv, int flags);
148 149 static void hermon_rsrc_pdhdl_destructor(void *pd, void *state);
149 150 static int hermon_rsrc_cqhdl_constructor(void *cq, void *priv, int flags);
150 151 static void hermon_rsrc_cqhdl_destructor(void *cq, void *state);
151 152 static int hermon_rsrc_qphdl_constructor(void *cq, void *priv, int flags);
152 153 static void hermon_rsrc_qphdl_destructor(void *cq, void *state);
153 154 static int hermon_rsrc_srqhdl_constructor(void *srq, void *priv, int flags);
154 155 static void hermon_rsrc_srqhdl_destructor(void *srq, void *state);
155 156 static int hermon_rsrc_refcnt_constructor(void *rc, void *priv, int flags);
156 157 static void hermon_rsrc_refcnt_destructor(void *rc, void *state);
157 158 static int hermon_rsrc_ahhdl_constructor(void *ah, void *priv, int flags);
158 159 static void hermon_rsrc_ahhdl_destructor(void *ah, void *state);
159 160 static int hermon_rsrc_mrhdl_constructor(void *mr, void *priv, int flags);
160 161 static void hermon_rsrc_mrhdl_destructor(void *mr, void *state);
161 162
162 163 /*
163 164 * Special routine to calculate and return the size of a MCG object based
164 165 * on current driver configuration (specifically, the number of QP per MCG
165 166 * that has been configured.
166 167 */
167 168 static int hermon_rsrc_mcg_entry_get_size(hermon_state_t *state,
168 169 uint_t *mcg_size_shift);
169 170
170 171
171 172 /*
172 173 * hermon_rsrc_alloc()
173 174 *
174 175 * Context: Can be called from interrupt or base context.
175 176 * The "sleepflag" parameter is used by all object allocators to
176 177 * determine whether to SLEEP for resources or not.
177 178 */
178 179 int
179 180 hermon_rsrc_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc, uint_t num,
180 181 uint_t sleepflag, hermon_rsrc_t **hdl)
181 182 {
182 183 hermon_rsrc_pool_info_t *rsrc_pool;
183 184 hermon_rsrc_t *tmp_rsrc_hdl;
184 185 int flag, status = DDI_FAILURE;
185 186
186 187 ASSERT(state != NULL);
187 188 ASSERT(hdl != NULL);
188 189
189 190 rsrc_pool = &state->hs_rsrc_hdl[rsrc];
190 191 ASSERT(rsrc_pool != NULL);
191 192
192 193 /*
193 194 * Allocate space for the object used to track the resource handle
194 195 */
195 196 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
196 197 tmp_rsrc_hdl = kmem_cache_alloc(state->hs_rsrc_cache, flag);
197 198 if (tmp_rsrc_hdl == NULL) {
198 199 return (DDI_FAILURE);
199 200 }
200 201 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*tmp_rsrc_hdl))
201 202
202 203 /*
203 204 * Set rsrc_hdl type. This is later used by the hermon_rsrc_free call
204 205 * to know what type of resource is being freed.
205 206 */
206 207 tmp_rsrc_hdl->rsrc_type = rsrc;
207 208
208 209 /*
209 210 * Depending on resource type, call the appropriate alloc routine
210 211 */
211 212 switch (rsrc) {
212 213 case HERMON_IN_MBOX:
213 214 case HERMON_OUT_MBOX:
214 215 case HERMON_INTR_IN_MBOX:
215 216 case HERMON_INTR_OUT_MBOX:
216 217 status = hermon_rsrc_mbox_alloc(rsrc_pool, num, tmp_rsrc_hdl);
217 218 break;
218 219
219 220 case HERMON_DMPT:
220 221 /* Allocate "num" (contiguous/aligned for FEXCH) DMPTs */
221 222 case HERMON_QPC:
222 223 /* Allocate "num" (contiguous/aligned for RSS) QPCs */
223 224 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, num,
224 225 sleepflag, tmp_rsrc_hdl);
225 226 break;
226 227
227 228 case HERMON_QPC_FEXCH_PORT1:
228 229 case HERMON_QPC_FEXCH_PORT2:
229 230 /* Allocate "num" contiguous/aligned QPCs for FEXCH */
230 231 status = hermon_rsrc_fexch_alloc(state, rsrc, num,
231 232 sleepflag, tmp_rsrc_hdl);
232 233 break;
233 234
234 235 case HERMON_QPC_RFCI_PORT1:
235 236 case HERMON_QPC_RFCI_PORT2:
236 237 /* Allocate "num" contiguous/aligned QPCs for RFCI */
237 238 status = hermon_rsrc_rfci_alloc(state, rsrc, num,
238 239 sleepflag, tmp_rsrc_hdl);
239 240 break;
240 241
241 242 case HERMON_MTT:
242 243 case HERMON_CQC:
243 244 case HERMON_SRQC:
244 245 case HERMON_EQC:
245 246 case HERMON_MCG:
246 247 case HERMON_UARPG:
247 248 /* Allocate "num" unaligned resources */
248 249 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, 1,
249 250 sleepflag, tmp_rsrc_hdl);
250 251 break;
251 252
252 253 case HERMON_MRHDL:
253 254 case HERMON_EQHDL:
254 255 case HERMON_CQHDL:
255 256 case HERMON_SRQHDL:
256 257 case HERMON_AHHDL:
257 258 case HERMON_QPHDL:
258 259 case HERMON_REFCNT:
259 260 status = hermon_rsrc_swhdl_alloc(rsrc_pool, sleepflag,
260 261 tmp_rsrc_hdl);
261 262 break;
262 263
263 264 case HERMON_PDHDL:
264 265 status = hermon_rsrc_pdhdl_alloc(rsrc_pool, sleepflag,
265 266 tmp_rsrc_hdl);
266 267 break;
267 268
268 269 case HERMON_RDB: /* handled during HERMON_QPC */
269 270 case HERMON_ALTC: /* handled during HERMON_QPC */
270 271 case HERMON_AUXC: /* handled during HERMON_QPC */
271 272 case HERMON_CMPT_QPC: /* handled during HERMON_QPC */
272 273 case HERMON_CMPT_SRQC: /* handled during HERMON_SRQC */
273 274 case HERMON_CMPT_CQC: /* handled during HERMON_CPC */
274 275 case HERMON_CMPT_EQC: /* handled during HERMON_EPC */
275 276 default:
276 277 HERMON_WARNING(state, "unexpected resource type in alloc ");
277 278 cmn_err(CE_WARN, "Resource type %x \n", rsrc_pool->rsrc_type);
278 279 break;
279 280 }
280 281
281 282 /*
282 283 * If the resource allocation failed, then free the special resource
283 284 * tracking structure and return failure. Otherwise return the
284 285 * handle for the resource tracking structure.
285 286 */
286 287 if (status != DDI_SUCCESS) {
287 288 kmem_cache_free(state->hs_rsrc_cache, tmp_rsrc_hdl);
288 289 return (DDI_FAILURE);
289 290 } else {
290 291 *hdl = tmp_rsrc_hdl;
291 292 return (DDI_SUCCESS);
292 293 }
293 294 }
294 295
295 296
296 297 /*
297 298 * hermon_rsrc_reserve()
298 299 *
299 300 * Context: Can only be called from attach.
300 301 * The "sleepflag" parameter is used by all object allocators to
301 302 * determine whether to SLEEP for resources or not.
302 303 */
303 304 int
304 305 hermon_rsrc_reserve(hermon_state_t *state, hermon_rsrc_type_t rsrc, uint_t num,
305 306 uint_t sleepflag, hermon_rsrc_t **hdl)
306 307 {
307 308 hermon_rsrc_pool_info_t *rsrc_pool;
308 309 hermon_rsrc_t *tmp_rsrc_hdl;
309 310 int flag, status = DDI_FAILURE;
310 311
311 312 ASSERT(state != NULL);
312 313 ASSERT(hdl != NULL);
313 314
314 315 rsrc_pool = &state->hs_rsrc_hdl[rsrc];
315 316 ASSERT(rsrc_pool != NULL);
316 317
317 318 /*
318 319 * Allocate space for the object used to track the resource handle
319 320 */
320 321 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
321 322 tmp_rsrc_hdl = kmem_cache_alloc(state->hs_rsrc_cache, flag);
322 323 if (tmp_rsrc_hdl == NULL) {
323 324 return (DDI_FAILURE);
324 325 }
325 326 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*tmp_rsrc_hdl))
326 327
327 328 /*
328 329 * Set rsrc_hdl type. This is later used by the hermon_rsrc_free call
329 330 * to know what type of resource is being freed.
330 331 */
331 332 tmp_rsrc_hdl->rsrc_type = rsrc;
332 333
333 334 switch (rsrc) {
334 335 case HERMON_QPC:
335 336 case HERMON_DMPT:
336 337 case HERMON_MTT:
337 338 /*
338 339 * Reserve num resources, naturally aligned (N * num).
339 340 */
340 341 status = hermon_rsrc_hw_entry_reserve(rsrc_pool, num, num,
341 342 sleepflag, tmp_rsrc_hdl);
342 343 break;
343 344
344 345 default:
345 346 HERMON_WARNING(state, "unexpected resource type in reserve ");
346 347 cmn_err(CE_WARN, "Resource type %x \n", rsrc);
347 348 break;
348 349 }
349 350
350 351 /*
351 352 * If the resource allocation failed, then free the special resource
352 353 * tracking structure and return failure. Otherwise return the
353 354 * handle for the resource tracking structure.
354 355 */
355 356 if (status != DDI_SUCCESS) {
356 357 kmem_cache_free(state->hs_rsrc_cache, tmp_rsrc_hdl);
357 358 return (DDI_FAILURE);
358 359 } else {
359 360 *hdl = tmp_rsrc_hdl;
360 361 return (DDI_SUCCESS);
361 362 }
362 363 }
363 364
364 365
365 366 /*
366 367 * hermon_rsrc_fexch_alloc()
367 368 *
368 369 * Context: Can only be called from base context.
369 370 * The "sleepflag" parameter is used by all object allocators to
370 371 * determine whether to SLEEP for resources or not.
371 372 */
372 373 static int
373 374 hermon_rsrc_fexch_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc,
374 375 uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl)
375 376 {
376 377 hermon_fcoib_t *fcoib;
377 378 void *addr;
378 379 uint32_t fexch_qpn_base;
379 380 hermon_rsrc_pool_info_t *qpc_pool, *mpt_pool, *mtt_pool;
380 381 int flag, status;
381 382 hermon_rsrc_t mpt_hdl; /* temporary, just for icm_confirm */
382 383 hermon_rsrc_t mtt_hdl; /* temporary, just for icm_confirm */
383 384 uint_t portm1; /* hca_port_number - 1 */
384 385 uint_t nummtt;
385 386 vmem_t *vmp;
386 387
387 388 ASSERT(state != NULL);
388 389 ASSERT(hdl != NULL);
389 390
390 391 if ((state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_FC) == 0)
391 392 return (DDI_FAILURE);
392 393
393 394 portm1 = rsrc - HERMON_QPC_FEXCH_PORT1;
394 395 fcoib = &state->hs_fcoib;
395 396 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
396 397
397 398 /* Allocate from the FEXCH QP range */
398 399 vmp = fcoib->hfc_fexch_vmemp[portm1];
399 400 addr = vmem_xalloc(vmp, num, num, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
400 401 if (addr == NULL) {
401 402 return (DDI_FAILURE);
402 403 }
403 404 fexch_qpn_base = (uint32_t)((uintptr_t)addr -
404 405 fcoib->hfc_vmemstart + fcoib->hfc_fexch_base[portm1]);
405 406
406 407 /* ICM confirm for the FEXCH QP range */
407 408 qpc_pool = &state->hs_rsrc_hdl[HERMON_QPC];
408 409 hdl->hr_len = num << qpc_pool->rsrc_shift;
409 410 hdl->hr_addr = addr; /* used only for vmem_xfree */
410 411 hdl->hr_indx = fexch_qpn_base;
411 412
412 413 status = hermon_rsrc_hw_entry_icm_confirm(qpc_pool, num, hdl, 1);
413 414 if (status != DDI_SUCCESS) {
414 415 vmem_xfree(vmp, addr, num);
415 416 return (DDI_FAILURE);
416 417 }
417 418
418 419 /* ICM confirm for the Primary MKEYs (client side only) */
419 420 mpt_pool = &state->hs_rsrc_hdl[HERMON_DMPT];
420 421 mpt_hdl.hr_len = num << mpt_pool->rsrc_shift;
421 422 mpt_hdl.hr_addr = NULL;
422 423 mpt_hdl.hr_indx = fcoib->hfc_mpt_base[portm1] +
423 424 (fexch_qpn_base - fcoib->hfc_fexch_base[portm1]);
424 425
425 426 status = hermon_rsrc_hw_entry_icm_confirm(mpt_pool, num, &mpt_hdl, 0);
426 427 if (status != DDI_SUCCESS) {
427 428 status = hermon_rsrc_hw_entry_icm_free(qpc_pool, hdl, 1);
428 429 vmem_xfree(vmp, addr, num);
429 430 return (DDI_FAILURE);
430 431 }
431 432
432 433 /* ICM confirm for the MTTs of the Primary MKEYs (client side only) */
433 434 nummtt = fcoib->hfc_mtts_per_mpt;
434 435 num *= nummtt;
435 436 mtt_pool = &state->hs_rsrc_hdl[HERMON_MTT];
436 437 mtt_hdl.hr_len = num << mtt_pool->rsrc_shift;
437 438 mtt_hdl.hr_addr = NULL;
438 439 mtt_hdl.hr_indx = fcoib->hfc_mtt_base[portm1] +
439 440 (fexch_qpn_base - fcoib->hfc_fexch_base[portm1]) *
440 441 nummtt;
441 442
442 443 status = hermon_rsrc_hw_entry_icm_confirm(mtt_pool, num, &mtt_hdl, 0);
443 444 if (status != DDI_SUCCESS) {
444 445 vmem_xfree(vmp, addr, num);
445 446 return (DDI_FAILURE);
446 447 }
447 448 return (DDI_SUCCESS);
448 449 }
449 450
450 451 static void
451 452 hermon_rsrc_fexch_free(hermon_state_t *state, hermon_rsrc_t *hdl)
452 453 {
453 454 hermon_fcoib_t *fcoib;
454 455 uint_t portm1; /* hca_port_number - 1 */
455 456
456 457 ASSERT(state != NULL);
457 458 ASSERT(hdl != NULL);
458 459
459 460 portm1 = hdl->rsrc_type - HERMON_QPC_FEXCH_PORT1;
460 461 fcoib = &state->hs_fcoib;
461 462 vmem_xfree(fcoib->hfc_fexch_vmemp[portm1], hdl->hr_addr,
462 463 hdl->hr_len >> state->hs_rsrc_hdl[HERMON_QPC].rsrc_shift);
463 464 }
464 465
465 466 /*
466 467 * hermon_rsrc_rfci_alloc()
467 468 *
468 469 * Context: Can only be called from base context.
469 470 * The "sleepflag" parameter is used by all object allocators to
470 471 * determine whether to SLEEP for resources or not.
471 472 */
472 473 static int
473 474 hermon_rsrc_rfci_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc,
474 475 uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl)
475 476 {
476 477 hermon_fcoib_t *fcoib;
477 478 void *addr;
478 479 uint32_t rfci_qpn_base;
479 480 hermon_rsrc_pool_info_t *qpc_pool;
480 481 int flag, status;
481 482 uint_t portm1; /* hca_port_number - 1 */
482 483 vmem_t *vmp;
483 484
484 485 ASSERT(state != NULL);
485 486 ASSERT(hdl != NULL);
486 487
487 488 if ((state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_FC) == 0)
488 489 return (DDI_FAILURE);
489 490
490 491 portm1 = rsrc - HERMON_QPC_RFCI_PORT1;
491 492 fcoib = &state->hs_fcoib;
492 493 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
493 494
494 495 /* Allocate from the RFCI QP range */
495 496 vmp = fcoib->hfc_rfci_vmemp[portm1];
496 497 addr = vmem_xalloc(vmp, num, num, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
497 498 if (addr == NULL) {
498 499 return (DDI_FAILURE);
499 500 }
500 501 rfci_qpn_base = (uint32_t)((uintptr_t)addr -
501 502 fcoib->hfc_vmemstart + fcoib->hfc_rfci_base[portm1]);
502 503
503 504 /* ICM confirm for the RFCI QP */
504 505 qpc_pool = &state->hs_rsrc_hdl[HERMON_QPC];
505 506 hdl->hr_len = num << qpc_pool->rsrc_shift;
506 507 hdl->hr_addr = addr; /* used only for vmem_xfree */
507 508 hdl->hr_indx = rfci_qpn_base;
508 509
509 510 status = hermon_rsrc_hw_entry_icm_confirm(qpc_pool, num, hdl, 1);
510 511 if (status != DDI_SUCCESS) {
511 512 vmem_xfree(vmp, addr, num);
512 513 return (DDI_FAILURE);
513 514 }
514 515 return (DDI_SUCCESS);
515 516 }
516 517
517 518 static void
518 519 hermon_rsrc_rfci_free(hermon_state_t *state, hermon_rsrc_t *hdl)
519 520 {
520 521 hermon_fcoib_t *fcoib;
521 522 uint_t portm1; /* hca_port_number - 1 */
522 523
523 524 ASSERT(state != NULL);
524 525 ASSERT(hdl != NULL);
525 526
526 527 portm1 = hdl->rsrc_type - HERMON_QPC_RFCI_PORT1;
527 528 fcoib = &state->hs_fcoib;
528 529 vmem_xfree(fcoib->hfc_rfci_vmemp[portm1], hdl->hr_addr,
529 530 hdl->hr_len >> state->hs_rsrc_hdl[HERMON_QPC].rsrc_shift);
530 531 }
531 532
532 533
533 534 /*
534 535 * hermon_rsrc_free()
535 536 * Context: Can be called from interrupt or base context.
536 537 */
537 538 void
538 539 hermon_rsrc_free(hermon_state_t *state, hermon_rsrc_t **hdl)
539 540 {
540 541 hermon_rsrc_pool_info_t *rsrc_pool;
541 542
542 543 ASSERT(state != NULL);
543 544 ASSERT(hdl != NULL);
544 545
545 546 rsrc_pool = &state->hs_rsrc_hdl[(*hdl)->rsrc_type];
546 547 ASSERT(rsrc_pool != NULL);
547 548
548 549 /*
549 550 * Depending on resource type, call the appropriate free routine
550 551 */
551 552 switch (rsrc_pool->rsrc_type) {
552 553 case HERMON_IN_MBOX:
553 554 case HERMON_OUT_MBOX:
554 555 case HERMON_INTR_IN_MBOX:
555 556 case HERMON_INTR_OUT_MBOX:
556 557 hermon_rsrc_mbox_free(*hdl);
557 558 break;
558 559
559 560 case HERMON_QPC_FEXCH_PORT1:
560 561 case HERMON_QPC_FEXCH_PORT2:
561 562 hermon_rsrc_fexch_free(state, *hdl);
562 563 break;
563 564
564 565 case HERMON_QPC_RFCI_PORT1:
565 566 case HERMON_QPC_RFCI_PORT2:
566 567 hermon_rsrc_rfci_free(state, *hdl);
567 568 break;
568 569
569 570 case HERMON_QPC:
570 571 case HERMON_CQC:
571 572 case HERMON_SRQC:
572 573 case HERMON_EQC:
573 574 case HERMON_DMPT:
574 575 case HERMON_MCG:
575 576 case HERMON_MTT:
576 577 case HERMON_UARPG:
577 578 hermon_rsrc_hw_entry_free(rsrc_pool, *hdl);
578 579 break;
579 580
580 581 case HERMON_MRHDL:
581 582 case HERMON_EQHDL:
582 583 case HERMON_CQHDL:
583 584 case HERMON_SRQHDL:
584 585 case HERMON_AHHDL:
585 586 case HERMON_QPHDL:
586 587 case HERMON_REFCNT:
587 588 hermon_rsrc_swhdl_free(rsrc_pool, *hdl);
588 589 break;
589 590
590 591 case HERMON_PDHDL:
591 592 hermon_rsrc_pdhdl_free(rsrc_pool, *hdl);
592 593 break;
593 594
594 595 case HERMON_RDB:
595 596 case HERMON_ALTC:
596 597 case HERMON_AUXC:
597 598 case HERMON_CMPT_QPC:
598 599 case HERMON_CMPT_SRQC:
599 600 case HERMON_CMPT_CQC:
600 601 case HERMON_CMPT_EQC:
601 602 default:
602 603 cmn_err(CE_CONT, "!rsrc_type = 0x%x\n", rsrc_pool->rsrc_type);
603 604 break;
604 605 }
605 606
606 607 /*
607 608 * Free the special resource tracking structure, set the handle to
608 609 * NULL, and return.
609 610 */
610 611 kmem_cache_free(state->hs_rsrc_cache, *hdl);
611 612 *hdl = NULL;
612 613 }
613 614
614 615
615 616 /*
616 617 * hermon_rsrc_init_phase1()
617 618 *
618 619 * Completes the first phase of Hermon resource/configuration init.
619 620 * This involves creating the kmem_cache for the "hermon_rsrc_t"
620 621 * structs, allocating the space for the resource pool handles,
621 622 * and setting up the "Out" mailboxes.
622 623 *
623 624 * When this function completes, the Hermon driver is ready to
624 625 * post the following commands which return information only in the
625 626 * "Out" mailbox: QUERY_DDR, QUERY_FW, QUERY_DEV_LIM, and QUERY_ADAPTER
626 627 * If any of these commands are to be posted at this time, they must be
627 628 * done so only when "spinning" (as the outstanding command list and
628 629 * EQ setup code has not yet run)
629 630 *
630 631 * Context: Only called from attach() path context
631 632 */
632 633 int
633 634 hermon_rsrc_init_phase1(hermon_state_t *state)
634 635 {
635 636 hermon_rsrc_pool_info_t *rsrc_pool;
636 637 hermon_rsrc_mbox_info_t mbox_info;
637 638 hermon_rsrc_cleanup_level_t cleanup;
638 639 hermon_cfg_profile_t *cfgprof;
639 640 uint64_t num, size;
640 641 int status;
641 642 char *rsrc_name;
642 643
643 644 ASSERT(state != NULL);
644 645
645 646 /* This is where Phase 1 of resource initialization begins */
646 647 cleanup = HERMON_RSRC_CLEANUP_LEVEL0;
647 648
648 649 /* Build kmem cache name from Hermon instance */
649 650 rsrc_name = kmem_zalloc(HERMON_RSRC_NAME_MAXLEN, KM_SLEEP);
650 651 HERMON_RSRC_NAME(rsrc_name, HERMON_RSRC_CACHE);
651 652
652 653 /*
653 654 * Create the kmem_cache for "hermon_rsrc_t" structures
654 655 * (kmem_cache_create will SLEEP until successful)
655 656 */
656 657 state->hs_rsrc_cache = kmem_cache_create(rsrc_name,
657 658 sizeof (hermon_rsrc_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
658 659
659 660 /*
660 661 * Allocate an array of hermon_rsrc_pool_info_t's (used in all
661 662 * subsequent resource allocations)
662 663 */
663 664 state->hs_rsrc_hdl = kmem_zalloc(HERMON_NUM_RESOURCES *
664 665 sizeof (hermon_rsrc_pool_info_t), KM_SLEEP);
665 666
666 667 /* Pull in the configuration profile */
667 668 cfgprof = state->hs_cfg_profile;
668 669
669 670 /* Initialize the resource pool for "out" mailboxes */
670 671 num = ((uint64_t)1 << cfgprof->cp_log_num_outmbox);
671 672 size = ((uint64_t)1 << cfgprof->cp_log_outmbox_size);
672 673 rsrc_pool = &state->hs_rsrc_hdl[HERMON_OUT_MBOX];
673 674 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
674 675 rsrc_pool->rsrc_pool_size = (size * num);
675 676 rsrc_pool->rsrc_shift = cfgprof->cp_log_outmbox_size;
676 677 rsrc_pool->rsrc_quantum = (uint_t)size;
677 678 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
678 679 rsrc_pool->rsrc_state = state;
679 680 mbox_info.mbi_num = num;
680 681 mbox_info.mbi_size = size;
681 682 mbox_info.mbi_rsrcpool = rsrc_pool;
682 683 status = hermon_rsrc_mbox_init(state, &mbox_info);
683 684 if (status != DDI_SUCCESS) {
684 685 hermon_rsrc_fini(state, cleanup);
685 686 status = DDI_FAILURE;
686 687 goto rsrcinitp1_fail;
687 688 }
688 689 cleanup = HERMON_RSRC_CLEANUP_LEVEL1;
689 690
690 691 /* Initialize the mailbox list */
691 692 status = hermon_outmbox_list_init(state);
692 693 if (status != DDI_SUCCESS) {
693 694 hermon_rsrc_fini(state, cleanup);
694 695 status = DDI_FAILURE;
695 696 goto rsrcinitp1_fail;
696 697 }
697 698 cleanup = HERMON_RSRC_CLEANUP_LEVEL2;
698 699
699 700 /* Initialize the resource pool for "interrupt out" mailboxes */
700 701 num = ((uint64_t)1 << cfgprof->cp_log_num_intr_outmbox);
701 702 size = ((uint64_t)1 << cfgprof->cp_log_outmbox_size);
702 703 rsrc_pool = &state->hs_rsrc_hdl[HERMON_INTR_OUT_MBOX];
703 704 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
704 705 rsrc_pool->rsrc_pool_size = (size * num);
705 706 rsrc_pool->rsrc_shift = cfgprof->cp_log_outmbox_size;
706 707 rsrc_pool->rsrc_quantum = (uint_t)size;
707 708 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
708 709 rsrc_pool->rsrc_state = state;
709 710 mbox_info.mbi_num = num;
710 711 mbox_info.mbi_size = size;
711 712 mbox_info.mbi_rsrcpool = rsrc_pool;
712 713 status = hermon_rsrc_mbox_init(state, &mbox_info);
713 714 if (status != DDI_SUCCESS) {
714 715 hermon_rsrc_fini(state, cleanup);
715 716 status = DDI_FAILURE;
716 717 goto rsrcinitp1_fail;
717 718 }
718 719 cleanup = HERMON_RSRC_CLEANUP_LEVEL3;
719 720
720 721 /* Initialize the mailbox list */
721 722 status = hermon_intr_outmbox_list_init(state);
722 723 if (status != DDI_SUCCESS) {
723 724 hermon_rsrc_fini(state, cleanup);
724 725 status = DDI_FAILURE;
725 726 goto rsrcinitp1_fail;
726 727 }
727 728 cleanup = HERMON_RSRC_CLEANUP_LEVEL4;
728 729
729 730 /* Initialize the resource pool for "in" mailboxes */
730 731 num = ((uint64_t)1 << cfgprof->cp_log_num_inmbox);
731 732 size = ((uint64_t)1 << cfgprof->cp_log_inmbox_size);
732 733 rsrc_pool = &state->hs_rsrc_hdl[HERMON_IN_MBOX];
733 734 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
734 735 rsrc_pool->rsrc_pool_size = (size * num);
735 736 rsrc_pool->rsrc_shift = cfgprof->cp_log_inmbox_size;
736 737 rsrc_pool->rsrc_quantum = (uint_t)size;
737 738 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
738 739 rsrc_pool->rsrc_state = state;
739 740 mbox_info.mbi_num = num;
740 741 mbox_info.mbi_size = size;
741 742 mbox_info.mbi_rsrcpool = rsrc_pool;
742 743 status = hermon_rsrc_mbox_init(state, &mbox_info);
743 744 if (status != DDI_SUCCESS) {
744 745 hermon_rsrc_fini(state, cleanup);
745 746 status = DDI_FAILURE;
746 747 goto rsrcinitp1_fail;
747 748 }
748 749 cleanup = HERMON_RSRC_CLEANUP_LEVEL5;
749 750
750 751 /* Initialize the mailbox list */
751 752 status = hermon_inmbox_list_init(state);
752 753 if (status != DDI_SUCCESS) {
753 754 hermon_rsrc_fini(state, cleanup);
754 755 status = DDI_FAILURE;
755 756 goto rsrcinitp1_fail;
756 757 }
757 758 cleanup = HERMON_RSRC_CLEANUP_LEVEL6;
758 759
759 760 /* Initialize the resource pool for "interrupt in" mailboxes */
760 761 num = ((uint64_t)1 << cfgprof->cp_log_num_intr_inmbox);
761 762 size = ((uint64_t)1 << cfgprof->cp_log_inmbox_size);
762 763 rsrc_pool = &state->hs_rsrc_hdl[HERMON_INTR_IN_MBOX];
763 764 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
764 765 rsrc_pool->rsrc_pool_size = (size * num);
765 766 rsrc_pool->rsrc_shift = cfgprof->cp_log_inmbox_size;
766 767 rsrc_pool->rsrc_quantum = (uint_t)size;
767 768 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
768 769 rsrc_pool->rsrc_state = state;
769 770 mbox_info.mbi_num = num;
770 771 mbox_info.mbi_size = size;
771 772 mbox_info.mbi_rsrcpool = rsrc_pool;
772 773 status = hermon_rsrc_mbox_init(state, &mbox_info);
773 774 if (status != DDI_SUCCESS) {
774 775 hermon_rsrc_fini(state, cleanup);
775 776 status = DDI_FAILURE;
776 777 goto rsrcinitp1_fail;
777 778 }
778 779 cleanup = HERMON_RSRC_CLEANUP_LEVEL7;
779 780
780 781 /* Initialize the mailbox list */
781 782 status = hermon_intr_inmbox_list_init(state);
782 783 if (status != DDI_SUCCESS) {
783 784 hermon_rsrc_fini(state, cleanup);
784 785 status = DDI_FAILURE;
785 786 goto rsrcinitp1_fail;
786 787 }
787 788 cleanup = HERMON_RSRC_CLEANUP_PHASE1_COMPLETE;
788 789 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
789 790 return (DDI_SUCCESS);
790 791
791 792 rsrcinitp1_fail:
792 793 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
793 794 return (status);
794 795 }
795 796
796 797
/*
 * hermon_rsrc_init_phase2()
 *    Context: Only called from attach() path context
 *
 * Second phase of resource-pool setup; Phase 1 (mailbox pools/lists) must
 * already have completed.  In order, this initializes:
 *	- the vmem-backed pools for each ICM (in-context-memory) resource
 *	  (MTT, dMPT, QPC, CQC, SRQC, EQC)
 *	- the multicast group (MCG) table pool
 *	- the outstanding-command tracking list
 *	- the kmem-cache-backed software handle pools (PD, MR, EQ, CQ, SRQ,
 *	  AH, QP, reference counts)
 *	- the MCG software handle table
 *	- the UAR page pool
 * "cleanup" tracks how far initialization has progressed; on any failure
 * hermon_rsrc_fini() is called with that level to unwind exactly what was
 * set up, and DDI_FAILURE is returned.  Returns DDI_SUCCESS otherwise.
 */
int
hermon_rsrc_init_phase2(hermon_state_t *state)
{
	hermon_rsrc_sw_hdl_info_t	hdl_info;
	hermon_rsrc_hw_entry_info_t	entry_info;
	hermon_rsrc_pool_info_t		*rsrc_pool;
	hermon_rsrc_cleanup_level_t	cleanup, ncleanup;
	hermon_cfg_profile_t		*cfgprof;
	hermon_hw_querydevlim_t		*devlim;
	uint64_t			num, max, num_prealloc;
	uint_t				mcg_size, mcg_size_shift;
	int				i, status;
	char				*rsrc_name;

	ASSERT(state != NULL);

	/* Phase 2 initialization begins where Phase 1 left off */
	cleanup = HERMON_RSRC_CLEANUP_PHASE1_COMPLETE;

	/* Allocate the ICM resource name space */

	/* Build the ICM vmem arena names from Hermon instance */
	rsrc_name = kmem_zalloc(HERMON_RSRC_NAME_MAXLEN, KM_SLEEP);

	/*
	 * Initialize the resource pools for all objects that exist in
	 * context memory (ICM).  The ICM consists of context tables, each
	 * type of resource (QP, CQ, EQ, etc) having its own context table
	 * (QPC, CQC, EQC, etc...).
	 */
	cfgprof = state->hs_cfg_profile;
	devlim = &state->hs_devlim;

	/*
	 * Initialize the resource pools for each of the driver resources.
	 * With a few exceptions, these resources fall into the two categories
	 * of either hw_entries or sw_entries.
	 */

	/*
	 * Initialize the resource pools for ICM (hardware) types first.
	 * These resources are managed through vmem arenas, which are
	 * created via the rsrc pool initialization routine.  Note that,
	 * due to further calculations, the MCG resource pool is
	 * initialized separately (below).
	 */
	for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {

		rsrc_pool = &state->hs_rsrc_hdl[i];
		rsrc_pool->rsrc_type = i;
		rsrc_pool->rsrc_state = state;

		/*
		 * Set the resource-specific attributes: the maximum and
		 * reserved counts come from QUERY_DEV_LIM, the vmem arena
		 * name from the per-instance name macro, and "ncleanup" is
		 * the cleanup level to record once this pool succeeds.
		 */
		switch (i) {
		case HERMON_MTT:
			max = ((uint64_t)1 << devlim->log_max_mtt);
			num_prealloc = ((uint64_t)1 << devlim->log_rsvd_mtt);
			HERMON_RSRC_NAME(rsrc_name, HERMON_MTT_VMEM);
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL9;
			break;

		case HERMON_DMPT:
			max = ((uint64_t)1 << devlim->log_max_dmpt);
			num_prealloc = ((uint64_t)1 << devlim->log_rsvd_dmpt);
			HERMON_RSRC_NAME(rsrc_name, HERMON_DMPT_VMEM);
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL10;
			break;

		case HERMON_QPC:
			max = ((uint64_t)1 << devlim->log_max_qp);
			num_prealloc = ((uint64_t)1 << devlim->log_rsvd_qp);
			HERMON_RSRC_NAME(rsrc_name, HERMON_QPC_VMEM);
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL11;
			break;

		case HERMON_CQC:
			max = ((uint64_t)1 << devlim->log_max_cq);
			num_prealloc = ((uint64_t)1 << devlim->log_rsvd_cq);
			HERMON_RSRC_NAME(rsrc_name, HERMON_CQC_VMEM);
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL13;
			break;

		case HERMON_SRQC:
			max = ((uint64_t)1 << devlim->log_max_srq);
			num_prealloc = ((uint64_t)1 << devlim->log_rsvd_srq);
			HERMON_RSRC_NAME(rsrc_name, HERMON_SRQC_VMEM);
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL16;
			break;

		case HERMON_EQC:
			max = ((uint64_t)1 << devlim->log_max_eq);
			/* EQ reservation count was computed earlier in attach */
			num_prealloc = state->hs_rsvd_eqs;
			HERMON_RSRC_NAME(rsrc_name, HERMON_EQC_VMEM);
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL18;
			break;

		case HERMON_MCG:	/* handled below */
		case HERMON_AUXC:
		case HERMON_ALTC:
		case HERMON_RDB:
		case HERMON_CMPT_QPC:
		case HERMON_CMPT_SRQC:
		case HERMON_CMPT_CQC:
		case HERMON_CMPT_EQC:
		default:
			/* We don't need to initialize this rsrc here. */
			continue;
		}

		/*
		 * Set the common values for all resource pools.  Pool size,
		 * alignment, per-object size, and shift all come from the
		 * ICM layout computed earlier in attach.
		 */
		rsrc_pool->rsrc_state = state;
		rsrc_pool->rsrc_loc = HERMON_IN_ICM;
		rsrc_pool->rsrc_pool_size = state->hs_icm[i].table_size;
		rsrc_pool->rsrc_align = state->hs_icm[i].table_size;
		rsrc_pool->rsrc_shift = state->hs_icm[i].log_object_size;
		rsrc_pool->rsrc_quantum = state->hs_icm[i].object_size;

		/* Now, initialize the entry_info and call the init routine */
		entry_info.hwi_num = state->hs_icm[i].num_entries;
		entry_info.hwi_max = max;
		entry_info.hwi_prealloc = num_prealloc;
		entry_info.hwi_rsrcpool = rsrc_pool;
		entry_info.hwi_rsrcname = rsrc_name;
		status = hermon_rsrc_hw_entries_init(state, &entry_info);
		if (status != DDI_SUCCESS) {
			hermon_rsrc_fini(state, cleanup);
			status = DDI_FAILURE;
			goto rsrcinitp2_fail;
		}
		cleanup = ncleanup;
	}

	/*
	 * Initialize the Multicast Group (MCG) entries. First, calculate
	 * (and validate) the size of the MCGs.
	 */
	status = hermon_rsrc_mcg_entry_get_size(state, &mcg_size_shift);
	if (status != DDI_SUCCESS) {
		hermon_rsrc_fini(state, cleanup);
		status = DDI_FAILURE;
		goto rsrcinitp2_fail;
	}
	mcg_size = HERMON_MCGMEM_SZ(state);

	/*
	 * Initialize the resource pool for the MCG table entries. Notice
	 * that the number of MCGs is configurable. Note also that a certain
	 * number of MCGs must be set aside for Hermon firmware use (they
	 * correspond to the number of MCGs used by the internal hash
	 * function).
	 */
	num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
	max = ((uint64_t)1 << devlim->log_max_mcg);
	num_prealloc = ((uint64_t)1 << cfgprof->cp_log_num_mcg_hash);
	rsrc_pool = &state->hs_rsrc_hdl[HERMON_MCG];
	rsrc_pool->rsrc_loc = HERMON_IN_ICM;
	rsrc_pool->rsrc_pool_size = (mcg_size * num);
	rsrc_pool->rsrc_shift = mcg_size_shift;
	rsrc_pool->rsrc_quantum = mcg_size;
	rsrc_pool->rsrc_align = (mcg_size * num);
	rsrc_pool->rsrc_state = state;
	HERMON_RSRC_NAME(rsrc_name, HERMON_MCG_VMEM);
	entry_info.hwi_num = num;
	entry_info.hwi_max = max;
	entry_info.hwi_prealloc = num_prealloc;
	entry_info.hwi_rsrcpool = rsrc_pool;
	entry_info.hwi_rsrcname = rsrc_name;
	status = hermon_rsrc_hw_entries_init(state, &entry_info);
	if (status != DDI_SUCCESS) {
		hermon_rsrc_fini(state, cleanup);
		status = DDI_FAILURE;
		goto rsrcinitp2_fail;
	}
	cleanup = HERMON_RSRC_CLEANUP_LEVEL19;

	/*
	 * Initialize the full range of ICM for the AUXC resource.
	 * This is done because its size is so small, about 1 byte per QP.
	 */

	/*
	 * Initialize the Hermon command handling interfaces. This step
	 * sets up the outstanding command tracking mechanism for easy access
	 * and fast allocation (see hermon_cmd.c for more details).
	 */
	status = hermon_outstanding_cmdlist_init(state);
	if (status != DDI_SUCCESS) {
		hermon_rsrc_fini(state, cleanup);
		status = DDI_FAILURE;
		goto rsrcinitp2_fail;
	}
	cleanup = HERMON_RSRC_CLEANUP_LEVEL20;

	/* Initialize the resource pool and vmem arena for the PD handles */
	rsrc_pool = &state->hs_rsrc_hdl[HERMON_PDHDL];
	rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
	rsrc_pool->rsrc_quantum = sizeof (struct hermon_sw_pd_s);
	rsrc_pool->rsrc_state = state;
	HERMON_RSRC_NAME(rsrc_name, HERMON_PDHDL_CACHE);
	hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_pd);
	hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_pd);
	hdl_info.swi_rsrcpool = rsrc_pool;
	hdl_info.swi_constructor = hermon_rsrc_pdhdl_constructor;
	hdl_info.swi_destructor = hermon_rsrc_pdhdl_destructor;
	hdl_info.swi_rsrcname = rsrc_name;
	hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
	status = hermon_rsrc_pd_handles_init(state, &hdl_info);
	if (status != DDI_SUCCESS) {
		hermon_rsrc_fini(state, cleanup);
		status = DDI_FAILURE;
		goto rsrcinitp2_fail;
	}
	cleanup = HERMON_RSRC_CLEANUP_LEVEL21;

	/*
	 * Initialize the resource pools for the rest of the software handles.
	 * This includes MR handles, EQ handles, QP handles, etc. These
	 * objects are almost entirely managed using kmem_cache routines,
	 * and do not utilize a vmem arena.
	 */
	for (i = HERMON_NUM_ICM_RESOURCES; i < HERMON_NUM_RESOURCES; i++) {
		rsrc_pool = &state->hs_rsrc_hdl[i];
		rsrc_pool->rsrc_type = i;

		/* Set the resource-specific attributes */
		switch (i) {
		case HERMON_MRHDL:
			rsrc_pool->rsrc_quantum =
			    sizeof (struct hermon_sw_mr_s);
			HERMON_RSRC_NAME(rsrc_name, HERMON_MRHDL_CACHE);
			/* one handle per dMPT plus one per cMPT */
			hdl_info.swi_num =
			    ((uint64_t)1 << cfgprof->cp_log_num_dmpt) +
			    ((uint64_t)1 << cfgprof->cp_log_num_cmpt);
			hdl_info.swi_max =
			    ((uint64_t)1 << cfgprof->cp_log_num_dmpt) +
			    ((uint64_t)1 << cfgprof->cp_log_num_cmpt);
			hdl_info.swi_constructor =
			    hermon_rsrc_mrhdl_constructor;
			hdl_info.swi_destructor = hermon_rsrc_mrhdl_destructor;
			hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL22;
			break;

		case HERMON_EQHDL:
			rsrc_pool->rsrc_quantum =
			    sizeof (struct hermon_sw_eq_s);
			HERMON_RSRC_NAME(rsrc_name, HERMON_EQHDL_CACHE);
			hdl_info.swi_num = HERMON_NUM_EQ;
			hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_eq);
			hdl_info.swi_constructor = NULL;
			hdl_info.swi_destructor = NULL;
			hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL23;
			break;

		case HERMON_CQHDL:
			rsrc_pool->rsrc_quantum =
			    sizeof (struct hermon_sw_cq_s);
			HERMON_RSRC_NAME(rsrc_name, HERMON_CQHDL_CACHE);
			hdl_info.swi_num =
			    (uint64_t)1 << cfgprof->cp_log_num_cq;
			hdl_info.swi_max = (uint64_t)1 << devlim->log_max_cq;
			hdl_info.swi_constructor =
			    hermon_rsrc_cqhdl_constructor;
			hdl_info.swi_destructor = hermon_rsrc_cqhdl_destructor;
			hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
			hdl_info.swi_prealloc_sz = sizeof (hermon_cqhdl_t);
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL24;
			break;

		case HERMON_SRQHDL:
			rsrc_pool->rsrc_quantum =
			    sizeof (struct hermon_sw_srq_s);
			HERMON_RSRC_NAME(rsrc_name, HERMON_SRQHDL_CACHE);
			hdl_info.swi_num =
			    (uint64_t)1 << cfgprof->cp_log_num_srq;
			hdl_info.swi_max = (uint64_t)1 << devlim->log_max_srq;
			hdl_info.swi_constructor =
			    hermon_rsrc_srqhdl_constructor;
			hdl_info.swi_destructor = hermon_rsrc_srqhdl_destructor;
			hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
			hdl_info.swi_prealloc_sz = sizeof (hermon_srqhdl_t);
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL25;
			break;

		case HERMON_AHHDL:
			rsrc_pool->rsrc_quantum =
			    sizeof (struct hermon_sw_ah_s);
			HERMON_RSRC_NAME(rsrc_name, HERMON_AHHDL_CACHE);
			hdl_info.swi_num =
			    (uint64_t)1 << cfgprof->cp_log_num_ah;
			hdl_info.swi_max = HERMON_NUM_AH;
			hdl_info.swi_constructor =
			    hermon_rsrc_ahhdl_constructor;
			hdl_info.swi_destructor = hermon_rsrc_ahhdl_destructor;
			hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL26;
			break;

		case HERMON_QPHDL:
			rsrc_pool->rsrc_quantum =
			    sizeof (struct hermon_sw_qp_s);
			HERMON_RSRC_NAME(rsrc_name, HERMON_QPHDL_CACHE);
			hdl_info.swi_num =
			    (uint64_t)1 << cfgprof->cp_log_num_qp;
			hdl_info.swi_max = (uint64_t)1 << devlim->log_max_qp;
			hdl_info.swi_constructor =
			    hermon_rsrc_qphdl_constructor;
			hdl_info.swi_destructor = hermon_rsrc_qphdl_destructor;
			hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
			hdl_info.swi_prealloc_sz = sizeof (hermon_qphdl_t);
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL27;
			break;

		case HERMON_REFCNT:
			rsrc_pool->rsrc_quantum = sizeof (hermon_sw_refcnt_t);
			HERMON_RSRC_NAME(rsrc_name, HERMON_REFCNT_CACHE);
			hdl_info.swi_num =
			    (uint64_t)1 << cfgprof->cp_log_num_dmpt;
			hdl_info.swi_max = (uint64_t)1 << devlim->log_max_dmpt;
			hdl_info.swi_constructor =
			    hermon_rsrc_refcnt_constructor;
			hdl_info.swi_destructor = hermon_rsrc_refcnt_destructor;
			hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL28;
			break;

		default:
			continue;
		}

		/* Set the common values and call the init routine */
		rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
		rsrc_pool->rsrc_state = state;
		hdl_info.swi_rsrcpool = rsrc_pool;
		hdl_info.swi_rsrcname = rsrc_name;
		status = hermon_rsrc_sw_handles_init(state, &hdl_info);
		if (status != DDI_SUCCESS) {
			hermon_rsrc_fini(state, cleanup);
			status = DDI_FAILURE;
			goto rsrcinitp2_fail;
		}
		cleanup = ncleanup;
	}

	/*
	 * Initialize a resource pool for the MCG handles. Notice that for
	 * these MCG handles, we are allocating a table of structures (used to
	 * keep track of the MCG entries that are being written to hardware
	 * and to speed up multicast attach/detach operations).
	 */
	hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
	hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_mcg);
	hdl_info.swi_flags = HERMON_SWHDL_TABLE_INIT;
	hdl_info.swi_prealloc_sz = sizeof (struct hermon_sw_mcg_list_s);
	status = hermon_rsrc_sw_handles_init(state, &hdl_info);
	if (status != DDI_SUCCESS) {
		hermon_rsrc_fini(state, cleanup);
		status = DDI_FAILURE;
		goto rsrcinitp2_fail;
	}
	state->hs_mcghdl = hdl_info.swi_table_ptr;
	/*
	 * NOTE(review): hermon_rsrc_fini() frees this MCG handle table in
	 * its LEVEL30 case, but only LEVEL29 is recorded here; verify that
	 * a failure in the UAR setup below does not leak the table.
	 */
	cleanup = HERMON_RSRC_CLEANUP_LEVEL29;

	/*
	 * Last, initialize the resource pool for the UAR pages, which contain
	 * the hardware's doorbell registers. Each process supported in User
	 * Mode is assigned a UAR page. Also coming from this pool are the
	 * kernel-assigned UAR page, and any hardware-reserved pages. Note
	 * that the number of UAR pages is configurable, the value must be less
	 * than the maximum value (obtained from the QUERY_DEV_LIM command) or
	 * the initialization will fail. Note also that we assign the base
	 * address of the UAR BAR to the rsrc_start parameter.
	 */
	num = ((uint64_t)1 << cfgprof->cp_log_num_uar);
	max = num;
	/* max() here is the DDI macro, not the local variable above */
	num_prealloc = max(devlim->num_rsvd_uar, 128);
	rsrc_pool = &state->hs_rsrc_hdl[HERMON_UARPG];
	rsrc_pool->rsrc_loc = HERMON_IN_UAR;
	rsrc_pool->rsrc_pool_size = (num << PAGESHIFT);
	rsrc_pool->rsrc_shift = PAGESHIFT;
	rsrc_pool->rsrc_quantum = (uint_t)PAGESIZE;
	rsrc_pool->rsrc_align = PAGESIZE;
	rsrc_pool->rsrc_state = state;
	rsrc_pool->rsrc_start = (void *)state->hs_reg_uar_baseaddr;
	HERMON_RSRC_NAME(rsrc_name, HERMON_UAR_PAGE_VMEM_ATTCH);
	entry_info.hwi_num = num;
	entry_info.hwi_max = max;
	entry_info.hwi_prealloc = num_prealloc;
	entry_info.hwi_rsrcpool = rsrc_pool;
	entry_info.hwi_rsrcname = rsrc_name;
	status = hermon_rsrc_hw_entries_init(state, &entry_info);
	if (status != DDI_SUCCESS) {
		hermon_rsrc_fini(state, cleanup);
		status = DDI_FAILURE;
		goto rsrcinitp2_fail;
	}

	cleanup = HERMON_RSRC_CLEANUP_ALL;

	kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
	return (DDI_SUCCESS);

rsrcinitp2_fail:
	kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
	return (status);
}
1208 1209
1209 1210
/*
 * hermon_rsrc_fini()
 *    Context: Only called from attach() and/or detach() path contexts
 *
 * Unwind resource-pool initialization down from the given cleanup level.
 * "clean" records how far hermon_rsrc_init_phase1()/phase2() progressed;
 * each case deliberately falls through to the next lower level so that a
 * single call tears down everything initialized up to that point.  The
 * case order is the reverse of initialization order and must be kept in
 * sync with the init routines above.
 */
void
hermon_rsrc_fini(hermon_state_t *state, hermon_rsrc_cleanup_level_t clean)
{
	hermon_rsrc_sw_hdl_info_t	hdl_info;
	hermon_rsrc_hw_entry_info_t	entry_info;
	hermon_rsrc_mbox_info_t		mbox_info;
	hermon_cfg_profile_t		*cfgprof;

	ASSERT(state != NULL);

	cfgprof = state->hs_cfg_profile;

	/*
	 * If init code above is shortened up (see comments), then we
	 * need to establish how to safely and simply clean up from any
	 * given failure point. Flags, maybe...
	 */

	switch (clean) {
	/*
	 * If we add more resources that need to be cleaned up here, we should
	 * ensure that HERMON_RSRC_CLEANUP_ALL is still the first entry (i.e.
	 * corresponds to the last resource allocated).
	 */

	case HERMON_RSRC_CLEANUP_ALL:
	case HERMON_RSRC_CLEANUP_LEVEL31:
		/* Cleanup the UAR page resource pool, first the dbr pages */
		if (state->hs_kern_dbr) {
			hermon_dbr_kern_free(state);
			state->hs_kern_dbr = NULL;
		}

		/* ... and then the pool itself */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_UARPG];
		hermon_rsrc_hw_entries_fini(state, &entry_info);

		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL30:
		/* Cleanup the central MCG handle pointers list */
		hdl_info.swi_rsrcpool = NULL;
		hdl_info.swi_table_ptr = state->hs_mcghdl;
		hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
		hdl_info.swi_prealloc_sz = sizeof (struct hermon_sw_mcg_list_s);
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL29:
		/* Cleanup the reference count resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_REFCNT];
		hdl_info.swi_table_ptr = NULL;
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL28:
		/* Cleanup the QP handle resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_QPHDL];
		hdl_info.swi_table_ptr = NULL;
		hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_qp);
		hdl_info.swi_prealloc_sz = sizeof (hermon_qphdl_t);
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */
	case HERMON_RSRC_CLEANUP_LEVEL27:
		/* Cleanup the address handle resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_AHHDL];
		hdl_info.swi_table_ptr = NULL;
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL26:
		/* Cleanup the SRQ handle resource pool. */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_SRQHDL];
		hdl_info.swi_table_ptr = NULL;
		hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_srq);
		hdl_info.swi_prealloc_sz = sizeof (hermon_srqhdl_t);
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL25:
		/* Cleanup the CQ handle resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CQHDL];
		hdl_info.swi_table_ptr = NULL;
		hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_cq);
		hdl_info.swi_prealloc_sz = sizeof (hermon_cqhdl_t);
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL24:
		/* Cleanup the EQ handle resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_EQHDL];
		hdl_info.swi_table_ptr = NULL;
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL23:
		/* Cleanup the MR handle resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MRHDL];
		hdl_info.swi_table_ptr = NULL;
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL22:
		/* Cleanup the PD handle resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_PDHDL];
		hdl_info.swi_table_ptr = NULL;
		hermon_rsrc_pd_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL21:
		/* Currently unused - FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL20:
		/* Cleanup the outstanding command list */
		hermon_outstanding_cmdlist_fini(state);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL19:
		/* Cleanup the EQC table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_EQC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL18:
		/* Cleanup the MCG table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MCG];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL17:
		/* Currently Unused - fallthrough */
	case HERMON_RSRC_CLEANUP_LEVEL16:
		/* Cleanup the SRQC table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_SRQC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL15:
		/* Cleanup the AUXC table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_AUXC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL14:
		/* Cleanup the ALTC table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_ALTC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL13:
		/* Cleanup the CQC table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CQC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL12:
		/* Cleanup the RDB table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_RDB];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL11:
		/* Cleanup the QPC table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_QPC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL10EQ:
		/* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_EQC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL10CQ:
		/* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_CQC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL10SRQ:
		/* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_SRQC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL10QP:
		/* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_QPC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL10:
		/* Cleanup the dMPT table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_DMPT];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL9:
		/* Cleanup the MTT table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MTT];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		break;

	/*
	 * The cleanup below comes from the "Phase 1" initialization step.
	 * (see hermon_rsrc_init_phase1() above)
	 */
	case HERMON_RSRC_CLEANUP_PHASE1_COMPLETE:
		/* Cleanup the "In" mailbox list */
		hermon_intr_inmbox_list_fini(state);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL7:
		/* Cleanup the interrupt "In" mailbox resource pool */
		mbox_info.mbi_rsrcpool =
		    &state->hs_rsrc_hdl[HERMON_INTR_IN_MBOX];
		hermon_rsrc_mbox_fini(state, &mbox_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL6:
		/* Cleanup the "In" mailbox list */
		hermon_inmbox_list_fini(state);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL5:
		/* Cleanup the "In" mailbox resource pool */
		mbox_info.mbi_rsrcpool = &state->hs_rsrc_hdl[HERMON_IN_MBOX];
		hermon_rsrc_mbox_fini(state, &mbox_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL4:
		/* Cleanup the interrupt "Out" mailbox list */
		hermon_intr_outmbox_list_fini(state);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL3:
		/* Cleanup the "Out" mailbox resource pool */
		mbox_info.mbi_rsrcpool =
		    &state->hs_rsrc_hdl[HERMON_INTR_OUT_MBOX];
		hermon_rsrc_mbox_fini(state, &mbox_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL2:
		/* Cleanup the "Out" mailbox list */
		hermon_outmbox_list_fini(state);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL1:
		/* Cleanup the "Out" mailbox resource pool */
		mbox_info.mbi_rsrcpool = &state->hs_rsrc_hdl[HERMON_OUT_MBOX];
		hermon_rsrc_mbox_fini(state, &mbox_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL0:
		/* Free the array of hermon_rsrc_pool_info_t's */

		kmem_free(state->hs_rsrc_hdl, HERMON_NUM_RESOURCES *
		    sizeof (hermon_rsrc_pool_info_t));

		kmem_cache_destroy(state->hs_rsrc_cache);
		break;

	default:
		HERMON_WARNING(state, "unexpected resource cleanup level");
		break;
	}
}
1481 1482
1482 1483
1483 1484 /*
1484 1485 * hermon_rsrc_mbox_init()
1485 1486 * Context: Only called from attach() path context
1486 1487 */
1487 1488 static int
1488 1489 hermon_rsrc_mbox_init(hermon_state_t *state, hermon_rsrc_mbox_info_t *info)
1489 1490 {
1490 1491 hermon_rsrc_pool_info_t *rsrc_pool;
1491 1492 hermon_rsrc_priv_mbox_t *priv;
1492 1493
1493 1494 ASSERT(state != NULL);
1494 1495 ASSERT(info != NULL);
1495 1496
1496 1497 rsrc_pool = info->mbi_rsrcpool;
1497 1498 ASSERT(rsrc_pool != NULL);
1498 1499
1499 1500 /* Allocate and initialize mailbox private structure */
1500 1501 priv = kmem_zalloc(sizeof (hermon_rsrc_priv_mbox_t), KM_SLEEP);
1501 1502 priv->pmb_dip = state->hs_dip;
1502 1503 priv->pmb_devaccattr = state->hs_reg_accattr;
1503 1504 priv->pmb_xfer_mode = DDI_DMA_CONSISTENT;
1504 1505
1505 1506 /*
1506 1507 * Initialize many of the default DMA attributes. Then set alignment
1507 1508 * and scatter-gather restrictions specific for mailbox memory.
1508 1509 */
1509 1510 hermon_dma_attr_init(state, &priv->pmb_dmaattr);
1510 1511 priv->pmb_dmaattr.dma_attr_align = HERMON_MBOX_ALIGN;
1511 1512 priv->pmb_dmaattr.dma_attr_sgllen = 1;
1512 1513 priv->pmb_dmaattr.dma_attr_flags = 0;
1513 1514 rsrc_pool->rsrc_private = priv;
1514 1515
1515 1516 ASSERT(rsrc_pool->rsrc_loc == HERMON_IN_SYSMEM);
1516 1517
1517 1518 rsrc_pool->rsrc_start = NULL;
1518 1519 rsrc_pool->rsrc_vmp = NULL;
1519 1520
1520 1521 return (DDI_SUCCESS);
1521 1522 }
1522 1523
1523 1524
1524 1525 /*
1525 1526 * hermon_rsrc_mbox_fini()
1526 1527 * Context: Only called from attach() and/or detach() path contexts
1527 1528 */
1528 1529 /* ARGSUSED */
1529 1530 static void
1530 1531 hermon_rsrc_mbox_fini(hermon_state_t *state, hermon_rsrc_mbox_info_t *info)
1531 1532 {
1532 1533 hermon_rsrc_pool_info_t *rsrc_pool;
1533 1534
1534 1535 ASSERT(state != NULL);
1535 1536 ASSERT(info != NULL);
1536 1537
1537 1538 rsrc_pool = info->mbi_rsrcpool;
1538 1539 ASSERT(rsrc_pool != NULL);
1539 1540
1540 1541 /* Free up the private struct */
1541 1542 kmem_free(rsrc_pool->rsrc_private, sizeof (hermon_rsrc_priv_mbox_t));
1542 1543 }
1543 1544
1544 1545
1545 1546 /*
1546 1547 * hermon_rsrc_hw_entries_init()
1547 1548 * Context: Only called from attach() path context
1548 1549 */
1549 1550 int
1550 1551 hermon_rsrc_hw_entries_init(hermon_state_t *state,
1551 1552 hermon_rsrc_hw_entry_info_t *info)
1552 1553 {
1553 1554 hermon_rsrc_pool_info_t *rsrc_pool;
1554 1555 hermon_rsrc_t *rsvd_rsrc = NULL;
1555 1556 vmem_t *vmp;
1556 1557 uint64_t num_hwentry, max_hwentry, num_prealloc;
1557 1558 int status;
1558 1559
1559 1560 ASSERT(state != NULL);
1560 1561 ASSERT(info != NULL);
1561 1562
1562 1563 rsrc_pool = info->hwi_rsrcpool;
1563 1564 ASSERT(rsrc_pool != NULL);
1564 1565 num_hwentry = info->hwi_num;
1565 1566 max_hwentry = info->hwi_max;
1566 1567 num_prealloc = info->hwi_prealloc;
1567 1568
1568 1569 if (hermon_rsrc_verbose) {
1569 1570 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init: "
1570 1571 "rsrc_type (0x%x) num (%llx) max (0x%llx) prealloc "
1571 1572 "(0x%llx)", rsrc_pool->rsrc_type, (longlong_t)num_hwentry,
1572 1573 (longlong_t)max_hwentry, (longlong_t)num_prealloc);
1573 1574 }
1574 1575
1575 1576 /* Make sure number of HW entries makes sense */
1576 1577 if (num_hwentry > max_hwentry) {
1577 1578 return (DDI_FAILURE);
1578 1579 }
1579 1580
1580 1581 /* Set this pool's rsrc_start from the initial ICM allocation */
1581 1582 if (rsrc_pool->rsrc_start == 0) {
1582 1583
1583 1584 /* use a ROUND value that works on both 32 and 64-bit kernels */
1584 1585 rsrc_pool->rsrc_start = (void *)(uintptr_t)0x10000000;
1585 1586
1586 1587 if (hermon_rsrc_verbose) {
1587 1588 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:"
1588 1589 " rsrc_type (0x%x) rsrc_start set (0x%lx)",
1589 1590 rsrc_pool->rsrc_type, rsrc_pool->rsrc_start);
1590 1591 }
1591 1592 }
1592 1593
1593 1594 /*
1594 1595 * Create new vmem arena for the HW entries table if rsrc_quantum
1595 1596 * is non-zero. Otherwise if rsrc_quantum is zero, then these HW
1596 1597 * entries are not going to be dynamically allocatable (i.e. they
1597 1598 * won't be allocated/freed through hermon_rsrc_alloc/free). This
1598 1599 * latter option is used for both ALTC and CMPT resources which
1599 1600 * are managed by hardware.
1600 1601 */
1601 1602 if (rsrc_pool->rsrc_quantum != 0) {
1602 1603 vmp = vmem_create(info->hwi_rsrcname,
1603 1604 (void *)(uintptr_t)rsrc_pool->rsrc_start,
1604 1605 rsrc_pool->rsrc_pool_size, rsrc_pool->rsrc_quantum,
1605 1606 NULL, NULL, NULL, 0, VM_SLEEP);
1606 1607 if (vmp == NULL) {
1607 1608 /* failed to create vmem arena */
1608 1609 return (DDI_FAILURE);
1609 1610 }
1610 1611 rsrc_pool->rsrc_vmp = vmp;
1611 1612 if (hermon_rsrc_verbose) {
1612 1613 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:"
1613 1614 " rsrc_type (0x%x) created vmem arena for rsrc",
1614 1615 rsrc_pool->rsrc_type);
1615 1616 }
1616 1617 } else {
1617 1618 /* we do not require a vmem arena */
1618 1619 rsrc_pool->rsrc_vmp = NULL;
1619 1620 if (hermon_rsrc_verbose) {
1620 1621 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:"
1621 1622 " rsrc_type (0x%x) vmem arena not required",
1622 1623 rsrc_pool->rsrc_type);
1623 1624 }
1624 1625 }
1625 1626
1626 1627 /* Allocate hardware reserved resources, if any */
1627 1628 if (num_prealloc != 0) {
1628 1629 status = hermon_rsrc_alloc(state, rsrc_pool->rsrc_type,
1629 1630 num_prealloc, HERMON_SLEEP, &rsvd_rsrc);
1630 1631 if (status != DDI_SUCCESS) {
1631 1632 /* unable to preallocate the reserved entries */
1632 1633 if (rsrc_pool->rsrc_vmp != NULL) {
1633 1634 vmem_destroy(rsrc_pool->rsrc_vmp);
1634 1635 }
1635 1636 return (DDI_FAILURE);
1636 1637 }
1637 1638 }
1638 1639 rsrc_pool->rsrc_private = rsvd_rsrc;
1639 1640
1640 1641 return (DDI_SUCCESS);
1641 1642 }
1642 1643
1643 1644
1644 1645 /*
1645 1646 * hermon_rsrc_hw_entries_fini()
1646 1647 * Context: Only called from attach() and/or detach() path contexts
1647 1648 */
1648 1649 void
1649 1650 hermon_rsrc_hw_entries_fini(hermon_state_t *state,
1650 1651 hermon_rsrc_hw_entry_info_t *info)
1651 1652 {
1652 1653 hermon_rsrc_pool_info_t *rsrc_pool;
1653 1654 hermon_rsrc_t *rsvd_rsrc;
1654 1655
1655 1656 ASSERT(state != NULL);
1656 1657 ASSERT(info != NULL);
1657 1658
1658 1659 rsrc_pool = info->hwi_rsrcpool;
1659 1660 ASSERT(rsrc_pool != NULL);
1660 1661
1661 1662 /* Free up any "reserved" (i.e. preallocated) HW entries */
1662 1663 rsvd_rsrc = (hermon_rsrc_t *)rsrc_pool->rsrc_private;
1663 1664 if (rsvd_rsrc != NULL) {
1664 1665 hermon_rsrc_free(state, &rsvd_rsrc);
1665 1666 }
1666 1667
1667 1668 /*
1668 1669 * If we've actually setup a vmem arena for the HW entries, then
1669 1670 * destroy it now
1670 1671 */
1671 1672 if (rsrc_pool->rsrc_vmp != NULL) {
1672 1673 vmem_destroy(rsrc_pool->rsrc_vmp);
1673 1674 }
1674 1675 }
1675 1676
1676 1677
1677 1678 /*
1678 1679 * hermon_rsrc_sw_handles_init()
1679 1680 * Context: Only called from attach() path context
1680 1681 */
1681 1682 /* ARGSUSED */
1682 1683 static int
1683 1684 hermon_rsrc_sw_handles_init(hermon_state_t *state,
1684 1685 hermon_rsrc_sw_hdl_info_t *info)
1685 1686 {
1686 1687 hermon_rsrc_pool_info_t *rsrc_pool;
1687 1688 uint64_t num_swhdl, max_swhdl, prealloc_sz;
1688 1689
1689 1690 ASSERT(state != NULL);
1690 1691 ASSERT(info != NULL);
1691 1692
1692 1693 rsrc_pool = info->swi_rsrcpool;
1693 1694 ASSERT(rsrc_pool != NULL);
1694 1695 num_swhdl = info->swi_num;
1695 1696 max_swhdl = info->swi_max;
1696 1697 prealloc_sz = info->swi_prealloc_sz;
1697 1698
1698 1699
1699 1700 /* Make sure number of SW handles makes sense */
1700 1701 if (num_swhdl > max_swhdl) {
1701 1702 return (DDI_FAILURE);
1702 1703 }
1703 1704
1704 1705 /*
1705 1706 * Depending on the flags parameter, create a kmem_cache for some
1706 1707 * number of software handle structures. Note: kmem_cache_create()
1707 1708 * will SLEEP until successful.
1708 1709 */
1709 1710 if (info->swi_flags & HERMON_SWHDL_KMEMCACHE_INIT) {
1710 1711 rsrc_pool->rsrc_private = kmem_cache_create(
1711 1712 info->swi_rsrcname, rsrc_pool->rsrc_quantum, 0,
1712 1713 info->swi_constructor, info->swi_destructor, NULL,
1713 1714 rsrc_pool->rsrc_state, NULL, 0);
1714 1715 }
1715 1716
1716 1717
1717 1718 /* Allocate the central list of SW handle pointers */
1718 1719 if (info->swi_flags & HERMON_SWHDL_TABLE_INIT) {
1719 1720 info->swi_table_ptr = kmem_zalloc(num_swhdl * prealloc_sz,
1720 1721 KM_SLEEP);
1721 1722 }
1722 1723
1723 1724 return (DDI_SUCCESS);
1724 1725 }
1725 1726
1726 1727
1727 1728 /*
1728 1729 * hermon_rsrc_sw_handles_fini()
1729 1730 * Context: Only called from attach() and/or detach() path contexts
1730 1731 */
1731 1732 /* ARGSUSED */
1732 1733 static void
1733 1734 hermon_rsrc_sw_handles_fini(hermon_state_t *state,
1734 1735 hermon_rsrc_sw_hdl_info_t *info)
1735 1736 {
1736 1737 hermon_rsrc_pool_info_t *rsrc_pool;
1737 1738 uint64_t num_swhdl, prealloc_sz;
1738 1739
1739 1740 ASSERT(state != NULL);
1740 1741 ASSERT(info != NULL);
1741 1742
1742 1743 rsrc_pool = info->swi_rsrcpool;
1743 1744 num_swhdl = info->swi_num;
1744 1745 prealloc_sz = info->swi_prealloc_sz;
1745 1746
1746 1747 /*
1747 1748 * If a "software handle" kmem_cache exists for this resource, then
1748 1749 * destroy it now
1749 1750 */
1750 1751 if (rsrc_pool != NULL) {
1751 1752 kmem_cache_destroy(rsrc_pool->rsrc_private);
1752 1753 }
1753 1754
1754 1755 /* Free up this central list of SW handle pointers */
1755 1756 if (info->swi_table_ptr != NULL) {
1756 1757 kmem_free(info->swi_table_ptr, num_swhdl * prealloc_sz);
1757 1758 }
1758 1759 }
1759 1760
1760 1761
1761 1762 /*
1762 1763 * hermon_rsrc_pd_handles_init()
1763 1764 * Context: Only called from attach() path context
1764 1765 */
1765 1766 static int
1766 1767 hermon_rsrc_pd_handles_init(hermon_state_t *state,
1767 1768 hermon_rsrc_sw_hdl_info_t *info)
1768 1769 {
1769 1770 hermon_rsrc_pool_info_t *rsrc_pool;
1770 1771 vmem_t *vmp;
1771 1772 char vmem_name[HERMON_RSRC_NAME_MAXLEN];
1772 1773 int status;
1773 1774
1774 1775 ASSERT(state != NULL);
1775 1776 ASSERT(info != NULL);
1776 1777
1777 1778 rsrc_pool = info->swi_rsrcpool;
1778 1779 ASSERT(rsrc_pool != NULL);
1779 1780
1780 1781 /* Initialize the resource pool for software handle table */
1781 1782 status = hermon_rsrc_sw_handles_init(state, info);
1782 1783 if (status != DDI_SUCCESS) {
1783 1784 return (DDI_FAILURE);
1784 1785 }
1785 1786
1786 1787 /* Build vmem arena name from Hermon instance */
1787 1788 HERMON_RSRC_NAME(vmem_name, HERMON_PDHDL_VMEM);
1788 1789
1789 1790 /* Create new vmem arena for PD numbers */
1790 1791 vmp = vmem_create(vmem_name, (caddr_t)1, info->swi_num, 1, NULL,
1791 1792 NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
1792 1793 if (vmp == NULL) {
1793 1794 /* Unable to create vmem arena */
1794 1795 info->swi_table_ptr = NULL;
1795 1796 hermon_rsrc_sw_handles_fini(state, info);
1796 1797 return (DDI_FAILURE);
1797 1798 }
1798 1799 rsrc_pool->rsrc_vmp = vmp;
1799 1800
1800 1801 return (DDI_SUCCESS);
1801 1802 }
1802 1803
1803 1804
1804 1805 /*
1805 1806 * hermon_rsrc_pd_handles_fini()
1806 1807 * Context: Only called from attach() and/or detach() path contexts
1807 1808 */
1808 1809 static void
1809 1810 hermon_rsrc_pd_handles_fini(hermon_state_t *state,
1810 1811 hermon_rsrc_sw_hdl_info_t *info)
1811 1812 {
1812 1813 hermon_rsrc_pool_info_t *rsrc_pool;
1813 1814
1814 1815 ASSERT(state != NULL);
1815 1816 ASSERT(info != NULL);
1816 1817
1817 1818 rsrc_pool = info->swi_rsrcpool;
1818 1819
1819 1820 /* Destroy the specially created UAR scratch table vmem arena */
1820 1821 vmem_destroy(rsrc_pool->rsrc_vmp);
1821 1822
1822 1823 /* Destroy the "hermon_sw_pd_t" kmem_cache */
1823 1824 hermon_rsrc_sw_handles_fini(state, info);
1824 1825 }
1825 1826
1826 1827
1827 1828 /*
1828 1829 * hermon_rsrc_mbox_alloc()
1829 1830 * Context: Only called from attach() path context
1830 1831 */
1831 1832 static int
1832 1833 hermon_rsrc_mbox_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t num,
1833 1834 hermon_rsrc_t *hdl)
1834 1835 {
1835 1836 hermon_rsrc_priv_mbox_t *priv;
1836 1837 caddr_t kaddr;
1837 1838 size_t real_len, temp_len;
1838 1839 int status;
1839 1840
1840 1841 ASSERT(pool_info != NULL);
1841 1842 ASSERT(hdl != NULL);
1842 1843
1843 1844 /* Get the private pointer for the mailboxes */
1844 1845 priv = pool_info->rsrc_private;
1845 1846 ASSERT(priv != NULL);
1846 1847
1847 1848 /* Allocate a DMA handle for the mailbox */
1848 1849 status = ddi_dma_alloc_handle(priv->pmb_dip, &priv->pmb_dmaattr,
1849 1850 DDI_DMA_SLEEP, NULL, &hdl->hr_dmahdl);
1850 1851 if (status != DDI_SUCCESS) {
1851 1852 return (DDI_FAILURE);
1852 1853 }
1853 1854
1854 1855 /* Allocate memory for the mailbox */
1855 1856 temp_len = (num << pool_info->rsrc_shift);
1856 1857 status = ddi_dma_mem_alloc(hdl->hr_dmahdl, temp_len,
1857 1858 &priv->pmb_devaccattr, priv->pmb_xfer_mode, DDI_DMA_SLEEP,
1858 1859 NULL, &kaddr, &real_len, &hdl->hr_acchdl);
1859 1860 if (status != DDI_SUCCESS) {
1860 1861 /* No more memory available for mailbox entries */
1861 1862 ddi_dma_free_handle(&hdl->hr_dmahdl);
1862 1863 return (DDI_FAILURE);
1863 1864 }
1864 1865
1865 1866 hdl->hr_addr = (void *)kaddr;
1866 1867 hdl->hr_len = (uint32_t)real_len;
1867 1868
1868 1869 return (DDI_SUCCESS);
1869 1870 }
1870 1871
1871 1872
1872 1873 /*
1873 1874 * hermon_rsrc_mbox_free()
1874 1875 * Context: Can be called from interrupt or base context.
1875 1876 */
1876 1877 static void
1877 1878 hermon_rsrc_mbox_free(hermon_rsrc_t *hdl)
1878 1879 {
1879 1880 ASSERT(hdl != NULL);
1880 1881
1881 1882 /* Use ddi_dma_mem_free() to free up sys memory for mailbox */
1882 1883 ddi_dma_mem_free(&hdl->hr_acchdl);
1883 1884
1884 1885 /* Free the DMA handle for the mailbox */
1885 1886 ddi_dma_free_handle(&hdl->hr_dmahdl);
1886 1887 }
1887 1888
1888 1889
1889 1890 /*
1890 1891 * hermon_rsrc_hw_entry_alloc()
1891 1892 * Context: Can be called from interrupt or base context.
1892 1893 */
1893 1894 static int
1894 1895 hermon_rsrc_hw_entry_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t num,
1895 1896 uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl)
1896 1897 {
1897 1898 void *addr;
1898 1899 uint64_t offset;
1899 1900 uint32_t align;
1900 1901 int status;
1901 1902 int flag;
1902 1903
1903 1904 ASSERT(pool_info != NULL);
1904 1905 ASSERT(hdl != NULL);
1905 1906
1906 1907 /*
1907 1908 * Use vmem_xalloc() to get a properly aligned pointer (based on
1908 1909 * the number requested) to the HW entry(ies). This handles the
1909 1910 * cases (for special QPCs and for RDB entries) where we need more
1910 1911 * than one and need to ensure that they are properly aligned.
1911 1912 */
1912 1913 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
1913 1914 hdl->hr_len = (num << pool_info->rsrc_shift);
1914 1915 align = (num_align << pool_info->rsrc_shift);
1915 1916
1916 1917 addr = vmem_xalloc(pool_info->rsrc_vmp, hdl->hr_len,
1917 1918 align, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
1918 1919
1919 1920 if (addr == NULL) {
1920 1921 /* No more HW entries available */
1921 1922 return (DDI_FAILURE);
1922 1923 }
1923 1924
1924 1925 hdl->hr_acchdl = NULL; /* only used for mbox resources */
1925 1926
1926 1927 /* Calculate vaddr and HW table index */
1927 1928 offset = (uintptr_t)addr - (uintptr_t)pool_info->rsrc_start;
1928 1929 hdl->hr_addr = addr; /* only used for mbox and uarpg resources */
1929 1930 hdl->hr_indx = offset >> pool_info->rsrc_shift;
1930 1931
1931 1932 if (pool_info->rsrc_loc == HERMON_IN_ICM) {
1932 1933 int num_to_hdl;
1933 1934 hermon_rsrc_type_t rsrc_type = pool_info->rsrc_type;
1934 1935
1935 1936 num_to_hdl = (rsrc_type == HERMON_QPC ||
1936 1937 rsrc_type == HERMON_CQC || rsrc_type == HERMON_SRQC);
1937 1938
1938 1939 /* confirm ICM is mapped, and allocate if necessary */
1939 1940 status = hermon_rsrc_hw_entry_icm_confirm(pool_info, num, hdl,
1940 1941 num_to_hdl);
1941 1942 if (status != DDI_SUCCESS) {
1942 1943 return (DDI_FAILURE);
1943 1944 }
1944 1945 hdl->hr_addr = NULL; /* not used for ICM resources */
1945 1946 }
1946 1947
1947 1948 return (DDI_SUCCESS);
1948 1949 }
1949 1950
1950 1951
1951 1952 /*
1952 1953 * hermon_rsrc_hw_entry_reserve()
1953 1954 * Context: Can be called from interrupt or base context.
1954 1955 */
1955 1956 int
1956 1957 hermon_rsrc_hw_entry_reserve(hermon_rsrc_pool_info_t *pool_info, uint_t num,
1957 1958 uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl)
1958 1959 {
1959 1960 void *addr;
1960 1961 uint64_t offset;
1961 1962 uint32_t align;
1962 1963 int flag;
1963 1964
1964 1965 ASSERT(pool_info != NULL);
1965 1966 ASSERT(hdl != NULL);
1966 1967 ASSERT(pool_info->rsrc_loc == HERMON_IN_ICM);
1967 1968
1968 1969 /*
1969 1970 * Use vmem_xalloc() to get a properly aligned pointer (based on
1970 1971 * the number requested) to the HW entry(ies). This handles the
1971 1972 * cases (for special QPCs and for RDB entries) where we need more
1972 1973 * than one and need to ensure that they are properly aligned.
1973 1974 */
1974 1975 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
1975 1976 hdl->hr_len = (num << pool_info->rsrc_shift);
1976 1977 align = (num_align << pool_info->rsrc_shift);
1977 1978
1978 1979 addr = vmem_xalloc(pool_info->rsrc_vmp, hdl->hr_len,
1979 1980 align, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
1980 1981
1981 1982 if (addr == NULL) {
1982 1983 /* No more HW entries available */
1983 1984 return (DDI_FAILURE);
1984 1985 }
1985 1986
1986 1987 hdl->hr_acchdl = NULL; /* only used for mbox resources */
1987 1988
1988 1989 /* Calculate vaddr and HW table index */
1989 1990 offset = (uintptr_t)addr - (uintptr_t)pool_info->rsrc_start;
1990 1991 hdl->hr_addr = NULL;
1991 1992 hdl->hr_indx = offset >> pool_info->rsrc_shift;
1992 1993
1993 1994 /* ICM will be allocated and mapped if and when it gets used */
1994 1995
1995 1996 return (DDI_SUCCESS);
1996 1997 }
1997 1998
1998 1999
1999 2000 /*
2000 2001 * hermon_rsrc_hw_entry_free()
2001 2002 * Context: Can be called from interrupt or base context.
2002 2003 */
2003 2004 static void
2004 2005 hermon_rsrc_hw_entry_free(hermon_rsrc_pool_info_t *pool_info,
2005 2006 hermon_rsrc_t *hdl)
2006 2007 {
2007 2008 void *addr;
2008 2009 uint64_t offset;
2009 2010 int status;
2010 2011
2011 2012 ASSERT(pool_info != NULL);
2012 2013 ASSERT(hdl != NULL);
2013 2014
2014 2015 /* Calculate the allocated address */
2015 2016 offset = hdl->hr_indx << pool_info->rsrc_shift;
2016 2017 addr = (void *)(uintptr_t)(offset + (uintptr_t)pool_info->rsrc_start);
2017 2018
2018 2019 /* Use vmem_xfree() to free up the HW table entry */
2019 2020 vmem_xfree(pool_info->rsrc_vmp, addr, hdl->hr_len);
2020 2021
2021 2022 if (pool_info->rsrc_loc == HERMON_IN_ICM) {
2022 2023 int num_to_hdl;
2023 2024 hermon_rsrc_type_t rsrc_type = pool_info->rsrc_type;
2024 2025
2025 2026 num_to_hdl = (rsrc_type == HERMON_QPC ||
2026 2027 rsrc_type == HERMON_CQC || rsrc_type == HERMON_SRQC);
2027 2028
2028 2029 /* free ICM references, and free ICM if required */
2029 2030 status = hermon_rsrc_hw_entry_icm_free(pool_info, hdl,
2030 2031 num_to_hdl);
2031 2032 if (status != DDI_SUCCESS)
2032 2033 HERMON_WARNING(pool_info->rsrc_state,
2033 2034 "failure in hw_entry_free");
2034 2035 }
2035 2036 }
2036 2037
/*
 * hermon_rsrc_hw_entry_icm_confirm()
 *    Context: Can be called from interrupt or base context.
 *
 * Ensure that ICM memory backs the "num" HW entries starting at
 * hdl->hr_indx, allocating ICM span(s) as needed and bumping each
 * span's refcount by the number of entries it backs.  Returns
 * DDI_SUCCESS, or DDI_FAILURE if a span allocation failed (note the
 * failure path's rollback is acknowledged as unimplemented below).
 */
static int
hermon_rsrc_hw_entry_icm_confirm(hermon_rsrc_pool_info_t *pool_info, uint_t num,
    hermon_rsrc_t *hdl, int num_to_hdl)
{
	hermon_state_t		*state;
	hermon_icm_table_t	*icm_table;
	uint8_t			*bitmap;
	hermon_dma_info_t	*dma_info;
	hermon_rsrc_type_t	type;
	uint32_t		rindx, span_offset;
	uint32_t		span_avail;
	int			num_backed;
	int			status;
	uint32_t		index1, index2;

	/*
	 * Utility routine responsible for ensuring that there is memory
	 * backing the ICM resources allocated via hermon_rsrc_hw_entry_alloc().
	 * Confirm existing ICM mapping(s) or allocate ICM memory for the
	 * given hardware resources being allocated, and increment the
	 * ICM DMA structure(s) reference count.
	 *
	 * We may be allocating more objects than can fit in a single span,
	 * or more than will fit in the remaining contiguous memory (from
	 * the offset indicated by hdl->ar_indx) in the span in question.
	 * In either of these cases, we'll be breaking up our allocation
	 * into multiple spans.
	 */
	state = pool_info->rsrc_state;
	type = pool_info->rsrc_type;
	icm_table = &state->hs_icm[type];

	/* Map the starting resource index to (table, span) indices */
	rindx = hdl->hr_indx;
	hermon_index(index1, index2, rindx, icm_table, span_offset);

	if (hermon_rsrc_verbose) {
		IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry_icm_confirm: "
		    "type (0x%x) num (0x%x) length (0x%x) index (0x%x, 0x%x): ",
		    type, num, hdl->hr_len, index1, index2);
	}

	mutex_enter(&icm_table->icm_table_lock);
	hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
	while (num) {
#ifndef __lock_lint
		/*
		 * Wait out any concurrent span allocation; icm_busy is the
		 * single-allocator gate for this table, signalled below.
		 */
		while (icm_table->icm_busy) {
			cv_wait(&icm_table->icm_table_cv,
			    &icm_table->icm_table_lock);
		}
#endif
		if (!HERMON_BMAP_BIT_ISSET(bitmap, index2)) {
			/*
			 * Allocate ICM for this span.  The table lock is
			 * dropped around hermon_icm_alloc() with icm_busy
			 * set so other threads block on the cv above.
			 */
			icm_table->icm_busy = 1;
			mutex_exit(&icm_table->icm_table_lock);
			status = hermon_icm_alloc(state, type, index1, index2);
			mutex_enter(&icm_table->icm_table_lock);
			icm_table->icm_busy = 0;
			cv_broadcast(&icm_table->icm_table_cv);
			if (status != DDI_SUCCESS) {
				goto fail_alloc;
			}
			if (hermon_rsrc_verbose) {
				IBTF_DPRINTF_L2("hermon", "hermon_rsrc_"
				    "hw_entry_icm_confirm: ALLOCATED ICM: "
				    "type (0x%x) index (0x%x, 0x%x)",
				    type, index1, index2);
			}
		}

		/*
		 * We need to increment the refcnt of this span by the
		 * number of objects in this resource allocation that are
		 * backed by this span. Given that the rsrc allocation is
		 * contiguous, this value will be the number of objects in
		 * the span from 'span_offset' onward, either up to a max
		 * of the total number of objects, or the end of the span.
		 * So, determine the number of objects that can be backed
		 * by this span ('span_avail'), then determine the number
		 * of backed resources.
		 */
		span_avail = icm_table->span - span_offset;
		if (num > span_avail) {
			num_backed = span_avail;
		} else {
			num_backed = num;
		}

		/*
		 * Now that we know 'num_backed', increment the refcnt,
		 * decrement the total number, and set 'span_offset' to
		 * 0 in case we roll over into the next span.
		 */
		dma_info[index2].icm_refcnt += num_backed;
		rindx += num_backed;
		num -= num_backed;

		if (hermon_rsrc_verbose) {
			IBTF_DPRINTF_L2("ALLOC", "ICM type (0x%x) index "
			    "(0x%x, 0x%x) num_backed (0x%x)",
			    type, index1, index2, num_backed);
			IBTF_DPRINTF_L2("ALLOC", "ICM type (0x%x) refcnt now "
			    "(0x%x) num_remaining (0x%x)", type,
			    dma_info[index2].icm_refcnt, num);
		}
		if (num == 0)
			break;

		/* Advance to the next span for the remaining entries */
		hermon_index(index1, index2, rindx, icm_table, span_offset);
		hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
	}
	mutex_exit(&icm_table->icm_table_lock);

	return (DDI_SUCCESS);

fail_alloc:
	/* JBDB */
	if (hermon_rsrc_verbose) {
		IBTF_DPRINTF_L2("hermon", "hermon_rsrc_"
		    "hw_entry_icm_confirm: FAILED ICM ALLOC: "
		    "type (0x%x) num remaind (0x%x) index (0x%x, 0x%x)"
		    "refcnt (0x%x)", type, num, index1, index2,
		    icm_table->icm_dma[index1][index2].icm_refcnt);
	}
	IBTF_DPRINTF_L2("hermon", "WARNING: "
	    "unimplemented fail code in hermon_rsrc_hw_entry_icm_alloc\n");

	/*
	 * NOTE(review): the rollback of refcnts/spans touched before the
	 * failing span is acknowledged as unimplemented — spans already
	 * confirmed in earlier loop iterations keep their refcnt bumps.
	 */
#if needs_work
	/* free refcnt's and any spans we've allocated */
	while (index-- != start) {
		/*
		 * JBDB - This is a bit tricky. We need to
		 * free refcnt's on any spans that we've
		 * incremented them on, and completely free
		 * spans that we've allocated. How do we do
		 * this here? Does it need to be as involved
		 * as the core of icm_free() below, or can
		 * we leverage breadcrumbs somehow?
		 */
		HERMON_WARNING(state, "unable to allocate ICM memory: "
		    "UNIMPLEMENTED HANDLING!!");
	}
#else
	cmn_err(CE_WARN,
	    "unimplemented fail code in hermon_rsrc_hw_entry_icm_alloc\n");
#endif
	mutex_exit(&icm_table->icm_table_lock);

	HERMON_WARNING(state, "unable to allocate ICM memory");
	return (DDI_FAILURE);
}
2191 2192
/*
 * hermon_rsrc_hw_entry_icm_free()
 *    Context: Can be called from interrupt or base context.
 *
 * Drop the ICM span reference counts for the entries described by
 * "hdl" (span by span, since a contiguous allocation may straddle
 * spans) and, when compiled with HERMON_ICM_FREE_ENABLED, free any
 * span whose refcount reaches zero.  Always returns DDI_SUCCESS.
 */
static int
hermon_rsrc_hw_entry_icm_free(hermon_rsrc_pool_info_t *pool_info,
    hermon_rsrc_t *hdl, int num_to_hdl)
{
	hermon_state_t		*state;
	hermon_icm_table_t	*icm_table;
	uint8_t			*bitmap;
	hermon_dma_info_t	*dma_info;
	hermon_rsrc_type_t	type;
	uint32_t		span_offset;
	uint32_t		span_remain;
	int			num_freed;
	int			num;
	uint32_t		index1, index2, rindx;

	/*
	 * Utility routine responsible for freeing references to ICM
	 * DMA spans, and freeing the ICM memory if necessary.
	 *
	 * We may have allocated objects in a single contiguous resource
	 * allocation that reside in a number of spans, at any given
	 * starting offset within a span. We therefore must determine
	 * where this allocation starts, and then determine if we need
	 * to free objects in more than one span.
	 */
	state = pool_info->rsrc_state;
	type = pool_info->rsrc_type;
	icm_table = &state->hs_icm[type];

	/* Map the starting resource index to (table, span) indices */
	rindx = hdl->hr_indx;
	hermon_index(index1, index2, rindx, icm_table, span_offset);
	hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);

	/* determine the number of ICM objects in this allocation */
	num = hdl->hr_len >> pool_info->rsrc_shift;

	if (hermon_rsrc_verbose) {
		IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry_icm_free: "
		    "type (0x%x) num (0x%x) length (0x%x) index (0x%x, 0x%x)",
		    type, num, hdl->hr_len, index1, index2);
	}
	mutex_enter(&icm_table->icm_table_lock);
	while (num) {
		/*
		 * As with the ICM confirm code above, we need to
		 * decrement the ICM span(s) by the number of
		 * resources being freed. So, determine the number
		 * of objects that are backed in this span from
		 * 'span_offset' onward, and set 'num_freed' to
		 * the smaller of either that number ('span_remain'),
		 * or the total number of objects being freed.
		 */
		span_remain = icm_table->span - span_offset;
		if (num > span_remain) {
			num_freed = span_remain;
		} else {
			num_freed = num;
		}

		/*
		 * Now that we know 'num_freed', decrement the refcnt,
		 * decrement the total number, and set 'span_offset' to
		 * 0 in case we roll over into the next span.
		 */
		dma_info[index2].icm_refcnt -= num_freed;
		num -= num_freed;
		rindx += num_freed;

		if (hermon_rsrc_verbose) {
			IBTF_DPRINTF_L2("FREE", "ICM type (0x%x) index "
			    "(0x%x, 0x%x) num_freed (0x%x)", type,
			    index1, index2, num_freed);
			IBTF_DPRINTF_L2("FREE", "ICM type (0x%x) refcnt now "
			    "(0x%x) num remaining (0x%x)", type,
			    icm_table->icm_dma[index1][index2].icm_refcnt, num);
		}

#if HERMON_ICM_FREE_ENABLED
		/*
		 * If we've freed the last object in this span, free it.
		 * Span (0, 0) is deliberately never freed.
		 */
		if ((index1 != 0 || index2 != 0) &&
		    (dma_info[index2].icm_refcnt == 0)) {
			if (hermon_rsrc_verbose) {
				IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry"
				    "_icm_free: freeing ICM type (0x%x) index"
				    " (0x%x, 0x%x)", type, index1, index2);
			}
			hermon_icm_free(state, type, index1, index2);
		}
#endif
		if (num == 0)
			break;

		/* Advance to the next span for the remaining entries */
		hermon_index(index1, index2, rindx, icm_table, span_offset);
		hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
	}
	mutex_exit(&icm_table->icm_table_lock);

	return (DDI_SUCCESS);
}
2295 2296
2296 2297
2297 2298
2298 2299 /*
2299 2300 * hermon_rsrc_swhdl_alloc()
2300 2301 * Context: Can be called from interrupt or base context.
2301 2302 */
2302 2303 static int
2303 2304 hermon_rsrc_swhdl_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t sleepflag,
2304 2305 hermon_rsrc_t *hdl)
2305 2306 {
2306 2307 void *addr;
2307 2308 int flag;
2308 2309
2309 2310 ASSERT(pool_info != NULL);
2310 2311 ASSERT(hdl != NULL);
2311 2312
2312 2313 /* Allocate the software handle structure */
2313 2314 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
2314 2315 addr = kmem_cache_alloc(pool_info->rsrc_private, flag);
2315 2316 if (addr == NULL) {
2316 2317 return (DDI_FAILURE);
2317 2318 }
2318 2319 hdl->hr_len = pool_info->rsrc_quantum;
2319 2320 hdl->hr_addr = addr;
2320 2321
2321 2322 return (DDI_SUCCESS);
2322 2323 }
2323 2324
2324 2325
2325 2326 /*
2326 2327 * hermon_rsrc_swhdl_free()
2327 2328 * Context: Can be called from interrupt or base context.
2328 2329 */
2329 2330 static void
2330 2331 hermon_rsrc_swhdl_free(hermon_rsrc_pool_info_t *pool_info, hermon_rsrc_t *hdl)
2331 2332 {
2332 2333 ASSERT(pool_info != NULL);
2333 2334 ASSERT(hdl != NULL);
2334 2335
2335 2336 /* Free the software handle structure */
2336 2337 kmem_cache_free(pool_info->rsrc_private, hdl->hr_addr);
2337 2338 }
2338 2339
2339 2340
2340 2341 /*
2341 2342 * hermon_rsrc_pdhdl_alloc()
2342 2343 * Context: Can be called from interrupt or base context.
2343 2344 */
2344 2345 static int
2345 2346 hermon_rsrc_pdhdl_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t sleepflag,
2346 2347 hermon_rsrc_t *hdl)
2347 2348 {
2348 2349 hermon_pdhdl_t addr;
2349 2350 void *tmpaddr;
2350 2351 int flag, status;
2351 2352
2352 2353 ASSERT(pool_info != NULL);
2353 2354 ASSERT(hdl != NULL);
2354 2355
2355 2356 /* Allocate the software handle */
2356 2357 status = hermon_rsrc_swhdl_alloc(pool_info, sleepflag, hdl);
2357 2358 if (status != DDI_SUCCESS) {
2358 2359 return (DDI_FAILURE);
2359 2360 }
2360 2361 addr = (hermon_pdhdl_t)hdl->hr_addr;
2361 2362 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*addr))
2362 2363
2363 2364 /* Allocate a PD number for the handle */
2364 2365 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
2365 2366 tmpaddr = vmem_alloc(pool_info->rsrc_vmp, 1, flag);
2366 2367 if (tmpaddr == NULL) {
2367 2368 /* No more PD number entries available */
2368 2369 hermon_rsrc_swhdl_free(pool_info, hdl);
2369 2370 return (DDI_FAILURE);
2370 2371 }
2371 2372 addr->pd_pdnum = (uint32_t)(uintptr_t)tmpaddr;
2372 2373 addr->pd_rsrcp = hdl;
2373 2374 hdl->hr_indx = addr->pd_pdnum;
2374 2375
2375 2376 return (DDI_SUCCESS);
2376 2377 }
2377 2378
2378 2379
2379 2380 /*
2380 2381 * hermon_rsrc_pdhdl_free()
2381 2382 * Context: Can be called from interrupt or base context.
2382 2383 */
2383 2384 static void
2384 2385 hermon_rsrc_pdhdl_free(hermon_rsrc_pool_info_t *pool_info, hermon_rsrc_t *hdl)
2385 2386 {
2386 2387 ASSERT(pool_info != NULL);
2387 2388 ASSERT(hdl != NULL);
2388 2389
2389 2390 /* Use vmem_free() to free up the PD number */
2390 2391 vmem_free(pool_info->rsrc_vmp, (void *)(uintptr_t)hdl->hr_indx, 1);
2391 2392
2392 2393 /* Free the software handle structure */
2393 2394 hermon_rsrc_swhdl_free(pool_info, hdl);
2394 2395 }
2395 2396
2396 2397
2397 2398 /*
2398 2399 * hermon_rsrc_pdhdl_constructor()
2399 2400 * Context: Can be called from interrupt or base context.
2400 2401 */
2401 2402 /* ARGSUSED */
2402 2403 static int
2403 2404 hermon_rsrc_pdhdl_constructor(void *pd, void *priv, int flags)
2404 2405 {
2405 2406 hermon_pdhdl_t pdhdl;
2406 2407 hermon_state_t *state;
2407 2408
2408 2409 pdhdl = (hermon_pdhdl_t)pd;
2409 2410 state = (hermon_state_t *)priv;
2410 2411
2411 2412 mutex_init(&pdhdl->pd_lock, NULL, MUTEX_DRIVER,
2412 2413 DDI_INTR_PRI(state->hs_intrmsi_pri));
2413 2414
2414 2415 return (DDI_SUCCESS);
2415 2416 }
2416 2417
2417 2418
2418 2419 /*
2419 2420 * hermon_rsrc_pdhdl_destructor()
2420 2421 * Context: Can be called from interrupt or base context.
2421 2422 */
2422 2423 /* ARGSUSED */
2423 2424 static void
2424 2425 hermon_rsrc_pdhdl_destructor(void *pd, void *priv)
2425 2426 {
2426 2427 hermon_pdhdl_t pdhdl;
2427 2428
2428 2429 pdhdl = (hermon_pdhdl_t)pd;
2429 2430
2430 2431 mutex_destroy(&pdhdl->pd_lock);
2431 2432 }
2432 2433
2433 2434
2434 2435 /*
2435 2436 * hermon_rsrc_cqhdl_constructor()
2436 2437 * Context: Can be called from interrupt or base context.
2437 2438 */
2438 2439 /* ARGSUSED */
2439 2440 static int
2440 2441 hermon_rsrc_cqhdl_constructor(void *cq, void *priv, int flags)
2441 2442 {
2442 2443 hermon_cqhdl_t cqhdl;
2443 2444 hermon_state_t *state;
2444 2445
2445 2446 cqhdl = (hermon_cqhdl_t)cq;
2446 2447 state = (hermon_state_t *)priv;
2447 2448
2448 2449 mutex_init(&cqhdl->cq_lock, NULL, MUTEX_DRIVER,
2449 2450 DDI_INTR_PRI(state->hs_intrmsi_pri));
2450 2451
2451 2452 return (DDI_SUCCESS);
2452 2453 }
2453 2454
2454 2455
2455 2456 /*
2456 2457 * hermon_rsrc_cqhdl_destructor()
2457 2458 * Context: Can be called from interrupt or base context.
2458 2459 */
2459 2460 /* ARGSUSED */
2460 2461 static void
2461 2462 hermon_rsrc_cqhdl_destructor(void *cq, void *priv)
2462 2463 {
2463 2464 hermon_cqhdl_t cqhdl;
2464 2465
2465 2466 cqhdl = (hermon_cqhdl_t)cq;
2466 2467
2467 2468 mutex_destroy(&cqhdl->cq_lock);
2468 2469 }
2469 2470
2470 2471
2471 2472 /*
2472 2473 * hermon_rsrc_qphdl_constructor()
2473 2474 * Context: Can be called from interrupt or base context.
2474 2475 */
2475 2476 /* ARGSUSED */
2476 2477 static int
2477 2478 hermon_rsrc_qphdl_constructor(void *qp, void *priv, int flags)
2478 2479 {
2479 2480 hermon_qphdl_t qphdl;
2480 2481 hermon_state_t *state;
2481 2482
2482 2483 qphdl = (hermon_qphdl_t)qp;
2483 2484 state = (hermon_state_t *)priv;
2484 2485
2485 2486 mutex_init(&qphdl->qp_lock, NULL, MUTEX_DRIVER,
2486 2487 DDI_INTR_PRI(state->hs_intrmsi_pri));
2487 2488
2488 2489 return (DDI_SUCCESS);
2489 2490 }
2490 2491
2491 2492
2492 2493 /*
2493 2494 * hermon_rsrc_qphdl_destructor()
2494 2495 * Context: Can be called from interrupt or base context.
2495 2496 */
2496 2497 /* ARGSUSED */
2497 2498 static void
2498 2499 hermon_rsrc_qphdl_destructor(void *qp, void *priv)
2499 2500 {
2500 2501 hermon_qphdl_t qphdl;
2501 2502
2502 2503 qphdl = (hermon_qphdl_t)qp;
2503 2504
2504 2505 mutex_destroy(&qphdl->qp_lock);
2505 2506 }
2506 2507
2507 2508
2508 2509 /*
2509 2510 * hermon_rsrc_srqhdl_constructor()
2510 2511 * Context: Can be called from interrupt or base context.
2511 2512 */
2512 2513 /* ARGSUSED */
2513 2514 static int
2514 2515 hermon_rsrc_srqhdl_constructor(void *srq, void *priv, int flags)
2515 2516 {
2516 2517 hermon_srqhdl_t srqhdl;
2517 2518 hermon_state_t *state;
2518 2519
2519 2520 srqhdl = (hermon_srqhdl_t)srq;
2520 2521 state = (hermon_state_t *)priv;
2521 2522
2522 2523 mutex_init(&srqhdl->srq_lock, NULL, MUTEX_DRIVER,
2523 2524 DDI_INTR_PRI(state->hs_intrmsi_pri));
2524 2525
2525 2526 return (DDI_SUCCESS);
2526 2527 }
2527 2528
2528 2529
2529 2530 /*
2530 2531 * hermon_rsrc_srqhdl_destructor()
2531 2532 * Context: Can be called from interrupt or base context.
2532 2533 */
2533 2534 /* ARGSUSED */
2534 2535 static void
2535 2536 hermon_rsrc_srqhdl_destructor(void *srq, void *priv)
2536 2537 {
2537 2538 hermon_srqhdl_t srqhdl;
2538 2539
2539 2540 srqhdl = (hermon_srqhdl_t)srq;
2540 2541
2541 2542 mutex_destroy(&srqhdl->srq_lock);
2542 2543 }
2543 2544
2544 2545
2545 2546 /*
2546 2547 * hermon_rsrc_refcnt_constructor()
2547 2548 * Context: Can be called from interrupt or base context.
2548 2549 */
2549 2550 /* ARGSUSED */
2550 2551 static int
2551 2552 hermon_rsrc_refcnt_constructor(void *rc, void *priv, int flags)
2552 2553 {
2553 2554 hermon_sw_refcnt_t *refcnt;
2554 2555 hermon_state_t *state;
2555 2556
2556 2557 refcnt = (hermon_sw_refcnt_t *)rc;
2557 2558 state = (hermon_state_t *)priv;
2558 2559
2559 2560 mutex_init(&refcnt->swrc_lock, NULL, MUTEX_DRIVER,
2560 2561 DDI_INTR_PRI(state->hs_intrmsi_pri));
2561 2562
2562 2563 return (DDI_SUCCESS);
2563 2564 }
2564 2565
2565 2566
2566 2567 /*
2567 2568 * hermon_rsrc_refcnt_destructor()
2568 2569 * Context: Can be called from interrupt or base context.
2569 2570 */
2570 2571 /* ARGSUSED */
2571 2572 static void
2572 2573 hermon_rsrc_refcnt_destructor(void *rc, void *priv)
2573 2574 {
2574 2575 hermon_sw_refcnt_t *refcnt;
2575 2576
2576 2577 refcnt = (hermon_sw_refcnt_t *)rc;
2577 2578
2578 2579 mutex_destroy(&refcnt->swrc_lock);
2579 2580 }
2580 2581
2581 2582
2582 2583 /*
2583 2584 * hermon_rsrc_ahhdl_constructor()
2584 2585 * Context: Can be called from interrupt or base context.
2585 2586 */
2586 2587 /* ARGSUSED */
2587 2588 static int
2588 2589 hermon_rsrc_ahhdl_constructor(void *ah, void *priv, int flags)
2589 2590 {
2590 2591 hermon_ahhdl_t ahhdl;
2591 2592 hermon_state_t *state;
2592 2593
2593 2594 ahhdl = (hermon_ahhdl_t)ah;
2594 2595 state = (hermon_state_t *)priv;
2595 2596
2596 2597 mutex_init(&ahhdl->ah_lock, NULL, MUTEX_DRIVER,
2597 2598 DDI_INTR_PRI(state->hs_intrmsi_pri));
2598 2599 return (DDI_SUCCESS);
2599 2600 }
2600 2601
2601 2602
2602 2603 /*
2603 2604 * hermon_rsrc_ahhdl_destructor()
2604 2605 * Context: Can be called from interrupt or base context.
2605 2606 */
2606 2607 /* ARGSUSED */
2607 2608 static void
2608 2609 hermon_rsrc_ahhdl_destructor(void *ah, void *priv)
2609 2610 {
2610 2611 hermon_ahhdl_t ahhdl;
2611 2612
2612 2613 ahhdl = (hermon_ahhdl_t)ah;
2613 2614
2614 2615 mutex_destroy(&ahhdl->ah_lock);
2615 2616 }
2616 2617
2617 2618
2618 2619 /*
2619 2620 * hermon_rsrc_mrhdl_constructor()
2620 2621 * Context: Can be called from interrupt or base context.
2621 2622 */
2622 2623 /* ARGSUSED */
2623 2624 static int
2624 2625 hermon_rsrc_mrhdl_constructor(void *mr, void *priv, int flags)
2625 2626 {
2626 2627 hermon_mrhdl_t mrhdl;
2627 2628 hermon_state_t *state;
2628 2629
2629 2630 mrhdl = (hermon_mrhdl_t)mr;
2630 2631 state = (hermon_state_t *)priv;
2631 2632
2632 2633 mutex_init(&mrhdl->mr_lock, NULL, MUTEX_DRIVER,
2633 2634 DDI_INTR_PRI(state->hs_intrmsi_pri));
2634 2635
2635 2636 return (DDI_SUCCESS);
2636 2637 }
2637 2638
2638 2639
2639 2640 /*
2640 2641 * hermon_rsrc_mrhdl_destructor()
2641 2642 * Context: Can be called from interrupt or base context.
2642 2643 */
2643 2644 /* ARGSUSED */
2644 2645 static void
2645 2646 hermon_rsrc_mrhdl_destructor(void *mr, void *priv)
2646 2647 {
2647 2648 hermon_mrhdl_t mrhdl;
2648 2649
2649 2650 mrhdl = (hermon_mrhdl_t)mr;
2650 2651
2651 2652 mutex_destroy(&mrhdl->mr_lock);
2652 2653 }
2653 2654
2654 2655
2655 2656 /*
2656 2657 * hermon_rsrc_mcg_entry_get_size()
2657 2658 */
2658 2659 static int
↓ open down ↓ |
2614 lines elided |
↑ open up ↑ |
2659 2660 hermon_rsrc_mcg_entry_get_size(hermon_state_t *state, uint_t *mcg_size_shift)
2660 2661 {
2661 2662 uint_t num_qp_per_mcg, max_qp_per_mcg, log2;
2662 2663
2663 2664 /*
2664 2665 * Round the configured number of QP per MCG to next larger
2665 2666 * power-of-2 size and update.
2666 2667 */
2667 2668 num_qp_per_mcg = state->hs_cfg_profile->cp_num_qp_per_mcg + 8;
2668 2669 log2 = highbit(num_qp_per_mcg);
2669 - if ((num_qp_per_mcg & (num_qp_per_mcg - 1)) == 0) {
2670 + if (ISP2(num_qp_per_mcg)) {
2670 2671 log2 = log2 - 1;
2671 2672 }
2672 2673 state->hs_cfg_profile->cp_num_qp_per_mcg = (1 << log2) - 8;
2673 2674
2674 2675 /* Now make sure number of QP per MCG makes sense */
2675 2676 num_qp_per_mcg = state->hs_cfg_profile->cp_num_qp_per_mcg;
2676 2677 max_qp_per_mcg = (1 << state->hs_devlim.log_max_qp_mcg);
2677 2678 if (num_qp_per_mcg > max_qp_per_mcg) {
2678 2679 return (DDI_FAILURE);
2679 2680 }
2680 2681
2681 2682 /* Return the (shift) size of an individual MCG HW entry */
2682 2683 *mcg_size_shift = log2 + 2;
2683 2684
2684 2685 return (DDI_SUCCESS);
2685 2686 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX