292 * Number of 32 bit and 64 bit libraries in lib_va hash.
293 */
294 static uint_t libs_mapped_32 = 0;	/* 32-bit libraries with a cached VA */
295 static uint_t libs_mapped_64 = 0;	/* 64-bit libraries with a cached VA */
296
297 /*
298 * Free up the resources associated with lvp as well as lvp itself.
299 * We also decrement the number of libraries mapped via a lib_va
300 * cached virtual address.
301 */
302 void
303 lib_va_free(struct lib_va *lvp)
304 {
305 int is_64bit = lvp->lv_flags & LV_ELF64;
306 ASSERT(lvp->lv_refcnt == 0);
307
308 if (lvp->lv_base_va != NULL) {
309 vmem_xfree(is_64bit ? lib_va_64_arena : lib_va_32_arena,
310 lvp->lv_base_va, lvp->lv_len);
311 if (is_64bit) {
312 atomic_add_32(&libs_mapped_64, -1);
313 } else {
314 atomic_add_32(&libs_mapped_32, -1);
315 }
316 }
317 kmem_free(lvp, sizeof (struct lib_va));
318 }
319
320 /*
321 * See if the file associated with the vap passed in is in the lib_va hash.
322 * If it is and the file has not been modified since last use, then
323 * return a pointer to that data. Otherwise, return NULL if the file has
324 * changed or the file was not found in the hash.
325 */
326 static struct lib_va *
327 lib_va_find(vattr_t *vap)
328 {
329 struct lib_va *lvp;
330 struct lib_va *del = NULL;
331 struct lib_va **tmp;
332 uint_t index;
333 index = LIB_VA_HASH(vap->va_nodeid);
334
455 lvp->lv_fsid = vap->va_fsid;
456 lvp->lv_ctime.tv_sec = vap->va_ctime.tv_sec;
457 lvp->lv_ctime.tv_nsec = vap->va_ctime.tv_nsec;
458 lvp->lv_mtime.tv_sec = vap->va_mtime.tv_sec;
459 lvp->lv_mtime.tv_nsec = vap->va_mtime.tv_nsec;
460 lvp->lv_next = NULL;
461 lvp->lv_refcnt = 1;
462
463 /* Caller responsible for filling this and lv_mps out */
464 lvp->lv_num_segs = 0;
465
466 if (model == DATAMODEL_LP64) {
467 lvp->lv_flags = LV_ELF64;
468 } else {
469 ASSERT(model == DATAMODEL_ILP32);
470 lvp->lv_flags = LV_ELF32;
471 }
472
473 if (base_va != NULL) {
474 if (model == DATAMODEL_LP64) {
475 atomic_add_32(&libs_mapped_64, 1);
476 } else {
477 ASSERT(model == DATAMODEL_ILP32);
478 atomic_add_32(&libs_mapped_32, 1);
479 }
480 }
481 ASSERT(*tmp == NULL);
482 *tmp = lvp;
483 mutex_exit(LIB_VA_HASH_MUTEX(index));
484 if (del) {
485 ASSERT(del->lv_refcnt == 0);
486 MOBJ_STAT_ADD(lib_va_add_delete);
487 lib_va_free(del);
488 }
489 return (lvp);
490 }
491
492 /*
493 * Release the hold on lvp which was acquired by lib_va_find or lib_va_add_hash.
494 * In addition, if this is the last hold and lvp is marked for deletion,
495 * free up it's reserved address space and free the structure.
496 */
497 static void
498 lib_va_release(struct lib_va *lvp)
|
292 * Number of 32 bit and 64 bit libraries in lib_va hash.
293 */
294 static uint_t libs_mapped_32 = 0;	/* 32-bit libraries with a cached VA */
295 static uint_t libs_mapped_64 = 0;	/* 64-bit libraries with a cached VA */
296
297 /*
298 * Free up the resources associated with lvp as well as lvp itself.
299 * We also decrement the number of libraries mapped via a lib_va
300 * cached virtual address.
301 */
302 void
303 lib_va_free(struct lib_va *lvp)
304 {
305 int is_64bit = lvp->lv_flags & LV_ELF64;
306 ASSERT(lvp->lv_refcnt == 0);
307
308 if (lvp->lv_base_va != NULL) {
309 vmem_xfree(is_64bit ? lib_va_64_arena : lib_va_32_arena,
310 lvp->lv_base_va, lvp->lv_len);
311 if (is_64bit) {
312 atomic_dec_32(&libs_mapped_64);
313 } else {
314 atomic_dec_32(&libs_mapped_32);
315 }
316 }
317 kmem_free(lvp, sizeof (struct lib_va));
318 }
319
320 /*
321 * See if the file associated with the vap passed in is in the lib_va hash.
322 * If it is and the file has not been modified since last use, then
323 * return a pointer to that data. Otherwise, return NULL if the file has
324 * changed or the file was not found in the hash.
325 */
326 static struct lib_va *
327 lib_va_find(vattr_t *vap)
328 {
329 struct lib_va *lvp;
330 struct lib_va *del = NULL;
331 struct lib_va **tmp;
332 uint_t index;
333 index = LIB_VA_HASH(vap->va_nodeid);
334
455 lvp->lv_fsid = vap->va_fsid;
456 lvp->lv_ctime.tv_sec = vap->va_ctime.tv_sec;
457 lvp->lv_ctime.tv_nsec = vap->va_ctime.tv_nsec;
458 lvp->lv_mtime.tv_sec = vap->va_mtime.tv_sec;
459 lvp->lv_mtime.tv_nsec = vap->va_mtime.tv_nsec;
460 lvp->lv_next = NULL;
461 lvp->lv_refcnt = 1;
462
463 /* Caller responsible for filling this and lv_mps out */
464 lvp->lv_num_segs = 0;
465
466 if (model == DATAMODEL_LP64) {
467 lvp->lv_flags = LV_ELF64;
468 } else {
469 ASSERT(model == DATAMODEL_ILP32);
470 lvp->lv_flags = LV_ELF32;
471 }
472
473 if (base_va != NULL) {
474 if (model == DATAMODEL_LP64) {
475 atomic_inc_32(&libs_mapped_64);
476 } else {
477 ASSERT(model == DATAMODEL_ILP32);
478 atomic_inc_32(&libs_mapped_32);
479 }
480 }
481 ASSERT(*tmp == NULL);
482 *tmp = lvp;
483 mutex_exit(LIB_VA_HASH_MUTEX(index));
484 if (del) {
485 ASSERT(del->lv_refcnt == 0);
486 MOBJ_STAT_ADD(lib_va_add_delete);
487 lib_va_free(del);
488 }
489 return (lvp);
490 }
491
492 /*
493 * Release the hold on lvp which was acquired by lib_va_find or lib_va_add_hash.
494 * In addition, if this is the last hold and lvp is marked for deletion,
495 * free up it's reserved address space and free the structure.
496 */
497 static void
498 lib_va_release(struct lib_va *lvp)
|