1436 /*
1437 * Create an arena called name whose initial span is [base, base + size).
1438 * The arena's natural unit of currency is quantum, so vmem_alloc()
1439 * guarantees quantum-aligned results. The arena may import new spans
1440 * by invoking afunc() on source, and may return those spans by invoking
1441 * ffunc() on source. To make small allocations fast and scalable,
1442 * the arena offers high-performance caching for each integer multiple
1443 * of quantum up to qcache_max.
1444 */
1445 static vmem_t *
1446 vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
1447 void *(*afunc)(vmem_t *, size_t, int),
1448 void (*ffunc)(vmem_t *, void *, size_t),
1449 vmem_t *source, size_t qcache_max, int vmflag)
1450 {
1451 int i;
1452 size_t nqcache;
1453 vmem_t *vmp, *cur, **vmpp;
1454 vmem_seg_t *vsp;
1455 vmem_freelist_t *vfp;
/*
 * Atomically reserve a unique, 1-based arena id (hence the "id - 1"
 * indexing below).  NOTE(review): a later copy of this function in this
 * file uses atomic_inc_32_nv(&vmem_id), which is equivalent to
 * atomic_add_32_nv(&vmem_id, 1); the two copies should agree -- confirm
 * which form the tree has standardized on.
 */
1456 uint32_t id = atomic_add_32_nv(&vmem_id, 1);
1457
/*
 * Bootstrap: until the arena-of-arenas (vmem_vmem_arena) exists, the
 * vmem_t itself cannot be dynamically allocated, so carve it out of the
 * static vmem0[] array instead.  At most VMEM_INITIAL arenas may be
 * created this way (ASSERTed below).
 */
1458 if (vmem_vmem_arena != NULL) {
1459 vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
1460 vmflag & VM_KMFLAGS);
1461 } else {
1462 ASSERT(id <= VMEM_INITIAL);
1463 vmp = &vmem0[id - 1];
1464 }
1465
1466 /* An identifier arena must inherit from another identifier arena */
1467 ASSERT(source == NULL || ((source->vm_cflags & VMC_IDENTIFIER) ==
1468 (vmflag & VMC_IDENTIFIER)));
1469
/* Only the vmem_alloc() path can yield NULL; the vmem0[] path cannot. */
1470 if (vmp == NULL)
1471 return (NULL);
1472 bzero(vmp, sizeof (vmem_t));
1473
/* Record the arena name (truncated to VMEM_NAMELEN) and init its locks. */
1474 (void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
1475 mutex_init(&vmp->vm_lock, NULL, MUTEX_DEFAULT, NULL);
1476 cv_init(&vmp->vm_cv, NULL, CV_DEFAULT, NULL);
/*
 * NOTE(review): file lines 1477-1537 are not visible in this chunk.  The
 * line below is the tail of a call made inside a loop (two closing braces
 * follow); the KMC_QCACHE | KMC_NOTOUCH flags suggest it creates the
 * per-quantum-multiple qcache kmem caches described in the block comment
 * above -- confirm against the full source before relying on this.
 */
1538 NULL, vmp, KMC_QCACHE | KMC_NOTOUCH);
1539 }
1540 }
1541
/*
 * Export per-arena statistics via a virtual kstat backed directly by
 * vmp->vm_kstat.  kstat creation failure is non-fatal: the arena still
 * works, it just has no observable stats.
 */
1542 if ((vmp->vm_ksp = kstat_create("vmem", vmp->vm_id, vmp->vm_name,
1543 "vmem", KSTAT_TYPE_NAMED, sizeof (vmem_kstat_t) /
1544 sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) != NULL) {
1545 vmp->vm_ksp->ks_data = &vmp->vm_kstat;
1546 kstat_install(vmp->vm_ksp);
1547 }
1548
/* Append the new arena to the tail of the global vmem_list. */
1549 mutex_enter(&vmem_list_lock);
1550 vmpp = &vmem_list;
1551 while ((cur = *vmpp) != NULL)
1552 vmpp = &cur->vm_next;
1553 *vmpp = vmp;
1554 mutex_exit(&vmem_list_lock);
1555
/*
 * A populator arena is registered in vmem_populator[] (same atomic
 * add-and-index idiom as the id above) and is primed immediately;
 * VM_PANIC makes a failure to populate fatal rather than recoverable.
 */
1556 if (vmp->vm_cflags & VMC_POPULATOR) {
1557 ASSERT(vmem_populators < VMEM_INITIAL);
1558 vmem_populator[atomic_add_32_nv(&vmem_populators, 1) - 1] = vmp;
1559 mutex_enter(&vmp->vm_lock);
1560 (void) vmem_populate(vmp, vmflag | VM_PANIC);
1561 mutex_exit(&vmp->vm_lock);
1562 }
1563
/*
 * Seed the arena with its initial span [base, base + size), if any.
 * If vmem_add() fails, tear down everything built above and report
 * failure to the caller.
 */
1564 if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
1565 vmem_destroy(vmp);
1566 return (NULL);
1567 }
1568
1569 return (vmp);
1570 }
1571
/*
 * Arena-creation entry point taking the typed extended-import callback
 * signature (vmem_ximport_t / vmem_free_t).  Only the prologue is visible
 * in this view; the remainder of the body lies outside this chunk.
 */
1572 vmem_t *
1573 vmem_xcreate(const char *name, void *base, size_t size, size_t quantum,
1574 vmem_ximport_t *afunc, vmem_free_t *ffunc, vmem_t *source,
1575 size_t qcache_max, int vmflag)
1576 {
/*
 * Callers may not request VMC_POPULATOR or VMC_XALLOC here: assert that
 * on DEBUG kernels, and strip the bits unconditionally so non-DEBUG
 * kernels behave the same.
 */
1577 ASSERT(!(vmflag & (VMC_POPULATOR | VMC_XALLOC)));
1578 vmflag &= ~(VMC_POPULATOR | VMC_XALLOC);
|
1436 /*
1437 * Create an arena called name whose initial span is [base, base + size).
1438 * The arena's natural unit of currency is quantum, so vmem_alloc()
1439 * guarantees quantum-aligned results. The arena may import new spans
1440 * by invoking afunc() on source, and may return those spans by invoking
1441 * ffunc() on source. To make small allocations fast and scalable,
1442 * the arena offers high-performance caching for each integer multiple
1443 * of quantum up to qcache_max.
1444 */
1445 static vmem_t *
1446 vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
1447 void *(*afunc)(vmem_t *, size_t, int),
1448 void (*ffunc)(vmem_t *, void *, size_t),
1449 vmem_t *source, size_t qcache_max, int vmflag)
1450 {
1451 int i;
1452 size_t nqcache;
1453 vmem_t *vmp, *cur, **vmpp;
1454 vmem_seg_t *vsp;
1455 vmem_freelist_t *vfp;
/*
 * Atomically reserve a unique, 1-based arena id (hence the "id - 1"
 * indexing below).  This copy uses atomic_inc_32_nv(), the dedicated
 * increment-and-return-new-value form; an earlier copy of this function
 * in this file spells the same operation atomic_add_32_nv(&vmem_id, 1).
 */
1456 uint32_t id = atomic_inc_32_nv(&vmem_id);
1457
/*
 * Bootstrap: until the arena-of-arenas (vmem_vmem_arena) exists, the
 * vmem_t itself cannot be dynamically allocated, so carve it out of the
 * static vmem0[] array instead.  At most VMEM_INITIAL arenas may be
 * created this way (ASSERTed below).
 */
1458 if (vmem_vmem_arena != NULL) {
1459 vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
1460 vmflag & VM_KMFLAGS);
1461 } else {
1462 ASSERT(id <= VMEM_INITIAL);
1463 vmp = &vmem0[id - 1];
1464 }
1465
1466 /* An identifier arena must inherit from another identifier arena */
1467 ASSERT(source == NULL || ((source->vm_cflags & VMC_IDENTIFIER) ==
1468 (vmflag & VMC_IDENTIFIER)));
1469
/* Only the vmem_alloc() path can yield NULL; the vmem0[] path cannot. */
1470 if (vmp == NULL)
1471 return (NULL);
1472 bzero(vmp, sizeof (vmem_t));
1473
/* Record the arena name (truncated to VMEM_NAMELEN) and init its locks. */
1474 (void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
1475 mutex_init(&vmp->vm_lock, NULL, MUTEX_DEFAULT, NULL);
1476 cv_init(&vmp->vm_cv, NULL, CV_DEFAULT, NULL);
/*
 * NOTE(review): file lines 1477-1537 are not visible in this chunk.  The
 * line below is the tail of a call made inside a loop (two closing braces
 * follow); the KMC_QCACHE | KMC_NOTOUCH flags suggest it creates the
 * per-quantum-multiple qcache kmem caches described in the block comment
 * above -- confirm against the full source before relying on this.
 */
1538 NULL, vmp, KMC_QCACHE | KMC_NOTOUCH);
1539 }
1540 }
1541
/*
 * Export per-arena statistics via a virtual kstat backed directly by
 * vmp->vm_kstat.  kstat creation failure is non-fatal: the arena still
 * works, it just has no observable stats.
 */
1542 if ((vmp->vm_ksp = kstat_create("vmem", vmp->vm_id, vmp->vm_name,
1543 "vmem", KSTAT_TYPE_NAMED, sizeof (vmem_kstat_t) /
1544 sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) != NULL) {
1545 vmp->vm_ksp->ks_data = &vmp->vm_kstat;
1546 kstat_install(vmp->vm_ksp);
1547 }
1548
/* Append the new arena to the tail of the global vmem_list. */
1549 mutex_enter(&vmem_list_lock);
1550 vmpp = &vmem_list;
1551 while ((cur = *vmpp) != NULL)
1552 vmpp = &cur->vm_next;
1553 *vmpp = vmp;
1554 mutex_exit(&vmem_list_lock);
1555
/*
 * A populator arena is registered in vmem_populator[] (same atomic
 * increment-and-index idiom as the id above) and is primed immediately;
 * VM_PANIC makes a failure to populate fatal rather than recoverable.
 */
1556 if (vmp->vm_cflags & VMC_POPULATOR) {
1557 ASSERT(vmem_populators < VMEM_INITIAL);
1558 vmem_populator[atomic_inc_32_nv(&vmem_populators) - 1] = vmp;
1559 mutex_enter(&vmp->vm_lock);
1560 (void) vmem_populate(vmp, vmflag | VM_PANIC);
1561 mutex_exit(&vmp->vm_lock);
1562 }
1563
/*
 * Seed the arena with its initial span [base, base + size), if any.
 * If vmem_add() fails, tear down everything built above and report
 * failure to the caller.
 */
1564 if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
1565 vmem_destroy(vmp);
1566 return (NULL);
1567 }
1568
1569 return (vmp);
1570 }
1571
/*
 * Arena-creation entry point taking the typed extended-import callback
 * signature (vmem_ximport_t / vmem_free_t).  Only the prologue is visible
 * in this view; the remainder of the body lies outside this chunk.
 */
1572 vmem_t *
1573 vmem_xcreate(const char *name, void *base, size_t size, size_t quantum,
1574 vmem_ximport_t *afunc, vmem_free_t *ffunc, vmem_t *source,
1575 size_t qcache_max, int vmflag)
1576 {
/*
 * Callers may not request VMC_POPULATOR or VMC_XALLOC here: assert that
 * on DEBUG kernels, and strip the bits unconditionally so non-DEBUG
 * kernels behave the same.
 */
1577 ASSERT(!(vmflag & (VMC_POPULATOR | VMC_XALLOC)));
1578 vmflag &= ~(VMC_POPULATOR | VMC_XALLOC);
|