Print this page
5255: uts should not open-code ISP2


 188         }
 189 
 190         if (vpm_cache_size > VPMAP_MAXCACHE) {
 191                 vpm_cache_size = VPMAP_MAXCACHE;
 192         }
 193 
 194         /*
 195          * Number of freelists.
 196          */
 197         if (vpm_nfreelist == 0) {
 198                 vpm_nfreelist = max_ncpus;
 199         } else if (vpm_nfreelist < 0 || vpm_nfreelist > 2 * max_ncpus) {
 200                 cmn_err(CE_WARN, "vpmap create : number of freelist "
 201                 "vpm_nfreelist %d using %d", vpm_nfreelist, max_ncpus);
 202                 vpm_nfreelist = 2 * max_ncpus;
 203         }
 204 
 205         /*
 206          * Round it up to the next power of 2
 207          */
 208         if (vpm_nfreelist & (vpm_nfreelist - 1)) {
 209                 vpm_nfreelist = 1 << (highbit(vpm_nfreelist));
 210         }
 211         vpmd_freemsk = vpm_nfreelist - 1;
 212 
 213         /*
 214          * Use a per cpu rotor index to spread the allocations evenly
 215          * across the available vpm freelists.
 216          */
 217         vpmd_cpu = kmem_zalloc(sizeof (union vpm_cpu) * max_ncpus, KM_SLEEP);
 218         ndx = 0;
 219         for (i = 0; i < max_ncpus; i++) {
 220 
 221                 vpmd_cpu[i].vfree_ndx = ndx;
 222                 ndx = (ndx + 1) & vpmd_freemsk;
 223         }
 224 
 225         /*
 226          * Allocate and initialize the freelist.
 227          */
 228         vpmd_free = kmem_zalloc(vpm_nfreelist * sizeof (struct vpmfree),




 188         }
 189 
 190         if (vpm_cache_size > VPMAP_MAXCACHE) {
 191                 vpm_cache_size = VPMAP_MAXCACHE;
 192         }
 193 
 194         /*
 195          * Number of freelists.
 196          */
 197         if (vpm_nfreelist == 0) {
 198                 vpm_nfreelist = max_ncpus;
 199         } else if (vpm_nfreelist < 0 || vpm_nfreelist > 2 * max_ncpus) {
 200                 cmn_err(CE_WARN, "vpmap create : number of freelist "
 201                 "vpm_nfreelist %d using %d", vpm_nfreelist, max_ncpus);
 202                 vpm_nfreelist = 2 * max_ncpus;
 203         }
 204 
 205         /*
 206          * Round it up to the next power of 2
 207          */
 208         if (!ISP2(vpm_nfreelist)) {
 209                 vpm_nfreelist = 1 << (highbit(vpm_nfreelist));
 210         }
 211         vpmd_freemsk = vpm_nfreelist - 1;
 212 
 213         /*
 214          * Use a per cpu rotor index to spread the allocations evenly
 215          * across the available vpm freelists.
 216          */
 217         vpmd_cpu = kmem_zalloc(sizeof (union vpm_cpu) * max_ncpus, KM_SLEEP);
 218         ndx = 0;
 219         for (i = 0; i < max_ncpus; i++) {
 220 
 221                 vpmd_cpu[i].vfree_ndx = ndx;
 222                 ndx = (ndx + 1) & vpmd_freemsk;
 223         }
 224 
 225         /*
 226          * Allocate and initialize the freelist.
 227          */
 228         vpmd_free = kmem_zalloc(vpm_nfreelist * sizeof (struct vpmfree),