1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 
  25 #include <sys/param.h>
  26 #include <sys/user.h>
  27 #include <sys/mman.h>
  28 #include <sys/kmem.h>
  29 #include <sys/sysmacros.h>
  30 #include <sys/cmn_err.h>
  31 #include <sys/systm.h>
  32 #include <sys/tuneable.h>
  33 #include <vm/hat.h>
  34 #include <vm/seg.h>
  35 #include <vm/as.h>
  36 #include <vm/anon.h>
  37 #include <vm/page.h>
  38 #include <sys/buf.h>
  39 #include <sys/swap.h>
  40 #include <sys/atomic.h>
  41 #include <vm/seg_spt.h>
  42 #include <sys/debug.h>
  43 #include <sys/vtrace.h>
  44 #include <sys/shm.h>
  45 #include <sys/shm_impl.h>
  46 #include <sys/lgrp.h>
  47 #include <sys/vmsystm.h>
  48 #include <sys/policy.h>
  49 #include <sys/project.h>
  50 #include <sys/tnf_probe.h>
  51 #include <sys/zone.h>
  52 
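     /*
      * The spt segment is always mapped at address zero within its own
      * dummy address space (see sptcreate() below), so SEGSPTADDR is the
      * fixed base address used when mapping and unmapping it.
      */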
  53 #define SEGSPTADDR      (caddr_t)0x0
  54 
  55 /*
  56  * # pages used for spt
  57  */
  58 size_t  spt_used;
  59 
  60 /*
  61  * segspt_minfree is the memory left for the system after ISM
  62  * has locked its pages; it is set to 5% of availrmem in
  63  * sptcreate() when ISM is created.  ISM should not use more
  64  * than ~90% of availrmem; if it does, the performance of the
  65  * system may decrease.  Machines with large memories may be
  66  * able to devote more memory to ISM, so the default
  67  * segspt_minfree is 5% (which allows ISM at most 95% of availrmem).
  68  * If somebody wants even more memory for ISM (at the risk of
  69  * hanging the system) they can patch segspt_minfree to a smaller number.
  70  */
  71 pgcnt_t segspt_minfree = 0;
  72 
  73 static int segspt_create(struct seg *seg, caddr_t argsp);
  74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
  75 static void segspt_free(struct seg *seg);
  76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
  77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
  78 
  79 static void
  80 segspt_badop()
  81 {
  82         panic("segspt_badop called");
  83         /*NOTREACHED*/
  84 }
  85 
  86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
  87 
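     /*
      * Ops vector for the spt segment that lives in the dummy address
      * space.  Only unmap, free and getpolicy are expected to be called
      * on it; every other entry point is wired to segspt_badop(), which
      * panics.
      */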
  88 struct seg_ops segspt_ops = {
  89         .dup            = SEGSPT_BADOP(int),
  90         .unmap          = segspt_unmap,
  91         .free           = segspt_free,
  92         .fault          = SEGSPT_BADOP(int),
  93         .faulta         = SEGSPT_BADOP(faultcode_t),
  94         .setprot        = SEGSPT_BADOP(int),
  95         .checkprot      = SEGSPT_BADOP(int),
  96         .kluster        = SEGSPT_BADOP(int),
  97         .swapout        = SEGSPT_BADOP(size_t),
  98         .sync           = SEGSPT_BADOP(int),
  99         .incore         = SEGSPT_BADOP(size_t),
 100         .lockop         = SEGSPT_BADOP(int),
 101         .getprot        = SEGSPT_BADOP(int),
 102         .getoffset      = SEGSPT_BADOP(u_offset_t),
 103         .gettype        = SEGSPT_BADOP(int),
 104         .getvp          = SEGSPT_BADOP(int),
 105         .advise         = SEGSPT_BADOP(int),
 106         .dump           = SEGSPT_BADOP(void),
 107         .pagelock       = SEGSPT_BADOP(int),
 108         .setpagesize    = SEGSPT_BADOP(int),
 109         .getmemid       = SEGSPT_BADOP(int),
 110         .getpolicy      = segspt_getpolicy,
 111         .capable        = SEGSPT_BADOP(int),
 112 };
 113 
 114 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
 115 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
 116 static void segspt_shmfree(struct seg *seg);
 117 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
 118                 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
 119 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
 120 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
 121                         register size_t len, register uint_t prot);
 122 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
 123                         uint_t prot);
 124 static int      segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
 125 static size_t   segspt_shmswapout(struct seg *seg);
 126 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
 127                         register char *vec);
 128 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
 129                         int attr, uint_t flags);
 130 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
 131                         int attr, int op, ulong_t *lockmap, size_t pos);
 132 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
 133                         uint_t *protv);
 134 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
 135 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
 136 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
 137 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
 138                         uint_t behav);
 139 static void segspt_shmdump(struct seg *seg);
 140 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
 141                         struct page ***, enum lock_type, enum seg_rw);
 142 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
 143 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
 144 
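     /*
      * Ops vector for the per-process segments that map the shared
      * memory; these operate through the underlying spt segment
      * (shm_sptseg).
      */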
 145 struct seg_ops segspt_shmops = {
 146         .dup            = segspt_shmdup,
 147         .unmap          = segspt_shmunmap,
 148         .free           = segspt_shmfree,
 149         .fault          = segspt_shmfault,
 150         .faulta         = segspt_shmfaulta,
 151         .setprot        = segspt_shmsetprot,
 152         .checkprot      = segspt_shmcheckprot,
 153         .kluster        = segspt_shmkluster,
 154         .swapout        = segspt_shmswapout,
 155         .sync           = segspt_shmsync,
 156         .incore         = segspt_shmincore,
 157         .lockop         = segspt_shmlockop,
 158         .getprot        = segspt_shmgetprot,
 159         .getoffset      = segspt_shmgetoffset,
 160         .gettype        = segspt_shmgettype,
 161         .getvp          = segspt_shmgetvp,
 162         .advise         = segspt_shmadvise,
 163         .dump           = segspt_shmdump,
 164         .pagelock       = segspt_shmpagelock,
 165         .getmemid       = segspt_shmgetmemid,
 166         .getpolicy      = segspt_shmgetpolicy,
 167 };
 168 
 169 static void segspt_purge(struct seg *seg);
 170 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
 171                 enum seg_rw, int);
 172 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
 173                 page_t **ppa);
 174 
 175 
 176 
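     /*
      * Create the dummy address space and the single spt segment that
      * describes an ISM/DISM shared memory region of the given size.
      * On success the new segment is returned through *sptseg.
      */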
 177 /*ARGSUSED*/
 178 int
 179 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
 180         uint_t prot, uint_t flags, uint_t share_szc)
 181 {
 182         int     err;
 183         struct  as      *newas;
 184         struct  segspt_crargs sptcargs;
 185 
 186 #ifdef DEBUG
 187         TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
 188                         tnf_ulong, size, size );
 189 #endif
 190         if (segspt_minfree == 0)        /* leave min 5% of availrmem */
 191                 segspt_minfree = availrmem/20;  /* for the system */
 192 
 193         if (!hat_supported(HAT_SHARED_PT, (void *)0))
 194                 return (EINVAL);
 195 
 196         /*
 197          * get a new as for this shared memory segment
 198          */
 199         newas = as_alloc();
 200         newas->a_proc = NULL;
 201         sptcargs.amp = amp;
 202         sptcargs.prot = prot;
 203         sptcargs.flags = flags;
 204         sptcargs.szc = share_szc;
 205         /*
 206          * create a shared page table (spt) segment
 207          */
 208 
 209         if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
 210                 as_free(newas);
 211                 return (err);
 212         }
 213         *sptseg = sptcargs.seg_spt;
 214         return (0);
 215 }
 216 
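     /*
      * Tear down the dummy address space created by sptcreate(): unmap
      * the spt segment and free the address space itself.
      */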
 217 void
 218 sptdestroy(struct as *as, struct anon_map *amp)
 219 {
 220 
 221 #ifdef DEBUG
 222         TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
 223 #endif
 224         (void) as_unmap(as, SEGSPTADDR, amp->size);
 225         as_free(as);
 226 }
 227 
 228 /*
 229  * called from seg_free().
 230  * free (i.e., unlock, unmap, return to free list)
 231  *  all the pages in the given seg.
 232  */
 233 void
 234 segspt_free(struct seg  *seg)
 235 {
 236         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 237 
 238         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 239 
 240         if (sptd != NULL) {
 241                 if (sptd->spt_realsize)
 242                         segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
 243 
 244                 if (sptd->spt_ppa_lckcnt)
 245                         kmem_free(sptd->spt_ppa_lckcnt,
 246                             sizeof (*sptd->spt_ppa_lckcnt)
 247                             * btopr(sptd->spt_amp->size));
 248                 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
 249                 cv_destroy(&sptd->spt_cv);
 250                 mutex_destroy(&sptd->spt_lock);
 251                 kmem_free(sptd, sizeof (*sptd));
 252         }
 253 }
 254 
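     /*
      * Shared memory is anonymous, so there is nothing to write back to
      * a file; sync is a no-op.
      */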
 255 /*ARGSUSED*/
 256 static int
 257 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
 258         uint_t flags)
 259 {
 260         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 261 
 262         return (0);
 263 }
 264 
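     /*
      * Report residency for a range of the shared memory segment.  For
      * ISM every page is resident and locked, so the whole range is
      * reported as in core.  For DISM (SHM_PAGEABLE) we walk the anon
      * map and report, per page, whether it is resident and whether it
      * is currently locked (DISM_PG_LOCKED).
      */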
 265 /*ARGSUSED*/
 266 static size_t
 267 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
 268 {
 269         caddr_t eo_seg;
 270         pgcnt_t npages;
 271         struct shm_data *shmd = (struct shm_data *)seg->s_data;
 272         struct seg      *sptseg;
 273         struct spt_data *sptd;
 274 
 275         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 276 #ifdef lint
 277         seg = seg;
 278 #endif
 279         sptseg = shmd->shm_sptseg;
 280         sptd = sptseg->s_data;
 281 
 282         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 283                 eo_seg = addr + len;
 284                 while (addr < eo_seg) {
 285                         /* page exists, and it's locked. */
 286                         *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
 287                             SEG_PAGE_ANON;
 288                         addr += PAGESIZE;
 289                 }
 290                 return (len);
 291         } else {
 292                 struct  anon_map *amp = shmd->shm_amp;
 293                 struct  anon    *ap;
 294                 page_t          *pp;
 295                 pgcnt_t         anon_index;
 296                 struct vnode    *vp;
 297                 u_offset_t      off;
 298                 ulong_t         i;
 299                 int             ret;
 300                 anon_sync_obj_t cookie;
 301 
 302                 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 303                 anon_index = seg_page(seg, addr);
 304                 npages = btopr(len);
 305                 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
 306                         return (EINVAL);
 307                 }
 308                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
 309                 for (i = 0; i < npages; i++, anon_index++) {
 310                         ret = 0;
 311                         anon_array_enter(amp, anon_index, &cookie);
 312                         ap = anon_get_ptr(amp->ahp, anon_index);
 313                         if (ap != NULL) {
 314                                 swap_xlate(ap, &vp, &off);
 315                                 anon_array_exit(&cookie);
 316                                 pp = page_lookup_nowait(vp, off, SE_SHARED);
 317                                 if (pp != NULL) {
 318                                         ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
 319                                         page_unlock(pp);
 320                                 }
 321                         } else {
 322                                 anon_array_exit(&cookie);
 323                         }
 324                         if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
 325                                 ret |= SEG_PAGE_LOCKED;
 326                         }
 327                         *vec++ = (char)ret;
 328                 }
 329                 ANON_LOCK_EXIT(&amp->a_rwlock);
 330                 return (len);
 331         }
 332 }
 333 
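     /*
      * Unmap the spt segment from the dummy address space.  Only a
      * request covering the entire segment is supported.
      */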
 334 static int
 335 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
 336 {
 337         size_t share_size;
 338 
 339         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 340 
 341         /*
 342          * seg.s_size may have been rounded up to the largest page size
 343          * in shmat().
 344          * XXX This should be cleaned up. sptdestroy should take a length
 345          * argument which should be the same as sptcreate. Then
 346          * this rounding would not be needed (or is done in shm.c)
 347          * Only the check for full segment will be needed.
 348          *
 349          * XXX -- shouldn't raddr == 0 always? These tests don't seem
 350          * to be useful at all.
 351          */
 352         share_size = page_get_pagesize(seg->s_szc);
 353         ssize = P2ROUNDUP(ssize, share_size);
 354 
 355         if (raddr == seg->s_base && ssize == seg->s_size) {
 356                 seg_free(seg);
 357                 return (0);
 358         } else
 359                 return (EINVAL);
 360 }
 361 
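     /*
      * Create the spt segment proper; called back from as_map() via
      * sptcreate().  For ISM all pages are created and locked up front,
      * charged against the project's locked-memory rctl, and shared HAT
      * translations are preloaded.  For DISM (SHM_PAGEABLE) only the
      * anon array and the per-page lock-count array are set up here;
      * pages are created and locked later, on demand.
      */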
 362 int
 363 segspt_create(struct seg *seg, caddr_t argsp)
 364 {
 365         int             err;
 366         caddr_t         addr = seg->s_base;
 367         struct spt_data *sptd;
 368         struct  segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
 369         struct anon_map *amp = sptcargs->amp;
 370         struct kshmid   *sp = amp->a_sp;
 371         struct  cred    *cred = CRED();
 372         ulong_t         i, j, anon_index = 0;
 373         pgcnt_t         npages = btopr(amp->size);
 374         struct vnode    *vp;
 375         page_t          **ppa;
 376         uint_t          hat_flags;
 377         size_t          pgsz;
 378         pgcnt_t         pgcnt;
 379         caddr_t         a;
 380         pgcnt_t         pidx;
 381         size_t          sz;
 382         proc_t          *procp = curproc;
 383         rctl_qty_t      lockedbytes = 0;
 384         kproject_t      *proj;
 385 
 386         /*
 387          * We are holding the a_lock on the underlying dummy as,
 388          * so we can make calls to the HAT layer.
 389          */
 390         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 391         ASSERT(sp != NULL);
 392 
 393 #ifdef DEBUG
 394         TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
 395             tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
 396 #endif
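             /*
              * ISM pages are locked for the life of the segment, so adjust
              * the swap/availrmem accounting up front via anon_swap_adjust();
              * it is undone by anon_swap_restore() on failure and when the
              * pages are freed in segspt_free_pages().
              */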
 397         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 398                 if (err = anon_swap_adjust(npages))
 399                         return (err);
 400         }
 401         err = ENOMEM;
 402 
 403         if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
 404                 goto out1;
 405 
 406         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 407                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
 408                     KM_NOSLEEP)) == NULL)
 409                         goto out2;
 410         }
 411 
 412         mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
 413 
 414         if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
 415                 goto out3;
 416 
 417         seg->s_ops = &segspt_ops;
 418         sptd->spt_vp = vp;
 419         sptd->spt_amp = amp;
 420         sptd->spt_prot = sptcargs->prot;
 421         sptd->spt_flags = sptcargs->flags;
 422         seg->s_data = (caddr_t)sptd;
 423         sptd->spt_ppa = NULL;
 424         sptd->spt_ppa_lckcnt = NULL;
 425         seg->s_szc = sptcargs->szc;
 426         cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
 427         sptd->spt_gen = 0;
 428 
 429         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 430         if (seg->s_szc > amp->a_szc) {
 431                 amp->a_szc = seg->s_szc;
 432         }
 433         ANON_LOCK_EXIT(&amp->a_rwlock);
 434 
 435         /*
 436          * Set policy to affect initial allocation of pages in
 437          * anon_map_createpages()
 438          */
 439         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
 440             NULL, 0, ptob(npages));
 441 
 442         if (sptcargs->flags & SHM_PAGEABLE) {
 443                 size_t  share_sz;
 444                 pgcnt_t new_npgs, more_pgs;
 445                 struct anon_hdr *nahp;
 446                 zone_t *zone;
 447 
 448                 share_sz = page_get_pagesize(seg->s_szc);
 449                 if (!IS_P2ALIGNED(amp->size, share_sz)) {
 450                         /*
 451                          * We round up the size of the anon array to a
 452                          * 4M boundary because we always create 4M worth
 453                          * of pages when locking and faulting; that way we
 454                          * don't have to check for corner cases, e.g.
 455                          * whether there is enough space to allocate a
 456                          * full 4M page.
 457                          */
 458                         new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
 459                         more_pgs = new_npgs - npages;
 460 
 461                         /*
 462                          * The zone will never be NULL, as a fully created
 463                          * shm always has an owning zone.
 464                          */
 465                         zone = sp->shm_perm.ipc_zone_ref.zref_zone;
 466                         ASSERT(zone != NULL);
 467                         if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
 468                                 err = ENOMEM;
 469                                 goto out4;
 470                         }
 471 
 472                         nahp = anon_create(new_npgs, ANON_SLEEP);
 473                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 474                         (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
 475                             ANON_SLEEP);
 476                         anon_release(amp->ahp, npages);
 477                         amp->ahp = nahp;
 478                         ASSERT(amp->swresv == ptob(npages));
 479                         amp->swresv = amp->size = ptob(new_npgs);
 480                         ANON_LOCK_EXIT(&amp->a_rwlock);
 481                         npages = new_npgs;
 482                 }
 483 
 484                 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
 485                     sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
 486                 sptd->spt_pcachecnt = 0;
 487                 sptd->spt_realsize = ptob(npages);
 488                 sptcargs->seg_spt = seg;
 489                 return (0);
 490         }
 491 
 492         /*
 493          * get array of pages for each anon slot in amp
 494          */
 495         if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
 496             seg, addr, S_CREATE, cred)) != 0)
 497                 goto out4;
 498 
 499         mutex_enter(&sp->shm_mlock);
 500 
 501         /* May be partially locked, so, count bytes to charge for locking */
 502         for (i = 0; i < npages; i++)
 503                 if (ppa[i]->p_lckcnt == 0)
 504                         lockedbytes += PAGESIZE;
 505 
 506         proj = sp->shm_perm.ipc_proj;
 507 
 508         if (lockedbytes > 0) {
 509                 mutex_enter(&procp->p_lock);
 510                 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
 511                         mutex_exit(&procp->p_lock);
 512                         mutex_exit(&sp->shm_mlock);
 513                         for (i = 0; i < npages; i++)
 514                                 page_unlock(ppa[i]);
 515                         err = ENOMEM;
 516                         goto out4;
 517                 }
 518                 mutex_exit(&procp->p_lock);
 519         }
 520 
 521         /*
 522          * addr is the initial address corresponding to the first page on the ppa list
 523          */
 524         for (i = 0; i < npages; i++) {
 525                 /* attempt to lock all pages */
 526                 if (page_pp_lock(ppa[i], 0, 1) == 0) {
 527                         /*
 528                          * if unable to lock any page, unlock all
 529                          * of them and return error
 530                          */
 531                         for (j = 0; j < i; j++)
 532                                 page_pp_unlock(ppa[j], 0, 1);
 533                         for (i = 0; i < npages; i++)
 534                                 page_unlock(ppa[i]);
 535                         rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
 536                         mutex_exit(&sp->shm_mlock);
 537                         err = ENOMEM;
 538                         goto out4;
 539                 }
 540         }
 541         mutex_exit(&sp->shm_mlock);
 542 
 543         /*
 544          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
 545          * for the entire life of the segment, for example platforms
 546          * that do not support Dynamic Reconfiguration.
 547          */
 548         hat_flags = HAT_LOAD_SHARE;
 549         if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
 550                 hat_flags |= HAT_LOAD_LOCK;
 551 
 552         /*
 553          * Load translations one large page at a time
 554          * to make sure we don't create mappings bigger than the
 555          * segment's size code, in case the underlying pages
 556          * are shared with a segvn segment that uses a bigger
 557          * size code than we do.
 558          */
 559         pgsz = page_get_pagesize(seg->s_szc);
 560         pgcnt = page_get_pagecnt(seg->s_szc);
 561         for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
 562                 sz = MIN(pgsz, ptob(npages - pidx));
 563                 hat_memload_array(seg->s_as->a_hat, a, sz,
 564                     &ppa[pidx], sptd->spt_prot, hat_flags);
 565         }
 566 
 567         /*
 568          * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 569          * we will leave the pages locked SE_SHARED for the life
 570          * of the ISM segment. This will prevent any calls to
 571          * hat_pageunload() on this ISM segment for those platforms.
 572          */
 573         if (!(hat_flags & HAT_LOAD_LOCK)) {
 574                 /*
 575                  * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
 576                  * we no longer need to hold the SE_SHARED lock on the pages,
 577                  * since L_PAGELOCK and F_SOFTLOCK calls will grab the
 578                  * SE_SHARED lock on the pages as necessary.
 579                  */
 580                 for (i = 0; i < npages; i++)
 581                         page_unlock(ppa[i]);
 582         }
 583         sptd->spt_pcachecnt = 0;
 584         kmem_free(ppa, ((sizeof (page_t *)) * npages));
 585         sptd->spt_realsize = ptob(npages);
 586         atomic_add_long(&spt_used, npages);
 587         sptcargs->seg_spt = seg;
 588         return (0);
 589 
 590 out4:
 591         seg->s_data = NULL;
 592         kmem_free(vp, sizeof (*vp));
 593         cv_destroy(&sptd->spt_cv);
 594 out3:
 595         mutex_destroy(&sptd->spt_lock);
 596         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 597                 kmem_free(ppa, (sizeof (*ppa) * npages));
 598 out2:
 599         kmem_free(sptd, sizeof (*sptd));
 600 out1:
 601         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 602                 anon_swap_restore(npages);
 603         return (err);
 604 }
 605 
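     /*
      * Release the pages backing the spt segment: unload the HAT
      * translations, drop the per-page locks and the locked-memory rctl
      * charge (ISM only), and destroy the anon pages themselves.
      */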
 606 /*ARGSUSED*/
 607 void
 608 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
 609 {
 610         struct page     *pp;
 611         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 612         pgcnt_t         npages;
 613         ulong_t         anon_idx;
 614         struct anon_map *amp;
 615         struct anon     *ap;
 616         struct vnode    *vp;
 617         u_offset_t      off;
 618         uint_t          hat_flags;
 619         int             root = 0;
 620         pgcnt_t         pgs, curnpgs = 0;
 621         page_t          *rootpp;
 622         rctl_qty_t      unlocked_bytes = 0;
 623         kproject_t      *proj;
 624         kshmid_t        *sp;
 625 
 626         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 627 
 628         len = P2ROUNDUP(len, PAGESIZE);
 629 
 630         npages = btop(len);
 631 
 632         hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
 633         if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
 634             (sptd->spt_flags & SHM_PAGEABLE)) {
 635                 hat_flags = HAT_UNLOAD_UNMAP;
 636         }
 637 
 638         hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
 639 
 640         amp = sptd->spt_amp;
 641         if (sptd->spt_flags & SHM_PAGEABLE)
 642                 npages = btop(amp->size);
 643 
 644         ASSERT(amp != NULL);
 645 
 646         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 647                 sp = amp->a_sp;
 648                 proj = sp->shm_perm.ipc_proj;
 649                 mutex_enter(&sp->shm_mlock);
 650         }
 651         for (anon_idx = 0; anon_idx < npages; anon_idx++) {
 652                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 653                         if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
 654                                 panic("segspt_free_pages: null app");
 655                                 /*NOTREACHED*/
 656                         }
 657                 } else {
 658                         if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
 659                             == NULL)
 660                                 continue;
 661                 }
 662                 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
 663                 swap_xlate(ap, &vp, &off);
 664 
 665                 /*
 666                  * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
 667                  * the pages are not held SE_SHARED locked at this
 668                  * point.
 669                  *
 670                  * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 671                  * the pages are still held SE_SHARED locked from the
 672                  * original segspt_create().
 673                  *
 674                  * Our goal is to get an SE_EXCL lock on each page, remove
 675                  * the permanent lock on it and invalidate the page.
 676                  */
 677                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 678                         if (hat_flags == HAT_UNLOAD_UNMAP)
 679                                 pp = page_lookup(vp, off, SE_EXCL);
 680                         else {
 681                                 if ((pp = page_find(vp, off)) == NULL) {
 682                                         panic("segspt_free_pages: "
 683                                             "page not locked");
 684                                         /*NOTREACHED*/
 685                                 }
 686                                 if (!page_tryupgrade(pp)) {
 687                                         page_unlock(pp);
 688                                         pp = page_lookup(vp, off, SE_EXCL);
 689                                 }
 690                         }
 691                         if (pp == NULL) {
 692                                 panic("segspt_free_pages: "
 693                                     "page not in the system");
 694                                 /*NOTREACHED*/
 695                         }
 696                         ASSERT(pp->p_lckcnt > 0);
 697                         page_pp_unlock(pp, 0, 1);
 698                         if (pp->p_lckcnt == 0)
 699                                 unlocked_bytes += PAGESIZE;
 700                 } else {
 701                         if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
 702                                 continue;
 703                 }
 704                 /*
 705                  * It's logical to invalidate the pages here as in most cases
 706                  * these were created by segspt.
 707                  */
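                     /*
                      * Large pages must be destroyed as a unit: remember the
                      * root constituent page and count down the remaining
                      * constituents, then call page_destroy_pages() on the
                      * root once the last one has been seen.  Small pages are
                      * simply invalidated via VN_DISPOSE().
                      */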
 708                 if (pp->p_szc != 0) {
 709                         if (root == 0) {
 710                                 ASSERT(curnpgs == 0);
 711                                 root = 1;
 712                                 rootpp = pp;
 713                                 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
 714                                 ASSERT(pgs > 1);
 715                                 ASSERT(IS_P2ALIGNED(pgs, pgs));
 716                                 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
 717                                 curnpgs--;
 718                         } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
 719                                 ASSERT(curnpgs == 1);
 720                                 ASSERT(page_pptonum(pp) ==
 721                                     page_pptonum(rootpp) + (pgs - 1));
 722                                 page_destroy_pages(rootpp);
 723                                 root = 0;
 724                                 curnpgs = 0;
 725                         } else {
 726                                 ASSERT(curnpgs > 1);
 727                                 ASSERT(page_pptonum(pp) ==
 728                                     page_pptonum(rootpp) + (pgs - curnpgs));
 729                                 curnpgs--;
 730                         }
 731                 } else {
 732                         if (root != 0 || curnpgs != 0) {
 733                                 panic("segspt_free_pages: bad large page");
 734                                 /*NOTREACHED*/
 735                         }
 736                         /*
 737                          * Before destroying the pages, we need to take care
 738                          * of the rctl locked memory accounting. For that
 739                          * we need to calculate the unlocked_bytes.
 740                          */
 741                         if (pp->p_lckcnt > 0)
 742                                 unlocked_bytes += PAGESIZE;
 743                         /*LINTED: constant in conditional context */
 744                         VN_DISPOSE(pp, B_INVAL, 0, kcred);
 745                 }
 746         }
 747         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 748                 if (unlocked_bytes > 0)
 749                         rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
 750                 mutex_exit(&sp->shm_mlock);
 751         }
 752         if (root != 0 || curnpgs != 0) {
 753                 panic("segspt_free_pages: bad large page");
 754                 /*NOTREACHED*/
 755         }
 756 
 757         /*
 758          * mark that pages have been released
 759          */
 760         sptd->spt_realsize = 0;
 761 
 762         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 763                 atomic_add_long(&spt_used, -npages);
 764                 anon_swap_restore(npages);
 765         }
 766 }
 767 
 768 /*
 769  * Get memory allocation policy info for specified address in given segment
 770  */
 771 static lgrp_mem_policy_info_t *
 772 segspt_getpolicy(struct seg *seg, caddr_t addr)
 773 {
 774         struct anon_map         *amp;
 775         ulong_t                 anon_index;
 776         lgrp_mem_policy_info_t  *policy_info;
 777         struct spt_data         *spt_data;
 778 
 779         ASSERT(seg != NULL);
 780 
 781         /*
 782          * Get anon_map from segspt
 783          *
 784          * Assume that no lock needs to be held on anon_map, since
 785          * it should be protected by its reference count which must be
 786          * nonzero for an existing segment
 787          * Need to grab readers lock on policy tree though
 788          */
 789         spt_data = (struct spt_data *)seg->s_data;
 790         if (spt_data == NULL)
 791                 return (NULL);
 792         amp = spt_data->spt_amp;
 793         ASSERT(amp->refcnt != 0);
 794 
 795         /*
 796          * Get policy info
 797          *
 798          * Assume starting anon index of 0
 799          */
 800         anon_index = seg_page(seg, addr);
 801         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
 802 
 803         return (policy_info);
 804 }
 805 
 806 /*
 807  * DISM only.
 808  * Return locked pages over a given range.
 809  *
 810  * We will cache all DISM locked pages and save the pplist for the
 811  * entire segment in the ppa field of the underlying DISM segment structure.
 812  * Later, during a call to segspt_reclaim() we will use this ppa array
 813  * to page_unlock() all of the pages and then we will free this ppa list.
 814  */
 815 /*ARGSUSED*/
 816 static int
 817 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
 818     struct page ***ppp, enum lock_type type, enum seg_rw rw)
 819 {
 820         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
 821         struct  seg     *sptseg = shmd->shm_sptseg;
 822         struct  spt_data *sptd = sptseg->s_data;
 823         pgcnt_t pg_idx, npages, tot_npages, npgs;
 824         struct  page **pplist, **pl, **ppa, *pp;
 825         struct  anon_map *amp;
 826         spgcnt_t        an_idx;
 827         int     ret = ENOTSUP;
 828         uint_t  pl_built = 0;
 829         struct  anon *ap;
 830         struct  vnode *vp;
 831         u_offset_t off;
 832         pgcnt_t claim_availrmem = 0;
 833         uint_t  szc;
 834 
 835         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 836         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
 837 
 838         /*
 839          * We want to lock/unlock the entire ISM segment. Therefore,
 840          * we will be using the underlying sptseg and its base address
 841          * and length for the caching arguments.
 842          */
 843         ASSERT(sptseg);
 844         ASSERT(sptd);
 845 
 846         pg_idx = seg_page(seg, addr);
 847         npages = btopr(len);
 848 
 849         /*
 850          * Check if the request is larger than the number of pages
 851          * covered by the amp.
 852          */
 853         if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
 854                 *ppp = NULL;
 855                 return (ENOTSUP);
 856         }
 857 
 858         if (type == L_PAGEUNLOCK) {
 859                 ASSERT(sptd->spt_ppa != NULL);
 860 
 861                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
 862                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 863 
 864                 /*
 865                  * If someone is blocked while unmapping, we purge
 866                  * segment page cache and thus reclaim pplist synchronously
 867                  * without waiting for seg_pasync_thread. This speeds up
 868                  * unmapping in cases where munmap(2) is called, while
 869                  * raw async i/o is still in progress or where a thread
 870                  * exits on data fault in a multithreaded application.
 871                  */
 872                 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
 873                     (AS_ISUNMAPWAIT(seg->s_as) &&
 874                     shmd->shm_softlockcnt > 0)) {
 875                         segspt_purge(seg);
 876                 }
 877                 return (0);
 878         }
 879 
 880         /* The L_PAGELOCK case ... */
 881 
 882         if (sptd->spt_flags & DISM_PPA_CHANGED) {
 883                 segspt_purge(seg);
 884                 /*
 885                  * For DISM the ppa array needs to be rebuilt, since
 886                  * the number of locked pages may have changed.
 887                  */
 888                 *ppp = NULL;
 889                 return (ENOTSUP);
 890         }
 891 
 892         /*
 893          * First try to find pages in segment page cache, without
 894          * holding the segment lock.
 895          */
 896         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 897             S_WRITE, SEGP_FORCE_WIRED);
 898         if (pplist != NULL) {
 899                 ASSERT(sptd->spt_ppa != NULL);
 900                 ASSERT(sptd->spt_ppa == pplist);
 901                 ppa = sptd->spt_ppa;
 902                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 903                         if (ppa[an_idx] == NULL) {
 904                                 seg_pinactive(seg, NULL, seg->s_base,
 905                                     sptd->spt_amp->size, ppa,
 906                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 907                                 *ppp = NULL;
 908                                 return (ENOTSUP);
 909                         }
 910                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 911                                 npgs = page_get_pagecnt(szc);
 912                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 913                         } else {
 914                                 an_idx++;
 915                         }
 916                 }
 917                 /*
 918                  * Since we cache the entire DISM segment, we want to
 919                  * set ppp to point to the first slot that corresponds
 920                  * to the requested addr, i.e. pg_idx.
 921                  */
 922                 *ppp = &(sptd->spt_ppa[pg_idx]);
 923                 return (0);
 924         }
 925 
 926         mutex_enter(&sptd->spt_lock);
 927         /*
 928          * try to find pages in segment page cache with mutex
 929          */
 930         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 931             S_WRITE, SEGP_FORCE_WIRED);
 932         if (pplist != NULL) {
 933                 ASSERT(sptd->spt_ppa != NULL);
 934                 ASSERT(sptd->spt_ppa == pplist);
 935                 ppa = sptd->spt_ppa;
 936                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 937                         if (ppa[an_idx] == NULL) {
 938                                 mutex_exit(&sptd->spt_lock);
 939                                 seg_pinactive(seg, NULL, seg->s_base,
 940                                     sptd->spt_amp->size, ppa,
 941                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 942                                 *ppp = NULL;
 943                                 return (ENOTSUP);
 944                         }
 945                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 946                                 npgs = page_get_pagecnt(szc);
 947                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 948                         } else {
 949                                 an_idx++;
 950                         }
 951                 }
 952                 /*
 953                  * Since we cache the entire DISM segment, we want to
 954                  * set ppp to point to the first slot that corresponds
 955                  * to the requested addr, i.e. pg_idx.
 956                  */
 957                 mutex_exit(&sptd->spt_lock);
 958                 *ppp = &(sptd->spt_ppa[pg_idx]);
 959                 return (0);
 960         }
 961         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
 962             SEGP_FORCE_WIRED) == SEGP_FAIL) {
 963                 mutex_exit(&sptd->spt_lock);
 964                 *ppp = NULL;
 965                 return (ENOTSUP);
 966         }
 967 
 968         /*
 969          * No need to worry about protections because DISM pages are always rw.
 970          */
 971         pl = pplist = NULL;
 972         amp = sptd->spt_amp;
 973 
 974         /*
 975          * Do we need to build the ppa array?
 976          */
 977         if (sptd->spt_ppa == NULL) {
 978                 pgcnt_t lpg_cnt = 0;
 979 
 980                 pl_built = 1;
 981                 tot_npages = btopr(sptd->spt_amp->size);
 982 
 983                 ASSERT(sptd->spt_pcachecnt == 0);
 984                 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
 985                 pl = pplist;
 986 
 987                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 988                 for (an_idx = 0; an_idx < tot_npages; ) {
 989                         ap = anon_get_ptr(amp->ahp, an_idx);
 990                         /*
 991                          * Cache only mlocked pages. For large pages,
 992                          * if one (constituent) page is mlocked,
 993                          * all pages of that large page
 994                          * are cached as well. This allows quick
 995                          * lookups in the ppa array.
 996                          */
 997                         if ((ap != NULL) && (lpg_cnt != 0 ||
 998                             (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
 999 
1000                                 swap_xlate(ap, &vp, &off);
1001                                 pp = page_lookup(vp, off, SE_SHARED);
1002                                 ASSERT(pp != NULL);
1003                                 if (lpg_cnt == 0) {
1004                                         lpg_cnt++;
1005                                         /*
1006                                          * For a small page, we are done --
1007                                          * lpg_cnt is reset to 0 below.
1008                                          *
1009                                          * For a large page, we are guaranteed
1010                                          * to find the anon structures of all
1011                                          * constituent pages and a non-zero
1012                                          * lpg_cnt ensures that we don't test
1013                                          * for mlock for these. We are done
1014                                          * when lpg_cnt reaches (npgs + 1).
1015                                          * If we are not the first constituent
1016                                          * page, restart at the first one.
1017                                          */
1018                                         npgs = page_get_pagecnt(pp->p_szc);
1019                                         if (!IS_P2ALIGNED(an_idx, npgs)) {
1020                                                 an_idx = P2ALIGN(an_idx, npgs);
1021                                                 page_unlock(pp);
1022                                                 continue;
1023                                         }
1024                                 }
1025                                 if (++lpg_cnt > npgs)
1026                                         lpg_cnt = 0;
1027 
1028                                 /*
1029                                  * availrmem is decremented only
1030                                  * for unlocked pages
1031                                  */
1032                                 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1033                                         claim_availrmem++;
1034                                 pplist[an_idx] = pp;
1035                         }
1036                         an_idx++;
1037                 }
1038                 ANON_LOCK_EXIT(&amp->a_rwlock);
1039 
1040                 if (claim_availrmem) {
1041                         mutex_enter(&freemem_lock);
1042                         if (availrmem < tune.t_minarmem + claim_availrmem) {
1043                                 mutex_exit(&freemem_lock);
1044                                 ret = ENOTSUP;
1045                                 claim_availrmem = 0;
1046                                 goto insert_fail;
1047                         } else {
1048                                 availrmem -= claim_availrmem;
1049                         }
1050                         mutex_exit(&freemem_lock);
1051                 }
1052 
1053                 sptd->spt_ppa = pl;
1054         } else {
1055                 /*
1056                  * We already have a valid ppa[].
1057                  */
1058                 pl = sptd->spt_ppa;
1059         }
1060 
1061         ASSERT(pl != NULL);
1062 
1063         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1064             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1065             segspt_reclaim);
1066         if (ret == SEGP_FAIL) {
1067                 /*
1068                  * seg_pinsert failed. We return
1069                  * ENOTSUP, so that the as_pagelock() code will
1070                  * then try the slower F_SOFTLOCK path.
1071                  */
1072                 if (pl_built) {
1073                         /*
1074                          * No one else has referenced the ppa[].
1075                          * We created it and we need to destroy it.
1076                          */
1077                         sptd->spt_ppa = NULL;
1078                 }
1079                 ret = ENOTSUP;
1080                 goto insert_fail;
1081         }
1082 
1083         /*
1084          * In either case, we increment softlockcnt on the 'real' segment.
1085          */
1086         sptd->spt_pcachecnt++;
1087         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1088 
1089         ppa = sptd->spt_ppa;
1090         for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1091                 if (ppa[an_idx] == NULL) {
1092                         mutex_exit(&sptd->spt_lock);
1093                         seg_pinactive(seg, NULL, seg->s_base,
1094                             sptd->spt_amp->size,
1095                             pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1096                         *ppp = NULL;
1097                         return (ENOTSUP);
1098                 }
1099                 if ((szc = ppa[an_idx]->p_szc) != 0) {
1100                         npgs = page_get_pagecnt(szc);
1101                         an_idx = P2ROUNDUP(an_idx + 1, npgs);
1102                 } else {
1103                         an_idx++;
1104                 }
1105         }
1106         /*
1107          * We can now drop the sptd->spt_lock since the ppa[]
1108          * exists and we have incremented pcachecnt.
1109          */
1110         mutex_exit(&sptd->spt_lock);
1111 
1112         /*
1113          * Since we cache the entire segment, we want to
1114          * set ppp to point to the first slot that corresponds
1115          * to the requested addr, i.e. pg_idx.
1116          */
1117         *ppp = &(sptd->spt_ppa[pg_idx]);
1118         return (0);
1119 
1120 insert_fail:
1121         /*
1122          * We will only reach this code if we tried and failed.
1123          *
1124          * And we can drop the lock on the dummy seg, once we've failed
1125          * to set up a new ppa[].
1126          */
1127         mutex_exit(&sptd->spt_lock);
1128 
1129         if (pl_built) {
1130                 if (claim_availrmem) {
1131                         mutex_enter(&freemem_lock);
1132                         availrmem += claim_availrmem;
1133                         mutex_exit(&freemem_lock);
1134                 }
1135 
1136                 /*
1137                  * We created pl and we need to destroy it.
1138                  */
1139                 pplist = pl;
1140                 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1141                         if (pplist[an_idx] != NULL)
1142                                 page_unlock(pplist[an_idx]);
1143                 }
1144                 kmem_free(pl, sizeof (page_t *) * tot_npages);
1145         }
1146 
1147         if (shmd->shm_softlockcnt <= 0) {
1148                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1149                         mutex_enter(&seg->s_as->a_contents);
1150                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1151                                 AS_CLRUNMAPWAIT(seg->s_as);
1152                                 cv_broadcast(&seg->s_as->a_cv);
1153                         }
1154                         mutex_exit(&seg->s_as->a_contents);
1155                 }
1156         }
1157         *ppp = NULL;
1158         return (ret);
1159 }
1160 
1161 
1162 
1163 /*
1164  * Return locked pages over a given range.
1165  *
1166  * We will cache the entire ISM segment and save the pplist for the
1167  * entire segment in the ppa field of the underlying ISM segment structure.
1168  * Later, during a call to segspt_reclaim() we will use this ppa array
1169  * to page_unlock() all of the pages and then we will free this ppa list.
1170  */
1171 /*ARGSUSED*/
1172 static int
1173 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1174     struct page ***ppp, enum lock_type type, enum seg_rw rw)
1175 {
1176         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1177         struct seg      *sptseg = shmd->shm_sptseg;
1178         struct spt_data *sptd = sptseg->s_data;
1179         pgcnt_t np, page_index, npages;
1180         caddr_t a, spt_base;
1181         struct page **pplist, **pl, *pp;
1182         struct anon_map *amp;
1183         ulong_t anon_index;
1184         int ret = ENOTSUP;
1185         uint_t  pl_built = 0;
1186         struct anon *ap;
1187         struct vnode *vp;
1188         u_offset_t off;
1189 
1190         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1191         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1192 
1193 
1194         /*
1195          * We want to lock/unlock the entire ISM segment. Therefore,
1196          * we will be using the underlying sptseg and its base address
1197          * and length for the caching arguments.
1198          */
1199         ASSERT(sptseg);
1200         ASSERT(sptd);
1201 
1202         if (sptd->spt_flags & SHM_PAGEABLE) {
1203                 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1204         }
1205 
1206         page_index = seg_page(seg, addr);
1207         npages = btopr(len);
1208 
1209         /*
1210          * Check if the request is larger than the number of pages
1211          * covered by the amp.
1212          */
1213         if (page_index + npages > btopr(sptd->spt_amp->size)) {
1214                 *ppp = NULL;
1215                 return (ENOTSUP);
1216         }
1217 
1218         if (type == L_PAGEUNLOCK) {
1219 
1220                 ASSERT(sptd->spt_ppa != NULL);
1221 
1222                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1223                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1224 
1225                 /*
1226                  * If someone is blocked while unmapping, we purge
1227                  * segment page cache and thus reclaim pplist synchronously
1228                  * without waiting for seg_pasync_thread. This speeds up
1229                  * unmapping in cases where munmap(2) is called, while
1230                  * raw async i/o is still in progress or where a thread
1231                  * exits on data fault in a multithreaded application.
1232                  */
1233                 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1234                         segspt_purge(seg);
1235                 }
1236                 return (0);
1237         }
1238 
1239         /* The L_PAGELOCK case... */
1240 
1241         /*
1242          * First try to find pages in segment page cache, without
1243          * holding the segment lock.
1244          */
1245         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1246             S_WRITE, SEGP_FORCE_WIRED);
1247         if (pplist != NULL) {
1248                 ASSERT(sptd->spt_ppa == pplist);
1249                 ASSERT(sptd->spt_ppa[page_index]);
1250                 /*
1251                  * Since we cache the entire ISM segment, we want to
1252                  * set ppp to point to the first slot that corresponds
1253                  * to the requested addr, i.e. page_index.
1254                  */
1255                 *ppp = &(sptd->spt_ppa[page_index]);
1256                 return (0);
1257         }
1258 
1259         mutex_enter(&sptd->spt_lock);
1260 
1261         /*
1262          * try to find pages in segment page cache
1263          */
1264         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1265             S_WRITE, SEGP_FORCE_WIRED);
1266         if (pplist != NULL) {
1267                 ASSERT(sptd->spt_ppa == pplist);
1268                 /*
1269                  * Since we cache the entire segment, we want to
1270                  * set ppp to point to the first slot that corresponds
1271                  * to the requested addr, i.e. page_index.
1272                  */
1273                 mutex_exit(&sptd->spt_lock);
1274                 *ppp = &(sptd->spt_ppa[page_index]);
1275                 return (0);
1276         }
1277 
1278         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1279             SEGP_FORCE_WIRED) == SEGP_FAIL) {
1280                 mutex_exit(&sptd->spt_lock);
1281                 *ppp = NULL;
1282                 return (ENOTSUP);
1283         }
1284 
1285         /*
1286          * No need to worry about protections because ISM pages
1287          * are always rw.
1288          */
1289         pl = pplist = NULL;
1290 
1291         /*
1292          * Do we need to build the ppa array?
1293          */
1294         if (sptd->spt_ppa == NULL) {
1295                 ASSERT(sptd->spt_ppa == pplist);
1296 
1297                 spt_base = sptseg->s_base;
1298                 pl_built = 1;
1299 
1300                 /*
1301                  * availrmem is decremented once during anon_swap_adjust()
1302                  * and is incremented during the anon_unresv(), which is
1303                  * called from shm_rm_amp() when the segment is destroyed.
1304                  */
1305                 amp = sptd->spt_amp;
1306                 ASSERT(amp != NULL);
1307 
1308                 /* pcachecnt is protected by sptd->spt_lock */
1309                 ASSERT(sptd->spt_pcachecnt == 0);
1310                 pplist = kmem_zalloc(sizeof (page_t *)
1311                     * btopr(sptd->spt_amp->size), KM_SLEEP);
1312                 pl = pplist;
1313 
1314                 anon_index = seg_page(sptseg, spt_base);
1315 
1316                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1317                 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1318                     a += PAGESIZE, anon_index++, pplist++) {
1319                         ap = anon_get_ptr(amp->ahp, anon_index);
1320                         ASSERT(ap != NULL);
1321                         swap_xlate(ap, &vp, &off);
1322                         pp = page_lookup(vp, off, SE_SHARED);
1323                         ASSERT(pp != NULL);
1324                         *pplist = pp;
1325                 }
1326                 ANON_LOCK_EXIT(&amp->a_rwlock);
1327 
1328                 if (a < (spt_base + sptd->spt_amp->size)) {
1329                         ret = ENOTSUP;
1330                         goto insert_fail;
1331                 }
1332                 sptd->spt_ppa = pl;
1333         } else {
1334                 /*
1335                  * We already have a valid ppa[].
1336                  */
1337                 pl = sptd->spt_ppa;
1338         }
1339 
1340         ASSERT(pl != NULL);
1341 
1342         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1343             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1344             segspt_reclaim);
1345         if (ret == SEGP_FAIL) {
1346                 /*
1347                  * seg_pinsert failed. We return
1348                  * ENOTSUP, so that the as_pagelock() code will
1349                  * then try the slower F_SOFTLOCK path.
1350                  */
1351                 if (pl_built) {
1352                         /*
1353                          * No one else has referenced the ppa[].
1354                          * We created it and we need to destroy it.
1355                          */
1356                         sptd->spt_ppa = NULL;
1357                 }
1358                 ret = ENOTSUP;
1359                 goto insert_fail;
1360         }
1361 
1362         /*
1363          * In either case, we increment softlockcnt on the 'real' segment.
1364          */
1365         sptd->spt_pcachecnt++;
1366         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1367 
1368         /*
1369          * We can now drop the sptd->spt_lock since the ppa[]
1370          * exists and we have incremented pcachecnt.
1371          */
1372         mutex_exit(&sptd->spt_lock);
1373 
1374         /*
1375          * Since we cache the entire segment, we want to
1376          * set ppp to point to the first slot that corresponds
1377          * to the requested addr, i.e. page_index.
1378          */
1379         *ppp = &(sptd->spt_ppa[page_index]);
1380         return (0);
1381 
1382 insert_fail:
1383         /*
1384          * We only reach this code if we tried and failed.
1385          *
1386          * We can drop the lock on the dummy seg now that we have failed
1387          * to set up a new ppa[].
1388          */
1389         mutex_exit(&sptd->spt_lock);
1390 
1391         if (pl_built) {
1392                 /*
1393                  * We created pl and we need to destroy it.
1394                  */
1395                 pplist = pl;
1396                 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
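                     /*
                      * np is the number of pages we looked up (and thus
                      * SE_SHARED locked) before bailing out; drop just
                      * those page locks.
                      */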
1397                 while (np) {
1398                         page_unlock(*pplist);
1399                         np--;
1400                         pplist++;
1401                 }
1402                 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1403         }
1404         if (shmd->shm_softlockcnt <= 0) {
1405                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1406                         mutex_enter(&seg->s_as->a_contents);
1407                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1408                                 AS_CLRUNMAPWAIT(seg->s_as);
1409                                 cv_broadcast(&seg->s_as->a_cv);
1410                         }
1411                         mutex_exit(&seg->s_as->a_contents);
1412                 }
1413         }
1414         *ppp = NULL;
1415         return (ret);
1416 }
1417 
1418 /*
1419  * purge any cached pages in the I/O page cache
1420  */
1421 static void
1422 segspt_purge(struct seg *seg)
1423 {
1424         seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1425 }
1426 
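     /*
      * Reclaim callback registered with seg_pinsert().  The last reference
      * (spt_pcachecnt reaching 0) drops the SE_SHARED page locks and frees
      * the cached ppa[]; every call decrements softlockcnt and may wake
      * waiting unmappers.
      */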
1427 static int
1428 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1429         enum seg_rw rw, int async)
1430 {
1431         struct seg *seg = (struct seg *)ptag;
1432         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
1433         struct  seg     *sptseg;
1434         struct  spt_data *sptd;
1435         pgcnt_t npages, i, free_availrmem = 0;
1436         int     done = 0;
1437 
1438 #ifdef lint
1439         addr = addr;
1440 #endif
1441         sptseg = shmd->shm_sptseg;
1442         sptd = sptseg->s_data;
1443         npages = (len >> PAGESHIFT);
1444         ASSERT(npages);
1445         ASSERT(sptd->spt_pcachecnt != 0);
1446         ASSERT(sptd->spt_ppa == pplist);
1447         ASSERT(npages == btopr(sptd->spt_amp->size));
1448         ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1449 
1450         /*
1451          * Acquire the lock on the dummy seg and destroy the
1452          * ppa array IF this is the last pcachecnt.
1453          */
1454         mutex_enter(&sptd->spt_lock);
1455         if (--sptd->spt_pcachecnt == 0) {
1456                 for (i = 0; i < npages; i++) {
1457                         if (pplist[i] == NULL) {
1458                                 continue;
1459                         }
1460                         if (rw == S_WRITE) {
1461                                 hat_setrefmod(pplist[i]);
1462                         } else {
1463                                 hat_setref(pplist[i]);
1464                         }
1465                         if ((sptd->spt_flags & SHM_PAGEABLE) &&
1466                             (sptd->spt_ppa_lckcnt[i] == 0))
1467                                 free_availrmem++;
1468                         page_unlock(pplist[i]);
1469                 }
1470                 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1471                         mutex_enter(&freemem_lock);
1472                         availrmem += free_availrmem;
1473                         mutex_exit(&freemem_lock);
1474                 }
1475                 /*
1476                  * Since we want to cache/uncache the entire ISM segment,
1477                  * we will track the pplist in a segspt specific field
1478                  * ppa, that is initialized at the time we add an entry to
1479                  * the cache.
1480                  */
1481                 ASSERT(sptd->spt_pcachecnt == 0);
1482                 kmem_free(pplist, sizeof (page_t *) * npages);
1483                 sptd->spt_ppa = NULL;
1484                 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1485                 sptd->spt_gen++;
1486                 cv_broadcast(&sptd->spt_cv);
1487                 done = 1;
1488         }
1489         mutex_exit(&sptd->spt_lock);
1490 
1491         /*
1492          * If we are the pcache async thread or were called via
1493          * seg_ppurge_wiredpp(), we may not hold the AS lock (in this case
1494          * the async argument is not 0). That means that if softlockcnt
1495          * drops to 0 after the decrement below, the address space may get
1496          * freed. We can't allow that, since after the softlock decrement
1497          * to 0 we still need to access the as structure for a possible
1498          * wakeup of unmap waiters. To prevent the as from disappearing we
1499          * take this segment's shm_segfree_syncmtx. segspt_shmfree() also
1500          * takes this mutex as a barrier to make sure this routine
1501          * completes before the segment is freed.
1502          *
1503          * The second complication in the async case is the possibility of
1504          * a missed wakeup of an unmap wait thread. When we don't hold the
1505          * as lock here, we may take a_contents before an unmap wait thread
1506          * that was first to see that softlockcnt was still not 0, and so
1507          * fail to wake it up. To avoid this race, if async is not 0 and we
1508          * drop softlockcnt to 0, we set the nounmapwait flag in the as
1509          * structure; the unmapwait thread will not block if this flag is set.
1510          */
1511         if (async)
1512                 mutex_enter(&shmd->shm_segfree_syncmtx);
1513 
1514         /*
1515          * Now decrement softlockcnt.
1516          */
1517         ASSERT(shmd->shm_softlockcnt > 0);
1518         atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1519 
1520         if (shmd->shm_softlockcnt <= 0) {
1521                 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1522                         mutex_enter(&seg->s_as->a_contents);
1523                         if (async)
1524                                 AS_SETNOUNMAPWAIT(seg->s_as);
1525                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1526                                 AS_CLRUNMAPWAIT(seg->s_as);
1527                                 cv_broadcast(&seg->s_as->a_cv);
1528                         }
1529                         mutex_exit(&seg->s_as->a_contents);
1530                 }
1531         }
1532 
1533         if (async)
1534                 mutex_exit(&shmd->shm_segfree_syncmtx);
1535 
1536         return (done);
1537 }
1538 
1539 /*
1540  * Do a F_SOFTUNLOCK call over the range requested.
1541  * The range must have already been F_SOFTLOCK'ed.
1542  *
1543  * The calls to acquire and release the anon map lock mutex were
1544  * removed in order to avoid a deadly embrace during a DR
1545  * memory delete operation.  (E.g., DR blocks while waiting for an
1546  * exclusive lock on a page that is being used for kaio; the
1547  * thread that will complete the kaio and call segspt_softunlock
1548  * blocks on the anon map lock; another thread holding the anon
1549  * map lock blocks on another page lock via the segspt_shmfault
1550  * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1551  *
1552  * The appropriateness of the removal is based upon the following:
1553  * 1. If we are holding a segment's reader lock and the page is held
1554  * shared, then the corresponding element in anonmap which points to
1555  * anon struct cannot change and there is no need to acquire the
1556  * anonymous map lock.
1557  * 2. Threads in segspt_softunlock have a reader lock on the segment
1558  * and already have the shared page lock, so we are guaranteed that
1559  * the anon map slot cannot change and therefore can call anon_get_ptr()
1560  * without grabbing the anonymous map lock.
1561  * 3. Threads that softlock a shared page break copy-on-write, even if
1562  * it's a read.  Thus cow faults can be ignored with respect to soft
1563  * unlocking, since the breaking of cow means that the anon slot(s) will
1564  * not be shared.
1565  */
1566 static void
1567 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1568         size_t len, enum seg_rw rw)
1569 {
1570         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1571         struct seg      *sptseg;
1572         struct spt_data *sptd;
1573         page_t *pp;
1574         caddr_t adr;
1575         struct vnode *vp;
1576         u_offset_t offset;
1577         ulong_t anon_index;
1578         struct anon_map *amp;           /* XXX - for locknest */
1579         struct anon *ap = NULL;
1580         pgcnt_t npages;
1581 
1582         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1583 
1584         sptseg = shmd->shm_sptseg;
1585         sptd = sptseg->s_data;
1586 
1587         /*
1588          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1589          * and therefore their pages are SE_SHARED locked
1590          * for the entire life of the segment.
1591          */
1592         if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1593             ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1594                 goto softlock_decrement;
1595         }
1596 
1597         /*
1598          * Any thread is free to do a page_find and
1599          * page_unlock() on the pages within this seg.
1600          *
1601          * We are already holding the as->a_lock on the user's
1602          * real segment, but we need to hold the a_lock on the
1603          * underlying dummy as. This is mostly to satisfy the
1604          * underlying HAT layer.
1605          */
1606         AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1607         hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1608         AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1609 
1610         amp = sptd->spt_amp;
1611         ASSERT(amp != NULL);
1612         anon_index = seg_page(sptseg, sptseg_addr);
1613 
1614         for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1615                 ap = anon_get_ptr(amp->ahp, anon_index++);
1616                 ASSERT(ap != NULL);
1617                 swap_xlate(ap, &vp, &offset);
1618 
1619                 /*
1620                  * Use page_find() instead of page_lookup() to
1621                  * find the page since we know that it has a
1622                  * "shared" lock.
1623                  */
1624                 pp = page_find(vp, offset);
1625                 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1626                 if (pp == NULL) {
1627                         panic("segspt_softunlock: "
1628                             "addr %p, ap %p, vp %p, off %llx",
1629                             (void *)adr, (void *)ap, (void *)vp, offset);
1630                         /*NOTREACHED*/
1631                 }
1632 
1633                 if (rw == S_WRITE) {
1634                         hat_setrefmod(pp);
1635                 } else if (rw != S_OTHER) {
1636                         hat_setref(pp);
1637                 }
1638                 page_unlock(pp);
1639         }
1640 
1641 softlock_decrement:
1642         npages = btopr(len);
1643         ASSERT(shmd->shm_softlockcnt >= npages);
1644         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1645         if (shmd->shm_softlockcnt == 0) {
1646                 /*
1647                  * All SOFTLOCKS are gone. Wakeup any waiting
1648                  * unmappers so they can try again to unmap.
1649                  * Check for waiters first without the mutex
1650                  * held so we don't always grab the mutex on
1651                  * softunlocks.
1652                  */
1653                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1654                         mutex_enter(&seg->s_as->a_contents);
1655                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1656                                 AS_CLRUNMAPWAIT(seg->s_as);
1657                                 cv_broadcast(&seg->s_as->a_cv);
1658                         }
1659                         mutex_exit(&seg->s_as->a_contents);
1660                 }
1661         }
1662 }
1663 
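     /*
      * Attach a process to the shared segment: allocate the per-attach
      * shm_data, establish the shared HAT mappings via hat_share() where
      * applicable, and take a hold on the shared anon_map.
      */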
1664 int
1665 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1666 {
1667         struct shm_data *shmd_arg = (struct shm_data *)argsp;
1668         struct shm_data *shmd;
1669         struct anon_map *shm_amp = shmd_arg->shm_amp;
1670         struct spt_data *sptd;
1671         int error = 0;
1672 
1673         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1674 
1675         shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1676         if (shmd == NULL)
1677                 return (ENOMEM);
1678 
1679         shmd->shm_sptas = shmd_arg->shm_sptas;
1680         shmd->shm_amp = shm_amp;
1681         shmd->shm_sptseg = shmd_arg->shm_sptseg;
1682 
1683         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1684             NULL, 0, seg->s_size);
1685 
1686         mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1687 
1688         seg->s_data = (void *)shmd;
1689         seg->s_ops = &segspt_shmops;
1690         seg->s_szc = shmd->shm_sptseg->s_szc;
1691         sptd = shmd->shm_sptseg->s_data;
1692 
1693         if (sptd->spt_flags & SHM_PAGEABLE) {
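                     /*
                      * For DISM, shm_vpage is a per-attach array with one
                      * byte per page; its DISM_PG_LOCKED flag tracks which
                      * pages this process has locked.
                      */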
1694                 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1695                     KM_NOSLEEP)) == NULL) {
1696                         seg->s_data = (void *)NULL;
1697                         kmem_free(shmd, (sizeof (*shmd)));
1698                         return (ENOMEM);
1699                 }
1700                 shmd->shm_lckpgs = 0;
1701                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1702                         if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1703                             shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1704                             seg->s_size, seg->s_szc)) != 0) {
1705                                 kmem_free(shmd->shm_vpage,
1706                                     btopr(shm_amp->size));
1707                         }
1708                 }
1709         } else {
1710                 error = hat_share(seg->s_as->a_hat, seg->s_base,
1711                     shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1712                     seg->s_size, seg->s_szc);
1713         }
1714         if (error) {
1715                 seg->s_szc = 0;
1716                 seg->s_data = (void *)NULL;
1717                 kmem_free(shmd, (sizeof (*shmd)));
1718         } else {
1719                 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1720                 shm_amp->refcnt++;
1721                 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1722         }
1723         return (error);
1724 }
1725 
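     /*
      * Unmap the process-visible shared segment.  Any remaining softlocks
      * force a pcache purge and retry (or EAGAIN); otherwise drop DISM page
      * locks, tear down the shared HAT mappings and free the segment.
      */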
1726 int
1727 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1728 {
1729         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1730         int reclaim = 1;
1731 
1732         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1733 retry:
1734         if (shmd->shm_softlockcnt > 0) {
1735                 if (reclaim == 1) {
1736                         segspt_purge(seg);
1737                         reclaim = 0;
1738                         goto retry;
1739                 }
1740                 return (EAGAIN);
1741         }
1742 
1743         if (ssize != seg->s_size) {
1744 #ifdef DEBUG
1745                 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1746                     ssize, seg->s_size);
1747 #endif
1748                 return (EINVAL);
1749         }
1750 
1751         (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1752             NULL, 0);
1753         hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1754 
1755         seg_free(seg);
1756 
1757         return (0);
1758 }
1759 
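     /*
      * Free the per-attach shm_data after the segment has been unmapped,
      * dropping the anon_map reference taken at attach/dup time.
      */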
1760 void
1761 segspt_shmfree(struct seg *seg)
1762 {
1763         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1764         struct anon_map *shm_amp = shmd->shm_amp;
1765 
1766         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1767 
1768         (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1769             MC_UNLOCK, NULL, 0);
1770 
1771         /*
1772          * Need to increment refcnt when attaching
1773          * and decrement when detaching because of dup().
1774          */
1775         ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1776         shm_amp->refcnt--;
1777         ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1778 
1779         if (shmd->shm_vpage) {       /* only for DISM */
1780                 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1781                 shmd->shm_vpage = NULL;
1782         }
1783 
1784         /*
1785          * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1786          * still working with this segment without holding as lock.
1787          */
1788         ASSERT(shmd->shm_softlockcnt == 0);
1789         mutex_enter(&shmd->shm_segfree_syncmtx);
1790         mutex_destroy(&shmd->shm_segfree_syncmtx);
1791 
1792         kmem_free(shmd, sizeof (*shmd));
1793 }
1794 
1795 /*ARGSUSED*/
1796 int
1797 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1798 {
1799         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1800 
1801         /*
1802          * A shared page table is more than a shared mapping:
1803          * individual processes sharing the page tables can't change
1804          * protections because there is only one set of page tables.
1805          * This will be allowed once private page tables are
1806          * supported.
1807          */
1808 /* need to return the correct error status? */
1809         return (0);
1810 }
1811 
1812 
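     /*
      * Fault handler for pageable (DISM) segments.  Faults are resolved
      * against the underlying dummy spt segment in large-page-sized chunks
      * and loaded into the HAT with hat_memload_array().
      */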
1813 faultcode_t
1814 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1815     size_t len, enum fault_type type, enum seg_rw rw)
1816 {
1817         struct  shm_data        *shmd = (struct shm_data *)seg->s_data;
1818         struct  seg             *sptseg = shmd->shm_sptseg;
1819         struct  as              *curspt = shmd->shm_sptas;
1820         struct  spt_data        *sptd = sptseg->s_data;
1821         pgcnt_t npages;
1822         size_t  size;
1823         caddr_t segspt_addr, shm_addr;
1824         page_t  **ppa;
1825         int     i;
1826         ulong_t an_idx = 0;
1827         int     err = 0;
1828         int     dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1829         size_t  pgsz;
1830         pgcnt_t pgcnt;
1831         caddr_t a;
1832         pgcnt_t pidx;
1833 
1834 #ifdef lint
1835         hat = hat;
1836 #endif
1837         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1838 
1839         /*
1840          * Because of the way spt is implemented,
1841          * the realsize of the segment does not have to be
1842          * equal to the segment size itself. The segment size is
1843          * often in multiples of a page size larger than PAGESIZE.
1844          * The realsize is rounded up to the nearest PAGESIZE
1845          * based on what the user requested. This is a bit of
1846          * ugliness that is historical but not easily fixed
1847          * without re-designing the higher levels of ISM.
1848          */
1849         ASSERT(addr >= seg->s_base);
1850         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1851                 return (FC_NOMAP);
1852         /*
1853          * For all of the following cases except F_PROT, we need to
1854          * make any necessary adjustments to addr and len
1855          * and get all of the necessary page_t's into an array called ppa[].
1856          *
1857          * The code in shmat() forces base addr and len of ISM segment
1858          * to be aligned to largest page size supported. Therefore,
1859          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1860          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1861          * in large pagesize chunks, or else we will screw up the HAT
1862          * layer by calling hat_memload_array() with differing page sizes
1863          * over a given virtual range.
1864          */
1865         pgsz = page_get_pagesize(sptseg->s_szc);
1866         pgcnt = page_get_pagecnt(sptseg->s_szc);
1867         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1868         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1869         npages = btopr(size);
1870 
1871         /*
1872          * Now we need to convert from addr in segshm to addr in segspt.
1873          */
1874         an_idx = seg_page(seg, shm_addr);
1875         segspt_addr = sptseg->s_base + ptob(an_idx);
1876 
1877         ASSERT((segspt_addr + ptob(npages)) <=
1878             (sptseg->s_base + sptd->spt_realsize));
1879         ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1880 
1881         switch (type) {
1882 
1883         case F_SOFTLOCK:
1884 
1885                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1886                 /*
1887                  * Fall through to the F_INVAL case to load up the hat layer
1888                  * entries with the HAT_LOAD_LOCK flag.
1889                  */
1890                 /* FALLTHRU */
1891         case F_INVAL:
1892 
1893                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1894                         return (FC_NOMAP);
1895 
1896                 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1897 
1898                 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1899                 if (err != 0) {
1900                         if (type == F_SOFTLOCK) {
1901                                 atomic_add_long((ulong_t *)(
1902                                     &(shmd->shm_softlockcnt)), -npages);
1903                         }
1904                         goto dism_err;
1905                 }
1906                 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1907                 a = segspt_addr;
1908                 pidx = 0;
1909                 if (type == F_SOFTLOCK) {
1910 
1911                         /*
1912                          * Load up the translation keeping it
1913                          * locked and don't unlock the page.
1914                          */
1915                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1916                                 hat_memload_array(sptseg->s_as->a_hat,
1917                                     a, pgsz, &ppa[pidx], sptd->spt_prot,
1918                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1919                         }
1920                 } else {
1921                         if (hat == seg->s_as->a_hat) {
1922 
1923                                 /*
1924                                  * Migrate pages marked for migration
1925                                  */
1926                                 if (lgrp_optimizations())
1927                                         page_migrate(seg, shm_addr, ppa,
1928                                             npages);
1929 
1930                                 /* CPU HAT */
1931                                 for (; pidx < npages;
1932                                     a += pgsz, pidx += pgcnt) {
1933                                         hat_memload_array(sptseg->s_as->a_hat,
1934                                             a, pgsz, &ppa[pidx],
1935                                             sptd->spt_prot,
1936                                             HAT_LOAD_SHARE);
1937                                 }
1938                         } else {
1939                                 /* XHAT. Pass real address */
1940                                 hat_memload_array(hat, shm_addr,
1941                                     size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);
1942                         }
1943 
1944                         /*
1945                          * And now drop the SE_SHARED lock(s).
1946                          */
1947                         if (dyn_ism_unmap) {
1948                                 for (i = 0; i < npages; i++) {
1949                                         page_unlock(ppa[i]);
1950                                 }
1951                         }
1952                 }
1953 
1954                 if (!dyn_ism_unmap) {
1955                         if (hat_share(seg->s_as->a_hat, shm_addr,
1956                             curspt->a_hat, segspt_addr, ptob(npages),
1957                             seg->s_szc) != 0) {
1958                                 panic("hat_share err in DISM fault");
1959                                 /* NOTREACHED */
1960                         }
1961                         if (type == F_INVAL) {
1962                                 for (i = 0; i < npages; i++) {
1963                                         page_unlock(ppa[i]);
1964                                 }
1965                         }
1966                 }
1967                 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1968 dism_err:
1969                 kmem_free(ppa, npages * sizeof (page_t *));
1970                 return (err);
1971 
1972         case F_SOFTUNLOCK:
1973 
1974                 /*
1975                  * This is a bit ugly, we pass in the real seg pointer,
1976                  * but the segspt_addr is the virtual address within the
1977                  * dummy seg.
1978                  */
1979                 segspt_softunlock(seg, segspt_addr, size, rw);
1980                 return (0);
1981 
1982         case F_PROT:
1983 
1984                 /*
1985                  * This takes care of the unusual case where a user
1986                  * allocates a stack in shared memory and a register
1987                  * window overflow is written to that stack page before
1988                  * it is otherwise modified.
1989                  *
1990                  * We can get away with this because ISM segments are
1991                  * always rw. Other than this unusual case, there
1992                  * should be no instances of protection violations.
1993                  */
1994                 return (0);
1995 
1996         default:
1997 #ifdef DEBUG
1998                 panic("segspt_dismfault default type?");
1999 #else
2000                 return (FC_NOMAP);
2001 #endif
2002         }
2003 }
2004 
2005 
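     /*
      * Fault handler for the process-visible shared segment.  DISM faults
      * are handed off to segspt_dismfault(); ISM faults are resolved here
      * against the underlying dummy spt segment.
      */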
2006 faultcode_t
2007 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2008     size_t len, enum fault_type type, enum seg_rw rw)
2009 {
2010         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
2011         struct seg              *sptseg = shmd->shm_sptseg;
2012         struct as               *curspt = shmd->shm_sptas;
2013         struct spt_data         *sptd   = sptseg->s_data;
2014         pgcnt_t npages;
2015         size_t size;
2016         caddr_t sptseg_addr, shm_addr;
2017         page_t *pp, **ppa;
2018         int     i;
2019         u_offset_t offset;
2020         ulong_t anon_index = 0;
2021         struct vnode *vp;
2022         struct anon_map *amp;           /* XXX - for locknest */
2023         struct anon *ap = NULL;
2024         size_t          pgsz;
2025         pgcnt_t         pgcnt;
2026         caddr_t         a;
2027         pgcnt_t         pidx;
2028         size_t          sz;
2029 
2030 #ifdef lint
2031         hat = hat;
2032 #endif
2033 
2034         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2035 
2036         if (sptd->spt_flags & SHM_PAGEABLE) {
2037                 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2038         }
2039 
2040         /*
2041          * Because of the way spt is implemented,
2042          * the realsize of the segment does not have to be
2043          * equal to the segment size itself. The segment size is
2044          * often in multiples of a page size larger than PAGESIZE.
2045          * The realsize is rounded up to the nearest PAGESIZE
2046          * based on what the user requested. This is a bit of
2047          * ugliness that is historical but not easily fixed
2048          * without re-designing the higher levels of ISM.
2049          */
2050         ASSERT(addr >= seg->s_base);
2051         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2052                 return (FC_NOMAP);
2053         /*
2054          * For all of the following cases except F_PROT, we need to
2055          * make any necessary adjustments to addr and len
2056          * and get all of the necessary page_t's into an array called ppa[].
2057          *
2058          * The code in shmat() forces base addr and len of ISM segment
2059          * to be aligned to largest page size supported. Therefore,
2060          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2061          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2062          * in large pagesize chunks, or else we will screw up the HAT
2063          * layer by calling hat_memload_array() with differing page sizes
2064          * over a given virtual range.
2065          */
2066         pgsz = page_get_pagesize(sptseg->s_szc);
2067         pgcnt = page_get_pagecnt(sptseg->s_szc);
2068         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2069         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2070         npages = btopr(size);
2071 
2072         /*
2073          * Now we need to convert from addr in segshm to addr in segspt.
2074          */
2075         anon_index = seg_page(seg, shm_addr);
2076         sptseg_addr = sptseg->s_base + ptob(anon_index);
2077 
2078         /*
2079          * And now we may have to adjust npages downward if we have
2080          * exceeded the realsize of the segment or initial anon
2081          * allocations.
2082          */
2083         if ((sptseg_addr + ptob(npages)) >
2084             (sptseg->s_base + sptd->spt_realsize))
2085                 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2086 
2087         npages = btopr(size);
2088 
2089         ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2090         ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2091 
2092         switch (type) {
2093 
2094         case F_SOFTLOCK:
2095 
2096                 /*
2097                  * availrmem is decremented once during anon_swap_adjust()
2098                  * and is incremented during the anon_unresv(), which is
2099                  * called from shm_rm_amp() when the segment is destroyed.
2100                  */
2101                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2102                 /*
2103                  * Some platforms assume that ISM pages are SE_SHARED
2104                  * locked for the entire life of the segment.
2105                  */
2106                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2107                         return (0);
2108                 /*
2109                  * Fall through to the F_INVAL case to load up the hat layer
2110                  * entries with the HAT_LOAD_LOCK flag.
2111                  */
2112 
2113                 /* FALLTHRU */
2114         case F_INVAL:
2115 
2116                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2117                         return (FC_NOMAP);
2118 
2119                 /*
2120                  * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2121                  * may still rely on this call to hat_share(). That
2122                  * would imply that those HATs can fault on a
2123                  * HAT_LOAD_LOCK translation, which would seem
2124                  * contradictory.
2125                  */
2126                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2127                         if (hat_share(seg->s_as->a_hat, seg->s_base,
2128                             curspt->a_hat, sptseg->s_base,
2129                             sptseg->s_size, sptseg->s_szc) != 0) {
2130                                 panic("hat_share error in ISM fault");
2131                                 /*NOTREACHED*/
2132                         }
2133                         return (0);
2134                 }
2135                 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2136 
2137                 /*
2138                  * I see no need to lock the real seg here,
2139                  * because all of our work will be on the underlying
2140                  * dummy seg.
2141                  *
2142                  * sptseg_addr and npages now account for large pages.
2143                  */
2144                 amp = sptd->spt_amp;
2145                 ASSERT(amp != NULL);
2146                 anon_index = seg_page(sptseg, sptseg_addr);
2147 
2148                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2149                 for (i = 0; i < npages; i++) {
2150                         ap = anon_get_ptr(amp->ahp, anon_index++);
2151                         ASSERT(ap != NULL);
2152                         swap_xlate(ap, &vp, &offset);
2153                         pp = page_lookup(vp, offset, SE_SHARED);
2154                         ASSERT(pp != NULL);
2155                         ppa[i] = pp;
2156                 }
2157                 ANON_LOCK_EXIT(&amp->a_rwlock);
2158                 ASSERT(i == npages);
2159 
2160                 /*
2161                  * We are already holding the as->a_lock on the user's
2162                  * real segment, but we need to hold the a_lock on the
2163                  * underlying dummy as. This is mostly to satisfy the
2164                  * underlying HAT layer.
2165                  */
2166                 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2167                 a = sptseg_addr;
2168                 pidx = 0;
2169                 if (type == F_SOFTLOCK) {
2170                         /*
2171                          * Load up the translation keeping it
2172                          * locked and don't unlock the page.
2173                          */
2174                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2175                                 sz = MIN(pgsz, ptob(npages - pidx));
2176                                 hat_memload_array(sptseg->s_as->a_hat, a,
2177                                     sz, &ppa[pidx], sptd->spt_prot,
2178                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2179                         }
2180                 } else {
2181                         if (hat == seg->s_as->a_hat) {
2182 
2183                                 /*
2184                                  * Migrate pages marked for migration.
2185                                  */
2186                                 if (lgrp_optimizations())
2187                                         page_migrate(seg, shm_addr, ppa,
2188                                             npages);
2189 
2190                                 /* CPU HAT */
2191                                 for (; pidx < npages;
2192                                     a += pgsz, pidx += pgcnt) {
2193                                         sz = MIN(pgsz, ptob(npages - pidx));
2194                                         hat_memload_array(sptseg->s_as->a_hat,
2195                                             a, sz, &ppa[pidx],
2196                                             sptd->spt_prot, HAT_LOAD_SHARE);
2197                                 }
2198                         } else {
2199                                 /* XHAT. Pass real address */
2200                                 hat_memload_array(hat, shm_addr,
2201                                     ptob(npages), ppa, sptd->spt_prot,
2202                                     HAT_LOAD_SHARE);
2203                         }
2204 
2205                         /*
2206                          * And now drop the SE_SHARED lock(s).
2207                          */
2208                         for (i = 0; i < npages; i++)
2209                                 page_unlock(ppa[i]);
2210                 }
2211                 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2212 
2213                 kmem_free(ppa, sizeof (page_t *) * npages);
2214                 return (0);
2215         case F_SOFTUNLOCK:
2216 
2217                 /*
2218                  * This is a bit ugly, we pass in the real seg pointer,
2219                  * but the sptseg_addr is the virtual address within the
2220                  * dummy seg.
2221                  */
2222                 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2223                 return (0);
2224 
2225         case F_PROT:
2226 
2227                 /*
2228                  * This takes care of the unusual case where a user
2229                  * allocates a stack in shared memory and a register
2230                  * window overflow is written to that stack page before
2231                  * it is otherwise modified.
2232                  *
2233                  * We can get away with this because ISM segments are
2234                  * always rw. Other than this unusual case, there
2235                  * should be no instances of protection violations.
2236                  */
2237                 return (0);
2238 
2239         default:
2240 #ifdef DEBUG
2241                 cmn_err(CE_WARN, "segspt_shmfault default type?");
2242 #endif
2243                 return (FC_NOMAP);
2244         }
2245 }
2246 
2247 /*ARGSUSED*/
2248 static faultcode_t
2249 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2250 {
2251         return (0);
2252 }
2253 
2254 /*ARGSUSED*/
2255 static int
2256 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2257 {
2258         return (0);
2259 }
2260 
2261 /*ARGSUSED*/
2262 static size_t
2263 segspt_shmswapout(struct seg *seg)
2264 {
2265         return (0);
2266 }
2267 
2268 /*
2269  * duplicate the shared page tables
2270  */
2271 int
2272 segspt_shmdup(struct seg *seg, struct seg *newseg)
2273 {
2274         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
2275         struct anon_map         *amp = shmd->shm_amp;
2276         struct shm_data         *shmd_new;
2277         struct seg              *spt_seg = shmd->shm_sptseg;
2278         struct spt_data         *sptd = spt_seg->s_data;
2279         int                     error = 0;
2280 
2281         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2282 
2283         shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2284         newseg->s_data = (void *)shmd_new;
2285         shmd_new->shm_sptas = shmd->shm_sptas;
2286         shmd_new->shm_amp = amp;
2287         shmd_new->shm_sptseg = shmd->shm_sptseg;
2288         newseg->s_ops = &segspt_shmops;
2289         newseg->s_szc = seg->s_szc;
2290         ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2291 
2292         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2293         amp->refcnt++;
2294         ANON_LOCK_EXIT(&amp->a_rwlock);
2295 
2296         if (sptd->spt_flags & SHM_PAGEABLE) {
2297                 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2298                 shmd_new->shm_lckpgs = 0;
2299                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2300                         if ((error = hat_share(newseg->s_as->a_hat,
2301                             newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2302                             seg->s_size, seg->s_szc)) != 0) {
2303                                 kmem_free(shmd_new->shm_vpage,
2304                                     btopr(amp->size));
2305                         }
2306                 }
2307                 return (error);
2308         } else {
2309                 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2310                     shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2311                     seg->s_szc));
2312 
2313         }
2314 }
2315 
2316 /*ARGSUSED*/
2317 int
2318 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2319 {
2320         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2321         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2322 
2323         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2324 
2325         /*
2326          * ISM segment is always rw.
2327          */
2328         return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2329 }
2330 
2331 /*
2332  * Return an array of locked large pages; for empty slots allocate
2333  * private zero-filled anon pages.
2334  */
2335 static int
2336 spt_anon_getpages(
2337         struct seg *sptseg,
2338         caddr_t sptaddr,
2339         size_t len,
2340         page_t *ppa[])
2341 {
2342         struct  spt_data *sptd = sptseg->s_data;
2343         struct  anon_map *amp = sptd->spt_amp;
2344         enum    seg_rw rw = sptd->spt_prot;
2345         uint_t  szc = sptseg->s_szc;
2346         size_t  pg_sz, share_sz = page_get_pagesize(szc);
2347         pgcnt_t lp_npgs;
2348         caddr_t lp_addr, e_sptaddr;
2349         uint_t  vpprot, ppa_szc = 0;
2350         struct  vpage *vpage = NULL;
2351         ulong_t j, ppa_idx;
2352         int     err, ierr = 0;
2353         pgcnt_t an_idx;
2354         anon_sync_obj_t cookie;
2355         int anon_locked = 0;
2356         pgcnt_t amp_pgs;
2357 
2358 
2359         ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2360         ASSERT(len != 0);
2361 
2362         pg_sz = share_sz;
2363         lp_npgs = btop(pg_sz);
2364         lp_addr = sptaddr;
2365         e_sptaddr = sptaddr + len;
2366         an_idx = seg_page(sptseg, sptaddr);
2367         ppa_idx = 0;
2368 
2369         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2370 
2371         amp_pgs = page_get_pagecnt(amp->a_szc);
2372 
2373         /*CONSTCOND*/
2374         while (1) {
2375                 for (; lp_addr < e_sptaddr;
2376                     an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2377 
2378                         /*
2379                          * If we're currently locked, and we get to a new
2380                          * page, unlock our current anon chunk.
2381                          */
2382                         if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2383                                 anon_array_exit(&cookie);
2384                                 anon_locked = 0;
2385                         }
2386                         if (!anon_locked) {
2387                                 anon_array_enter(amp, an_idx, &cookie);
2388                                 anon_locked = 1;
2389                         }
2390                         ppa_szc = (uint_t)-1;
2391                         ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2392                             lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2393                             &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2394 
2395                         if (ierr != 0) {
2396                                 if (ierr > 0) {
2397                                         err = FC_MAKE_ERR(ierr);
2398                                         goto lpgs_err;
2399                                 }
2400                                 break;
2401                         }
2402                 }
2403                 if (lp_addr == e_sptaddr) {
2404                         break;
2405                 }
2406                 ASSERT(lp_addr < e_sptaddr);
2407 
2408                 /*
2409                  * ierr == -1 means we failed to allocate a large page,
2410                  * so do a size down operation.
2411                  *
2412                  * ierr == -2 means some other process that privately shares
2413                  * pages with this process has allocated a larger page and we
2414                  * need to retry with larger pages. So do a size up
2415                  * operation. This relies on the fact that large pages are
2416                  * never partially shared i.e. if we share any constituent
2417                  * page of a large page with another process we must share the
2418                  * entire large page. Note this cannot happen for the SOFTLOCK
2419                  * case, unless the current address (lp_addr) is at the beginning
2420                  * of the next page size boundary because the other process
2421                  * couldn't have relocated locked pages.
2422                  */
2423                 ASSERT(ierr == -1 || ierr == -2);
2424                 if (segvn_anypgsz) {
2425                         ASSERT(ierr == -2 || szc != 0);
2426                         ASSERT(ierr == -1 || szc < sptseg->s_szc);
2427                         szc = (ierr == -1) ? szc - 1 : szc + 1;
2428                 } else {
2429                         /*
2430                          * For faults and segvn_anypgsz == 0
2431                          * we need to be careful not to loop forever
2432                          * if an existing page is found with szc other
2433                          * than 0 or seg->s_szc. This could be due
2434                          * to page relocations on behalf of DR or
2435                          * more likely large page creation. For this
2436                          * case simply re-size to existing page's szc
2437                          * if returned by anon_map_getpages().
2438                          */
2439                         if (ppa_szc == (uint_t)-1) {
2440                                 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2441                         } else {
2442                                 ASSERT(ppa_szc <= sptseg->s_szc);
2443                                 ASSERT(ierr == -2 || ppa_szc < szc);
2444                                 ASSERT(ierr == -1 || ppa_szc > szc);
2445                                 szc = ppa_szc;
2446                         }
2447                 }
2448                 pg_sz = page_get_pagesize(szc);
2449                 lp_npgs = btop(pg_sz);
2450                 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2451         }
2452         if (anon_locked) {
2453                 anon_array_exit(&cookie);
2454         }
2455         ANON_LOCK_EXIT(&amp->a_rwlock);
2456         return (0);
2457 
2458 lpgs_err:
2459         if (anon_locked) {
2460                 anon_array_exit(&cookie);
2461         }
2462         ANON_LOCK_EXIT(&amp->a_rwlock);
2463         for (j = 0; j < ppa_idx; j++)
2464                 page_unlock(ppa[j]);
2465         return (err);
2466 }
2467 
2468 /*
2469  * count the number of bytes in a set of spt pages that are currently not
2470  * locked
2471  */
2472 static rctl_qty_t
2473 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2474 {
2475         ulong_t i;
2476         rctl_qty_t unlocked = 0;
2477 
2478         for (i = 0; i < npages; i++) {
2479                 if (ppa[i]->p_lckcnt == 0)
2480                         unlocked += PAGESIZE;
2481         }
2482         return (unlocked);
2483 }
2484 
2485 extern  u_longlong_t randtick(void);
2486 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2487 #define NLCK    (NCPU_P2)
2488 /* Random number with a range [0, n-1], n must be power of two */
2489 #define RAND_P2(n)      \
2490         ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
2491 
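     /*
      * Lock npages DISM pages starting at anon_index on behalf of MC_LOCK.
      * *locked returns the number of bytes newly locked so the caller can
      * account for the project's locked memory.
      */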
2492 int
2493 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2494     page_t **ppa, ulong_t *lockmap, size_t pos,
2495     rctl_qty_t *locked)
2496 {
2497         struct  shm_data *shmd = seg->s_data;
2498         struct  spt_data *sptd = shmd->shm_sptseg->s_data;
2499         ulong_t i;
2500         int     kernel;
2501         pgcnt_t nlck = 0;
2502         int     rv = 0;
2503         int     use_reserved = 1;
2504 
2505         /* return the number of bytes actually locked */
2506         *locked = 0;
2507 
2508         /*
2509          * To avoid contention on freemem_lock, availrmem and pages_locked
2510          * global counters are updated only every nlck locked pages instead of
2511          * every time.  Reserve nlck locks up front and deduct from this
2512          * reservation for each page that requires a lock.  When the reservation
2513          * is consumed, reserve again.  nlck is randomized, so the competing
2514          * threads do not fall into a cyclic lock contention pattern. When
2515          * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2516          * is used to lock pages.
2517          */
2518         for (i = 0; i < npages; anon_index++, pos++, i++) {
2519                 if (nlck == 0 && use_reserved == 1) {
2520                         nlck = NLCK + RAND_P2(NLCK);
2521                         /* if fewer loops left, decrease nlck */
2522                         nlck = MIN(nlck, npages - i);
2523                         /*
2524                          * Reserve nlck locks up front and deduct from this
2525                          * reservation for each page that requires a lock.  When
2526                          * the reservation is consumed, reserve again.
2527                          */
2528                         mutex_enter(&freemem_lock);
2529                         if ((availrmem - nlck) < pages_pp_maximum) {
2530                                 /* Do not do advance memory reserves */
2531                                 use_reserved = 0;
2532                         } else {
2533                                 availrmem       -= nlck;
2534                                 pages_locked    += nlck;
2535                         }
2536                         mutex_exit(&freemem_lock);
2537                 }
2538                 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2539                         if (sptd->spt_ppa_lckcnt[anon_index] <
2540                             (ushort_t)DISM_LOCK_MAX) {
2541                                 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2542                                     (ushort_t)DISM_LOCK_MAX) {
2543                                         cmn_err(CE_WARN,
2544                                             "DISM page lock limit "
2545                                             "reached on DISM offset 0x%lx\n",
2546                                             anon_index << PAGESHIFT);
2547                                 }
2548                                 kernel = (sptd->spt_ppa &&
2549                                     sptd->spt_ppa[anon_index]);
2550                                 if (!page_pp_lock(ppa[i], 0, kernel ||
2551                                     use_reserved)) {
2552                                         sptd->spt_ppa_lckcnt[anon_index]--;
2553                                         rv = EAGAIN;
2554                                         break;
2555                                 }
2556                                 /* if this is a newly locked page, count it */
2557                                 if (ppa[i]->p_lckcnt == 1) {
2558                                         if (kernel == 0 && use_reserved == 1)
2559                                                 nlck--;
2560                                         *locked += PAGESIZE;
2561                                 }
2562                                 shmd->shm_lckpgs++;
2563                                 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2564                                 if (lockmap != NULL)
2565                                         BT_SET(lockmap, pos);
2566                         }
2567                 }
2568         }
2569         /* Return unused lock reservation */
2570         if (nlck != 0 && use_reserved == 1) {
2571                 mutex_enter(&freemem_lock);
2572                 availrmem       += nlck;
2573                 pages_locked    -= nlck;
2574                 mutex_exit(&freemem_lock);
2575         }
2576 
2577         return (rv);
2578 }
2579 
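     /*
      * Undo spt_lockpages() for npages pages starting at anon_index; the
      * number of bytes that became unlocked is added to *unlocked.
      */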
2580 int
2581 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2582     rctl_qty_t *unlocked)
2583 {
2584         struct shm_data *shmd = seg->s_data;
2585         struct spt_data *sptd = shmd->shm_sptseg->s_data;
2586         struct anon_map *amp = sptd->spt_amp;
2587         struct anon     *ap;
2588         struct vnode    *vp;
2589         u_offset_t      off;
2590         struct page     *pp;
2591         int             kernel;
2592         anon_sync_obj_t cookie;
2593         ulong_t         i;
2594         pgcnt_t         nlck = 0;
2595         pgcnt_t         nlck_limit = NLCK;
2596 
2597         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2598         for (i = 0; i < npages; i++, anon_index++) {
2599                 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2600                         anon_array_enter(amp, anon_index, &cookie);
2601                         ap = anon_get_ptr(amp->ahp, anon_index);
2602                         ASSERT(ap);
2603 
2604                         swap_xlate(ap, &vp, &off);
2605                         anon_array_exit(&cookie);
2606                         pp = page_lookup(vp, off, SE_SHARED);
2607                         ASSERT(pp);
2608                         /*
2609                          * in seg pcache; for pages in seg pcache availrmem was
2610                          * already decremented in _dismpagelock().
2611                          * decremented in _dismpagelock()
2612                          */
2613                         kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2614                         ASSERT(pp->p_lckcnt > 0);
2615 
2616                         /*
2617                          * unlock the page but do not change availrmem here; we
2618                          * update it ourselves every nlck pages.
2619                          */
2620                         page_pp_unlock(pp, 0, 1);
2621                         if (pp->p_lckcnt == 0) {
2622                                 if (kernel == 0)
2623                                         nlck++;
2624                                 *unlocked += PAGESIZE;
2625                         }
2626                         page_unlock(pp);
2627                         shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2628                         sptd->spt_ppa_lckcnt[anon_index]--;
2629                         shmd->shm_lckpgs--;
2630                 }
2631 
2632                 /*
2633                  * To reduce freemem_lock contention, do not update availrmem
2634                  * until at least NLCK pages have been unlocked.
2635                  * 1. No need to update if nlck is zero
2636                  * 2. Always update on the last iteration
2637                  */
2638                 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2639                         mutex_enter(&freemem_lock);
2640                         availrmem       += nlck;
2641                         pages_locked    -= nlck;
2642                         mutex_exit(&freemem_lock);
2643                         nlck = 0;
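                             /*
                              * Randomize the next batch size, presumably so
                              * that concurrent unlockers do not contend for
                              * freemem_lock at the same stride.
                              */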
2644                         nlck_limit = NLCK + RAND_P2(NLCK);
2645                 }
2646         }
2647         ANON_LOCK_EXIT(&amp->a_rwlock);
2648 
2649         return (0);
2650 }
2651 
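     /*
      * Handle memory control operations (MC_LOCK / MC_UNLOCK) for a shared
      * memory segment.  Only DISM (SHM_PAGEABLE) segments are affected;
      * MC_LOCK faults in and locks the requested pages, charging them to
      * the shm project's locked-memory resource control, while MC_UNLOCK
      * unlocks the pages and returns the charge.
      */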
2652 /*ARGSUSED*/
2653 static int
2654 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2655     int attr, int op, ulong_t *lockmap, size_t pos)
2656 {
2657         struct shm_data *shmd = seg->s_data;
2658         struct seg      *sptseg = shmd->shm_sptseg;
2659         struct spt_data *sptd = sptseg->s_data;
2660         struct kshmid   *sp = sptd->spt_amp->a_sp;
2661         pgcnt_t         npages, a_npages;
2662         page_t          **ppa;
2663         pgcnt_t         an_idx, a_an_idx, ppa_idx;
2664         caddr_t         spt_addr, a_addr;       /* spt and aligned address */
2665         size_t          a_len;                  /* aligned len */
2666         size_t          share_sz;
2667         ulong_t         i;
2668         int             sts = 0;
2669         rctl_qty_t      unlocked = 0;
2670         rctl_qty_t      locked = 0;
2671         struct proc     *p = curproc;
2672         kproject_t      *proj;
2673 
2674         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2675         ASSERT(sp != NULL);
2676 
2677         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2678                 return (0);
2679         }
2680 
2681         addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2682         an_idx = seg_page(seg, addr);
2683         npages = btopr(len);
2684 
2685         if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2686                 return (ENOMEM);
2687         }
2688 
2689         /*
2690          * A shm's project never changes, so no lock needed.
2691          * The shm has a hold on the project, so it will not go away.
2692          * Since we have a mapping to shm within this zone, we know
2693          * that the zone will not go away.
2694          */
2695         proj = sp->shm_perm.ipc_proj;
2696 
2697         if (op == MC_LOCK) {
2698 
2699                 /*
2700                  * Align the addr and size request if they are not already
2701                  * aligned, so we can always allocate large page(s); however,
2702                  * we only lock the range that was originally requested.
2703                  */
2704                 share_sz = page_get_pagesize(sptseg->s_szc);
2705                 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2706                 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2707                     share_sz);
2708                 a_npages = btop(a_len);
2709                 a_an_idx = seg_page(seg, a_addr);
2710                 spt_addr = sptseg->s_base + ptob(a_an_idx);
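                     /* index into ppa[] of the first page actually requested */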
2711                 ppa_idx = an_idx - a_an_idx;
2712 
2713                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2714                     KM_NOSLEEP)) == NULL) {
2715                         return (ENOMEM);
2716                 }
2717 
2718                 /*
2719                  * Don't cache any new pages for IO and
2720                  * flush any cached pages.
2721                  */
2722                 mutex_enter(&sptd->spt_lock);
2723                 if (sptd->spt_ppa != NULL)
2724                         sptd->spt_flags |= DISM_PPA_CHANGED;
2725 
2726                 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2727                 if (sts != 0) {
2728                         mutex_exit(&sptd->spt_lock);
2729                         kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2730                         return (sts);
2731                 }
2732 
2733                 mutex_enter(&sp->shm_mlock);
2734                 /* enforce locked memory rctl */
2735                 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2736 
2737                 mutex_enter(&p->p_lock);
2738                 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2739                         mutex_exit(&p->p_lock);
2740                         sts = EAGAIN;
2741                 } else {
2742                         mutex_exit(&p->p_lock);
2743                         sts = spt_lockpages(seg, an_idx, npages,
2744                             &ppa[ppa_idx], lockmap, pos, &locked);
2745 
2746                         /*
2747                          * correct locked count if not all pages could be
2748                          * locked
2749                          */
2750                         if ((unlocked - locked) > 0) {
2751                                 rctl_decr_locked_mem(NULL, proj,
2752                                     (unlocked - locked), 0);
2753                         }
2754                 }
2755                 /*
2756                  * unlock pages
2757                  */
2758                 for (i = 0; i < a_npages; i++)
2759                         page_unlock(ppa[i]);
2760                 if (sptd->spt_ppa != NULL)
2761                         sptd->spt_flags |= DISM_PPA_CHANGED;
2762                 mutex_exit(&sp->shm_mlock);
2763                 mutex_exit(&sptd->spt_lock);
2764 
2765                 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2766 
2767         } else if (op == MC_UNLOCK) { /* unlock */
2768                 page_t          **ppa;
2769 
2770                 mutex_enter(&sptd->spt_lock);
2771                 if (shmd->shm_lckpgs == 0) {
2772                         mutex_exit(&sptd->spt_lock);
2773                         return (0);
2774                 }
2775                 /*
2776                  * Don't cache new IO pages.
2777                  */
2778                 if (sptd->spt_ppa != NULL)
2779                         sptd->spt_flags |= DISM_PPA_CHANGED;
2780 
2781                 mutex_enter(&sp->shm_mlock);
2782                 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2783                 if ((ppa = sptd->spt_ppa) != NULL)
2784                         sptd->spt_flags |= DISM_PPA_CHANGED;
2785                 mutex_exit(&sptd->spt_lock);
2786 
2787                 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2788                 mutex_exit(&sp->shm_mlock);
2789 
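                     /*
                      * Purge the now-stale cached page list from the
                      * seg pcache.
                      */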
2790                 if (ppa != NULL)
2791                         seg_ppurge_wiredpp(ppa);
2792         }
2793         return (sts);
2794 }
2795 
2796 /*ARGSUSED*/
2797 int
2798 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2799 {
2800         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2801         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2802         spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2803 
2804         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2805 
2806         /*
2807          * An ISM segment has a single protection for all of its pages.
2808          */
2809         while (--pgno >= 0)
2810                 *protv++ = sptd->spt_prot;
2811         return (0);
2812 }
2813 
2814 /*ARGSUSED*/
2815 u_offset_t
2816 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2817 {
2818         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2819 
2820         /* Offset does not matter in ISM memory */
2821 
2822         return ((u_offset_t)0);
2823 }
2824 
2825 /* ARGSUSED */
2826 int
2827 segspt_shmgettype(struct seg *seg, caddr_t addr)
2828 {
2829         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2830         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2831 
2832         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2833 
2834         /*
2835          * The shared memory mapping is always MAP_SHARED; swap is only
2836          * reserved for DISM (SHM_PAGEABLE) segments.
2837          */
2838         return (MAP_SHARED |
2839             ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2840 }
2841 
2842 /*ARGSUSED*/
2843 int
2844 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2845 {
2846         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2847         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2848 
2849         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2850 
2851         *vpp = sptd->spt_vp;
2852         return (0);
2853 }
2854 
2855 /*
2856  * We need to wait for pending IO to complete to a DISM segment in order for
2857  * pages to get kicked out of the seg_pcache.  120 seconds should be more
2858  * than enough time to wait.
2859  */
2860 static clock_t spt_pcache_wait = 120;
2861 
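     /*
      * Advise the kernel of expected access behavior for this segment.
      * MADV_FREE on a DISM segment purges cached page lists, waits up to
      * spt_pcache_wait seconds for the seg_pcache to drop them, and then
      * disclaims the anon pages.  The MADV_ACCESS_* hints set the lgroup
      * memory allocation policy for the range and mark existing pages for
      * migration.
      */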
2862 /*ARGSUSED*/
2863 static int
2864 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2865 {
2866         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2867         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2868         struct anon_map *amp;
2869         pgcnt_t pg_idx;
2870         ushort_t gen;
2871         clock_t end_lbolt;
2872         int writer;
2873         page_t **ppa;
2874 
2875         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2876 
2877         if (behav == MADV_FREE) {
2878                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2879                         return (0);
2880 
2881                 amp = sptd->spt_amp;
2882                 pg_idx = seg_page(seg, addr);
2883 
2884                 mutex_enter(&sptd->spt_lock);
2885                 if ((ppa = sptd->spt_ppa) == NULL) {
2886                         mutex_exit(&sptd->spt_lock);
2887                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2888                         anon_disclaim(amp, pg_idx, len);
2889                         ANON_LOCK_EXIT(&amp->a_rwlock);
2890                         return (0);
2891                 }
2892 
2893                 sptd->spt_flags |= DISM_PPA_CHANGED;
2894                 gen = sptd->spt_gen;
2895 
2896                 mutex_exit(&sptd->spt_lock);
2897 
2898                 /*
2899                  * Purge all DISM cached pages
2900                  */
2901                 seg_ppurge_wiredpp(ppa);
2902 
2903                 /*
2904                  * Drop the AS_LOCK so that other threads can grab it
2905                  * in the as_pageunlock path and hopefully get the segment
2906                  * kicked out of the seg_pcache.  We bump the shm_softlockcnt
2907                  * to keep this segment resident.
2908                  */
2909                 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2910                 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2911                 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2912 
2913                 mutex_enter(&sptd->spt_lock);
2914 
2915                 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2916 
2917                 /*
2918                  * Try to wait for pages to get kicked out of the seg_pcache.
2919                  */
2920                 while (sptd->spt_gen == gen &&
2921                     (sptd->spt_flags & DISM_PPA_CHANGED) &&
2922                     ddi_get_lbolt() < end_lbolt) {
2923                         if (!cv_timedwait_sig(&sptd->spt_cv,
2924                             &sptd->spt_lock, end_lbolt)) {
2925                                 break;
2926                         }
2927                 }
2928 
2929                 mutex_exit(&sptd->spt_lock);
2930 
2931                 /* Regrab the AS_LOCK and release our hold on the segment */
2932                 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2933                     writer ? RW_WRITER : RW_READER);
2934                 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2935                 if (shmd->shm_softlockcnt <= 0) {
2936                         if (AS_ISUNMAPWAIT(seg->s_as)) {
2937                                 mutex_enter(&seg->s_as->a_contents);
2938                                 if (AS_ISUNMAPWAIT(seg->s_as)) {
2939                                         AS_CLRUNMAPWAIT(seg->s_as);
2940                                         cv_broadcast(&seg->s_as->a_cv);
2941                                 }
2942                                 mutex_exit(&seg->s_as->a_contents);
2943                         }
2944                 }
2945 
2946                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2947                 anon_disclaim(amp, pg_idx, len);
2948                 ANON_LOCK_EXIT(&amp->a_rwlock);
2949         } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2950             behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2951                 int                     already_set;
2952                 ulong_t                 anon_index;
2953                 lgrp_mem_policy_t       policy;
2954                 caddr_t                 shm_addr;
2955                 size_t                  share_size;
2956                 size_t                  size;
2957                 struct seg              *sptseg = shmd->shm_sptseg;
2958                 caddr_t                 sptseg_addr;
2959 
2960                 /*
2961                  * Align address and length to page size of underlying segment
2962                  */
2963                 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2964                 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2965                 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2966                     share_size);
2967 
2968                 amp = shmd->shm_amp;
2969                 anon_index = seg_page(seg, shm_addr);
2970 
2971                 /*
2972                  * Now we may have to adjust the size downward if it
2973                  * extends past the realsize of the segment or the
2974                  * initial anon allocations.
2975                  */
2976                 sptseg_addr = sptseg->s_base + ptob(anon_index);
2977                 if ((sptseg_addr + size) >
2978                     (sptseg->s_base + sptd->spt_realsize))
2979                         size = (sptseg->s_base + sptd->spt_realsize) -
2980                             sptseg_addr;
2981 
2982                 /*
2983                  * Set memory allocation policy for this segment
2984                  */
2985                 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2986                 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2987                     NULL, 0, len);
2988 
2989                 /*
2990                  * If the policy is already set and is not one that must
2991                  * be reapplied, there is nothing more to do.
2992                  */
2993                 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2994                         return (0);
2995 
2996                 /*
2997                  * Mark any existing pages in the given range for
2998                  * migration, flushing the I/O page cache first and using
2999                  * the underlying segment to calculate the anon index and
3000                  * to get the anonmap and vnode pointer.
3001                  */
3002                 if (shmd->shm_softlockcnt > 0)
3003                         segspt_purge(seg);
3004 
3005                 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3006         }
3007 
3008         return (0);
3009 }
3010 
3011 /*ARGSUSED*/
3012 void
3013 segspt_shmdump(struct seg *seg)
3014 {
3015         /* no-op for ISM segment */
3016 }
3017 
3018 /*
3019  * Get a memory ID for an address in the given segment.
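      * If no anon slot has been allocated yet for the page, allocate a
      * zero-filled anon page first so that the returned (anon, offset)
      * pair is a stable identifier for the memory.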
3020  */
3021 static int
3022 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3023 {
3024         struct shm_data *shmd = (struct shm_data *)seg->s_data;
3025         struct anon     *ap;
3026         size_t          anon_index;
3027         struct anon_map *amp = shmd->shm_amp;
3028         struct spt_data *sptd = shmd->shm_sptseg->s_data;
3029         struct seg      *sptseg = shmd->shm_sptseg;
3030         anon_sync_obj_t cookie;
3031 
3032         anon_index = seg_page(seg, addr);
3033 
3034         if (addr > (seg->s_base + sptd->spt_realsize)) {
3035                 return (EFAULT);
3036         }
3037 
3038         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3039         anon_array_enter(amp, anon_index, &cookie);
3040         ap = anon_get_ptr(amp->ahp, anon_index);
3041         if (ap == NULL) {
3042                 struct page *pp;
3043                 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3044 
3045                 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3046                 if (pp == NULL) {
3047                         anon_array_exit(&cookie);
3048                         ANON_LOCK_EXIT(&amp->a_rwlock);
3049                         return (ENOMEM);
3050                 }
3051                 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3052                 page_unlock(pp);
3053         }
3054         anon_array_exit(&cookie);
3055         ANON_LOCK_EXIT(&amp->a_rwlock);
3056         memidp->val[0] = (uintptr_t)ap;
3057         memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3058         return (0);
3059 }
3060 
3061 /*
3062  * Get memory allocation policy info for specified address in given segment
3063  */
3064 static lgrp_mem_policy_info_t *
3065 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3066 {
3067         struct anon_map         *amp;
3068         ulong_t                 anon_index;
3069         lgrp_mem_policy_info_t  *policy_info;
3070         struct shm_data         *shm_data;
3071 
3072         ASSERT(seg != NULL);
3073 
3074         /*
3075          * Get anon_map from segshm
3076          *
3077          * Assume that no lock needs to be held on the anon_map, since
3078          * it is protected by its reference count, which must be
3079          * nonzero for an existing segment.
3080          * We still need to grab the readers lock on the policy tree.
3081          */
3082         shm_data = (struct shm_data *)seg->s_data;
3083         if (shm_data == NULL)
3084                 return (NULL);
3085         amp = shm_data->shm_amp;
3086         ASSERT(amp->refcnt != 0);
3087 
3088         /*
3089          * Get policy info
3090          *
3091          * Assume starting anon index of 0
3092          */
3093         anon_index = seg_page(seg, addr);
3094         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3095 
3096         return (policy_info);
3097 }