1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 
  25 #include <sys/param.h>
  26 #include <sys/user.h>
  27 #include <sys/mman.h>
  28 #include <sys/kmem.h>
  29 #include <sys/sysmacros.h>
  30 #include <sys/cmn_err.h>
  31 #include <sys/systm.h>
  32 #include <sys/tuneable.h>
  33 #include <vm/hat.h>
  34 #include <vm/seg.h>
  35 #include <vm/as.h>
  36 #include <vm/anon.h>
  37 #include <vm/page.h>
  38 #include <sys/buf.h>
  39 #include <sys/swap.h>
  40 #include <sys/atomic.h>
  41 #include <vm/seg_spt.h>
  42 #include <sys/debug.h>
  43 #include <sys/vtrace.h>
  44 #include <sys/shm.h>
  45 #include <sys/shm_impl.h>
  46 #include <sys/lgrp.h>
  47 #include <sys/vmsystm.h>
  48 #include <sys/policy.h>
  49 #include <sys/project.h>
  50 #include <sys/tnf_probe.h>
  51 #include <sys/zone.h>
  52 
  53 #define SEGSPTADDR      (caddr_t)0x0
  54 
  55 /*
  56  * # pages used for spt
  57  */
  58 size_t  spt_used;
  59 
  60 /*
  61  * segspt_minfree is the memory left for the system after ISM
  62  * locks its pages; it is set to 5% of availrmem in sptcreate()
  63  * when ISM is created.  ISM should not use more than ~90% of
  64  * availrmem; if it does, the performance of the system may
  65  * decrease.  Machines with large memories may be able to use
  66  * more memory for ISM, so we set the default segspt_minfree
  67  * to 5% (which gives ISM at most 95% of availrmem).
  68  * If somebody wants even more memory for ISM (risking hanging
  69  * the system) they can patch segspt_minfree to a smaller number.
  70  */
  71 pgcnt_t segspt_minfree = 0;
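/*
 * As a worked example of the default: with availrmem at 1,000,000 pages,
 * sptcreate() would set segspt_minfree to availrmem / 20 = 50,000 pages,
 * leaving ISM able to lock at most 950,000 pages (95% of availrmem).
 */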
  72 
  73 static int segspt_create(struct seg *seg, caddr_t argsp);
  74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
  75 static void segspt_free(struct seg *seg);
  76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
  77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
  78 
  79 static void
  80 segspt_badop()
  81 {
  82         panic("segspt_badop called");
  83         /*NOTREACHED*/
  84 }
  85 
  86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
  87 
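/*
 * Ops vector for the spt segment created in the dummy address space by
 * sptcreate().  Only unmap, free and getpolicy are implemented; inherit is
 * explicitly unsupported, and every other operation is routed through
 * SEGSPT_BADOP() to segspt_badop(), which panics, since those operations
 * are only expected on the per-process shm segments (segspt_shmops below).
 */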
  88 struct seg_ops segspt_ops = {
  89         SEGSPT_BADOP(int),              /* dup */
  90         segspt_unmap,
  91         segspt_free,
  92         SEGSPT_BADOP(int),              /* fault */
  93         SEGSPT_BADOP(faultcode_t),      /* faulta */
  94         SEGSPT_BADOP(int),              /* setprot */
  95         SEGSPT_BADOP(int),              /* checkprot */
  96         SEGSPT_BADOP(int),              /* kluster */
  97         SEGSPT_BADOP(size_t),           /* swapout */
  98         SEGSPT_BADOP(int),              /* sync */
  99         SEGSPT_BADOP(size_t),           /* incore */
 100         SEGSPT_BADOP(int),              /* lockop */
 101         SEGSPT_BADOP(int),              /* getprot */
 102         SEGSPT_BADOP(u_offset_t),       /* getoffset */
 103         SEGSPT_BADOP(int),              /* gettype */
 104         SEGSPT_BADOP(int),              /* getvp */
 105         SEGSPT_BADOP(int),              /* advise */
 106         SEGSPT_BADOP(void),             /* dump */
 107         SEGSPT_BADOP(int),              /* pagelock */
 108         SEGSPT_BADOP(int),              /* setpgsz */
 109         SEGSPT_BADOP(int),              /* getmemid */
 110         segspt_getpolicy,               /* getpolicy */
 111         SEGSPT_BADOP(int),              /* capable */
 112         seg_inherit_notsup              /* inherit */
 113 };
 114 
 115 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
 116 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
 117 static void segspt_shmfree(struct seg *seg);
 118 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
 119                 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
 120 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
 121 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
 122                         register size_t len, register uint_t prot);
 123 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
 124                         uint_t prot);
 125 static int      segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
 126 static size_t   segspt_shmswapout(struct seg *seg);
 127 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
 128                         register char *vec);
 129 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
 130                         int attr, uint_t flags);
 131 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
 132                         int attr, int op, ulong_t *lockmap, size_t pos);
 133 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
 134                         uint_t *protv);
 135 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
 136 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
 137 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
 138 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
 139                         uint_t behav);
 140 static void segspt_shmdump(struct seg *seg);
 141 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
 142                         struct page ***, enum lock_type, enum seg_rw);
 143 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
 144 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
 145 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
 146 static int segspt_shmcapable(struct seg *, segcapability_t);
 147 
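/*
 * Ops vector for the process-visible shared memory segments.  Each such
 * segment (struct shm_data) points at the shared spt segment above, and
 * most of these operations act on that underlying segment on behalf of
 * the attaching process.
 */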
 148 struct seg_ops segspt_shmops = {
 149         segspt_shmdup,
 150         segspt_shmunmap,
 151         segspt_shmfree,
 152         segspt_shmfault,
 153         segspt_shmfaulta,
 154         segspt_shmsetprot,
 155         segspt_shmcheckprot,
 156         segspt_shmkluster,
 157         segspt_shmswapout,
 158         segspt_shmsync,
 159         segspt_shmincore,
 160         segspt_shmlockop,
 161         segspt_shmgetprot,
 162         segspt_shmgetoffset,
 163         segspt_shmgettype,
 164         segspt_shmgetvp,
 165         segspt_shmadvise,       /* advise */
 166         segspt_shmdump,
 167         segspt_shmpagelock,
 168         segspt_shmsetpgsz,
 169         segspt_shmgetmemid,
 170         segspt_shmgetpolicy,
 171         segspt_shmcapable,
 172         seg_inherit_notsup
 173 };
 174 
 175 static void segspt_purge(struct seg *seg);
 176 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
 177                 enum seg_rw, int);
 178 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
 179                 page_t **ppa);
 180 
 181 
 182 
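/*
 * sptcreate() builds the dummy address space and the underlying spt
 * segment that back an ISM/DISM shared memory identifier, and
 * sptdestroy() tears them down.  (They are expected to be driven from
 * the System V shared memory code in shm.c, though that is outside
 * this file.)
 */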
 183 /*ARGSUSED*/
 184 int
 185 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
 186         uint_t prot, uint_t flags, uint_t share_szc)
 187 {
 188         int     err;
 189         struct  as      *newas;
 190         struct  segspt_crargs sptcargs;
 191 
 192 #ifdef DEBUG
 193         TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
 194                         tnf_ulong, size, size );
 195 #endif
 196         if (segspt_minfree == 0)        /* leave min 5% of availrmem */
 197                 segspt_minfree = availrmem/20;  /* for the system */
 198 
 199         if (!hat_supported(HAT_SHARED_PT, (void *)0))
 200                 return (EINVAL);
 201 
 202         /*
 203          * get a new as for this shared memory segment
 204          */
 205         newas = as_alloc();
 206         newas->a_proc = NULL;
 207         sptcargs.amp = amp;
 208         sptcargs.prot = prot;
 209         sptcargs.flags = flags;
 210         sptcargs.szc = share_szc;
 211         /*
 212          * create a shared page table (spt) segment
 213          */
 214 
 215         if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
 216                 as_free(newas);
 217                 return (err);
 218         }
 219         *sptseg = sptcargs.seg_spt;
 220         return (0);
 221 }
 222 
 223 void
 224 sptdestroy(struct as *as, struct anon_map *amp)
 225 {
 226 
 227 #ifdef DEBUG
 228         TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
 229 #endif
 230         (void) as_unmap(as, SEGSPTADDR, amp->size);
 231         as_free(as);
 232 }
 233 
 234 /*
 235  * called from seg_free().
 236  * free (i.e., unlock, unmap, return to free list)
 237  *  all the pages in the given seg.
 238  */
 239 void
 240 segspt_free(struct seg  *seg)
 241 {
 242         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 243 
 244         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 245 
 246         if (sptd != NULL) {
 247                 if (sptd->spt_realsize)
 248                         segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
 249 
 250                 if (sptd->spt_ppa_lckcnt)
 251                         kmem_free(sptd->spt_ppa_lckcnt,
 252                             sizeof (*sptd->spt_ppa_lckcnt)
 253                             * btopr(sptd->spt_amp->size));
 254                 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
 255                 cv_destroy(&sptd->spt_cv);
 256                 mutex_destroy(&sptd->spt_lock);
 257                 kmem_free(sptd, sizeof (*sptd));
 258         }
 259 }
 260 
 261 /*ARGSUSED*/
 262 static int
 263 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
 264         uint_t flags)
 265 {
 266         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 267 
 268         return (0);
 269 }
 270 
 271 /*ARGSUSED*/
 272 static size_t
 273 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
 274 {
 275         caddr_t eo_seg;
 276         pgcnt_t npages;
 277         struct shm_data *shmd = (struct shm_data *)seg->s_data;
 278         struct seg      *sptseg;
 279         struct spt_data *sptd;
 280 
 281         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 282 #ifdef lint
 283         seg = seg;
 284 #endif
 285         sptseg = shmd->shm_sptseg;
 286         sptd = sptseg->s_data;
 287 
 288         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 289                 eo_seg = addr + len;
 290                 while (addr < eo_seg) {
 291                         /* page exists, and it's locked. */
 292                         *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
 293                             SEG_PAGE_ANON;
 294                         addr += PAGESIZE;
 295                 }
 296                 return (len);
 297         } else {
 298                 struct  anon_map *amp = shmd->shm_amp;
 299                 struct  anon    *ap;
 300                 page_t          *pp;
 301                 pgcnt_t         anon_index;
 302                 struct vnode    *vp;
 303                 u_offset_t      off;
 304                 ulong_t         i;
 305                 int             ret;
 306                 anon_sync_obj_t cookie;
 307 
 308                 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 309                 anon_index = seg_page(seg, addr);
 310                 npages = btopr(len);
 311                 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
 312                         return (EINVAL);
 313                 }
 314                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
 315                 for (i = 0; i < npages; i++, anon_index++) {
 316                         ret = 0;
 317                         anon_array_enter(amp, anon_index, &cookie);
 318                         ap = anon_get_ptr(amp->ahp, anon_index);
 319                         if (ap != NULL) {
 320                                 swap_xlate(ap, &vp, &off);
 321                                 anon_array_exit(&cookie);
 322                                 pp = page_lookup_nowait(vp, off, SE_SHARED);
 323                                 if (pp != NULL) {
 324                                         ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
 325                                         page_unlock(pp);
 326                                 }
 327                         } else {
 328                                 anon_array_exit(&cookie);
 329                         }
 330                         if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
 331                                 ret |= SEG_PAGE_LOCKED;
 332                         }
 333                         *vec++ = (char)ret;
 334                 }
 335                 ANON_LOCK_EXIT(&amp->a_rwlock);
 336                 return (len);
 337         }
 338 }
 339 
 340 static int
 341 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
 342 {
 343         size_t share_size;
 344 
 345         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 346 
 347         /*
 348          * seg.s_size may have been rounded up to the largest page size
 349          * in shmat().
 350          * XXX This should be cleaned up. sptdestroy should take a length
 351          * argument which should be the same as sptcreate's. Then
 352          * this rounding would not be needed (or would be done in shm.c).
 353          * Only the check for the full segment would be needed.
 354          *
 355          * XXX -- shouldn't raddr == 0 always? These tests don't seem
 356          * to be useful at all.
 357          */
 358         share_size = page_get_pagesize(seg->s_szc);
 359         ssize = P2ROUNDUP(ssize, share_size);
 360 
 361         if (raddr == seg->s_base && ssize == seg->s_size) {
 362                 seg_free(seg);
 363                 return (0);
 364         } else
 365                 return (EINVAL);
 366 }
 367 
 368 int
 369 segspt_create(struct seg *seg, caddr_t argsp)
 370 {
 371         int             err;
 372         caddr_t         addr = seg->s_base;
 373         struct spt_data *sptd;
 374         struct  segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
 375         struct anon_map *amp = sptcargs->amp;
 376         struct kshmid   *sp = amp->a_sp;
 377         struct  cred    *cred = CRED();
 378         ulong_t         i, j, anon_index = 0;
 379         pgcnt_t         npages = btopr(amp->size);
 380         struct vnode    *vp;
 381         page_t          **ppa;
 382         uint_t          hat_flags;
 383         size_t          pgsz;
 384         pgcnt_t         pgcnt;
 385         caddr_t         a;
 386         pgcnt_t         pidx;
 387         size_t          sz;
 388         proc_t          *procp = curproc;
 389         rctl_qty_t      lockedbytes = 0;
 390         kproject_t      *proj;
 391 
 392         /*
 393          * We are holding the a_lock on the underlying dummy as,
 394          * so we can make calls to the HAT layer.
 395          */
 396         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 397         ASSERT(sp != NULL);
 398 
 399 #ifdef DEBUG
 400         TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
 401             tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
 402 #endif
 403         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 404                 if (err = anon_swap_adjust(npages))
 405                         return (err);
 406         }
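        /*
         * For ISM (non-pageable) segments the swap accounting is charged
         * up front via anon_swap_adjust() above, since every page will be
         * created and locked below; it is undone through anon_swap_restore()
         * at out1 on failure and in segspt_free_pages() on destruction.
         */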
 407         err = ENOMEM;
 408 
 409         if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
 410                 goto out1;
 411 
 412         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 413                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
 414                     KM_NOSLEEP)) == NULL)
 415                         goto out2;
 416         }
 417 
 418         mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
 419 
 420         if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
 421                 goto out3;
 422 
 423         seg->s_ops = &segspt_ops;
 424         sptd->spt_vp = vp;
 425         sptd->spt_amp = amp;
 426         sptd->spt_prot = sptcargs->prot;
 427         sptd->spt_flags = sptcargs->flags;
 428         seg->s_data = (caddr_t)sptd;
 429         sptd->spt_ppa = NULL;
 430         sptd->spt_ppa_lckcnt = NULL;
 431         seg->s_szc = sptcargs->szc;
 432         cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
 433         sptd->spt_gen = 0;
 434 
 435         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 436         if (seg->s_szc > amp->a_szc) {
 437                 amp->a_szc = seg->s_szc;
 438         }
 439         ANON_LOCK_EXIT(&amp->a_rwlock);
 440 
 441         /*
 442          * Set policy to affect initial allocation of pages in
 443          * anon_map_createpages()
 444          */
 445         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
 446             NULL, 0, ptob(npages));
 447 
 448         if (sptcargs->flags & SHM_PAGEABLE) {
 449                 size_t  share_sz;
 450                 pgcnt_t new_npgs, more_pgs;
 451                 struct anon_hdr *nahp;
 452                 zone_t *zone;
 453 
 454                 share_sz = page_get_pagesize(seg->s_szc);
 455                 if (!IS_P2ALIGNED(amp->size, share_sz)) {
 456                         /*
 457                          * We round the size of the anon array up to a
 458                          * 4 M boundary because we always create 4 M worth
 459                          * of pages when locking and faulting, so we don't
 460                          * have to check for corner cases, e.g. whether
 461                          * there is enough space to allocate a 4 M page.
 463                          */
 464                         new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
 465                         more_pgs = new_npgs - npages;
 466 
 467                         /*
 468                          * The zone will never be NULL, as a fully created
 469                          * shm always has an owning zone.
 470                          */
 471                         zone = sp->shm_perm.ipc_zone_ref.zref_zone;
 472                         ASSERT(zone != NULL);
 473                         if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
 474                                 err = ENOMEM;
 475                                 goto out4;
 476                         }
 477 
 478                         nahp = anon_create(new_npgs, ANON_SLEEP);
 479                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 480                         (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
 481                             ANON_SLEEP);
 482                         anon_release(amp->ahp, npages);
 483                         amp->ahp = nahp;
 484                         ASSERT(amp->swresv == ptob(npages));
 485                         amp->swresv = amp->size = ptob(new_npgs);
 486                         ANON_LOCK_EXIT(&amp->a_rwlock);
 487                         npages = new_npgs;
 488                 }
 489 
 490                 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
 491                     sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
 492                 sptd->spt_pcachecnt = 0;
 493                 sptd->spt_realsize = ptob(npages);
 494                 sptcargs->seg_spt = seg;
 495                 return (0);
 496         }
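        /*
         * Only the ISM case reaches this point: all pages are created and
         * locked right away.  For DISM (SHM_PAGEABLE) we returned above;
         * its pages are created and locked later, on demand, with
         * spt_ppa_lckcnt tracking the per-page lock counts.
         */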
 497 
 498         /*
 499          * get array of pages for each anon slot in amp
 500          */
 501         if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
 502             seg, addr, S_CREATE, cred)) != 0)
 503                 goto out4;
 504 
 505         mutex_enter(&sp->shm_mlock);
 506 
 507         /* May be partially locked, so count bytes to charge for locking */
 508         for (i = 0; i < npages; i++)
 509                 if (ppa[i]->p_lckcnt == 0)
 510                         lockedbytes += PAGESIZE;
 511 
 512         proj = sp->shm_perm.ipc_proj;
 513 
 514         if (lockedbytes > 0) {
 515                 mutex_enter(&procp->p_lock);
 516                 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
 517                         mutex_exit(&procp->p_lock);
 518                         mutex_exit(&sp->shm_mlock);
 519                         for (i = 0; i < npages; i++)
 520                                 page_unlock(ppa[i]);
 521                         err = ENOMEM;
 522                         goto out4;
 523                 }
 524                 mutex_exit(&procp->p_lock);
 525         }
 526 
 527         /*
 528          * addr is the initial address of the first page on the ppa list
 529          */
 530         for (i = 0; i < npages; i++) {
 531                 /* attempt to lock all pages */
 532                 if (page_pp_lock(ppa[i], 0, 1) == 0) {
 533                         /*
 534                          * if unable to lock any page, unlock all
 535                          * of them and return error
 536                          */
 537                         for (j = 0; j < i; j++)
 538                                 page_pp_unlock(ppa[j], 0, 1);
 539                         for (i = 0; i < npages; i++)
 540                                 page_unlock(ppa[i]);
 541                         rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
 542                         mutex_exit(&sp->shm_mlock);
 543                         err = ENOMEM;
 544                         goto out4;
 545                 }
 546         }
 547         mutex_exit(&sp->shm_mlock);
 548 
 549         /*
 550          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
 551          * for the entire life of the segment, for example platforms
 552          * that do not support Dynamic Reconfiguration.
 553          */
 554         hat_flags = HAT_LOAD_SHARE;
 555         if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
 556                 hat_flags |= HAT_LOAD_LOCK;
 557 
 558         /*
 559          * Load translations one large page at a time
 560          * to make sure we don't create mappings bigger than the
 561          * segment's size code, in case the underlying pages
 562          * are shared with a segvn segment that uses a bigger
 563          * size code than we do.
 564          */
 565         pgsz = page_get_pagesize(seg->s_szc);
 566         pgcnt = page_get_pagecnt(seg->s_szc);
 567         for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
 568                 sz = MIN(pgsz, ptob(npages - pidx));
 569                 hat_memload_array(seg->s_as->a_hat, a, sz,
 570                     &ppa[pidx], sptd->spt_prot, hat_flags);
 571         }
 572 
 573         /*
 574          * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 575          * we will leave the pages locked SE_SHARED for the life
 576          * of the ISM segment. This will prevent any calls to
 577          * hat_pageunload() on this ISM segment for those platforms.
 578          */
 579         if (!(hat_flags & HAT_LOAD_LOCK)) {
 580                 /*
 581                  * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
 582                  * we no longer need to hold the SE_SHARED lock on the pages,
 583                  * since L_PAGELOCK and F_SOFTLOCK calls will grab the
 584                  * SE_SHARED lock on the pages as necessary.
 585                  */
 586                 for (i = 0; i < npages; i++)
 587                         page_unlock(ppa[i]);
 588         }
 589         sptd->spt_pcachecnt = 0;
 590         kmem_free(ppa, ((sizeof (page_t *)) * npages));
 591         sptd->spt_realsize = ptob(npages);
 592         atomic_add_long(&spt_used, npages);
 593         sptcargs->seg_spt = seg;
 594         return (0);
 595 
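/*
 * Error unwind: out4 clears seg->s_data and releases the vnode and
 * condition variable, out3 destroys spt_lock and (for ISM) frees the ppa
 * array, out2 frees the spt_data itself, and out1 (for ISM) restores the
 * swap reservation taken by anon_swap_adjust() above.
 */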
 596 out4:
 597         seg->s_data = NULL;
 598         kmem_free(vp, sizeof (*vp));
 599         cv_destroy(&sptd->spt_cv);
 600 out3:
 601         mutex_destroy(&sptd->spt_lock);
 602         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 603                 kmem_free(ppa, (sizeof (*ppa) * npages));
 604 out2:
 605         kmem_free(sptd, sizeof (*sptd));
 606 out1:
 607         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 608                 anon_swap_restore(npages);
 609         return (err);
 610 }
 611 
 612 /*ARGSUSED*/
 613 void
 614 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
 615 {
 616         struct page     *pp;
 617         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 618         pgcnt_t         npages;
 619         ulong_t         anon_idx;
 620         struct anon_map *amp;
 621         struct anon     *ap;
 622         struct vnode    *vp;
 623         u_offset_t      off;
 624         uint_t          hat_flags;
 625         int             root = 0;
 626         pgcnt_t         pgs, curnpgs = 0;
 627         page_t          *rootpp;
 628         rctl_qty_t      unlocked_bytes = 0;
 629         kproject_t      *proj;
 630         kshmid_t        *sp;
 631 
 632         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 633 
 634         len = P2ROUNDUP(len, PAGESIZE);
 635 
 636         npages = btop(len);
 637 
 638         hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
 639         if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
 640             (sptd->spt_flags & SHM_PAGEABLE)) {
 641                 hat_flags = HAT_UNLOAD_UNMAP;
 642         }
 643 
 644         hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
 645 
 646         amp = sptd->spt_amp;
 647         if (sptd->spt_flags & SHM_PAGEABLE)
 648                 npages = btop(amp->size);
 649 
 650         ASSERT(amp != NULL);
 651 
 652         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 653                 sp = amp->a_sp;
 654                 proj = sp->shm_perm.ipc_proj;
 655                 mutex_enter(&sp->shm_mlock);
 656         }
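        /*
         * For ISM the pages were charged against the project's locked-memory
         * rctl when the segment was created; unlocked_bytes accumulates what
         * is unlocked here so that charge can be dropped after the loop.
         */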
 657         for (anon_idx = 0; anon_idx < npages; anon_idx++) {
 658                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 659                         if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
 660                                 panic("segspt_free_pages: null app");
 661                                 /*NOTREACHED*/
 662                         }
 663                 } else {
 664                         if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
 665                             == NULL)
 666                                 continue;
 667                 }
 668                 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
 669                 swap_xlate(ap, &vp, &off);
 670 
 671                 /*
 672                  * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
 673                  * the pages are not holding the SE_SHARED lock at this
 674                  * point.
 675                  *
 676                  * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 677                  * the pages are still held SE_SHARED locked from the
 678                  * original segspt_create().
 679                  *
 680                  * Our goal is to get an SE_EXCL lock on each page, remove
 681                  * the permanent lock on it and invalidate the page.
 682                  */
 683                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 684                         if (hat_flags == HAT_UNLOAD_UNMAP)
 685                                 pp = page_lookup(vp, off, SE_EXCL);
 686                         else {
 687                                 if ((pp = page_find(vp, off)) == NULL) {
 688                                         panic("segspt_free_pages: "
 689                                             "page not locked");
 690                                         /*NOTREACHED*/
 691                                 }
 692                                 if (!page_tryupgrade(pp)) {
 693                                         page_unlock(pp);
 694                                         pp = page_lookup(vp, off, SE_EXCL);
 695                                 }
 696                         }
 697                         if (pp == NULL) {
 698                                 panic("segspt_free_pages: "
 699                                     "page not in the system");
 700                                 /*NOTREACHED*/
 701                         }
 702                         ASSERT(pp->p_lckcnt > 0);
 703                         page_pp_unlock(pp, 0, 1);
 704                         if (pp->p_lckcnt == 0)
 705                                 unlocked_bytes += PAGESIZE;
 706                 } else {
 707                         if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
 708                                 continue;
 709                 }
 710                 /*
 711                  * It's logical to invalidate the pages here as in most cases
 712                  * these were created by segspt.
 713                  */
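                /*
                 * Large pages must be destroyed as a unit: gather all of
                 * the constituent pages (tracked by root/curnpgs) and call
                 * page_destroy_pages() once the last one is reached; small
                 * pages are freed individually with VN_DISPOSE below.
                 */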
 714                 if (pp->p_szc != 0) {
 715                         if (root == 0) {
 716                                 ASSERT(curnpgs == 0);
 717                                 root = 1;
 718                                 rootpp = pp;
 719                                 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
 720                                 ASSERT(pgs > 1);
 721                                 ASSERT(IS_P2ALIGNED(pgs, pgs));
 722                                 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
 723                                 curnpgs--;
 724                         } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
 725                                 ASSERT(curnpgs == 1);
 726                                 ASSERT(page_pptonum(pp) ==
 727                                     page_pptonum(rootpp) + (pgs - 1));
 728                                 page_destroy_pages(rootpp);
 729                                 root = 0;
 730                                 curnpgs = 0;
 731                         } else {
 732                                 ASSERT(curnpgs > 1);
 733                                 ASSERT(page_pptonum(pp) ==
 734                                     page_pptonum(rootpp) + (pgs - curnpgs));
 735                                 curnpgs--;
 736                         }
 737                 } else {
 738                         if (root != 0 || curnpgs != 0) {
 739                                 panic("segspt_free_pages: bad large page");
 740                                 /*NOTREACHED*/
 741                         }
 742                         /*
 743                          * Before destroying the pages, we need to take care
 744                          * of the rctl locked memory accounting. For that
 745                          * we need to calculate the unlocked_bytes.
 746                          */
 747                         if (pp->p_lckcnt > 0)
 748                                 unlocked_bytes += PAGESIZE;
 749                         /*LINTED: constant in conditional context */
 750                         VN_DISPOSE(pp, B_INVAL, 0, kcred);
 751                 }
 752         }
 753         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 754                 if (unlocked_bytes > 0)
 755                         rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
 756                 mutex_exit(&sp->shm_mlock);
 757         }
 758         if (root != 0 || curnpgs != 0) {
 759                 panic("segspt_free_pages: bad large page");
 760                 /*NOTREACHED*/
 761         }
 762 
 763         /*
 764          * mark that pages have been released
 765          */
 766         sptd->spt_realsize = 0;
 767 
 768         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 769                 atomic_add_long(&spt_used, -npages);
 770                 anon_swap_restore(npages);
 771         }
 772 }
 773 
 774 /*
 775  * Get memory allocation policy info for specified address in given segment
 776  */
 777 static lgrp_mem_policy_info_t *
 778 segspt_getpolicy(struct seg *seg, caddr_t addr)
 779 {
 780         struct anon_map         *amp;
 781         ulong_t                 anon_index;
 782         lgrp_mem_policy_info_t  *policy_info;
 783         struct spt_data         *spt_data;
 784 
 785         ASSERT(seg != NULL);
 786 
 787         /*
 788          * Get anon_map from segspt
 789          *
 790          * Assume that no lock needs to be held on anon_map, since
 791          * it should be protected by its reference count which must be
 792          * nonzero for an existing segment
 793          * Need to grab readers lock on policy tree though
 794          */
 795         spt_data = (struct spt_data *)seg->s_data;
 796         if (spt_data == NULL)
 797                 return (NULL);
 798         amp = spt_data->spt_amp;
 799         ASSERT(amp->refcnt != 0);
 800 
 801         /*
 802          * Get policy info
 803          *
 804          * Assume starting anon index of 0
 805          */
 806         anon_index = seg_page(seg, addr);
 807         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
 808 
 809         return (policy_info);
 810 }
 811 
 812 /*
 813  * DISM only.
 814  * Return locked pages over a given range.
 815  *
 816  * We will cache all DISM locked pages and save the pplist for the
 817  * entire segment in the ppa field of the underlying DISM segment structure.
 818  * Later, during a call to segspt_reclaim() we will use this ppa array
 819  * to page_unlock() all of the pages and then we will free this ppa list.
 820  */
 821 /*ARGSUSED*/
 822 static int
 823 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
 824     struct page ***ppp, enum lock_type type, enum seg_rw rw)
 825 {
 826         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
 827         struct  seg     *sptseg = shmd->shm_sptseg;
 828         struct  spt_data *sptd = sptseg->s_data;
 829         pgcnt_t pg_idx, npages, tot_npages, npgs;
 830         struct  page **pplist, **pl, **ppa, *pp;
 831         struct  anon_map *amp;
 832         spgcnt_t        an_idx;
 833         int     ret = ENOTSUP;
 834         uint_t  pl_built = 0;
 835         struct  anon *ap;
 836         struct  vnode *vp;
 837         u_offset_t off;
 838         pgcnt_t claim_availrmem = 0;
 839         uint_t  szc;
 840 
 841         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 842         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
 843 
 844         /*
 845          * We want to lock/unlock the entire DISM segment. Therefore,
 846          * we will be using the underlying sptseg and its base address
 847          * and length for the caching arguments.
 848          */
 849         ASSERT(sptseg);
 850         ASSERT(sptd);
 851 
 852         pg_idx = seg_page(seg, addr);
 853         npages = btopr(len);
 854 
 855         /*
 856          * check if the request is larger than number of pages covered
 857          * by amp
 858          */
 859         if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
 860                 *ppp = NULL;
 861                 return (ENOTSUP);
 862         }
 863 
 864         if (type == L_PAGEUNLOCK) {
 865                 ASSERT(sptd->spt_ppa != NULL);
 866 
 867                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
 868                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 869 
 870                 /*
 871                  * If someone is blocked while unmapping, we purge
 872                  * segment page cache and thus reclaim pplist synchronously
 873                  * without waiting for seg_pasync_thread. This speeds up
 874                  * unmapping in cases where munmap(2) is called, while
 875                  * raw async i/o is still in progress or where a thread
 876                  * exits on data fault in a multithreaded application.
 877                  */
 878                 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
 879                     (AS_ISUNMAPWAIT(seg->s_as) &&
 880                     shmd->shm_softlockcnt > 0)) {
 881                         segspt_purge(seg);
 882                 }
 883                 return (0);
 884         }
 885 
 886         /* The L_PAGELOCK case ... */
 887 
 888         if (sptd->spt_flags & DISM_PPA_CHANGED) {
 889                 segspt_purge(seg);
 890                 /*
 891                  * for DISM the ppa array needs to be rebuilt, since
 892                  * the number of locked pages could have changed
 893                  */
 894                 *ppp = NULL;
 895                 return (ENOTSUP);
 896         }
 897 
 898         /*
 899          * First try to find pages in segment page cache, without
 900          * holding the segment lock.
 901          */
 902         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 903             S_WRITE, SEGP_FORCE_WIRED);
 904         if (pplist != NULL) {
 905                 ASSERT(sptd->spt_ppa != NULL);
 906                 ASSERT(sptd->spt_ppa == pplist);
 907                 ppa = sptd->spt_ppa;
 908                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 909                         if (ppa[an_idx] == NULL) {
 910                                 seg_pinactive(seg, NULL, seg->s_base,
 911                                     sptd->spt_amp->size, ppa,
 912                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 913                                 *ppp = NULL;
 914                                 return (ENOTSUP);
 915                         }
 916                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 917                                 npgs = page_get_pagecnt(szc);
 918                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 919                         } else {
 920                                 an_idx++;
 921                         }
 922                 }
 923                 /*
 924                  * Since we cache the entire DISM segment, we want to
 925                  * set ppp to point to the first slot that corresponds
 926                  * to the requested addr, i.e. pg_idx.
 927                  */
 928                 *ppp = &(sptd->spt_ppa[pg_idx]);
 929                 return (0);
 930         }
 931 
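        /*
         * Not found in the cache (or it changed underneath us): retry the
         * lookup with spt_lock held, so that only one thread ends up
         * building the ppa array.
         */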
 932         mutex_enter(&sptd->spt_lock);
 933         /*
 934          * try to find pages in segment page cache with mutex
 935          */
 936         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 937             S_WRITE, SEGP_FORCE_WIRED);
 938         if (pplist != NULL) {
 939                 ASSERT(sptd->spt_ppa != NULL);
 940                 ASSERT(sptd->spt_ppa == pplist);
 941                 ppa = sptd->spt_ppa;
 942                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 943                         if (ppa[an_idx] == NULL) {
 944                                 mutex_exit(&sptd->spt_lock);
 945                                 seg_pinactive(seg, NULL, seg->s_base,
 946                                     sptd->spt_amp->size, ppa,
 947                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 948                                 *ppp = NULL;
 949                                 return (ENOTSUP);
 950                         }
 951                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 952                                 npgs = page_get_pagecnt(szc);
 953                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 954                         } else {
 955                                 an_idx++;
 956                         }
 957                 }
 958                 /*
 959                  * Since we cache the entire DISM segment, we want to
 960                  * set ppp to point to the first slot that corresponds
 961                  * to the requested addr, i.e. pg_idx.
 962                  */
 963                 mutex_exit(&sptd->spt_lock);
 964                 *ppp = &(sptd->spt_ppa[pg_idx]);
 965                 return (0);
 966         }
 967         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
 968             SEGP_FORCE_WIRED) == SEGP_FAIL) {
 969                 mutex_exit(&sptd->spt_lock);
 970                 *ppp = NULL;
 971                 return (ENOTSUP);
 972         }
 973 
 974         /*
 975          * No need to worry about protections because DISM pages are always rw.
 976          */
 977         pl = pplist = NULL;
 978         amp = sptd->spt_amp;
 979 
 980         /*
 981          * Do we need to build the ppa array?
 982          */
 983         if (sptd->spt_ppa == NULL) {
 984                 pgcnt_t lpg_cnt = 0;
 985 
 986                 pl_built = 1;
 987                 tot_npages = btopr(sptd->spt_amp->size);
 988 
 989                 ASSERT(sptd->spt_pcachecnt == 0);
 990                 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
 991                 pl = pplist;
 992 
 993                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 994                 for (an_idx = 0; an_idx < tot_npages; ) {
 995                         ap = anon_get_ptr(amp->ahp, an_idx);
 996                         /*
 997                          * Cache only mlocked pages. For large pages,
 998                          * if one (constituent) page is mlocked then
 999                          * all pages of that large page are cached
1000                          * as well. This allows quick lookups in the
1001                          * ppa array.
1002                          */
1003                         if ((ap != NULL) && (lpg_cnt != 0 ||
1004                             (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1005 
1006                                 swap_xlate(ap, &vp, &off);
1007                                 pp = page_lookup(vp, off, SE_SHARED);
1008                                 ASSERT(pp != NULL);
1009                                 if (lpg_cnt == 0) {
1010                                         lpg_cnt++;
1011                                         /*
1012                                          * For a small page, we are done --
1013                                          * lpg_cnt is reset to 0 below.
1014                                          *
1015                                          * For a large page, we are guaranteed
1016                                          * to find the anon structures of all
1017                                          * constituent pages and a non-zero
1018                                          * lpg_cnt ensures that we don't test
1019                                          * for mlock for these. We are done
1020                                          * when lpg_cnt reaches (npgs + 1).
1021                                          * If we are not the first constituent
1022                                          * page, restart at the first one.
1023                                          */
1024                                         npgs = page_get_pagecnt(pp->p_szc);
1025                                         if (!IS_P2ALIGNED(an_idx, npgs)) {
1026                                                 an_idx = P2ALIGN(an_idx, npgs);
1027                                                 page_unlock(pp);
1028                                                 continue;
1029                                         }
1030                                 }
1031                                 if (++lpg_cnt > npgs)
1032                                         lpg_cnt = 0;
1033 
1034                                 /*
1035                                  * availrmem is decremented only
1036                                  * for unlocked pages
1037                                  */
1038                                 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1039                                         claim_availrmem++;
1040                                 pplist[an_idx] = pp;
1041                         }
1042                         an_idx++;
1043                 }
1044                 ANON_LOCK_EXIT(&amp->a_rwlock);
1045 
1046                 if (claim_availrmem) {
1047                         mutex_enter(&freemem_lock);
1048                         if (availrmem < tune.t_minarmem + claim_availrmem) {
1049                                 mutex_exit(&freemem_lock);
1050                                 ret = ENOTSUP;
1051                                 claim_availrmem = 0;
1052                                 goto insert_fail;
1053                         } else {
1054                                 availrmem -= claim_availrmem;
1055                         }
1056                         mutex_exit(&freemem_lock);
1057                 }
1058 
1059                 sptd->spt_ppa = pl;
1060         } else {
1061                 /*
1062                  * We already have a valid ppa[].
1063                  */
1064                 pl = sptd->spt_ppa;
1065         }
1066 
1067         ASSERT(pl != NULL);
1068 
1069         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1070             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1071             segspt_reclaim);
1072         if (ret == SEGP_FAIL) {
1073                 /*
1074                  * seg_pinsert failed. We return
1075                  * ENOTSUP, so that the as_pagelock() code will
1076                  * then try the slower F_SOFTLOCK path.
1077                  */
1078                 if (pl_built) {
1079                         /*
1080                          * No one else has referenced the ppa[].
1081                          * We created it and we need to destroy it.
1082                          */
1083                         sptd->spt_ppa = NULL;
1084                 }
1085                 ret = ENOTSUP;
1086                 goto insert_fail;
1087         }
1088 
1089         /*
1090          * In either case, we increment softlockcnt on the 'real' segment.
1091          */
1092         sptd->spt_pcachecnt++;
1093         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1094 
1095         ppa = sptd->spt_ppa;
1096         for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1097                 if (ppa[an_idx] == NULL) {
1098                         mutex_exit(&sptd->spt_lock);
1099                         seg_pinactive(seg, NULL, seg->s_base,
1100                             sptd->spt_amp->size,
1101                             pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1102                         *ppp = NULL;
1103                         return (ENOTSUP);
1104                 }
1105                 if ((szc = ppa[an_idx]->p_szc) != 0) {
1106                         npgs = page_get_pagecnt(szc);
1107                         an_idx = P2ROUNDUP(an_idx + 1, npgs);
1108                 } else {
1109                         an_idx++;
1110                 }
1111         }
1112         /*
1113          * We can now drop the sptd->spt_lock since the ppa[]
1114          * exists and we have incremented pcachecnt.
1115          */
1116         mutex_exit(&sptd->spt_lock);
1117 
1118         /*
1119          * Since we cache the entire segment, we want to
1120          * set ppp to point to the first slot that corresponds
1121          * to the requested addr, i.e. pg_idx.
1122          */
1123         *ppp = &(sptd->spt_ppa[pg_idx]);
1124         return (0);
1125 
1126 insert_fail:
1127         /*
1128          * We will only reach this code if we tried and failed.
1129          *
1130          * And we can drop the lock on the dummy seg, once we've failed
1131          * to set up a new ppa[].
1132          */
1133         mutex_exit(&sptd->spt_lock);
1134 
1135         if (pl_built) {
1136                 if (claim_availrmem) {
1137                         mutex_enter(&freemem_lock);
1138                         availrmem += claim_availrmem;
1139                         mutex_exit(&freemem_lock);
1140                 }
1141 
1142                 /*
1143                  * We created pl and we need to destroy it.
1144                  */
1145                 pplist = pl;
1146                 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1147                         if (pplist[an_idx] != NULL)
1148                                 page_unlock(pplist[an_idx]);
1149                 }
1150                 kmem_free(pl, sizeof (page_t *) * tot_npages);
1151         }
1152 
1153         if (shmd->shm_softlockcnt <= 0) {
1154                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1155                         mutex_enter(&seg->s_as->a_contents);
1156                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1157                                 AS_CLRUNMAPWAIT(seg->s_as);
1158                                 cv_broadcast(&seg->s_as->a_cv);
1159                         }
1160                         mutex_exit(&seg->s_as->a_contents);
1161                 }
1162         }
1163         *ppp = NULL;
1164         return (ret);
1165 }
1166 
1167 
1168 
1169 /*
1170  * return locked pages over a given range.
1171  *
1172  * We will cache the entire ISM segment and save the pplist for the
1173  * entire segment in the ppa field of the underlying ISM segment structure.
1174  * Later, during a call to segspt_reclaim() we will use this ppa array
1175  * to page_unlock() all of the pages and then we will free this ppa list.
1176  */
1177 /*ARGSUSED*/
1178 static int
1179 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1180     struct page ***ppp, enum lock_type type, enum seg_rw rw)
1181 {
1182         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1183         struct seg      *sptseg = shmd->shm_sptseg;
1184         struct spt_data *sptd = sptseg->s_data;
1185         pgcnt_t np, page_index, npages;
1186         caddr_t a, spt_base;
1187         struct page **pplist, **pl, *pp;
1188         struct anon_map *amp;
1189         ulong_t anon_index;
1190         int ret = ENOTSUP;
1191         uint_t  pl_built = 0;
1192         struct anon *ap;
1193         struct vnode *vp;
1194         u_offset_t off;
1195 
1196         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1197         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1198 
1199 
1200         /*
1201          * We want to lock/unlock the entire ISM segment. Therefore,
1202          * we will be using the underlying sptseg and its base address
1203          * and length for the caching arguments.
1204          */
1205         ASSERT(sptseg);
1206         ASSERT(sptd);
1207 
1208         if (sptd->spt_flags & SHM_PAGEABLE) {
1209                 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1210         }
1211 
1212         page_index = seg_page(seg, addr);
1213         npages = btopr(len);
1214 
1215         /*
1216          * check if the request is larger than number of pages covered
1217          * by amp
1218          */
1219         if (page_index + npages > btopr(sptd->spt_amp->size)) {
1220                 *ppp = NULL;
1221                 return (ENOTSUP);
1222         }
1223 
1224         if (type == L_PAGEUNLOCK) {
1225 
1226                 ASSERT(sptd->spt_ppa != NULL);
1227 
1228                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1229                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1230 
1231                 /*
1232                  * If someone is blocked while unmapping, we purge
1233                  * segment page cache and thus reclaim pplist synchronously
1234                  * without waiting for seg_pasync_thread. This speeds up
1235                  * unmapping in cases where munmap(2) is called, while
1236                  * raw async i/o is still in progress or where a thread
1237                  * exits on data fault in a multithreaded application.
1238                  */
1239                 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1240                         segspt_purge(seg);
1241                 }
1242                 return (0);
1243         }
1244 
1245         /* The L_PAGELOCK case... */
1246 
1247         /*
1248          * First try to find pages in segment page cache, without
1249          * holding the segment lock.
1250          */
1251         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1252             S_WRITE, SEGP_FORCE_WIRED);
1253         if (pplist != NULL) {
1254                 ASSERT(sptd->spt_ppa == pplist);
1255                 ASSERT(sptd->spt_ppa[page_index]);
1256                 /*
1257                  * Since we cache the entire ISM segment, we want to
1258                  * set ppp to point to the first slot that corresponds
1259                  * to the requested addr, i.e. page_index.
1260                  */
1261                 *ppp = &(sptd->spt_ppa[page_index]);
1262                 return (0);
1263         }
1264 
1265         mutex_enter(&sptd->spt_lock);
1266 
1267         /*
1268          * try to find pages in segment page cache
1269          */
1270         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1271             S_WRITE, SEGP_FORCE_WIRED);
1272         if (pplist != NULL) {
1273                 ASSERT(sptd->spt_ppa == pplist);
1274                 /*
1275                  * Since we cache the entire segment, we want to
1276                  * set ppp to point to the first slot that corresponds
1277                  * to the requested addr, i.e. page_index.
1278                  */
1279                 mutex_exit(&sptd->spt_lock);
1280                 *ppp = &(sptd->spt_ppa[page_index]);
1281                 return (0);
1282         }
1283 
1284         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1285             SEGP_FORCE_WIRED) == SEGP_FAIL) {
1286                 mutex_exit(&sptd->spt_lock);
1287                 *ppp = NULL;
1288                 return (ENOTSUP);
1289         }
1290 
1291         /*
1292          * No need to worry about protections because ISM pages
1293          * are always rw.
1294          */
1295         pl = pplist = NULL;
1296 
1297         /*
1298          * Do we need to build the ppa array?
1299          */
1300         if (sptd->spt_ppa == NULL) {
1301                 ASSERT(sptd->spt_ppa == pplist);
1302 
1303                 spt_base = sptseg->s_base;
1304                 pl_built = 1;
1305 
1306                 /*
1307                  * availrmem is decremented once during anon_swap_adjust()
1308                  * and is incremented during the anon_unresv(), which is
1309                  * called from shm_rm_amp() when the segment is destroyed.
1310                  */
1311                 amp = sptd->spt_amp;
1312                 ASSERT(amp != NULL);
1313 
1314                 /* pcachecnt is protected by sptd->spt_lock */
1315                 ASSERT(sptd->spt_pcachecnt == 0);
1316                 pplist = kmem_zalloc(sizeof (page_t *)
1317                     * btopr(sptd->spt_amp->size), KM_SLEEP);
1318                 pl = pplist;
1319 
1320                 anon_index = seg_page(sptseg, spt_base);
1321 
1322                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1323                 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1324                     a += PAGESIZE, anon_index++, pplist++) {
1325                         ap = anon_get_ptr(amp->ahp, anon_index);
1326                         ASSERT(ap != NULL);
1327                         swap_xlate(ap, &vp, &off);
1328                         pp = page_lookup(vp, off, SE_SHARED);
1329                         ASSERT(pp != NULL);
1330                         *pplist = pp;
1331                 }
1332                 ANON_LOCK_EXIT(&amp->a_rwlock);
1333 
1334                 if (a < (spt_base + sptd->spt_amp->size)) {
1335                         ret = ENOTSUP;
1336                         goto insert_fail;
1337                 }
1338                 sptd->spt_ppa = pl;
1339         } else {
1340                 /*
1341                  * We already have a valid ppa[].
1342                  */
1343                 pl = sptd->spt_ppa;
1344         }
1345 
1346         ASSERT(pl != NULL);
1347 
1348         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1349             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1350             segspt_reclaim);
1351         if (ret == SEGP_FAIL) {
1352                 /*
1353                  * seg_pinsert failed. We return
1354                  * ENOTSUP, so that the as_pagelock() code will
1355                  * then try the slower F_SOFTLOCK path.
1356                  */
1357                 if (pl_built) {
1358                         /*
1359                          * No one else has referenced the ppa[].
1360                          * We created it and we need to destroy it.
1361                          */
1362                         sptd->spt_ppa = NULL;
1363                 }
1364                 ret = ENOTSUP;
1365                 goto insert_fail;
1366         }
1367 
1368         /*
1369          * In either case, we increment softlockcnt on the 'real' segment.
1370          */
1371         sptd->spt_pcachecnt++;
1372         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1373 
1374         /*
1375          * We can now drop the sptd->spt_lock since the ppa[]
1376          * exists and we have incremented pcachecnt.
1377          */
1378         mutex_exit(&sptd->spt_lock);
1379 
1380         /*
1381          * Since we cache the entire segment, we want to
1382          * set ppp to point to the first slot that corresponds
1383          * to the requested addr, i.e. page_index.
1384          */
1385         *ppp = &(sptd->spt_ppa[page_index]);
1386         return (0);
1387 
1388 insert_fail:
1389         /*
1390          * We will only reach this code if we tried and failed.
1391          *
1392          * We can drop the lock on the dummy seg once we've failed
1393          * to set up a new ppa[].
1394          */
1395         mutex_exit(&sptd->spt_lock);
1396 
1397         if (pl_built) {
1398                 /*
1399                  * We created pl and we need to destroy it.
1400                  */
1401                 pplist = pl;
1402                 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1403                 while (np) {
1404                         page_unlock(*pplist);
1405                         np--;
1406                         pplist++;
1407                 }
1408                 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1409         }
1410         if (shmd->shm_softlockcnt <= 0) {
1411                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1412                         mutex_enter(&seg->s_as->a_contents);
1413                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1414                                 AS_CLRUNMAPWAIT(seg->s_as);
1415                                 cv_broadcast(&seg->s_as->a_cv);
1416                         }
1417                         mutex_exit(&seg->s_as->a_contents);
1418                 }
1419         }
1420         *ppp = NULL;
1421         return (ret);
1422 }
1423 
1424 /*
1425  * purge any cached wired page lists for this segment from the seg pcache
1426  */
1427 static void
1428 segspt_purge(struct seg *seg)
1429 {
1430         seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1431 }
1432 
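     /*
      * Reclaim callback registered with seg_pinsert(); invoked by the seg
      * pcache when a cached wired page list for this segment is released.
      * The last release (spt_pcachecnt drops to 0) unlocks the pages,
      * frees the ppa[] array and wakes any threads waiting for it to be
      * torn down.
      */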
1433 static int
1434 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1435         enum seg_rw rw, int async)
1436 {
1437         struct seg *seg = (struct seg *)ptag;
1438         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
1439         struct  seg     *sptseg;
1440         struct  spt_data *sptd;
1441         pgcnt_t npages, i, free_availrmem = 0;
1442         int     done = 0;
1443 
1444 #ifdef lint
1445         addr = addr;
1446 #endif
1447         sptseg = shmd->shm_sptseg;
1448         sptd = sptseg->s_data;
1449         npages = (len >> PAGESHIFT);
1450         ASSERT(npages);
1451         ASSERT(sptd->spt_pcachecnt != 0);
1452         ASSERT(sptd->spt_ppa == pplist);
1453         ASSERT(npages == btopr(sptd->spt_amp->size));
1454         ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1455 
1456         /*
1457          * Acquire the lock on the dummy seg and destroy the
1458          * ppa array IF this is the last pcachecnt.
1459          */
1460         mutex_enter(&sptd->spt_lock);
1461         if (--sptd->spt_pcachecnt == 0) {
1462                 for (i = 0; i < npages; i++) {
1463                         if (pplist[i] == NULL) {
1464                                 continue;
1465                         }
1466                         if (rw == S_WRITE) {
1467                                 hat_setrefmod(pplist[i]);
1468                         } else {
1469                                 hat_setref(pplist[i]);
1470                         }
1471                         if ((sptd->spt_flags & SHM_PAGEABLE) &&
1472                             (sptd->spt_ppa_lckcnt[i] == 0))
1473                                 free_availrmem++;
1474                         page_unlock(pplist[i]);
1475                 }
1476                 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1477                         mutex_enter(&freemem_lock);
1478                         availrmem += free_availrmem;
1479                         mutex_exit(&freemem_lock);
1480                 }
1481                 /*
1482                  * Since we want to cache/uncache the entire ISM segment,
1483                  * we track the pplist in a segspt-specific field, ppa,
1484                  * which is initialized at the time we add an entry to
1485                  * the cache.
1486                  */
1487                 ASSERT(sptd->spt_pcachecnt == 0);
1488                 kmem_free(pplist, sizeof (page_t *) * npages);
1489                 sptd->spt_ppa = NULL;
1490                 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1491                 sptd->spt_gen++;
1492                 cv_broadcast(&sptd->spt_cv);
1493                 done = 1;
1494         }
1495         mutex_exit(&sptd->spt_lock);
1496 
1497         /*
1498          * If we are the pcache async thread, or were called via
1499          * seg_ppurge_wiredpp(), we may not hold the AS lock (in that
1500          * case the async argument is not 0).  This means that if
1501          * softlockcnt drops to 0 after the decrement below, the address
1502          * space may be freed.  We can't allow that, since after the
1503          * decrement we still need to access the as structure for a
1504          * possible wakeup of unmap waiters.  To keep the as from
1505          * disappearing we take this segment's shm_segfree_syncmtx;
1506          * segspt_shmfree() also takes this mutex as a barrier to make
1507          * sure this routine completes before the segment is freed.
1508          *
1509          * The second complication in the async case is a possible missed
1510          * wakeup of an unmap wait thread.  Without the as lock held we
1511          * may take the a_contents lock before the unmap wait thread that
1512          * was first to see softlockcnt still non-zero, and so fail to
1513          * wake it up.  To avoid this race we set the nounmapwait flag in
1514          * the as if we drop softlockcnt to 0 while async is not 0; the
1515          * unmap wait thread will not block if this flag is set.
1516          */
1517         if (async)
1518                 mutex_enter(&shmd->shm_segfree_syncmtx);
1519 
1520         /*
1521          * Now decrement softlockcnt.
1522          */
1523         ASSERT(shmd->shm_softlockcnt > 0);
1524         atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1525 
1526         if (shmd->shm_softlockcnt <= 0) {
1527                 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1528                         mutex_enter(&seg->s_as->a_contents);
1529                         if (async)
1530                                 AS_SETNOUNMAPWAIT(seg->s_as);
1531                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1532                                 AS_CLRUNMAPWAIT(seg->s_as);
1533                                 cv_broadcast(&seg->s_as->a_cv);
1534                         }
1535                         mutex_exit(&seg->s_as->a_contents);
1536                 }
1537         }
1538 
1539         if (async)
1540                 mutex_exit(&shmd->shm_segfree_syncmtx);
1541 
1542         return (done);
1543 }
1544 
1545 /*
1546  * Do a F_SOFTUNLOCK call over the range requested.
1547  * The range must have already been F_SOFTLOCK'ed.
1548  *
1549  * The calls to acquire and release the anon map lock mutex were
1550  * removed in order to avoid a deadly embrace during a DR
1551  * memory delete operation.  (E.g., DR blocks while waiting for an
1552  * exclusive lock on a page that is being used for kaio; the
1553  * thread that will complete the kaio and call segspt_softunlock
1554  * blocks on the anon map lock; another thread holding the anon
1555  * map lock blocks on another page lock via the segspt_shmfault
1556  * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1557  *
1558  * The appropriateness of the removal is based upon the following:
1559  * 1. If we are holding a segment's reader lock and the page is held
1560  * shared, then the corresponding element in anonmap which points to
1561  * anon struct cannot change and there is no need to acquire the
1562  * anonymous map lock.
1563  * 2. Threads in segspt_softunlock have a reader lock on the segment
1564  * and already have the shared page lock, so we are guaranteed that
1565  * the anon map slot cannot change and therefore can call anon_get_ptr()
1566  * without grabbing the anonymous map lock.
1567  * 3. Threads that softlock a shared page break copy-on-write, even if
1568  * it's a read.  Thus cow faults can be ignored with respect to soft
1569  * unlocking, since the breaking of cow means that the anon slot(s) will
1570  * not be shared.
1571  */
1572 static void
1573 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1574         size_t len, enum seg_rw rw)
1575 {
1576         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1577         struct seg      *sptseg;
1578         struct spt_data *sptd;
1579         page_t *pp;
1580         caddr_t adr;
1581         struct vnode *vp;
1582         u_offset_t offset;
1583         ulong_t anon_index;
1584         struct anon_map *amp;           /* XXX - for locknest */
1585         struct anon *ap = NULL;
1586         pgcnt_t npages;
1587 
1588         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1589 
1590         sptseg = shmd->shm_sptseg;
1591         sptd = sptseg->s_data;
1592 
1593         /*
1594          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1595          * and therefore their pages are SE_SHARED locked
1596          * for the entire life of the segment.
1597          */
1598         if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1599             ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1600                 goto softlock_decrement;
1601         }
1602 
1603         /*
1604          * Any thread is free to do a page_find and
1605          * page_unlock() on the pages within this seg.
1606          *
1607          * We are already holding the as->a_lock on the user's
1608          * real segment, but we need to hold the a_lock on the
1609          * underlying dummy as. This is mostly to satisfy the
1610          * underlying HAT layer.
1611          */
1612         AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1613         hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1614         AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1615 
1616         amp = sptd->spt_amp;
1617         ASSERT(amp != NULL);
1618         anon_index = seg_page(sptseg, sptseg_addr);
1619 
1620         for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1621                 ap = anon_get_ptr(amp->ahp, anon_index++);
1622                 ASSERT(ap != NULL);
1623                 swap_xlate(ap, &vp, &offset);
1624 
1625                 /*
1626                  * Use page_find() instead of page_lookup() to
1627                  * find the page since we know that it has a
1628                  * "shared" lock.
1629                  */
1630                 pp = page_find(vp, offset);
1631                 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1632                 if (pp == NULL) {
1633                         panic("segspt_softunlock: "
1634                             "addr %p, ap %p, vp %p, off %llx",
1635                             (void *)adr, (void *)ap, (void *)vp, offset);
1636                         /*NOTREACHED*/
1637                 }
1638 
1639                 if (rw == S_WRITE) {
1640                         hat_setrefmod(pp);
1641                 } else if (rw != S_OTHER) {
1642                         hat_setref(pp);
1643                 }
1644                 page_unlock(pp);
1645         }
1646 
1647 softlock_decrement:
1648         npages = btopr(len);
1649         ASSERT(shmd->shm_softlockcnt >= npages);
1650         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1651         if (shmd->shm_softlockcnt == 0) {
1652                 /*
1653                  * All SOFTLOCKS are gone. Wakeup any waiting
1654                  * unmappers so they can try again to unmap.
1655                  * Check for waiters first without the mutex
1656                  * held so we don't always grab the mutex on
1657                  * softunlocks.
1658                  */
1659                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1660                         mutex_enter(&seg->s_as->a_contents);
1661                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1662                                 AS_CLRUNMAPWAIT(seg->s_as);
1663                                 cv_broadcast(&seg->s_as->a_cv);
1664                         }
1665                         mutex_exit(&seg->s_as->a_contents);
1666                 }
1667         }
1668 }
1669 
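     /*
      * Attach a process to the SPT segment: allocate the per-attach
      * shm_data, set the default lgroup memory policy, allocate the
      * DISM per-page lock state (shm_vpage) array when pageable, share
      * the HAT mappings of the underlying dummy segment where applicable,
      * and take an additional hold on the shared anon map.
      */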
1670 int
1671 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1672 {
1673         struct shm_data *shmd_arg = (struct shm_data *)argsp;
1674         struct shm_data *shmd;
1675         struct anon_map *shm_amp = shmd_arg->shm_amp;
1676         struct spt_data *sptd;
1677         int error = 0;
1678 
1679         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1680 
1681         shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1682         if (shmd == NULL)
1683                 return (ENOMEM);
1684 
1685         shmd->shm_sptas = shmd_arg->shm_sptas;
1686         shmd->shm_amp = shm_amp;
1687         shmd->shm_sptseg = shmd_arg->shm_sptseg;
1688 
1689         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1690             NULL, 0, seg->s_size);
1691 
1692         mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1693 
1694         seg->s_data = (void *)shmd;
1695         seg->s_ops = &segspt_shmops;
1696         seg->s_szc = shmd->shm_sptseg->s_szc;
1697         sptd = shmd->shm_sptseg->s_data;
1698 
1699         if (sptd->spt_flags & SHM_PAGEABLE) {
1700                 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1701                     KM_NOSLEEP)) == NULL) {
1702                         seg->s_data = (void *)NULL;
1703                         kmem_free(shmd, (sizeof (*shmd)));
1704                         return (ENOMEM);
1705                 }
1706                 shmd->shm_lckpgs = 0;
1707                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1708                         if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1709                             shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1710                             seg->s_size, seg->s_szc)) != 0) {
1711                                 kmem_free(shmd->shm_vpage,
1712                                     btopr(shm_amp->size));
1713                         }
1714                 }
1715         } else {
1716                 error = hat_share(seg->s_as->a_hat, seg->s_base,
1717                     shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1718                     seg->s_size, seg->s_szc);
1719         }
1720         if (error) {
1721                 seg->s_szc = 0;
1722                 seg->s_data = (void *)NULL;
1723                 kmem_free(shmd, (sizeof (*shmd)));
1724         } else {
1725                 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1726                 shm_amp->refcnt++;
1727                 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1728         }
1729         return (error);
1730 }
1731 
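     /*
      * Unmap the process-side segment.  Outstanding softlocks cause one
      * pcache purge and retry before giving up with EAGAIN; otherwise
      * drop any DISM page locks, unshare the HAT mappings and free the
      * segment.
      */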
1732 int
1733 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1734 {
1735         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1736         int reclaim = 1;
1737 
1738         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1739 retry:
1740         if (shmd->shm_softlockcnt > 0) {
1741                 if (reclaim == 1) {
1742                         segspt_purge(seg);
1743                         reclaim = 0;
1744                         goto retry;
1745                 }
1746                 return (EAGAIN);
1747         }
1748 
1749         if (ssize != seg->s_size) {
1750 #ifdef DEBUG
1751                 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1752                     ssize, seg->s_size);
1753 #endif
1754                 return (EINVAL);
1755         }
1756 
1757         (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1758             NULL, 0);
1759         hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1760 
1761         seg_free(seg);
1762 
1763         return (0);
1764 }
1765 
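     /*
      * Free the process-side segment: drop any DISM page locks, release
      * the anon map hold taken at attach/dup time, free the DISM vpage
      * array and synchronize with any in-flight segspt_reclaim() before
      * freeing the shm_data.
      */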
1766 void
1767 segspt_shmfree(struct seg *seg)
1768 {
1769         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1770         struct anon_map *shm_amp = shmd->shm_amp;
1771 
1772         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1773 
1774         (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1775             MC_UNLOCK, NULL, 0);
1776 
1777         /*
1778          * Need to increment refcnt when attaching
1779          * and decrement when detaching because of dup().
1780          */
1781         ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1782         shm_amp->refcnt--;
1783         ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1784 
1785         if (shmd->shm_vpage) {       /* only for DISM */
1786                 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1787                 shmd->shm_vpage = NULL;
1788         }
1789 
1790         /*
1791          * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1792          * still working with this segment without holding as lock.
1793          */
1794         ASSERT(shmd->shm_softlockcnt == 0);
1795         mutex_enter(&shmd->shm_segfree_syncmtx);
1796         mutex_destroy(&shmd->shm_segfree_syncmtx);
1797 
1798         kmem_free(shmd, sizeof (*shmd));
1799 }
1800 
1801 /*ARGSUSED*/
1802 int
1803 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1804 {
1805         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1806 
1807         /*
1808          * A shared page table is more than a shared mapping.
1809          * Individual processes sharing the page tables can't change
1810          * protections because there is only one set of page tables.
1811          * This will be allowed once private page tables are
1812          * supported.
1813          */
1814 /* need to return correct status error? */
1815         return (0);
1816 }
1817 
1818 
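     /*
      * Fault handler for pageable (DISM) segments.  The underlying anon
      * pages are created or looked up in large-page chunks via
      * spt_anon_getpages() and loaded into the HAT; F_SOFTLOCK keeps the
      * pages and translations locked until the matching F_SOFTUNLOCK.
      */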
1819 faultcode_t
1820 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1821     size_t len, enum fault_type type, enum seg_rw rw)
1822 {
1823         struct  shm_data        *shmd = (struct shm_data *)seg->s_data;
1824         struct  seg             *sptseg = shmd->shm_sptseg;
1825         struct  as              *curspt = shmd->shm_sptas;
1826         struct  spt_data        *sptd = sptseg->s_data;
1827         pgcnt_t npages;
1828         size_t  size;
1829         caddr_t segspt_addr, shm_addr;
1830         page_t  **ppa;
1831         int     i;
1832         ulong_t an_idx = 0;
1833         int     err = 0;
1834         int     dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1835         size_t  pgsz;
1836         pgcnt_t pgcnt;
1837         caddr_t a;
1838         pgcnt_t pidx;
1839 
1840 #ifdef lint
1841         hat = hat;
1842 #endif
1843         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1844 
1845         /*
1846          * Because of the way spt is implemented,
1847          * the realsize of the segment does not have to be
1848          * equal to the segment size itself. The segment size is
1849          * often in multiples of a page size larger than PAGESIZE.
1850          * The realsize is rounded up to the nearest PAGESIZE
1851          * based on what the user requested. This is a bit of
1852          * ugliness that is historical but not easily fixed
1853          * without re-designing the higher levels of ISM.
1854          */
1855         ASSERT(addr >= seg->s_base);
1856         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1857                 return (FC_NOMAP);
1858         /*
1859          * For all of the following cases except F_PROT, we need to
1860          * make any necessary adjustments to addr and len
1861          * and get all of the necessary page_t's into an array called ppa[].
1862          *
1863          * The code in shmat() forces base addr and len of ISM segment
1864          * to be aligned to largest page size supported. Therefore,
1865          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1866          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1867          * in large pagesize chunks, or else we will screw up the HAT
1868          * layer by calling hat_memload_array() with differing page sizes
1869          * over a given virtual range.
1870          */
1871         pgsz = page_get_pagesize(sptseg->s_szc);
1872         pgcnt = page_get_pagecnt(sptseg->s_szc);
1873         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1874         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1875         npages = btopr(size);
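             /*
              * For example, assuming an 8K PAGESIZE and a 4M preferred
              * page size, a fault at seg->s_base + 0x401000 for 0x2000
              * bytes yields shm_addr = seg->s_base + 0x400000,
              * size = 0x400000 and npages = 512, i.e. one large page's
              * worth of work.
              */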
1876 
1877         /*
1878          * Now we need to convert from addr in segshm to addr in segspt.
1879          */
1880         an_idx = seg_page(seg, shm_addr);
1881         segspt_addr = sptseg->s_base + ptob(an_idx);
1882 
1883         ASSERT((segspt_addr + ptob(npages)) <=
1884             (sptseg->s_base + sptd->spt_realsize));
1885         ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1886 
1887         switch (type) {
1888 
1889         case F_SOFTLOCK:
1890 
1891                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1892                 /*
1893                  * Fall through to the F_INVAL case to load up the hat layer
1894                  * entries with the HAT_LOAD_LOCK flag.
1895                  */
1896                 /* FALLTHRU */
1897         case F_INVAL:
1898 
1899                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1900                         return (FC_NOMAP);
1901 
1902                 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1903 
1904                 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1905                 if (err != 0) {
1906                         if (type == F_SOFTLOCK) {
1907                                 atomic_add_long((ulong_t *)(
1908                                     &(shmd->shm_softlockcnt)), -npages);
1909                         }
1910                         goto dism_err;
1911                 }
1912                 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1913                 a = segspt_addr;
1914                 pidx = 0;
1915                 if (type == F_SOFTLOCK) {
1916 
1917                         /*
1918                          * Load up the translation keeping it
1919                          * locked and don't unlock the page.
1920                          */
1921                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1922                                 hat_memload_array(sptseg->s_as->a_hat,
1923                                     a, pgsz, &ppa[pidx], sptd->spt_prot,
1924                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1925                         }
1926                 } else {
1927                         if (hat == seg->s_as->a_hat) {
1928 
1929                                 /*
1930                                  * Migrate pages marked for migration
1931                                  */
1932                                 if (lgrp_optimizations())
1933                                         page_migrate(seg, shm_addr, ppa,
1934                                             npages);
1935 
1936                                 /* CPU HAT */
1937                                 for (; pidx < npages;
1938                                     a += pgsz, pidx += pgcnt) {
1939                                         hat_memload_array(sptseg->s_as->a_hat,
1940                                             a, pgsz, &ppa[pidx],
1941                                             sptd->spt_prot,
1942                                             HAT_LOAD_SHARE);
1943                                 }
1944                         } else {
1945                                 /* XHAT. Pass real address */
1946                                 hat_memload_array(hat, shm_addr,
1947                                     size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);
1948                         }
1949 
1950                         /*
1951                          * And now drop the SE_SHARED lock(s).
1952                          */
1953                         if (dyn_ism_unmap) {
1954                                 for (i = 0; i < npages; i++) {
1955                                         page_unlock(ppa[i]);
1956                                 }
1957                         }
1958                 }
1959 
1960                 if (!dyn_ism_unmap) {
1961                         if (hat_share(seg->s_as->a_hat, shm_addr,
1962                             curspt->a_hat, segspt_addr, ptob(npages),
1963                             seg->s_szc) != 0) {
1964                                 panic("hat_share err in DISM fault");
1965                                 /* NOTREACHED */
1966                         }
1967                         if (type == F_INVAL) {
1968                                 for (i = 0; i < npages; i++) {
1969                                         page_unlock(ppa[i]);
1970                                 }
1971                         }
1972                 }
1973                 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1974 dism_err:
1975                 kmem_free(ppa, npages * sizeof (page_t *));
1976                 return (err);
1977 
1978         case F_SOFTUNLOCK:
1979 
1980                 /*
1981                  * This is a bit ugly: we pass in the real seg pointer,
1982                  * but the segspt_addr is the virtual address within the
1983                  * dummy seg.
1984                  */
1985                 segspt_softunlock(seg, segspt_addr, size, rw);
1986                 return (0);
1987 
1988         case F_PROT:
1989 
1990                 /*
1991                  * This takes care of the unusual case where a user
1992                  * allocates a stack in shared memory and a register
1993                  * window overflow is written to that stack page before
1994                  * it is otherwise modified.
1995                  *
1996                  * We can get away with this because ISM segments are
1997                  * always rw. Other than this unusual case, there
1998                  * should be no instances of protection violations.
1999                  */
2000                 return (0);
2001 
2002         default:
2003 #ifdef DEBUG
2004                 panic("segspt_dismfault default type?");
2005 #else
2006                 return (FC_NOMAP);
2007 #endif
2008         }
2009 }
2010 
2011 
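     /*
      * Fault handler for the process-side ISM segment.  DISM faults are
      * redirected to segspt_dismfault().  For ISM the anon pages already
      * exist, so they are simply looked up SE_SHARED and loaded into the
      * HAT in large-page chunks (or shared wholesale via hat_share() on
      * platforms without HAT_DYNAMIC_ISM_UNMAP).
      */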
2012 faultcode_t
2013 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2014     size_t len, enum fault_type type, enum seg_rw rw)
2015 {
2016         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
2017         struct seg              *sptseg = shmd->shm_sptseg;
2018         struct as               *curspt = shmd->shm_sptas;
2019         struct spt_data         *sptd   = sptseg->s_data;
2020         pgcnt_t npages;
2021         size_t size;
2022         caddr_t sptseg_addr, shm_addr;
2023         page_t *pp, **ppa;
2024         int     i;
2025         u_offset_t offset;
2026         ulong_t anon_index = 0;
2027         struct vnode *vp;
2028         struct anon_map *amp;           /* XXX - for locknest */
2029         struct anon *ap = NULL;
2030         size_t          pgsz;
2031         pgcnt_t         pgcnt;
2032         caddr_t         a;
2033         pgcnt_t         pidx;
2034         size_t          sz;
2035 
2036 #ifdef lint
2037         hat = hat;
2038 #endif
2039 
2040         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2041 
2042         if (sptd->spt_flags & SHM_PAGEABLE) {
2043                 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2044         }
2045 
2046         /*
2047          * Because of the way spt is implemented,
2048          * the realsize of the segment does not have to be
2049          * equal to the segment size itself. The segment size is
2050          * often in multiples of a page size larger than PAGESIZE.
2051          * The realsize is rounded up to the nearest PAGESIZE
2052          * based on what the user requested. This is a bit of
2053          * ugliness that is historical but not easily fixed
2054          * without re-designing the higher levels of ISM.
2055          */
2056         ASSERT(addr >= seg->s_base);
2057         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2058                 return (FC_NOMAP);
2059         /*
2060          * For all of the following cases except F_PROT, we need to
2061          * make any necessary adjustments to addr and len
2062          * and get all of the necessary page_t's into an array called ppa[].
2063          *
2064          * The code in shmat() forces base addr and len of ISM segment
2065          * to be aligned to largest page size supported. Therefore,
2066          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2067          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2068          * in large pagesize chunks, or else we will screw up the HAT
2069          * layer by calling hat_memload_array() with differing page sizes
2070          * over a given virtual range.
2071          */
2072         pgsz = page_get_pagesize(sptseg->s_szc);
2073         pgcnt = page_get_pagecnt(sptseg->s_szc);
2074         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2075         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2076         npages = btopr(size);
2077 
2078         /*
2079          * Now we need to convert from addr in segshm to addr in segspt.
2080          */
2081         anon_index = seg_page(seg, shm_addr);
2082         sptseg_addr = sptseg->s_base + ptob(anon_index);
2083 
2084         /*
2085          * And now we may have to adjust npages downward if we have
2086          * exceeded the realsize of the segment or initial anon
2087          * allocations.
2088          */
2089         if ((sptseg_addr + ptob(npages)) >
2090             (sptseg->s_base + sptd->spt_realsize))
2091                 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2092 
2093         npages = btopr(size);
2094 
2095         ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2096         ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2097 
2098         switch (type) {
2099 
2100         case F_SOFTLOCK:
2101 
2102                 /*
2103                  * availrmem is decremented once during anon_swap_adjust()
2104                  * and is incremented during the anon_unresv(), which is
2105                  * called from shm_rm_amp() when the segment is destroyed.
2106                  */
2107                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2108                 /*
2109                  * Some platforms assume that ISM pages are SE_SHARED
2110                  * locked for the entire life of the segment.
2111                  */
2112                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2113                         return (0);
2114                 /*
2115                  * Fall through to the F_INVAL case to load up the hat layer
2116                  * entries with the HAT_LOAD_LOCK flag.
2117                  */
2118 
2119                 /* FALLTHRU */
2120         case F_INVAL:
2121 
2122                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2123                         return (FC_NOMAP);
2124 
2125                 /*
2126                  * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2127                  * may still rely on this call to hat_share(). That
2128                  * would imply that those HATs can fault on a
2129                  * HAT_LOAD_LOCK translation, which would seem
2130                  * contradictory.
2131                  */
2132                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2133                         if (hat_share(seg->s_as->a_hat, seg->s_base,
2134                             curspt->a_hat, sptseg->s_base,
2135                             sptseg->s_size, sptseg->s_szc) != 0) {
2136                                 panic("hat_share error in ISM fault");
2137                                 /*NOTREACHED*/
2138                         }
2139                         return (0);
2140                 }
2141                 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2142 
2143                 /*
2144                  * I see no need to lock the real seg
2145                  * here, because all of our work will be on the underlying
2146                  * dummy seg.
2147                  *
2148                  * sptseg_addr and npages now account for large pages.
2149                  */
2150                 amp = sptd->spt_amp;
2151                 ASSERT(amp != NULL);
2152                 anon_index = seg_page(sptseg, sptseg_addr);
2153 
2154                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2155                 for (i = 0; i < npages; i++) {
2156                         ap = anon_get_ptr(amp->ahp, anon_index++);
2157                         ASSERT(ap != NULL);
2158                         swap_xlate(ap, &vp, &offset);
2159                         pp = page_lookup(vp, offset, SE_SHARED);
2160                         ASSERT(pp != NULL);
2161                         ppa[i] = pp;
2162                 }
2163                 ANON_LOCK_EXIT(&amp->a_rwlock);
2164                 ASSERT(i == npages);
2165 
2166                 /*
2167                  * We are already holding the as->a_lock on the user's
2168                  * real segment, but we need to hold the a_lock on the
2169                  * underlying dummy as. This is mostly to satisfy the
2170                  * underlying HAT layer.
2171                  */
2172                 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2173                 a = sptseg_addr;
2174                 pidx = 0;
2175                 if (type == F_SOFTLOCK) {
2176                         /*
2177                          * Load up the translation keeping it
2178                          * locked and don't unlock the page.
2179                          */
2180                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2181                                 sz = MIN(pgsz, ptob(npages - pidx));
2182                                 hat_memload_array(sptseg->s_as->a_hat, a,
2183                                     sz, &ppa[pidx], sptd->spt_prot,
2184                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2185                         }
2186                 } else {
2187                         if (hat == seg->s_as->a_hat) {
2188 
2189                                 /*
2190                                  * Migrate pages marked for migration.
2191                                  */
2192                                 if (lgrp_optimizations())
2193                                         page_migrate(seg, shm_addr, ppa,
2194                                             npages);
2195 
2196                                 /* CPU HAT */
2197                                 for (; pidx < npages;
2198                                     a += pgsz, pidx += pgcnt) {
2199                                         sz = MIN(pgsz, ptob(npages - pidx));
2200                                         hat_memload_array(sptseg->s_as->a_hat,
2201                                             a, sz, &ppa[pidx],
2202                                             sptd->spt_prot, HAT_LOAD_SHARE);
2203                                 }
2204                         } else {
2205                                 /* XHAT. Pass real address */
2206                                 hat_memload_array(hat, shm_addr,
2207                                     ptob(npages), ppa, sptd->spt_prot,
2208                                     HAT_LOAD_SHARE);
2209                         }
2210 
2211                         /*
2212                          * And now drop the SE_SHARED lock(s).
2213                          */
2214                         for (i = 0; i < npages; i++)
2215                                 page_unlock(ppa[i]);
2216                 }
2217                 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2218 
2219                 kmem_free(ppa, sizeof (page_t *) * npages);
2220                 return (0);
2221         case F_SOFTUNLOCK:
2222 
2223                 /*
2224                  * This is a bit ugly: we pass in the real seg pointer,
2225                  * but the sptseg_addr is the virtual address within the
2226                  * dummy seg.
2227                  */
2228                 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2229                 return (0);
2230 
2231         case F_PROT:
2232 
2233                 /*
2234                  * This takes care of the unusual case where a user
2235                  * allocates a stack in shared memory and a register
2236                  * window overflow is written to that stack page before
2237                  * it is otherwise modified.
2238                  *
2239                  * We can get away with this because ISM segments are
2240                  * always rw. Other than this unusual case, there
2241                  * should be no instances of protection violations.
2242                  */
2243                 return (0);
2244 
2245         default:
2246 #ifdef DEBUG
2247                 cmn_err(CE_WARN, "segspt_shmfault default type?");
2248 #endif
2249                 return (FC_NOMAP);
2250         }
2251 }
2252 
2253 /*ARGSUSED*/
2254 static faultcode_t
2255 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2256 {
2257         return (0);
2258 }
2259 
2260 /*ARGSUSED*/
2261 static int
2262 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2263 {
2264         return (0);
2265 }
2266 
2267 /*ARGSUSED*/
2268 static size_t
2269 segspt_shmswapout(struct seg *seg)
2270 {
2271         return (0);
2272 }
2273 
2274 /*
2275  * duplicate the shared page tables
2276  */
2277 int
2278 segspt_shmdup(struct seg *seg, struct seg *newseg)
2279 {
2280         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
2281         struct anon_map         *amp = shmd->shm_amp;
2282         struct shm_data         *shmd_new;
2283         struct seg              *spt_seg = shmd->shm_sptseg;
2284         struct spt_data         *sptd = spt_seg->s_data;
2285         int                     error = 0;
2286 
2287         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2288 
2289         shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2290         newseg->s_data = (void *)shmd_new;
2291         shmd_new->shm_sptas = shmd->shm_sptas;
2292         shmd_new->shm_amp = amp;
2293         shmd_new->shm_sptseg = shmd->shm_sptseg;
2294         newseg->s_ops = &segspt_shmops;
2295         newseg->s_szc = seg->s_szc;
2296         ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2297 
2298         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2299         amp->refcnt++;
2300         ANON_LOCK_EXIT(&amp->a_rwlock);
2301 
2302         if (sptd->spt_flags & SHM_PAGEABLE) {
2303                 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2304                 shmd_new->shm_lckpgs = 0;
2305                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2306                         if ((error = hat_share(newseg->s_as->a_hat,
2307                             newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2308                             seg->s_size, seg->s_szc)) != 0) {
2309                                 kmem_free(shmd_new->shm_vpage,
2310                                     btopr(amp->size));
2311                         }
2312                 }
2313                 return (error);
2314         } else {
2315                 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2316                     shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2317                     seg->s_szc));
2318 
2319         }
2320 }
2321 
2322 /*ARGSUSED*/
2323 int
2324 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2325 {
2326         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2327         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2328 
2329         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2330 
2331         /*
2332          * ISM segment is always rw.
2333          */
2334         return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2335 }
2336 
2337 /*
2338  * Return an array of locked large pages; for empty slots, allocate
2339  * private zero-filled anon pages.
2340  */
2341 static int
2342 spt_anon_getpages(
2343         struct seg *sptseg,
2344         caddr_t sptaddr,
2345         size_t len,
2346         page_t *ppa[])
2347 {
2348         struct  spt_data *sptd = sptseg->s_data;
2349         struct  anon_map *amp = sptd->spt_amp;
2350         enum    seg_rw rw = sptd->spt_prot;
2351         uint_t  szc = sptseg->s_szc;
2352         size_t  pg_sz, share_sz = page_get_pagesize(szc);
2353         pgcnt_t lp_npgs;
2354         caddr_t lp_addr, e_sptaddr;
2355         uint_t  vpprot, ppa_szc = 0;
2356         struct  vpage *vpage = NULL;
2357         ulong_t j, ppa_idx;
2358         int     err, ierr = 0;
2359         pgcnt_t an_idx;
2360         anon_sync_obj_t cookie;
2361         int anon_locked = 0;
2362         pgcnt_t amp_pgs;
2363 
2364 
2365         ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2366         ASSERT(len != 0);
2367 
2368         pg_sz = share_sz;
2369         lp_npgs = btop(pg_sz);
2370         lp_addr = sptaddr;
2371         e_sptaddr = sptaddr + len;
2372         an_idx = seg_page(sptseg, sptaddr);
2373         ppa_idx = 0;
2374 
2375         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2376 
2377         amp_pgs = page_get_pagecnt(amp->a_szc);
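             /*
              * amp_pgs is the number of base pages per amp large page;
              * the anon array lock is dropped and re-taken at these
              * boundaries in the loop below.
              */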
2378 
2379         /*CONSTCOND*/
2380         while (1) {
2381                 for (; lp_addr < e_sptaddr;
2382                     an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2383 
2384                         /*
2385                          * If we're currently locked, and we get to a new
2386                          * page, unlock our current anon chunk.
2387                          */
2388                         if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2389                                 anon_array_exit(&cookie);
2390                                 anon_locked = 0;
2391                         }
2392                         if (!anon_locked) {
2393                                 anon_array_enter(amp, an_idx, &cookie);
2394                                 anon_locked = 1;
2395                         }
2396                         ppa_szc = (uint_t)-1;
2397                         ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2398                             lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2399                             &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2400 
2401                         if (ierr != 0) {
2402                                 if (ierr > 0) {
2403                                         err = FC_MAKE_ERR(ierr);
2404                                         goto lpgs_err;
2405                                 }
2406                                 break;
2407                         }
2408                 }
2409                 if (lp_addr == e_sptaddr) {
2410                         break;
2411                 }
2412                 ASSERT(lp_addr < e_sptaddr);
2413 
2414                 /*
2415                  * ierr == -1 means we failed to allocate a large page,
2416                  * so do a size down operation.
2417                  *
2418                  * ierr == -2 means some other process that privately shares
2419                  * pages with this process has allocated a larger page and we
2420                  * need to retry with larger pages. So do a size up
2421                  * operation. This relies on the fact that large pages are
2422                  * never partially shared i.e. if we share any constituent
2423                  * page of a large page with another process we must share the
2424                  * entire large page. Note this cannot happen for SOFTLOCK
2425                  * case, unless the current address (lp_addr) is at the beginning
2426                  * of the next page size boundary because the other process
2427                  * couldn't have relocated locked pages.
2428                  */
2429                 ASSERT(ierr == -1 || ierr == -2);
2430                 if (segvn_anypgsz) {
2431                         ASSERT(ierr == -2 || szc != 0);
2432                         ASSERT(ierr == -1 || szc < sptseg->s_szc);
2433                         szc = (ierr == -1) ? szc - 1 : szc + 1;
2434                 } else {
2435                         /*
2436                          * For faults and segvn_anypgsz == 0
2437                          * we need to be careful not to loop forever
2438                          * if existing page is found with szc other
2439                          * than 0 or seg->s_szc. This could be due
2440                          * to page relocations on behalf of DR or
2441                          * more likely large page creation. For this
2442                          * case simply re-size to existing page's szc
2443                          * if returned by anon_map_getpages().
2444                          */
2445                         if (ppa_szc == (uint_t)-1) {
2446                                 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2447                         } else {
2448                                 ASSERT(ppa_szc <= sptseg->s_szc);
2449                                 ASSERT(ierr == -2 || ppa_szc < szc);
2450                                 ASSERT(ierr == -1 || ppa_szc > szc);
2451                                 szc = ppa_szc;
2452                         }
2453                 }
2454                 pg_sz = page_get_pagesize(szc);
2455                 lp_npgs = btop(pg_sz);
2456                 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2457         }
2458         if (anon_locked) {
2459                 anon_array_exit(&cookie);
2460         }
2461         ANON_LOCK_EXIT(&amp->a_rwlock);
2462         return (0);
2463 
2464 lpgs_err:
2465         if (anon_locked) {
2466                 anon_array_exit(&cookie);
2467         }
2468         ANON_LOCK_EXIT(&amp->a_rwlock);
2469         for (j = 0; j < ppa_idx; j++)
2470                 page_unlock(ppa[j]);
2471         return (err);
2472 }
2473 
2474 /*
2475  * count the number of bytes in a set of spt pages that are currently not
2476  * locked
2477  */
2478 static rctl_qty_t
2479 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2480 {
2481         ulong_t i;
2482         rctl_qty_t unlocked = 0;
2483 
2484         for (i = 0; i < npages; i++) {
2485                 if (ppa[i]->p_lckcnt == 0)
2486                         unlocked += PAGESIZE;
2487         }
2488         return (unlocked);
2489 }
2490 
2491 extern  u_longlong_t randtick(void);
2492 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2493 #define NLCK    (NCPU_P2)
2494 /* Random number in the range [0, n-1]; n must be a power of two */
2495 #define RAND_P2(n)      \
2496         ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
2497 
2498 int
2499 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2500     page_t **ppa, ulong_t *lockmap, size_t pos,
2501     rctl_qty_t *locked)
2502 {
2503         struct  shm_data *shmd = seg->s_data;
2504         struct  spt_data *sptd = shmd->shm_sptseg->s_data;
2505         ulong_t i;
2506         int     kernel;
2507         pgcnt_t nlck = 0;
2508         int     rv = 0;
2509         int     use_reserved = 1;
2510 
2511         /* return the number of bytes actually locked */
2512         *locked = 0;
2513 
2514         /*
2515          * To avoid contention on freemem_lock, availrmem and pages_locked
2516          * global counters are updated only every nlck locked pages instead of
2517          * every time.  Reserve nlck locks up front and deduct from this
2518          * reservation for each page that requires a lock.  When the reservation
2519          * is consumed, reserve again.  nlck is randomized, so the competing
2520          * threads do not fall into a cyclic lock contention pattern. When
2521          * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2522          * is used to lock pages.
2523          */
2524         for (i = 0; i < npages; anon_index++, pos++, i++) {
2525                 if (nlck == 0 && use_reserved == 1) {
2526                         nlck = NLCK + RAND_P2(NLCK);
2527                         /* if fewer loops left, decrease nlck */
2528                         nlck = MIN(nlck, npages - i);
2529                         /*
2530                          * Reserve nlck locks up front and deduct from this
2531                          * reservation for each page that requires a lock.  When
2532                          * the reservation is consumed, reserve again.
2533                          */
2534                         mutex_enter(&freemem_lock);
2535                         if ((availrmem - nlck) < pages_pp_maximum) {
2536                                 /* Do not reserve memory in advance */
2537                                 use_reserved = 0;
2538                         } else {
2539                                 availrmem       -= nlck;
2540                                 pages_locked    += nlck;
2541                         }
2542                         mutex_exit(&freemem_lock);
2543                 }
2544                 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2545                         if (sptd->spt_ppa_lckcnt[anon_index] <
2546                             (ushort_t)DISM_LOCK_MAX) {
2547                                 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2548                                     (ushort_t)DISM_LOCK_MAX) {
2549                                         cmn_err(CE_WARN,
2550                                             "DISM page lock limit "
2551                                             "reached on DISM offset 0x%lx\n",
2552                                             anon_index << PAGESHIFT);
2553                                 }
2554                                 kernel = (sptd->spt_ppa &&
2555                                     sptd->spt_ppa[anon_index]);
2556                                 if (!page_pp_lock(ppa[i], 0, kernel ||
2557                                     use_reserved)) {
2558                                         sptd->spt_ppa_lckcnt[anon_index]--;
2559                                         rv = EAGAIN;
2560                                         break;
2561                                 }
2562                                 /* if this is a newly locked page, count it */
2563                                 if (ppa[i]->p_lckcnt == 1) {
2564                                         if (kernel == 0 && use_reserved == 1)
2565                                                 nlck--;
2566                                         *locked += PAGESIZE;
2567                                 }
2568                                 shmd->shm_lckpgs++;
2569                                 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2570                                 if (lockmap != NULL)
2571                                         BT_SET(lockmap, pos);
2572                         }
2573                 }
2574         }
2575         /* Return unused lock reservation */
2576         if (nlck != 0 && use_reserved == 1) {
2577                 mutex_enter(&freemem_lock);
2578                 availrmem       += nlck;
2579                 pages_locked    -= nlck;
2580                 mutex_exit(&freemem_lock);
2581         }
2582 
2583         return (rv);
2584 }
2585 
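     /*
      * Undo spt_lockpages(): drop the per-page lock counts for pages
      * marked DISM_PG_LOCKED and return the number of bytes actually
      * unlocked in *unlocked.
      */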
2586 int
2587 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2588     rctl_qty_t *unlocked)
2589 {
2590         struct shm_data *shmd = seg->s_data;
2591         struct spt_data *sptd = shmd->shm_sptseg->s_data;
2592         struct anon_map *amp = sptd->spt_amp;
2593         struct anon     *ap;
2594         struct vnode    *vp;
2595         u_offset_t      off;
2596         struct page     *pp;
2597         int             kernel;
2598         anon_sync_obj_t cookie;
2599         ulong_t         i;
2600         pgcnt_t         nlck = 0;
2601         pgcnt_t         nlck_limit = NLCK;
2602 
2603         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2604         for (i = 0; i < npages; i++, anon_index++) {
2605                 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2606                         anon_array_enter(amp, anon_index, &cookie);
2607                         ap = anon_get_ptr(amp->ahp, anon_index);
2608                         ASSERT(ap);
2609 
2610                         swap_xlate(ap, &vp, &off);
2611                         anon_array_exit(&cookie);
2612                         pp = page_lookup(vp, off, SE_SHARED);
2613                         ASSERT(pp);
2614                         /*
2615                          * availrmem is decremented only for pages which are not
2616                          * in seg pcache; for pages in seg pcache, availrmem was
2617                          * decremented in _dismpagelock().
2618                          */
2619                         kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2620                         ASSERT(pp->p_lckcnt > 0);
2621 
2622                         /*
2623                          * Unlock the page but do not change availrmem; we
2624                          * update it ourselves every nlck loops.
2625                          */
2626                         page_pp_unlock(pp, 0, 1);
2627                         if (pp->p_lckcnt == 0) {
2628                                 if (kernel == 0)
2629                                         nlck++;
2630                                 *unlocked += PAGESIZE;
2631                         }
2632                         page_unlock(pp);
2633                         shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2634                         sptd->spt_ppa_lckcnt[anon_index]--;
2635                         shmd->shm_lckpgs--;
2636                 }
2637 
2638                 /*
2639                  * To reduce freemem_lock contention, do not update availrmem
2640                  * until at least NLCK pages have been unlocked:
2641                  * 1. there is no need to update if nlck is zero;
2642                  * 2. always update on the last iteration.
2643                  */
2644                 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2645                         mutex_enter(&freemem_lock);
2646                         availrmem       += nlck;
2647                         pages_locked    -= nlck;
2648                         mutex_exit(&freemem_lock);
2649                         nlck = 0;
2650                         nlck_limit = NLCK + RAND_P2(NLCK);
2651                 }
2652         }
2653         ANON_LOCK_EXIT(&amp->a_rwlock);
2654 
2655         return (0);
2656 }
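     /*
      * A note on the availrmem batching above: rather than taking
      * freemem_lock once per page, spt_unlockpages() counts newly
      * unlocked pages in nlck and folds the total into availrmem and
      * pages_locked only once nlck reaches nlck_limit (and once more on
      * the final iteration).  spt_lockpages() is the mirror image: it
      * reserves availrmem in batches up front and hands back whatever
      * reservation is left over at the end (see its tail above).  The
      * RAND_P2() term staggers nlck_limit so that concurrent callers
      * are less likely to contend on freemem_lock in lock-step.
      */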
2657 
2658 /*ARGSUSED*/
2659 static int
2660 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2661     int attr, int op, ulong_t *lockmap, size_t pos)
2662 {
2663         struct shm_data *shmd = seg->s_data;
2664         struct seg      *sptseg = shmd->shm_sptseg;
2665         struct spt_data *sptd = sptseg->s_data;
2666         struct kshmid   *sp = sptd->spt_amp->a_sp;
2667         pgcnt_t         npages, a_npages;
2668         page_t          **ppa;
2669         pgcnt_t         an_idx, a_an_idx, ppa_idx;
2670         caddr_t         spt_addr, a_addr;       /* spt and aligned address */
2671         size_t          a_len;                  /* aligned len */
2672         size_t          share_sz;
2673         ulong_t         i;
2674         int             sts = 0;
2675         rctl_qty_t      unlocked = 0;
2676         rctl_qty_t      locked = 0;
2677         struct proc     *p = curproc;
2678         kproject_t      *proj;
2679 
2680         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2681         ASSERT(sp != NULL);
2682 
2683         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2684                 return (0);
2685         }
2686 
2687         addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2688         an_idx = seg_page(seg, addr);
2689         npages = btopr(len);
2690 
2691         if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2692                 return (ENOMEM);
2693         }
2694 
2695         /*
2696          * A shm's project never changes, so no lock needed.
2697          * The shm has a hold on the project, so it will not go away.
2698          * Since we have a mapping to shm within this zone, we know
2699          * that the zone will not go away.
2700          */
2701         proj = sp->shm_perm.ipc_proj;
2702 
2703         if (op == MC_LOCK) {
2704 
2705                 /*
2706                  * Align the addr and size of the request if they are not
2707                  * aligned, so we can always allocate large page(s); however,
2708                  * we lock only what was asked for in the initial request.
2709                  */
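                     /*
                      * For example (illustrative numbers only, assuming a 4K
                      * PAGESIZE and a 4M underlying page size): a request for
                      * len == 0x2000 at segment offset 0x401000 yields a_addr
                      * at offset 0x400000 and a_len == 0x400000, so
                      * spt_anon_getpages() below brings in the whole 4M large
                      * page while only the two requested base pages, starting
                      * at &ppa[ppa_idx], are actually locked.
                      */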
2710                 share_sz = page_get_pagesize(sptseg->s_szc);
2711                 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2712                 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2713                     share_sz);
2714                 a_npages = btop(a_len);
2715                 a_an_idx = seg_page(seg, a_addr);
2716                 spt_addr = sptseg->s_base + ptob(a_an_idx);
2717                 ppa_idx = an_idx - a_an_idx;
2718 
2719                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2720                     KM_NOSLEEP)) == NULL) {
2721                         return (ENOMEM);
2722                 }
2723 
2724                 /*
2725                  * Don't cache any new pages for IO and
2726                  * flush any cached pages.
2727                  */
2728                 mutex_enter(&sptd->spt_lock);
2729                 if (sptd->spt_ppa != NULL)
2730                         sptd->spt_flags |= DISM_PPA_CHANGED;
2731 
2732                 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2733                 if (sts != 0) {
2734                         mutex_exit(&sptd->spt_lock);
2735                         kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2736                         return (sts);
2737                 }
2738 
2739                 mutex_enter(&sp->shm_mlock);
2740                 /* enforce locked memory rctl */
2741                 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2742 
2743                 mutex_enter(&p->p_lock);
2744                 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2745                         mutex_exit(&p->p_lock);
2746                         sts = EAGAIN;
2747                 } else {
2748                         mutex_exit(&p->p_lock);
2749                         sts = spt_lockpages(seg, an_idx, npages,
2750                             &ppa[ppa_idx], lockmap, pos, &locked);
2751 
2752                         /*
2753                          * correct locked count if not all pages could be
2754                          * locked
2755                          */
2756                         if ((unlocked - locked) > 0) {
2757                                 rctl_decr_locked_mem(NULL, proj,
2758                                     (unlocked - locked), 0);
2759                         }
2760                 }
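                     /*
                      * The project is charged up front for every byte that is
                      * currently unlocked in the range (spt_unlockedbytes());
                      * if spt_lockpages() then locks fewer bytes than that,
                      * the difference is handed back via rctl_decr_locked_mem()
                      * above, so the locked-memory rctl is not left
                      * over-charged.
                      */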
2761                 /*
2762                  * unlock pages
2763                  */
2764                 for (i = 0; i < a_npages; i++)
2765                         page_unlock(ppa[i]);
2766                 if (sptd->spt_ppa != NULL)
2767                         sptd->spt_flags |= DISM_PPA_CHANGED;
2768                 mutex_exit(&sp->shm_mlock);
2769                 mutex_exit(&sptd->spt_lock);
2770 
2771                 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2772 
2773         } else if (op == MC_UNLOCK) { /* unlock */
2774                 page_t          **ppa;
2775 
2776                 mutex_enter(&sptd->spt_lock);
2777                 if (shmd->shm_lckpgs == 0) {
2778                         mutex_exit(&sptd->spt_lock);
2779                         return (0);
2780                 }
2781                 /*
2782                  * Don't cache new IO pages.
2783                  */
2784                 if (sptd->spt_ppa != NULL)
2785                         sptd->spt_flags |= DISM_PPA_CHANGED;
2786 
2787                 mutex_enter(&sp->shm_mlock);
2788                 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2789                 if ((ppa = sptd->spt_ppa) != NULL)
2790                         sptd->spt_flags |= DISM_PPA_CHANGED;
2791                 mutex_exit(&sptd->spt_lock);
2792 
2793                 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2794                 mutex_exit(&sp->shm_mlock);
2795 
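                     /*
                      * If a page array was cached, purge its wired pcache
                      * entries now that the pages have been unlocked.
                      */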
2796                 if (ppa != NULL)
2797                         seg_ppurge_wiredpp(ppa);
2798         }
2799         return (sts);
2800 }
2801 
2802 /*ARGSUSED*/
2803 int
2804 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2805 {
2806         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2807         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2808         spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2809 
2810         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2811 
2812         /*
2813          * An ISM segment is always rw, so the same protections apply
2814          * to every page.
2815          */
2815         while (--pgno >= 0)
2816                 *protv++ = sptd->spt_prot;
2817         return (0);
2818 }
2819 
2820 /*ARGSUSED*/
2821 u_offset_t
2822 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2823 {
2824         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2825 
2826         /* Offset does not matter in ISM memory */
2827 
2828         return ((u_offset_t)0);
2829 }
2830 
2831 /* ARGSUSED */
2832 int
2833 segspt_shmgettype(struct seg *seg, caddr_t addr)
2834 {
2835         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2836         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2837 
2838         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2839 
2840         /*
2841          * The shared memory mapping is always MAP_SHARED; swap is
2842          * reserved only for DISM, so ISM also reports MAP_NORESERVE.
2843          */
2844         return (MAP_SHARED |
2845             ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2846 }
2847 
2848 /*ARGSUSED*/
2849 int
2850 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2851 {
2852         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2853         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2854 
2855         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2856 
2857         *vpp = sptd->spt_vp;
2858         return (0);
2859 }
2860 
2861 /*
2862  * We need to wait for pending IO to a DISM segment to complete before its
2863  * pages can get kicked out of the seg_pcache.  120 seconds should be more
2864  * than enough time to wait.
2865  */
2866 static clock_t spt_pcache_wait = 120;
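     /*
      * spt_pcache_wait is in seconds; segspt_shmadvise() below converts it
      * to ticks (hz * spt_pcache_wait) and uses the result as the
      * cv_timedwait_sig() deadline while waiting for the pcache purge to
      * complete.
      */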
2867 
2868 /*ARGSUSED*/
2869 static int
2870 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2871 {
2872         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2873         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2874         struct anon_map *amp;
2875         pgcnt_t pg_idx;
2876         ushort_t gen;
2877         clock_t end_lbolt;
2878         int writer;
2879         page_t **ppa;
2880 
2881         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2882 
2883         if (behav == MADV_FREE) {
2884                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2885                         return (0);
2886 
2887                 amp = sptd->spt_amp;
2888                 pg_idx = seg_page(seg, addr);
2889 
2890                 mutex_enter(&sptd->spt_lock);
2891                 if ((ppa = sptd->spt_ppa) == NULL) {
2892                         mutex_exit(&sptd->spt_lock);
2893                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2894                         anon_disclaim(amp, pg_idx, len);
2895                         ANON_LOCK_EXIT(&amp->a_rwlock);
2896                         return (0);
2897                 }
2898 
2899                 sptd->spt_flags |= DISM_PPA_CHANGED;
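                     /*
                      * Remember the current pcache generation; the wait loop
                      * below stops once the generation changes or
                      * DISM_PPA_CHANGED clears (or the timeout/signal hits),
                      * i.e. once the cached page list has actually been
                      * dropped.
                      */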
2900                 gen = sptd->spt_gen;
2901 
2902                 mutex_exit(&sptd->spt_lock);
2903 
2904                 /*
2905                  * Purge all DISM cached pages
2906                  */
2907                 seg_ppurge_wiredpp(ppa);
2908 
2909                 /*
2910                  * Drop the AS_LOCK so that other threads can grab it
2911                  * in the as_pageunlock path and hopefully get the segment
2912                  * kicked out of the seg_pcache.  We bump the shm_softlockcnt
2913                  * to keep this segment resident.
2914                  */
2915                 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2916                 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2917                 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2918 
2919                 mutex_enter(&sptd->spt_lock);
2920 
2921                 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2922 
2923                 /*
2924                  * Try to wait for pages to get kicked out of the seg_pcache.
2925                  */
2926                 while (sptd->spt_gen == gen &&
2927                     (sptd->spt_flags & DISM_PPA_CHANGED) &&
2928                     ddi_get_lbolt() < end_lbolt) {
2929                         if (!cv_timedwait_sig(&sptd->spt_cv,
2930                             &sptd->spt_lock, end_lbolt)) {
2931                                 break;
2932                         }
2933                 }
2934 
2935                 mutex_exit(&sptd->spt_lock);
2936 
2937                 /* Regrab the AS_LOCK and release our hold on the segment */
2938                 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2939                     writer ? RW_WRITER : RW_READER);
2940                 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2941                 if (shmd->shm_softlockcnt <= 0) {
2942                         if (AS_ISUNMAPWAIT(seg->s_as)) {
2943                                 mutex_enter(&seg->s_as->a_contents);
2944                                 if (AS_ISUNMAPWAIT(seg->s_as)) {
2945                                         AS_CLRUNMAPWAIT(seg->s_as);
2946                                         cv_broadcast(&seg->s_as->a_cv);
2947                                 }
2948                                 mutex_exit(&seg->s_as->a_contents);
2949                         }
2950                 }
2951 
2952                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2953                 anon_disclaim(amp, pg_idx, len);
2954                 ANON_LOCK_EXIT(&amp->a_rwlock);
2955         } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2956             behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2957                 int                     already_set;
2958                 ulong_t                 anon_index;
2959                 lgrp_mem_policy_t       policy;
2960                 caddr_t                 shm_addr;
2961                 size_t                  share_size;
2962                 size_t                  size;
2963                 struct seg              *sptseg = shmd->shm_sptseg;
2964                 caddr_t                 sptseg_addr;
2965 
2966                 /*
2967                  * Align address and length to page size of underlying segment
2968                  */
2969                 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2970                 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2971                 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2972                     share_size);
2973 
2974                 amp = shmd->shm_amp;
2975                 anon_index = seg_page(seg, shm_addr);
2976 
2977                 /*
2978                  * Now we may have to adjust the size downward if it would
2979                  * exceed the realsize of the segment or the initial anon
2980                  * allocations.
2981                  */
2982                 sptseg_addr = sptseg->s_base + ptob(anon_index);
2983                 if ((sptseg_addr + size) >
2984                     (sptseg->s_base + sptd->spt_realsize))
2985                         size = (sptseg->s_base + sptd->spt_realsize) -
2986                             sptseg_addr;
2987 
2988                 /*
2989                  * Set memory allocation policy for this segment
2990                  */
2991                 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2992                 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2993                     NULL, 0, len);
2994 
2995                 /*
2996                  * If the memory allocation policy is already set and is
2997                  * not one that should be reapplied, don't bother doing so.
2998                  */
2999                 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
3000                         return (0);
3001 
3002                 /*
3003                  * Mark any existing pages in the given range for
3004                  * migration, flushing the I/O page cache and using the
3005                  * underlying segment to calculate the anon index and to
3006                  * get the anonmap and vnode pointer.
3007                  */
3008                 if (shmd->shm_softlockcnt > 0)
3009                         segspt_purge(seg);
3010 
3011                 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3012         }
3013 
3014         return (0);
3015 }
3016 
3017 /*ARGSUSED*/
3018 void
3019 segspt_shmdump(struct seg *seg)
3020 {
3021         /* no-op for ISM segment */
3022 }
3023 
3024 /*ARGSUSED*/
3025 static faultcode_t
3026 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
3027 {
3028         return (ENOTSUP);
3029 }
3030 
3031 /*
3032  * get a memory ID for an addr in a given segment
3033  */
3034 static int
3035 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3036 {
3037         struct shm_data *shmd = (struct shm_data *)seg->s_data;
3038         struct anon     *ap;
3039         size_t          anon_index;
3040         struct anon_map *amp = shmd->shm_amp;
3041         struct spt_data *sptd = shmd->shm_sptseg->s_data;
3042         struct seg      *sptseg = shmd->shm_sptseg;
3043         anon_sync_obj_t cookie;
3044 
3045         anon_index = seg_page(seg, addr);
3046 
3047         if (addr > (seg->s_base + sptd->spt_realsize)) {
3048                 return (EFAULT);
3049         }
3050 
3051         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3052         anon_array_enter(amp, anon_index, &cookie);
3053         ap = anon_get_ptr(amp->ahp, anon_index);
3054         if (ap == NULL) {
3055                 struct page *pp;
3056                 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3057 
3058                 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3059                 if (pp == NULL) {
3060                         anon_array_exit(&cookie);
3061                         ANON_LOCK_EXIT(&amp->a_rwlock);
3062                         return (ENOMEM);
3063                 }
3064                 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3065                 page_unlock(pp);
3066         }
3067         anon_array_exit(&cookie);
3068         ANON_LOCK_EXIT(&amp->a_rwlock);
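             /*
              * The memory ID is the anon slot pointer for this page plus
              * the offset of addr within that page.
              */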
3069         memidp->val[0] = (uintptr_t)ap;
3070         memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3071         return (0);
3072 }
3073 
3074 /*
3075  * Get memory allocation policy info for specified address in given segment
3076  */
3077 static lgrp_mem_policy_info_t *
3078 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3079 {
3080         struct anon_map         *amp;
3081         ulong_t                 anon_index;
3082         lgrp_mem_policy_info_t  *policy_info;
3083         struct shm_data         *shm_data;
3084 
3085         ASSERT(seg != NULL);
3086 
3087         /*
3088          * Get the anon_map from the segshm data.
3089          *
3090          * Assume that no lock needs to be held on the anon_map, since
3091          * it should be protected by its reference count, which must be
3092          * nonzero for an existing segment.  We do, however, need to
3093          * grab the readers lock on the policy tree.
3094          */
3095         shm_data = (struct shm_data *)seg->s_data;
3096         if (shm_data == NULL)
3097                 return (NULL);
3098         amp = shm_data->shm_amp;
3099         ASSERT(amp->refcnt != 0);
3100 
3101         /*
3102          * Get policy info
3103          *
3104          * Assume starting anon index of 0
3105          */
3106         anon_index = seg_page(seg, addr);
3107         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3108 
3109         return (policy_info);
3110 }
3111 
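     /*
      * An spt shm segment implements none of the optional segment
      * capabilities, so report "not capable" for every query.
      */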
3112 /*ARGSUSED*/
3113 static int
3114 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3115 {
3116         return (0);
3117 }