/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015, Joyent, Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - shared or copy-on-write from a vnode/anonymous memory.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/debug.h>
#include <sys/cred.h>
#include <sys/vmsystm.h>
#include <sys/tuneable.h>
#include <sys/bitmap.h>
#include <sys/swap.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/callb.h>
#include <sys/vm.h>
#include <sys/dumphdr.h>
#include <sys/lgrp.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_vn.h>
#include <vm/pvn.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/vpage.h>
#include <sys/proc.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/zone.h>
#include <sys/shm_impl.h>

/*
 * segvn_fault needs a temporary page list array.  To avoid calling kmem all
 * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if
 * it can.  In the rare case when this page list is not large enough, it
 * goes and gets a large enough array from kmem.
 *
 * This small page list array covers either 8 pages or 64kB worth of pages -
 * whichever is smaller.
 */
#define	PVN_MAX_GETPAGE_SZ	0x10000
#define	PVN_MAX_GETPAGE_NUM	0x8

#if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
#define	PVN_GETPAGE_SZ	ptob(PVN_MAX_GETPAGE_NUM)
#define	PVN_GETPAGE_NUM	PVN_MAX_GETPAGE_NUM
#else
#define	PVN_GETPAGE_SZ	PVN_MAX_GETPAGE_SZ
#define	PVN_GETPAGE_NUM	btop(PVN_MAX_GETPAGE_SZ)
#endif
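
/*
 * For example (illustrative arithmetic only): with 4K pages,
 * PVN_MAX_GETPAGE_NUM * PAGESIZE is 32K, which is smaller than 64K, so
 * the #if above selects PVN_GETPAGE_NUM = 8 and PVN_GETPAGE_SZ = 32K.
 * With 8K pages, 8 pages is exactly 64K, so the two limits coincide and
 * the small array again covers 8 pages.
 */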

/*
 * Private seg op routines.
 */
static int	segvn_dup(struct seg *seg, struct seg *newseg);
static int	segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
static void	segvn_free(struct seg *seg);
static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
		    caddr_t addr, size_t len, enum fault_type type,
		    enum seg_rw rw);
static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
static int	segvn_setprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_checkprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segvn_sync(struct seg *seg, caddr_t addr, size_t len,
		    int attr, uint_t flags);
static size_t	segvn_incore(struct seg *seg, caddr_t addr, size_t len,
		    char *vec);
static int	segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
		    int attr, int op, ulong_t *lockmap, size_t pos);
static int	segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
		    uint_t *protv);
static u_offset_t	segvn_getoffset(struct seg *seg, caddr_t addr);
static int	segvn_gettype(struct seg *seg, caddr_t addr);
static int	segvn_getvp(struct seg *seg, caddr_t addr,
		    struct vnode **vpp);
static int	segvn_advise(struct seg *seg, caddr_t addr, size_t len,
		    uint_t behav);
static void	segvn_dump(struct seg *seg);
static int	segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
		    struct page ***ppp, enum lock_type type, enum seg_rw rw);
static int	segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
		    uint_t szc);
static int	segvn_getmemid(struct seg *seg, caddr_t addr,
		    memid_t *memidp);
static lgrp_mem_policy_info_t	*segvn_getpolicy(struct seg *, caddr_t);
static int	segvn_capable(struct seg *seg, segcapability_t capable);
static int	segvn_inherit(struct seg *, caddr_t, size_t, uint_t);

struct	seg_ops segvn_ops = {
	segvn_dup,
	segvn_unmap,
	segvn_free,
	segvn_fault,
	segvn_faulta,
	segvn_setprot,
	segvn_checkprot,
	segvn_kluster,
	segvn_sync,
	segvn_incore,
	segvn_lockop,
	segvn_getprot,
	segvn_getoffset,
	segvn_gettype,
	segvn_getvp,
	segvn_advise,
	segvn_dump,
	segvn_pagelock,
	segvn_setpagesize,
	segvn_getmemid,
	segvn_getpolicy,
	segvn_capable,
	segvn_inherit
};

/*
 * Common zfod structures, provided as a shorthand for others to use.
 */
static segvn_crargs_t zfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
static segvn_crargs_t kzfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
	PROT_ALL & ~PROT_USER);
static segvn_crargs_t stack_noexec_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);

caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs;	/* user zfod argsp */
caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs;	/* kernel zfod argsp */
caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs;	/* executable stack */
caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
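
/*
 * A typical consumer hands one of these argsp pointers to as_map() to
 * create an anonymous zero-fill mapping; a sketch only, with error
 * handling and locking elided:
 *
 *	error = as_map(as, addr, len, segvn_create, zfod_argsp);
 */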

#define	vpgtob(n)	((n) * sizeof (struct vpage))	/* For brevity */

size_t	segvn_comb_thrshld = UINT_MAX;	/* patchable -- see 1196681 */

size_t	segvn_pglock_comb_thrshld = (1UL << 16);	/* 64K */
size_t	segvn_pglock_comb_balign = (1UL << 16);		/* 64K */
uint_t	segvn_pglock_comb_bshift;
size_t	segvn_pglock_comb_palign;
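/*
 * The two uninitialized values above are derived from
 * segvn_pglock_comb_balign in segvn_init().
 */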

static int	segvn_concat(struct seg *, struct seg *, int);
static int	segvn_extend_prev(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static int	segvn_extend_next(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static void	segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
static void	segvn_pagelist_rele(page_t **);
static void	segvn_setvnode_mpss(vnode_t *);
static void	segvn_relocate_pages(page_t **, page_t *);
static int	segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
static int	segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
    uint_t, page_t **, page_t **, uint_t *, int *);
static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
    u_offset_t, struct vpage *, page_t **, uint_t,
    enum fault_type, enum seg_rw, int);
static void	segvn_vpage(struct seg *);
static size_t	segvn_count_swap_by_vpages(struct seg *);

static void segvn_purge(struct seg *seg);
static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);
static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);

static int sameprot(struct seg *, caddr_t, size_t);

static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
static int segvn_clrszc(struct seg *);
static struct seg *segvn_split_seg(struct seg *, caddr_t);
static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
    ulong_t, uint_t);

static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
    size_t, void *, u_offset_t);

static struct kmem_cache *segvn_cache;
static struct kmem_cache **segvn_szc_cache;

#ifdef VM_STATS
static struct segvnvmstats_str {
	ulong_t fill_vp_pages[31];
	ulong_t fltvnpages[49];
	ulong_t fullszcpages[10];
	ulong_t relocatepages[3];
	ulong_t fltanpages[17];
	ulong_t pagelock[2];
	ulong_t demoterange[3];
} segvnvmstats;
#endif /* VM_STATS */

#define	SDR_RANGE	1		/* demote entire range */
#define	SDR_END		2		/* demote non aligned ends only */

#define	CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) {	      \
		if ((len) != 0) {					      \
			lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);  \
			ASSERT(lpgaddr >= (seg)->s_base);		      \
			lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) +    \
			    (len)), pgsz);				      \
			ASSERT(lpgeaddr > lpgaddr);			      \
			ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size);    \
		} else {						      \
			lpgeaddr = lpgaddr = (addr);			      \
		}							      \
	}
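
/*
 * For example (illustrative values only): with pgsz = 4M, addr = 0x1000
 * and len = 0x2000, CALC_LPG_REGION() computes lpgaddr = 0 and
 * lpgeaddr = 0x400000, i.e. the smallest pgsz-aligned region that
 * encloses [addr, addr + len).
 */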
 259 
 260 /*ARGSUSED*/
 261 static int
 262 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
 263 {
 264         struct segvn_data *svd = buf;
 265 
 266         rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
 267         mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
 268         svd->svn_trnext = svd->svn_trprev = NULL;
 269         return (0);
 270 }
 271 
 272 /*ARGSUSED1*/
 273 static void
 274 segvn_cache_destructor(void *buf, void *cdrarg)
 275 {
 276         struct segvn_data *svd = buf;
 277 
 278         rw_destroy(&svd->lock);
 279         mutex_destroy(&svd->segfree_syncmtx);
 280 }
 281 
 282 /*ARGSUSED*/
 283 static int
 284 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
 285 {
 286         bzero(buf, sizeof (svntr_t));
 287         return (0);
 288 }
 289 
/*
 * Patching this variable to non-zero allows the system to run with
 * stacks marked as "not executable".  It's a bit of a kludge, but is
 * provided as a tweakable for platforms that export those ABIs
 * (e.g. sparc V8) that have executable stacks enabled by default.
 * There are also some restrictions for platforms that don't actually
 * implement 'noexec' protections.
 *
 * Once enabled, the system is (therefore) unable to provide a fully
 * ABI-compliant execution environment, though practically speaking,
 * most everything works.  The exceptions are generally some interpreters
 * and debuggers that create executable code on the stack and jump
 * into it (without explicitly mprotecting the address range to include
 * PROT_EXEC).
 *
 * One important class of applications that is thereby disabled consists
 * of those that have been transformed into malicious agents using one
 * of the numerous "buffer overflow" attacks.  See 4007890.
 */
int noexec_user_stack = 0;
int noexec_user_stack_log = 1;
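
/*
 * These tunables are typically patched via /etc/system, e.g.:
 *
 *	set noexec_user_stack = 1
 *	set noexec_user_stack_log = 0
 */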

int segvn_lpg_disable = 0;
uint_t segvn_maxpgszc = 0;

ulong_t segvn_vmpss_clrszc_cnt;
ulong_t segvn_vmpss_clrszc_err;
ulong_t segvn_fltvnpages_clrszc_cnt;
ulong_t segvn_fltvnpages_clrszc_err;
ulong_t segvn_setpgsz_align_err;
ulong_t segvn_setpgsz_anon_align_err;
ulong_t segvn_setpgsz_getattr_err;
ulong_t segvn_setpgsz_eof_err;
ulong_t segvn_faultvnmpss_align_err1;
ulong_t segvn_faultvnmpss_align_err2;
ulong_t segvn_faultvnmpss_align_err3;
ulong_t segvn_faultvnmpss_align_err4;
ulong_t segvn_faultvnmpss_align_err5;
ulong_t segvn_vmpss_pageio_deadlk_err;

int segvn_use_regions = 1;

/*
 * Segvn supports text replication optimization for NUMA platforms. Text
 * replicas are represented by anon maps (amp). There's one amp per text file
 * region per lgroup. A process chooses the amp for each of its text mappings
 * based on the lgroup assignment of its main thread (t_tid = 1). All
 * processes that want a replica on a particular lgroup for the same text file
 * mapping share the same amp. amps are looked up in the svntr_hashtab hash
 * table with (vp, off, size, szc) used as the key. Text replication segments
 * are read-only MAP_PRIVATE|MAP_TEXT segments that map a vnode. Replication
 * is achieved by forcing COW faults from the vnode to the amp and mapping
 * amp pages instead of vnode pages. A replication amp is assigned to a
 * segment when it gets its first pagefault. To handle main thread lgroup
 * rehoming, segvn_trasync_thread periodically rechecks whether the process
 * still maps an amp local to the main thread. If not, the async thread
 * forces the process to remap to an amp in the main thread's new home
 * lgroup. The current text replication implementation only benefits
 * workloads that do most of their work in the main thread of a process, or
 * whose threads all run in the same lgroup. To extend the text replication
 * benefit to other types of multithreaded workloads, further work would be
 * needed in the hat layer to allow the same virtual address in the same hat
 * to simultaneously map different physical addresses (i.e. page table
 * replication would be needed for x86).
 *
 * amp pages are used instead of vnode pages as long as the segment has a
 * very simple life cycle.  It's created via segvn_create(), handles S_EXEC
 * (S_READ) pagefaults and is fully unmapped.  If anything more complicated
 * happens, such as a protection change, a real COW fault, a pagesize change,
 * an MC_LOCK request or a partial unmap, we turn off text replication by
 * converting the segment back to a vnode-only segment (unmap the segment's
 * address range and set svd->amp to NULL).
 *
 * The original file can be changed after an amp is inserted into
 * svntr_hashtab. Processes that are launched after the file has changed
 * can't use the replicas created prior to the file change. To implement
 * this functionality, hash entries are timestamped. Replicas can only be
 * used if the current file modification time is the same as the timestamp
 * saved when the hash entry was created. However, timestamps alone are not
 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We
 * deal with file changes via MAP_SHARED mappings differently. When writable
 * MAP_SHARED mappings are created to vnodes marked as executable, we mark
 * all existing replicas for this vnode as not usable for future text
 * mappings. And we don't create new replicas for files that currently have
 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
 * true).
 */

#define	SEGVN_TEXTREPL_MAXBYTES_FACTOR	(20)
size_t	segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;

static ulong_t			svntr_hashtab_sz = 512;
static svntr_bucket_t		*svntr_hashtab = NULL;
static struct kmem_cache	*svntr_cache;
static svntr_stats_t		*segvn_textrepl_stats;
static ksema_t			segvn_trasync_sem;

int				segvn_disable_textrepl = 1;
size_t				textrepl_size_thresh = (size_t)-1;
size_t				segvn_textrepl_bytes = 0;
size_t				segvn_textrepl_max_bytes = 0;
clock_t				segvn_update_textrepl_interval = 0;
int				segvn_update_tr_time = 10;
int				segvn_disable_textrepl_update = 0;

static void segvn_textrepl(struct seg *);
static void segvn_textunrepl(struct seg *, int);
static void segvn_inval_trcache(vnode_t *);
static void segvn_trasync_thread(void);
static void segvn_trupdate_wakeup(void *);
static void segvn_trupdate(void);
static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
    ulong_t);

/*
 * Initialize segvn data structures
 */
void
segvn_init(void)
{
	uint_t maxszc;
	uint_t szc;
	size_t pgsz;

	segvn_cache = kmem_cache_create("segvn_cache",
	    sizeof (struct segvn_data), 0,
	    segvn_cache_constructor, segvn_cache_destructor, NULL,
	    NULL, NULL, 0);

	if (segvn_lpg_disable == 0) {
		szc = maxszc = page_num_pagesizes() - 1;
		if (szc == 0) {
			segvn_lpg_disable = 1;
		}
		if (page_get_pagesize(0) != PAGESIZE) {
			panic("segvn_init: bad szc 0");
			/*NOTREACHED*/
		}
		while (szc != 0) {
			pgsz = page_get_pagesize(szc);
			if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
				panic("segvn_init: bad szc %d", szc);
				/*NOTREACHED*/
			}
			szc--;
		}
		if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
			segvn_maxpgszc = maxszc;
	}

	if (segvn_maxpgszc) {
		segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
		    (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
		    KM_SLEEP);
	}

	for (szc = 1; szc <= segvn_maxpgszc; szc++) {
		char	str[32];

		(void) sprintf(str, "segvn_szc_cache%d", szc);
		segvn_szc_cache[szc] = kmem_cache_create(str,
		    page_get_pagecnt(szc) * sizeof (page_t *), 0,
		    NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
	}

	if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
		segvn_use_regions = 0;

	/*
	 * For now shared regions and text replication segvn support
	 * are mutually exclusive. This is acceptable because
	 * currently significant benefit from text replication was
	 * only observed on AMD64 NUMA platforms (due to relatively
	 * small L2$ size) and currently we don't support shared
	 * regions on x86.
	 */
	if (segvn_use_regions && !segvn_disable_textrepl) {
		segvn_disable_textrepl = 1;
	}

#if defined(_LP64)
	if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
	    !segvn_disable_textrepl) {
		ulong_t i;
		size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);

		svntr_cache = kmem_cache_create("svntr_cache",
		    sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
		    NULL, NULL, NULL, 0);
		svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
		for (i = 0; i < svntr_hashtab_sz; i++) {
			mutex_init(&svntr_hashtab[i].tr_lock, NULL,
			    MUTEX_DEFAULT, NULL);
		}
		segvn_textrepl_max_bytes = ptob(physmem) /
		    segvn_textrepl_max_bytes_factor;
		segvn_textrepl_stats = kmem_zalloc(NCPU *
		    sizeof (svntr_stats_t), KM_SLEEP);
		sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
		(void) thread_create(NULL, 0, segvn_trasync_thread,
		    NULL, 0, &p0, TS_RUN, minclsyspri);
	}
#endif

	if (!ISP2(segvn_pglock_comb_balign) ||
	    segvn_pglock_comb_balign < PAGESIZE) {
		segvn_pglock_comb_balign = 1UL << 16; /* 64K */
	}
	segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
	segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
}

#define	SEGVN_PAGEIO	((void *)0x1)
#define	SEGVN_NOPAGEIO	((void *)0x2)
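
/*
 * v_mpssdata is effectively a tri-state: NULL means the vnode has not
 * been probed yet, SEGVN_PAGEIO means its filesystem can handle
 * VOP_PAGEIO (usable for large-page support), and SEGVN_NOPAGEIO means
 * it cannot.
 */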

static void
segvn_setvnode_mpss(vnode_t *vp)
{
	int err;

	ASSERT(vp->v_mpssdata == NULL ||
	    vp->v_mpssdata == SEGVN_PAGEIO ||
	    vp->v_mpssdata == SEGVN_NOPAGEIO);

	if (vp->v_mpssdata == NULL) {
		if (vn_vmpss_usepageio(vp)) {
			err = VOP_PAGEIO(vp, (page_t *)NULL,
			    (u_offset_t)0, 0, 0, CRED(), NULL);
		} else {
			err = ENOSYS;
		}
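		/*
		 * The zero-length VOP_PAGEIO call above is only a probe:
		 * the code below assumes that a filesystem which
		 * implements pageio rejects the degenerate request with
		 * EINVAL, while one that doesn't returns ENOSYS (or some
		 * other error).
		 */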
		/*
		 * set v_mpssdata just once per vnode life
		 * so that it never changes.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_mpssdata == NULL) {
			if (err == EINVAL) {
				vp->v_mpssdata = SEGVN_PAGEIO;
			} else {
				vp->v_mpssdata = SEGVN_NOPAGEIO;
			}
		}
		mutex_exit(&vp->v_lock);
	}
}

int
segvn_create(struct seg *seg, void *argsp)
{
	struct segvn_crargs *a = (struct segvn_crargs *)argsp;
	struct segvn_data *svd;
	size_t swresv = 0;
	struct cred *cred;
	struct anon_map *amp;
	int error = 0;
	size_t pgsz;
	lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
	int use_rgn = 0;
	int trok = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
		panic("segvn_create type");
		/*NOTREACHED*/
	}

	/*
	 * Check arguments.  If a shared anon structure is given then
	 * it is illegal to also specify a vp.
	 */
	if (a->amp != NULL && a->vp != NULL) {
		panic("segvn_create anon_map");
		/*NOTREACHED*/
	}

	if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
	    a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
	    segvn_use_regions) {
		use_rgn = 1;
	}

	/* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
	if (a->type == MAP_SHARED)
		a->flags &= ~MAP_NORESERVE;

	if (a->szc != 0) {
		if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
		    (a->amp != NULL && a->type == MAP_PRIVATE) ||
		    (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
			a->szc = 0;
		} else {
			if (a->szc > segvn_maxpgszc)
				a->szc = segvn_maxpgszc;
			pgsz = page_get_pagesize(a->szc);
			if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
			    !IS_P2ALIGNED(seg->s_size, pgsz)) {
				a->szc = 0;
			} else if (a->vp != NULL) {
				if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
					/*
					 * paranoid check.
					 * hat_page_demote() is not supported
					 * on swapfs pages.
					 */
					a->szc = 0;
				} else if (map_addr_vacalign_check(seg->s_base,
				    a->offset & PAGEMASK)) {
					a->szc = 0;
				}
			} else if (a->amp != NULL) {
				pgcnt_t anum = btopr(a->offset);
				pgcnt_t pgcnt = page_get_pagecnt(a->szc);
				if (!IS_P2ALIGNED(anum, pgcnt)) {
					a->szc = 0;
				}
			}
		}
	}

	/*
	 * If segment may need private pages, reserve them now.
	 */
	if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
	    (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
		if (anon_resv_zone(seg->s_size,
		    seg->s_as->a_proc->p_zone) == 0)
			return (EAGAIN);
		swresv = seg->s_size;
		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, swresv, 1);
	}

	/*
	 * Reserve any mapping structures that may be required.
	 *
	 * Don't do it for segments that may use regions. It's currently a
	 * noop in the hat implementations anyway.
	 */
	if (!use_rgn) {
		hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
	}

	if (a->cred) {
		cred = a->cred;
		crhold(cred);
	} else {
		crhold(cred = CRED());
	}

	/* Inform the vnode of the new mapping */
	if (a->vp != NULL) {
		error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
		    seg->s_as, seg->s_base, seg->s_size, a->prot,
		    a->maxprot, a->type, cred, NULL);
		if (error) {
			if (swresv != 0) {
				anon_unresv_zone(swresv,
				    seg->s_as->a_proc->p_zone);
				TRACE_3(TR_FAC_VM, TR_ANON_PROC,
				    "anon proc:%p %lu %u", seg, swresv, 0);
			}
			crfree(cred);
			if (!use_rgn) {
				hat_unload(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, HAT_UNLOAD_UNMAP);
			}
			return (error);
		}
		/*
		 * svntr_hashtab will be NULL if we support shared regions.
		 */
		trok = ((a->flags & MAP_TEXT) &&
		    (seg->s_size > textrepl_size_thresh ||
		    (a->flags & _MAP_TEXTREPL)) &&
		    lgrp_optimizations() && svntr_hashtab != NULL &&
		    a->type == MAP_PRIVATE && swresv == 0 &&
		    !(a->flags & MAP_NORESERVE) &&
		    seg->s_as != &kas && a->vp->v_type == VREG);

		ASSERT(!trok || !use_rgn);
	}

	/*
	 * MAP_NORESERVE mappings don't count towards the VSZ of a process
	 * until we fault the pages in.
	 */
	if ((a->vp == NULL || a->vp->v_type != VREG) &&
	    a->flags & MAP_NORESERVE) {
		seg->s_as->a_resvsize -= seg->s_size;
	}

	/*
	 * If more than one segment in the address space, and they're adjacent
	 * virtually, try to concatenate them.  Don't concatenate if an
	 * explicit anon_map structure was supplied (e.g., SystemV shared
	 * memory) or if we'll use text replication for this segment.
	 */
	if (a->amp == NULL && !use_rgn && !trok) {
		struct seg *pseg, *nseg;
		struct segvn_data *psvd, *nsvd;
		lgrp_mem_policy_t ppolicy, npolicy;
		uint_t	lgrp_mem_policy_flags = 0;
		extern lgrp_mem_policy_t lgrp_mem_default_policy;

		/*
		 * Memory policy flags (lgrp_mem_policy_flags) is valid when
		 * extending stack/heap segments.
		 */
		if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
		    !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
			lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
		} else {
			/*
			 * Get policy when not extending it from another segment
			 */
			mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
		}

		/*
		 * First, try to concatenate the previous and new segments
		 */
		pseg = AS_SEGPREV(seg->s_as, seg);
		if (pseg != NULL &&
		    pseg->s_base + pseg->s_size == seg->s_base &&
		    pseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from previous segment.
			 * When extension is specified (e.g. for heap) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			psvd = (struct segvn_data *)pseg->s_data;
			ppolicy = psvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_UP) {
				if (ppolicy != lgrp_mem_default_policy) {
					mpolicy = ppolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    pseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == ppolicy &&
			    (pseg->s_size + seg->s_size <=
			    segvn_comb_thrshld || psvd->amp == NULL) &&
			    segvn_extend_prev(pseg, seg, a, swresv) == 0) {
				/*
				 * success! now try to concatenate
				 * with following seg
				 */
				crfree(cred);
				nseg = AS_SEGNEXT(pseg->s_as, pseg);
				if (nseg != NULL &&
				    nseg != pseg &&
				    nseg->s_ops == &segvn_ops &&
				    pseg->s_base + pseg->s_size ==
				    nseg->s_base)
					(void) segvn_concat(pseg, nseg, 0);
				ASSERT(pseg->s_szc == 0 ||
				    (a->szc == pseg->s_szc &&
				    IS_P2ALIGNED(pseg->s_base, pgsz) &&
				    IS_P2ALIGNED(pseg->s_size, pgsz)));
				return (0);
			}
		}

		/*
		 * Failed, so try to concatenate with following seg
		 */
		nseg = AS_SEGNEXT(seg->s_as, seg);
		if (nseg != NULL &&
		    seg->s_base + seg->s_size == nseg->s_base &&
		    nseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from next segment.
			 * When extension is specified (e.g. for stack) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			nsvd = (struct segvn_data *)nseg->s_data;
			npolicy = nsvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_DOWN) {
				if (npolicy != lgrp_mem_default_policy) {
					mpolicy = npolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    nseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == npolicy &&
			    segvn_extend_next(seg, nseg, a, swresv) == 0) {
				crfree(cred);
				ASSERT(nseg->s_szc == 0 ||
				    (a->szc == nseg->s_szc &&
				    IS_P2ALIGNED(nseg->s_base, pgsz) &&
				    IS_P2ALIGNED(nseg->s_size, pgsz)));
				return (0);
			}
		}
	}

	if (a->vp != NULL) {
		VN_HOLD(a->vp);
		if (a->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, a->vp);
	}
	svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	seg->s_ops = &segvn_ops;
	seg->s_data = (void *)svd;
	seg->s_szc = a->szc;

	svd->seg = seg;
	svd->vp = a->vp;
	/*
	 * Anonymous mappings have no backing file so the offset is meaningless.
	 */
	svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
	svd->prot = a->prot;
	svd->maxprot = a->maxprot;
	svd->pageprot = 0;
	svd->type = a->type;
	svd->vpage = NULL;
	svd->cred = cred;
	svd->advice = MADV_NORMAL;
	svd->pageadvice = 0;
	svd->flags = (ushort_t)a->flags;
	svd->softlockcnt = 0;
	svd->softlockcnt_sbase = 0;
	svd->softlockcnt_send = 0;
	svd->svn_inz = 0;
	svd->rcookie = HAT_INVALID_REGION_COOKIE;
	svd->pageswap = 0;

	if (a->szc != 0 && a->vp != NULL) {
		segvn_setvnode_mpss(a->vp);
	}
	if (svd->type == MAP_SHARED && svd->vp != NULL &&
	    (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
		ASSERT(vn_is_mapped(svd->vp, V_WRITE));
		segvn_inval_trcache(svd->vp);
	}

	amp = a->amp;
	if ((svd->amp = amp) == NULL) {
		svd->anon_index = 0;
		if (svd->type == MAP_SHARED) {
			svd->swresv = 0;
			/*
			 * Shared mappings to a vp need no other setup.
			 * If we have a shared mapping to an anon_map object
			 * which hasn't been allocated yet, allocate the
			 * struct now so that it will be properly shared
			 * by remembering the swap reservation there.
			 */
			if (a->vp == NULL) {
				svd->amp = anonmap_alloc(seg->s_size, swresv,
				    ANON_SLEEP);
				svd->amp->a_szc = seg->s_szc;
			}
		} else {
			/*
			 * Private mapping (with or without a vp).
			 * Allocate anon_map when needed.
			 */
			svd->swresv = swresv;
		}
	} else {
		pgcnt_t anon_num;

		/*
		 * Mapping to an existing anon_map structure without a vp.
		 * For now we will ensure that the segment size isn't larger
		 * than the size - offset gives us.  Later on we may wish to
		 * have the anon array dynamically allocated itself so that
		 * we don't always have to allocate all the anon pointer slots.
		 * This of course involves adding extra code to check that we
		 * aren't trying to use an anon pointer slot beyond the end
		 * of the currently allocated anon array.
		 */
		if ((amp->size - a->offset) < seg->s_size) {
			panic("segvn_create anon_map size");
			/*NOTREACHED*/
		}

		anon_num = btopr(a->offset);

		if (a->type == MAP_SHARED) {
			/*
			 * SHARED mapping to a given anon_map.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			if (a->szc > amp->a_szc) {
				amp->a_szc = a->szc;
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
			svd->anon_index = anon_num;
			svd->swresv = 0;
		} else {
			/*
			 * PRIVATE mapping to a given anon_map.
			 * Make sure that all the needed anon
			 * structures are created (so that we will
			 * share the underlying pages if nothing
			 * is written by this mapping) and then
			 * duplicate the anon array as is done
			 * when a privately mapped segment is dup'ed.
			 */
			struct anon *ap;
			caddr_t addr;
			caddr_t eaddr;
			ulong_t anon_idx;
			int hat_flag = HAT_LOAD;

			if (svd->flags & MAP_TEXT) {
				hat_flag |= HAT_LOAD_TEXT;
			}

			svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
			svd->amp->a_szc = seg->s_szc;
			svd->anon_index = 0;
			svd->swresv = swresv;

			/*
			 * Prevent 2 threads from allocating anon
			 * slots simultaneously.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			eaddr = seg->s_base + seg->s_size;

			for (anon_idx = anon_num, addr = seg->s_base;
			    addr < eaddr; addr += PAGESIZE, anon_idx++) {
				page_t *pp;

				if ((ap = anon_get_ptr(amp->ahp,
				    anon_idx)) != NULL)
					continue;

				/*
				 * Allocate the anon struct now.
				 * Might as well load up translation
				 * to the page while we're at it...
				 */
				pp = anon_zero(seg, addr, &ap, cred);
				if (ap == NULL || pp == NULL) {
					panic("segvn_create anon_zero");
					/*NOTREACHED*/
				}

				/*
				 * Re-acquire the anon_map lock and
				 * initialize the anon array entry.
				 */
				ASSERT(anon_get_ptr(amp->ahp,
				    anon_idx) == NULL);
				(void) anon_set_ptr(amp->ahp, anon_idx, ap,
				    ANON_SLEEP);

				ASSERT(seg->s_szc == 0);
				ASSERT(!IS_VMODSORT(pp->p_vnode));

				ASSERT(use_rgn == 0);
				hat_memload(seg->s_as->a_hat, addr, pp,
				    svd->prot & ~PROT_WRITE, hat_flag);

				page_unlock(pp);
			}
			ASSERT(seg->s_szc == 0);
			anon_dup(amp->ahp, anon_num, svd->amp->ahp,
			    0, seg->s_size);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}

	/*
	 * Set default memory allocation policy for segment
	 *
	 * Always set policy for private memory at least for initialization
	 * even if this is a shared memory segment
	 */
	(void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);

	if (svd->type == MAP_SHARED)
		(void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
		    svd->vp, svd->offset, seg->s_size);

	if (use_rgn) {
		ASSERT(!trok);
		ASSERT(svd->amp == NULL);
		svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
		    seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
		    (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
		    HAT_REGION_TEXT);
	}

	ASSERT(!trok || !(svd->prot & PROT_WRITE));
	svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;

	return (0);
}

/*
 * Concatenate two existing segments, if possible.
 * Returns 0 on success, -1 if the two segments are not compatible,
 * or -2 on memory allocation failure.
 * If amp_cat == 1, try to concatenate segments with anon maps.
 */
static int
segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
{
	struct segvn_data *svd1 = seg1->s_data;
	struct segvn_data *svd2 = seg2->s_data;
	struct anon_map *amp1 = svd1->amp;
	struct anon_map *amp2 = svd2->amp;
	struct vpage *vpage1 = svd1->vpage;
	struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
	size_t size, nvpsize;
	pgcnt_t npages1, npages2;

	ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
	ASSERT(AS_WRITE_HELD(seg1->s_as));
	ASSERT(seg1->s_ops == seg2->s_ops);

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
	    HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* both segments exist, try to merge them */
#define	incompat(x)	(svd1->x != svd2->x)
	if (incompat(vp) || incompat(maxprot) ||
	    (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
	    (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
	    incompat(type) || incompat(cred) || incompat(flags) ||
	    seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
	    (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
		return (-1);
#undef incompat

	/*
	 * vp == NULL implies zfod, offset doesn't matter
	 */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != svd2->offset) {
		return (-1);
	}

	/*
	 * Don't concatenate if either segment uses text replication.
	 */
	if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	/*
	 * Fail early if we're not supposed to concatenate
	 * segments with non NULL amp.
	 */
	if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
		return (-1);
	}

	if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
		if (amp1 != amp2) {
			return (-1);
		}
		if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
		    svd2->anon_index) {
			return (-1);
		}
		ASSERT(amp1 == NULL || amp1->refcnt >= 2);
	}

	/*
	 * If either seg has vpages, create a new merged vpage array.
	 */
	if (vpage1 != NULL || vpage2 != NULL) {
		struct vpage *vp, *evp;

		npages1 = seg_pages(seg1);
		npages2 = seg_pages(seg2);
		nvpsize = vpgtob(npages1 + npages2);

		if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
			return (-2);
		}

		if (vpage1 != NULL) {
			bcopy(vpage1, nvpage, vpgtob(npages1));
		} else {
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETPROT(vp, svd1->prot);
				VPP_SETADVICE(vp, svd1->advice);
			}
		}

		if (vpage2 != NULL) {
			bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
		} else {
			evp = nvpage + npages1 + npages2;
			for (vp = nvpage + npages1; vp < evp; vp++) {
				VPP_SETPROT(vp, svd2->prot);
				VPP_SETADVICE(vp, svd2->advice);
			}
		}

		if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
			ASSERT(svd1->swresv == seg1->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}

		if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
			ASSERT(svd2->swresv == seg2->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			vp = nvpage + npages1;
			evp = vp + npages2;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT((vpage1 != NULL || vpage2 != NULL) ||
	    (svd1->pageswap == 0 && svd2->pageswap == 0));

	/*
	 * If either segment has private pages, create a new merged anon
	 * array. If merging shared anon segments, just decrement the anon
	 * map's refcnt.
	 */
1139         if (amp1 != NULL && svd1->type == MAP_SHARED) {
1140                 ASSERT(amp1 == amp2 && svd1->vp == NULL);
1141                 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1142                 ASSERT(amp1->refcnt >= 2);
1143                 amp1->refcnt--;
1144                 ANON_LOCK_EXIT(&amp1->a_rwlock);
1145                 svd2->amp = NULL;
1146         } else if (amp1 != NULL || amp2 != NULL) {
1147                 struct anon_hdr *nahp;
1148                 struct anon_map *namp = NULL;
1149                 size_t asize;
1150 
1151                 ASSERT(svd1->type == MAP_PRIVATE);
1152 
1153                 asize = seg1->s_size + seg2->s_size;
1154                 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
1155                         if (nvpage != NULL) {
1156                                 kmem_free(nvpage, nvpsize);
1157                         }
1158                         return (-2);
1159                 }
1160                 if (amp1 != NULL) {
1161                         /*
1162                          * XXX anon rwlock is not really needed because
1163                          * this is a private segment and we are writers.
1164                          */
1165                         ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1166                         ASSERT(amp1->refcnt == 1);
1167                         if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
1168                             nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
1169                                 anon_release(nahp, btop(asize));
1170                                 ANON_LOCK_EXIT(&amp1->a_rwlock);
1171                                 if (nvpage != NULL) {
1172                                         kmem_free(nvpage, nvpsize);
1173                                 }
1174                                 return (-2);
1175                         }
1176                 }
1177                 if (amp2 != NULL) {
1178                         ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1179                         ASSERT(amp2->refcnt == 1);
1180                         if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
1181                             nahp, btop(seg1->s_size), btop(seg2->s_size),
1182                             ANON_NOSLEEP)) {
1183                                 anon_release(nahp, btop(asize));
1184                                 ANON_LOCK_EXIT(&amp2->a_rwlock);
1185                                 if (amp1 != NULL) {
1186                                         ANON_LOCK_EXIT(&amp1->a_rwlock);
1187                                 }
1188                                 if (nvpage != NULL) {
1189                                         kmem_free(nvpage, nvpsize);
1190                                 }
1191                                 return (-2);
1192                         }
1193                 }
1194                 if (amp1 != NULL) {
1195                         namp = amp1;
1196                         anon_release(amp1->ahp, btop(amp1->size));
1197                 }
1198                 if (amp2 != NULL) {
1199                         if (namp == NULL) {
1200                                 ASSERT(amp1 == NULL);
1201                                 namp = amp2;
1202                                 anon_release(amp2->ahp, btop(amp2->size));
1203                         } else {
1204                                 amp2->refcnt--;
1205                                 ANON_LOCK_EXIT(&amp2->a_rwlock);
1206                                 anonmap_free(amp2);
1207                         }
1208                         svd2->amp = NULL; /* needed for seg_free */
1209                 }
1210                 namp->ahp = nahp;
1211                 namp->size = asize;
1212                 svd1->amp = namp;
1213                 svd1->anon_index = 0;
1214                 ANON_LOCK_EXIT(&namp->a_rwlock);
1215         }
1216         /*
1217          * Now free the old vpage structures.
1218          */
1219         if (nvpage != NULL) {
1220                 if (vpage1 != NULL) {
1221                         kmem_free(vpage1, vpgtob(npages1));
1222                 }
1223                 if (vpage2 != NULL) {
1224                         svd2->vpage = NULL;
1225                         kmem_free(vpage2, vpgtob(npages2));
1226                 }
1227                 if (svd2->pageprot) {
1228                         svd1->pageprot = 1;
1229                 }
1230                 if (svd2->pageadvice) {
1231                         svd1->pageadvice = 1;
1232                 }
1233                 if (svd2->pageswap) {
1234                         svd1->pageswap = 1;
1235                 }
1236                 svd1->vpage = nvpage;
1237         }
1238 
1239         /* all looks ok, merge segments */
1240         svd1->swresv += svd2->swresv;
1241         svd2->swresv = 0;  /* so seg_free doesn't release swap space */
1242         size = seg2->s_size;
1243         seg_free(seg2);
1244         seg1->s_size += size;
1245         return (0);
1246 }
1247 
1248 /*
1249  * Extend the previous segment (seg1) to include the
1250  * new segment (seg2 + a), if possible.
1251  * Return 0 on success.
1252  */
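     /*
      * A sketch of the success case below:
      *
      *      before:  [ seg1 ][ seg2 (the new mapping "a") ]
      *      after:   [ seg1, grown by seg2's size         ]
      *
      * seg2's placeholder is freed and seg1 absorbs its range.
      */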
1253 static int
1254 segvn_extend_prev(struct seg *seg1, struct seg *seg2, struct segvn_crargs *a,
1255     size_t swresv)
1256 {
1259         struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
1260         size_t size;
1261         struct anon_map *amp1;
1262         struct vpage *new_vpage;
1263 
1264         /*
1265          * We don't need any segment level locks for "segvn" data
1266          * since the address space is "write" locked.
1267          */
1268         ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as));
1269 
1270         if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
1271                 return (-1);
1272         }
1273 
1274         /* second segment is new, try to extend first */
1275         /* XXX - should also check cred */
1276         if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
1277             (!svd1->pageprot && (svd1->prot != a->prot)) ||
1278             svd1->type != a->type || svd1->flags != a->flags ||
1279             seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
1280                 return (-1);
1281 
1282         /* vp == NULL implies zfod, offset doesn't matter */
1283         if (svd1->vp != NULL &&
1284             svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
1285                 return (-1);
1286 
1287         if (svd1->tr_state != SEGVN_TR_OFF) {
1288                 return (-1);
1289         }
1290 
1291         amp1 = svd1->amp;
1292         if (amp1) {
1293                 pgcnt_t newpgs;
1294 
1295                 /*
1296                  * Segment has private pages; can its data structures
1297                  * be expanded?
1298                  *
1299                  * Acquire the anon_map lock to prevent it from changing,
1300                  * if it is shared.  This ensures that the anon_map
1301                  * will not change while a thread which has a read/write
1302                  * lock on an address space references it.
1303                  * XXX - Don't need the anon_map lock at all if "refcnt"
1304                  * is 1.
1305                  *
1306                  * Can't grow a MAP_SHARED segment with an anonmap because
1307                  * there may be existing anon slots where we want to extend
1308                  * the segment and we wouldn't know what to do with them
1309                  * (e.g., for tmpfs right thing is to just leave them there,
1310                  * for /dev/zero they should be cleared out).
1311                  */
1312                 if (svd1->type == MAP_SHARED)
1313                         return (-1);
1314 
1315                 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1316                 if (amp1->refcnt > 1) {
1317                         ANON_LOCK_EXIT(&amp1->a_rwlock);
1318                         return (-1);
1319                 }
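                     /*
                      * A sketch of anon_grow()'s contract as used here:
                      * extend the anon slot array by seg2's page count and
                      * return the new total size in pages, or 0 if the
                      * NOSLEEP allocation failed.
                      */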
1320                 newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
1321                     btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);
1322 
1323                 if (newpgs == 0) {
1324                         ANON_LOCK_EXIT(&amp1->a_rwlock);
1325                         return (-1);
1326                 }
1327                 amp1->size = ptob(newpgs);
1328                 ANON_LOCK_EXIT(&amp1->a_rwlock);
1329         }
1330         if (svd1->vpage != NULL) {
1331                 struct vpage *vp, *evp;
1332                 new_vpage =
1333                     kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1334                     KM_NOSLEEP);
1335                 if (new_vpage == NULL)
1336                         return (-1);
1337                 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
1338                 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
1339                 svd1->vpage = new_vpage;
1340 
1341                 vp = new_vpage + seg_pages(seg1);
1342                 evp = vp + seg_pages(seg2);
1343                 for (; vp < evp; vp++)
1344                         VPP_SETPROT(vp, a->prot);
1345                 if (svd1->pageswap && swresv) {
1346                         ASSERT(!(svd1->flags & MAP_NORESERVE));
1347                         ASSERT(swresv == seg2->s_size);
1348                         vp = new_vpage + seg_pages(seg1);
1349                         for (; vp < evp; vp++) {
1350                                 VPP_SETSWAPRES(vp);
1351                         }
1352                 }
1353         }
1354         ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
1355         size = seg2->s_size;
1356         seg_free(seg2);
1357         seg1->s_size += size;
1358         svd1->swresv += swresv;
1359         if (svd1->pageprot && (a->prot & PROT_WRITE) &&
1360             svd1->type == MAP_SHARED && svd1->vp != NULL &&
1361             (svd1->vp->v_flag & VVMEXEC)) {
1362                 ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
1363                 segvn_inval_trcache(svd1->vp);
1364         }
1365         return (0);
1366 }
1367 
1368 /*
1369  * Extend the next segment (seg2) to include the
1370  * new segment (seg1 + a), if possible.
1371  * Return 0 on success.
1372  */
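     /*
      * The mirror image of segvn_extend_prev(): the new mapping "a" lands
      * immediately below seg2, so on success seg2 grows downward (s_base
      * and offset both shift down by seg1's size) and seg1 is freed.
      */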
1373 static int
1374 segvn_extend_next(
1375         struct seg *seg1,
1376         struct seg *seg2,
1377         struct segvn_crargs *a,
1378         size_t swresv)
1379 {
1380         struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
1381         size_t size;
1382         struct anon_map *amp2;
1383         struct vpage *new_vpage;
1384 
1385         /*
1386          * We don't need any segment level locks for "segvn" data
1387          * since the address space is "write" locked.
1388          */
1389         ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as));
1390 
1391         if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1392                 return (-1);
1393         }
1394 
1395         /* first segment is new, try to extend second */
1396         /* XXX - should also check cred */
1397         if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
1398             (!svd2->pageprot && (svd2->prot != a->prot)) ||
1399             svd2->type != a->type || svd2->flags != a->flags ||
1400             seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
1401                 return (-1);
1402         /* vp == NULL implies zfod, offset doesn't matter */
1403         if (svd2->vp != NULL &&
1404             (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
1405                 return (-1);
1406 
1407         if (svd2->tr_state != SEGVN_TR_OFF) {
1408                 return (-1);
1409         }
1410 
1411         amp2 = svd2->amp;
1412         if (amp2) {
1413                 pgcnt_t newpgs;
1414 
1415                 /*
1416                  * Segment has private pages; can its data structures
1417                  * be expanded?
1418                  *
1419                  * Acquire the anon_map lock to prevent it from changing,
1420                  * if it is shared.  This ensures that the anon_map
1421                  * will not change while a thread which has a read/write
1422                  * lock on an address space references it.
1423                  *
1424                  * XXX - Don't need the anon_map lock at all if "refcnt"
1425                  * is 1.
1426                  */
1427                 if (svd2->type == MAP_SHARED)
1428                         return (-1);
1429 
1430                 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1431                 if (amp2->refcnt > 1) {
1432                         ANON_LOCK_EXIT(&amp2->a_rwlock);
1433                         return (-1);
1434                 }
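                     /*
                      * As above, but ANON_GROWDOWN grows the array toward
                      * lower indices: the existing slots end up at the high
                      * end and svd2->anon_index is shifted down (hence the
                      * adjustment back on the failure path below).
                      */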
1435                 newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
1436                     btop(seg2->s_size), btop(seg1->s_size),
1437                     ANON_NOSLEEP | ANON_GROWDOWN);
1438 
1439                 if (newpgs == 0) {
1440                         ANON_LOCK_EXIT(&amp2->a_rwlock);
1441                         return (-1);
1442                 }
1443                 amp2->size = ptob(newpgs);
1444                 ANON_LOCK_EXIT(&amp2->a_rwlock);
1445         }
1446         if (svd2->vpage != NULL) {
1447                 struct vpage *vp, *evp;
1448                 new_vpage =
1449                     kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1450                     KM_NOSLEEP);
1451                 if (new_vpage == NULL) {
1452                         /* Not merging segments so adjust anon_index back */
1453                         if (amp2)
1454                                 svd2->anon_index += seg_pages(seg1);
1455                         return (-1);
1456                 }
1457                 bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
1458                     vpgtob(seg_pages(seg2)));
1459                 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
1460                 svd2->vpage = new_vpage;
1461 
1462                 vp = new_vpage;
1463                 evp = vp + seg_pages(seg1);
1464                 for (; vp < evp; vp++)
1465                         VPP_SETPROT(vp, a->prot);
1466                 if (svd2->pageswap && swresv) {
1467                         ASSERT(!(svd2->flags & MAP_NORESERVE));
1468                         ASSERT(swresv == seg1->s_size);
1469                         vp = new_vpage;
1470                         for (; vp < evp; vp++) {
1471                                 VPP_SETSWAPRES(vp);
1472                         }
1473                 }
1474         }
1475         ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
1476         size = seg1->s_size;
1477         seg_free(seg1);
1478         seg2->s_size += size;
1479         seg2->s_base -= size;
1480         svd2->offset -= size;
1481         svd2->swresv += swresv;
1482         if (svd2->pageprot && (a->prot & PROT_WRITE) &&
1483             svd2->type == MAP_SHARED && svd2->vp != NULL &&
1484             (svd2->vp->v_flag & VVMEXEC)) {
1485                 ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
1486                 segvn_inval_trcache(svd2->vp);
1487         }
1488         return (0);
1489 }
1490 
1491 /*
1492  * Duplicate all the pages in the segment. This may break COW sharing for a
1493  * given page. If a page is marked inherit-zero, then instead of
1494  * duplicating it, we give the child a zeroed page.
1495  */
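     /*
      * The per-page outcome, in sketch form:
      *
      *      inherit-zero (SEGVN_INZ_ALL, or SEGVN_INZ_VPP with the vpage
      *      bit set)  -> anon_zero():    the child gets a fresh zeroed page
      *      otherwise -> anon_private(): the child gets its own copy now,
      *                                   breaking COW at fork time
      */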
1496 static int
1497 segvn_dup_pages(struct seg *seg, struct seg *newseg)
1498 {
1499         int error;
1500         uint_t prot;
1501         page_t *pp;
1502         struct anon *ap, *newap;
1503         size_t i;
1504         caddr_t addr;
1505 
1506         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1507         struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data;
1508         ulong_t old_idx = svd->anon_index;
1509         ulong_t new_idx = 0;
1510 
1511         i = btopr(seg->s_size);
1512         addr = seg->s_base;
1513 
1514         /*
1515          * XXX break cow sharing using PAGESIZE
1516          * pages. They will be relocated into larger
1517          * pages at fault time.
1518          */
1519         while (i-- > 0) {
1520                 if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) {
1521                         struct vpage *vpp;
1522 
1523                         vpp = &svd->vpage[seg_page(seg, addr)];
1524 
1525                         /*
1526                          * prot need not be computed below because
1527                          * anon_private is going to ignore it anyway, as the
1528                          * child doesn't inherit the pagelock from the parent.
1529                          */
1530                         prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot;
1531 
1532                         /*
1533                          * Check whether we should zero this or dup it.
1534                          */
1535                         if (svd->svn_inz == SEGVN_INZ_ALL ||
1536                             (svd->svn_inz == SEGVN_INZ_VPP &&
1537                             VPP_ISINHZERO(vpp))) {
1538                                 pp = anon_zero(newseg, addr, &newap,
1539                                     newsvd->cred);
1540                         } else {
1541                                 page_t *anon_pl[1+1];
1542                                 uint_t vpprot;
1543                                 error = anon_getpage(&ap, &vpprot, anon_pl,
1544                                     PAGESIZE, seg, addr, S_READ, svd->cred);
1545                                 if (error != 0)
1546                                         return (error);
1547 
1548                                 pp = anon_private(&newap, newseg, addr, prot,
1549                                     anon_pl[0], 0, newsvd->cred);
1550                         }
1551                         if (pp == NULL) {
1552                                 return (ENOMEM);
1553                         }
1554                         (void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap,
1555                             ANON_SLEEP);
1556                         page_unlock(pp);
1557                 }
1558                 addr += PAGESIZE;
1559                 old_idx++;
1560                 new_idx++;
1561         }
1562 
1563         return (0);
1564 }
1565 
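     /*
      * Duplicate seg into newseg as part of duplicating an address space
      * (fork).  A shared anon map is picked up by reference; a private one
      * is copied, breaking COW up front for pages that are softlocked or
      * marked inherit-zero.
      */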
1566 static int
1567 segvn_dup(struct seg *seg, struct seg *newseg)
1568 {
1569         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1570         struct segvn_data *newsvd;
1571         pgcnt_t npages = seg_pages(seg);
1572         int error = 0;
1573         size_t len;
1574         struct anon_map *amp;
1575 
1576         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1577         ASSERT(newseg->s_as->a_proc->p_parent == curproc);
1578 
1579         /*
1580          * If the segment has anon reserved, reserve more for the new seg.
1581          * For a MAP_NORESERVE segment swresv will be a count of all the
1582          * allocated anon slots; thus we reserve for the child as many slots
1583          * as the parent has allocated. This semantic prevents the child or
1584          * parent from dying during a copy-on-write fault caused by trying
1585          * to write a shared pre-existing anon page.
1586          */
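             /*
              * An illustrative example: if a MAP_NORESERVE parent has
              * populated five anon slots, swresv covers those five pages and
              * the child reserves five more, so either process can safely
              * COW any of them.
              */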
1587         if ((len = svd->swresv) != 0) {
1588                 if (anon_resv(svd->swresv) == 0)
1589                         return (ENOMEM);
1590 
1591                 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
1592                     seg, len, 0);
1593         }
1594 
1595         newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
1596 
1597         newseg->s_ops = &segvn_ops;
1598         newseg->s_data = (void *)newsvd;
1599         newseg->s_szc = seg->s_szc;
1600 
1601         newsvd->seg = newseg;
1602         if ((newsvd->vp = svd->vp) != NULL) {
1603                 VN_HOLD(svd->vp);
1604                 if (svd->type == MAP_SHARED)
1605                         lgrp_shm_policy_init(NULL, svd->vp);
1606         }
1607         newsvd->offset = svd->offset;
1608         newsvd->prot = svd->prot;
1609         newsvd->maxprot = svd->maxprot;
1610         newsvd->pageprot = svd->pageprot;
1611         newsvd->type = svd->type;
1612         newsvd->cred = svd->cred;
1613         crhold(newsvd->cred);
1614         newsvd->advice = svd->advice;
1615         newsvd->pageadvice = svd->pageadvice;
1616         newsvd->svn_inz = svd->svn_inz;
1617         newsvd->swresv = svd->swresv;
1618         newsvd->pageswap = svd->pageswap;
1619         newsvd->flags = svd->flags;
1620         newsvd->softlockcnt = 0;
1621         newsvd->softlockcnt_sbase = 0;
1622         newsvd->softlockcnt_send = 0;
1623         newsvd->policy_info = svd->policy_info;
1624         newsvd->rcookie = HAT_INVALID_REGION_COOKIE;
1625 
1626         if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
1627                 /*
1628                  * Not attaching to a shared anon object.
1629                  */
1630                 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
1631                     svd->tr_state == SEGVN_TR_OFF);
1632                 if (svd->tr_state == SEGVN_TR_ON) {
1633                         ASSERT(newsvd->vp != NULL && amp != NULL);
1634                         newsvd->tr_state = SEGVN_TR_INIT;
1635                 } else {
1636                         newsvd->tr_state = svd->tr_state;
1637                 }
1638                 newsvd->amp = NULL;
1639                 newsvd->anon_index = 0;
1640         } else {
1641                 /* regions for now are only used on pure vnode segments */
1642                 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
1643                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1644                 newsvd->tr_state = SEGVN_TR_OFF;
1645                 if (svd->type == MAP_SHARED) {
1646                         ASSERT(svd->svn_inz == SEGVN_INZ_NONE);
1647                         newsvd->amp = amp;
1648                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1649                         amp->refcnt++;
1650                         ANON_LOCK_EXIT(&amp->a_rwlock);
1651                         newsvd->anon_index = svd->anon_index;
1652                 } else {
1653                         int reclaim = 1;
1654 
1655                         /*
1656                          * Allocate and initialize new anon_map structure.
1657                          */
1658                         newsvd->amp = anonmap_alloc(newseg->s_size, 0,
1659                             ANON_SLEEP);
1660                         newsvd->amp->a_szc = newseg->s_szc;
1661                         newsvd->anon_index = 0;
1662                         ASSERT(svd->svn_inz == SEGVN_INZ_NONE ||
1663                             svd->svn_inz == SEGVN_INZ_ALL ||
1664                             svd->svn_inz == SEGVN_INZ_VPP);
1665 
1666                         /*
1667                          * We don't have to acquire the anon_map lock
1668                          * for the new segment (since it belongs to an
1669                          * address space that is still not associated
1670                          * with any process), or the segment in the old
1671                          * address space (since all threads in it
1672                          * are stopped while duplicating the address space).
1673                          */
1674 
1675                         /*
1676                          * The goal of the following code is to make sure that
1677                          * softlocked pages do not end up as copy on write
1678                          * pages.  This would cause problems where one
1679                          * thread writes to a page that is COW and a different
1680                          * thread in the same process has softlocked it.  The
1681                          * softlock lock would move away from this process
1682                          * because the write would cause this process to get
1683                          * a copy (without the softlock).
1684                          *
1685                          * The strategy here is to just break the
1686                          * sharing on pages that could possibly be
1687                          * softlocked.
1688                          *
1689                          * In addition, if any pages have been marked that they
1690                          * should be inherited as zero, then we immediately go
1691                          * ahead and break COW and zero them. In the case of a
1692                          * softlocked page that should be inherited zero, we
1693                          * break COW and just get a zero page.
1694                          */
1695 retry:
1696                         if (svd->softlockcnt ||
1697                             svd->svn_inz != SEGVN_INZ_NONE) {
1698                                 /*
1699                                  * The softlock count might be non-zero because
1700                                  * some pages are still stuck in the cache for
1701                                  * lazy reclaim. Flush the cache now; this
1702                                  * should drop the count to zero (or else there
1703                                  * really is I/O going on to these pages). Note
1704                                  * that we hold the writers lock, so nothing
1705                                  * gets inserted during the flush.
1706                                  */
1707                                 if (svd->softlockcnt && reclaim == 1) {
1708                                         segvn_purge(seg);
1709                                         reclaim = 0;
1710                                         goto retry;
1711                                 }
1712 
1713                                 error = segvn_dup_pages(seg, newseg);
1714                                 if (error != 0) {
1715                                         newsvd->vpage = NULL;
1716                                         goto out;
1717                                 }
1718                         } else {        /* common case */
1719                                 if (seg->s_szc != 0) {
1720                                         /*
1721                                          * If at least one anon slot of a
1722                                          * large page exists, then make sure
1723                                          * all anon slots of that large page
1724                                          * exist, to avoid partial cow sharing
1725                                          * of the large page in the future.
1726                                          */
1727                                         anon_dup_fill_holes(amp->ahp,
1728                                             svd->anon_index, newsvd->amp->ahp,
1729                                             0, seg->s_size, seg->s_szc,
1730                                             svd->vp != NULL);
1731                                 } else {
1732                                         anon_dup(amp->ahp, svd->anon_index,
1733                                             newsvd->amp->ahp, 0, seg->s_size);
1734                                 }
1735 
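                                     /*
                                      * Write-protect the existing
                                      * translations so the next write by
                                      * either process faults and makes a
                                      * private copy; this is what arms COW
                                      * after the anon slots are shared above.
                                      */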
1736                                 hat_clrattr(seg->s_as->a_hat, seg->s_base,
1737                                     seg->s_size, PROT_WRITE);
1738                         }
1739                 }
1740         }
1741         /*
1742          * If necessary, create a vpage structure for the new segment.
1743          * Do not copy any page lock indications.
1744          */
1745         if (svd->vpage != NULL) {
1746                 uint_t i;
1747                 struct vpage *ovp = svd->vpage;
1748                 struct vpage *nvp;
1749 
1750                 nvp = newsvd->vpage =
1751                     kmem_alloc(vpgtob(npages), KM_SLEEP);
1752                 for (i = 0; i < npages; i++) {
1753                         *nvp = *ovp++;
1754                         VPP_CLRPPLOCK(nvp++);
1755                 }
1756         } else
1757                 newsvd->vpage = NULL;
1758 
1759         /* Inform the vnode of the new mapping */
1760         if (newsvd->vp != NULL) {
1761                 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
1762                     newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
1763                     newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
1764         }
1765 out:
1766         if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1767                 ASSERT(newsvd->amp == NULL);
1768                 ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
1769                 newsvd->rcookie = svd->rcookie;
1770                 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
1771         }
1772         return (error);
1773 }
1774 
1775 
1776 /*
1777  * callback function to invoke free_vp_pages() for only those pages actually
1778  * processed by the HAT when a shared region is destroyed.
1779  */
1780 extern int free_pages;
1781 
1782 static void
1783 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
1784     size_t r_size, void *r_obj, u_offset_t r_objoff)
1785 {
1786         u_offset_t off;
1787         size_t len;
1788         vnode_t *vp = (vnode_t *)r_obj;
1789 
1790         ASSERT(eaddr > saddr);
1791         ASSERT(saddr >= r_saddr);
1792         ASSERT(saddr < r_saddr + r_size);
1793         ASSERT(eaddr > r_saddr);
1794         ASSERT(eaddr <= r_saddr + r_size);
1795         ASSERT(vp != NULL);
1796 
1797         if (!free_pages) {
1798                 return;
1799         }
1800 
1801         len = eaddr - saddr;
1802         off = (saddr - r_saddr) + r_objoff;
1803         free_vp_pages(vp, off, len);
1804 }
1805 
1806 /*
1807  * callback function used by segvn_unmap to invoke free_vp_pages() for only
1808  * those pages actually processed by the HAT
1809  */
1810 static void
1811 segvn_hat_unload_callback(hat_callback_t *cb)
1812 {
1813         struct seg              *seg = cb->hcb_data;
1814         struct segvn_data       *svd = (struct segvn_data *)seg->s_data;
1815         size_t                  len;
1816         u_offset_t              off;
1817 
1818         ASSERT(svd->vp != NULL);
1819         ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
1820         ASSERT(cb->hcb_start_addr >= seg->s_base);
1821 
1822         len = cb->hcb_end_addr - cb->hcb_start_addr;
1823         off = cb->hcb_start_addr - seg->s_base;
1824         free_vp_pages(svd->vp, svd->offset + off, len);
1825 }
1826 
1827 /*
1828  * This function determines the number of bytes of swap reserved by
1829  * a segment for which per-page accounting is present. It is used to
1830  * calculate the correct value of a segvn_data's swresv.
1831  */
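     /*
      * An illustrative example: a segment with exactly three vpages marked
      * VPP_ISSWAPRES has 3 << PAGESHIFT bytes of swap reserved.
      */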
1832 static size_t
1833 segvn_count_swap_by_vpages(struct seg *seg)
1834 {
1835         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1836         struct vpage *vp, *evp;
1837         size_t nswappages = 0;
1838 
1839         ASSERT(svd->pageswap);
1840         ASSERT(svd->vpage != NULL);
1841 
1842         evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
1843 
1844         for (vp = svd->vpage; vp < evp; vp++) {
1845                 if (VPP_ISSWAPRES(vp))
1846                         nswappages++;
1847         }
1848 
1849         return (nswappages << PAGESHIFT);
1850 }
1851 
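     /*
      * Unmap [addr, addr + len) from seg.  The range may cover the entire
      * segment, its beginning, its end, or its middle; the middle case
      * splits the segment in two.  Returns 0 on success, EAGAIN if the
      * range is still busy (softlocked pages, or a vnode refusing
      * VOP_DELMAP), or IE_RETRY after a large-page demotion.
      */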
1852 static int
1853 segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
1854 {
1855         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1856         struct segvn_data *nsvd;
1857         struct seg *nseg;
1858         struct anon_map *amp;
1859         pgcnt_t opages;         /* old segment size in pages */
1860         pgcnt_t npages;         /* new segment size in pages */
1861         pgcnt_t dpages;         /* pages being deleted (unmapped) */
1862         hat_callback_t callback;        /* used for free_vp_pages() */
1863         hat_callback_t *cbp = NULL;
1864         caddr_t nbase;
1865         size_t nsize;
1866         size_t oswresv;
1867         int reclaim = 1;
1868 
1869         /*
1870          * We don't need any segment level locks for "segvn" data
1871          * since the address space is "write" locked.
1872          */
1873         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1874 
1875         /*
1876          * Fail the unmap if pages are SOFTLOCKed through this mapping.
1877          * softlockcnt is protected from change by the as write lock.
1878          */
1879 retry:
1880         if (svd->softlockcnt > 0) {
1881                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1882 
1883                 /*
1884                  * If this is a shared segment, a non-zero softlockcnt
1885                  * means locked pages are still in use.
1886                  */
1887                 if (svd->type == MAP_SHARED) {
1888                         return (EAGAIN);
1889                 }
1890 
1891                 /*
1892                  * Since we hold the writers lock, nobody can fill the
1893                  * cache during the purge. The flush either succeeds or
1894                  * we still have pending I/Os.
1895                  */
1896                 if (reclaim == 1) {
1897                         segvn_purge(seg);
1898                         reclaim = 0;
1899                         goto retry;
1900                 }
1901                 return (EAGAIN);
1902         }
1903 
1904         /*
1905          * Check for bad sizes
1906          */
1907         if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
1908             (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
1909                 panic("segvn_unmap");
1910                 /*NOTREACHED*/
1911         }
1912 
1913         if (seg->s_szc != 0) {
1914                 size_t pgsz = page_get_pagesize(seg->s_szc);
1915                 int err;
1916                 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
1917                         ASSERT(seg->s_base != addr || seg->s_size != len);
1918                         if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1919                                 ASSERT(svd->amp == NULL);
1920                                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1921                                 hat_leave_region(seg->s_as->a_hat,
1922                                     svd->rcookie, HAT_REGION_TEXT);
1923                                 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1924                                 /*
1925                                  * We could pass a flag to
1926                                  * segvn_demote_range() below to tell it not
1927                                  * to do any unloads, but this case is rare
1928                                  * enough not to bother with for now.
1929                                  */
1930                         } else if (svd->tr_state == SEGVN_TR_INIT) {
1931                                 svd->tr_state = SEGVN_TR_OFF;
1932                         } else if (svd->tr_state == SEGVN_TR_ON) {
1933                                 ASSERT(svd->amp != NULL);
1934                                 segvn_textunrepl(seg, 1);
1935                                 ASSERT(svd->amp == NULL);
1936                                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1937                         }
1938                         VM_STAT_ADD(segvnvmstats.demoterange[0]);
1939                         err = segvn_demote_range(seg, addr, len, SDR_END, 0);
1940                         if (err == 0) {
1941                                 return (IE_RETRY);
1942                         }
1943                         return (err);
1944                 }
1945         }
1946 
1947         /* Inform the vnode of the unmapping. */
1948         if (svd->vp) {
1949                 int error;
1950 
1951                 error = VOP_DELMAP(svd->vp,
1952                     (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
1953                     seg->s_as, addr, len, svd->prot, svd->maxprot,
1954                     svd->type, svd->cred, NULL);
1955 
1956                 if (error == EAGAIN)
1957                         return (error);
1958         }
1959 
1960         /*
1961          * Remove any page locks set through this mapping.
1962          * If text replication is not off, no page locks could have been
1963          * established via this mapping.
1964          */
1965         if (svd->tr_state == SEGVN_TR_OFF) {
1966                 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
1967         }
1968 
1969         if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1970                 ASSERT(svd->amp == NULL);
1971                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1972                 ASSERT(svd->type == MAP_PRIVATE);
1973                 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
1974                     HAT_REGION_TEXT);
1975                 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1976         } else if (svd->tr_state == SEGVN_TR_ON) {
1977                 ASSERT(svd->amp != NULL);
1978                 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
1979                 segvn_textunrepl(seg, 1);
1980                 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
1981         } else {
1982                 if (svd->tr_state != SEGVN_TR_OFF) {
1983                         ASSERT(svd->tr_state == SEGVN_TR_INIT);
1984                         svd->tr_state = SEGVN_TR_OFF;
1985                 }
1986                 /*
1987                  * Unload any hardware translations in the range to be taken
1988                  * out. Use a callback to invoke free_vp_pages() effectively.
1989                  */
1990                 if (svd->vp != NULL && free_pages != 0) {
1991                         callback.hcb_data = seg;
1992                         callback.hcb_function = segvn_hat_unload_callback;
1993                         cbp = &callback;
1994                 }
1995                 hat_unload_callback(seg->s_as->a_hat, addr, len,
1996                     HAT_UNLOAD_UNMAP, cbp);
1997 
1998                 if (svd->type == MAP_SHARED && svd->vp != NULL &&
1999                     (svd->vp->v_flag & VVMEXEC) &&
2000                     ((svd->prot & PROT_WRITE) || svd->pageprot)) {
2001                         segvn_inval_trcache(svd->vp);
2002                 }
2003         }
2004 
2005         /*
2006          * Check for entire segment
2007          */
2008         if (addr == seg->s_base && len == seg->s_size) {
2009                 seg_free(seg);
2010                 return (0);
2011         }
2012 
2013         opages = seg_pages(seg);
2014         dpages = btop(len);
2015         npages = opages - dpages;
2016         amp = svd->amp;
2017         ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);
2018 
2019         /*
2020          * Check for beginning of segment
2021          */
2022         if (addr == seg->s_base) {
2023                 if (svd->vpage != NULL) {
2024                         size_t nbytes;
2025                         struct vpage *ovpage;
2026 
2027                         ovpage = svd->vpage; /* keep pointer to vpage */
2028 
2029                         nbytes = vpgtob(npages);
2030                         svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2031                         bcopy(&ovpage[dpages], svd->vpage, nbytes);
2032 
2033                         /* free up old vpage */
2034                         kmem_free(ovpage, vpgtob(opages));
2035                 }
2036                 if (amp != NULL) {
2037                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2038                         if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2039                                 /*
2040                                  * Shared anon map is no longer in use. Before
2041                                  * freeing its pages, purge all entries from
2042                                  * pcache that belong to this amp.
2043                                  */
2044                                 if (svd->type == MAP_SHARED) {
2045                                         ASSERT(amp->refcnt == 1);
2046                                         ASSERT(svd->softlockcnt == 0);
2047                                         anonmap_purge(amp);
2048                                 }
2049                                 /*
2050                                  * Free up now unused parts of anon_map array.
2051                                  */
2052                                 if (amp->a_szc == seg->s_szc) {
2053                                         if (seg->s_szc != 0) {
2054                                                 anon_free_pages(amp->ahp,
2055                                                     svd->anon_index, len,
2056                                                     seg->s_szc);
2057                                         } else {
2058                                                 anon_free(amp->ahp,
2059                                                     svd->anon_index,
2060                                                     len);
2061                                         }
2062                                 } else {
2063                                         ASSERT(svd->type == MAP_SHARED);
2064                                         ASSERT(amp->a_szc > seg->s_szc);
2065                                         anon_shmap_free_pages(amp,
2066                                             svd->anon_index, len);
2067                                 }
2068 
2069                                 /*
2070                                  * Unreserve swap space for the
2071                                  * unmapped chunk of this segment in
2072                                  * case it's MAP_SHARED
2073                                  */
2074                                 if (svd->type == MAP_SHARED) {
2075                                         anon_unresv_zone(len,
2076                                             seg->s_as->a_proc->p_zone);
2077                                         amp->swresv -= len;
2078                                 }
2079                         }
2080                         ANON_LOCK_EXIT(&amp->a_rwlock);
2081                         svd->anon_index += dpages;
2082                 }
2083                 if (svd->vp != NULL)
2084                         svd->offset += len;
2085 
2086                 seg->s_base += len;
2087                 seg->s_size -= len;
2088 
2089                 if (svd->swresv) {
2090                         if (svd->flags & MAP_NORESERVE) {
2091                                 ASSERT(amp);
2092                                 oswresv = svd->swresv;
2093 
2094                                 svd->swresv = ptob(anon_pages(amp->ahp,
2095                                     svd->anon_index, npages));
2096                                 anon_unresv_zone(oswresv - svd->swresv,
2097                                     seg->s_as->a_proc->p_zone);
2098                                 if (SEG_IS_PARTIAL_RESV(seg))
2099                                         seg->s_as->a_resvsize -= oswresv -
2100                                             svd->swresv;
2101                         } else {
2102                                 size_t unlen;
2103 
2104                                 if (svd->pageswap) {
2105                                         oswresv = svd->swresv;
2106                                         svd->swresv =
2107                                             segvn_count_swap_by_vpages(seg);
2108                                         ASSERT(oswresv >= svd->swresv);
2109                                         unlen = oswresv - svd->swresv;
2110                                 } else {
2111                                         svd->swresv -= len;
2112                                         ASSERT(svd->swresv == seg->s_size);
2113                                         unlen = len;
2114                                 }
2115                                 anon_unresv_zone(unlen,
2116                                     seg->s_as->a_proc->p_zone);
2117                         }
2118                         TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2119                             seg, len, 0);
2120                 }
2121 
2122                 return (0);
2123         }
2124 
2125         /*
2126          * Check for end of segment
2127          */
2128         if (addr + len == seg->s_base + seg->s_size) {
2129                 if (svd->vpage != NULL) {
2130                         size_t nbytes;
2131                         struct vpage *ovpage;
2132 
2133                         ovpage = svd->vpage; /* keep pointer to vpage */
2134 
2135                         nbytes = vpgtob(npages);
2136                         svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2137                         bcopy(ovpage, svd->vpage, nbytes);
2138 
2139                         /* free up old vpage */
2140                         kmem_free(ovpage, vpgtob(opages));
2141 
2142                 }
2143                 if (amp != NULL) {
2144                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2145                         if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2146                                 /*
2147                                  * Free up now unused parts of anon_map array.
2148                                  */
2149                                 ulong_t an_idx = svd->anon_index + npages;
2150 
2151                                 /*
2152                                  * Shared anon map is no longer in use. Before
2153                                  * freeing its pages, purge all entries from
2154                                  * pcache that belong to this amp.
2155                                  */
2156                                 if (svd->type == MAP_SHARED) {
2157                                         ASSERT(amp->refcnt == 1);
2158                                         ASSERT(svd->softlockcnt == 0);
2159                                         anonmap_purge(amp);
2160                                 }
2161 
2162                                 if (amp->a_szc == seg->s_szc) {
2163                                         if (seg->s_szc != 0) {
2164                                                 anon_free_pages(amp->ahp,
2165                                                     an_idx, len,
2166                                                     seg->s_szc);
2167                                         } else {
2168                                                 anon_free(amp->ahp, an_idx,
2169                                                     len);
2170                                         }
2171                                 } else {
2172                                         ASSERT(svd->type == MAP_SHARED);
2173                                         ASSERT(amp->a_szc > seg->s_szc);
2174                                         anon_shmap_free_pages(amp,
2175                                             an_idx, len);
2176                                 }
2177 
2178                                 /*
2179                                  * Unreserve swap space for the
2180                                  * unmapped chunk of this segment in
2181                                  * case it's MAP_SHARED
2182                                  */
2183                                 if (svd->type == MAP_SHARED) {
2184                                         anon_unresv_zone(len,
2185                                             seg->s_as->a_proc->p_zone);
2186                                         amp->swresv -= len;
2187                                 }
2188                         }
2189                         ANON_LOCK_EXIT(&amp->a_rwlock);
2190                 }
2191 
2192                 seg->s_size -= len;
2193 
2194                 if (svd->swresv) {
2195                         if (svd->flags & MAP_NORESERVE) {
2196                                 ASSERT(amp);
2197                                 oswresv = svd->swresv;
2198                                 svd->swresv = ptob(anon_pages(amp->ahp,
2199                                     svd->anon_index, npages));
2200                                 anon_unresv_zone(oswresv - svd->swresv,
2201                                     seg->s_as->a_proc->p_zone);
2202                                 if (SEG_IS_PARTIAL_RESV(seg))
2203                                         seg->s_as->a_resvsize -= oswresv -
2204                                             svd->swresv;
2205                         } else {
2206                                 size_t unlen;
2207 
2208                                 if (svd->pageswap) {
2209                                         oswresv = svd->swresv;
2210                                         svd->swresv =
2211                                             segvn_count_swap_by_vpages(seg);
2212                                         ASSERT(oswresv >= svd->swresv);
2213                                         unlen = oswresv - svd->swresv;
2214                                 } else {
2215                                         svd->swresv -= len;
2216                                         ASSERT(svd->swresv == seg->s_size);
2217                                         unlen = len;
2218                                 }
2219                                 anon_unresv_zone(unlen,
2220                                     seg->s_as->a_proc->p_zone);
2221                         }
2222                         TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2223                             "anon proc:%p %lu %u", seg, len, 0);
2224                 }
2225 
2226                 return (0);
2227         }
2228 
2229         /*
2230          * The section to go is in the middle of the segment, so we
2231          * have to make it into two segments.  nseg is made for
2232          * the high end while seg is cut down at the low end.
2233          */
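             /*
              * In sketch form:
              *
              *      before:  [ seg ........................... ]
              *      unmap:           [ addr, addr + len )
              *      after:   [ seg ]                   [ nseg  ]
              */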
2234         nbase = addr + len;                             /* new seg base */
2235         nsize = (seg->s_base + seg->s_size) - nbase;      /* new seg size */
2236         seg->s_size = addr - seg->s_base;         /* shrink old seg */
2237         nseg = seg_alloc(seg->s_as, nbase, nsize);
2238         if (nseg == NULL) {
2239                 panic("segvn_unmap seg_alloc");
2240                 /*NOTREACHED*/
2241         }
2242         nseg->s_ops = seg->s_ops;
2243         nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
2244         nseg->s_data = (void *)nsvd;
2245         nseg->s_szc = seg->s_szc;
2246         *nsvd = *svd;
2247         nsvd->seg = nseg;
2248         nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
2249         nsvd->swresv = 0;
2250         nsvd->softlockcnt = 0;
2251         nsvd->softlockcnt_sbase = 0;
2252         nsvd->softlockcnt_send = 0;
2253         nsvd->svn_inz = svd->svn_inz;
2254         ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
2255 
2256         if (svd->vp != NULL) {
2257                 VN_HOLD(nsvd->vp);
2258                 if (nsvd->type == MAP_SHARED)
2259                         lgrp_shm_policy_init(NULL, nsvd->vp);
2260         }
2261         crhold(svd->cred);
2262 
2263         if (svd->vpage == NULL) {
2264                 nsvd->vpage = NULL;
2265         } else {
2266                 /* need to split vpage into two arrays */
2267                 size_t nbytes;
2268                 struct vpage *ovpage;
2269 
2270                 ovpage = svd->vpage;         /* keep pointer to vpage */
2271 
2272                 npages = seg_pages(seg);        /* seg has shrunk */
2273                 nbytes = vpgtob(npages);
2274                 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2275 
2276                 bcopy(ovpage, svd->vpage, nbytes);
2277 
2278                 npages = seg_pages(nseg);
2279                 nbytes = vpgtob(npages);
2280                 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2281 
2282                 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);
2283 
2284                 /* free up old vpage */
2285                 kmem_free(ovpage, vpgtob(opages));
2286         }
2287 
2288         if (amp == NULL) {
2289                 nsvd->amp = NULL;
2290                 nsvd->anon_index = 0;
2291         } else {
2292                 /*
2293                  * Need to create a new anon map for the new segment.
2294                  * We'll also allocate a new smaller array for the old
2295                  * smaller segment to save space.
2296                  */
2297                 opages = btop((uintptr_t)(addr - seg->s_base));
2298                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2299                 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2300                         /*
2301                          * Free up now unused parts of anon_map array.
2302                          */
2303                         ulong_t an_idx = svd->anon_index + opages;
2304 
2305                         /*
2306                          * Shared anon map is no longer in use. Before
2307                          * freeing its pages, purge all entries from
2308                          * pcache that belong to this amp.
2309                          */
2310                         if (svd->type == MAP_SHARED) {
2311                                 ASSERT(amp->refcnt == 1);
2312                                 ASSERT(svd->softlockcnt == 0);
2313                                 anonmap_purge(amp);
2314                         }
2315 
2316                         if (amp->a_szc == seg->s_szc) {
2317                                 if (seg->s_szc != 0) {
2318                                         anon_free_pages(amp->ahp, an_idx, len,
2319                                             seg->s_szc);
2320                                 } else {
2321                                         anon_free(amp->ahp, an_idx,
2322                                             len);
2323                                 }
2324                         } else {
2325                                 ASSERT(svd->type == MAP_SHARED);
2326                                 ASSERT(amp->a_szc > seg->s_szc);
2327                                 anon_shmap_free_pages(amp, an_idx, len);
2328                         }
2329 
2330                         /*
2331                          * Unreserve swap space for the
2332                          * unmapped chunk of this segment in
2333                          * case it's MAP_SHARED
2334                          */
2335                         if (svd->type == MAP_SHARED) {
2336                                 anon_unresv_zone(len,
2337                                     seg->s_as->a_proc->p_zone);
2338                                 amp->swresv -= len;
2339                         }
2340                 }
2341                 nsvd->anon_index = svd->anon_index +
2342                     btop((uintptr_t)(nseg->s_base - seg->s_base));
2343                 if (svd->type == MAP_SHARED) {
2344                         amp->refcnt++;
2345                         nsvd->amp = amp;
2346                 } else {
2347                         struct anon_map *namp;
2348                         struct anon_hdr *nahp;
2349 
2350                         ASSERT(svd->type == MAP_PRIVATE);
2351                         nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
2352                         namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
2353                         namp->a_szc = seg->s_szc;
2354                         (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
2355                             0, btop(seg->s_size), ANON_SLEEP);
2356                         (void) anon_copy_ptr(amp->ahp, nsvd->anon_index,
2357                             namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
2358                         anon_release(amp->ahp, btop(amp->size));
2359                         svd->anon_index = 0;
2360                         nsvd->anon_index = 0;
2361                         amp->ahp = nahp;
2362                         amp->size = seg->s_size;
2363                         nsvd->amp = namp;
2364                 }
2365                 ANON_LOCK_EXIT(&amp->a_rwlock);
2366         }
2367         if (svd->swresv) {
2368                 if (svd->flags & MAP_NORESERVE) {
2369                         ASSERT(amp);
2370                         oswresv = svd->swresv;
2371                         svd->swresv = ptob(anon_pages(amp->ahp,
2372                             svd->anon_index, btop(seg->s_size)));
2373                         nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
2374                             nsvd->anon_index, btop(nseg->s_size)));
2375                         ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2376                         anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
2377                             seg->s_as->a_proc->p_zone);
2378                         if (SEG_IS_PARTIAL_RESV(seg))
2379                                 seg->s_as->a_resvsize -= oswresv -
2380                                     (svd->swresv + nsvd->swresv);
2381                 } else {
2382                         size_t unlen;
2383 
2384                         if (svd->pageswap) {
2385                                 oswresv = svd->swresv;
2386                                 svd->swresv = segvn_count_swap_by_vpages(seg);
2387                                 nsvd->swresv = segvn_count_swap_by_vpages(nseg);
2388                                 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2389                                 unlen = oswresv - (svd->swresv + nsvd->swresv);
2390                         } else {
2391                                 if (seg->s_size + nseg->s_size + len !=
2392                                     svd->swresv) {
2393                                         panic("segvn_unmap: cannot split "
2394                                             "swap reservation");
2395                                         /*NOTREACHED*/
2396                                 }
2397                                 svd->swresv = seg->s_size;
2398                                 nsvd->swresv = nseg->s_size;
2399                                 unlen = len;
2400                         }
2401                         anon_unresv_zone(unlen,
2402                             seg->s_as->a_proc->p_zone);
2403                 }
2404                 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2405                     seg, len, 0);
2406         }
2407 
2408         return (0);                     /* I'm glad that's all over with! */
2409 }
2410 
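     /*
      * Free the entire segment: drop any page locks, free the vpage array
      * and whatever anon state this mapping holds, release the swap
      * reservation, and finally drop the vnode and credential references.
      */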
2411 static void
2412 segvn_free(struct seg *seg)
2413 {
2414         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2415         pgcnt_t npages = seg_pages(seg);
2416         struct anon_map *amp;
2417         size_t len;
2418 
2419         /*
2420          * We don't need any segment level locks for "segvn" data
2421          * since the address space is "write" locked.
2422          */
2423         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
2424         ASSERT(svd->tr_state == SEGVN_TR_OFF);
2425 
2426         ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2427 
2428         /*
2429          * Be sure to unlock pages. XXX Why do things get freed instead
2430          * of unmapped? XXX
2431          */
2432         (void) segvn_lockop(seg, seg->s_base, seg->s_size,
2433             0, MC_UNLOCK, NULL, 0);
2434 
2435         /*
2436          * Deallocate the vpage and anon pointers if necessary and possible.
2437          */
2438         if (svd->vpage != NULL) {
2439                 kmem_free(svd->vpage, vpgtob(npages));
2440                 svd->vpage = NULL;
2441         }
2442         if ((amp = svd->amp) != NULL) {
2443                 /*
2444                  * If there are no more references to this anon_map
2445                  * structure, then deallocate the structure after freeing
2446                  * up all the anon slot pointers that we can.
2447                  */
2448                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2449                 ASSERT(amp->a_szc >= seg->s_szc);
2450                 if (--amp->refcnt == 0) {
2451                         if (svd->type == MAP_PRIVATE) {
2452                                 /*
2453                                  * Private - we only need to anon_free
2454                                  * the part that this segment refers to.
2455                                  */
2456                                 if (seg->s_szc != 0) {
2457                                         anon_free_pages(amp->ahp,
2458                                             svd->anon_index, seg->s_size,
2459                                             seg->s_szc);
2460                                 } else {
2461                                         anon_free(amp->ahp, svd->anon_index,
2462                                             seg->s_size);
2463                                 }
2464                         } else {
2465 
2466                                 /*
2467                                  * Shared anon map is no longer in use. Before
2468                                  * freeing its pages purge all entries from
2469                                  * pcache that belong to this amp.
2470                                  */
2471                                 ASSERT(svd->softlockcnt == 0);
2472                                 anonmap_purge(amp);
2473 
2474                                 /*
2475                                  * Shared - anon_free the entire
2476                                  * anon_map's worth of stuff and
2477                                  * release any swap reservation.
2478                                  */
2479                                 if (amp->a_szc != 0) {
2480                                         anon_shmap_free_pages(amp, 0,
2481                                             amp->size);
2482                                 } else {
2483                                         anon_free(amp->ahp, 0, amp->size);
2484                                 }
2485                                 if ((len = amp->swresv) != 0) {
2486                                         anon_unresv_zone(len,
2487                                             seg->s_as->a_proc->p_zone);
2488                                         TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2489                                             "anon proc:%p %lu %u", seg, len, 0);
2490                                 }
2491                         }
2492                         svd->amp = NULL;
2493                         ANON_LOCK_EXIT(&amp->a_rwlock);
2494                         anonmap_free(amp);
2495                 } else if (svd->type == MAP_PRIVATE) {
2496                         /*
2497                          * We had a private mapping which still has
2498                          * a held anon_map so just free up all the
2499                          * anon slot pointers that we were using.
2500                          */
2501                         if (seg->s_szc != 0) {
2502                                 anon_free_pages(amp->ahp, svd->anon_index,
2503                                     seg->s_size, seg->s_szc);
2504                         } else {
2505                                 anon_free(amp->ahp, svd->anon_index,
2506                                     seg->s_size);
2507                         }
2508                         ANON_LOCK_EXIT(&amp->a_rwlock);
2509                 } else {
2510                         ANON_LOCK_EXIT(&amp->a_rwlock);
2511                 }
2512         }
2513 
2514         /*
2515          * Release swap reservation.
2516          */
2517         if ((len = svd->swresv) != 0) {
2518                 anon_unresv_zone(svd->swresv,
2519                     seg->s_as->a_proc->p_zone);
2520                 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2521                     seg, len, 0);
2522                 if (SEG_IS_PARTIAL_RESV(seg))
2523                         seg->s_as->a_resvsize -= svd->swresv;
2524                 svd->swresv = 0;
2525         }
2526         /*
2527          * Release claim on vnode, credentials, and finally free the
2528          * private data.
2529          */
2530         if (svd->vp != NULL) {
2531                 if (svd->type == MAP_SHARED)
2532                         lgrp_shm_policy_fini(NULL, svd->vp);
2533                 VN_RELE(svd->vp);
2534                 svd->vp = NULL;
2535         }
2536         crfree(svd->cred);
2537         svd->pageprot = 0;
2538         svd->pageadvice = 0;
2539         svd->pageswap = 0;
2540         svd->cred = NULL;
2541 
2542         /*
2543          * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
2544          * still working with this segment without holding as lock (in case
2545          * it's called by pcache async thread).
2546          */
2547         ASSERT(svd->softlockcnt == 0);
2548         mutex_enter(&svd->segfree_syncmtx);
2549         mutex_exit(&svd->segfree_syncmtx);
2550 
2551         seg->s_data = NULL;
2552         kmem_cache_free(segvn_cache, svd);
2553 }
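
/*
 * A minimal sketch of the barrier idiom used above (illustrative, not new
 * code): any thread that may still reference this segment without holding
 * the address space lock brackets its work with the same mutex, so that
 * segvn_free() cannot release svd underneath it:
 *
 *	mutex_enter(&svd->segfree_syncmtx);
 *	... finish work that references svd ...
 *	mutex_exit(&svd->segfree_syncmtx);
 */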
2554 
2555 /*
2556  * Do a F_SOFTUNLOCK call over the range requested.  The range must have
2557  * already been F_SOFTLOCK'ed.
2558  * Caller must always match addr and len of a softunlock with a previous
2559  * softlock with exactly the same addr and len.
2560  */
2561 static void
2562 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
2563 {
2564         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2565         page_t *pp;
2566         caddr_t adr;
2567         struct vnode *vp;
2568         u_offset_t offset;
2569         ulong_t anon_index;
2570         struct anon_map *amp;
2571         struct anon *ap = NULL;
2572 
2573         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2574         ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2575 
2576         if ((amp = svd->amp) != NULL)
2577                 anon_index = svd->anon_index + seg_page(seg, addr);
2578 
2579         if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2580                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2581                 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2582         } else {
2583                 hat_unlock(seg->s_as->a_hat, addr, len);
2584         }
2585         for (adr = addr; adr < addr + len; adr += PAGESIZE) {
2586                 if (amp != NULL) {
2587                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2588                         if ((ap = anon_get_ptr(amp->ahp, anon_index++))
2589                             != NULL) {
2590                                 swap_xlate(ap, &vp, &offset);
2591                         } else {
2592                                 vp = svd->vp;
2593                                 offset = svd->offset +
2594                                     (uintptr_t)(adr - seg->s_base);
2595                         }
2596                         ANON_LOCK_EXIT(&amp->a_rwlock);
2597                 } else {
2598                         vp = svd->vp;
2599                         offset = svd->offset +
2600                             (uintptr_t)(adr - seg->s_base);
2601                 }
2602 
2603                 /*
2604                  * Use page_find() instead of page_lookup() to
2605                  * find the page since we know that it is locked.
2606                  */
2607                 pp = page_find(vp, offset);
2608                 if (pp == NULL) {
2609                         panic(
2610                             "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
2611                             (void *)adr, (void *)ap, (void *)vp, offset);
2612                         /*NOTREACHED*/
2613                 }
2614 
2615                 if (rw == S_WRITE) {
2616                         hat_setrefmod(pp);
2617                         if (seg->s_as->a_vbits)
2618                                 hat_setstat(seg->s_as, adr, PAGESIZE,
2619                                     P_REF | P_MOD);
2620                 } else if (rw != S_OTHER) {
2621                         hat_setref(pp);
2622                         if (seg->s_as->a_vbits)
2623                                 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
2624                 }
2625                 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2626                     "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
2627                 page_unlock(pp);
2628         }
2629         ASSERT(svd->softlockcnt >= btop(len));
2630         if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
2631                 /*
2632                  * All SOFTLOCKS are gone. Wakeup any waiting
2633                  * unmappers so they can try again to unmap.
2634                  * Check for waiters first without the mutex
2635                  * held so we don't always grab the mutex on
2636                  * softunlocks.
2637                  */
2638                 if (AS_ISUNMAPWAIT(seg->s_as)) {
2639                         mutex_enter(&seg->s_as->a_contents);
2640                         if (AS_ISUNMAPWAIT(seg->s_as)) {
2641                                 AS_CLRUNMAPWAIT(seg->s_as);
2642                                 cv_broadcast(&seg->s_as->a_cv);
2643                         }
2644                         mutex_exit(&seg->s_as->a_contents);
2645                 }
2646         }
2647 }
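
/*
 * Usage sketch for the softlock contract above (illustrative only): a
 * caller that pins a range through as_fault() must later release exactly
 * the same range, e.g.
 *
 *	if (as_fault(as->a_hat, as, addr, len, F_SOFTLOCK, S_WRITE) == 0) {
 *		... access the pinned pages ...
 *		(void) as_fault(as->a_hat, as, addr, len,
 *		    F_SOFTUNLOCK, S_WRITE);
 *	}
 */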
2648 
2649 #define PAGE_HANDLED    ((page_t *)-1)
2650 
2651 /*
2652  * Release all the pages in the NULL-terminated ppp list
2653  * which haven't already been converted to PAGE_HANDLED.
2654  */
2655 static void
2656 segvn_pagelist_rele(page_t **ppp)
2657 {
2658         for (; *ppp != NULL; ppp++) {
2659                 if (*ppp != PAGE_HANDLED)
2660                         page_unlock(*ppp);
2661         }
2662 }
2663 
2664 static int stealcow = 1;
2665 
2666 /*
2667  * Workaround for viking chip bug.  See bug id 1220902.
2668  * Fixing this down in pagefault() would require importing so much of
2669  * the as and segvn code as to be unmaintainable.
2670  */
2671 int enable_mbit_wa = 0;
2672 
2673 /*
2674  * Handles all the dirty work of getting the right
2675  * anonymous pages and loading up the translations.
2676  * This routine is called only from segvn_fault()
2677  * when looping over the range of addresses requested.
2678  *
2679  * The basic algorithm here is:
2680  *      If this is an anon_zero case
2681  *              Call anon_zero to allocate page
2682  *              Load up translation
2683  *              Return
2684  *      endif
2685  *      If this is an anon page
2686  *              Use anon_getpage to get the page
2687  *      else
2688  *              Find page in pl[] list passed in
2689  *      endif
2690  *      If not a cow
2691  *              Load up the translation to the page
2692  *              return
2693  *      endif
2694  *      Call anon_private to handle cow
2695  *      Load up (writable) translation to new page
2696  */
2697 static faultcode_t
2698 segvn_faultpage(
2699         struct hat *hat,                /* the hat to use for mapping */
2700         struct seg *seg,                /* seg_vn of interest */
2701         caddr_t addr,                   /* address in as */
2702         u_offset_t off,                 /* offset in vp */
2703         struct vpage *vpage,            /* pointer to vpage for vp, off */
2704         page_t *pl[],                   /* object source page pointer */
2705         uint_t vpprot,                  /* access allowed to object pages */
2706         enum fault_type type,           /* type of fault */
2707         enum seg_rw rw,                 /* type of access at fault */
2708         int brkcow)                     /* we may need to break cow */
2709 {
2710         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2711         page_t *pp, **ppp;
2712         uint_t pageflags = 0;
2713         page_t *anon_pl[1 + 1];
2714         page_t *opp = NULL;             /* original page */
2715         uint_t prot;
2716         int err;
2717         int cow;
2718         int claim;
2719         int steal = 0;
2720         ulong_t anon_index;
2721         struct anon *ap, *oldap;
2722         struct anon_map *amp;
2723         int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
2724         int anon_lock = 0;
2725         anon_sync_obj_t cookie;
2726 
2727         if (svd->flags & MAP_TEXT) {
2728                 hat_flag |= HAT_LOAD_TEXT;
2729         }
2730 
2731         ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
2732         ASSERT(seg->s_szc == 0);
2733         ASSERT(svd->tr_state != SEGVN_TR_INIT);
2734 
2735         /*
2736          * Initialize protection value for this page.
2737          * If we have per page protection values check it now.
2738          */
2739         if (svd->pageprot) {
2740                 uint_t protchk;
2741 
2742                 switch (rw) {
2743                 case S_READ:
2744                         protchk = PROT_READ;
2745                         break;
2746                 case S_WRITE:
2747                         protchk = PROT_WRITE;
2748                         break;
2749                 case S_EXEC:
2750                         protchk = PROT_EXEC;
2751                         break;
2752                 case S_OTHER:
2753                 default:
2754                         protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
2755                         break;
2756                 }
2757 
2758                 prot = VPP_PROT(vpage);
2759                 if ((prot & protchk) == 0)
2760                         return (FC_PROT);       /* illegal access type */
2761         } else {
2762                 prot = svd->prot;
2763         }
2764 
2765         if (type == F_SOFTLOCK) {
2766                 atomic_inc_ulong((ulong_t *)&svd->softlockcnt);
2767         }
2768 
2769         /*
2770          * Always acquire the anon array lock to prevent 2 threads from
2771          * allocating separate anon slots for the same "addr".
2772          */
2773 
2774         if ((amp = svd->amp) != NULL) {
2775                 ASSERT(RW_READ_HELD(&amp->a_rwlock));
2776                 anon_index = svd->anon_index + seg_page(seg, addr);
2777                 anon_array_enter(amp, anon_index, &cookie);
2778                 anon_lock = 1;
2779         }
2780 
2781         if (svd->vp == NULL && amp != NULL) {
2782                 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
2783                         /*
2784                          * Allocate a (normally) writable anonymous page of
2785                          * zeroes. If no advance reservations, reserve now.
2786                          */
2787                         if (svd->flags & MAP_NORESERVE) {
2788                                 if (anon_resv_zone(ptob(1),
2789                                     seg->s_as->a_proc->p_zone)) {
2790                                         atomic_add_long(&svd->swresv, ptob(1));
2791                                         atomic_add_long(&seg->s_as->a_resvsize,
2792                                             ptob(1));
2793                                 } else {
2794                                         err = ENOMEM;
2795                                         goto out;
2796                                 }
2797                         }
2798                         if ((pp = anon_zero(seg, addr, &ap,
2799                             svd->cred)) == NULL) {
2800                                 err = ENOMEM;
2801                                 goto out;       /* out of swap space */
2802                         }
2803                         /*
2804                          * Re-acquire the anon_map lock and
2805                          * initialize the anon array entry.
2806                          */
2807                         (void) anon_set_ptr(amp->ahp, anon_index, ap,
2808                             ANON_SLEEP);
2809 
2810                         ASSERT(pp->p_szc == 0);
2811 
2812                         /*
2813                          * Handle pages that have been marked for migration
2814                          */
2815                         if (lgrp_optimizations())
2816                                 page_migrate(seg, addr, &pp, 1);
2817 
2818                         if (enable_mbit_wa) {
2819                                 if (rw == S_WRITE)
2820                                         hat_setmod(pp);
2821                                 else if (!hat_ismod(pp))
2822                                         prot &= ~PROT_WRITE;
2823                         }
2824                         /*
2825                          * If AS_PAGLCK is set in a_flags (via memcntl(2)
2826                          * with MC_LOCKAS, MCL_FUTURE) and this is a
2827                          * MAP_NORESERVE segment, we may need to
2828                          * permanently lock the page as it is being faulted
2829                          * for the first time. The following text applies
2830                          * only to MAP_NORESERVE segments:
2831                          *
2832                          * As per memcntl(2), if this segment was created
2833                          * after MCL_FUTURE was applied (a "future"
2834                          * segment), its pages must be locked.  If this
2835                          * segment existed at MCL_FUTURE application (a
2836                          * "past" segment), the interface is unclear.
2837                          *
2838                          * We decide to lock only if vpage is present:
2839                          *
2840                          * - "future" segments will have a vpage array (see
2841                          *    as_map), and so will be locked as required
2842                          *
2843                          * - "past" segments may not have a vpage array,
2844                          *    depending on whether events (such as
2845                          *    mprotect) have occurred. Locking if vpage
2846                          *    exists will preserve legacy behavior.  Not
2847                          *    locking if vpage is absent, will not break
2848                          *    the interface or legacy behavior.  Note that
2849                          *    allocating vpage here if it's absent requires
2850                          *    upgrading the segvn reader lock, the cost of
2851                          *    which does not seem worthwhile.
2852                          *
2853                          * Usually testing and setting VPP_ISPPLOCK and
2854                          * VPP_SETPPLOCK requires holding the segvn lock as
2855                          * writer, but in this case all readers are
2856                          * serializing on the anon array lock.
2857                          */
2858                         if (AS_ISPGLCK(seg->s_as) && vpage != NULL &&
2859                             (svd->flags & MAP_NORESERVE) &&
2860                             !VPP_ISPPLOCK(vpage)) {
2861                                 proc_t *p = seg->s_as->a_proc;
2862                                 ASSERT(svd->type == MAP_PRIVATE);
2863                                 mutex_enter(&p->p_lock);
2864                                 if (rctl_incr_locked_mem(p, NULL, PAGESIZE,
2865                                     1) == 0) {
2866                                         claim = VPP_PROT(vpage) & PROT_WRITE;
2867                                         if (page_pp_lock(pp, claim, 0)) {
2868                                                 VPP_SETPPLOCK(vpage);
2869                                         } else {
2870                                                 rctl_decr_locked_mem(p, NULL,
2871                                                     PAGESIZE, 1);
2872                                         }
2873                                 }
2874                                 mutex_exit(&p->p_lock);
2875                         }
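
                        /*
                         * For reference (illustrative): the AS_PAGLCK
                         * condition handled above is established from
                         * userland by something like
                         *
                         *	memcntl(NULL, 0, MC_LOCKAS,
                         *	    (caddr_t)MCL_FUTURE, 0, 0);
                         *
                         * after which pages of "future" MAP_NORESERVE
                         * segments are locked here as they are first
                         * faulted in.
                         */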
2876 
2877                         ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2878                         hat_memload(hat, addr, pp, prot, hat_flag);
2879 
2880                         if (!(hat_flag & HAT_LOAD_LOCK))
2881                                 page_unlock(pp);
2882 
2883                         anon_array_exit(&cookie);
2884                         return (0);
2885                 }
2886         }
2887 
2888         /*
2889          * Obtain the page structure via anon_getpage() if it is
2890          * a private copy of an object (the result of a previous
2891          * copy-on-write).
2892          */
2893         if (amp != NULL) {
2894                 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) {
2895                         err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE,
2896                             seg, addr, rw, svd->cred);
2897                         if (err)
2898                                 goto out;
2899 
2900                         if (svd->type == MAP_SHARED) {
2901                                 /*
2902                                  * If this is a shared mapping to an
2903                                  * anon_map, then ignore the write
2904                                  * permissions returned by anon_getpage().
2905                                  * They apply to the private mappings
2906                                  * of this anon_map.
2907                                  */
2908                                 vpprot |= PROT_WRITE;
2909                         }
2910                         opp = anon_pl[0];
2911                 }
2912         }
2913 
2914         /*
2915          * Search the pl[] list passed in if it is from the
2916          * original object (i.e., not a private copy).
2917          */
2918         if (opp == NULL) {
2919                 /*
2920                  * Find original page.  We must be bringing it in
2921                  * from the list in pl[].
2922                  */
2923                 for (ppp = pl; (opp = *ppp) != NULL; ppp++) {
2924                         if (opp == PAGE_HANDLED)
2925                                 continue;
2926                         ASSERT(opp->p_vnode == svd->vp); /* XXX */
2927                         if (opp->p_offset == off)
2928                                 break;
2929                 }
2930                 if (opp == NULL) {
2931                         panic("segvn_faultpage not found");
2932                         /*NOTREACHED*/
2933                 }
2934                 *ppp = PAGE_HANDLED;
2935 
2936         }
2937 
2938         ASSERT(PAGE_LOCKED(opp));
2939 
2940         TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2941             "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0);
2942 
2943         /*
2944          * The fault is treated as a copy-on-write fault if a
2945          * write occurs on a private segment and the object
2946          * page (i.e., mapping) is write protected.  We assume
2947          * that fatal protection checks have already been made.
2948          */
2949 
2950         if (brkcow) {
2951                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2952                 cow = !(vpprot & PROT_WRITE);
2953         } else if (svd->tr_state == SEGVN_TR_ON) {
2954                  * If we are doing text replication, COW on first touch.
2955                  * If we are doing text replication COW on first touch.
2956                  */
2957                 ASSERT(amp != NULL);
2958                 ASSERT(svd->vp != NULL);
2959                 ASSERT(rw != S_WRITE);
2960                 cow = (ap == NULL);
2961         } else {
2962                 cow = 0;
2963         }
2964 
2965         /*
2966          * If not a copy-on-write case load the translation
2967          * and return.
2968          */
2969         if (cow == 0) {
2970 
2971                 /*
2972                  * Handle pages that have been marked for migration
2973                  */
2974                 if (lgrp_optimizations())
2975                         page_migrate(seg, addr, &opp, 1);
2976 
2977                 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) {
2978                         if (rw == S_WRITE)
2979                                 hat_setmod(opp);
2980                         else if (rw != S_OTHER && !hat_ismod(opp))
2981                                 prot &= ~PROT_WRITE;
2982                 }
2983 
2984                 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
2985                     (!svd->pageprot && svd->prot == (prot & vpprot)));
2986                 ASSERT(amp == NULL ||
2987                     svd->rcookie == HAT_INVALID_REGION_COOKIE);
2988                 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag,
2989                     svd->rcookie);
2990 
2991                 if (!(hat_flag & HAT_LOAD_LOCK))
2992                         page_unlock(opp);
2993 
2994                 if (anon_lock) {
2995                         anon_array_exit(&cookie);
2996                 }
2997                 return (0);
2998         }
2999 
3000         ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3001 
3002         hat_setref(opp);
3003 
3004         ASSERT(amp != NULL && anon_lock);
3005 
3006         /*
3007          * Steal the page only if it isn't a private page
3008          * since stealing a private page is not worth the effort.
3009          */
3010         if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL)
3011                 steal = 1;
3012 
3013         /*
3014          * Steal the original page if the following conditions are true:
3015          *
3016          * We are low on memory, the page is not private, page is not large,
3017          * not shared, not modified, not `locked' or if we have it `locked'
3018          * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
3019          * that the page is not shared) and if it doesn't have any
3020          * translations. page_struct_lock isn't needed to look at p_cowcnt
3021          * and p_lckcnt because we first get exclusive lock on page.
3022          */
3023         (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
3024 
3025         if (stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
3026             page_tryupgrade(opp) && !hat_ismod(opp) &&
3027             ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) ||
3028             (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 &&
3029             vpage != NULL && VPP_ISPPLOCK(vpage)))) {
3030                 /*
3031                  * Check if this page has other translations
3032                  * after unloading our translation.
3033                  */
3034                 if (hat_page_is_mapped(opp)) {
3035                         ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3036                         hat_unload(seg->s_as->a_hat, addr, PAGESIZE,
3037                             HAT_UNLOAD);
3038                 }
3039 
3040                 /*
3041                  * hat_unload() might sync back someone else's recent
3042                  * modification, so check again.
3043                  */
3044                 if (!hat_ismod(opp) && !hat_page_is_mapped(opp))
3045                         pageflags |= STEAL_PAGE;
3046         }
3047 
3048         /*
3049          * If we have a vpage pointer, see if it indicates that we have
3050          * ``locked'' the page we map -- if so, tell anon_private to
3051          * transfer the locking resource to the new page.
3052          *
3053          * See Statement at the beginning of segvn_lockop regarding
3054          * the way lockcnts/cowcnts are handled during COW.
3055          *
3056          */
3057         if (vpage != NULL && VPP_ISPPLOCK(vpage))
3058                 pageflags |= LOCK_PAGE;
3059 
3060         /*
3061          * Allocate a private page and perform the copy.
3062          * For MAP_NORESERVE reserve swap space now, unless this
3063          * is a cow fault on an existing anon page in which case
3064          * MAP_NORESERVE will have made advance reservations.
3065          */
3066         if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) {
3067                 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) {
3068                         atomic_add_long(&svd->swresv, ptob(1));
3069                         atomic_add_long(&seg->s_as->a_resvsize, ptob(1));
3070                 } else {
3071                         page_unlock(opp);
3072                         err = ENOMEM;
3073                         goto out;
3074                 }
3075         }
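
        /*
         * For reference (illustrative): the MAP_NORESERVE reservation just
         * made above typically stems from a mapping like
         *
         *	mmap(NULL, len, PROT_READ | PROT_WRITE,
         *	    MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
         *
         * which defers swap reservation until pages are first dirtied.
         */
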
3076         oldap = ap;
3077         pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3078         if (pp == NULL) {
3079                 err = ENOMEM;   /* out of swap space */
3080                 goto out;
3081         }
3082 
3083         /*
3084          * If we copied away from an anonymous page, then
3085          * we are one step closer to freeing up an anon slot.
3086          *
3087          * NOTE:  The original anon slot must be released while
3088          * holding the "anon_map" lock.  This is necessary to prevent
3089          * other threads from obtaining a pointer to the anon slot
3090          * which may be freed if its "refcnt" is 1.
3091          */
3092         if (oldap != NULL)
3093                 anon_decref(oldap);
3094 
3095         (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3096 
3097         /*
3098          * Handle pages that have been marked for migration
3099          */
3100         if (lgrp_optimizations())
3101                 page_migrate(seg, addr, &pp, 1);
3102 
3103         ASSERT(pp->p_szc == 0);
3104 
3105         ASSERT(!IS_VMODSORT(pp->p_vnode));
3106         if (enable_mbit_wa) {
3107                 if (rw == S_WRITE)
3108                         hat_setmod(pp);
3109                 else if (!hat_ismod(pp))
3110                         prot &= ~PROT_WRITE;
3111         }
3112 
3113         ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3114         hat_memload(hat, addr, pp, prot, hat_flag);
3115 
3116         if (!(hat_flag & HAT_LOAD_LOCK))
3117                 page_unlock(pp);
3118 
3119         ASSERT(anon_lock);
3120         anon_array_exit(&cookie);
3121         return (0);
3122 out:
3123         if (anon_lock)
3124                 anon_array_exit(&cookie);
3125 
3126         if (type == F_SOFTLOCK) {
3127                 atomic_dec_ulong((ulong_t *)&svd->softlockcnt);
3128         }
3129         return (FC_MAKE_ERR(err));
3130 }
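
/*
 * Note on the error return above (illustrative): FC_MAKE_ERR() packages a
 * unix errno value into a faultcode_t, so a caller can tell errno-based
 * failures apart from protection faults, e.g.
 *
 *	faultcode_t fc = segvn_faultpage(hat, seg, addr, off, vpage, pl,
 *	    vpprot, type, rw, brkcow);
 *
 *	if (fc != 0 && FC_CODE(fc) == FC_OBJERR)
 *		err = FC_ERRNO(fc);
 *
 * where FC_ERRNO() recovers the original errno.
 */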
3131 
3132 /*
3133  * relocate a bunch of smaller targ pages into one large repl page. all targ
3134  * pages must be complete pages smaller than replacement pages.
3135  * it's assumed that no page's szc can change since they are all PAGESIZE or
3136  * complete large pages locked SHARED.
3137  */
3138 static void
3139 segvn_relocate_pages(page_t **targ, page_t *replacement)
3140 {
3141         page_t *pp;
3142         pgcnt_t repl_npgs, curnpgs;
3143         pgcnt_t i;
3144         uint_t repl_szc = replacement->p_szc;
3145         page_t *first_repl = replacement;
3146         page_t *repl;
3147         spgcnt_t npgs;
3148 
3149         VM_STAT_ADD(segvnvmstats.relocatepages[0]);
3150 
3151         ASSERT(repl_szc != 0);
3152         npgs = repl_npgs = page_get_pagecnt(repl_szc);
3153 
3154         i = 0;
3155         while (repl_npgs) {
3156                 spgcnt_t nreloc;
3157                 int err;
3158                 ASSERT(replacement != NULL);
3159                 pp = targ[i];
3160                 ASSERT(pp->p_szc < repl_szc);
3161                 ASSERT(PAGE_EXCL(pp));
3162                 ASSERT(!PP_ISFREE(pp));
3163                 curnpgs = page_get_pagecnt(pp->p_szc);
3164                 if (curnpgs == 1) {
3165                         VM_STAT_ADD(segvnvmstats.relocatepages[1]);
3166                         repl = replacement;
3167                         page_sub(&replacement, repl);
3168                         ASSERT(PAGE_EXCL(repl));
3169                         ASSERT(!PP_ISFREE(repl));
3170                         ASSERT(repl->p_szc == repl_szc);
3171                 } else {
3172                         page_t *repl_savepp;
3173                         int j;
3174                         VM_STAT_ADD(segvnvmstats.relocatepages[2]);
3175                         repl_savepp = replacement;
3176                         for (j = 0; j < curnpgs; j++) {
3177                                 repl = replacement;
3178                                 page_sub(&replacement, repl);
3179                                 ASSERT(PAGE_EXCL(repl));
3180                                 ASSERT(!PP_ISFREE(repl));
3181                                 ASSERT(repl->p_szc == repl_szc);
3182                                 ASSERT(page_pptonum(targ[i + j]) ==
3183                                     page_pptonum(targ[i]) + j);
3184                         }
3185                         repl = repl_savepp;
3186                         ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs));
3187                 }
3188                 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL);
3189                 if (err || nreloc != curnpgs) {
3190                         panic("segvn_relocate_pages: "
3191                             "page_relocate failed err=%d curnpgs=%ld "
3192                             "nreloc=%ld", err, curnpgs, nreloc);
3193                 }
3194                 ASSERT(curnpgs <= repl_npgs);
3195                 repl_npgs -= curnpgs;
3196                 i += curnpgs;
3197         }
3198         ASSERT(replacement == NULL);
3199 
3200         repl = first_repl;
3201         repl_npgs = npgs;
3202         for (i = 0; i < repl_npgs; i++) {
3203                 ASSERT(PAGE_EXCL(repl));
3204                 ASSERT(!PP_ISFREE(repl));
3205                 targ[i] = repl;
3206                 page_downgrade(targ[i]);
3207                 repl++;
3208         }
3209 }
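
/*
 * Worked example for the loop above (illustrative): suppose
 * page_get_pagecnt(repl_szc) == 8 and targ[] holds one complete 4-page
 * large page followed by four PAGESIZE pages.  The first iteration peels
 * four replacement pages off and relocates the 4-page group with a single
 * page_relocate() call (curnpgs == 4); the next four iterations relocate
 * one page each, after which repl_npgs reaches 0 and all 8 constituent
 * pages are downgraded to SHARED locks.
 */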
3210 
3211 /*
3212  * Check if all pages in ppa array are complete smaller than szc pages and
3213  * their roots will still be aligned relative to their current size if the
3214  * entire ppa array is relocated into one szc page. If these conditions are
3215  * not met return 0.
3216  *
3217  * If all pages are properly aligned, attempt to upgrade their locks
3218  * to exclusive mode. If that fails, set *upgrdfail to 1 and return 0.
3219  * The caller is expected to have initialized *upgrdfail to 0.
3220  *
3221  * Return 1 if all pages are aligned and locked exclusively.
3222  *
3223  * If all pages in ppa array happen to be physically contiguous to make one
3224  * szc page and all exclusive locks are successfully obtained promote the page
3225  * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3226  */
3227 static int
3228 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3229 {
3230         page_t *pp;
3231         pfn_t pfn;
3232         pgcnt_t totnpgs = page_get_pagecnt(szc);
3233         pfn_t first_pfn;
3234         int contig = 1;
3235         pgcnt_t i;
3236         pgcnt_t j;
3237         uint_t curszc;
3238         pgcnt_t curnpgs;
3239         int root = 0;
3240 
3241         ASSERT(szc > 0);
3242 
3243         VM_STAT_ADD(segvnvmstats.fullszcpages[0]);
3244 
3245         for (i = 0; i < totnpgs; i++) {
3246                 pp = ppa[i];
3247                 ASSERT(PAGE_SHARED(pp));
3248                 ASSERT(!PP_ISFREE(pp));
3249                 pfn = page_pptonum(pp);
3250                 if (i == 0) {
3251                         if (!IS_P2ALIGNED(pfn, totnpgs)) {
3252                                 contig = 0;
3253                         } else {
3254                                 first_pfn = pfn;
3255                         }
3256                 } else if (contig && pfn != first_pfn + i) {
3257                         contig = 0;
3258                 }
3259                 if (pp->p_szc == 0) {
3260                         if (root) {
3261                                 VM_STAT_ADD(segvnvmstats.fullszcpages[1]);
3262                                 return (0);
3263                         }
3264                 } else if (!root) {
3265                         if ((curszc = pp->p_szc) >= szc) {
3266                                 VM_STAT_ADD(segvnvmstats.fullszcpages[2]);
3267                                 return (0);
3268                         }
3269                         if (curszc == 0) {
3270                                 /*
3271                                  * p_szc changed means we don't have all pages
3272                                  * locked. return failure.
3273                                  */
3274                                 VM_STAT_ADD(segvnvmstats.fullszcpages[3]);
3275                                 return (0);
3276                         }
3277                         curnpgs = page_get_pagecnt(curszc);
3278                         if (!IS_P2ALIGNED(pfn, curnpgs) ||
3279                             !IS_P2ALIGNED(i, curnpgs)) {
3280                                 VM_STAT_ADD(segvnvmstats.fullszcpages[4]);
3281                                 return (0);
3282                         }
3283                         root = 1;
3284                 } else {
3285                         ASSERT(i > 0);
3286                         VM_STAT_ADD(segvnvmstats.fullszcpages[5]);
3287                         if (pp->p_szc != curszc) {
3288                                 VM_STAT_ADD(segvnvmstats.fullszcpages[6]);
3289                                 return (0);
3290                         }
3291                         if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3292                                 panic("segvn_full_szcpages: "
3293                                     "large page not physically contiguous");
3294                         }
3295                         if (P2PHASE(pfn, curnpgs) == curnpgs - 1) {
3296                                 root = 0;
3297                         }
3298                 }
3299         }
3300 
3301         for (i = 0; i < totnpgs; i++) {
3302                 ASSERT(ppa[i]->p_szc < szc);
3303                 if (!page_tryupgrade(ppa[i])) {
3304                         for (j = 0; j < i; j++) {
3305                                 page_downgrade(ppa[j]);
3306                         }
3307                         *pszc = ppa[i]->p_szc;
3308                         *upgrdfail = 1;
3309                         VM_STAT_ADD(segvnvmstats.fullszcpages[7]);
3310                         return (0);
3311                 }
3312         }
3313 
3314         /*
3315          * When a page is put on the free cachelist its szc is set to 0.  If
3316          * the file system reclaimed pages from the cachelist, the targ pages
3317          * will be physically contiguous with 0 p_szc.  In this case just
3318          * upgrade the szc of the targ pages without any relocations.
3319          * To avoid any hat issues with previous small mappings
3320          * hat_pageunload() the target pages first.
3321          */
3322         if (contig) {
3323                 VM_STAT_ADD(segvnvmstats.fullszcpages[8]);
3324                 for (i = 0; i < totnpgs; i++) {
3325                         (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3326                 }
3327                 for (i = 0; i < totnpgs; i++) {
3328                         ppa[i]->p_szc = szc;
3329                 }
3330                 for (i = 0; i < totnpgs; i++) {
3331                         ASSERT(PAGE_EXCL(ppa[i]));
3332                         page_downgrade(ppa[i]);
3333                 }
3334                 if (pszc != NULL) {
3335                         *pszc = szc;
3336                 }
3337         }
3338         VM_STAT_ADD(segvnvmstats.fullszcpages[9]);
3339         return (1);
3340 }
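
/*
 * Caller pattern (a minimal sketch under the contract above; names are
 * local to the example, and the caller is assumed to have initialized
 * pszc to a value different from szc):
 *
 *	int upgrdfail = 0;
 *	uint_t pszc = 0;
 *
 *	if (segvn_full_szcpages(ppa, szc, &upgrdfail, &pszc)) {
 *		if (pszc == szc)
 *			pages were physically contiguous, were promoted
 *			in place and are again locked SHARED;
 *		else
 *			pages are locked EXCL and may be relocated
 *			into one szc page;
 *	} else if (upgrdfail) {
 *		a lock upgrade failed; retry with a smaller size;
 *	}
 */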
3341 
3342 /*
3343  * Create physically contiguous pages for [vp, off] - [vp, off +
3344  * page_size(szc)) range and for private segment return them in ppa array.
3345  * Pages are created either via IO or relocations.
3346  *
3347  * Return 1 on success and 0 on failure.
3348  *
3349  * If physically contiguous pages already exist for this range return 1 without
3350  * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
3351  * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE().
3352  */
3354 static int
3355 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
3356     uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
3357     int *downsize)
3359 {
3360         page_t *pplist = *ppplist;
3361         size_t pgsz = page_get_pagesize(szc);
3362         pgcnt_t pages = btop(pgsz);
3363         ulong_t start_off = off;
3364         u_offset_t eoff = off + pgsz;
3365         spgcnt_t nreloc;
3366         u_offset_t io_off = off;
3367         size_t io_len;
3368         page_t *io_pplist = NULL;
3369         page_t *done_pplist = NULL;
3370         pgcnt_t pgidx = 0;
3371         page_t *pp;
3372         page_t *newpp;
3373         page_t *targpp;
3374         int io_err = 0;
3375         int i;
3376         pfn_t pfn;
3377         ulong_t ppages;
3378         page_t *targ_pplist = NULL;
3379         page_t *repl_pplist = NULL;
3380         page_t *tmp_pplist;
3381         int nios = 0;
3382         uint_t pszc;
3383         struct vattr va;
3384 
3385         VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]);
3386 
3387         ASSERT(szc != 0);
3388         ASSERT(pplist->p_szc == szc);
3389 
3390         /*
3391          * downsize will be set to 1 only if we fail to lock pages. this will
3392          * allow subsequent faults to try to relocate the page again. If we
3393          * fail due to misalignment don't downsize and let the caller map the
3394          * whole region with small mappings to avoid more faults into the area
3395          * where we can't get large pages anyway.
3396          */
3397         *downsize = 0;
3398 
3399         while (off < eoff) {
3400                 newpp = pplist;
3401                 ASSERT(newpp != NULL);
3402                 ASSERT(PAGE_EXCL(newpp));
3403                 ASSERT(!PP_ISFREE(newpp));
3404                 /*
3405                  * we pass NULL for nrelocp to page_lookup_create()
3406                  * so that it doesn't relocate. We relocate here
3407                  * later only after we make sure we can lock all
3408                  * pages in the range we handle and they are all
3409                  * aligned.
3410                  */
3411                 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0);
3412                 ASSERT(pp != NULL);
3413                 ASSERT(!PP_ISFREE(pp));
3414                 ASSERT(pp->p_vnode == vp);
3415                 ASSERT(pp->p_offset == off);
3416                 if (pp == newpp) {
3417                         VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]);
3418                         page_sub(&pplist, pp);
3419                         ASSERT(PAGE_EXCL(pp));
3420                         ASSERT(page_iolock_assert(pp));
3421                         page_list_concat(&io_pplist, &pp);
3422                         off += PAGESIZE;
3423                         continue;
3424                 }
3425                 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]);
3426                 pfn = page_pptonum(pp);
3427                 pszc = pp->p_szc;
3428                 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL &&
3429                     IS_P2ALIGNED(pfn, pages)) {
3430                         ASSERT(repl_pplist == NULL);
3431                         ASSERT(done_pplist == NULL);
3432                         ASSERT(pplist == *ppplist);
3433                         page_unlock(pp);
3434                         page_free_replacement_page(pplist);
3435                         page_create_putback(pages);
3436                         *ppplist = NULL;
3437                         VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]);
3438                         return (1);
3439                 }
3440                 if (pszc >= szc) {
3441                         page_unlock(pp);
3442                         segvn_faultvnmpss_align_err1++;
3443                         goto out;
3444                 }
3445                 ppages = page_get_pagecnt(pszc);
3446                 if (!IS_P2ALIGNED(pfn, ppages)) {
3447                         ASSERT(pszc > 0);
3448                         /*
3449                          * sizing down to pszc won't help.
3450                          */
3451                         page_unlock(pp);
3452                         segvn_faultvnmpss_align_err2++;
3453                         goto out;
3454                 }
3455                 pfn = page_pptonum(newpp);
3456                 if (!IS_P2ALIGNED(pfn, ppages)) {
3457                         ASSERT(pszc > 0);
3458                         /*
3459                          * sizing down to pszc won't help.
3460                          */
3461                         page_unlock(pp);
3462                         segvn_faultvnmpss_align_err3++;
3463                         goto out;
3464                 }
3465                 if (!PAGE_EXCL(pp)) {
3466                         VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
3467                         page_unlock(pp);
3468                         *downsize = 1;
3469                         *ret_pszc = pp->p_szc;
3470                         goto out;
3471                 }
3472                 targpp = pp;
3473                 if (io_pplist != NULL) {
3474                         VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
3475                         io_len = off - io_off;
3476                         /*
3477                          * Some file systems like NFS don't check EOF
3478                          * conditions in VOP_PAGEIO(). Check it here
3479                          * now that pages are locked SE_EXCL. Any file
3480                          * truncation will wait until the pages are
3481                          * unlocked so no need to worry that file will
3482                          * be truncated after we check its size here.
3483                          * XXX fix NFS to remove this check.
3484                          */
3485                         va.va_mask = AT_SIZE;
3486                         if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
3487                                 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
3488                                 page_unlock(targpp);
3489                                 goto out;
3490                         }
3491                         if (btopr(va.va_size) < btopr(io_off + io_len)) {
3492                                 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
3493                                 *downsize = 1;
3494                                 *ret_pszc = 0;
3495                                 page_unlock(targpp);
3496                                 goto out;
3497                         }
3498                         io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3499                             B_READ, svd->cred, NULL);
3500                         if (io_err) {
3501                                 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
3502                                 page_unlock(targpp);
3503                                 if (io_err == EDEADLK) {
3504                                         segvn_vmpss_pageio_deadlk_err++;
3505                                 }
3506                                 goto out;
3507                         }
3508                         nios++;
3509                         VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
3510                         while (io_pplist != NULL) {
3511                                 pp = io_pplist;
3512                                 page_sub(&io_pplist, pp);
3513                                 ASSERT(page_iolock_assert(pp));
3514                                 page_io_unlock(pp);
3515                                 pgidx = (pp->p_offset - start_off) >>
3516                                     PAGESHIFT;
3517                                 ASSERT(pgidx < pages);
3518                                 ppa[pgidx] = pp;
3519                                 page_list_concat(&done_pplist, &pp);
3520                         }
3521                 }
3522                 pp = targpp;
3523                 ASSERT(PAGE_EXCL(pp));
3524                 ASSERT(pp->p_szc <= pszc);
3525                 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
3526                         VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
3527                         page_unlock(pp);
3528                         *downsize = 1;
3529                         *ret_pszc = pp->p_szc;
3530                         goto out;
3531                 }
3532                 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
3533                 /*
3534                  * page szc could have changed before the entire group was
3535                  * locked. reread page szc.
3536                  */
3537                 pszc = pp->p_szc;
3538                 ppages = page_get_pagecnt(pszc);
3539 
3540                 /* link just the roots */
3541                 page_list_concat(&targ_pplist, &pp);
3542                 page_sub(&pplist, newpp);
3543                 page_list_concat(&repl_pplist, &newpp);
3544                 off += PAGESIZE;
3545                 while (--ppages != 0) {
3546                         newpp = pplist;
3547                         page_sub(&pplist, newpp);
3548                         off += PAGESIZE;
3549                 }
3550                 io_off = off;
3551         }
3552         if (io_pplist != NULL) {
3553                 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]);
3554                 io_len = eoff - io_off;
3555                 va.va_mask = AT_SIZE;
3556                 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) {
3557                         VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]);
3558                         goto out;
3559                 }
3560                 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3561                         VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]);
3562                         *downsize = 1;
3563                         *ret_pszc = 0;
3564                         goto out;
3565                 }
3566                 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3567                     B_READ, svd->cred, NULL);
3568                 if (io_err) {
3569                         VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]);
3570                         if (io_err == EDEADLK) {
3571                                 segvn_vmpss_pageio_deadlk_err++;
3572                         }
3573                         goto out;
3574                 }
3575                 nios++;
3576                 while (io_pplist != NULL) {
3577                         pp = io_pplist;
3578                         page_sub(&io_pplist, pp);
3579                         ASSERT(page_iolock_assert(pp));
3580                         page_io_unlock(pp);
3581                         pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3582                         ASSERT(pgidx < pages);
3583                         ppa[pgidx] = pp;
3584                 }
3585         }
3586         /*
3587          * we're now bound to succeed or panic.
3588          * remove pages from done_pplist. it's not needed anymore.
3589          */
3590         while (done_pplist != NULL) {
3591                 pp = done_pplist;
3592                 page_sub(&done_pplist, pp);
3593         }
3594         VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]);
3595         ASSERT(pplist == NULL);
3596         *ppplist = NULL;
3597         while (targ_pplist != NULL) {
3598                 int ret;
3599                 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]);
3600                 ASSERT(repl_pplist);
3601                 pp = targ_pplist;
3602                 page_sub(&targ_pplist, pp);
3603                 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3604                 newpp = repl_pplist;
3605                 page_sub(&repl_pplist, newpp);
3606 #ifdef DEBUG
3607                 pfn = page_pptonum(pp);
3608                 pszc = pp->p_szc;
3609                 ppages = page_get_pagecnt(pszc);
3610                 ASSERT(IS_P2ALIGNED(pfn, ppages));
3611                 pfn = page_pptonum(newpp);
3612                 ASSERT(IS_P2ALIGNED(pfn, ppages));
3613                 ASSERT(P2PHASE(pfn, pages) == pgidx);
3614 #endif
3615                 nreloc = 0;
3616                 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL);
3617                 if (ret != 0 || nreloc == 0) {
3618                         panic("segvn_fill_vp_pages: "
3619                             "page_relocate failed");
3620                 }
3621                 pp = newpp;
3622                 while (nreloc-- != 0) {
3623                         ASSERT(PAGE_EXCL(pp));
3624                         ASSERT(pp->p_vnode == vp);
3625                         ASSERT(pgidx ==
3626                             ((pp->p_offset - start_off) >> PAGESHIFT));
3627                         ppa[pgidx++] = pp;
3628                         pp++;
3629                 }
3630         }
3631 
3632         if (svd->type == MAP_PRIVATE) {
3633                 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]);
3634                 for (i = 0; i < pages; i++) {
3635                         ASSERT(ppa[i] != NULL);
3636                         ASSERT(PAGE_EXCL(ppa[i]));
3637                         ASSERT(ppa[i]->p_vnode == vp);
3638                         ASSERT(ppa[i]->p_offset ==
3639                             start_off + (i << PAGESHIFT));
3640                         page_downgrade(ppa[i]);
3641                 }
3642                 ppa[pages] = NULL;
3643         } else {
3644                 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]);
3645                 /*
3646                  * the caller will still call VOP_GETPAGE() for shared segments
3647                  * to check FS write permissions. For private segments we map
3648                  * file read only anyway, so no VOP_GETPAGE is needed.
3649                  */
3650                 for (i = 0; i < pages; i++) {
3651                         ASSERT(ppa[i] != NULL);
3652                         ASSERT(PAGE_EXCL(ppa[i]));
3653                         ASSERT(ppa[i]->p_vnode == vp);
3654                         ASSERT(ppa[i]->p_offset ==
3655                             start_off + (i << PAGESHIFT));
3656                         page_unlock(ppa[i]);
3657                 }
3658                 ppa[0] = NULL;
3659         }
3660 
3661         return (1);
3662 out:
3663         /*
3664          * Do the cleanup. Unlock target pages we didn't relocate. They are
3665          * linked on targ_pplist by root pages. Reassemble unused replacement
3666          * and io pages back to pplist.
3667          */
3668         if (io_pplist != NULL) {
3669                 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]);
3670                 pp = io_pplist;
3671                 do {
3672                         ASSERT(pp->p_vnode == vp);
3673                         ASSERT(pp->p_offset == io_off);
3674                         ASSERT(page_iolock_assert(pp));
3675                         page_io_unlock(pp);
3676                         page_hashout(pp, NULL);
3677                         io_off += PAGESIZE;
3678                 } while ((pp = pp->p_next) != io_pplist);
3679                 page_list_concat(&io_pplist, &pplist);
3680                 pplist = io_pplist;
3681         }
3682         tmp_pplist = NULL;
3683         while (targ_pplist != NULL) {
3684                 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]);
3685                 pp = targ_pplist;
3686                 ASSERT(PAGE_EXCL(pp));
3687                 page_sub(&targ_pplist, pp);
3688 
3689                 pszc = pp->p_szc;
3690                 ppages = page_get_pagecnt(pszc);
3691                 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3692 
3693                 if (pszc != 0) {
3694                         group_page_unlock(pp);
3695                 }
3696                 page_unlock(pp);
3697 
3698                 pp = repl_pplist;
3699                 ASSERT(pp != NULL);
3700                 ASSERT(PAGE_EXCL(pp));
3701                 ASSERT(pp->p_szc == szc);
3702                 page_sub(&repl_pplist, pp);
3703 
3704                 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3705 
3706                 /* relink replacement page */
3707                 page_list_concat(&tmp_pplist, &pp);
3708                 while (--ppages != 0) {
3709                         VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]);
3710                         pp++;
3711                         ASSERT(PAGE_EXCL(pp));
3712                         ASSERT(pp->p_szc == szc);
3713                         page_list_concat(&tmp_pplist, &pp);
3714                 }
3715         }
3716         if (tmp_pplist != NULL) {
3717                 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]);
3718                 page_list_concat(&tmp_pplist, &pplist);
3719                 pplist = tmp_pplist;
3720         }
	/*
	 * At this point all pages are either on done_pplist or pplist.
	 * They can't all be on done_pplist, otherwise we'd already have
	 * been done.
	 */
	ASSERT(pplist != NULL);
	if (nios != 0) {
		VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]);
		pp = pplist;
		do {
			VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]);
			ASSERT(pp->p_szc == szc);
			ASSERT(PAGE_EXCL(pp));
			ASSERT(pp->p_vnode != vp);
			pp->p_szc = 0;
		} while ((pp = pp->p_next) != pplist);

		pp = done_pplist;
		do {
			VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]);
			ASSERT(pp->p_szc == szc);
			ASSERT(PAGE_EXCL(pp));
			ASSERT(pp->p_vnode == vp);
			pp->p_szc = 0;
		} while ((pp = pp->p_next) != done_pplist);

		while (pplist != NULL) {
			VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]);
			pp = pplist;
			page_sub(&pplist, pp);
			page_free(pp, 0);
		}

		while (done_pplist != NULL) {
			VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]);
			pp = done_pplist;
			page_sub(&done_pplist, pp);
			page_unlock(pp);
		}
		*ppplist = NULL;
		return (0);
	}
	ASSERT(pplist == *ppplist);
	if (io_err) {
		VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]);
		/*
		 * Don't size down on an I/O error; VOP_GETPAGE() may still
		 * succeed, and pplist may still be used for relocations in
		 * that case.
		 */
		return (0);
	}
	VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]);
	page_free_replacement_page(pplist);
	page_create_putback(pages);
	*ppplist = NULL;
	return (0);
}

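/*
 * Tunable: when non-zero, anonymous memory may be mapped with any supported
 * page size, rather than only PAGESIZE and the segment's preferred size.
 */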
int segvn_anypgsz = 0;

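/*
 * Back out the softlockcnt contribution that was added for an F_SOFTLOCK
 * fault once it's known the fault cannot complete.
 */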
#define	SEGVN_RESTORE_SOFTLOCK_VP(type, pages)				\
		if ((type) == F_SOFTLOCK) {				\
			atomic_add_long((ulong_t *)&(svd)->softlockcnt,	\
			    -(pages));					\
		}

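/*
 * For vnodes that track modification via page_t mod bits (IS_VMODSORT),
 * either mark every page in ppa[] modified up front on a write fault, or,
 * for other fault types, strip PROT_WRITE from the mapping if any page is
 * still clean so that the first real write faults and the mod bit stays
 * accurate.
 */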
#define	SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot)		\
		if (IS_VMODSORT((ppa)[0]->p_vnode)) {			\
			if ((rw) == S_WRITE) {				\
				for (i = 0; i < (pages); i++) {		\
					ASSERT((ppa)[i]->p_vnode ==	\
					    (ppa)[0]->p_vnode);		\
					hat_setmod((ppa)[i]);		\
				}					\
			} else if ((rw) != S_OTHER &&			\
			    ((prot) & (vpprot) & PROT_WRITE)) {		\
				for (i = 0; i < (pages); i++) {		\
					ASSERT((ppa)[i]->p_vnode ==	\
					    (ppa)[0]->p_vnode);		\
					if (!hat_ismod((ppa)[i])) {	\
						prot &= ~PROT_WRITE;	\
						break;			\
					}				\
				}					\
			}						\
		}

#ifdef	VM_STATS

#define	SEGVN_VMSTAT_FLTVNPAGES(idx)					\
		VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);

#else /* VM_STATS */

#define	SEGVN_VMSTAT_FLTVNPAGES(idx)

#endif

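/*
 * Fault in large pages for a vnode backed segment (possibly with an anon
 * map holding private COW copies).  The range [lpgaddr, lpgeaddr) is mapped
 * with the segment's preferred page size where possible, sizing the mapping
 * down or back up as allocation failures, alignment conflicts with other
 * mappings of the file, or existing page sizes dictate.
 */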
static faultcode_t
segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
    caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
    caddr_t eaddr, int brkcow)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon_map *amp = svd->amp;
	uchar_t segtype = svd->type;
	uint_t szc = seg->s_szc;
	size_t pgsz = page_get_pagesize(szc);
	size_t maxpgsz = pgsz;
	pgcnt_t pages = btop(pgsz);
	pgcnt_t maxpages = pages;
	size_t ppasize = (pages + 1) * sizeof (page_t *);
	caddr_t a = lpgaddr;
	caddr_t maxlpgeaddr = lpgeaddr;
	u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
	ulong_t aindx = svd->anon_index + seg_page(seg, a);
	struct vpage *vpage = (svd->vpage != NULL) ?
	    &svd->vpage[seg_page(seg, a)] : NULL;
	vnode_t *vp = svd->vp;
	page_t **ppa;
	uint_t	pszc;
	size_t	ppgsz;
	pgcnt_t	ppages;
	faultcode_t err = 0;
	int ierr;
	int vop_size_err = 0;
	uint_t protchk, prot, vpprot;
	ulong_t i;
	int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
	anon_sync_obj_t an_cookie;
	enum seg_rw arw;
	int alloc_failed = 0;
	int adjszc_chk;
	struct vattr va;
	page_t *pplist;
	pfn_t pfn;
	int physcontig;
	int upgrdfail;
	int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
	int tron = (svd->tr_state == SEGVN_TR_ON);

	ASSERT(szc != 0);
	ASSERT(vp != NULL);
	ASSERT(brkcow == 0 || amp != NULL);
	ASSERT(tron == 0 || amp != NULL);
	ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
	ASSERT(!(svd->flags & MAP_NORESERVE));
	ASSERT(type != F_SOFTUNLOCK);
	ASSERT(IS_P2ALIGNED(a, maxpgsz));
	ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
	ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
	ASSERT(seg->s_szc < NBBY * sizeof (int));
	ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
	ASSERT(svd->tr_state != SEGVN_TR_INIT);

	VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]);
	VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]);

	if (svd->flags & MAP_TEXT) {
		hat_flag |= HAT_LOAD_TEXT;
	}

	if (svd->pageprot) {
		switch (rw) {
		case S_READ:
			protchk = PROT_READ;
			break;
		case S_WRITE:
			protchk = PROT_WRITE;
			break;
		case S_EXEC:
			protchk = PROT_EXEC;
			break;
		case S_OTHER:
		default:
			protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
			break;
		}
	} else {
		prot = svd->prot;
		/* caller has already done segment level protection check. */
	}

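	/*
	 * For a write fault on a MAP_PRIVATE segment the file's pages are
	 * only ever read; the write itself goes to private anon copies.
	 * Ask VOP_GETPAGE() for read access in that case.
	 */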
	if (rw == S_WRITE && segtype == MAP_PRIVATE) {
		SEGVN_VMSTAT_FLTVNPAGES(2);
		arw = S_READ;
	} else {
		arw = rw;
	}

	ppa = kmem_alloc(ppasize, KM_SLEEP);

	VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);

	for (;;) {
		adjszc_chk = 0;
		for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
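			/*
			 * After the first pass, try to grow szc back toward
			 * the segment's preferred size once the address
			 * becomes suitably aligned, skipping any size that
			 * has already failed allocation.
			 */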
			if (adjszc_chk) {
				while (szc < seg->s_szc) {
					uintptr_t e;
					uint_t tszc;
					tszc = segvn_anypgsz_vnode ? szc + 1 :
					    seg->s_szc;
					ppgsz = page_get_pagesize(tszc);
					if (!IS_P2ALIGNED(a, ppgsz) ||
					    ((alloc_failed >> tszc) & 0x1)) {
						break;
					}
					SEGVN_VMSTAT_FLTVNPAGES(4);
					szc = tszc;
					pgsz = ppgsz;
					pages = btop(pgsz);
					e = P2ROUNDUP((uintptr_t)eaddr, pgsz);
					lpgeaddr = (caddr_t)e;
				}
			}

		again:
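			/*
			 * If private COW copies of this large page range
			 * already exist in the anon map, delegate the whole
			 * maxpgsz chunk to the anon fault path.  Anon pages
			 * are created maxpages at a time, so one populated
			 * slot implies the entire range is populated.
			 */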
			if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) {
				ASSERT(IS_P2ALIGNED(aindx, maxpages));
				ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
				anon_array_enter(amp, aindx, &an_cookie);
				if (anon_get_ptr(amp->ahp, aindx) != NULL) {
					SEGVN_VMSTAT_FLTVNPAGES(5);
					ASSERT(anon_pages(amp->ahp, aindx,
					    maxpages) == maxpages);
					anon_array_exit(&an_cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);
					err = segvn_fault_anonpages(hat, seg,
					    a, a + maxpgsz, type, rw,
					    MAX(a, addr),
					    MIN(a + maxpgsz, eaddr), brkcow);
					if (err != 0) {
						SEGVN_VMSTAT_FLTVNPAGES(6);
						goto out;
					}
					if (szc < seg->s_szc) {
						szc = seg->s_szc;
						pgsz = maxpgsz;
						pages = maxpages;
						lpgeaddr = maxlpgeaddr;
					}
					goto next;
				} else {
					ASSERT(anon_pages(amp->ahp, aindx,
					    maxpages) == 0);
					SEGVN_VMSTAT_FLTVNPAGES(7);
					anon_array_exit(&an_cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);
				}
			}
			ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz));
			ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz));

			if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
				ASSERT(vpage != NULL);
				prot = VPP_PROT(vpage);
				ASSERT(sameprot(seg, a, maxpgsz));
				if ((prot & protchk) == 0) {
					SEGVN_VMSTAT_FLTVNPAGES(8);
					err = FC_PROT;
					goto out;
				}
			}
			if (type == F_SOFTLOCK) {
				atomic_add_long((ulong_t *)&svd->softlockcnt,
				    pages);
			}

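			/*
			 * Preallocate a replacement large page unless the
			 * file's pages already happen to be physically
			 * contiguous.  If the filesystem supports pageio
			 * (SEGVN_PAGEIO), segvn_fill_vp_pages() may populate
			 * the replacement page directly, letting us skip
			 * VOP_GETPAGE() entirely for private mappings.
			 */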
			pplist = NULL;
			physcontig = 0;
			ppa[0] = NULL;
			if (!brkcow && !tron && szc &&
			    !page_exists_physcontig(vp, off, szc,
			    segtype == MAP_PRIVATE ? ppa : NULL)) {
				SEGVN_VMSTAT_FLTVNPAGES(9);
				if (page_alloc_pages(vp, seg, a, &pplist, NULL,
				    szc, 0, 0) && type != F_SOFTLOCK) {
					SEGVN_VMSTAT_FLTVNPAGES(10);
					pszc = 0;
					ierr = -1;
					alloc_failed |= (1 << szc);
					break;
				}
				if (pplist != NULL &&
				    vp->v_mpssdata == SEGVN_PAGEIO) {
					int downsize;
					SEGVN_VMSTAT_FLTVNPAGES(11);
					physcontig = segvn_fill_vp_pages(svd,
					    vp, off, szc, ppa, &pplist,
					    &pszc, &downsize);
					ASSERT(!physcontig || pplist == NULL);
					if (!physcontig && downsize &&
					    type != F_SOFTLOCK) {
						ASSERT(pplist == NULL);
						SEGVN_VMSTAT_FLTVNPAGES(12);
						ierr = -1;
						break;
					}
					ASSERT(!physcontig ||
					    segtype == MAP_PRIVATE ||
					    ppa[0] == NULL);
					if (physcontig && ppa[0] == NULL) {
						physcontig = 0;
					}
				}
			} else if (!brkcow && !tron && szc && ppa[0] != NULL) {
				SEGVN_VMSTAT_FLTVNPAGES(13);
				ASSERT(segtype == MAP_PRIVATE);
				physcontig = 1;
			}

			if (!physcontig) {
				SEGVN_VMSTAT_FLTVNPAGES(14);
				ppa[0] = NULL;
				ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
				    &vpprot, ppa, pgsz, seg, a, arw,
				    svd->cred, NULL);
#ifdef DEBUG
				if (ierr == 0) {
					for (i = 0; i < pages; i++) {
						ASSERT(PAGE_LOCKED(ppa[i]));
						ASSERT(!PP_ISFREE(ppa[i]));
						ASSERT(ppa[i]->p_vnode == vp);
						ASSERT(ppa[i]->p_offset ==
						    off + (i << PAGESHIFT));
					}
				}
#endif /* DEBUG */
				if (segtype == MAP_PRIVATE) {
					SEGVN_VMSTAT_FLTVNPAGES(15);
					vpprot &= ~PROT_WRITE;
				}
			} else {
				ASSERT(segtype == MAP_PRIVATE);
				SEGVN_VMSTAT_FLTVNPAGES(16);
				vpprot = PROT_ALL & ~PROT_WRITE;
				ierr = 0;
			}

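			/*
			 * VOP_GETPAGE() failure.  If the failing offset is
			 * within the file this is a hard error.  If only the
			 * tail of the large page hangs past EOF we can
			 * shrink the mapped region instead, except for COW,
			 * text replication and softlock faults, which
			 * instead demote the segment below.
			 */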
			if (ierr != 0) {
				SEGVN_VMSTAT_FLTVNPAGES(17);
				if (pplist != NULL) {
					SEGVN_VMSTAT_FLTVNPAGES(18);
					page_free_replacement_page(pplist);
					page_create_putback(pages);
				}
				SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
				if (a + pgsz <= eaddr) {
					SEGVN_VMSTAT_FLTVNPAGES(19);
					err = FC_MAKE_ERR(ierr);
					goto out;
				}
				va.va_mask = AT_SIZE;
				if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
					SEGVN_VMSTAT_FLTVNPAGES(20);
					err = FC_MAKE_ERR(EIO);
					goto out;
				}
				if (btopr(va.va_size) >= btopr(off + pgsz)) {
					SEGVN_VMSTAT_FLTVNPAGES(21);
					err = FC_MAKE_ERR(ierr);
					goto out;
				}
				if (btopr(va.va_size) <
				    btopr(off + (eaddr - a))) {
					SEGVN_VMSTAT_FLTVNPAGES(22);
					err = FC_MAKE_ERR(ierr);
					goto out;
				}
				if (brkcow || tron || type == F_SOFTLOCK) {
					/* can't reduce map area */
					SEGVN_VMSTAT_FLTVNPAGES(23);
					vop_size_err = 1;
					goto out;
				}
				SEGVN_VMSTAT_FLTVNPAGES(24);
				ASSERT(szc != 0);
				pszc = 0;
				ierr = -1;
				break;
			}

			if (amp != NULL) {
				ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
				anon_array_enter(amp, aindx, &an_cookie);
			}
			if (amp != NULL &&
			    anon_get_ptr(amp->ahp, aindx) != NULL) {
				ulong_t taindx = P2ALIGN(aindx, maxpages);

				SEGVN_VMSTAT_FLTVNPAGES(25);
				ASSERT(anon_pages(amp->ahp, taindx,
				    maxpages) == maxpages);
				for (i = 0; i < pages; i++) {
					page_unlock(ppa[i]);
				}
				anon_array_exit(&an_cookie);
				ANON_LOCK_EXIT(&amp->a_rwlock);
				if (pplist != NULL) {
					page_free_replacement_page(pplist);
					page_create_putback(pages);
				}
				SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
				if (szc < seg->s_szc) {
					SEGVN_VMSTAT_FLTVNPAGES(26);
					/*
					 * For private segments SOFTLOCK
					 * either always breaks cow (any rw
					 * type except S_READ_NOCOW) or the
					 * address space is locked as writer
					 * (S_READ_NOCOW case) and anon slots
					 * can't show up on the second check.
					 * Therefore if we are here for the
					 * SOFTLOCK case it must be a cow
					 * break, but a cow break never
					 * reduces szc, and text replication
					 * (tron) works as a cow break here.
					 * Hence the assert below.
					 */
					ASSERT(!brkcow && !tron &&
					    type != F_SOFTLOCK);
					pszc = seg->s_szc;
					ierr = -2;
					break;
				}
				ASSERT(IS_P2ALIGNED(a, maxpgsz));
				goto again;
			}
#ifdef DEBUG
			if (amp != NULL) {
				ulong_t taindx = P2ALIGN(aindx, maxpages);
				ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
			}
#endif /* DEBUG */

			if (brkcow || tron) {
				ASSERT(amp != NULL);
				ASSERT(pplist == NULL);
				ASSERT(szc == seg->s_szc);
				ASSERT(IS_P2ALIGNED(a, maxpgsz));
				ASSERT(IS_P2ALIGNED(aindx, maxpages));
				SEGVN_VMSTAT_FLTVNPAGES(27);
				ierr = anon_map_privatepages(amp, aindx, szc,
				    seg, a, prot, ppa, vpage, segvn_anypgsz,
				    tron ? PG_LOCAL : 0, svd->cred);
				if (ierr != 0) {
					SEGVN_VMSTAT_FLTVNPAGES(28);
					anon_array_exit(&an_cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);
					SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
					err = FC_MAKE_ERR(ierr);
					goto out;
				}

				ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
				/*
				 * p_szc can't be changed for locked
				 * swapfs pages.
				 */
				ASSERT(svd->rcookie ==
				    HAT_INVALID_REGION_COOKIE);
				hat_memload_array(hat, a, pgsz, ppa, prot,
				    hat_flag);

				if (!(hat_flag & HAT_LOAD_LOCK)) {
					SEGVN_VMSTAT_FLTVNPAGES(29);
					for (i = 0; i < pages; i++) {
						page_unlock(ppa[i]);
					}
				}
				anon_array_exit(&an_cookie);
				ANON_LOCK_EXIT(&amp->a_rwlock);
				goto next;
			}

			ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
			    (!svd->pageprot && svd->prot == (prot & vpprot)));

			pfn = page_pptonum(ppa[0]);
			/*
			 * hat_page_demote() needs an SE_EXCL lock on one of
			 * the constituent page_t's and it decreases the
			 * root's p_szc last.  This means that if the root's
			 * p_szc equals szc and all its constituent pages are
			 * locked, any hat_page_demote() that could have
			 * changed p_szc to szc is already done and no new
			 * hat_page_demote() can start for this large page.
			 */

			/*
			 * We need to make sure the same mapping size is used
			 * for the same address range if there's a chance the
			 * address is already mapped, because the hat layer
			 * panics when a translation is loaded for a range
			 * already mapped with a different page size.  We
			 * achieve this by always using the largest possible
			 * page size subject to the constraints of the page's
			 * size, the segment's page size and page alignment.
			 * Since mappings are invalidated whenever those
			 * constraints change (making the previous mapping
			 * size unusable), no mapping size conflicts should
			 * occur.
			 */

		chkszc:
			if ((pszc = ppa[0]->p_szc) == szc &&
			    IS_P2ALIGNED(pfn, pages)) {

				SEGVN_VMSTAT_FLTVNPAGES(30);
#ifdef DEBUG
				for (i = 0; i < pages; i++) {
					ASSERT(PAGE_LOCKED(ppa[i]));
					ASSERT(!PP_ISFREE(ppa[i]));
					ASSERT(page_pptonum(ppa[i]) ==
					    pfn + i);
					ASSERT(ppa[i]->p_szc == szc);
					ASSERT(ppa[i]->p_vnode == vp);
					ASSERT(ppa[i]->p_offset ==
					    off + (i << PAGESHIFT));
				}
#endif /* DEBUG */
				/*
				 * All pages are of the szc we need and they
				 * are all locked, so their szc can't change;
				 * load the translations.
				 *
				 * If the page got promoted since the last
				 * check, we no longer need pplist.
				 */
				if (pplist != NULL) {
					page_free_replacement_page(pplist);
					page_create_putback(pages);
				}
				if (PP_ISMIGRATE(ppa[0])) {
					page_migrate(seg, a, ppa, pages);
				}
				SEGVN_UPDATE_MODBITS(ppa, pages, rw,
				    prot, vpprot);
				hat_memload_array_region(hat, a, pgsz,
				    ppa, prot & vpprot, hat_flag,
				    svd->rcookie);

				if (!(hat_flag & HAT_LOAD_LOCK)) {
					for (i = 0; i < pages; i++) {
						page_unlock(ppa[i]);
					}
				}
				if (amp != NULL) {
					anon_array_exit(&an_cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);
				}
				goto next;
			}

			/*
			 * See if upsize is possible.
			 */
			if (pszc > szc && szc < seg->s_szc &&
			    (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
				pgcnt_t aphase;
				uint_t pszc1 = MIN(pszc, seg->s_szc);
				ppgsz = page_get_pagesize(pszc1);
				ppages = btop(ppgsz);
				aphase = btop(P2PHASE((uintptr_t)a, ppgsz));

				ASSERT(type != F_SOFTLOCK);

				SEGVN_VMSTAT_FLTVNPAGES(31);
				if (aphase != P2PHASE(pfn, ppages)) {
					segvn_faultvnmpss_align_err4++;
				} else {
					SEGVN_VMSTAT_FLTVNPAGES(32);
					if (pplist != NULL) {
						page_t *pl = pplist;
						page_free_replacement_page(pl);
						page_create_putback(pages);
					}
					for (i = 0; i < pages; i++) {
						page_unlock(ppa[i]);
					}
					if (amp != NULL) {
						anon_array_exit(&an_cookie);
						ANON_LOCK_EXIT(&amp->a_rwlock);
					}
					pszc = pszc1;
					ierr = -2;
					break;
				}
			}

			/*
			 * Check whether we should fall back to the smallest
			 * (base page) mapping size.
			 */
			upgrdfail = 0;
			if (szc == 0 ||
			    (pszc >= szc &&
			    !IS_P2ALIGNED(pfn, pages)) ||
			    (pszc < szc &&
			    !segvn_full_szcpages(ppa, szc, &upgrdfail,
			    &pszc))) {

				if (upgrdfail && type != F_SOFTLOCK) {
					/*
					 * segvn_full_szcpages failed to lock
					 * all pages EXCL. Size down.
					 */
					ASSERT(pszc < szc);

					SEGVN_VMSTAT_FLTVNPAGES(33);

					if (pplist != NULL) {
						page_t *pl = pplist;
						page_free_replacement_page(pl);
						page_create_putback(pages);
					}

					for (i = 0; i < pages; i++) {
						page_unlock(ppa[i]);
					}
					if (amp != NULL) {
						anon_array_exit(&an_cookie);
						ANON_LOCK_EXIT(&amp->a_rwlock);
					}
					ierr = -1;
					break;
				}
				if (szc != 0 && !upgrdfail) {
					segvn_faultvnmpss_align_err5++;
				}
				SEGVN_VMSTAT_FLTVNPAGES(34);
				if (pplist != NULL) {
					page_free_replacement_page(pplist);
					page_create_putback(pages);
				}
				SEGVN_UPDATE_MODBITS(ppa, pages, rw,
				    prot, vpprot);
				if (upgrdfail && segvn_anypgsz_vnode) {
					/* SOFTLOCK case */
					hat_memload_array_region(hat, a, pgsz,
					    ppa, prot & vpprot, hat_flag,
					    svd->rcookie);
				} else {
					for (i = 0; i < pages; i++) {
						hat_memload_region(hat,
						    a + (i << PAGESHIFT),
						    ppa[i], prot & vpprot,
						    hat_flag, svd->rcookie);
					}
				}
				if (!(hat_flag & HAT_LOAD_LOCK)) {
					for (i = 0; i < pages; i++) {
						page_unlock(ppa[i]);
					}
				}
				if (amp != NULL) {
					anon_array_exit(&an_cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);
				}
				goto next;
			}

			if (pszc == szc) {
				/*
				 * segvn_full_szcpages() upgraded pages szc.
				 */
				ASSERT(pszc == ppa[0]->p_szc);
				ASSERT(IS_P2ALIGNED(pfn, pages));
				goto chkszc;
			}

			if (pszc > szc) {
				kmutex_t *szcmtx;
				SEGVN_VMSTAT_FLTVNPAGES(35);
				/*
				 * p_szc of ppa[0] can change since we haven't
				 * locked all constituent pages.  Call
				 * page_szc_lock() to prevent szc changes.
				 * This should be a rare case that happens
				 * when multiple segments use a different page
				 * size to map the same file offsets.
				 */
				szcmtx = page_szc_lock(ppa[0]);
				pszc = ppa[0]->p_szc;
				ASSERT(szcmtx != NULL || pszc == 0);
				ASSERT(ppa[0]->p_szc <= pszc);
				if (pszc <= szc) {
					SEGVN_VMSTAT_FLTVNPAGES(36);
					if (szcmtx != NULL) {
						mutex_exit(szcmtx);
					}
					goto chkszc;
				}
				if (pplist != NULL) {
					/*
					 * The page got promoted since the
					 * last check, so we don't need the
					 * preallocated large page anymore.
					 */
					SEGVN_VMSTAT_FLTVNPAGES(37);
					page_free_replacement_page(pplist);
					page_create_putback(pages);
				}
				SEGVN_UPDATE_MODBITS(ppa, pages, rw,
				    prot, vpprot);
				hat_memload_array_region(hat, a, pgsz, ppa,
				    prot & vpprot, hat_flag, svd->rcookie);
				mutex_exit(szcmtx);
				if (!(hat_flag & HAT_LOAD_LOCK)) {
					for (i = 0; i < pages; i++) {
						page_unlock(ppa[i]);
					}
				}
				if (amp != NULL) {
					anon_array_exit(&an_cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);
				}
				goto next;
			}

			/*
			 * If the page got demoted since the last check, we
			 * may not have been able to allocate a large page
			 * earlier.  Allocate one now.
			 */
			if (pplist == NULL &&
			    page_alloc_pages(vp, seg, a, &pplist, NULL,
			    szc, 0, 0) && type != F_SOFTLOCK) {
				SEGVN_VMSTAT_FLTVNPAGES(38);
				for (i = 0; i < pages; i++) {
					page_unlock(ppa[i]);
				}
				if (amp != NULL) {
					anon_array_exit(&an_cookie);
					ANON_LOCK_EXIT(&amp->a_rwlock);
				}
				ierr = -1;
				alloc_failed |= (1 << szc);
				break;
			}

			SEGVN_VMSTAT_FLTVNPAGES(39);

			if (pplist != NULL) {
				segvn_relocate_pages(ppa, pplist);
#ifdef DEBUG
			} else {
				ASSERT(type == F_SOFTLOCK);
				SEGVN_VMSTAT_FLTVNPAGES(40);
#endif /* DEBUG */
			}

			SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);

			if (pplist == NULL && segvn_anypgsz_vnode == 0) {
				ASSERT(type == F_SOFTLOCK);
				for (i = 0; i < pages; i++) {
					ASSERT(ppa[i]->p_szc < szc);
					hat_memload_region(hat,
					    a + (i << PAGESHIFT),
					    ppa[i], prot & vpprot, hat_flag,
					    svd->rcookie);
				}
			} else {
				ASSERT(pplist != NULL || type == F_SOFTLOCK);
				hat_memload_array_region(hat, a, pgsz, ppa,
				    prot & vpprot, hat_flag, svd->rcookie);
			}
			if (!(hat_flag & HAT_LOAD_LOCK)) {
				for (i = 0; i < pages; i++) {
					ASSERT(PAGE_SHARED(ppa[i]));
					page_unlock(ppa[i]);
				}
			}
			if (amp != NULL) {
				anon_array_exit(&an_cookie);
				ANON_LOCK_EXIT(&amp->a_rwlock);
			}

		next:
			if (vpage != NULL) {
				vpage += pages;
			}
			adjszc_chk = 1;
		}
		if (a == lpgeaddr)
			break;
		ASSERT(a < lpgeaddr);

		ASSERT(!brkcow && !tron && type != F_SOFTLOCK);

		/*
		 * ierr == -1 means we failed to map with a large page,
		 * either due to allocation/relocation failures or
		 * misalignment with other mappings to this file.
		 *
		 * ierr == -2 means some other thread allocated a large page
		 * after we gave up trying to map with a large page.  Retry
		 * with a larger mapping.
		 */
		ASSERT(ierr == -1 || ierr == -2);
		ASSERT(ierr == -2 || szc != 0);
		ASSERT(ierr == -1 || szc < seg->s_szc);
		if (ierr == -2) {
			SEGVN_VMSTAT_FLTVNPAGES(41);
			ASSERT(pszc > szc && pszc <= seg->s_szc);
			szc = pszc;
		} else if (segvn_anypgsz_vnode) {
			SEGVN_VMSTAT_FLTVNPAGES(42);
			szc--;
		} else {
			SEGVN_VMSTAT_FLTVNPAGES(43);
			ASSERT(pszc < szc);
			/*
			 * Another process created a pszc large page, but we
			 * still have to drop all the way to szc 0 since
			 * intermediate sizes aren't allowed here.
			 */
			szc = 0;
		}

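		/*
		 * For example, with seg->s_szc == 2 and
		 * segvn_anypgsz_vnode == 0, an allocation failure
		 * (ierr == -1) drops szc straight to 0, while a concurrent
		 * promotion (ierr == -2) restarts the loop at pszc.
		 */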
		pgsz = page_get_pagesize(szc);
		pages = btop(pgsz);
		if (ierr == -2) {
			/*
			 * Size up case. Note lpgaddr may only be needed for
			 * softlock case so we don't adjust it here.
			 */
			a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
			ASSERT(a >= lpgaddr);
			lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
			off = svd->offset + (uintptr_t)(a - seg->s_base);
			aindx = svd->anon_index + seg_page(seg, a);
			vpage = (svd->vpage != NULL) ?
			    &svd->vpage[seg_page(seg, a)] : NULL;
		} else {
			/*
			 * Size down case. Note lpgaddr may only be needed for
			 * softlock case so we don't adjust it here.
			 */
			ASSERT(IS_P2ALIGNED(a, pgsz));
			ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
			lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
			ASSERT(a < lpgeaddr);
			if (a < addr) {
				SEGVN_VMSTAT_FLTVNPAGES(44);
				/*
				 * The beginning of the large page region can
				 * be pulled to the right to make a smaller
				 * region. We haven't yet faulted a single
				 * page.
				 */
				a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
				ASSERT(a >= lpgaddr);
				off = svd->offset +
				    (uintptr_t)(a - seg->s_base);
				aindx = svd->anon_index + seg_page(seg, a);
				vpage = (svd->vpage != NULL) ?
				    &svd->vpage[seg_page(seg, a)] : NULL;
			}
		}
	}
out:
	kmem_free(ppa, ppasize);
	if (!err && !vop_size_err) {
		SEGVN_VMSTAT_FLTVNPAGES(45);
		return (0);
	}
	if (type == F_SOFTLOCK && a > lpgaddr) {
		SEGVN_VMSTAT_FLTVNPAGES(46);
		segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
	}
	if (!vop_size_err) {
		SEGVN_VMSTAT_FLTVNPAGES(47);
		return (err);
	}
	ASSERT(brkcow || tron || type == F_SOFTLOCK);
	/*
	 * Large page end is mapped beyond the end of file and it's a cow
	 * fault (can be a text replication induced cow) or softlock so we
	 * can't reduce the map area.  For now just demote the segment.
	 * This should really only happen if the end of the file changed
	 * after the mapping was established, since when large page segments
	 * are created we make sure they don't extend beyond the end of the
	 * file.
	 */
	SEGVN_VMSTAT_FLTVNPAGES(48);

	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
	err = 0;
	if (seg->s_szc != 0) {
		segvn_fltvnpages_clrszc_cnt++;
		ASSERT(svd->softlockcnt == 0);
		err = segvn_clrszc(seg);
		if (err != 0) {
			segvn_fltvnpages_clrszc_err++;
		}
	}
	ASSERT(err || seg->s_szc == 0);
	SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
	/* segvn_fault will do its job as if szc had been zero to begin with */
	return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err));
}

/*
 * This routine will attempt to fault in one large page.
 * It will use smaller pages if that fails.
 * It should only be called for pure anonymous segments.
 */
static faultcode_t
segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
    caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
    caddr_t eaddr, int brkcow)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct anon_map *amp = svd->amp;
	uchar_t segtype = svd->type;
	uint_t szc = seg->s_szc;
	size_t pgsz = page_get_pagesize(szc);
	size_t maxpgsz = pgsz;
	pgcnt_t pages = btop(pgsz);
	uint_t ppaszc = szc;
	caddr_t a = lpgaddr;
	ulong_t aindx = svd->anon_index + seg_page(seg, a);
	struct vpage *vpage = (svd->vpage != NULL) ?
	    &svd->vpage[seg_page(seg, a)] : NULL;
	page_t **ppa;
	uint_t	ppa_szc;
	faultcode_t err;
	int ierr;
	uint_t protchk, prot, vpprot;
	ulong_t i;
	int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
	anon_sync_obj_t cookie;
	int adjszc_chk;
	int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;

	ASSERT(szc != 0);
	ASSERT(amp != NULL);
	ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
	ASSERT(!(svd->flags & MAP_NORESERVE));
	ASSERT(type != F_SOFTUNLOCK);
	ASSERT(IS_P2ALIGNED(a, maxpgsz));
	ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
	ASSERT(svd->tr_state != SEGVN_TR_INIT);

	ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));

	VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]);
	VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]);

	if (svd->flags & MAP_TEXT) {
		hat_flag |= HAT_LOAD_TEXT;
	}

	if (svd->pageprot) {
		switch (rw) {
		case S_READ:
			protchk = PROT_READ;
			break;
		case S_WRITE:
			protchk = PROT_WRITE;
			break;
		case S_EXEC:
			protchk = PROT_EXEC;
			break;
		case S_OTHER:
		default:
			protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
			break;
		}
		VM_STAT_ADD(segvnvmstats.fltanpages[2]);
	} else {
		prot = svd->prot;
		/* caller has already done segment level protection check. */
	}

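	/*
	 * The page list array comes from a kmem cache dedicated to this szc,
	 * so it is already sized for a full large page's worth of page
	 * pointers.
	 */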
	ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	for (;;) {
		adjszc_chk = 0;
		for (; a < lpgeaddr; a += pgsz, aindx += pages) {
			if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
				VM_STAT_ADD(segvnvmstats.fltanpages[3]);
				ASSERT(vpage != NULL);
				prot = VPP_PROT(vpage);
				ASSERT(sameprot(seg, a, maxpgsz));
				if ((prot & protchk) == 0) {
					err = FC_PROT;
					goto error;
				}
			}
			if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) &&
			    pgsz < maxpgsz) {
				ASSERT(a > lpgaddr);
				szc = seg->s_szc;
				pgsz = maxpgsz;
				pages = btop(pgsz);
				ASSERT(IS_P2ALIGNED(aindx, pages));
				lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr,
				    pgsz);
			}
			if (type == F_SOFTLOCK) {
				atomic_add_long((ulong_t *)&svd->softlockcnt,
				    pages);
			}
			anon_array_enter(amp, aindx, &cookie);
			ppa_szc = (uint_t)-1;
			ierr = anon_map_getpages(amp, aindx, szc, seg, a,
			    prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow,
			    segvn_anypgsz, pgflags, svd->cred);
			if (ierr != 0) {
				anon_array_exit(&cookie);
				VM_STAT_ADD(segvnvmstats.fltanpages[4]);
				if (type == F_SOFTLOCK) {
					atomic_add_long(
					    (ulong_t *)&svd->softlockcnt,
					    -pages);
				}
				if (ierr > 0) {
					VM_STAT_ADD(segvnvmstats.fltanpages[6]);
					err = FC_MAKE_ERR(ierr);
					goto error;
				}
				break;
			}

			ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));

			ASSERT(segtype == MAP_SHARED ||
			    ppa[0]->p_szc <= szc);
			ASSERT(segtype == MAP_PRIVATE ||
			    ppa[0]->p_szc >= szc);

			/*
			 * Handle pages that have been marked for migration
			 */
			if (lgrp_optimizations())
				page_migrate(seg, a, ppa, pages);

			ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);

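			/*
			 * Shared anon pages never go through COW, so as far
			 * as the anon object is concerned they are writable;
			 * the per-page protection was already checked above.
			 */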
			if (segtype == MAP_SHARED) {
				vpprot |= PROT_WRITE;
			}

			hat_memload_array(hat, a, pgsz, ppa,
			    prot & vpprot, hat_flag);

			if (hat_flag & HAT_LOAD_LOCK) {
				VM_STAT_ADD(segvnvmstats.fltanpages[7]);
			} else {
				VM_STAT_ADD(segvnvmstats.fltanpages[8]);
				for (i = 0; i < pages; i++)
					page_unlock(ppa[i]);
			}
			if (vpage != NULL)
				vpage += pages;

			anon_array_exit(&cookie);
			adjszc_chk = 1;
		}
		if (a == lpgeaddr)
			break;
		ASSERT(a < lpgeaddr);
4786                 /*
4787                  * ierr == -1 means we failed to allocate a large page,
4788                  * so do a size down operation.
4789                  *
4790                  * ierr == -2 means some other process that privately shares
4791                  * pages with this process has allocated a larger page and we
4792                  * need to retry with larger pages. So do a size up
4793                  * operation. This relies on the fact that large pages are
4794                  * never partially shared, i.e., if we share any constituent
4795                  * page of a large page with another process we must share the
4796                  * entire large page. Note this cannot happen in the SOFTLOCK
4797                  * case, unless the current address (a) is at the beginning of
4798                  * the next page size boundary, because the other process could
4799                  * not have relocated locked pages.
4800                  */
4801                 ASSERT(ierr == -1 || ierr == -2);
4802 
4803                 if (segvn_anypgsz) {
4804                         ASSERT(ierr == -2 || szc != 0);
4805                         ASSERT(ierr == -1 || szc < seg->s_szc);
4806                         szc = (ierr == -1) ? szc - 1 : szc + 1;
4807                 } else {
4808                         /*
4809                          * For non-COW faults with segvn_anypgsz == 0
4810                          * we need to be careful not to loop forever
4811                          * if an existing page is found with a szc other
4812                          * than 0 or seg->s_szc. This could be due
4813                          * to page relocations on behalf of DR or,
4814                          * more likely, large page creation. In this
4815                          * case simply re-size to the existing page's
4816                          * szc as returned by anon_map_getpages().
4817                          */
4818                         if (ppa_szc == (uint_t)-1) {
4819                                 szc = (ierr == -1) ? 0 : seg->s_szc;
4820                         } else {
4821                                 ASSERT(ppa_szc <= seg->s_szc);
4822                                 ASSERT(ierr == -2 || ppa_szc < szc);
4823                                 ASSERT(ierr == -1 || ppa_szc > szc);
4824                                 szc = ppa_szc;
4825                         }
4826                 }
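                     /*
                      * Illustrative example of the protocol above: with
                      * seg->s_szc == 2 and segvn_anypgsz != 0, an ierr of -1
                      * steps szc down 2 -> 1 -> 0, while an ierr of -2 steps
                      * szc back up toward seg->s_szc.
                      */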
4827 
4828                 pgsz = page_get_pagesize(szc);
4829                 pages = btop(pgsz);
4830                 ASSERT(type != F_SOFTLOCK || ierr == -1 ||
4831                     (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz)));
4832                 if (type == F_SOFTLOCK) {
4833                         /*
4834                          * For softlocks we cannot reduce the fault area
4835                          * (calculated based on the largest page size for this
4836                          * segment) for size downs, and a is already next
4837                          * page size aligned as asserted above for size
4838                          * ups. Therefore just continue in case of softlock.
4839                          */
4840                         VM_STAT_ADD(segvnvmstats.fltanpages[9]);
4841                         continue; /* keep lint happy */
4842                 } else if (ierr == -2) {
4843 
4844                         /*
4845                          * Size up case. Note lpgaddr may only be needed in
4846                          * the softlock case, so we don't adjust it here.
4847                          */
4848                         VM_STAT_ADD(segvnvmstats.fltanpages[10]);
4849                         a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4850                         ASSERT(a >= lpgaddr);
4851                         lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4852                         aindx = svd->anon_index + seg_page(seg, a);
4853                         vpage = (svd->vpage != NULL) ?
4854                             &svd->vpage[seg_page(seg, a)] : NULL;
4855                 } else {
4856                          * Size down case. Note lpgaddr may only be needed in
4857                          * the softlock case, so we don't adjust it here.
4858                          * softlock case so we don't adjust it here.
4859                          */
4860                         VM_STAT_ADD(segvnvmstats.fltanpages[11]);
4861                         ASSERT(IS_P2ALIGNED(a, pgsz));
4862                         ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4863                         lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4864                         ASSERT(a < lpgeaddr);
4865                         if (a < addr) {
4866                                 /*
4867                                  * The beginning of the large page region can
4868                                  * be pulled to the right to make a smaller
4869                                  * region. We haven't yet faulted a single
4870                                  * page.
4871                                  */
4872                                 VM_STAT_ADD(segvnvmstats.fltanpages[12]);
4873                                 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4874                                 ASSERT(a >= lpgaddr);
4875                                 aindx = svd->anon_index + seg_page(seg, a);
4876                                 vpage = (svd->vpage != NULL) ?
4877                                     &svd->vpage[seg_page(seg, a)] : NULL;
4878                         }
4879                 }
4880         }
4881         VM_STAT_ADD(segvnvmstats.fltanpages[13]);
4882         ANON_LOCK_EXIT(&amp->a_rwlock);
4883         kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4884         return (0);
4885 error:
4886         VM_STAT_ADD(segvnvmstats.fltanpages[14]);
4887         ANON_LOCK_EXIT(&amp->a_rwlock);
4888         kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4889         if (type == F_SOFTLOCK && a > lpgaddr) {
4890                 VM_STAT_ADD(segvnvmstats.fltanpages[15]);
4891                 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4892         }
4893         return (err);
4894 }
4895 
4896 int fltadvice = 1;      /* set to enable free-behind for sequential access */
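     /*
      * As an illustrative (assumed) example, free-behind could be disabled
      * system-wide by setting the tunable from /etc/system:
      *
      *         set fltadvice = 0
      */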
4897 
4898 /*
4899  * This routine is called via a machine specific fault handling routine.
4900  * It is also called by software routines wishing to lock or unlock
4901  * a range of addresses.
4902  *
4903  * Here is the basic algorithm:
4904  *      If unlocking
4905  *              Call segvn_softunlock
4906  *              Return
4907  *      endif
4908  *      Do checking and set-up work
4909  *      If we will need some non-anonymous pages
4910  *              Call VOP_GETPAGE over the range of non-anonymous pages
4911  *      endif
4912  *      Loop over all addresses requested
4913  *              Call segvn_faultpage passing in page list
4914  *                  to load up translations and handle anonymous pages
4915  *      endloop
4916  *      Load up translation to any additional pages in page list not
4917  *          already handled that fit into this segment
4918  */
4919 static faultcode_t
4920 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4921     enum fault_type type, enum seg_rw rw)
4922 {
4923         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4924         page_t **plp, **ppp, *pp;
4925         u_offset_t off;
4926         caddr_t a;
4927         struct vpage *vpage;
4928         uint_t vpprot, prot;
4929         int err;
4930         page_t *pl[PVN_GETPAGE_NUM + 1];
4931         size_t plsz, pl_alloc_sz;
4932         size_t page;
4933         ulong_t anon_index;
4934         struct anon_map *amp;
4935         int dogetpage = 0;
4936         caddr_t lpgaddr, lpgeaddr;
4937         size_t pgsz;
4938         anon_sync_obj_t cookie;
4939         int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4940 
4941         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
4942         ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4943 
4944         /*
4945          * First handle the easy stuff
4946          */
4947         if (type == F_SOFTUNLOCK) {
4948                 if (rw == S_READ_NOCOW) {
4949                         rw = S_READ;
4950                         ASSERT(AS_WRITE_HELD(seg->s_as));
4951                 }
4952                 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4953                 pgsz = (seg->s_szc == 0) ? PAGESIZE :
4954                     page_get_pagesize(seg->s_szc);
4955                 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]);
4956                 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
4957                 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw);
4958                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4959                 return (0);
4960         }
4961 
4962         ASSERT(svd->tr_state == SEGVN_TR_OFF ||
4963             !HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
4964         if (brkcow == 0) {
4965                 if (svd->tr_state == SEGVN_TR_INIT) {
4966                         SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4967                         if (svd->tr_state == SEGVN_TR_INIT) {
4968                                 ASSERT(svd->vp != NULL && svd->amp == NULL);
4969                                 ASSERT(svd->flags & MAP_TEXT);
4970                                 ASSERT(svd->type == MAP_PRIVATE);
4971                                 segvn_textrepl(seg);
4972                                 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4973                                 ASSERT(svd->tr_state != SEGVN_TR_ON ||
4974                                     svd->amp != NULL);
4975                         }
4976                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4977                 }
4978         } else if (svd->tr_state != SEGVN_TR_OFF) {
4979                 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4980 
4981                 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) {
4982                         ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
4983                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4984                         return (FC_PROT);
4985                 }
4986 
4987                 if (svd->tr_state == SEGVN_TR_ON) {
4988                         ASSERT(svd->vp != NULL && svd->amp != NULL);
4989                         segvn_textunrepl(seg, 0);
4990                         ASSERT(svd->amp == NULL &&
4991                             svd->tr_state == SEGVN_TR_OFF);
4992                 } else if (svd->tr_state != SEGVN_TR_OFF) {
4993                         svd->tr_state = SEGVN_TR_OFF;
4994                 }
4995                 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
4996                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4997         }
4998 
4999 top:
5000         SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5001 
5002         /*
5003          * If we have the same protections for the entire segment,
5004          * ensure that the access being attempted is legitimate.
5005          */
5006 
5007         if (svd->pageprot == 0) {
5008                 uint_t protchk;
5009 
5010                 switch (rw) {
5011                 case S_READ:
5012                 case S_READ_NOCOW:
5013                         protchk = PROT_READ;
5014                         break;
5015                 case S_WRITE:
5016                         protchk = PROT_WRITE;
5017                         break;
5018                 case S_EXEC:
5019                         protchk = PROT_EXEC;
5020                         break;
5021                 case S_OTHER:
5022                 default:
5023                         protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
5024                         break;
5025                 }
5026 
5027                 if ((svd->prot & protchk) == 0) {
5028                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5029                         return (FC_PROT);       /* illegal access type */
5030                 }
5031         }
5032 
5033         if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5034                 /* This must be a SOFTLOCK S_READ fault. */
5035                 ASSERT(svd->amp == NULL);
5036                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5037                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5038                 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5039                 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5040                         /*
5041                          * This must be the first ever non-S_READ_NOCOW
5042                          * softlock for this segment.
5043                          */
5044                         ASSERT(svd->softlockcnt == 0);
5045                         hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5046                             HAT_REGION_TEXT);
5047                         svd->rcookie = HAT_INVALID_REGION_COOKIE;
5048                 }
5049                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5050                 goto top;
5051         }
5052 
5053         /*
5054          * We can't allow the long term use of softlocks for vmpss segments,
5055          * because in some file truncation cases we should be able to demote
5056          * the segment, which requires that there are no softlocks.  The
5057          * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5058          * segment is S_READ_NOCOW, where the caller holds the address space
5059          * locked as writer and calls softunlock before dropping the as lock.
5060          * S_READ_NOCOW is used by /proc to read memory from another process.
5061          *
5062          * Another deadlock between SOFTLOCK and file truncation can happen
5063          * because segvn_fault_vnodepages() calls the FS one pagesize at
5064          * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5065          * can cause a deadlock because the first set of page_t's remain
5066          * locked SE_SHARED.  To avoid this, we demote segments on a first
5067          * SOFTLOCK if they have a length greater than the segment's
5068          * page size.
5069          *
5070          * So for now, we only avoid demoting a segment on a SOFTLOCK when
5071          * the access type is S_READ_NOCOW and the fault length is less than
5072          * or equal to the segment's page size. While this is quite restrictive,
5073          * it should be the most common case of SOFTLOCK against a vmpss
5074          * segment.
5075          *
5076          * For S_READ_NOCOW, it's safe not to do a copy on write because the
5077          * caller makes sure no COW will be caused by another thread for a
5078          * softlocked page.
5079          */
5080         if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) {
5081                 int demote = 0;
5082 
5083                 if (rw != S_READ_NOCOW) {
5084                         demote = 1;
5085                 }
5086                 if (!demote && len > PAGESIZE) {
5087                         pgsz = page_get_pagesize(seg->s_szc);
5088                         CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5089                             lpgeaddr);
5090                         if (lpgeaddr - lpgaddr > pgsz) {
5091                                 demote = 1;
5092                         }
5093                 }
5094 
5095                 ASSERT(demote || AS_WRITE_HELD(seg->s_as));
5096 
5097                 if (demote) {
5098                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5099                         SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5100                         if (seg->s_szc != 0) {
5101                                 segvn_vmpss_clrszc_cnt++;
5102                                 ASSERT(svd->softlockcnt == 0);
5103                                 err = segvn_clrszc(seg);
5104                                 if (err) {
5105                                         segvn_vmpss_clrszc_err++;
5106                                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5107                                         return (FC_MAKE_ERR(err));
5108                                 }
5109                         }
5110                         ASSERT(seg->s_szc == 0);
5111                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5112                         goto top;
5113                 }
5114         }
5115 
5116         /*
5117          * Check to see if we need to allocate an anon_map structure.
5118          */
5119         if (svd->amp == NULL && (svd->vp == NULL || brkcow)) {
5120                 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5121                 /*
5122                  * Drop the "read" lock on the segment and acquire
5123                  * the "write" version since we have to allocate the
5124                  * anon_map.
5125                  */
5126                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5127                 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5128 
5129                 if (svd->amp == NULL) {
5130                         svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
5131                         svd->amp->a_szc = seg->s_szc;
5132                 }
5133                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5134 
5135                 /*
5136                  * Start all over again since segment protections
5137                  * may have changed after we dropped the "read" lock.
5138                  */
5139                 goto top;
5140         }
5141 
5142         /*
5143          * The S_READ_NOCOW vs. S_READ distinction was
5144          * only needed for the code above. From here
5145          * on we treat it as S_READ.
5146          */
5147         if (rw == S_READ_NOCOW) {
5148                 ASSERT(type == F_SOFTLOCK);
5149                 ASSERT(AS_WRITE_HELD(seg->s_as));
5150                 rw = S_READ;
5151         }
5152 
5153         amp = svd->amp;
5154 
5155         /*
5156          * MADV_SEQUENTIAL work is ignored for large page segments.
5157          */
5158         if (seg->s_szc != 0) {
5159                 pgsz = page_get_pagesize(seg->s_szc);
5160                 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
5161                 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5162                 if (svd->vp == NULL) {
5163                         err = segvn_fault_anonpages(hat, seg, lpgaddr,
5164                             lpgeaddr, type, rw, addr, addr + len, brkcow);
5165                 } else {
5166                         err = segvn_fault_vnodepages(hat, seg, lpgaddr,
5167                             lpgeaddr, type, rw, addr, addr + len, brkcow);
5168                         if (err == IE_RETRY) {
5169                                 ASSERT(seg->s_szc == 0);
5170                                 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
5171                                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5172                                 goto top;
5173                         }
5174                 }
5175                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5176                 return (err);
5177         }
5178 
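             /*
              * From here on seg->s_szc is 0, so the fault range is handled
              * one PAGESIZE page at a time.
              */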
5179         page = seg_page(seg, addr);
5180         if (amp != NULL) {
5181                 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5182                 anon_index = svd->anon_index + page;
5183 
5184                 if (type == F_PROT && rw == S_READ &&
5185                     svd->tr_state == SEGVN_TR_OFF &&
5186                     svd->type == MAP_PRIVATE && svd->pageprot == 0) {
5187                         size_t index = anon_index;
5188                         struct anon *ap;
5189 
5190                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5191                         /*
5192                          * The fast path could apply to S_WRITE also, except
5193                          * that the protection fault could be caused by a lazy
5194                          * tlb flush on an ro->rw change. In that case the pte
5195                          * is already RW, but a stale RO entry in another cpu's
5196                          * tlb causes the fault. Since hat_chgprot won't do
5197                          * anything if the pte doesn't change, we may end up
5198                          * faulting indefinitely until the RO entry is replaced.
5199                          */
5200                         for (a = addr; a < addr + len; a += PAGESIZE, index++) {
5201                                 anon_array_enter(amp, index, &cookie);
5202                                 ap = anon_get_ptr(amp->ahp, index);
5203                                 anon_array_exit(&cookie);
5204                                 if ((ap == NULL) || (ap->an_refcnt != 1)) {
5205                                         ANON_LOCK_EXIT(&amp->a_rwlock);
5206                                         goto slow;
5207                                 }
5208                         }
5209                         hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5210                         ANON_LOCK_EXIT(&amp->a_rwlock);
5211                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5212                         return (0);
5213                 }
5214         }
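             /*
              * The general path: consult the per-page vpage state, apply any
              * MADV_SEQUENTIAL free-behind, call VOP_GETPAGE() for the
              * non-anonymous pages and fault each page via segvn_faultpage().
              */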
5215 slow:
5216 
5217         if (svd->vpage == NULL)
5218                 vpage = NULL;
5219         else
5220                 vpage = &svd->vpage[page];
5221 
5222         off = svd->offset + (uintptr_t)(addr - seg->s_base);
5223 
5224         /*
5225          * If MADV_SEQUENTIAL has been set for the particular page we
5226          * are faulting on, free behind all pages in the segment and put
5227          * them on the free list.
5228          */
5229 
5230         if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) {
5231                 struct vpage *vpp;
5232                 ulong_t fanon_index;
5233                 size_t fpage;
5234                 u_offset_t pgoff, fpgoff;
5235                 struct vnode *fvp;
5236                 struct anon *fap = NULL;
5237 
5238                 if (svd->advice == MADV_SEQUENTIAL ||
5239                     (svd->pageadvice &&
5240                     VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) {
5241                         pgoff = off - PAGESIZE;
5242                         fpage = page - 1;
5243                         if (vpage != NULL)
5244                                 vpp = &svd->vpage[fpage];
5245                         if (amp != NULL)
5246                                 fanon_index = svd->anon_index + fpage;
5247 
5248                         while (pgoff > svd->offset) {
5249                                 if (svd->advice != MADV_SEQUENTIAL &&
5250                                     (!svd->pageadvice || (vpage &&
5251                                     VPP_ADVICE(vpp) != MADV_SEQUENTIAL)))
5252                                         break;
5253 
5254                                 /*
5255                                  * If this is an anon page, we must find the
5256                                  * correct <vp, offset> for it
5257                                  */
5258                                 fap = NULL;
5259                                 if (amp != NULL) {
5260                                         ANON_LOCK_ENTER(&amp->a_rwlock,
5261                                             RW_READER);
5262                                         anon_array_enter(amp, fanon_index,
5263                                             &cookie);
5264                                         fap = anon_get_ptr(amp->ahp,
5265                                             fanon_index);
5266                                         if (fap != NULL) {
5267                                                 swap_xlate(fap, &fvp, &fpgoff);
5268                                         } else {
5269                                                 fpgoff = pgoff;
5270                                                 fvp = svd->vp;
5271                                         }
5272                                         anon_array_exit(&cookie);
5273                                         ANON_LOCK_EXIT(&amp->a_rwlock);
5274                                 } else {
5275                                         fpgoff = pgoff;
5276                                         fvp = svd->vp;
5277                                 }
5278                                 if (fvp == NULL)
5279                                         break;  /* XXX */
5280                                 /*
5281                                  * Skip pages that are free or have an
5282                                  * "exclusive" lock.
5283                                  */
5284                                 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED);
5285                                 if (pp == NULL)
5286                                         break;
5287                                 /*
5288                                  * We don't need the page_struct_lock to test
5289                                  * as this is only advisory; even if we
5290                                  * acquire it someone might race in and lock
5291                                  * the page after we unlock and before the
5292                                  * PUTPAGE, then VOP_PUTPAGE will do nothing.
5293                                  */
5294                                 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
5295                                         /*
5296                                          * Hold the vnode before releasing
5297                                          * the page lock to prevent it from
5298                                          * being freed and re-used by some
5299                                          * other thread.
5300                                          */
5301                                         VN_HOLD(fvp);
5302                                         page_unlock(pp);
5303                                         /*
5304                                          * We should build a page list
5305                                          * to kluster putpages XXX
5306                                          */
5307                                         (void) VOP_PUTPAGE(fvp,
5308                                             (offset_t)fpgoff, PAGESIZE,
5309                                             (B_DONTNEED|B_FREE|B_ASYNC),
5310                                             svd->cred, NULL);
5311                                         VN_RELE(fvp);
5312                                 } else {
5313                                         /*
5314                                          * XXX - Should the loop terminate if
5315                                          * the page is `locked'?
5316                                          */
5317                                         page_unlock(pp);
5318                                 }
5319                                 --vpp;
5320                                 --fanon_index;
5321                                 pgoff -= PAGESIZE;
5322                         }
5323                 }
5324         }
5325 
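             /*
              * Start with the small on-stack page list; if the fault range
              * turns out to be too large for it, a bigger list is
              * kmem_alloc'd below.
              */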
5326         plp = pl;
5327         *plp = NULL;
5328         pl_alloc_sz = 0;
5329 
5330         /*
5331          * See if we need to call VOP_GETPAGE for
5332          * *any* of the range being faulted on.
5333          * We can skip all of this work if there
5334          * was no original vnode.
5335          */
5336         if (svd->vp != NULL) {
5337                 u_offset_t vp_off;
5338                 size_t vp_len;
5339                 struct anon *ap;
5340                 vnode_t *vp;
5341 
5342                 vp_off = off;
5343                 vp_len = len;
5344 
5345                 if (amp == NULL)
5346                         dogetpage = 1;
5347                 else {
5348                         /*
5349                          * Only acquire reader lock to prevent amp->ahp
5350                          * from being changed.  It's ok to miss pages,
5351                          * hence we don't call anon_array_enter().
5352                          */
5353                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5354                         ap = anon_get_ptr(amp->ahp, anon_index);
5355 
5356                         if (len <= PAGESIZE)
5357                                 /* inline non_anon() */
5358                                 dogetpage = (ap == NULL);
5359                         else
5360                                 dogetpage = non_anon(amp->ahp, anon_index,
5361                                     &vp_off, &vp_len);
5362                         ANON_LOCK_EXIT(&amp->a_rwlock);
5363                 }
5364 
5365                 if (dogetpage) {
5366                         enum seg_rw arw;
5367                         struct as *as = seg->s_as;
5368 
5369                         if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) {
5370                                 /*
5371                                  * Page list won't fit in local array,
5372                                  * allocate one of the needed size.
5373                                  */
5374                                 pl_alloc_sz =
5375                                     (btop(len) + 1) * sizeof (page_t *);
5376                                 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP);
5377                                 plp[0] = NULL;
5378                                 plsz = len;
5379                         } else if ((rw == S_WRITE && svd->type == MAP_PRIVATE) ||
5380                             svd->tr_state == SEGVN_TR_ON || rw == S_OTHER ||
5381                             (((size_t)(addr + PAGESIZE) <
5382                             (size_t)(seg->s_base + seg->s_size)) &&
5383                             hat_probe(as->a_hat, addr + PAGESIZE))) {
5384                                 /*
5385                                  * Ask VOP_GETPAGE to return the exact number
5386                                  * of pages if
5387                                  * (a) this is a COW fault, or
5388                                  * (b) this is a software fault, or
5389                                  * (c) next page is already mapped.
5390                                  */
5391                                 plsz = len;
5392                         } else {
5393                                 /*
5394                                  * Ask VOP_GETPAGE to return adjacent pages
5395                                  * within the segment.
5396                                  */
5397                                 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t)
5398                                     ((seg->s_base + seg->s_size) - addr));
5399                                 ASSERT((addr + plsz) <=
5400                                     (seg->s_base + seg->s_size));
5401                         }
5402 
5403                         /*
5404                          * Need to get some non-anonymous pages.
5405                          * We must make only one call to GETPAGE, to
5406                          * prevent certain deadlocking conditions while
5407                          * we are doing locking.  In this case
5408                          * non_anon() should have picked up the smallest
5409                          * range which includes all the non-anonymous
5410                          * pages in the requested range.  We have to
5411                          * be careful regarding which rw flag to pass in
5412                          * because on a private mapping, the underlying
5413                          * object is never allowed to be written.
5414                          */
5415                         if (rw == S_WRITE && svd->type == MAP_PRIVATE) {
5416                                 arw = S_READ;
5417                         } else {
5418                                 arw = rw;
5419                         }
5420                         vp = svd->vp;
5421                         TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5422                             "segvn_getpage:seg %p addr %p vp %p",
5423                             seg, addr, vp);
5424                         err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len,
5425                             &vpprot, plp, plsz, seg, addr + (vp_off - off), arw,
5426                             svd->cred, NULL);
5427                         if (err) {
5428                                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5429                                 segvn_pagelist_rele(plp);
5430                                 if (pl_alloc_sz)
5431                                         kmem_free(plp, pl_alloc_sz);
5432                                 return (FC_MAKE_ERR(err));
5433                         }
5434                         if (svd->type == MAP_PRIVATE)
5435                                 vpprot &= ~PROT_WRITE;
5436                 }
5437         }
5438 
5439         /*
5440          * N.B. at this time the plp array has all the needed non-anon
5441          * pages in addition to (possibly) having some adjacent pages.
5442          */
5443 
5444         /*
5445          * Always acquire the anon_array_lock to prevent
5446          * 2 threads from allocating separate anon slots for
5447          * the same "addr".
5448          *
5449          * If this is a copy-on-write fault and we don't already
5450          * have the anon_array_lock, acquire it to prevent the
5451          * fault routine from handling multiple copy-on-write faults
5452          * on the same "addr" in the same address space.
5453          *
5454          * Only one thread should deal with the fault since after
5455          * it is handled, the other threads can acquire a translation
5456          * to the newly created private page.  This prevents two or
5457          * more threads from creating different private pages for the
5458          * same fault.
5459          *
5460          * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5461          * to prevent deadlock between this thread and another thread
5462          * which has soft-locked this page and wants to acquire serial_lock.
5463          * ( bug 4026339 )
5464          *
5465          * The fix for bug 4026339 becomes unnecessary when using the
5466          * locking scheme with a per-amp rwlock and a global set of hash
5467          * locks, anon_array_lock.  If we steal a vnode page when low
5468          * on memory and upgrade the page lock through page_rename,
5469          * then the page is PAGE_HANDLED, and nothing needs to be done
5470          * for this page after returning from segvn_faultpage.
5471          *
5472          * But really, the page lock should be downgraded after
5473          * the stolen page is page_rename'd.
5474          */
5475 
5476         if (amp != NULL)
5477                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5478 
5479         /*
5480          * Ok, now loop over the address range and handle faults
5481          */
5482         for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5483                 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot,
5484                     type, rw, brkcow);
5485                 if (err) {
5486                         if (amp != NULL)
5487                                 ANON_LOCK_EXIT(&amp->a_rwlock);
5488                         if (type == F_SOFTLOCK && a > addr) {
5489                                 segvn_softunlock(seg, addr, (a - addr),
5490                                     S_OTHER);
5491                         }
5492                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5493                         segvn_pagelist_rele(plp);
5494                         if (pl_alloc_sz)
5495                                 kmem_free(plp, pl_alloc_sz);
5496                         return (err);
5497                 }
5498                 if (vpage) {
5499                         vpage++;
5500                 } else if (svd->vpage) {
5501                         page = seg_page(seg, addr);
5502                         vpage = &svd->vpage[++page];
5503                 }
5504         }
5505 
5506         /* Didn't get pages from the underlying fs so we're done */
5507         if (!dogetpage)
5508                 goto done;
5509 
5510         /*
5511          * Now handle any other pages in the list returned.
5512          * If the page can be used, load up the translations now.
5513          * Note that the for loop will only be entered if "plp"
5514          * is pointing to a non-NULL page pointer which means that
5515          * VOP_GETPAGE() was called and vpprot has been initialized.
5516          */
5517         if (svd->pageprot == 0)
5518                 prot = svd->prot & vpprot;
5519 
5521         /*
5522          * Large Files: diff should be an unsigned value because we started
5523          * supporting > 2GB segment sizes as of 2.5.1, and when a
5524          * large file of size > 2GB gets mapped into the address space
5525          * the diff value can be > 2GB.
5526          */
5527 
5528         for (ppp = plp; (pp = *ppp) != NULL; ppp++) {
5529                 size_t diff;
5530                 struct anon *ap;
5531                 int anon_index;
5532                 anon_sync_obj_t cookie;
5533                 int hat_flag = HAT_LOAD_ADV;
5534 
5535                 if (svd->flags & MAP_TEXT) {
5536                         hat_flag |= HAT_LOAD_TEXT;
5537                 }
5538 
5539                 if (pp == PAGE_HANDLED)
5540                         continue;
5541 
5542                 if (svd->tr_state != SEGVN_TR_ON &&
5543                     pp->p_offset >=  svd->offset &&
5544                     pp->p_offset < svd->offset + seg->s_size) {
5545 
5546                         diff = pp->p_offset - svd->offset;
5547 
5548                         /*
5549                          * Large Files: The following assertion validates
5550                          * the narrowing assignment to diff above.
5551                          */
5552                         ASSERT(svd->vp == pp->p_vnode);
5553 
5554                         page = btop(diff);
5555                         if (svd->pageprot)
5556                                 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5557 
5558                         /*
5559                          * Prevent other threads in the address space from
5560                          * creating private pages (i.e., allocating anon slots)
5561                          * while we are in the process of loading translations
5562                          * to additional pages returned by the underlying
5563                          * object.
5564                          */
5565                         if (amp != NULL) {
5566                                 anon_index = svd->anon_index + page;
5567                                 anon_array_enter(amp, anon_index, &cookie);
5568                                 ap = anon_get_ptr(amp->ahp, anon_index);
5569                         }
5570                         if ((amp == NULL) || (ap == NULL)) {
5571                                 if (IS_VMODSORT(pp->p_vnode) ||
5572                                     enable_mbit_wa) {
5573                                         if (rw == S_WRITE)
5574                                                 hat_setmod(pp);
5575                                         else if (rw != S_OTHER &&
5576                                             !hat_ismod(pp))
5577                                                 prot &= ~PROT_WRITE;
5578                                 }
5579                                 /*
5580                                  * Skip mapping read-ahead pages marked
5581                                  * for migration, so they will get migrated
5582                                  * properly on fault.
5583                                  */
5584                                 ASSERT(amp == NULL ||
5585                                     svd->rcookie == HAT_INVALID_REGION_COOKIE);
5586                                 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) {
5587                                         hat_memload_region(hat,
5588                                             seg->s_base + diff,
5589                                             pp, prot, hat_flag,
5590                                             svd->rcookie);
5591                                 }
5592                         }
5593                         if (amp != NULL)
5594                                 anon_array_exit(&cookie);
5595                 }
5596                 page_unlock(pp);
5597         }
5598 done:
5599         if (amp != NULL)
5600                 ANON_LOCK_EXIT(&amp->a_rwlock);
5601         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5602         if (pl_alloc_sz)
5603                 kmem_free(plp, pl_alloc_sz);
5604         return (0);
5605 }
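 
     /*
      * Illustrative sketch only (not compiled): segvn_fault() is normally
      * reached through as_fault().  A caller that needs a user range held
      * for the duration of an operation might do something like the
      * following; the variables and error handling here are assumed.
      */
     #if 0
             faultcode_t fc;
 
             /* Fault in and lock the range; F_SOFTLOCK bumps softlockcnt. */
             fc = as_fault(as->a_hat, as, addr, len, F_SOFTLOCK, S_WRITE);
             if (fc == 0) {
                     /* ... access the locked range ... */
 
                     /* Release the locks; this ends up in segvn_softunlock(). */
                     (void) as_fault(as->a_hat, as, addr, len, F_SOFTUNLOCK,
                         S_WRITE);
             }
     #endif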
5606 
5607 /*
5608  * This routine is used to start I/O on pages asynchronously.  XXX it will
5609  * only create PAGESIZE pages. At fault time they will be relocated into
5610  * larger pages.
5611  */
5612 static faultcode_t
5613 segvn_faulta(struct seg *seg, caddr_t addr)
5614 {
5615         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5616         int err;
5617         struct anon_map *amp;
5618         vnode_t *vp;
5619 
5620         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
5621 
5622         SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5623         if ((amp = svd->amp) != NULL) {
5624                 struct anon *ap;
5625 
5626                 /*
5627                  * Reader lock to prevent amp->ahp from being changed.
5628                  * This is advisory; it's ok to miss a page, so
5629                  * we don't take the anon_array_enter() lock.
5630                  */
5631                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5632                 if ((ap = anon_get_ptr(amp->ahp,
5633                     svd->anon_index + seg_page(seg, addr))) != NULL) {
5634 
5635                         err = anon_getpage(&ap, NULL, NULL,
5636                             0, seg, addr, S_READ, svd->cred);
5637 
5638                         ANON_LOCK_EXIT(&amp->a_rwlock);
5639                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5640                         if (err)
5641                                 return (FC_MAKE_ERR(err));
5642                         return (0);
5643                 }
5644                 ANON_LOCK_EXIT(&amp->a_rwlock);
5645         }
5646 
5647         if (svd->vp == NULL) {
5648                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5649                 return (0);                     /* zfod page - do nothing now */
5650         }
5651 
5652         vp = svd->vp;
5653         TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5654             "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp);
5655         err = VOP_GETPAGE(vp,
5656             (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5657             PAGESIZE, NULL, NULL, 0, seg, addr,
5658             S_OTHER, svd->cred, NULL);
5659 
5660         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5661         if (err)
5662                 return (FC_MAKE_ERR(err));
5663         return (0);
5664 }
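 
     /*
      * Illustrative sketch only (not compiled): segvn_faulta() is driven
      * through as_faulta(), e.g. when advice such as MADV_WILLNEED is
      * serviced; the variables here are assumed.
      */
     #if 0
             (void) as_faulta(as, addr, len);        /* start async read-ahead */
     #endif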
5665 
5666 static int
5667 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5668 {
5669         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5670         struct vpage *cvp, *svp, *evp;
5671         struct vnode *vp;
5672         size_t pgsz;
5673         pgcnt_t pgcnt;
5674         anon_sync_obj_t cookie;
5675         int unload_done = 0;
5676 
5677         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
5678 
5679         if ((svd->maxprot & prot) != prot)
5680                 return (EACCES);                        /* violated maxprot */
5681 
5682         SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5683 
5684         /* return if prot is the same */
5685         if (!svd->pageprot && svd->prot == prot) {
5686                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5687                 return (0);
5688         }
5689 
5690         /*
5691          * Since we change protections we first have to flush the cache.
5692          * This ensures that all the pagelock calls will recheck
5693          * protections.
5694          */
5695         if (svd->softlockcnt > 0) {
5696                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5697 
5698                 /*
5699                  * If this is a shared segment, a non-zero softlockcnt
5700                  * means locked pages are still in use.
5701                  */
5702                 if (svd->type == MAP_SHARED) {
5703                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5704                         return (EAGAIN);
5705                 }
5706 
5707                 /*
5708                  * Since we hold the segvn writer's lock, nobody can fill
5709                  * the cache with entries belonging to this seg during
5710                  * the purge. The flush either succeeds or we still have
5711                  * pending I/Os.
5712                  */
5713                 segvn_purge(seg);
5714                 if (svd->softlockcnt > 0) {
5715                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5716                         return (EAGAIN);
5717                 }
5718         }
5719 
5720         if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5721                 ASSERT(svd->amp == NULL);
5722                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5723                 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5724                     HAT_REGION_TEXT);
5725                 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5726                 unload_done = 1;
5727         } else if (svd->tr_state == SEGVN_TR_INIT) {
5728                 svd->tr_state = SEGVN_TR_OFF;
5729         } else if (svd->tr_state == SEGVN_TR_ON) {
5730                 ASSERT(svd->amp != NULL);
5731                 segvn_textunrepl(seg, 0);
5732                 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5733                 unload_done = 1;
5734         }
5735 
5736         if ((prot & PROT_WRITE) && svd->type == MAP_SHARED &&
5737             svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) {
5738                 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
5739                 segvn_inval_trcache(svd->vp);
5740         }
5741         if (seg->s_szc != 0) {
5742                 int err;
5743                 pgsz = page_get_pagesize(seg->s_szc);
5744                 pgcnt = pgsz >> PAGESHIFT;
5745                 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
5746                 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5747                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5748                         ASSERT(seg->s_base != addr || seg->s_size != len);
5749                         /*
5750                          * If we are holding the as lock as a reader then
5751                          * we need to return IE_RETRY and let the as
5752                          * layer drop and re-acquire the lock as a writer.
5753                          */
5754                         if (AS_READ_HELD(seg->s_as))
5755                                 return (IE_RETRY);
5756                         VM_STAT_ADD(segvnvmstats.demoterange[1]);
5757                         if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5758                                 err = segvn_demote_range(seg, addr, len,
5759                                     SDR_END, 0);
5760                         } else {
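                                     /*
                                      * Anonymous MAP_SHARED segment: pick the
                                      * replacement page sizes from the shared
                                      * memory page size vector.
                                      */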
5761                                 uint_t szcvec = map_pgszcvec(seg->s_base,
5762                                     pgsz, (uintptr_t)seg->s_base,
5763                                     (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5764                                 err = segvn_demote_range(seg, addr, len,
5765                                     SDR_END, szcvec);
5766                         }
5767                         if (err == 0)
5768                                 return (IE_RETRY);
5769                         if (err == ENOMEM)
5770                                 return (IE_NOMEM);
5771                         return (err);
5772                 }
5773         }
5774 
5776         /*
5777          * If it's a private mapping and we're making it writable then we
5778          * may have to reserve the additional swap space now. If we are
5779          * making writable only a part of the segment then we use its vpage
5780          * array to keep a record of the pages for which we have reserved
5781          * swap. In this case we set the pageswap field in the segment's
5782          * segvn structure to record this.
5783          *
5784          * If it's a private mapping to a file (i.e., vp != NULL) and we're
5785          * removing write permission on the entire segment and we haven't
5786          * modified any pages, we can release the swap space.
5787          */
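     /*
      * Illustrative example: write-enabling an 8K sub-range of a
      * private mapping reserves 8K of swap and marks those vpages
      * SWAPRES; a later request over an overlapping range reserves
      * swap only for the pages not already marked.
      */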
5788         if (svd->type == MAP_PRIVATE) {
5789                 if (prot & PROT_WRITE) {
5790                         if (!(svd->flags & MAP_NORESERVE) &&
5791                             !(svd->swresv && svd->pageswap == 0)) {
5792                                 size_t sz = 0;
5793 
5794                                 /*
5795                                  * Start by determining how much swap
5796                                  * space is required.
5797                                  */
5798                                 if (addr == seg->s_base &&
5799                                     len == seg->s_size &&
5800                                     svd->pageswap == 0) {
5801                                         /* The whole segment */
5802                                         sz = seg->s_size;
5803                                 } else {
5804                                         /*
5805                                          * Make sure that the vpage array
5806                                          * exists, and make a note of the
5807                                          * range of elements corresponding
5808                                          * to len.
5809                                          */
5810                                         segvn_vpage(seg);
5811                                         if (svd->vpage == NULL) {
5812                                                 SEGVN_LOCK_EXIT(seg->s_as,
5813                                                     &svd->lock);
5814                                                 return (ENOMEM);
5815                                         }
5816                                         svp = &svd->vpage[seg_page(seg, addr)];
5817                                         evp = &svd->vpage[seg_page(seg,
5818                                             addr + len)];
5819 
5820                                         if (svd->pageswap == 0) {
5821                                                 /*
5822                                                  * This is the first time we've
5823                                                  * asked for a part of this
5824                                                  * segment, so we need to
5825                                                  * reserve everything we've
5826                                                  * been asked for.
5827                                                  */
5828                                                 sz = len;
5829                                         } else {
5830                                                 /*
5831                                                  * We have to count the number
5832                                                  * of pages required.
5833                                                  */
5834                                                 for (cvp = svp;  cvp < evp;
5835                                                     cvp++) {
5836                                                         if (!VPP_ISSWAPRES(cvp))
5837                                                                 sz++;
5838                                                 }
5839                                                 sz <<= PAGESHIFT;
5840                                         }
5841                                 }
5842 
5843                                 /* Try to reserve the necessary swap. */
5844                                 if (anon_resv_zone(sz,
5845                                     seg->s_as->a_proc->p_zone) == 0) {
5846                                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5847                                         return (IE_NOMEM);
5848                                 }
5849 
5850                                 /*
5851                                  * Make a note of how much swap space
5852                                  * we've reserved.
5853                                  */
5854                                 if (svd->pageswap == 0 && sz == seg->s_size) {
5855                                         svd->swresv = sz;
5856                                 } else {
5857                                         ASSERT(svd->vpage != NULL);
5858                                         svd->swresv += sz;
5859                                         svd->pageswap = 1;
5860                                         for (cvp = svp; cvp < evp; cvp++) {
5861                                                 if (!VPP_ISSWAPRES(cvp))
5862                                                         VPP_SETSWAPRES(cvp);
5863                                         }
5864                                 }
5865                         }
5866                 } else {
5867                         /*
5868                          * Swap space is released only if this segment
5869                          * does not map anonymous memory, since read faults
5870                          * on such segments still need an anon slot to read
5871                          * in the data.
5872                          */
5873                         if (svd->swresv != 0 && svd->vp != NULL &&
5874                             svd->amp == NULL && addr == seg->s_base &&
5875                             len == seg->s_size && svd->pageprot == 0) {
5876                                 ASSERT(svd->pageswap == 0);
5877                                 anon_unresv_zone(svd->swresv,
5878                                     seg->s_as->a_proc->p_zone);
5879                                 svd->swresv = 0;
5880                                 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
5881                                     "anon proc:%p %lu %u", seg, 0, 0);
5882                         }
5883                 }
5884         }
5885 
5886         if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5887                 if (svd->prot == prot) {
5888                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5889                         return (0);                     /* all done */
5890                 }
5891                 svd->prot = (uchar_t)prot;
5892         } else if (svd->type == MAP_PRIVATE) {
5893                 struct anon *ap = NULL;
5894                 page_t *pp;
5895                 u_offset_t offset, off;
5896                 struct anon_map *amp;
5897                 ulong_t anon_idx = 0;
5898 
5899                 /*
5900                  * A vpage structure exists or else the change does not
5901                  * involve the entire segment.  Establish a vpage structure
5902                  * if none is there.  Then, for each page in the range,
5903                  * adjust its individual permissions.  Note that write-
5904                  * enabling a MAP_PRIVATE page can affect the claims for
5905                  * locked down memory.  Overcommitting memory terminates
5906                  * the operation.
5907                  */
5908                 segvn_vpage(seg);
5909                 if (svd->vpage == NULL) {
5910                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5911                         return (ENOMEM);
5912                 }
5913                 svd->pageprot = 1;
5914                 if ((amp = svd->amp) != NULL) {
5915                         anon_idx = svd->anon_index + seg_page(seg, addr);
5916                         ASSERT(seg->s_szc == 0 ||
5917                             IS_P2ALIGNED(anon_idx, pgcnt));
5918                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5919                 }
5920 
5921                 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5922                 evp = &svd->vpage[seg_page(seg, addr + len)];
5923 
5924                 /*
5925                  * See the comment at the beginning of segvn_lockop
5926                  * regarding the way cowcnts and lckcnts are handled.
5927                  */
5928                 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5929 
5930                         if (seg->s_szc != 0) {
5931                                 if (amp != NULL) {
5932                                         anon_array_enter(amp, anon_idx,
5933                                             &cookie);
5934                                 }
5935                                 if (IS_P2ALIGNED(anon_idx, pgcnt) &&
5936                                     !segvn_claim_pages(seg, svp, offset,
5937                                     anon_idx, prot)) {
5938                                         if (amp != NULL) {
5939                                                 anon_array_exit(&cookie);
5940                                         }
5941                                         break;
5942                                 }
5943                                 if (amp != NULL) {
5944                                         anon_array_exit(&cookie);
5945                                 }
5946                                 anon_idx++;
5947                         } else {
5948                                 if (amp != NULL) {
5949                                         anon_array_enter(amp, anon_idx,
5950                                             &cookie);
5951                                         ap = anon_get_ptr(amp->ahp, anon_idx++);
5952                                 }
5953 
5954                                 if (VPP_ISPPLOCK(svp) &&
5955                                     VPP_PROT(svp) != prot) {
5956 
5957                                         if (amp == NULL || ap == NULL) {
5958                                                 vp = svd->vp;
5959                                                 off = offset;
5960                                         } else
5961                                                 swap_xlate(ap, &vp, &off);
5962                                         if (amp != NULL)
5963                                                 anon_array_exit(&cookie);
5964 
5965                                         if ((pp = page_lookup(vp, off,
5966                                             SE_SHARED)) == NULL) {
5967                                                 panic("segvn_setprot: no page");
5968                                                 /*NOTREACHED*/
5969                                         }
5970                                         ASSERT(seg->s_szc == 0);
5971                                         if ((VPP_PROT(svp) ^ prot) &
5972                                             PROT_WRITE) {
5973                                                 if (prot & PROT_WRITE) {
5974                                                         if (!page_addclaim(
5975                                                             pp)) {
5976                                                                 page_unlock(pp);
5977                                                                 break;
5978                                                         }
5979                                                 } else {
5980                                                         if (!page_subclaim(
5981                                                             pp)) {
5982                                                                 page_unlock(pp);
5983                                                                 break;
5984                                                         }
5985                                                 }
5986                                         }
5987                                         page_unlock(pp);
5988                                 } else if (amp != NULL)
5989                                         anon_array_exit(&cookie);
5990                         }
5991                         VPP_SETPROT(svp, prot);
5992                         offset += PAGESIZE;
5993                 }
5994                 if (amp != NULL)
5995                         ANON_LOCK_EXIT(&amp->a_rwlock);
5996 
5997                 /*
5998                  * Did we terminate prematurely?  If so, simply unload
5999                  * the translations for the pages we've updated so far.
6000                  */
6001                 if (svp != evp) {
6002                         if (unload_done) {
6003                                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6004                                 return (IE_NOMEM);
6005                         }
6006                         len = (svp - &svd->vpage[seg_page(seg, addr)]) *
6007                             PAGESIZE;
6008                         ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz));
6009                         if (len != 0)
6010                                 hat_unload(seg->s_as->a_hat, addr,
6011                                     len, HAT_UNLOAD);
6012                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6013                         return (IE_NOMEM);
6014                 }
6015         } else {
6016                 segvn_vpage(seg);
6017                 if (svd->vpage == NULL) {
6018                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6019                         return (ENOMEM);
6020                 }
6021                 svd->pageprot = 1;
6022                 evp = &svd->vpage[seg_page(seg, addr + len)];
6023                 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
6024                         VPP_SETPROT(svp, prot);
6025                 }
6026         }
6027 
6028         if (unload_done) {
6029                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6030                 return (0);
6031         }
6032 
6033         if (((prot & PROT_WRITE) != 0 &&
6034             (svd->vp != NULL || svd->type == MAP_PRIVATE)) ||
6035             (prot & ~PROT_USER) == PROT_NONE) {
6036                 /*
6037                  * Either private or shared data with write access (in
6038                  * which case we need to throw out all former translations
6039                  * so that the right translations are set up on fault, so
6040                  * that we don't allow write access to any copy-on-write
6041                  * pages that might be around, and so that we prevent write
6042                  * access to pages representing holes in a file), or we don't
6043                  * have permission to access the memory at all (in which case
6044                  * we have to unload any current translations that might exist).
6045                  */
6046                 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
6047         } else {
6048                 /*
6049                  * A shared mapping or a private mapping in which write
6050                  * protection is going to be denied - just change all the
6051                  * protections over the range of addresses in question.
6052                  * segvn does not support any attributes other than
6053                  * prot, so we can use hat_chgattr.
6054                  */
6055                 hat_chgattr(seg->s_as->a_hat, addr, len, prot);
6056         }
6057 
6058         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6059 
6060         return (0);
6061 }
6062 
6063 /*
6064  * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize
6065  * to determine if the seg is capable of mapping the requested szc.
6066  */
6067 static int
6068 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
6069 {
6070         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6071         struct segvn_data *nsvd;
6072         struct anon_map *amp = svd->amp;
6073         struct seg *nseg;
6074         caddr_t eaddr = addr + len, a;
6075         size_t pgsz = page_get_pagesize(szc);
6076         pgcnt_t pgcnt = page_get_pagecnt(szc);
6077         int err;
6078         u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6079 
6080         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
6081         ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6082 
6083         if (seg->s_szc == szc || segvn_lpg_disable != 0) {
6084                 return (0);
6085         }
6086 
6087         /*
6088          * addr should always be pgsz aligned but eaddr may be misaligned if
6089          * it's at the end of the segment.
6090          *
6091          * XXX we should assert this condition since as_setpagesize() logic
6092          * guarantees it.
6093          */
6094         if (!IS_P2ALIGNED(addr, pgsz) ||
6095             (!IS_P2ALIGNED(eaddr, pgsz) &&
6096             eaddr != seg->s_base + seg->s_size)) {
6097 
6098                 segvn_setpgsz_align_err++;
6099                 return (EINVAL);
6100         }
6101 
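             /*
              * For MAP_SHARED segments the anon index into the shared amp must
              * itself be aligned to the large page count, since otherwise the
              * amp's large pages could never line up with this segment's.
              */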
6102         if (amp != NULL && svd->type == MAP_SHARED) {
6103                 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6104                 if (!IS_P2ALIGNED(an_idx, pgcnt)) {
6105 
6106                         segvn_setpgsz_anon_align_err++;
6107                         return (EINVAL);
6108                 }
6109         }
6110 
6111         if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas ||
6112             szc > segvn_maxpgszc) {
6113                 return (EINVAL);
6114         }
6115 
6116         /* paranoid check */
6117         if (svd->vp != NULL &&
6118             (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) {
6119                 return (EINVAL);
6120         }
6121 
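             /*
              * map_addr_vacalign_check() rejects address/offset combinations
              * that would be inconsistently colored on machines with a
              * virtually addressed cache.
              */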
6122         if (seg->s_szc == 0 && svd->vp != NULL &&
6123             map_addr_vacalign_check(addr, off)) {
6124                 return (EINVAL);
6125         }
6126 
6127         /*
6128          * Check that protections are the same within new page
6129          * size boundaries.
6130          */
6131         if (svd->pageprot) {
6132                 for (a = addr; a < eaddr; a += pgsz) {
6133                         if ((a + pgsz) > eaddr) {
6134                                 if (!sameprot(seg, a, eaddr - a)) {
6135                                         return (EINVAL);
6136                                 }
6137                         } else {
6138                                 if (!sameprot(seg, a, pgsz)) {
6139                                         return (EINVAL);
6140                                 }
6141                         }
6142                 }
6143         }
6144 
6145         /*
6146          * Since we are changing the page size we first have to flush
6147          * the cache.  This forces all subsequent pagelock calls to
6148          * recheck protections.
6149          */
6150         if (svd->softlockcnt > 0) {
6151                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6152 
6153                 /*
6154                  * If this is a shared segment, a nonzero softlockcnt
6155                  * means locked pages are still in use.
6156                  */
6157                 if (svd->type == MAP_SHARED) {
6158                         return (EAGAIN);
6159                 }
6160 
6161                 /*
6162                  * Since we hold the segvn writers lock, nobody can fill
6163                  * the cache with entries belonging to this seg during
6164                  * the purge.  The flush either succeeds or we still have
6165                  * pending I/Os.
6166                  */
6167                 segvn_purge(seg);
6168                 if (svd->softlockcnt > 0) {
6169                         return (EAGAIN);
6170                 }
6171         }
6172 
6173         if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6174                 ASSERT(svd->amp == NULL);
6175                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6176                 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6177                     HAT_REGION_TEXT);
6178                 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6179         } else if (svd->tr_state == SEGVN_TR_INIT) {
6180                 svd->tr_state = SEGVN_TR_OFF;
6181         } else if (svd->tr_state == SEGVN_TR_ON) {
6182                 ASSERT(svd->amp != NULL);
6183                 segvn_textunrepl(seg, 1);
6184                 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6185                 amp = NULL;
6186         }
6187 
6188         /*
6189          * Operation on a subrange of the existing segment.
6190          */
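             /*
              * Illustrative sketch: when the request covers only part of the
              * segment and szc is growing, the range is first carved into its
              * own segment(s) with segvn_split_seg():
              *
              *   [s_base .. addr) [addr .. eaddr) [eaddr .. s_base + s_size)
              *
              * and IE_RETRY tells the caller to retry, now against a segment
              * that exactly matches the request.  A shrinking szc is handled
              * by segvn_demote_range() instead.
              */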
6191         if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) {
6192                 if (szc < seg->s_szc) {
6193                         VM_STAT_ADD(segvnvmstats.demoterange[2]);
6194                         err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0);
6195                         if (err == 0) {
6196                                 return (IE_RETRY);
6197                         }
6198                         if (err == ENOMEM) {
6199                                 return (IE_NOMEM);
6200                         }
6201                         return (err);
6202                 }
6203                 if (addr != seg->s_base) {
6204                         nseg = segvn_split_seg(seg, addr);
6205                         if (eaddr != (nseg->s_base + nseg->s_size)) {
6206                                 /* eaddr is szc aligned */
6207                                 (void) segvn_split_seg(nseg, eaddr);
6208                         }
6209                         return (IE_RETRY);
6210                 }
6211                 if (eaddr != (seg->s_base + seg->s_size)) {
6212                         /* eaddr is szc aligned */
6213                         (void) segvn_split_seg(seg, eaddr);
6214                 }
6215                 return (IE_RETRY);
6216         }
6217 
6218         /*
6219          * Break any low level sharing and reset seg->s_szc to 0.
6220          */
6221         if ((err = segvn_clrszc(seg)) != 0) {
6222                 if (err == ENOMEM) {
6223                         err = IE_NOMEM;
6224                 }
6225                 return (err);
6226         }
6227         ASSERT(seg->s_szc == 0);
6228 
6229         /*
6230          * If the end of the current segment is not pgsz aligned
6231          * then attempt to concatenate with the next segment.
6232          */
6233         if (!IS_P2ALIGNED(eaddr, pgsz)) {
6234                 nseg = AS_SEGNEXT(seg->s_as, seg);
6235                 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) {
6236                         return (ENOMEM);
6237                 }
6238                 if (nseg->s_ops != &segvn_ops) {
6239                         return (EINVAL);
6240                 }
6241                 nsvd = (struct segvn_data *)nseg->s_data;
6242                 if (nsvd->softlockcnt > 0) {
6243                         /*
6244                          * If this is a shared segment, a nonzero softlockcnt
6245                          * means locked pages are still in use.
6246                          */
6247                         if (nsvd->type == MAP_SHARED) {
6248                                 return (EAGAIN);
6249                         }
6250                         segvn_purge(nseg);
6251                         if (nsvd->softlockcnt > 0) {
6252                                 return (EAGAIN);
6253                         }
6254                 }
6255                 err = segvn_clrszc(nseg);
6256                 if (err == ENOMEM) {
6257                         err = IE_NOMEM;
6258                 }
6259                 if (err != 0) {
6260                         return (err);
6261                 }
6262                 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6263                 err = segvn_concat(seg, nseg, 1);
6264                 if (err == -1) {
6265                         return (EINVAL);
6266                 }
6267                 if (err == -2) {
6268                         return (IE_NOMEM);
6269                 }
6270                 return (IE_RETRY);
6271         }
6272 
6273         /*
6274          * May need to re-align anon array to
6275          * new szc.
6276          */
6277         if (amp != NULL) {
6278                 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6279                         struct anon_hdr *nahp;
6280 
6281                         ASSERT(svd->type == MAP_PRIVATE);
6282 
6283                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6284                         ASSERT(amp->refcnt == 1);
6285                         nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6286                         if (nahp == NULL) {
6287                                 ANON_LOCK_EXIT(&amp->a_rwlock);
6288                                 return (IE_NOMEM);
6289                         }
6290                         if (anon_copy_ptr(amp->ahp, svd->anon_index,
6291                             nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6292                                 anon_release(nahp, btop(amp->size));
6293                                 ANON_LOCK_EXIT(&amp->a_rwlock);
6294                                 return (IE_NOMEM);
6295                         }
6296                         anon_release(amp->ahp, btop(amp->size));
6297                         amp->ahp = nahp;
6298                         svd->anon_index = 0;
6299                         ANON_LOCK_EXIT(&amp->a_rwlock);
6300                 }
6301         }
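             /*
              * For vnode-backed segments, refuse the new page size unless the
              * file extends at least to the end of the segment's range; a
              * large page would otherwise have to map beyond EOF.
              */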
6302         if (svd->vp != NULL && szc != 0) {
6303                 struct vattr va;
6304                 u_offset_t eoffpage = svd->offset;
6305                 va.va_mask = AT_SIZE;
6306                 eoffpage += seg->s_size;
6307                 eoffpage = btopr(eoffpage);
6308                 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6309                         segvn_setpgsz_getattr_err++;
6310                         return (EINVAL);
6311                 }
6312                 if (btopr(va.va_size) < eoffpage) {
6313                         segvn_setpgsz_eof_err++;
6314                         return (EINVAL);
6315                 }
6316                 if (amp != NULL) {
6317                         /*
6318                          * anon_fill_cow_holes() may call VOP_GETPAGE().
6319                          * Don't take the anon map lock here to avoid holding
6320                          * it across VOP_GETPAGE() calls that may call back
6321                          * into segvn for klustering checks.  We don't really
6322                          * need the anon map lock here since it's a private
6323                          * segment and we hold the as level lock as writers.
6324                          */
6325                         if ((err = anon_fill_cow_holes(seg, seg->s_base,
6326                             amp->ahp, svd->anon_index, svd->vp, svd->offset,
6327                             seg->s_size, szc, svd->prot, svd->vpage,
6328                             svd->cred)) != 0) {
6329                                 return (EINVAL);
6330                         }
6331                 }
6332                 segvn_setvnode_mpss(svd->vp);
6333         }
6334 
6335         if (amp != NULL) {
6336                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6337                 if (svd->type == MAP_PRIVATE) {
6338                         amp->a_szc = szc;
6339                 } else if (szc > amp->a_szc) {
6340                         amp->a_szc = szc;
6341                 }
6342                 ANON_LOCK_EXIT(&amp->a_rwlock);
6343         }
6344 
6345         seg->s_szc = szc;
6346 
6347         return (0);
6348 }
6349 
6350 static int
6351 segvn_clrszc(struct seg *seg)
6352 {
6353         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6354         struct anon_map *amp = svd->amp;
6355         size_t pgsz;
6356         pgcnt_t pages;
6357         int err = 0;
6358         caddr_t a = seg->s_base;
6359         caddr_t ea = a + seg->s_size;
6360         ulong_t an_idx = svd->anon_index;
6361         vnode_t *vp = svd->vp;
6362         struct vpage *vpage = svd->vpage;
6363         page_t *anon_pl[1 + 1], *pp;
6364         struct anon *ap, *oldap;
6365         uint_t prot = svd->prot, vpprot;
6366         int pageflag = 0;
6367 
6368         ASSERT(AS_WRITE_HELD(seg->s_as) ||
6369             SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6370         ASSERT(svd->softlockcnt == 0);
6371 
6372         if (vp == NULL && amp == NULL) {
6373                 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6374                 seg->s_szc = 0;
6375                 return (0);
6376         }
6377 
6378         if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6379                 ASSERT(svd->amp == NULL);
6380                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6381                 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6382                     HAT_REGION_TEXT);
6383                 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6384         } else if (svd->tr_state == SEGVN_TR_ON) {
6385                 ASSERT(svd->amp != NULL);
6386                 segvn_textunrepl(seg, 1);
6387                 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6388                 amp = NULL;
6389         } else {
6390                 if (svd->tr_state != SEGVN_TR_OFF) {
6391                         ASSERT(svd->tr_state == SEGVN_TR_INIT);
6392                         svd->tr_state = SEGVN_TR_OFF;
6393                 }
6394 
6395                 /*
6396                  * Do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6397                  * (When the segment is being freed, the unload has
6398                  * already been done.)
6399                  */
6400                 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6401                     HAT_UNLOAD_UNMAP);
6402         }
6403 
6404         if (amp == NULL || svd->type == MAP_SHARED) {
6405                 seg->s_szc = 0;
6406                 return (0);
6407         }
6408 
6409         pgsz = page_get_pagesize(seg->s_szc);
6410         pages = btop(pgsz);
6411 
6412         /*
6413          * XXX anon rwlock is not really needed because this is a
6414          * private segment and we are writers.
6415          */
6416         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6417 
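             /*
              * Walk the segment one (old) large page at a time.  While the
              * segment still has a large szc, demote each large page in place.
              * If szc is already 0 (pgsz == PAGESIZE), instead break COW
              * sharing: give this private segment its own copy of any anon
              * page whose refcnt is still greater than 1.
              */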
6418         for (; a < ea; a += pgsz, an_idx += pages) {
6419                 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6420                         ASSERT(vpage != NULL || svd->pageprot == 0);
6421                         if (vpage != NULL) {
6422                                 ASSERT(sameprot(seg, a, pgsz));
6423                                 prot = VPP_PROT(vpage);
6424                                 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6425                         }
6426                         if (seg->s_szc != 0) {
6427                                 ASSERT(vp == NULL || anon_pages(amp->ahp,
6428                                     an_idx, pages) == pages);
6429                                 if ((err = anon_map_demotepages(amp, an_idx,
6430                                     seg, a, prot, vpage, svd->cred)) != 0) {
6431                                         goto out;
6432                                 }
6433                         } else {
6434                                 if (oldap->an_refcnt == 1) {
6435                                         continue;
6436                                 }
6437                                 if ((err = anon_getpage(&oldap, &vpprot,
6438                                     anon_pl, PAGESIZE, seg, a, S_READ,
6439                                     svd->cred))) {
6440                                         goto out;
6441                                 }
6442                                 if ((pp = anon_private(&ap, seg, a, prot,
6443                                     anon_pl[0], pageflag, svd->cred)) == NULL) {
6444                                         err = ENOMEM;
6445                                         goto out;
6446                                 }
6447                                 anon_decref(oldap);
6448                                 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6449                                     ANON_SLEEP);
6450                                 page_unlock(pp);
6451                         }
6452                 }
6453                 vpage = (vpage == NULL) ? NULL : vpage + pages;
6454         }
6455 
6456         amp->a_szc = 0;
6457         seg->s_szc = 0;
6458 out:
6459         ANON_LOCK_EXIT(&amp->a_rwlock);
6460         return (err);
6461 }
6462 
6463 static int
6464 segvn_claim_pages(
6465         struct seg *seg,
6466         struct vpage *svp,
6467         u_offset_t off,
6468         ulong_t anon_idx,
6469         uint_t prot)
6470 {
6471         pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6472         size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6473         page_t  **ppa;
6474         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6475         struct anon_map *amp = svd->amp;
6476         struct vpage *evp = svp + pgcnt;
6477         caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6478             + seg->s_base;
6479         struct anon *ap;
6480         struct vnode *vp = svd->vp;
6481         page_t *pp;
6482         pgcnt_t pg_idx, i;
6483         int err = 0;
6484         anoff_t aoff;
6485         int anon = (amp != NULL) ? 1 : 0;
6486 
6487         ASSERT(svd->type == MAP_PRIVATE);
6488         ASSERT(svd->vpage != NULL);
6489         ASSERT(seg->s_szc != 0);
6490         ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
6491         ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt));
6492         ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));
6493 
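             /*
              * Claims only track the write bit: if the protections are
              * unchanged, or if PROT_WRITE is neither being granted nor
              * revoked, there is nothing to adjust.
              */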
6494         if (VPP_PROT(svp) == prot)
6495                 return (1);
6496         if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE))
6497                 return (1);
6498 
6499         ppa = kmem_alloc(ppasize, KM_SLEEP);
6500         if (anon && vp != NULL) {
6501                 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) {
6502                         anon = 0;
6503                         ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt));
6504                 }
6505                 ASSERT(!anon ||
6506                     anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt);
6507         }
6508 
6509         for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
6510                 if (!VPP_ISPPLOCK(svp))
6511                         continue;
6512                 if (anon) {
6513                         ap = anon_get_ptr(amp->ahp, anon_idx);
6514                         if (ap == NULL) {
6515                                 panic("segvn_claim_pages: no anon slot");
6516                         }
6517                         swap_xlate(ap, &vp, &aoff);
6518                         off = (u_offset_t)aoff;
6519                 }
6520                 ASSERT(vp != NULL);
6521                 if ((pp = page_lookup(vp,
6522                     (u_offset_t)off, SE_SHARED)) == NULL) {
6523                         panic("segvn_claim_pages: no page");
6524                 }
6525                 ppa[pg_idx++] = pp;
6526                 off += PAGESIZE;
6527         }
6528 
6529         if (ppa[0] == NULL) {
6530                 kmem_free(ppa, ppasize);
6531                 return (1);
6532         }
6533 
6534         ASSERT(pg_idx <= pgcnt);
6535         ppa[pg_idx] = NULL;
6536 
6537 
6538         /* Find each large page within ppa, and adjust its claim */
6539 
6540         /* Does ppa cover a single large page? */
6541         if (ppa[0]->p_szc == seg->s_szc) {
6542                 if (prot & PROT_WRITE)
6543                         err = page_addclaim_pages(ppa);
6544                 else
6545                         err = page_subclaim_pages(ppa);
6546         } else {
6547                 for (i = 0; ppa[i]; i += pgcnt) {
6548                         ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
6549                         if (prot & PROT_WRITE)
6550                                 err = page_addclaim_pages(&ppa[i]);
6551                         else
6552                                 err = page_subclaim_pages(&ppa[i]);
6553                         if (err == 0)
6554                                 break;
6555                 }
6556         }
6557 
6558         for (i = 0; i < pg_idx; i++) {
6559                 ASSERT(ppa[i] != NULL);
6560                 page_unlock(ppa[i]);
6561         }
6562 
6563         kmem_free(ppa, ppasize);
6564         return (err);
6565 }
6566 
6567 /*
6568  * Returns the right (upper address) segment if a split occurred.
6569  * If the address is equal to the beginning or end of its segment it returns
6570  * the current segment.
6571  */
6572 static struct seg *
6573 segvn_split_seg(struct seg *seg, caddr_t addr)
6574 {
6575         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6576         struct seg *nseg;
6577         size_t nsize;
6578         struct segvn_data *nsvd;
6579 
6580         ASSERT(AS_WRITE_HELD(seg->s_as));
6581         ASSERT(svd->tr_state == SEGVN_TR_OFF);
6582 
6583         ASSERT(addr >= seg->s_base);
6584         ASSERT(addr <= seg->s_base + seg->s_size);
6585         ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6586 
6587         if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6588                 return (seg);
6589 
6590         nsize = seg->s_base + seg->s_size - addr;
6591         seg->s_size = addr - seg->s_base;
6592         nseg = seg_alloc(seg->s_as, addr, nsize);
6593         ASSERT(nseg != NULL);
6594         nseg->s_ops = seg->s_ops;
6595         nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
6596         nseg->s_data = (void *)nsvd;
6597         nseg->s_szc = seg->s_szc;
6598         *nsvd = *svd;
6599         ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6600         nsvd->seg = nseg;
6601         rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);
6602 
6603         if (nsvd->vp != NULL) {
6604                 VN_HOLD(nsvd->vp);
6605                 nsvd->offset = svd->offset +
6606                     (uintptr_t)(nseg->s_base - seg->s_base);
6607                 if (nsvd->type == MAP_SHARED)
6608                         lgrp_shm_policy_init(NULL, nsvd->vp);
6609         } else {
6610                 /*
6611                  * The offset for an anonymous segment has no significance in
6612                  * terms of an offset into a file. If we were to use the above
6613                  * calculation instead, the structures read out of
6614                  * /proc/<pid>/xmap would be more difficult to decipher since
6615                  * it would be unclear whether two seemingly contiguous
6616                  * prxmap_t structures represented different segments or a
6617                  * single segment that had been split up into multiple prxmap_t
6618                  * structures (e.g. if some part of the segment had not yet
6619                  * been faulted in).
6620                  */
6621                 nsvd->offset = 0;
6622         }
6623 
6624         ASSERT(svd->softlockcnt == 0);
6625         ASSERT(svd->softlockcnt_sbase == 0);
6626         ASSERT(svd->softlockcnt_send == 0);
6627         crhold(svd->cred);
6628 
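             /*
              * Split the per-page vpage array: each segment gets a freshly
              * allocated copy of its own portion and the original combined
              * array is freed.
              */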
6629         if (svd->vpage != NULL) {
6630                 size_t bytes = vpgtob(seg_pages(seg));
6631                 size_t nbytes = vpgtob(seg_pages(nseg));
6632                 struct vpage *ovpage = svd->vpage;
6633 
6634                 svd->vpage = kmem_alloc(bytes, KM_SLEEP);
6635                 bcopy(ovpage, svd->vpage, bytes);
6636                 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
6637                 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes);
6638                 kmem_free(ovpage, bytes + nbytes);
6639         }
6640         if (svd->amp != NULL && svd->type == MAP_PRIVATE) {
6641                 struct anon_map *oamp = svd->amp, *namp;
6642                 struct anon_hdr *nahp;
6643 
6644                 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER);
6645                 ASSERT(oamp->refcnt == 1);
6646                 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
6647                 (void) anon_copy_ptr(oamp->ahp, svd->anon_index,
6648                     nahp, 0, btop(seg->s_size), ANON_SLEEP);
6649 
6650                 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
6651                 namp->a_szc = nseg->s_szc;
6652                 (void) anon_copy_ptr(oamp->ahp,
6653                     svd->anon_index + btop(seg->s_size),
6654                     namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
6655                 anon_release(oamp->ahp, btop(oamp->size));
6656                 oamp->ahp = nahp;
6657                 oamp->size = seg->s_size;
6658                 svd->anon_index = 0;
6659                 nsvd->amp = namp;
6660                 nsvd->anon_index = 0;
6661                 ANON_LOCK_EXIT(&oamp->a_rwlock);
6662         } else if (svd->amp != NULL) {
6663                 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6664                 ASSERT(svd->amp == nsvd->amp);
6665                 ASSERT(seg->s_szc <= svd->amp->a_szc);
6666                 nsvd->anon_index = svd->anon_index + seg_pages(seg);
6667                 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt));
6668                 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER);
6669                 svd->amp->refcnt++;
6670                 ANON_LOCK_EXIT(&svd->amp->a_rwlock);
6671         }
6672 
6673         /*
6674          * Split the amount of swap reserved.
6675          */
6676         if (svd->swresv) {
6677                 /*
6678                  * For MAP_NORESERVE, only allocate swap reserve for pages
6679                  * being used.  Other segments get enough to cover whole
6680                  * segment.
6681                  */
6682                 if (svd->flags & MAP_NORESERVE) {
6683                         size_t  oswresv;
6684 
6685                         ASSERT(svd->amp);
6686                         oswresv = svd->swresv;
6687                         svd->swresv = ptob(anon_pages(svd->amp->ahp,
6688                             svd->anon_index, btop(seg->s_size)));
6689                         nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
6690                             nsvd->anon_index, btop(nseg->s_size)));
6691                         ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
6692                 } else {
6693                         if (svd->pageswap) {
6694                                 svd->swresv = segvn_count_swap_by_vpages(seg);
6695                                 ASSERT(nsvd->swresv >= svd->swresv);
6696                                 nsvd->swresv -= svd->swresv;
6697                         } else {
6698                                 ASSERT(svd->swresv == seg->s_size +
6699                                     nseg->s_size);
6700                                 svd->swresv = seg->s_size;
6701                                 nsvd->swresv = nseg->s_size;
6702                         }
6703                 }
6704         }
6705 
6706         return (nseg);
6707 }
6708 
6709 /*
6710  * Called on memory operations (unmap, setprot, setpagesize) for a subset
6711  * of a large page segment to either demote the memory range (SDR_RANGE)
6712  * or the ends (SDR_END) by addr/len.
6713  *
6714  * Returns 0 on success.  Returns errno, including ENOMEM, on failure.
6715  */
6716 static int
6717 segvn_demote_range(
6718         struct seg *seg,
6719         caddr_t addr,
6720         size_t len,
6721         int flag,
6722         uint_t szcvec)
6723 {
6724         caddr_t eaddr = addr + len;
6725         caddr_t lpgaddr, lpgeaddr;
6726         struct seg *nseg;
6727         struct seg *badseg1 = NULL;
6728         struct seg *badseg2 = NULL;
6729         size_t pgsz;
6730         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6731         int err;
6732         uint_t szc = seg->s_szc;
6733         uint_t tszcvec;
6734 
6735         ASSERT(AS_WRITE_HELD(seg->s_as));
6736         ASSERT(svd->tr_state == SEGVN_TR_OFF);
6737         ASSERT(szc != 0);
6738         pgsz = page_get_pagesize(szc);
6739         ASSERT(seg->s_base != addr || seg->s_size != len);
6740         ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6741         ASSERT(svd->softlockcnt == 0);
6742         ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6743         ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6744 
6745         CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
6746         ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
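             /*
              * lpgaddr/lpgeaddr are addr/eaddr rounded out to pgsz boundaries.
              * The segvn_split_seg() calls below isolate the large page(s)
              * that actually straddle addr and/or eaddr into their own "bad"
              * segment(s), which are then demoted via segvn_clrszc().
              */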
6747         if (flag == SDR_RANGE) {
6748                 /* demote entire range */
6749                 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6750                 (void) segvn_split_seg(nseg, lpgeaddr);
6751                 ASSERT(badseg1->s_base == lpgaddr);
6752                 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
6753         } else if (addr != lpgaddr) {
6754                 ASSERT(flag == SDR_END);
6755                 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6756                 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
6757                     eaddr < lpgaddr + 2 * pgsz) {
6758                         (void) segvn_split_seg(nseg, lpgeaddr);
6759                         ASSERT(badseg1->s_base == lpgaddr);
6760                         ASSERT(badseg1->s_size == 2 * pgsz);
6761                 } else {
6762                         nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
6763                         ASSERT(badseg1->s_base == lpgaddr);
6764                         ASSERT(badseg1->s_size == pgsz);
6765                         if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
6766                                 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
6767                                 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
6768                                 badseg2 = nseg;
6769                                 (void) segvn_split_seg(nseg, lpgeaddr);
6770                                 ASSERT(badseg2->s_base == lpgeaddr - pgsz);
6771                                 ASSERT(badseg2->s_size == pgsz);
6772                         }
6773                 }
6774         } else {
6775                 ASSERT(flag == SDR_END);
6776                 ASSERT(eaddr < lpgeaddr);
6777                 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
6778                 (void) segvn_split_seg(nseg, lpgeaddr);
6779                 ASSERT(badseg1->s_base == lpgeaddr - pgsz);
6780                 ASSERT(badseg1->s_size == pgsz);
6781         }
6782 
6783         ASSERT(badseg1 != NULL);
6784         ASSERT(badseg1->s_szc == szc);
6785         ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
6786             badseg1->s_size == 2 * pgsz);
6787         ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
6788         ASSERT(badseg1->s_size == pgsz ||
6789             sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
6790         if (err = segvn_clrszc(badseg1)) {
6791                 return (err);
6792         }
6793         ASSERT(badseg1->s_szc == 0);
6794 
6795         if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6796                 uint_t tszc = highbit(tszcvec) - 1;
6797                 caddr_t ta = MAX(addr, badseg1->s_base);
6798                 caddr_t te;
6799                 size_t tpgsz = page_get_pagesize(tszc);
6800 
6801                 ASSERT(svd->type == MAP_SHARED);
6802                 ASSERT(flag == SDR_END);
6803                 ASSERT(tszc < szc && tszc > 0);
6804 
6805                 if (eaddr > badseg1->s_base + badseg1->s_size) {
6806                         te = badseg1->s_base + badseg1->s_size;
6807                 } else {
6808                         te = eaddr;
6809                 }
6810 
6811                 ASSERT(ta <= te);
6812                 badseg1->s_szc = tszc;
6813                 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
6814                         if (badseg2 != NULL) {
6815                                 err = segvn_demote_range(badseg1, ta, te - ta,
6816                                     SDR_END, tszcvec);
6817                                 if (err != 0) {
6818                                         return (err);
6819                                 }
6820                         } else {
6821                                 return (segvn_demote_range(badseg1, ta,
6822                                     te - ta, SDR_END, tszcvec));
6823                         }
6824                 }
6825         }
6826 
6827         if (badseg2 == NULL)
6828                 return (0);
6829         ASSERT(badseg2->s_szc == szc);
6830         ASSERT(badseg2->s_size == pgsz);
6831         ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
6832         if (err = segvn_clrszc(badseg2)) {
6833                 return (err);
6834         }
6835         ASSERT(badseg2->s_szc == 0);
6836 
6837         if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6838                 uint_t tszc = highbit(tszcvec) - 1;
6839                 size_t tpgsz = page_get_pagesize(tszc);
6840 
6841                 ASSERT(svd->type == MAP_SHARED);
6842                 ASSERT(flag == SDR_END);
6843                 ASSERT(tszc < szc && tszc > 0);
6844                 ASSERT(badseg2->s_base > addr);
6845                 ASSERT(eaddr > badseg2->s_base);
6846                 ASSERT(eaddr < badseg2->s_base + badseg2->s_size);
6847 
6848                 badseg2->s_szc = tszc;
6849                 if (!IS_P2ALIGNED(eaddr, tpgsz)) {
6850                         return (segvn_demote_range(badseg2, badseg2->s_base,
6851                             eaddr - badseg2->s_base, SDR_END, tszcvec));
6852                 }
6853         }
6854 
6855         return (0);
6856 }
6857 
6858 static int
6859 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6860 {
6861         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6862         struct vpage *vp, *evp;
6863 
6864         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
6865 
6866         SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6867         /*
6868          * If segment-level protections apply, simply check against them.
6869          */
6870         if (svd->pageprot == 0) {
6871                 int err;
6872 
6873                 err = ((svd->prot & prot) != prot) ? EACCES : 0;
6874                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6875                 return (err);
6876         }
6877 
6878         /*
6879          * Have to check down to the vpage level.
6880          */
6881         evp = &svd->vpage[seg_page(seg, addr + len)];
6882         for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6883                 if ((VPP_PROT(vp) & prot) != prot) {
6884                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6885                         return (EACCES);
6886                 }
6887         }
6888         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6889         return (0);
6890 }
6891 
6892 static int
6893 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6894 {
6895         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6896         size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6897 
6898         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
6899 
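             /*
              * Fill protv[] back to front: with no per-page protections the
              * segment-level prot applies to every page; otherwise each entry
              * comes from the corresponding vpage.
              */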
6900         if (pgno != 0) {
6901                 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6902                 if (svd->pageprot == 0) {
6903                         do {
6904                                 protv[--pgno] = svd->prot;
6905                         } while (pgno != 0);
6906                 } else {
6907                         size_t pgoff = seg_page(seg, addr);
6908 
6909                         do {
6910                                 pgno--;
6911                                 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
6912                         } while (pgno != 0);
6913                 }
6914                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6915         }
6916         return (0);
6917 }
6918 
6919 static u_offset_t
6920 segvn_getoffset(struct seg *seg, caddr_t addr)
6921 {
6922         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6923 
6924         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
6925 
6926         return (svd->offset + (uintptr_t)(addr - seg->s_base));
6927 }
6928 
6929 /*ARGSUSED*/
6930 static int
6931 segvn_gettype(struct seg *seg, caddr_t addr)
6932 {
6933         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6934 
6935         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
6936 
6937         return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6938             MAP_INITDATA)));
6939 }
6940 
6941 /*ARGSUSED*/
6942 static int
6943 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6944 {
6945         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6946 
6947         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
6948 
6949         *vpp = svd->vp;
6950         return (0);
6951 }
6952 
6953 /*
6954  * Check to see if it makes sense to do kluster/read ahead to
6955  * addr + delta relative to the mapping at addr.  We assume here
6956  * that delta is a signed PAGESIZE'd multiple (which can be negative).
6957  *
6958  * For segvn, we currently "approve" of the action if we are
6959  * still in the segment and it maps from the same vp/off, or if
6960  * the advice stored in segvn_data or vpages allows it.  Klustering
6961  * is refused under MADV_RANDOM, or MADV_SEQUENTIAL with negative delta.
6962  */
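     /*
      * For example, with delta = PAGESIZE a read-ahead probe succeeds when
      * both pages come "from the same place": either neither page has an
      * anon slot yet (both still read from the backing vnode), or both have
      * anon slots whose swap backing is the same vnode at offsets exactly
      * PAGESIZE apart.
      */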
6963 static int
6964 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6965 {
6966         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6967         struct anon *oap, *ap;
6968         ssize_t pd;
6969         size_t page;
6970         struct vnode *vp1, *vp2;
6971         u_offset_t off1, off2;
6972         struct anon_map *amp;
6973 
6974         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
6975         ASSERT(AS_WRITE_HELD(seg->s_as) ||
6976             SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
6977 
6978         if (addr + delta < seg->s_base ||
6979             addr + delta >= (seg->s_base + seg->s_size))
6980                 return (-1);            /* exceeded segment bounds */
6981 
6982         pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */
6983         page = seg_page(seg, addr);
6984 
6985         /*
6986          * Check to see if either of the pages addr or addr + delta
6987          * has advice set that prevents klustering (if MADV_RANDOM advice
6988          * is set for the entire segment, or MADV_SEQUENTIAL is set and delta
6989          * is negative).
6990          */
6991         if (svd->advice == MADV_RANDOM ||
6992             (svd->advice == MADV_SEQUENTIAL && delta < 0))
6993                 return (-1);
6994         else if (svd->pageadvice && svd->vpage) {
6995                 struct vpage *bvpp, *evpp;
6996 
6997                 bvpp = &svd->vpage[page];
6998                 evpp = &svd->vpage[page + pd];
6999                 if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
7000                     (VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0))
7001                         return (-1);
7002                 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
7003                     VPP_ADVICE(evpp) == MADV_RANDOM)
7004                         return (-1);
7005         }
7006 
7007         if (svd->type == MAP_SHARED)
7008                 return (0);             /* shared mapping - all ok */
7009 
7010         if ((amp = svd->amp) == NULL)
7011                 return (0);             /* off original vnode */
7012 
7013         page += svd->anon_index;
7014 
7015         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7016 
7017         oap = anon_get_ptr(amp->ahp, page);
7018         ap = anon_get_ptr(amp->ahp, page + pd);
7019 
7020         ANON_LOCK_EXIT(&amp->a_rwlock);
7021 
7022         if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
7023                 return (-1);            /* one with and one without an anon */
7024         }
7025 
7026         if (oap == NULL) {              /* implies that ap == NULL */
7027                 return (0);             /* off original vnode */
7028         }
7029 
7030         /*
7031          * Now we know we have two anon pointers - check to
7032          * see if they happen to be properly allocated.
7033          */
7034 
7035         /*
7036          * XXX We cheat here and don't lock the anon slots. We can't because
7037          * we may have been called from the anon layer which might already
7038          * have locked them. We are holding a refcnt on the slots so they
7039          * can't disappear. The worst that will happen is we'll get the wrong
7040          * names (vp, off) for the slots and make a poor klustering decision.
7041          */
7042         swap_xlate(ap, &vp1, &off1);
7043         swap_xlate(oap, &vp2, &off2);
7044 
7045 
7046         if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
7047                 return (-1);
7048         return (0);
7049 }
7050 
7051 /*
7052  * Synchronize primary storage cache with real object in virtual memory.
7053  *
7054  * XXX - Anonymous pages should not be sync'ed out at all.
7055  */
7056 static int
7057 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
7058 {
7059         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7060         struct vpage *vpp;
7061         page_t *pp;
7062         u_offset_t offset;
7063         struct vnode *vp;
7064         u_offset_t off;
7065         caddr_t eaddr;
7066         int bflags;
7067         int err = 0;
7068         int segtype;
7069         int pageprot;
7070         int prot;
7071         ulong_t anon_index;
7072         struct anon_map *amp;
7073         struct anon *ap;
7074         anon_sync_obj_t cookie;
7075 
7076         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
7077 
7078         SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7079 
7080         if (svd->softlockcnt > 0) {
7081                 /*
7082                  * If this is a shared segment, a nonzero softlockcnt
7083                  * means locked pages are still in use.
7084                  */
7085                 if (svd->type == MAP_SHARED) {
7086                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7087                         return (EAGAIN);
7088                 }
7089 
7090                 /*
7091                  * Flush all pages from the seg cache, otherwise
7092                  * we may deadlock in swap_putpage for a B_INVAL
7093                  * page (4175402).
7094                  *
7095                  * Even if we grab the segvn WRITER's lock
7096                  * here, there might be another thread which could've
7097                  * successfully performed a lookup/insert just before
7098                  * we acquired the lock here.  So grabbing either
7099                  * lock here is of little use.  Until we devise
7100                  * a strategy at the upper layers to solve the
7101                  * synchronization issues completely, we expect
7102                  * applications to handle this appropriately.
7103                  */
7104                 segvn_purge(seg);
7105                 if (svd->softlockcnt > 0) {
7106                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7107                         return (EAGAIN);
7108                 }
7109         } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7110             svd->amp->a_softlockcnt > 0) {
7111                 /*
7112                  * Try to purge this amp's entries from pcache. It will
7113                  * succeed only if other segments that share the amp have no
7114                  * outstanding softlock's.
7115                  */
7116                 segvn_purge(seg);
7117                 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
7118                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7119                         return (EAGAIN);
7120                 }
7121         }
7122 
7123         vpp = svd->vpage;
7124         offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7125         bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
7126             ((flags & MS_INVALIDATE) ? B_INVAL : 0);
7127 
7128         if (attr) {
7129                 pageprot = attr & ~(SHARED|PRIVATE);
7130                 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;
7131 
7132                 /*
7133                  * We are done if the segment types don't match
7134                  * or if we have segment level protections and
7135                  * they don't match.
7136                  */
7137                 if (svd->type != segtype) {
7138                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7139                         return (0);
7140                 }
7141                 if (vpp == NULL) {
7142                         if (svd->prot != pageprot) {
7143                                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7144                                 return (0);
7145                         }
7146                         prot = svd->prot;
7147                 } else
7148                         vpp = &svd->vpage[seg_page(seg, addr)];
7149 
7150         } else if (svd->vp && svd->amp == NULL &&
7151             (flags & MS_INVALIDATE) == 0) {
7152 
7153                 /*
7154                  * No attributes, no anonymous pages, and the MS_INVALIDATE
7155                  * flag is not set; just use one big request.
7156                  */
7157                 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
7158                     bflags, svd->cred, NULL);
7159                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7160                 return (err);
7161         }
7162 
7163         if ((amp = svd->amp) != NULL)
7164                 anon_index = svd->anon_index + seg_page(seg, addr);
7165 
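             /*
              * Walk the range one page at a time, resolving each page to its
              * real name (vp, off): the anon slot's swap backing if a slot
              * exists, otherwise the segment's backing vnode.
              */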
7166         for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
7167                 ap = NULL;
7168                 if (amp != NULL) {
7169                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7170                         anon_array_enter(amp, anon_index, &cookie);
7171                         ap = anon_get_ptr(amp->ahp, anon_index++);
7172                         if (ap != NULL) {
7173                                 swap_xlate(ap, &vp, &off);
7174                         } else {
7175                                 vp = svd->vp;
7176                                 off = offset;
7177                         }
7178                         anon_array_exit(&cookie);
7179                         ANON_LOCK_EXIT(&amp->a_rwlock);
7180                 } else {
7181                         vp = svd->vp;
7182                         off = offset;
7183                 }
7184                 offset += PAGESIZE;
7185 
7186                 if (vp == NULL)         /* untouched zfod page */
7187                         continue;
7188 
7189                 if (attr) {
7190                         if (vpp) {
7191                                 prot = VPP_PROT(vpp);
7192                                 vpp++;
7193                         }
7194                         if (prot != pageprot) {
7195                                 continue;
7196                         }
7197                 }
7198 
7199                 /*
7200                  * See if any of these pages are locked -- if so, then we
7201                  * will have to truncate an invalidate request at the first
7202                  * locked one.  We don't need the page_struct_lock to test
7203                  * as this is only advisory; even if we acquire it someone
7204                  * might race in and lock the page after we unlock and
7205                  * before the PUTPAGE, in which case PUTPAGE does nothing.
7206                  */
7207                 if (flags & MS_INVALIDATE) {
7208                         if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
7209                                 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
7210                                         page_unlock(pp);
7211                                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7212                                         return (EBUSY);
7213                                 }
7214                                 if (ap != NULL && pp->p_szc != 0 &&
7215                                     page_tryupgrade(pp)) {
7216                                         if (pp->p_lckcnt == 0 &&
7217                                             pp->p_cowcnt == 0) {
7218                                                 /*
7219                                                  * swapfs VN_DISPOSE() won't
7220                                                  * invalidate large pages,
7221                                                  * so attempt to demote.
7222                                                  * XXX we can't help it if it
7223                                                  * fails, but for swapfs
7224                                                  * pages it is no big deal.
7225                                                  */
7226                                                 (void) page_try_demote_pages(
7227                                                     pp);
7228                                         }
7229                                 }
7230                                 page_unlock(pp);
7231                         }
7232                 } else if (svd->type == MAP_SHARED && amp != NULL) {
7233                         /*
7234                          * Avoid writing ISM's large pages out to disk, as
7235                          * segspt_free_pages() relies on the an_pvp of such
7236                          * pages' anon slots being NULL.
7237                          */
7238 
7239                         ASSERT(svd->vp == NULL);
7240                         /*
7241                          * swapfs uses page_lookup_nowait if not freeing or
7242                          * invalidating and skips a page if
7243                          * page_lookup_nowait returns NULL.
7244                          */
7245                         pp = page_lookup_nowait(vp, off, SE_SHARED);
7246                         if (pp == NULL) {
7247                                 continue;
7248                         }
7249                         if (pp->p_szc != 0) {
7250                                 page_unlock(pp);
7251                                 continue;
7252                         }
7253 
7254                         /*
7255                          * Note ISM pages are created large so (vp, off)'s
7256                          * page cannot suddenly become large after we unlock
7257                          * pp.
7258                          */
7259                         page_unlock(pp);
7260                 }
7261                 /*
7262                  * XXX - Should ultimately try to kluster
7263                  * calls to VOP_PUTPAGE() for performance.
7264                  */
7265                 VN_HOLD(vp);
7266                 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
7267                     (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
7268                     svd->cred, NULL);
7269 
7270                 VN_RELE(vp);
7271                 if (err)
7272                         break;
7273         }
7274         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7275         return (err);
7276 }
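
/*
 * A minimal userland sketch (hypothetical, not compiled here) of how the
 * loop above is typically reached and how its EBUSY result surfaces:
 * msync(3C) with MS_INVALIDATE fails as a whole once a locked page is
 * found in the range.
 *
 *	#include <sys/mman.h>
 *	#include <errno.h>
 *
 *	int
 *	invalidate_range(caddr_t addr, size_t len)
 *	{
 *		if (msync(addr, len, MS_INVALIDATE) == -1 && errno == EBUSY)
 *			return (EBUSY);	// locked pages in the range
 *		return (0);
 *	}
 */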
7277 
7278 /*
7279  * Determine if we have data corresponding to pages in the
7280  * primary storage virtual memory cache (i.e., "in core").
7281  */
7282 static size_t
7283 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
7284 {
7285         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7286         struct vnode *vp, *avp;
7287         u_offset_t offset, aoffset;
7288         size_t p, ep;
7289         int ret;
7290         struct vpage *vpp;
7291         page_t *pp;
7292         uint_t start;
7293         struct anon_map *amp;           /* XXX - for locknest */
7294         struct anon *ap;
7295         uint_t attr;
7296         anon_sync_obj_t cookie;
7297 
7298         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
7299 
7300         SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7301         if (svd->amp == NULL && svd->vp == NULL) {
7302                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7303                 bzero(vec, btopr(len));
7304                 return (len);   /* no anonymous pages created yet */
7305         }
7306 
7307         p = seg_page(seg, addr);
7308         ep = seg_page(seg, addr + len);
7309         start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;
7310 
7311         amp = svd->amp;
7312         for (; p < ep; p++, addr += PAGESIZE) {
7313                 vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
7314                 ret = start;
7315                 ap = NULL;
7316                 avp = NULL;
7317                 /* Grab the vnode/offset for the anon slot */
7318                 if (amp != NULL) {
7319                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7320                         anon_array_enter(amp, svd->anon_index + p, &cookie);
7321                         ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
7322                         if (ap != NULL) {
7323                                 swap_xlate(ap, &avp, &aoffset);
7324                         }
7325                         anon_array_exit(&cookie);
7326                         ANON_LOCK_EXIT(&amp->a_rwlock);
7327                 }
7328                 if ((avp != NULL) && page_exists(avp, aoffset)) {
7329                         /* A page exists for the anon slot */
7330                         ret |= SEG_PAGE_INCORE;
7331 
7332                         /*
7333                          * If the page is mapped and writable, flag it anon.
7334                          */
7335                         attr = (uint_t)0;
7336                         if ((hat_getattr(seg->s_as->a_hat, addr,
7337                             &attr) != -1) && (attr & PROT_WRITE)) {
7338                                 ret |= SEG_PAGE_ANON;
7339                         }
7340                         /*
7341                          * Don't get page_struct lock for lckcnt and cowcnt,
7342                          * since this is purely advisory.
7343                          */
7344                         if ((pp = page_lookup_nowait(avp, aoffset,
7345                             SE_SHARED)) != NULL) {
7346                                 if (pp->p_lckcnt)
7347                                         ret |= SEG_PAGE_SOFTLOCK;
7348                                 if (pp->p_cowcnt)
7349                                         ret |= SEG_PAGE_HASCOW;
7350                                 page_unlock(pp);
7351                         }
7352                 }
7353 
7354                 /* Gather vnode statistics */
7355                 vp = svd->vp;
7356                 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7357 
7358                 if (vp != NULL) {
7359                         /*
7360                          * Try to obtain a "shared" lock on the page
7361                          * without blocking.  If this fails, determine
7362                          * if the page is in memory.
7363                          */
7364                         pp = page_lookup_nowait(vp, offset, SE_SHARED);
7365                         if ((pp == NULL) && (page_exists(vp, offset))) {
7366                                 /* Page is incore, and is named */
7367                                 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7368                         }
7369                         /*
7370                          * Don't get page_struct lock for lckcnt and cowcnt,
7371                          * since this is purely advisory.
7372                          */
7373                         if (pp != NULL) {
7374                                 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7375                                 if (pp->p_lckcnt)
7376                                         ret |= SEG_PAGE_SOFTLOCK;
7377                                 if (pp->p_cowcnt)
7378                                         ret |= SEG_PAGE_HASCOW;
7379                                 page_unlock(pp);
7380                         }
7381                 }
7382 
7383                 /* Gather virtual page information */
7384                 if (vpp) {
7385                         if (VPP_ISPPLOCK(vpp))
7386                                 ret |= SEG_PAGE_LOCKED;
7387                         vpp++;
7388                 }
7389 
7390                 *vec++ = (char)ret;
7391         }
7392         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7393         return (len);
7394 }
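
/*
 * A minimal sketch (hypothetical helper, not part of this file) of how a
 * caller can decode the per-page status vector filled in above.  Each
 * byte carries the SEG_PAGE_* flags gathered for one page; a mincore(2)
 * style consumer only needs the resident bit.
 *
 *	static size_t
 *	count_resident(const char *vec, size_t npages)
 *	{
 *		size_t i, n = 0;
 *
 *		for (i = 0; i < npages; i++) {
 *			if (vec[i] & SEG_PAGE_INCORE)
 *				n++;	// page has data in core
 *		}
 *		return (n);
 *	}
 */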
7395 
7396 /*
7397  * Statement for p_cowcnts/p_lckcnts.
7398  *
7399  * p_cowcnt is updated while mlocking/munlocking a MAP_PRIVATE and PROT_WRITE
7400  * region, irrespective of the following factors or anything else:
7401  *
7402  *      (1) whether anon slots are populated
7403  *      (2) whether cow has been broken
7404  *      (3) whether the refcnt on the ap is 1 or greater than 1
7405  *
7406  * If the region is not both MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated
7407  * during mlock and munlock instead.
7408  *
7409  *
7410  * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
7411  *
7412  *      if vpage has PROT_WRITE
7413  *              transfer cowcnt on the oldpage -> cowcnt on the newpage
7414  *      else
7415  *              transfer lckcnt on the oldpage -> lckcnt on the newpage
7416  *
7417  *      During copy-on-write, decrement p_cowcnt on the oldpage and increment
7418  *      p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
7419  *
7420  *      We may also break COW if softlocking on read access in the physio case.
7421  *      In this case, vpage may not have PROT_WRITE. So, we need to decrement
7422  *      p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
7423  *      vpage doesn't have PROT_WRITE.
7424  *
7425  *
7426  * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
7427  *
7428  *      If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
7429  *      increment p_lckcnt by calling page_subclaim() which takes care of
7430  *      availrmem accounting and p_lckcnt overflow.
7431  *
7432  *      If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
7433  *      increment p_cowcnt by calling page_addclaim() which takes care of
7434  *      availrmem availability and p_cowcnt overflow.
7435  */
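
/*
 * A minimal sketch (hypothetical, not compiled here) of the copy-on-write
 * transfer rule stated above.  page_pp_useclaim() moves the lock
 * accounting from the old page to the new page; the vpage protections
 * select whether p_cowcnt or p_lckcnt is the count that moves.
 *
 *	// prot_write is (VPP_PROT(vpp) & PROT_WRITE) != 0
 *	static void
 *	cow_transfer_claim(page_t *opp, page_t *npp, int prot_write)
 *	{
 *		// writable vpage: cowcnt moves; otherwise lckcnt moves
 *		page_pp_useclaim(opp, npp, prot_write ? 1 : 0);
 *	}
 */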
7436 
7437 /*
7438  * Lock down (or unlock) pages mapped by this segment.
7439  *
7440  * XXX only creates PAGESIZE pages if anon slots are not initialized.
7441  * At fault time they will be relocated into larger pages.
7442  */
7443 static int
7444 segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
7445     int attr, int op, ulong_t *lockmap, size_t pos)
7446 {
7447         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7448         struct vpage *vpp;
7449         struct vpage *evp;
7450         page_t *pp;
7451         u_offset_t offset;
7452         u_offset_t off;
7453         int segtype;
7454         int pageprot;
7455         int claim;
7456         struct vnode *vp;
7457         ulong_t anon_index;
7458         struct anon_map *amp;
7459         struct anon *ap;
7460         struct vattr va;
7461         anon_sync_obj_t cookie;
7462         struct kshmid *sp = NULL;
7463         struct proc     *p = curproc;
7464         kproject_t      *proj = NULL;
7465         int chargeproc = 1;
7466         size_t locked_bytes = 0;
7467         size_t unlocked_bytes = 0;
7468         int err = 0;
7469 
7470         /*
7471          * Hold the write lock on the address space because we may split
7472          * or concatenate segments.
7473          */
7474         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
7475 
7476         /*
7477          * If this is a shm, use the shm's project and zone; else use the
7478          * project and zone of the calling process.
7479          */
7480 
7481         /* Determine if this segment backs a sysV shm */
7482         if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7483                 ASSERT(svd->type == MAP_SHARED);
7484                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
7485                 sp = svd->amp->a_sp;
7486                 proj = sp->shm_perm.ipc_proj;
7487                 chargeproc = 0;
7488         }
7489 
7490         SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7491         if (attr) {
7492                 pageprot = attr & ~(SHARED|PRIVATE);
7493                 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE;
7494 
7495                 /*
7496                  * We are done if the segment types don't match
7497                  * or if we have segment level protections and
7498                  * they don't match.
7499                  */
7500                 if (svd->type != segtype) {
7501                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7502                         return (0);
7503                 }
7504                 if (svd->pageprot == 0 && svd->prot != pageprot) {
7505                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7506                         return (0);
7507                 }
7508         }
7509 
7510         if (op == MC_LOCK) {
7511                 if (svd->tr_state == SEGVN_TR_INIT) {
7512                         svd->tr_state = SEGVN_TR_OFF;
7513                 } else if (svd->tr_state == SEGVN_TR_ON) {
7514                         ASSERT(svd->amp != NULL);
7515                         segvn_textunrepl(seg, 0);
7516                         ASSERT(svd->amp == NULL &&
7517                             svd->tr_state == SEGVN_TR_OFF);
7518                 }
7519         }
7520 
7521         /*
7522          * If we're locking, then we must create a vpage structure if
7523          * none exists.  If we're unlocking, then check to see if there
7524          * is a vpage --  if not, then we could not have locked anything.
7525          */
7526 
7527         if ((vpp = svd->vpage) == NULL) {
7528                 if (op == MC_LOCK) {
7529                         segvn_vpage(seg);
7530                         if (svd->vpage == NULL) {
7531                                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7532                                 return (ENOMEM);
7533                         }
7534                 } else {
7535                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7536                         return (0);
7537                 }
7538         }
7539 
7540         /*
7541          * The anonymous data vector (i.e., a previously
7542          * unreferenced mapping to swap space) is allocated
7543          * lazily, by testing here for its existence.
7544          */
7545         if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
7546                 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
7547                 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
7548                 svd->amp->a_szc = seg->s_szc;
7549         }
7550 
7551         if ((amp = svd->amp) != NULL) {
7552                 anon_index = svd->anon_index + seg_page(seg, addr);
7553         }
7554 
7555         offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7556         evp = &svd->vpage[seg_page(seg, addr + len)];
7557 
7558         if (sp != NULL)
7559                 mutex_enter(&sp->shm_mlock);
7560 
7561         /* determine number of unlocked bytes in range for lock operation */
7562         if (op == MC_LOCK) {
7563 
7564                 if (sp == NULL) {
7565                         for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7566                             vpp++) {
7567                                 if (!VPP_ISPPLOCK(vpp))
7568                                         unlocked_bytes += PAGESIZE;
7569                         }
7570                 } else {
7571                         ulong_t         i_idx, i_edx;
7572                         anon_sync_obj_t i_cookie;
7573                         struct anon     *i_ap;
7574                         struct vnode    *i_vp;
7575                         u_offset_t      i_off;
7576 
7577                         /* Only count sysV pages once for locked memory */
7578                         i_edx = svd->anon_index + seg_page(seg, addr + len);
7579                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7580                         for (i_idx = anon_index; i_idx < i_edx; i_idx++) {
7581                                 anon_array_enter(amp, i_idx, &i_cookie);
7582                                 i_ap = anon_get_ptr(amp->ahp, i_idx);
7583                                 if (i_ap == NULL) {
7584                                         unlocked_bytes += PAGESIZE;
7585                                         anon_array_exit(&i_cookie);
7586                                         continue;
7587                                 }
7588                                 swap_xlate(i_ap, &i_vp, &i_off);
7589                                 anon_array_exit(&i_cookie);
7590                                 pp = page_lookup(i_vp, i_off, SE_SHARED);
7591                                 if (pp == NULL) {
7592                                         unlocked_bytes += PAGESIZE;
7593                                         continue;
7594                                 } else if (pp->p_lckcnt == 0)
7595                                         unlocked_bytes += PAGESIZE;
7596                                 page_unlock(pp);
7597                         }
7598                         ANON_LOCK_EXIT(&amp->a_rwlock);
7599                 }
7600 
7601                 mutex_enter(&p->p_lock);
7602                 err = rctl_incr_locked_mem(p, proj, unlocked_bytes,
7603                     chargeproc);
7604                 mutex_exit(&p->p_lock);
7605 
7606                 if (err) {
7607                         if (sp != NULL)
7608                                 mutex_exit(&sp->shm_mlock);
7609                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7610                         return (err);
7611                 }
7612         }
7613         /*
7614          * Loop over all pages in the range.  Process if we're locking and
7615          * page has not already been locked in this mapping; or if we're
7616          * unlocking and the page has been locked.
7617          */
7618         for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7619             vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
7620                 if ((attr == 0 || VPP_PROT(vpp) == pageprot) &&
7621                     ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) ||
7622                     (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) {
7623 
7624                         if (amp != NULL)
7625                                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7626                         /*
7627                          * If this isn't a MAP_NORESERVE segment and
7628                          * we're locking, allocate anon slots if they
7629                          * don't exist.  The page is brought in later on.
7630                          */
7631                         if (op == MC_LOCK && svd->vp == NULL &&
7632                             ((svd->flags & MAP_NORESERVE) == 0) &&
7633                             amp != NULL &&
7634                             ((ap = anon_get_ptr(amp->ahp, anon_index))
7635                             == NULL)) {
7636                                 anon_array_enter(amp, anon_index, &cookie);
7637 
7638                                 if ((ap = anon_get_ptr(amp->ahp,
7639                                     anon_index)) == NULL) {
7640                                         pp = anon_zero(seg, addr, &ap,
7641                                             svd->cred);
7642                                         if (pp == NULL) {
7643                                                 anon_array_exit(&cookie);
7644                                                 ANON_LOCK_EXIT(&amp->a_rwlock);
7645                                                 err = ENOMEM;
7646                                                 goto out;
7647                                         }
7648                                         ASSERT(anon_get_ptr(amp->ahp,
7649                                             anon_index) == NULL);
7650                                         (void) anon_set_ptr(amp->ahp,
7651                                             anon_index, ap, ANON_SLEEP);
7652                                         page_unlock(pp);
7653                                 }
7654                                 anon_array_exit(&cookie);
7655                         }
7656 
7657                         /*
7658                          * Get name for page, accounting for
7659                          * existence of private copy.
7660                          */
7661                         ap = NULL;
7662                         if (amp != NULL) {
7663                                 anon_array_enter(amp, anon_index, &cookie);
7664                                 ap = anon_get_ptr(amp->ahp, anon_index);
7665                                 if (ap != NULL) {
7666                                         swap_xlate(ap, &vp, &off);
7667                                 } else {
7668                                         if (svd->vp == NULL &&
7669                                             (svd->flags & MAP_NORESERVE)) {
7670                                                 anon_array_exit(&cookie);
7671                                                 ANON_LOCK_EXIT(&amp->a_rwlock);
7672                                                 continue;
7673                                         }
7674                                         vp = svd->vp;
7675                                         off = offset;
7676                                 }
7677                                 if (op != MC_LOCK || ap == NULL) {
7678                                         anon_array_exit(&cookie);
7679                                         ANON_LOCK_EXIT(&amp->a_rwlock);
7680                                 }
7681                         } else {
7682                                 vp = svd->vp;
7683                                 off = offset;
7684                         }
7685 
7686                         /*
7687                          * Get page frame.  It's ok if the page is
7688                          * not available when we're unlocking, as this
7689                          * may simply mean that a page we locked got
7690                          * truncated out of existence after we locked it.
7691                          *
7692                          * Invoke VOP_GETPAGE() to obtain the page struct
7693                          * since we may need to read it from disk if it's
7694                          * been paged out.
7695                          */
7696                         if (op != MC_LOCK)
7697                                 pp = page_lookup(vp, off, SE_SHARED);
7698                         else {
7699                                 page_t *pl[1 + 1];
7700                                 int error;
7701 
7702                                 ASSERT(vp != NULL);
7703 
7704                                 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
7705                                     (uint_t *)NULL, pl, PAGESIZE, seg, addr,
7706                                     S_OTHER, svd->cred, NULL);
7707 
7708                                 if (error && ap != NULL) {
7709                                         anon_array_exit(&cookie);
7710                                         ANON_LOCK_EXIT(&amp->a_rwlock);
7711                                 }
7712 
7713                                 /*
7714                                  * If the error is EDEADLK then we must bounce
7715                                  * up and drop all vm subsystem locks and then
7716                                  * retry the operation later.
7717                                  * This behavior is a temporary measure because
7718                                  * ufs/sds logging is badly designed and will
7719                                  * deadlock if we don't allow this bounce to
7720                                  * happen.  The real solution is to re-design
7721                                  * the logging code to work properly.  See bug
7722                                  * 4125102 for details of the problem.
7723                                  */
7724                                 if (error == EDEADLK) {
7725                                         err = error;
7726                                         goto out;
7727                                 }
7728                                 /*
7729                                  * Quit if we fail to fault in the page.  Treat
7730                                  * the failure as an error, unless the addr
7731                                  * is mapped beyond the end of a file.
7732                                  */
7733                                 if (error && svd->vp) {
7734                                         va.va_mask = AT_SIZE;
7735                                         if (VOP_GETATTR(svd->vp, &va, 0,
7736                                             svd->cred, NULL) != 0) {
7737                                                 err = EIO;
7738                                                 goto out;
7739                                         }
7740                                         if (btopr(va.va_size) >=
7741                                             btopr(off + 1)) {
7742                                                 err = EIO;
7743                                                 goto out;
7744                                         }
7745                                         goto out;
7746 
7747                                 } else if (error) {
7748                                         err = EIO;
7749                                         goto out;
7750                                 }
7751                                 pp = pl[0];
7752                                 ASSERT(pp != NULL);
7753                         }
7754 
7755                         /*
7756                          * See Statement at the beginning of this routine.
7757                          *
7758                          * claim is always set if MAP_PRIVATE and PROT_WRITE
7759                          * irrespective of the following factors:
7760                          *
7761                          * (1) anon slots are populated or not
7762                          * (2) cow is broken or not
7763                          * (3) refcnt on ap is 1 or greater than 1
7764                          *
7765                          * See 4140683 for details
7766                          */
7767                         claim = ((VPP_PROT(vpp) & PROT_WRITE) &&
7768                             (svd->type == MAP_PRIVATE));
7769 
7770                         /*
7771                          * Perform the page-level operation appropriate to
7772                          * the request.  If locking, undo the SOFTLOCK
7773                          * performed to bring the page into memory
7774                          * after setting the lock.  If unlocking,
7775                          * and no page was found, account for the claim
7776                          * separately.
7777                          */
7778                         if (op == MC_LOCK) {
7779                                 int ret = 1;    /* Assume success */
7780 
7781                                 ASSERT(!VPP_ISPPLOCK(vpp));
7782 
7783                                 ret = page_pp_lock(pp, claim, 0);
7784                                 if (ap != NULL) {
7785                                         if (ap->an_pvp != NULL) {
7786                                                 anon_swap_free(ap, pp);
7787                                         }
7788                                         anon_array_exit(&cookie);
7789                                         ANON_LOCK_EXIT(&amp->a_rwlock);
7790                                 }
7791                                 if (ret == 0) {
7792                                         /* locking page failed */
7793                                         page_unlock(pp);
7794                                         err = EAGAIN;
7795                                         goto out;
7796                                 }
7797                                 VPP_SETPPLOCK(vpp);
7798                                 if (sp != NULL) {
7799                                         if (pp->p_lckcnt == 1)
7800                                                 locked_bytes += PAGESIZE;
7801                                 } else
7802                                         locked_bytes += PAGESIZE;
7803 
7804                                 if (lockmap != (ulong_t *)NULL)
7805                                         BT_SET(lockmap, pos);
7806 
7807                                 page_unlock(pp);
7808                         } else {
7809                                 ASSERT(VPP_ISPPLOCK(vpp));
7810                                 if (pp != NULL) {
7811                                         /* sysV pages should be locked */
7812                                         ASSERT(sp == NULL || pp->p_lckcnt > 0);
7813                                         page_pp_unlock(pp, claim, 0);
7814                                         if (sp != NULL) {
7815                                                 if (pp->p_lckcnt == 0)
7816                                                         unlocked_bytes
7817                                                             += PAGESIZE;
7818                                         } else
7819                                                 unlocked_bytes += PAGESIZE;
7820                                         page_unlock(pp);
7821                                 } else {
7822                                         ASSERT(sp == NULL);
7823                                         unlocked_bytes += PAGESIZE;
7824                                 }
7825                                 VPP_CLRPPLOCK(vpp);
7826                         }
7827                 }
7828         }
7829 out:
7830         if (op == MC_LOCK) {
7831                 /* Credit back bytes that did not get locked */
7832                 if ((unlocked_bytes - locked_bytes) > 0) {
7833                         if (proj == NULL)
7834                                 mutex_enter(&p->p_lock);
7835                         rctl_decr_locked_mem(p, proj,
7836                             (unlocked_bytes - locked_bytes), chargeproc);
7837                         if (proj == NULL)
7838                                 mutex_exit(&p->p_lock);
7839                 }
7840 
7841         } else {
7842                 /* Account bytes that were unlocked */
7843                 if (unlocked_bytes > 0) {
7844                         if (proj == NULL)
7845                                 mutex_enter(&p->p_lock);
7846                         rctl_decr_locked_mem(p, proj, unlocked_bytes,
7847                             chargeproc);
7848                         if (proj == NULL)
7849                                 mutex_exit(&p->p_lock);
7850                 }
7851         }
7852         if (sp != NULL)
7853                 mutex_exit(&sp->shm_mlock);
7854         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7855 
7856         return (err);
7857 }
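
/*
 * A minimal userland sketch (hypothetical, not compiled here): the usual
 * path into segvn_lockop() is mlock(3C)/munlock(3C), which reach this
 * routine via as_ctl() with MC_LOCK/MC_UNLOCK.  A page_pp_lock() failure
 * above surfaces to the caller as EAGAIN.
 *
 *	#include <sys/mman.h>
 *
 *	int
 *	pin_buffer(caddr_t addr, size_t len)
 *	{
 *		if (mlock(addr, len) == -1)
 *			return (-1);	// e.g. errno == EAGAIN
 *		// ... perform I/O on the pinned buffer ...
 *		return (munlock(addr, len));
 *	}
 */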
7858 
7859 /*
7860  * Set advice from user for specified pages
7861  * There are 9 types of advice:
7862  *      MADV_NORMAL     - Normal (default) behavior (whatever that is)
7863  *      MADV_RANDOM     - Random page references
7864  *                              do not allow readahead or 'klustering'
7865  *      MADV_SEQUENTIAL - Sequential page references
7866  *                              Pages previous to the one currently being
7867  *                              accessed (determined by fault) are 'not needed'
7868  *                              and are freed immediately
7869  *      MADV_WILLNEED   - Pages are likely to be used (fault ahead in mctl)
7870  *      MADV_DONTNEED   - Pages are not needed (synced out in mctl)
7871  *      MADV_FREE       - Contents can be discarded
7872  *      MADV_ACCESS_DEFAULT - Default access
7873  *      MADV_ACCESS_LWP - Next LWP will access heavily
7874  *      MADV_ACCESS_MANY - Many LWPs or processes will access heavily
7875  */
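/*
 * A minimal userland sketch (hypothetical, not compiled here): these
 * advice values normally arrive here via madvise(3C), or memcntl(2)
 * with MC_ADVISE.  For example, to hint a single front-to-back scan of
 * a mapped file:
 *
 *	#include <sys/mman.h>
 *
 *	(void) madvise(addr, len, MADV_SEQUENTIAL);
 */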
7876 static int
7877 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
7878 {
7879         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7880         size_t page;
7881         int err = 0;
7882         int already_set;
7883         struct anon_map *amp;
7884         ulong_t anon_index;
7885         struct seg *next;
7886         lgrp_mem_policy_t policy;
7887         struct seg *prev;
7888         struct vnode *vp;
7889 
7890         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
7891 
7892         /*
7893          * In the case of MADV_FREE, we won't be modifying any segment
7894          * private data structures, so we only need to grab the reader lock.
7895          */
7896         if (behav != MADV_FREE) {
7897                 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7898                 if (svd->tr_state != SEGVN_TR_OFF) {
7899                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7900                         return (0);
7901                 }
7902         } else {
7903                 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7904         }
7905 
7906         /*
7907          * Large pages are assumed to be turned on only when accesses to the
7908          * segment's address range have spatial and temporal locality. That
7909          * justifies ignoring MADV_SEQUENTIAL for large page segments.
7910          * Also, ignore advice affecting lgroup memory allocation if we
7911          * don't need to do lgroup optimizations on this system.
7912          */
7913 
7914         if ((behav == MADV_SEQUENTIAL &&
7915             (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
7916             (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
7917             behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
7918                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7919                 return (0);
7920         }
7921 
7922         if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
7923             behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
7924                 /*
7925                  * Since we are going to unload hat mappings
7926                  * we first have to flush the cache. Otherwise
7927                  * this might lead to system panic if another
7928                  * thread is doing physio on the range whose
7929                  * mappings are unloaded by madvise(3C).
7930                  */
7931                 if (svd->softlockcnt > 0) {
7932                         /*
7933                          * If this is a shared segment, a nonzero
7934                          * softlockcnt means locked pages are still in use.
7935                          */
7936                         if (svd->type == MAP_SHARED) {
7937                                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7938                                 return (EAGAIN);
7939                         }
7940                         /*
7941                          * Since we do have the segvn writers lock,
7942                          * nobody can fill the cache with entries
7943                          * belonging to this seg during the purge.
7944                          * The flush either succeeds or we still
7945                          * have pending I/Os. In the latter case,
7946                          * madvise(3C) fails.
7947                          */
7948                         segvn_purge(seg);
7949                         if (svd->softlockcnt > 0) {
7950                                 /*
7951                                  * Since madvise(3C) is advisory and
7952                                  * it's not part of UNIX98, madvise(3C)
7953                                  * failure here doesn't cause any hardship.
7954                                  * Note that we don't block in "as" layer.
7955                                  */
7956                                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7957                                 return (EAGAIN);
7958                         }
7959                 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7960                     svd->amp->a_softlockcnt > 0) {
7961                         /*
7962                          * Try to purge this amp's entries from pcache. It
7963                          * will succeed only if other segments that share the
7964                          * amp have no outstanding softlocks.
7965                          */
7966                         segvn_purge(seg);
7967                 }
7968         }
7969 
7970         amp = svd->amp;
7971         vp = svd->vp;
7972         if (behav == MADV_FREE) {
7973                 /*
7974                  * MADV_FREE is not supported for segments with an
7975                  * underlying object; if the anonmap is NULL, anon slots
7976                  * are not yet populated and there is nothing for
7977                  * us to do. As MADV_FREE is advisory, we don't
7978                  * return an error in either case.
7979                  */
7980                 if (vp != NULL || amp == NULL) {
7981                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7982                         return (0);
7983                 }
7984 
7985                 segvn_purge(seg);
7986 
7987                 page = seg_page(seg, addr);
7988                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7989                 anon_disclaim(amp, svd->anon_index + page, len);
7990                 ANON_LOCK_EXIT(&amp->a_rwlock);
7991                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7992                 return (0);
7993         }
7994 
7995         /*
7996          * If the advice is to be applied to the entire segment,
7997          * use the advice field in the segvn_data structure;
7998          * otherwise use the appropriate vpage entry.
7999          */
8000         if ((addr == seg->s_base) && (len == seg->s_size)) {
8001                 switch (behav) {
8002                 case MADV_ACCESS_LWP:
8003                 case MADV_ACCESS_MANY:
8004                 case MADV_ACCESS_DEFAULT:
8005                         /*
8006                          * Set memory allocation policy for this segment
8007                          */
8008                         policy = lgrp_madv_to_policy(behav, len, svd->type);
8009                         if (svd->type == MAP_SHARED)
8010                                 already_set = lgrp_shm_policy_set(policy, amp,
8011                                     svd->anon_index, vp, svd->offset, len);
8012                         else {
8013                                 /*
8014                                  * For private memory, need writers lock on
8015                                  * address space because the segment may be
8016                                  * split or concatenated when changing policy
8017                                  */
8018                                 if (AS_READ_HELD(seg->s_as)) {
8019                                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8020                                         return (IE_RETRY);
8021                                 }
8022 
8023                                 already_set = lgrp_privm_policy_set(policy,
8024                                     &svd->policy_info, len);
8025                         }
8026 
8027                         /*
8028                          * If policy set already and it shouldn't be reapplied,
8029                          * don't do anything.
8030                          */
8031                         if (already_set &&
8032                             !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8033                                 break;
8034 
8035                         /*
8036                          * Mark any existing pages in given range for
8037                          * migration
8038                          */
8039                         page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8040                             vp, svd->offset, 1);
8041 
8042                         /*
8043                          * If same policy set already or this is a shared
8044                          * memory segment, don't need to try to concatenate
8045                          * segment with adjacent ones.
8046                          */
8047                         if (already_set || svd->type == MAP_SHARED)
8048                                 break;
8049 
8050                         /*
8051                          * Try to concatenate this segment with previous
8052                          * one and next one, since we changed policy for
8053                          * this one and it may be compatible with adjacent
8054                          * ones now.
8055                          */
8056                         prev = AS_SEGPREV(seg->s_as, seg);
8057                         next = AS_SEGNEXT(seg->s_as, seg);
8058 
8059                         if (next && next->s_ops == &segvn_ops &&
8060                             addr + len == next->s_base)
8061                                 (void) segvn_concat(seg, next, 1);
8062 
8063                         if (prev && prev->s_ops == &segvn_ops &&
8064                             addr == prev->s_base + prev->s_size) {
8065                                 /*
8066                                  * Drop lock for private data of current
8067                                  * segment before concatenating (deleting) it
8068                                  * and return IE_REATTACH to tell as_ctl() that
8069                                  * current segment has changed
8070                                  */
8071                                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8072                                 if (!segvn_concat(prev, seg, 1))
8073                                         err = IE_REATTACH;
8074 
8075                                 return (err);
8076                         }
8077                         break;
8078 
8079                 case MADV_SEQUENTIAL:
8080                         /*
8081                          * unloading the mappings guarantees
8082                          * detection in segvn_fault
8083                          */
8084                         ASSERT(seg->s_szc == 0);
8085                         ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8086                         hat_unload(seg->s_as->a_hat, addr, len,
8087                             HAT_UNLOAD);
8088                         /* FALLTHROUGH */
8089                 case MADV_NORMAL:
8090                 case MADV_RANDOM:
8091                         svd->advice = (uchar_t)behav;
8092                         svd->pageadvice = 0;
8093                         break;
8094                 case MADV_WILLNEED:     /* handled in memcntl */
8095                 case MADV_DONTNEED:     /* handled in memcntl */
8096                 case MADV_FREE:         /* handled above */
8097                         break;
8098                 default:
8099                         err = EINVAL;
8100                 }
8101         } else {
8102                 caddr_t                 eaddr;
8103                 struct seg              *new_seg;
8104                 struct segvn_data       *new_svd;
8105                 u_offset_t              off;
8106                 caddr_t                 oldeaddr;
8107 
8108                 page = seg_page(seg, addr);
8109 
8110                 segvn_vpage(seg);
8111                 if (svd->vpage == NULL) {
8112                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8113                         return (ENOMEM);
8114                 }
8115 
8116                 switch (behav) {
8117                         struct vpage *bvpp, *evpp;
8118 
8119                 case MADV_ACCESS_LWP:
8120                 case MADV_ACCESS_MANY:
8121                 case MADV_ACCESS_DEFAULT:
8122                         /*
8123                          * Set memory allocation policy for portion of this
8124                          * segment
8125                          */
8126 
8127                         /*
8128                          * Align address and length of advice to page
8129                          * boundaries for large pages
8130                          */
8131                         if (seg->s_szc != 0) {
8132                                 size_t  pgsz;
8133 
8134                                 pgsz = page_get_pagesize(seg->s_szc);
8135                                 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
8136                                 len = P2ROUNDUP(len, pgsz);
8137                         }
8138 
8139                         /*
8140                          * Check to see whether policy is set already
8141                          */
8142                         policy = lgrp_madv_to_policy(behav, len, svd->type);
8143 
8144                         anon_index = svd->anon_index + page;
8145                         off = svd->offset + (uintptr_t)(addr - seg->s_base);
8146 
8147                         if (svd->type == MAP_SHARED)
8148                                 already_set = lgrp_shm_policy_set(policy, amp,
8149                                     anon_index, vp, off, len);
8150                         else
8151                                 already_set =
8152                                     (policy == svd->policy_info.mem_policy);
8153 
8154                         /*
8155                          * If policy set already and it shouldn't be reapplied,
8156                          * don't do anything.
8157                          */
8158                         if (already_set &&
8159                             !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8160                                 break;
8161 
8162                         /*
8163                          * For private memory, need writers lock on
8164                          * address space because the segment may be
8165                          * split or concatenated when changing policy
8166                          */
8167                         if (svd->type == MAP_PRIVATE &&
8168                             AS_READ_HELD(seg->s_as)) {
8169                                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8170                                 return (IE_RETRY);
8171                         }
8172 
8173                         /*
8174                          * Mark any existing pages in given range for
8175                          * migration
8176                          */
8177                         page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8178                             vp, svd->offset, 1);
8179 
8180                         /*
8181                          * Don't need to try to split or concatenate
8182                          * segments, since policy is same or this is a shared
8183                          * memory segment
8184                          */
8185                         if (already_set || svd->type == MAP_SHARED)
8186                                 break;
8187 
8188                         if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
8189                                 ASSERT(svd->amp == NULL);
8190                                 ASSERT(svd->tr_state == SEGVN_TR_OFF);
8191                                 ASSERT(svd->softlockcnt == 0);
8192                                 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
8193                                     HAT_REGION_TEXT);
8194                                 svd->rcookie = HAT_INVALID_REGION_COOKIE;
8195                         }
8196 
8197                         /*
8198                          * Split off new segment if advice only applies to a
8199                          * portion of existing segment starting in middle
8200                          */
8201                         new_seg = NULL;
8202                         eaddr = addr + len;
8203                         oldeaddr = seg->s_base + seg->s_size;
8204                         if (addr > seg->s_base) {
8205                                 /*
8206                                  * Must flush I/O page cache
8207                                  * before splitting segment
8208                                  */
8209                                 if (svd->softlockcnt > 0)
8210                                         segvn_purge(seg);
8211 
8212                                 /*
8213                                  * Split segment and return IE_REATTACH to tell
8214                                  * as_ctl() that current segment changed
8215                                  */
8216                                 new_seg = segvn_split_seg(seg, addr);
8217                                 new_svd = (struct segvn_data *)new_seg->s_data;
8218                                 err = IE_REATTACH;
8219 
8220                                 /*
8221                                  * If new segment ends where old one
8222                                  * did, try to concatenate the new
8223                                  * segment with next one.
8224                                  */
8225                                 if (eaddr == oldeaddr) {
8226                                         /*
8227                                          * Set policy for new segment
8228                                          */
8229                                         (void) lgrp_privm_policy_set(policy,
8230                                             &new_svd->policy_info,
8231                                             new_seg->s_size);
8232 
8233                                         next = AS_SEGNEXT(new_seg->s_as,
8234                                             new_seg);
8235 
8236                                         if (next &&
8237                                             next->s_ops == &segvn_ops &&
8238                                             eaddr == next->s_base)
8239                                                 (void) segvn_concat(new_seg,
8240                                                     next, 1);
8241                                 }
8242                         }
8243 
8244                         /*
8245                          * Split off end of existing segment if advice only
8246                          * applies to a portion of segment ending before
8247                          * end of the existing segment
8248                          */
8249                         if (eaddr < oldeaddr) {
8250                                 /*
8251                                  * Must flush I/O page cache
8252                                  * before splitting segment
8253                                  */
8254                                 if (svd->softlockcnt > 0)
8255                                         segvn_purge(seg);
8256 
8257                                 /*
8258                                  * If beginning of old segment was already
8259                                  * split off, use new segment to split end off
8260                                  * from.
8261                                  */
8262                                 if (new_seg != NULL && new_seg != seg) {
8263                                         /*
8264                                          * Split segment
8265                                          */
8266                                         (void) segvn_split_seg(new_seg, eaddr);
8267 
8268                                         /*
8269                                          * Set policy for new segment
8270                                          */
8271                                         (void) lgrp_privm_policy_set(policy,
8272                                             &new_svd->policy_info,
8273                                             new_seg->s_size);
8274                                 } else {
8275                                         /*
8276                                          * Split segment and return IE_REATTACH
8277                                          * to tell as_ctl() that current
8278                                          * segment changed
8279                                          */
8280                                         (void) segvn_split_seg(seg, eaddr);
8281                                         err = IE_REATTACH;
8282 
8283                                         (void) lgrp_privm_policy_set(policy,
8284                                             &svd->policy_info, seg->s_size);
8285 
8286                                         /*
8287                                          * If new segment starts where old one
8288                                          * did, try to concatenate it with
8289                                          * previous segment.
8290                                          */
8291                                         if (addr == seg->s_base) {
8292                                                 prev = AS_SEGPREV(seg->s_as,
8293                                                     seg);
8294 
8295                                                 /*
8296                                                  * Drop lock for private data
8297                                                  * of current segment before
8298                                                  * concatenating (deleting) it
8299                                                  */
8300                                                 if (prev &&
8301                                                     prev->s_ops ==
8302                                                     &segvn_ops &&
8303                                                     addr == prev->s_base +
8304                                                     prev->s_size) {
8305                                                         SEGVN_LOCK_EXIT(
8306                                                             seg->s_as,
8307                                                             &svd->lock);
8308                                                         (void) segvn_concat(
8309                                                             prev, seg, 1);
8310                                                         return (err);
8311                                                 }
8312                                         }
8313                                 }
8314                         }
8315                         break;
8316                 case MADV_SEQUENTIAL:
8317                         ASSERT(seg->s_szc == 0);
8318                         ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8319                         hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8320                         /* FALLTHROUGH */
8321                 case MADV_NORMAL:
8322                 case MADV_RANDOM:
8323                         bvpp = &svd->vpage[page];
8324                         evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8325                         for (; bvpp < evpp; bvpp++)
8326                                 VPP_SETADVICE(bvpp, behav);
8327                         svd->advice = MADV_NORMAL;
8328                         break;
8329                 case MADV_WILLNEED:     /* handled in memcntl */
8330                 case MADV_DONTNEED:     /* handled in memcntl */
8331                 case MADV_FREE:         /* handled above */
8332                         break;
8333                 default:
8334                         err = EINVAL;
8335                 }
8336         }
8337         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8338         return (err);
8339 }
8340 
8341 /*
8342  * There is one kind of inheritance that can be specified for pages:
8343  *
8344  *     SEGP_INH_ZERO - Pages should be zeroed in the child
8345  */
8346 static int
8347 segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8348 {
8349         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8350         struct vpage *bvpp, *evpp;
8351         size_t page;
8352         int ret = 0;
8353 
8354         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
8355 
8356         /* Can't support something we don't know about */
8357         if (behav != SEGP_INH_ZERO)
8358                 return (ENOTSUP);
8359 
8360         SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8361 
8362         /*
8363          * This must be a straightforward anonymous segment that is mapped
8364          * privately and is not backed by a vnode.
8365          */
8366         if (svd->tr_state != SEGVN_TR_OFF ||
8367             svd->type != MAP_PRIVATE ||
8368             svd->vp != NULL) {
8369                 ret = EINVAL;
8370                 goto out;
8371         }
8372 
8373         /*
8374          * If the entire segment has already been marked as inherit zero,
8375          * there is no reason to do anything else.
8376          */
8377         if (svd->svn_inz == SEGVN_INZ_ALL) {
8378                 ret = 0;
8379                 goto out;
8380         }
8381 
8382         /*
8383          * If this applies to the entire segment, simply mark it and we're done.
8384          */
8385         if ((addr == seg->s_base) && (len == seg->s_size)) {
8386                 svd->svn_inz = SEGVN_INZ_ALL;
8387                 ret = 0;
8388                 goto out;
8389         }
8390 
8391         /*
8392          * We've been asked to mark a subset of this segment as inherit zero,
8393          * therefore we need to manipulate its vpages.
8394          */
8395         if (svd->vpage == NULL) {
8396                 segvn_vpage(seg);
8397                 if (svd->vpage == NULL) {
8398                         ret = ENOMEM;
8399                         goto out;
8400                 }
8401         }
8402 
8403         svd->svn_inz = SEGVN_INZ_VPP;
8404         page = seg_page(seg, addr);
8405         bvpp = &svd->vpage[page];
8406         evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8407         for (; bvpp < evpp; bvpp++)
8408                 VPP_SETINHZERO(bvpp);
8409         ret = 0;
8410 
8411 out:
8412         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8413         return (ret);
8414 }
8415 
8416 /*
8417  * Create a vpage structure for this seg.
8418  */
8419 static void
8420 segvn_vpage(struct seg *seg)
8421 {
8422         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8423         struct vpage *vp, *evp;
8424         static pgcnt_t page_limit = 0;
8425 
8426         ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8427 
8428         /*
8429          * If no vpage structure exists, allocate one.  Copy the protections
8430          * and the advice from the segment itself to the individual pages.
8431          */
8432         if (svd->vpage == NULL) {
8433                 /*
8434                  * Start by calculating the number of pages we must allocate
8435                  * to hold the per-page vpage structs needed for this entire
8436                  * segment. If we know now that it would require more than our
8437                  * heuristic limit on the amount of kmem we may consume, then
8438                  * fail. We do this up front, instead of trying to detect the
8439                  * condition deep in page_resv and propagating the error up,
8440                  * since the memory allocation stack is not amenable to
8441                  * passing this failure back. Instead, it wants to keep trying.
8442                  *
8443                  * As a heuristic we set a page limit of 5/8 of total_pages
8444                  * for this allocation. We use shifts so that no floating
8445                  * point conversion takes place and the calculation only
8446                  * needs to be done once.
8447                  */
8448                 ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage);
8449                 pgcnt_t npages = mem_needed >> PAGESHIFT;
8450 
8451                 if (page_limit == 0)
8452                         page_limit = (total_pages >> 1) + (total_pages >> 3);
8453 
8454                 if (npages > page_limit)
8455                         return;
8456 
8457                 svd->pageadvice = 1;
8458                 svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
8459                 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8460                 for (vp = svd->vpage; vp < evp; vp++) {
8461                         VPP_SETPROT(vp, svd->prot);
8462                         VPP_SETADVICE(vp, svd->advice);
8463                 }
8464         }
8465 }
8466 
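     /*
      * Worked example of the heuristic limit above (illustrative numbers
      * only): with total_pages = 0x100000, page_limit = (0x100000 >> 1) +
      * (0x100000 >> 3) = 0x80000 + 0x20000 = 0xa0000, i.e. 5/8 of
      * total_pages. A vpage array that would itself need more pages than
      * that is refused and svd->vpage is left NULL for the caller to detect.
      */
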
8467 /*
8468  * Dump the pages belonging to this segvn segment.
8469  */
8470 static void
8471 segvn_dump(struct seg *seg)
8472 {
8473         struct segvn_data *svd;
8474         page_t *pp;
8475         struct anon_map *amp;
8476         ulong_t anon_index;
8477         struct vnode *vp;
8478         u_offset_t off, offset;
8479         pfn_t pfn;
8480         pgcnt_t page, npages;
8481         caddr_t addr;
8482 
8483         npages = seg_pages(seg);
8484         svd = (struct segvn_data *)seg->s_data;
8485         vp = svd->vp;
8486         off = offset = svd->offset;
8487         addr = seg->s_base;
8488 
8489         if ((amp = svd->amp) != NULL) {
8490                 anon_index = svd->anon_index;
8491                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8492         }
8493 
8494         for (page = 0; page < npages; page++, offset += PAGESIZE) {
8495                 struct anon *ap;
8496                 int we_own_it = 0;
8497 
8498                 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8499                         swap_xlate_nopanic(ap, &vp, &off);
8500                 } else {
8501                         vp = svd->vp;
8502                         off = offset;
8503                 }
8504 
8505                 /*
8506                  * If page_lookup_nowait() returns NULL, the page either
8507                  * does not exist or is exclusively locked.  So fall back
8508                  * to page_exists() to determine whether it exists at all.
8509                  */
8510 
8511                 if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
8512                         we_own_it = 1;
8513                 else
8514                         pp = page_exists(vp, off);
8515 
8516                 if (pp) {
8517                         pfn = page_pptonum(pp);
8518                         dump_addpage(seg->s_as, addr, pfn);
8519                         if (we_own_it)
8520                                 page_unlock(pp);
8521                 }
8522                 addr += PAGESIZE;
8523                 dump_timeleft = dump_timeout;
8524         }
8525 
8526         if (amp != NULL)
8527                 ANON_LOCK_EXIT(&amp->a_rwlock);
8528 }
8529 
8530 #ifdef DEBUG
8531 static uint32_t segvn_pglock_mtbf = 0;
8532 #endif
8533 
8534 #define PCACHE_SHWLIST          ((page_t *)-2)
8535 #define NOPCACHE_SHWLIST        ((page_t *)-1)
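
     /*
      * These sentinel values are stored in the extra (npages'th) slot of a
      * shadow list: PCACHE_SHWLIST marks a list that was large page adjusted
      * and inserted into pcache, NOPCACHE_SHWLIST marks one that was not, so
      * that the L_PAGEUNLOCK path can tell which address range to reclaim.
      */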
8536 
8537 /*
8538  * Lock/Unlock anon pages over a given range. Return shadow list. This routine
8539  * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
8540  * to avoid the overhead of per page locking, unlocking for subsequent IOs to
8541  * the same parts of the segment. Currently shadow list creation is only
8542  * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
8543  * tagged with segment pointer, starting virtual address and length. This
8544  * approach for MAP_SHARED segments may add many pcache entries for the same
8545  * set of pages and lead to long hash chains that decrease pcache lookup
8546  * performance. To avoid this issue for shared segments shared anon map and
8547  * starting anon index are used for pcache entry tagging. This allows all
8548  * segments to share pcache entries for the same anon range and reduces pcache
8549  * chain's length as well as memory overhead from duplicate shadow lists and
8550  * pcache entries.
8551  *
8552  * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
8553  * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
8554  * part of softlockcnt accounting is done differently for private and shared
8555  * segments. In the private segment case softlockcnt is only incremented when
8556  * a new shadow list is created but not when an existing one is found via
8557  * seg_plookup(). pcache entries have reference count incremented/decremented
8558  * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
8559  * reference count can be purged (and purging is needed before segment can be
8560  * freed). When a private segment pcache entry is purged segvn_reclaim() will
8561  * decrement softlockcnt. Since in private segment case each of its pcache
8562  * entries only belongs to this segment we can expect that when
8563  * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8564  * segment purge will succeed and softlockcnt will drop to 0. In shared
8565  * segment case reference count in pcache entry counts active locks from many
8566  * different segments so we can't expect segment purging to succeed even when
8567  * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8568  * segment. To be able to determine when there are no pending pagelocks in
8569  * the shared segment case we don't rely on purging to make softlockcnt drop
8570  * to 0; instead softlockcnt is incremented and decremented for every
8571  * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless of whether a new
8572  * shadow list was created or an existing one was found. When softlockcnt
8573  * drops to 0 this segment no longer has any claims on pcached shadow lists
8574  * and the segment can be freed even if there are still active pcache
8575  * entries shared by this segment's anon map. Shared segment pcache entries
8576  * belong to the anon map and are typically removed when the anon map is
8577  * freed after all processes destroy the segments that use this anon map.
8578  */
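     /*
      * Illustrative caller protocol (a hedged sketch; physio and async IO
      * are the real users and reach this routine through as_pagelock()):
      *
      *        struct page **pplist;
      *
      *        if (as_pagelock(as, &pplist, addr, len, S_WRITE) == 0) {
      *                ... perform the IO directly against the locked pages ...
      *                as_pageunlock(as, pplist, addr, len, S_WRITE);
      *        }
      *
      * The unlock must pass the same addr and len as the matching lock,
      * which is what lets the L_PAGEUNLOCK case below recompute the same
      * adjusted range.
      */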
8579 static int
8580 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
8581     enum lock_type type, enum seg_rw rw)
8582 {
8583         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8584         size_t np;
8585         pgcnt_t adjustpages;
8586         pgcnt_t npages;
8587         ulong_t anon_index;
8588         uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE;
8589         uint_t error;
8590         struct anon_map *amp;
8591         pgcnt_t anpgcnt;
8592         struct page **pplist, **pl, *pp;
8593         caddr_t a;
8594         size_t page;
8595         caddr_t lpgaddr, lpgeaddr;
8596         anon_sync_obj_t cookie;
8597         int anlock;
8598         struct anon_map *pamp;
8599         caddr_t paddr;
8600         seg_preclaim_cbfunc_t preclaim_callback;
8601         size_t pgsz;
8602         int use_pcache;
8603         size_t wlen;
8604         uint_t pflags = 0;
8605         int sftlck_sbase = 0;
8606         int sftlck_send = 0;
8607 
8608 #ifdef DEBUG
8609         if (type == L_PAGELOCK && segvn_pglock_mtbf) {
8610                 hrtime_t ts = gethrtime();
8611                 if ((ts % segvn_pglock_mtbf) == 0) {
8612                         return (ENOTSUP);
8613                 }
8614                 if ((ts % segvn_pglock_mtbf) == 1) {
8615                         return (EFAULT);
8616                 }
8617         }
8618 #endif
8619 
8620         TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START,
8621             "segvn_pagelock: start seg %p addr %p", seg, addr);
8622 
8623         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
8624         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
8625 
8626         SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8627 
8628         /*
8629          * for now we only support pagelock to anon memory. We would have to
8630          * check protections for vnode objects and call into the vnode driver.
8631          * That's too much for a fast path. Let the fault entry point handle
8632          * it.
8633          */
8634         if (svd->vp != NULL) {
8635                 if (type == L_PAGELOCK) {
8636                         error = ENOTSUP;
8637                         goto out;
8638                 }
8639                 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8640         }
8641         if ((amp = svd->amp) == NULL) {
8642                 if (type == L_PAGELOCK) {
8643                         error = EFAULT;
8644                         goto out;
8645                 }
8646                 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8647         }
8648         if (rw != S_READ && rw != S_WRITE) {
8649                 if (type == L_PAGELOCK) {
8650                         error = ENOTSUP;
8651                         goto out;
8652                 }
8653                 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8654         }
8655 
8656         if (seg->s_szc != 0) {
8657                 /*
8658                  * We are adjusting the pagelock region to the large page size
8659                  * boundary because the unlocked part of a large page cannot
8660                  * be freed anyway unless all constituent pages of a large
8661                  * page are locked. Bigger regions reduce pcache chain length
8662                  * and improve lookup performance. The tradeoff is that the
8663                  * very first segvn_pagelock() call for a given page is more
8664                  * expensive if only 1 page_t is needed for IO. This is only
8665                  * an issue if pcache entry doesn't get reused by several
8666                  * subsequent calls. We optimize here for the case when pcache
8667                  * is heavily used by repeated IOs to the same address range.
8668                  *
8669                  * Note the segment's page size cannot change while we are
8670                  * holding the as lock, and after that it cannot change while
8671                  * softlockcnt is not 0. This allows us to correctly
8672                  * recalculate the large page size region for the matching
8673                  * pageunlock/reclaim call, since the as_pageunlock() caller
8674                  * must always match the as_pagelock() call's addr and len.
8675                  *
8676                  * For pageunlock, *ppp points to the page_t pointer that
8677                  * corresponds to the real unadjusted start address; similarly,
8678                  * for pagelock, *ppp must point to the page_t pointer that
8679                  * corresponds to the real unadjusted start address.
8680                  */
8681                 pgsz = page_get_pagesize(seg->s_szc);
8682                 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
8683                 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8684         } else if (len < segvn_pglock_comb_thrshld) {
8685                 lpgaddr = addr;
8686                 lpgeaddr = addr + len;
8687                 adjustpages = 0;
8688                 pgsz = PAGESIZE;
8689         } else {
8690                 /*
8691                  * Align the address range of large enough requests to allow
8692                  * combining of different shadow lists into 1 to reduce memory
8693                  * overhead from potentially overlapping large shadow lists
8694                  * (worst case is we have a 1MB IO into buffers with start
8695                  * addresses separated by 4K).  Alignment is only possible if
8696                  * padded chunks have sufficient access permissions. Note
8697                  * permissions won't change between L_PAGELOCK and
8698                  * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8699                  * segvn_setprot() to wait until softlockcnt drops to 0. This
8700                  * allows us to determine in L_PAGEUNLOCK the same range we
8701                  * computed in L_PAGELOCK.
8702                  *
8703                  * If alignment is limited by segment ends set
8704                  * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8705                  * these flags are set bump softlockcnt_sbase/softlockcnt_send
8706                  * per segment counters. In L_PAGEUNLOCK case decrease
8707                  * softlockcnt_sbase/softlockcnt_send counters if
8708                  * sftlck_sbase/sftlck_send flags are set.  When
8709                  * softlockcnt_sbase/softlockcnt_send are non 0
8710                  * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8711                  * won't merge the segments. This restriction combined with
8712                  * restriction on segment unmapping and splitting for segments
8713                  * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8714                  * correctly determine the same range that was previously
8715                  * locked by matching L_PAGELOCK.
8716                  */
8717                 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16);
8718                 pgsz = PAGESIZE;
8719                 if (svd->type == MAP_PRIVATE) {
8720                         lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
8721                             segvn_pglock_comb_balign);
8722                         if (lpgaddr < seg->s_base) {
8723                                 lpgaddr = seg->s_base;
8724                                 sftlck_sbase = 1;
8725                         }
8726                 } else {
8727                         ulong_t aix = svd->anon_index + seg_page(seg, addr);
8728                         ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign);
8729                         if (aaix < svd->anon_index) {
8730                                 lpgaddr = seg->s_base;
8731                                 sftlck_sbase = 1;
8732                         } else {
8733                                 lpgaddr = addr - ptob(aix - aaix);
8734                                 ASSERT(lpgaddr >= seg->s_base);
8735                         }
8736                 }
8737                 if (svd->pageprot && lpgaddr != addr) {
8738                         struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
8739                         struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8740                         while (vp < evp) {
8741                                 if ((VPP_PROT(vp) & protchk) == 0) {
8742                                         break;
8743                                 }
8744                                 vp++;
8745                         }
8746                         if (vp < evp) {
8747                                 lpgaddr = addr;
8748                                 pflags = 0;
8749                         }
8750                 }
8751                 lpgeaddr = addr + len;
8752                 if (pflags) {
8753                         if (svd->type == MAP_PRIVATE) {
8754                                 lpgeaddr = (caddr_t)P2ROUNDUP(
8755                                     (uintptr_t)lpgeaddr,
8756                                     segvn_pglock_comb_balign);
8757                         } else {
8758                                 ulong_t aix = svd->anon_index +
8759                                     seg_page(seg, lpgeaddr);
8760                                 ulong_t aaix = P2ROUNDUP(aix,
8761                                     segvn_pglock_comb_palign);
8762                                 if (aaix < aix) {
8763                                         lpgeaddr = 0;
8764                                 } else {
8765                                         lpgeaddr += ptob(aaix - aix);
8766                                 }
8767                         }
8768                         if (lpgeaddr == 0 ||
8769                             lpgeaddr > seg->s_base + seg->s_size) {
8770                                 lpgeaddr = seg->s_base + seg->s_size;
8771                                 sftlck_send = 1;
8772                         }
8773                 }
8774                 if (svd->pageprot && lpgeaddr != addr + len) {
8775                         struct vpage *vp;
8776                         struct vpage *evp;
8777 
8778                         vp = &svd->vpage[seg_page(seg, addr + len)];
8779                         evp = &svd->vpage[seg_page(seg, lpgeaddr)];
8780 
8781                         while (vp < evp) {
8782                                 if ((VPP_PROT(vp) & protchk) == 0) {
8783                                         break;
8784                                 }
8785                                 vp++;
8786                         }
8787                         if (vp < evp) {
8788                                 lpgeaddr = addr + len;
8789                         }
8790                 }
8791                 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8792         }
8793 
8794         /*
8795          * For MAP_SHARED segments we create pcache entries tagged by amp and
8796          * anon index so that we can share pcache entries with other segments
8797          * that map this amp.  For private segments pcache entries are tagged
8798          * with segment and virtual address.
8799          */
8800         if (svd->type == MAP_SHARED) {
8801                 pamp = amp;
8802                 paddr = (caddr_t)((lpgaddr - seg->s_base) +
8803                     ptob(svd->anon_index));
8804                 preclaim_callback = shamp_reclaim;
8805         } else {
8806                 pamp = NULL;
8807                 paddr = lpgaddr;
8808                 preclaim_callback = segvn_reclaim;
8809         }
8810 
8811         if (type == L_PAGEUNLOCK) {
8812                 VM_STAT_ADD(segvnvmstats.pagelock[0]);
8813 
8814                 /*
8815                  * update hat ref bits for /proc. We need to make sure
8816                  * that threads tracing the ref and mod bits of the
8817                  * address space get the right data.
8818                  * Note: page ref and mod bits are updated at reclaim time
8819                  */
8820                 if (seg->s_as->a_vbits) {
8821                         for (a = addr; a < addr + len; a += PAGESIZE) {
8822                                 if (rw == S_WRITE) {
8823                                         hat_setstat(seg->s_as, a,
8824                                             PAGESIZE, P_REF | P_MOD);
8825                                 } else {
8826                                         hat_setstat(seg->s_as, a,
8827                                             PAGESIZE, P_REF);
8828                                 }
8829                         }
8830                 }
8831 
8832                 /*
8833                  * Check the shadow list entry after the last page used in
8834                  * this IO request. If it's NOPCACHE_SHWLIST the shadow list
8835                  * was not inserted into pcache and is not large page
8836                  * adjusted.  In this case call reclaim callback directly and
8837                  * don't adjust the shadow list start and size for large
8838                  * pages.
8839                  */
8840                 npages = btop(len);
8841                 if ((*ppp)[npages] == NOPCACHE_SHWLIST) {
8842                         void *ptag;
8843                         if (pamp != NULL) {
8844                                 ASSERT(svd->type == MAP_SHARED);
8845                                 ptag = (void *)pamp;
8846                                 paddr = (caddr_t)((addr - seg->s_base) +
8847                                     ptob(svd->anon_index));
8848                         } else {
8849                                 ptag = (void *)seg;
8850                                 paddr = addr;
8851                         }
8852                         (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
8853                 } else {
8854                         ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
8855                             IS_SWAPFSVP((*ppp)[npages]->p_vnode));
8856                         len = lpgeaddr - lpgaddr;
8857                         npages = btop(len);
8858                         seg_pinactive(seg, pamp, paddr, len,
8859                             *ppp - adjustpages, rw, pflags, preclaim_callback);
8860                 }
8861 
8862                 if (pamp != NULL) {
8863                         ASSERT(svd->type == MAP_SHARED);
8864                         ASSERT(svd->softlockcnt >= npages);
8865                         atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
8866                 }
8867 
8868                 if (sftlck_sbase) {
8869                         ASSERT(svd->softlockcnt_sbase > 0);
8870                         atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
8871                 }
8872                 if (sftlck_send) {
8873                         ASSERT(svd->softlockcnt_send > 0);
8874                         atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);
8875                 }
8876 
8877                 /*
8878                  * If someone is blocked while unmapping, we purge
8879                  * segment page cache and thus reclaim pplist synchronously
8880                  * without waiting for seg_pasync_thread. This speeds up
8881                  * unmapping in cases where munmap(2) is called, while
8882                  * raw async i/o is still in progress or where a thread
8883                  * exits on data fault in a multithreaded application.
8884                  */
8885                 if (AS_ISUNMAPWAIT(seg->s_as)) {
8886                         if (svd->softlockcnt == 0) {
8887                                 mutex_enter(&seg->s_as->a_contents);
8888                                 if (AS_ISUNMAPWAIT(seg->s_as)) {
8889                                         AS_CLRUNMAPWAIT(seg->s_as);
8890                                         cv_broadcast(&seg->s_as->a_cv);
8891                                 }
8892                                 mutex_exit(&seg->s_as->a_contents);
8893                         } else if (pamp == NULL) {
8894                                 /*
8895                                  * softlockcnt is not 0 and this is a
8896                                  * MAP_PRIVATE segment. Try to purge its
8897                                  * pcache entries to reduce softlockcnt.
8898                                  * If it drops to 0 segvn_reclaim()
8899                                  * will wake up a thread waiting on
8900                                  * unmapwait flag.
8901                                  *
8902                                  * We don't purge MAP_SHARED segments with non
8903                                  * 0 softlockcnt since IO is still in progress
8904                                  * for such segments.
8905                                  */
8906                                 ASSERT(svd->type == MAP_PRIVATE);
8907                                 segvn_purge(seg);
8908                         }
8909                 }
8910                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8911                 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END,
8912                     "segvn_pagelock: unlock seg %p addr %p", seg, addr);
8913                 return (0);
8914         }
8915 
8916         /* The L_PAGELOCK case ... */
8917 
8918         VM_STAT_ADD(segvnvmstats.pagelock[1]);
8919 
8920         /*
8921          * For MAP_SHARED segments we have to check protections before
8922          * seg_plookup() since pcache entries may be shared by many segments
8923          * with potentially different page protections.
8924          */
8925         if (pamp != NULL) {
8926                 ASSERT(svd->type == MAP_SHARED);
8927                 if (svd->pageprot == 0) {
8928                         if ((svd->prot & protchk) == 0) {
8929                                 error = EACCES;
8930                                 goto out;
8931                         }
8932                 } else {
8933                         /*
8934                          * check page protections
8935                          */
8936                         caddr_t ea;
8937 
8938                         if (seg->s_szc) {
8939                                 a = lpgaddr;
8940                                 ea = lpgeaddr;
8941                         } else {
8942                                 a = addr;
8943                                 ea = addr + len;
8944                         }
8945                         for (; a < ea; a += pgsz) {
8946                                 struct vpage *vp;
8947 
8948                                 ASSERT(seg->s_szc == 0 ||
8949                                     sameprot(seg, a, pgsz));
8950                                 vp = &svd->vpage[seg_page(seg, a)];
8951                                 if ((VPP_PROT(vp) & protchk) == 0) {
8952                                         error = EACCES;
8953                                         goto out;
8954                                 }
8955                         }
8956                 }
8957         }
8958 
8959         /*
8960          * try to find pages in segment page cache
8961          */
8962         pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
8963         if (pplist != NULL) {
8964                 if (pamp != NULL) {
8965                         npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
8966                         ASSERT(svd->type == MAP_SHARED);
8967                         atomic_add_long((ulong_t *)&svd->softlockcnt,
8968                             npages);
8969                 }
8970                 if (sftlck_sbase) {
8971                         atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
8972                 }
8973                 if (sftlck_send) {
8974                         atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
8975                 }
8976                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8977                 *ppp = pplist + adjustpages;
8978                 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
8979                     "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
8980                 return (0);
8981         }
8982 
8983         /*
8984          * For MAP_SHARED segments we already verified above that segment
8985          * protections allow this pagelock operation.
8986          */
8987         if (pamp == NULL) {
8988                 ASSERT(svd->type == MAP_PRIVATE);
8989                 if (svd->pageprot == 0) {
8990                         if ((svd->prot & protchk) == 0) {
8991                                 error = EACCES;
8992                                 goto out;
8993                         }
8994                         if (svd->prot & PROT_WRITE) {
8995                                 wlen = lpgeaddr - lpgaddr;
8996                         } else {
8997                                 wlen = 0;
8998                                 ASSERT(rw == S_READ);
8999                         }
9000                 } else {
9001                         int wcont = 1;
9002                         /*
9003                          * check page protections
9004                          */
9005                         for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
9006                                 struct vpage *vp;
9007 
9008                                 ASSERT(seg->s_szc == 0 ||
9009                                     sameprot(seg, a, pgsz));
9010                                 vp = &svd->vpage[seg_page(seg, a)];
9011                                 if ((VPP_PROT(vp) & protchk) == 0) {
9012                                         error = EACCES;
9013                                         goto out;
9014                                 }
9015                                 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
9016                                         wlen += pgsz;
9017                                 } else {
9018                                         wcont = 0;
9019                                         ASSERT(rw == S_READ);
9020                                 }
9021                         }
9022                 }
9023                 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
9024                 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
9025         }
9026 
9027         /*
9028          * Only build large page adjusted shadow list if we expect to insert
9029          * it into pcache. For large enough pages it's a big overhead to
9030          * create a shadow list of the entire large page. But this overhead
9031          * should be amortized over repeated pcache hits on subsequent reuse
9032          * of this shadow list (IO into any range within this shadow list will
9033          * find it in pcache since we large page align the request for pcache
9034          * lookups). pcache performance is improved with bigger shadow lists
9035          * as it reduces the time to pcache the entire big segment and reduces
9036          * pcache chain length.
9037          */
9038         if (seg_pinsert_check(seg, pamp, paddr,
9039             lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
9040                 addr = lpgaddr;
9041                 len = lpgeaddr - lpgaddr;
9042                 use_pcache = 1;
9043         } else {
9044                 use_pcache = 0;
9045                 /*
9046                  * Since this entry will not be inserted into the pcache, we
9047                  * will not do any adjustments to the starting address or
9048                  * size of the memory to be locked.
9049                  */
9050                 adjustpages = 0;
9051         }
9052         npages = btop(len);
9053 
9054         pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
9055         pl = pplist;
9056         *ppp = pplist + adjustpages;
9057         /*
9058          * If use_pcache is 0 this shadow list is not large page adjusted.
9059          * Record this info in the last entry of shadow array so that
9060          * L_PAGEUNLOCK can determine if it should large page adjust the
9061          * address range to find the real range that was locked.
9062          */
9063         pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;
9064 
9065         page = seg_page(seg, addr);
9066         anon_index = svd->anon_index + page;
9067 
9068         anlock = 0;
9069         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9070         ASSERT(amp->a_szc >= seg->s_szc);
9071         anpgcnt = page_get_pagecnt(amp->a_szc);
9072         for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
9073                 struct anon *ap;
9074                 struct vnode *vp;
9075                 u_offset_t off;
9076 
9077                 /*
9078                  * Lock and unlock anon array only once per large page.
9079                  * anon_array_enter() locks the root anon slot according to
9080                  * a_szc which can't change while anon map is locked.  We lock
9081                  * anon the first time through this loop and each time we
9082                  * reach anon index that corresponds to a root of a large
9083                  * page.
9084                  */
9085                 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
9086                         ASSERT(anlock == 0);
9087                         anon_array_enter(amp, anon_index, &cookie);
9088                         anlock = 1;
9089                 }
9090                 ap = anon_get_ptr(amp->ahp, anon_index);
9091 
9092                 /*
9093                  * We must never use seg_pcache for COW pages
9094                  * because we might end up with the original page
9095                  * still lying in seg_pcache even after the
9096                  * private page is created. This leads to data
9097                  * corruption: aio_write would refer to the page
9098                  * still in the cache while all other accesses
9099                  * refer to the private page.
9100                  */
9101                 if (ap == NULL || ap->an_refcnt != 1) {
9102                         struct vpage *vpage;
9103 
9104                         if (seg->s_szc) {
9105                                 error = EFAULT;
9106                                 break;
9107                         }
9108                         if (svd->vpage != NULL) {
9109                                 vpage = &svd->vpage[seg_page(seg, a)];
9110                         } else {
9111                                 vpage = NULL;
9112                         }
9113                         ASSERT(anlock);
9114                         anon_array_exit(&cookie);
9115                         anlock = 0;
9116                         pp = NULL;
9117                         error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
9118                             vpage, &pp, 0, F_INVAL, rw, 1);
9119                         if (error) {
9120                                 error = fc_decode(error);
9121                                 break;
9122                         }
9123                         anon_array_enter(amp, anon_index, &cookie);
9124                         anlock = 1;
9125                         ap = anon_get_ptr(amp->ahp, anon_index);
9126                         if (ap == NULL || ap->an_refcnt != 1) {
9127                                 error = EFAULT;
9128                                 break;
9129                         }
9130                 }
9131                 swap_xlate(ap, &vp, &off);
9132                 pp = page_lookup_nowait(vp, off, SE_SHARED);
9133                 if (pp == NULL) {
9134                         error = EFAULT;
9135                         break;
9136                 }
9137                 if (ap->an_pvp != NULL) {
9138                         anon_swap_free(ap, pp);
9139                 }
9140                 /*
9141                  * Unlock anon if this is the last slot in a large page.
9142                  */
9143                 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
9144                         ASSERT(anlock);
9145                         anon_array_exit(&cookie);
9146                         anlock = 0;
9147                 }
9148                 *pplist++ = pp;
9149         }
9150         if (anlock) {           /* Ensure the lock is dropped */
9151                 anon_array_exit(&cookie);
9152         }
9153         ANON_LOCK_EXIT(&amp->a_rwlock);
9154 
9155         if (a >= addr + len) {
9156                 atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9157                 if (pamp != NULL) {
9158                         ASSERT(svd->type == MAP_SHARED);
9159                         atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
9160                             npages);
9161                         wlen = len;
9162                 }
9163                 if (sftlck_sbase) {
9164                         atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9165                 }
9166                 if (sftlck_send) {
9167                         atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9168                 }
9169                 if (use_pcache) {
9170                         (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
9171                             rw, pflags, preclaim_callback);
9172                 }
9173                 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9174                 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
9175                     "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9176                 return (0);
9177         }
9178 
9179         pplist = pl;
9180         np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
9181         while (np > (uint_t)0) {
9182                 ASSERT(PAGE_LOCKED(*pplist));
9183                 page_unlock(*pplist);
9184                 np--;
9185                 pplist++;
9186         }
9187         kmem_free(pl, sizeof (page_t *) * (npages + 1));
9188 out:
9189         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9190         *ppp = NULL;
9191         TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
9192             "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
9193         return (error);
9194 }
9195 
9196 /*
9197  * purge any cached pages in the I/O page cache
9198  */
9199 static void
9200 segvn_purge(struct seg *seg)
9201 {
9202         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9203 
9204         /*
9205          * pcache is only used by pure anon segments.
9206          */
9207         if (svd->amp == NULL || svd->vp != NULL) {
9208                 return;
9209         }
9210 
9211         /*
9212          * For MAP_SHARED segments a non 0 softlockcnt means active IO
9213          * is still in progress via this segment. So we only purge
9214          * MAP_SHARED segments when their softlockcnt is 0.
9215          */
9216         if (svd->type == MAP_PRIVATE) {
9217                 if (svd->softlockcnt) {
9218                         seg_ppurge(seg, NULL, 0);
9219                 }
9220         } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
9221                 seg_ppurge(seg, svd->amp, 0);
9222         }
9223 }
9224 
9225 /*
9226  * If the async argument is not 0 we are called from the pcache async thread
9227  * and don't hold the AS lock.
9228  */
9229 
9230 /*ARGSUSED*/
9231 static int
9232 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9233         enum seg_rw rw, int async)
9234 {
9235         struct seg *seg = (struct seg *)ptag;
9236         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9237         pgcnt_t np, npages;
9238         struct page **pl;
9239 
9240         npages = np = btop(len);
9241         ASSERT(npages);
9242 
9243         ASSERT(svd->vp == NULL && svd->amp != NULL);
9244         ASSERT(svd->softlockcnt >= npages);
9245         ASSERT(async || AS_LOCK_HELD(seg->s_as));
9246 
9247         pl = pplist;
9248 
9249         ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9250         ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9251 
9252         while (np > (uint_t)0) {
9253                 if (rw == S_WRITE) {
9254                         hat_setrefmod(*pplist);
9255                 } else {
9256                         hat_setref(*pplist);
9257                 }
9258                 page_unlock(*pplist);
9259                 np--;
9260                 pplist++;
9261         }
9262 
9263         kmem_free(pl, sizeof (page_t *) * (npages + 1));
9264 
9265         /*
9266          * If we are the pcache async thread we don't hold the AS lock.
9267          * This means that if softlockcnt drops to 0 after the decrement
9268          * below, the address space may get freed. We can't allow that,
9269          * since after the softlockcnt decrement to 0 we still need to
9270          * access the as structure for a possible wakeup of unmap waiters.
9271          * To prevent the as from disappearing we take this segment's
9272          * segfree_syncmtx. segvn_free() also takes this mutex as a barrier
9273          * to make sure this routine completes before the segment is freed.
9274          *
9275          * The second complication in the async case is the possibility of
9276          * a missed wake up of an unmap wait thread. When we don't hold the
9277          * as lock here we may take the a_contents lock before the unmap
9278          * wait thread that was first to see that softlockcnt was still not
9279          * 0. As a result we'll fail to wake it up. To avoid this race we
9280          * set the nounmapwait flag in the as structure if we drop
9281          * softlockcnt to 0 when called by the pcache async thread; the
9282          * unmap wait thread will not block if it is set.
9282          */
9283         if (async) {
9284                 mutex_enter(&svd->segfree_syncmtx);
9285         }
9286 
9287         if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9288                 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
9289                         mutex_enter(&seg->s_as->a_contents);
9290                         if (async) {
9291                                 AS_SETNOUNMAPWAIT(seg->s_as);
9292                         }
9293                         if (AS_ISUNMAPWAIT(seg->s_as)) {
9294                                 AS_CLRUNMAPWAIT(seg->s_as);
9295                                 cv_broadcast(&seg->s_as->a_cv);
9296                         }
9297                         mutex_exit(&seg->s_as->a_contents);
9298                 }
9299         }
9300 
9301         if (async) {
9302                 mutex_exit(&svd->segfree_syncmtx);
9303         }
9304         return (0);
9305 }
9306 
9307 /*ARGSUSED*/
9308 static int
9309 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9310         enum seg_rw rw, int async)
9311 {
9312         amp_t *amp = (amp_t *)ptag;
9313         pgcnt_t np, npages;
9314         struct page **pl;
9315 
9316         npages = np = btop(len);
9317         ASSERT(npages);
9318         ASSERT(amp->a_softlockcnt >= npages);
9319 
9320         pl = pplist;
9321 
9322         ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9323         ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9324 
9325         while (np > (uint_t)0) {
9326                 if (rw == S_WRITE) {
9327                         hat_setrefmod(*pplist);
9328                 } else {
9329                         hat_setref(*pplist);
9330                 }
9331                 page_unlock(*pplist);
9332                 np--;
9333                 pplist++;
9334         }
9335 
9336         kmem_free(pl, sizeof (page_t *) * (npages + 1));
9337 
9338         /*
9339          * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
9340          * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
9341          * and anonmap_purge() acquires a_purgemtx.
9342          */
9343         mutex_enter(&amp->a_purgemtx);
9344         if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
9345             amp->a_purgewait) {
9346                 amp->a_purgewait = 0;
9347                 cv_broadcast(&amp->a_purgecv);
9348         }
9349         mutex_exit(&amp->a_purgemtx);
9350         return (0);
9351 }
9352 
9353 /*
9354  * get a memory ID for an addr in a given segment
9355  *
9356  * XXX only creates PAGESIZE pages if anon slots are not initialized.
9357  * At fault time they will be relocated into larger pages.
9358  */
9359 static int
9360 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
9361 {
9362         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9363         struct anon     *ap = NULL;
9364         ulong_t         anon_index;
9365         struct anon_map *amp;
9366         anon_sync_obj_t cookie;
9367 
9368         if (svd->type == MAP_PRIVATE) {
9369                 memidp->val[0] = (uintptr_t)seg->s_as;
9370                 memidp->val[1] = (uintptr_t)addr;
9371                 return (0);
9372         }
9373 
9374         if (svd->type == MAP_SHARED) {
9375                 if (svd->vp) {
9376                         memidp->val[0] = (uintptr_t)svd->vp;
9377                         memidp->val[1] = (u_longlong_t)svd->offset +
9378                             (uintptr_t)(addr - seg->s_base);
9379                         return (0);
9380                 } else {
9381 
9382                         SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
9383                         if ((amp = svd->amp) != NULL) {
9384                                 anon_index = svd->anon_index +
9385                                     seg_page(seg, addr);
9386                         }
9387                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9388 
9389                         ASSERT(amp != NULL);
9390 
9391                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9392                         anon_array_enter(amp, anon_index, &cookie);
9393                         ap = anon_get_ptr(amp->ahp, anon_index);
9394                         if (ap == NULL) {
9395                                 page_t          *pp;
9396 
9397                                 pp = anon_zero(seg, addr, &ap, svd->cred);
9398                                 if (pp == NULL) {
9399                                         anon_array_exit(&cookie);
9400                                         ANON_LOCK_EXIT(&amp->a_rwlock);
9401                                         return (ENOMEM);
9402                                 }
9403                                 ASSERT(anon_get_ptr(amp->ahp, anon_index)
9404                                     == NULL);
9405                                 (void) anon_set_ptr(amp->ahp, anon_index,
9406                                     ap, ANON_SLEEP);
9407                                 page_unlock(pp);
9408                         }
9409 
9410                         anon_array_exit(&cookie);
9411                         ANON_LOCK_EXIT(&amp->a_rwlock);
9412 
9413                         memidp->val[0] = (uintptr_t)ap;
9414                         memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
9415                         return (0);
9416                 }
9417         }
9418         return (EINVAL);
9419 }
9420 
9421 static int
9422 sameprot(struct seg *seg, caddr_t a, size_t len)
9423 {
9424         struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9425         struct vpage *vpage;
9426         spgcnt_t pages = btop(len);
9427         uint_t prot;
9428 
9429         if (svd->pageprot == 0)
9430                 return (1);
9431 
9432         ASSERT(svd->vpage != NULL);
9433 
9434         vpage = &svd->vpage[seg_page(seg, a)];
9435         prot = VPP_PROT(vpage);
9436         vpage++;
9437         pages--;
9438         while (pages-- > 0) {
9439                 if (prot != VPP_PROT(vpage))
9440                         return (0);
9441                 vpage++;
9442         }
9443         return (1);
9444 }
9445 
9446 /*
9447  * Get memory allocation policy info for specified address in given segment
9448  */
9449 static lgrp_mem_policy_info_t *
9450 segvn_getpolicy(struct seg *seg, caddr_t addr)
9451 {
9452         struct anon_map         *amp;
9453         ulong_t                 anon_index;
9454         lgrp_mem_policy_info_t  *policy_info;
9455         struct segvn_data       *svn_data;
9456         u_offset_t              vn_off;
9457         vnode_t                 *vp;
9458 
9459         ASSERT(seg != NULL);
9460 
9461         svn_data = (struct segvn_data *)seg->s_data;
9462         if (svn_data == NULL)
9463                 return (NULL);
9464 
9465         /*
9466          * Get policy info for private or shared memory
9467          */
9468         if (svn_data->type != MAP_SHARED) {
9469                 if (svn_data->tr_state != SEGVN_TR_ON) {
9470                         policy_info = &svn_data->policy_info;
9471                 } else {
9472                         policy_info = &svn_data->tr_policy_info;
9473                         ASSERT(policy_info->mem_policy ==
9474                             LGRP_MEM_POLICY_NEXT_SEG);
9475                 }
9476         } else {
9477                 amp = svn_data->amp;
9478                 anon_index = svn_data->anon_index + seg_page(seg, addr);
9479                 vp = svn_data->vp;
9480                 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
9481                 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
9482         }
9483 
9484         return (policy_info);
9485 }
9486 
9487 /*ARGSUSED*/
9488 static int
9489 segvn_capable(struct seg *seg, segcapability_t capability)
9490 {
9491         return (0);
9492 }
9493 
9494 /*
9495  * Bind a text vnode segment to an amp. If we bind successfully, mappings are
9496  * established to the pages of a per vnode, per lgroup amp instead of to the
9497  * vnode pages. There's one amp per vnode text mapping per lgroup. Many
9498  * processes may share the same text replication amp. If a suitable amp
9499  * doesn't already exist in the svntr hash table we create a new one.  We may
9500  * fail to bind to an amp if the segment is not eligible for text replication.
9501  * The code below first checks for these conditions. If binding succeeds the
9502  * segment's tr_state is set to on and svd->amp points to the amp to use.
9503  * Otherwise tr_state is set to off and svd->amp remains NULL.
9504  */
9505 static void
9506 segvn_textrepl(struct seg *seg)
9507 {
9508         struct segvn_data       *svd = (struct segvn_data *)seg->s_data;
9509         vnode_t                 *vp = svd->vp;
9510         u_offset_t              off = svd->offset;
9511         size_t                  size = seg->s_size;
9512         u_offset_t              eoff = off + size;
9513         uint_t                  szc = seg->s_szc;
9514         ulong_t                 hash = SVNTR_HASH_FUNC(vp);
9515         svntr_t                 *svntrp;
9516         struct vattr            va;
9517         proc_t                  *p = seg->s_as->a_proc;
9518         lgrp_id_t               lgrp_id;
9519         lgrp_id_t               olid;
9520         int                     first;
9521         struct anon_map         *amp;
9522 
9523         ASSERT(AS_LOCK_HELD(seg->s_as));
9524         ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9525         ASSERT(p != NULL);
9526         ASSERT(svd->tr_state == SEGVN_TR_INIT);
9527         ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9528         ASSERT(svd->flags & MAP_TEXT);
9529         ASSERT(svd->type == MAP_PRIVATE);
9530         ASSERT(vp != NULL && svd->amp == NULL);
9531         ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9532         ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9533         ASSERT(seg->s_as != &kas);
9534         ASSERT(off < eoff);
9535         ASSERT(svntr_hashtab != NULL);
9536 
9537         /*
9538          * If NUMA optimizations are no longer desired, bail out.
9539          */
9540         if (!lgrp_optimizations()) {
9541                 svd->tr_state = SEGVN_TR_OFF;
9542                 return;
9543         }
9544 
9545         /*
9546          * Avoid creating anon maps with a size bigger than the file size.
9547          * If the VOP_GETATTR() call fails, bail out.
9548          */
9549         va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
9550         if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
9551                 svd->tr_state = SEGVN_TR_OFF;
9552                 SEGVN_TR_ADDSTAT(gaerr);
9553                 return;
9554         }
9555         if (btopr(va.va_size) < btopr(eoff)) {
9556                 svd->tr_state = SEGVN_TR_OFF;
9557                 SEGVN_TR_ADDSTAT(overmap);
9558                 return;
9559         }
9560 
9561         /*
9562          * VVMEXEC may not be set yet if exec() prefaults the text segment.
9563          * Set this flag now, before vn_is_mapped(V_WRITE), so that a
9564          * MAP_SHARED mapping that checks whether the trcache for this vnode
9565          * needs to be invalidated can't miss us.
9566          */
9567         if (!(vp->v_flag & VVMEXEC)) {
9568                 mutex_enter(&vp->v_lock);
9569                 vp->v_flag |= VVMEXEC;
9570                 mutex_exit(&vp->v_lock);
9571         }
9572         mutex_enter(&svntr_hashtab[hash].tr_lock);
9573         /*
9574          * Bail out if potentially MAP_SHARED writable mappings exist to this
9575          * vnode.  We don't want to use old file contents from existing
9576          * replicas if this mapping was established after the original file
9577          * was changed.
9578          */
9579         if (vn_is_mapped(vp, V_WRITE)) {
9580                 mutex_exit(&svntr_hashtab[hash].tr_lock);
9581                 svd->tr_state = SEGVN_TR_OFF;
9582                 SEGVN_TR_ADDSTAT(wrcnt);
9583                 return;
9584         }
9585         svntrp = svntr_hashtab[hash].tr_head;
9586         for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9587                 ASSERT(svntrp->tr_refcnt != 0);
9588                 if (svntrp->tr_vp != vp) {
9589                         continue;
9590                 }
9591 
9592                 /*
9593                  * Bail out if the file or its attributes were changed after
9594                  * this replication entry was created since we need to use the
9595                  * latest file contents. Note that mtime test alone is not
9596                  * sufficient because a user can explicitly change mtime via
9597                  * utimes(2) interfaces back to the old value after modifying
9598                  * the file contents. To detect this case we also have to test
9599                  * ctime which among other things records the time of the last
9600                  * mtime change by utimes(2). ctime is not changed when the file
9601                  * is only read or executed so we expect that typically existing
9602                  * replication amp's can be used most of the time.
9603                  */
9604                 if (!svntrp->tr_valid ||
9605                     svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
9606                     svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
9607                     svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
9608                     svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
9609                         mutex_exit(&svntr_hashtab[hash].tr_lock);
9610                         svd->tr_state = SEGVN_TR_OFF;
9611                         SEGVN_TR_ADDSTAT(stale);
9612                         return;
9613                 }
                /*
                 * If off, eoff and szc match the current segment, we found
                 * an existing entry we can use.
                 */
9618                 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
9619                     svntrp->tr_szc == szc) {
9620                         break;
9621                 }
                /*
                 * Don't create entries that differ but overlap in file
                 * offsets, to avoid replicating the same file pages more
                 * than once per lgroup.
                 */
9627                 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
9628                     (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
9629                         mutex_exit(&svntr_hashtab[hash].tr_lock);
9630                         svd->tr_state = SEGVN_TR_OFF;
9631                         SEGVN_TR_ADDSTAT(overlap);
9632                         return;
9633                 }
9634         }
        /*
         * If we didn't find an existing entry, create a new one.
         */
9638         if (svntrp == NULL) {
9639                 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
9640                 if (svntrp == NULL) {
9641                         mutex_exit(&svntr_hashtab[hash].tr_lock);
9642                         svd->tr_state = SEGVN_TR_OFF;
9643                         SEGVN_TR_ADDSTAT(nokmem);
9644                         return;
9645                 }
9646 #ifdef DEBUG
9647                 {
9648                         lgrp_id_t i;
9649                         for (i = 0; i < NLGRPS_MAX; i++) {
9650                                 ASSERT(svntrp->tr_amp[i] == NULL);
9651                         }
9652                 }
9653 #endif /* DEBUG */
9654                 svntrp->tr_vp = vp;
9655                 svntrp->tr_off = off;
9656                 svntrp->tr_eoff = eoff;
9657                 svntrp->tr_szc = szc;
9658                 svntrp->tr_valid = 1;
9659                 svntrp->tr_mtime = va.va_mtime;
9660                 svntrp->tr_ctime = va.va_ctime;
9661                 svntrp->tr_refcnt = 0;
9662                 svntrp->tr_next = svntr_hashtab[hash].tr_head;
9663                 svntr_hashtab[hash].tr_head = svntrp;
9664         }
9665         first = 1;
9666 again:
        /*
         * We want to pick a replica with pages on the main thread's
         * (t_tid == 1, aka T1) lgroup.  Currently text replication is only
         * optimized for workloads that either have all threads of a process
         * on the same lgroup or execute their large text primarily on the
         * main thread.
         */
9673         lgrp_id = p->p_t1_lgrpid;
9674         if (lgrp_id == LGRP_NONE) {
                /*
                 * If exec() prefaults text on a non-main thread, use the
                 * current thread's lgrpid.  It will become the main thread
                 * soon anyway.
                 */
9680                 lgrp_id = lgrp_home_id(curthread);
9681         }
        /*
         * Set p_tr_lgrpid to lgrp_id if it hasn't been set yet.  Otherwise
         * just set it to NLGRPS_MAX if it differs from the current process
         * T1 home lgroup.  p_tr_lgrpid is used to detect when a process uses
         * text replication and T1's new home differs from the lgroup used
         * for text replication; when this happens the asynchronous segvn
         * thread rechecks whether segments should change the lgroups used
         * for text replication.  If we fail to set p_tr_lgrpid with
         * atomic_cas_32, then set it to NLGRPS_MAX without cas if it's not
         * already NLGRPS_MAX and not equal to the lgrp_id we want to use.
         * We don't need to use cas in this case because another thread that
         * races in between our nonatomic check and set may only change
         * p_tr_lgrpid to NLGRPS_MAX at this point.
         */
9695         ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9696         olid = p->p_tr_lgrpid;
9697         if (lgrp_id != olid && olid != NLGRPS_MAX) {
9698                 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
9699                 if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
9700                     olid) {
9701                         olid = p->p_tr_lgrpid;
9702                         ASSERT(olid != LGRP_NONE);
9703                         if (olid != lgrp_id && olid != NLGRPS_MAX) {
9704                                 p->p_tr_lgrpid = NLGRPS_MAX;
9705                         }
9706                 }
9707                 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9708                 membar_producer();
                /*
                 * lgrp_move_thread() won't schedule an async recheck after
                 * a p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
                 * LGRP_NONE.  Recheck p_t1_lgrpid once, now that
                 * p->p_tr_lgrpid is known not to be LGRP_NONE.
                 */
9715                 if (first && p->p_t1_lgrpid != LGRP_NONE &&
9716                     p->p_t1_lgrpid != lgrp_id) {
9717                         first = 0;
9718                         goto again;
9719                 }
9720         }
        /*
         * If no amp has been created yet for lgrp_id, create a new one,
         * as long as we have enough memory to afford it.
         */
9725         if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
9726                 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
9727                 if (trmem > segvn_textrepl_max_bytes) {
9728                         SEGVN_TR_ADDSTAT(normem);
9729                         goto fail;
9730                 }
9731                 if (anon_try_resv_zone(size, NULL) == 0) {
9732                         SEGVN_TR_ADDSTAT(noanon);
9733                         goto fail;
9734                 }
9735                 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
9736                 if (amp == NULL) {
9737                         anon_unresv_zone(size, NULL);
9738                         SEGVN_TR_ADDSTAT(nokmem);
9739                         goto fail;
9740                 }
9741                 ASSERT(amp->refcnt == 1);
9742                 amp->a_szc = szc;
9743                 svntrp->tr_amp[lgrp_id] = amp;
9744                 SEGVN_TR_ADDSTAT(newamp);
9745         }
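        /*
         * Take a hold on the svntr entry and link this segment onto the
         * entry's list of segments that share it.
         */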
9746         svntrp->tr_refcnt++;
9747         ASSERT(svd->svn_trnext == NULL);
9748         ASSERT(svd->svn_trprev == NULL);
9749         svd->svn_trnext = svntrp->tr_svnhead;
9750         svd->svn_trprev = NULL;
9751         if (svntrp->tr_svnhead != NULL) {
9752                 svntrp->tr_svnhead->svn_trprev = svd;
9753         }
9754         svntrp->tr_svnhead = svd;
9755         ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
9756         ASSERT(amp->refcnt >= 1);
9757         svd->amp = amp;
9758         svd->anon_index = 0;
9759         svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
9760         svd->tr_policy_info.mem_lgrpid = lgrp_id;
9761         svd->tr_state = SEGVN_TR_ON;
9762         mutex_exit(&svntr_hashtab[hash].tr_lock);
9763         SEGVN_TR_ADDSTAT(repl);
9764         return;
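        /*
         * Failure: back out the tentative segvn_textrepl_bytes charge and,
         * if this svntr entry has no other users, unlink it from the hash
         * chain and free it.
         */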
9765 fail:
9766         ASSERT(segvn_textrepl_bytes >= size);
9767         atomic_add_long(&segvn_textrepl_bytes, -size);
9768         ASSERT(svntrp != NULL);
9769         ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
9770         if (svntrp->tr_refcnt == 0) {
9771                 ASSERT(svntrp == svntr_hashtab[hash].tr_head);
9772                 svntr_hashtab[hash].tr_head = svntrp->tr_next;
9773                 mutex_exit(&svntr_hashtab[hash].tr_lock);
9774                 kmem_cache_free(svntr_cache, svntrp);
9775         } else {
9776                 mutex_exit(&svntr_hashtab[hash].tr_lock);
9777         }
9778         svd->tr_state = SEGVN_TR_OFF;
9779 }
9780 
/*
 * Convert the segment back to a regular vnode mapping segment by unbinding
 * it from its text replication amp.  This routine is most typically called
 * when the segment is unmapped, but it can also be called when the segment
 * no longer qualifies for text replication (e.g. due to protection changes).
 * If unload_unmap is set, use the HAT_UNLOAD_UNMAP flag in
 * hat_unload_callback().  If we are the last user of the svntr entry, free
 * all of its anon maps and remove it from the hash table.
 */
9789 static void
9790 segvn_textunrepl(struct seg *seg, int unload_unmap)
9791 {
9792         struct segvn_data       *svd = (struct segvn_data *)seg->s_data;
9793         vnode_t                 *vp = svd->vp;
9794         u_offset_t              off = svd->offset;
9795         size_t                  size = seg->s_size;
9796         u_offset_t              eoff = off + size;
9797         uint_t                  szc = seg->s_szc;
9798         ulong_t                 hash = SVNTR_HASH_FUNC(vp);
9799         svntr_t                 *svntrp;
9800         svntr_t                 **prv_svntrp;
9801         lgrp_id_t               lgrp_id = svd->tr_policy_info.mem_lgrpid;
9802         lgrp_id_t               i;
9803 
9804         ASSERT(AS_LOCK_HELD(seg->s_as));
9805         ASSERT(AS_WRITE_HELD(seg->s_as) ||
9806             SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9807         ASSERT(svd->tr_state == SEGVN_TR_ON);
9808         ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9809         ASSERT(svd->amp != NULL);
9810         ASSERT(svd->amp->refcnt >= 1);
9811         ASSERT(svd->anon_index == 0);
9812         ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9813         ASSERT(svntr_hashtab != NULL);
9814 
9815         mutex_enter(&svntr_hashtab[hash].tr_lock);
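        /*
         * Find the svntr entry this segment is linked to; it must exist.
         */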
9816         prv_svntrp = &svntr_hashtab[hash].tr_head;
9817         for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
9818                 ASSERT(svntrp->tr_refcnt != 0);
9819                 if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
9820                     svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
9821                         break;
9822                 }
9823         }
9824         if (svntrp == NULL) {
9825                 panic("segvn_textunrepl: svntr record not found");
9826         }
9827         if (svntrp->tr_amp[lgrp_id] != svd->amp) {
9828                 panic("segvn_textunrepl: amp mismatch");
9829         }
9830         svd->tr_state = SEGVN_TR_OFF;
9831         svd->amp = NULL;
9832         if (svd->svn_trprev == NULL) {
9833                 ASSERT(svntrp->tr_svnhead == svd);
9834                 svntrp->tr_svnhead = svd->svn_trnext;
9835                 if (svntrp->tr_svnhead != NULL) {
9836                         svntrp->tr_svnhead->svn_trprev = NULL;
9837                 }
9838                 svd->svn_trnext = NULL;
9839         } else {
9840                 svd->svn_trprev->svn_trnext = svd->svn_trnext;
9841                 if (svd->svn_trnext != NULL) {
9842                         svd->svn_trnext->svn_trprev = svd->svn_trprev;
9843                         svd->svn_trnext = NULL;
9844                 }
9845                 svd->svn_trprev = NULL;
9846         }
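        /*
         * If other segments still reference this entry, we're done with
         * the shared state; just unload our own mappings below.
         */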
9847         if (--svntrp->tr_refcnt) {
9848                 mutex_exit(&svntr_hashtab[hash].tr_lock);
9849                 goto done;
9850         }
9851         *prv_svntrp = svntrp->tr_next;
9852         mutex_exit(&svntr_hashtab[hash].tr_lock);
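        /*
         * This was the last reference: free each per-lgroup replication
         * anon map, returning its swap reservation and its charge against
         * segvn_textrepl_bytes, then free the entry itself.
         */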
9853         for (i = 0; i < NLGRPS_MAX; i++) {
9854                 struct anon_map *amp = svntrp->tr_amp[i];
9855                 if (amp == NULL) {
9856                         continue;
9857                 }
9858                 ASSERT(amp->refcnt == 1);
9859                 ASSERT(amp->swresv == size);
9860                 ASSERT(amp->size == size);
9861                 ASSERT(amp->a_szc == szc);
9862                 if (amp->a_szc != 0) {
9863                         anon_free_pages(amp->ahp, 0, size, szc);
9864                 } else {
9865                         anon_free(amp->ahp, 0, size);
9866                 }
9867                 svntrp->tr_amp[i] = NULL;
9868                 ASSERT(segvn_textrepl_bytes >= size);
9869                 atomic_add_long(&segvn_textrepl_bytes, -size);
9870                 anon_unresv_zone(amp->swresv, NULL);
9871                 amp->refcnt = 0;
9872                 anonmap_free(amp);
9873         }
9874         kmem_cache_free(svntr_cache, svntrp);
9875 done:
9876         hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
9877             unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
9878 }
9879 
/*
 * This is called when a writable MAP_SHARED mapping is created to a vnode
 * that is currently used for execution (the VVMEXEC flag is set).  In this
 * case we need to prevent further use of the existing replicas.
 */
9885 static void
9886 segvn_inval_trcache(vnode_t *vp)
9887 {
9888         ulong_t                 hash = SVNTR_HASH_FUNC(vp);
9889         svntr_t                 *svntrp;
9890 
9891         ASSERT(vp->v_flag & VVMEXEC);
9892 
9893         if (svntr_hashtab == NULL) {
9894                 return;
9895         }
9896 
9897         mutex_enter(&svntr_hashtab[hash].tr_lock);
9898         svntrp = svntr_hashtab[hash].tr_head;
9899         for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9900                 ASSERT(svntrp->tr_refcnt != 0);
9901                 if (svntrp->tr_vp == vp && svntrp->tr_valid) {
9902                         svntrp->tr_valid = 0;
9903                 }
9904         }
9905         mutex_exit(&svntr_hashtab[hash].tr_lock);
9906 }
9907 
9908 static void
9909 segvn_trasync_thread(void)
9910 {
9911         callb_cpr_t cpr_info;
9912         kmutex_t cpr_lock;      /* just for CPR stuff */
9913 
9914         mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
9915 
9916         CALLB_CPR_INIT(&cpr_info, &cpr_lock,
9917             callb_generic_cpr, "segvn_async");
9918 
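        /*
         * Convert the update interval from seconds to clock ticks and
         * schedule the first wakeup.
         */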
9919         if (segvn_update_textrepl_interval == 0) {
9920                 segvn_update_textrepl_interval = segvn_update_tr_time * hz;
9921         } else {
9922                 segvn_update_textrepl_interval *= hz;
9923         }
9924         (void) timeout(segvn_trupdate_wakeup, NULL,
9925             segvn_update_textrepl_interval);
9926 
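        /*
         * Loop forever in a CPR-safe manner, waiting for
         * segvn_trupdate_wakeup() to post the semaphore before each
         * replication recheck pass.
         */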
9927         for (;;) {
9928                 mutex_enter(&cpr_lock);
9929                 CALLB_CPR_SAFE_BEGIN(&cpr_info);
9930                 mutex_exit(&cpr_lock);
9931                 sema_p(&segvn_trasync_sem);
9932                 mutex_enter(&cpr_lock);
9933                 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
9934                 mutex_exit(&cpr_lock);
9935                 segvn_trupdate();
9936         }
9937 }
9938 
9939 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;
9940 
9941 static void
9942 segvn_trupdate_wakeup(void *dummy)
9943 {
9944         uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();
9945 
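        /*
         * Only wake the async thread if threads have migrated between
         * lgroups since the last wakeup.
         */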
9946         if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
9947                 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
9948                 sema_v(&segvn_trasync_sem);
9949         }
9950 
9951         if (!segvn_disable_textrepl_update &&
9952             segvn_update_textrepl_interval != 0) {
9953                 (void) timeout(segvn_trupdate_wakeup, dummy,
9954                     segvn_update_textrepl_interval);
9955         }
9956 }
9957 
9958 static void
9959 segvn_trupdate(void)
9960 {
9961         ulong_t         hash;
9962         svntr_t         *svntrp;
9963         segvn_data_t    *svd;
9964 
9965         ASSERT(svntr_hashtab != NULL);
9966 
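        /*
         * Walk every hash bucket and every segment linked to each svntr
         * entry, rechecking whether its replica still matches its process
         * T1 home lgroup.
         */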
9967         for (hash = 0; hash < svntr_hashtab_sz; hash++) {
9968                 mutex_enter(&svntr_hashtab[hash].tr_lock);
9969                 svntrp = svntr_hashtab[hash].tr_head;
9970                 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9971                         ASSERT(svntrp->tr_refcnt != 0);
9972                         svd = svntrp->tr_svnhead;
9973                         for (; svd != NULL; svd = svd->svn_trnext) {
9974                                 segvn_trupdate_seg(svd->seg, svd, svntrp,
9975                                     hash);
9976                         }
9977                 }
9978                 mutex_exit(&svntr_hashtab[hash].tr_lock);
9979         }
9980 }
9981 
9982 static void
segvn_trupdate_seg(struct seg *seg, segvn_data_t *svd, svntr_t *svntrp,
    ulong_t hash)
9987 {
9988         proc_t                  *p;
9989         lgrp_id_t               lgrp_id;
9990         struct as               *as;
9991         size_t                  size;
9992         struct anon_map         *amp;
9993 
9994         ASSERT(svd->vp != NULL);
9995         ASSERT(svd->vp == svntrp->tr_vp);
9996         ASSERT(svd->offset == svntrp->tr_off);
9997         ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
9998         ASSERT(seg != NULL);
9999         ASSERT(svd->seg == seg);
10000         ASSERT(seg->s_data == (void *)svd);
10001         ASSERT(seg->s_szc == svntrp->tr_szc);
10002         ASSERT(svd->tr_state == SEGVN_TR_ON);
10003         ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10004         ASSERT(svd->amp != NULL);
10005         ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10006         ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
10007         ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
10008         ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
10009         ASSERT(svntrp->tr_refcnt != 0);
10010         ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));
10011 
10012         as = seg->s_as;
10013         ASSERT(as != NULL && as != &kas);
10014         p = as->a_proc;
10015         ASSERT(p != NULL);
10016         ASSERT(p->p_tr_lgrpid != LGRP_NONE);
10017         lgrp_id = p->p_t1_lgrpid;
10018         if (lgrp_id == LGRP_NONE) {
10019                 return;
10020         }
10021         ASSERT(lgrp_id < NLGRPS_MAX);
10022         if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10023                 return;
10024         }
10025 
        /*
         * Use tryenter locking since we are taking the as/seg locks and the
         * svntr hash lock in the reverse of the synchronous thread order.
         */
10030         if (!AS_LOCK_TRYENTER(as, RW_READER)) {
10031                 SEGVN_TR_ADDSTAT(nolock);
10032                 if (segvn_lgrp_trthr_migrs_snpsht) {
10033                         segvn_lgrp_trthr_migrs_snpsht = 0;
10034                 }
10035                 return;
10036         }
10037         if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10038                 AS_LOCK_EXIT(as);
10039                 SEGVN_TR_ADDSTAT(nolock);
10040                 if (segvn_lgrp_trthr_migrs_snpsht) {
10041                         segvn_lgrp_trthr_migrs_snpsht = 0;
10042                 }
10043                 return;
10044         }
10045         size = seg->s_size;
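        /*
         * No replica exists yet for T1's new home lgroup.  Charge the
         * memory and swap up front and allocate one, backing the charges
         * out on any failure, just as segvn_textrepl() does.
         */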
10046         if (svntrp->tr_amp[lgrp_id] == NULL) {
10047                 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
10048                 if (trmem > segvn_textrepl_max_bytes) {
10049                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10050                         AS_LOCK_EXIT(as);
10051                         atomic_add_long(&segvn_textrepl_bytes, -size);
10052                         SEGVN_TR_ADDSTAT(normem);
10053                         return;
10054                 }
10055                 if (anon_try_resv_zone(size, NULL) == 0) {
10056                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10057                         AS_LOCK_EXIT(as);
10058                         atomic_add_long(&segvn_textrepl_bytes, -size);
10059                         SEGVN_TR_ADDSTAT(noanon);
10060                         return;
10061                 }
                amp = anonmap_alloc(size, size, ANON_NOSLEEP);
10063                 if (amp == NULL) {
10064                         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10065                         AS_LOCK_EXIT(as);
10066                         atomic_add_long(&segvn_textrepl_bytes, -size);
10067                         anon_unresv_zone(size, NULL);
10068                         SEGVN_TR_ADDSTAT(nokmem);
10069                         return;
10070                 }
10071                 ASSERT(amp->refcnt == 1);
10072                 amp->a_szc = seg->s_szc;
10073                 svntrp->tr_amp[lgrp_id] = amp;
10074         }
        /*
         * We don't need to drop the bucket lock, but we do it here to give
         * other threads a chance.  svntrp and svd can't be unlinked as long
         * as the segment lock is held as a writer and the AS is held as
         * well.  After we retake the bucket lock we'll continue from where
         * we left off; we'll be able to reach the end of either list since
         * new entries are always added to the beginning of the lists.
         */
10083         mutex_exit(&svntr_hashtab[hash].tr_lock);
10084         hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
10085         mutex_enter(&svntr_hashtab[hash].tr_lock);
10086 
10087         ASSERT(svd->tr_state == SEGVN_TR_ON);
10088         ASSERT(svd->amp != NULL);
10089         ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10090         ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10091         ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10092 
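        /*
         * Switch the segment to the replica for T1's new home lgroup.
         * Set p_tr_lgrpid to NLGRPS_MAX (still distinct from LGRP_NONE)
         * so that future T1 migrations keep scheduling rechecks.
         */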
10093         svd->tr_policy_info.mem_lgrpid = lgrp_id;
10094         svd->amp = svntrp->tr_amp[lgrp_id];
10095         p->p_tr_lgrpid = NLGRPS_MAX;
10096         SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10097         AS_LOCK_EXIT(as);
10098 
10099         ASSERT(svntrp->tr_refcnt != 0);
10100         ASSERT(svd->vp == svntrp->tr_vp);
10101         ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10102         ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10103         ASSERT(svd->seg == seg);
10104         ASSERT(svd->tr_state == SEGVN_TR_ON);
10105 
10106         SEGVN_TR_ADDSTAT(asyncrepl);
10107 }