/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015, Joyent, Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - shared or copy-on-write from a vnode/anonymous memory.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/debug.h>
#include <sys/cred.h>
#include <sys/vmsystm.h>
#include <sys/tuneable.h>
#include <sys/bitmap.h>
#include <sys/swap.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/callb.h>
#include <sys/vm.h>
#include <sys/dumphdr.h>
#include <sys/lgrp.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_vn.h>
#include <vm/pvn.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/vpage.h>
#include <sys/proc.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/zone.h>
#include <sys/shm_impl.h>

/*
 * segvn_fault needs a temporary page list array.  To avoid calling kmem all
 * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if
 * it can.  In the rare case when this page list is not large enough, it
 * goes and gets a large enough array from kmem.
 *
 * This small page list array covers either 8 pages or 64kB worth of pages -
 * whichever is smaller.
 */
#define	PVN_MAX_GETPAGE_SZ	0x10000
#define	PVN_MAX_GETPAGE_NUM	0x8

#if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
#define	PVN_GETPAGE_SZ	ptob(PVN_MAX_GETPAGE_NUM)
#define	PVN_GETPAGE_NUM	PVN_MAX_GETPAGE_NUM
#else
#define	PVN_GETPAGE_SZ	PVN_MAX_GETPAGE_SZ
#define	PVN_GETPAGE_NUM	btop(PVN_MAX_GETPAGE_SZ)
#endif
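
/*
 * Worked example of the sizing above (illustrative): with 4K base pages,
 * PVN_MAX_GETPAGE_NUM * PAGESIZE is 32K, below the 64K cap, so the #if
 * branch yields PVN_GETPAGE_SZ = ptob(8) = 32K in 8 entries; with 8K base
 * pages the cap binds and btop(0x10000) again yields 8 entries covering
 * 64K.  Either way the on-stack list holds at most 8 pages.
 */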

/*
 * Private seg op routines.
 */
static int	segvn_dup(struct seg *seg, struct seg *newseg);
static int	segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
static void	segvn_free(struct seg *seg);
static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
		    caddr_t addr, size_t len, enum fault_type type,
		    enum seg_rw rw);
static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
static int	segvn_setprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_checkprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static size_t	segvn_swapout(struct seg *seg);
static int	segvn_sync(struct seg *seg, caddr_t addr, size_t len,
		    int attr, uint_t flags);
static size_t	segvn_incore(struct seg *seg, caddr_t addr, size_t len,
		    char *vec);
static int	segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
		    int attr, int op, ulong_t *lockmap, size_t pos);
static int	segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
		    uint_t *protv);
static u_offset_t	segvn_getoffset(struct seg *seg, caddr_t addr);
static int	segvn_gettype(struct seg *seg, caddr_t addr);
static int	segvn_getvp(struct seg *seg, caddr_t addr,
		    struct vnode **vpp);
static int	segvn_advise(struct seg *seg, caddr_t addr, size_t len,
		    uint_t behav);
static void	segvn_dump(struct seg *seg);
static int	segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
		    struct page ***ppp, enum lock_type type, enum seg_rw rw);
static int	segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
		    uint_t szc);
static int	segvn_getmemid(struct seg *seg, caddr_t addr,
		    memid_t *memidp);
static lgrp_mem_policy_info_t	*segvn_getpolicy(struct seg *, caddr_t);
static int	segvn_inherit(struct seg *, caddr_t, size_t, uint_t);

struct seg_ops segvn_ops = {
	.dup		= segvn_dup,
	.unmap		= segvn_unmap,
	.free		= segvn_free,
	.fault		= segvn_fault,
	.faulta		= segvn_faulta,
	.setprot	= segvn_setprot,
	.checkprot	= segvn_checkprot,
	.kluster	= segvn_kluster,
	.swapout	= segvn_swapout,
	.sync		= segvn_sync,
	.incore		= segvn_incore,
	.lockop		= segvn_lockop,
	.getprot	= segvn_getprot,
	.getoffset	= segvn_getoffset,
	.gettype	= segvn_gettype,
	.getvp		= segvn_getvp,
	.advise		= segvn_advise,
	.dump		= segvn_dump,
	.pagelock	= segvn_pagelock,
	.setpagesize	= segvn_setpagesize,
	.getmemid	= segvn_getmemid,
	.getpolicy	= segvn_getpolicy,
	.inherit	= segvn_inherit,
};
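
/*
 * Dispatch sketch (illustrative, not part of this file): generic VM code
 * reaches these routines through the ops vector rather than by name, e.g.
 *
 *	res = SEGOP_FAULT(hat, seg, addr, len, F_INVAL, S_READ);
 *
 * expands to seg->s_ops->fault(...) and lands in segvn_fault() for any
 * segment created by segvn_create().
 */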

/*
 * Common zfod structures, provided as a shorthand for others to use.
 */
static segvn_crargs_t zfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
static segvn_crargs_t kzfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
	PROT_ALL & ~PROT_USER);
static segvn_crargs_t stack_noexec_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);

caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs;	/* user zfod argsp */
caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs;	/* kernel zfod argsp */
caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs;	/* executable stack */
caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */

#define	vpgtob(n)	((n) * sizeof (struct vpage))	/* For brevity */

size_t	segvn_comb_thrshld = UINT_MAX;	/* patchable -- see 1196681 */

size_t	segvn_pglock_comb_thrshld = (1UL << 16);	/* 64K */
size_t	segvn_pglock_comb_balign = (1UL << 16);		/* 64K */
uint_t	segvn_pglock_comb_bshift;
size_t	segvn_pglock_comb_palign;

static int	segvn_concat(struct seg *, struct seg *, int);
static int	segvn_extend_prev(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static int	segvn_extend_next(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static void	segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
static void	segvn_pagelist_rele(page_t **);
static void	segvn_setvnode_mpss(vnode_t *);
static void	segvn_relocate_pages(page_t **, page_t *);
static int	segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
static int	segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
    uint_t, page_t **, page_t **, uint_t *, int *);
static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
    u_offset_t, struct vpage *, page_t **, uint_t,
    enum fault_type, enum seg_rw, int);
static void	segvn_vpage(struct seg *);
static size_t	segvn_count_swap_by_vpages(struct seg *);

static void segvn_purge(struct seg *seg);
static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);
static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);

static int sameprot(struct seg *, caddr_t, size_t);

static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
static int segvn_clrszc(struct seg *);
static struct seg *segvn_split_seg(struct seg *, caddr_t);
static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
    ulong_t, uint_t);

static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
    size_t, void *, u_offset_t);

static struct kmem_cache *segvn_cache;
static struct kmem_cache **segvn_szc_cache;

#ifdef VM_STATS
static struct segvnvmstats_str {
	ulong_t	fill_vp_pages[31];
	ulong_t fltvnpages[49];
	ulong_t	fullszcpages[10];
	ulong_t	relocatepages[3];
	ulong_t	fltanpages[17];
	ulong_t pagelock[2];
	ulong_t	demoterange[3];
} segvnvmstats;
#endif /* VM_STATS */

#define	SDR_RANGE	1		/* demote entire range */
#define	SDR_END		2		/* demote non aligned ends only */
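
/*
 * CALC_LPG_REGION widens [addr, addr + len) to the enclosing large-page
 * boundaries: lpgaddr is addr rounded down to a pgsz boundary, lpgeaddr
 * is addr + len rounded up, and the assertions check that the result
 * stays within the segment.  Fault paths use the widened range so an
 * entire large page is processed at once.
 */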
#define	CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) {	\
	if ((len) != 0) {						\
		lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);	\
		ASSERT(lpgaddr >= (seg)->s_base);			\
		lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) +	\
		    (len)), pgsz);					\
		ASSERT(lpgeaddr > lpgaddr);				\
		ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size);	\
	} else {							\
		lpgeaddr = lpgaddr = (addr);				\
	}								\
}

/*ARGSUSED*/
static int
segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct segvn_data *svd = buf;

	rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
	svd->svn_trnext = svd->svn_trprev = NULL;
	return (0);
}

/*ARGSUSED1*/
static void
segvn_cache_destructor(void *buf, void *cdrarg)
{
	struct segvn_data *svd = buf;

	rw_destroy(&svd->lock);
	mutex_destroy(&svd->segfree_syncmtx);
}

/*ARGSUSED*/
static int
svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (svntr_t));
	return (0);
}

/*
 * Patching this variable to non-zero allows the system to run with
 * stacks marked as "not executable".  It's a bit of a kludge, but is
 * provided as a tweakable for platforms that export those ABIs
 * (e.g. sparc V8) that have executable stacks enabled by default.
 * There are also some restrictions for platforms that don't actually
 * implement 'noexec' protections.
 *
 * Once enabled, the system is (therefore) unable to provide a fully
 * ABI-compliant execution environment, though practically speaking,
 * almost everything works.  The exceptions are generally some
 * interpreters and debuggers that create executable code on the stack
 * and jump into it (without explicitly mprotecting the address range
 * to include PROT_EXEC).
 *
 * One important class of applications that this disables is those
 * that have been transformed into malicious agents using one of the
 * numerous "buffer overflow" attacks.  See 4007890.
 */
int noexec_user_stack = 0;
int noexec_user_stack_log = 1;
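
/*
 * For example (illustrative): on a platform whose ABI defaults to
 * executable stacks, an administrator can add
 *
 *	set noexec_user_stack = 1
 *
 * to /etc/system to enable this behavior for all processes at boot.
 */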

int segvn_lpg_disable = 0;
uint_t segvn_maxpgszc = 0;

ulong_t segvn_vmpss_clrszc_cnt;
ulong_t segvn_vmpss_clrszc_err;
ulong_t segvn_fltvnpages_clrszc_cnt;
ulong_t segvn_fltvnpages_clrszc_err;
ulong_t segvn_setpgsz_align_err;
ulong_t segvn_setpgsz_anon_align_err;
ulong_t segvn_setpgsz_getattr_err;
ulong_t segvn_setpgsz_eof_err;
ulong_t segvn_faultvnmpss_align_err1;
ulong_t segvn_faultvnmpss_align_err2;
ulong_t segvn_faultvnmpss_align_err3;
ulong_t segvn_faultvnmpss_align_err4;
ulong_t segvn_faultvnmpss_align_err5;
ulong_t	segvn_vmpss_pageio_deadlk_err;

int segvn_use_regions = 1;

/*
 * Segvn supports the text replication optimization for NUMA platforms.
 * Text replicas are represented by anon maps (amp).  There's one amp per
 * text file region per lgroup.  A process chooses the amp for each of its
 * text mappings based on the lgroup assignment of its main thread
 * (t_tid = 1).  All processes that want a replica on a particular lgroup
 * for the same text file mapping share the same amp.  amp's are looked up
 * in the svntr_hashtab hash table with vp,off,size,szc used as a key.
 * Text replication segments are read-only MAP_PRIVATE|MAP_TEXT segments
 * that map a vnode.  Replication is achieved by forcing COW faults from
 * vnode to amp and mapping amp pages instead of vnode pages.  A
 * replication amp is assigned to a segment when it takes its first
 * pagefault.  To handle main thread lgroup rehoming, segvn_trasync_thread
 * periodically rechecks whether the process still maps an amp local to
 * the main thread.  If not, the async thread forces the process to remap
 * to an amp in the new home lgroup of the main thread.  The current text
 * replication implementation only benefits workloads that do most of
 * their work in the main thread of a process, or whose threads all run in
 * the same lgroup.  To extend the text replication benefit to other types
 * of multithreaded workloads, further work would be needed in the hat
 * layer to allow the same virtual address in the same hat to
 * simultaneously map different physical addresses (i.e. page table
 * replication would be needed for x86).
 *
 * amp pages are used instead of vnode pages as long as the segment has a
 * very simple life cycle: it's created via segvn_create(), handles S_EXEC
 * (S_READ) pagefaults and is fully unmapped.  If anything more
 * complicated happens, such as a protection change, a real COW fault, a
 * pagesize change, an MC_LOCK request or a partial unmap, we turn off
 * text replication by converting the segment back to a vnode-only segment
 * (unmap the segment's address range and set svd->amp to NULL).
 *
 * The original file can be changed after an amp is inserted into
 * svntr_hashtab.  Processes that are launched after the file has already
 * changed can't use the replicas created prior to the file change.  To
 * implement this, hash entries are timestamped: a replica can only be
 * used if the current file modification time is the same as the timestamp
 * saved when the hash entry was created.  However, timestamps alone are
 * not sufficient to detect file modification via mmap(MAP_SHARED)
 * mappings, so we deal with file changes via MAP_SHARED mappings
 * differently.  When a writable MAP_SHARED mapping is created to a vnode
 * marked as executable, we mark all existing replicas for this vnode as
 * unusable for future text mappings.  And we don't create new replicas
 * for files that currently have potentially writable MAP_SHARED mappings
 * (i.e. vn_is_mapped(V_WRITE) is true).
 */
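
/*
 * Lookup sketch (illustrative; names here are hypothetical, the real
 * hash is private to this file): a segment finds its replica roughly as
 *
 *	bucket = &svntr_hashtab[hash(vp) % svntr_hashtab_sz];
 *	for each svntr_t on the bucket: match vp, off, size, szc and the
 *	    saved modification time, then pick the amp that belongs to
 *	    the main thread's home lgroup.
 */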

#define	SEGVN_TEXTREPL_MAXBYTES_FACTOR	(20)

size_t	segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;

static ulong_t			svntr_hashtab_sz = 512;
static svntr_bucket_t		*svntr_hashtab = NULL;
static struct kmem_cache	*svntr_cache;
static svntr_stats_t		*segvn_textrepl_stats;
static ksema_t			segvn_trasync_sem;

int				segvn_disable_textrepl = 1;
size_t				textrepl_size_thresh = (size_t)-1;
size_t				segvn_textrepl_bytes = 0;
size_t				segvn_textrepl_max_bytes = 0;
clock_t				segvn_update_textrepl_interval = 0;
int				segvn_update_tr_time = 10;
int				segvn_disable_textrepl_update = 0;

static void segvn_textrepl(struct seg *);
static void segvn_textunrepl(struct seg *, int);
static void segvn_inval_trcache(vnode_t *);
static void segvn_trasync_thread(void);
static void segvn_trupdate_wakeup(void *);
static void segvn_trupdate(void);
static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
    ulong_t);

/*
 * Initialize segvn data structures
 */
void
segvn_init(void)
{
	uint_t maxszc;
	uint_t szc;
	size_t pgsz;

	segvn_cache = kmem_cache_create("segvn_cache",
	    sizeof (struct segvn_data), 0,
	    segvn_cache_constructor, segvn_cache_destructor, NULL,
	    NULL, NULL, 0);

	if (segvn_lpg_disable == 0) {
		szc = maxszc = page_num_pagesizes() - 1;
		if (szc == 0) {
			segvn_lpg_disable = 1;
		}
		if (page_get_pagesize(0) != PAGESIZE) {
			panic("segvn_init: bad szc 0");
			/*NOTREACHED*/
		}
		while (szc != 0) {
			pgsz = page_get_pagesize(szc);
			if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
				panic("segvn_init: bad szc %d", szc);
				/*NOTREACHED*/
			}
			szc--;
		}
		if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
			segvn_maxpgszc = maxszc;
	}

	if (segvn_maxpgszc) {
		segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
		    (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
		    KM_SLEEP);
	}

	for (szc = 1; szc <= segvn_maxpgszc; szc++) {
		char	str[32];

		(void) sprintf(str, "segvn_szc_cache%d", szc);
		segvn_szc_cache[szc] = kmem_cache_create(str,
		    page_get_pagecnt(szc) * sizeof (page_t *), 0,
		    NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
	}
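
	/*
	 * Sizing note (illustrative): each cache above hands out one
	 * page_t pointer array per large page of its size class; with
	 * 4K base pages, szc 1 at 2M means page_get_pagecnt(1) == 512
	 * pointers per allocation.
	 */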

	if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
		segvn_use_regions = 0;

	/*
	 * For now shared regions and text replication segvn support
	 * are mutually exclusive.  This is acceptable because
	 * currently significant benefit from text replication was
	 * only observed on AMD64 NUMA platforms (due to relatively
	 * small L2$ size) and currently we don't support shared
	 * regions on x86.
	 */
	if (segvn_use_regions && !segvn_disable_textrepl) {
		segvn_disable_textrepl = 1;
	}

#if defined(_LP64)
	if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
	    !segvn_disable_textrepl) {
		ulong_t i;
		size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);

		svntr_cache = kmem_cache_create("svntr_cache",
		    sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
		    NULL, NULL, NULL, 0);
		svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
		for (i = 0; i < svntr_hashtab_sz; i++) {
			mutex_init(&svntr_hashtab[i].tr_lock, NULL,
			    MUTEX_DEFAULT, NULL);
		}
		segvn_textrepl_max_bytes = ptob(physmem) /
		    segvn_textrepl_max_bytes_factor;
		segvn_textrepl_stats = kmem_zalloc(NCPU *
		    sizeof (svntr_stats_t), KM_SLEEP);
		sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
		(void) thread_create(NULL, 0, segvn_trasync_thread,
		    NULL, 0, &p0, TS_RUN, minclsyspri);
	}
#endif

	if (!ISP2(segvn_pglock_comb_balign) ||
	    segvn_pglock_comb_balign < PAGESIZE) {
		segvn_pglock_comb_balign = 1UL << 16; /* 64K */
	}
	segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
	segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
}

#define	SEGVN_PAGEIO	((void *)0x1)
#define	SEGVN_NOPAGEIO	((void *)0x2)

static void
segvn_setvnode_mpss(vnode_t *vp)
{
	int err;

	ASSERT(vp->v_mpssdata == NULL ||
	    vp->v_mpssdata == SEGVN_PAGEIO ||
	    vp->v_mpssdata == SEGVN_NOPAGEIO);

	if (vp->v_mpssdata == NULL) {
		if (vn_vmpss_usepageio(vp)) {
			err = VOP_PAGEIO(vp, (page_t *)NULL,
			    (u_offset_t)0, 0, 0, CRED(), NULL);
		} else {
			err = ENOSYS;
		}
		/*
		 * set v_mpssdata just once per vnode life
		 * so that it never changes.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_mpssdata == NULL) {
			if (err == EINVAL) {
				vp->v_mpssdata = SEGVN_PAGEIO;
			} else {
				vp->v_mpssdata = SEGVN_NOPAGEIO;
			}
		}
		mutex_exit(&vp->v_lock);
	}
}
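
/*
 * Note on the probe above: the zero-length VOP_PAGEIO transfers no data.
 * A filesystem that implements pageio is expected to reject the
 * degenerate request with EINVAL, while one that doesn't returns ENOSYS
 * (or another error), so EINVAL is the "pageio works" answer, cached in
 * v_mpssdata for the life of the vnode.
 */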

int
segvn_create(struct seg *seg, void *argsp)
{
	struct segvn_crargs *a = (struct segvn_crargs *)argsp;
	struct segvn_data *svd;
	size_t swresv = 0;
	struct cred *cred;
	struct anon_map *amp;
	int error = 0;
	size_t pgsz;
	lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
	int use_rgn = 0;
	int trok = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
		panic("segvn_create type");
		/*NOTREACHED*/
	}

	/*
	 * Check arguments.  If a shared anon structure is given then
	 * it is illegal to also specify a vp.
	 */
	if (a->amp != NULL && a->vp != NULL) {
		panic("segvn_create anon_map");
		/*NOTREACHED*/
	}

	if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
	    a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
	    segvn_use_regions) {
		use_rgn = 1;
	}

	/* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
	if (a->type == MAP_SHARED)
		a->flags &= ~MAP_NORESERVE;

	if (a->szc != 0) {
		if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
		    (a->amp != NULL && a->type == MAP_PRIVATE) ||
		    (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
			a->szc = 0;
		} else {
			if (a->szc > segvn_maxpgszc)
				a->szc = segvn_maxpgszc;
			pgsz = page_get_pagesize(a->szc);
			if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
			    !IS_P2ALIGNED(seg->s_size, pgsz)) {
				a->szc = 0;
			} else if (a->vp != NULL) {
				if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
					/*
					 * paranoid check.
					 * hat_page_demote() is not supported
					 * on swapfs pages.
					 */
					a->szc = 0;
				} else if (map_addr_vacalign_check(seg->s_base,
				    a->offset & PAGEMASK)) {
					a->szc = 0;
				}
			} else if (a->amp != NULL) {
				pgcnt_t anum = btopr(a->offset);
				pgcnt_t pgcnt = page_get_pagecnt(a->szc);
				if (!IS_P2ALIGNED(anum, pgcnt)) {
					a->szc = 0;
				}
			}
		}
	}

	/*
	 * If segment may need private pages, reserve them now.
	 */
	if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
	    (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
		if (anon_resv_zone(seg->s_size,
		    seg->s_as->a_proc->p_zone) == 0)
			return (EAGAIN);
		swresv = seg->s_size;
		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, swresv, 1);
	}
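
	/*
	 * Reservation example (illustrative): a 1MB MAP_PRIVATE mapping
	 * of a file with PROT_WRITE reserves 1MB of swap here (swresv ==
	 * seg->s_size), since every page may be COW-copied; the same
	 * mapping with MAP_NORESERVE reserves nothing until fault time.
	 */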

	/*
	 * Reserve any mapping structures that may be required.
	 *
	 * Don't do it for segments that may use regions. It's currently a
	 * noop in the hat implementations anyway.
	 */
	if (!use_rgn) {
		hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
	}

	if (a->cred) {
		cred = a->cred;
		crhold(cred);
	} else {
		crhold(cred = CRED());
	}

	/* Inform the vnode of the new mapping */
	if (a->vp != NULL) {
		error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
		    seg->s_as, seg->s_base, seg->s_size, a->prot,
		    a->maxprot, a->type, cred, NULL);
		if (error) {
			if (swresv != 0) {
				anon_unresv_zone(swresv,
				    seg->s_as->a_proc->p_zone);
				TRACE_3(TR_FAC_VM, TR_ANON_PROC,
				    "anon proc:%p %lu %u", seg, swresv, 0);
			}
			crfree(cred);
			if (!use_rgn) {
				hat_unload(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, HAT_UNLOAD_UNMAP);
			}
			return (error);
		}
		/*
		 * svntr_hashtab will be NULL if we support shared regions.
		 */
		trok = ((a->flags & MAP_TEXT) &&
		    (seg->s_size > textrepl_size_thresh ||
		    (a->flags & _MAP_TEXTREPL)) &&
		    lgrp_optimizations() && svntr_hashtab != NULL &&
		    a->type == MAP_PRIVATE && swresv == 0 &&
		    !(a->flags & MAP_NORESERVE) &&
		    seg->s_as != &kas && a->vp->v_type == VREG);

		ASSERT(!trok || !use_rgn);
	}

	/*
	 * MAP_NORESERVE mappings don't count towards the VSZ of a process
	 * until we fault the pages in.
	 */
	if ((a->vp == NULL || a->vp->v_type != VREG) &&
	    a->flags & MAP_NORESERVE) {
		seg->s_as->a_resvsize -= seg->s_size;
	}

	/*
	 * If more than one segment in the address space, and they're adjacent
	 * virtually, try to concatenate them.  Don't concatenate if an
	 * explicit anon_map structure was supplied (e.g., SystemV shared
	 * memory) or if we'll use text replication for this segment.
	 */
	if (a->amp == NULL && !use_rgn && !trok) {
		struct seg *pseg, *nseg;
		struct segvn_data *psvd, *nsvd;
		lgrp_mem_policy_t ppolicy, npolicy;
		uint_t	lgrp_mem_policy_flags = 0;
		extern lgrp_mem_policy_t lgrp_mem_default_policy;

		/*
		 * Memory policy flags (lgrp_mem_policy_flags) are valid when
		 * extending stack/heap segments.
		 */
		if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
		    !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
			lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
		} else {
			/*
			 * Get policy when not extending it from another segment
			 */
			mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
		}

		/*
		 * First, try to concatenate the previous and new segments
		 */
		pseg = AS_SEGPREV(seg->s_as, seg);
		if (pseg != NULL &&
		    pseg->s_base + pseg->s_size == seg->s_base &&
		    pseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from previous segment.
			 * When extension is specified (e.g. for heap) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy; otherwise the default
			 * policy is used, based on the extended segment size.
			 */
			psvd = (struct segvn_data *)pseg->s_data;
			ppolicy = psvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_UP) {
				if (ppolicy != lgrp_mem_default_policy) {
					mpolicy = ppolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    pseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == ppolicy &&
			    (pseg->s_size + seg->s_size <=
			    segvn_comb_thrshld || psvd->amp == NULL) &&
			    segvn_extend_prev(pseg, seg, a, swresv) == 0) {
				/*
				 * success! now try to concatenate
				 * with following seg
				 */
				crfree(cred);
				nseg = AS_SEGNEXT(pseg->s_as, pseg);
				if (nseg != NULL &&
				    nseg != pseg &&
				    nseg->s_ops == &segvn_ops &&
				    pseg->s_base + pseg->s_size ==
				    nseg->s_base)
					(void) segvn_concat(pseg, nseg, 0);
				ASSERT(pseg->s_szc == 0 ||
				    (a->szc == pseg->s_szc &&
				    IS_P2ALIGNED(pseg->s_base, pgsz) &&
				    IS_P2ALIGNED(pseg->s_size, pgsz)));
				return (0);
			}
		}
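
		/*
		 * The two directions here mirror how segments grow:
		 * LGRP_MP_FLAG_EXTEND_UP above covers a segment such as
		 * the heap growing upward (merge with the previous
		 * segment), while LGRP_MP_FLAG_EXTEND_DOWN below covers
		 * a stack growing downward (merge with the following
		 * segment).
		 */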

		/*
		 * Failed, so try to concatenate with following seg
		 */
		nseg = AS_SEGNEXT(seg->s_as, seg);
		if (nseg != NULL &&
		    seg->s_base + seg->s_size == nseg->s_base &&
		    nseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from next segment.
			 * When extension is specified (e.g. for stack) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy; otherwise the default
			 * policy is used, based on the extended segment size.
			 */
			nsvd = (struct segvn_data *)nseg->s_data;
			npolicy = nsvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_DOWN) {
				if (npolicy != lgrp_mem_default_policy) {
					mpolicy = npolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    nseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == npolicy &&
			    segvn_extend_next(seg, nseg, a, swresv) == 0) {
				crfree(cred);
				ASSERT(nseg->s_szc == 0 ||
				    (a->szc == nseg->s_szc &&
				    IS_P2ALIGNED(nseg->s_base, pgsz) &&
				    IS_P2ALIGNED(nseg->s_size, pgsz)));
				return (0);
			}
		}
	}

	if (a->vp != NULL) {
		VN_HOLD(a->vp);
		if (a->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, a->vp);
	}
	svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	seg->s_ops = &segvn_ops;
	seg->s_data = (void *)svd;
	seg->s_szc = a->szc;

	svd->seg = seg;
	svd->vp = a->vp;
	/*
	 * Anonymous mappings have no backing file, so the offset is
	 * meaningless.
	 */
	svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
	svd->prot = a->prot;
	svd->maxprot = a->maxprot;
	svd->pageprot = 0;
	svd->type = a->type;
	svd->vpage = NULL;
	svd->cred = cred;
	svd->advice = MADV_NORMAL;
	svd->pageadvice = 0;
	svd->flags = (ushort_t)a->flags;
	svd->softlockcnt = 0;
	svd->softlockcnt_sbase = 0;
	svd->softlockcnt_send = 0;
	svd->svn_inz = 0;
	svd->rcookie = HAT_INVALID_REGION_COOKIE;
	svd->pageswap = 0;

	if (a->szc != 0 && a->vp != NULL) {
		segvn_setvnode_mpss(a->vp);
	}
	if (svd->type == MAP_SHARED && svd->vp != NULL &&
	    (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
		ASSERT(vn_is_mapped(svd->vp, V_WRITE));
		segvn_inval_trcache(svd->vp);
	}

	amp = a->amp;
	if ((svd->amp = amp) == NULL) {
		svd->anon_index = 0;
		if (svd->type == MAP_SHARED) {
			svd->swresv = 0;
			/*
			 * Shared mappings to a vp need no other setup.
			 * If we have a shared mapping to an anon_map object
			 * which hasn't been allocated yet, allocate the
			 * struct now so that it will be properly shared
			 * by remembering the swap reservation there.
			 */
			if (a->vp == NULL) {
				svd->amp = anonmap_alloc(seg->s_size, swresv,
				    ANON_SLEEP);
				svd->amp->a_szc = seg->s_szc;
			}
		} else {
			/*
			 * Private mapping (with or without a vp).
			 * Allocate anon_map when needed.
			 */
			svd->swresv = swresv;
		}
	} else {
		pgcnt_t anon_num;

		/*
		 * Mapping to an existing anon_map structure without a vp.
		 * For now we will ensure that the segment size isn't larger
		 * than the size - offset gives us.  Later on we may wish to
		 * have the anon array dynamically allocated itself so that
		 * we don't always have to allocate all the anon pointer slots.
		 * This of course involves adding extra code to check that we
		 * aren't trying to use an anon pointer slot beyond the end
		 * of the currently allocated anon array.
		 */
		if ((amp->size - a->offset) < seg->s_size) {
			panic("segvn_create anon_map size");
			/*NOTREACHED*/
		}

		anon_num = btopr(a->offset);

		if (a->type == MAP_SHARED) {
			/*
			 * SHARED mapping to a given anon_map.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			if (a->szc > amp->a_szc) {
				amp->a_szc = a->szc;
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
			svd->anon_index = anon_num;
			svd->swresv = 0;
		} else {
			/*
			 * PRIVATE mapping to a given anon_map.
			 * Make sure that all the needed anon
			 * structures are created (so that we will
			 * share the underlying pages if nothing
			 * is written by this mapping) and then
			 * duplicate the anon array as is done
			 * when a privately mapped segment is dup'ed.
			 */
			struct anon *ap;
			caddr_t addr;
			caddr_t eaddr;
			ulong_t anon_idx;
			int hat_flag = HAT_LOAD;

			if (svd->flags & MAP_TEXT) {
				hat_flag |= HAT_LOAD_TEXT;
			}

			svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
			svd->amp->a_szc = seg->s_szc;
			svd->anon_index = 0;
			svd->swresv = swresv;

			/*
			 * Prevent 2 threads from allocating anon
			 * slots simultaneously.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			eaddr = seg->s_base + seg->s_size;

			for (anon_idx = anon_num, addr = seg->s_base;
			    addr < eaddr; addr += PAGESIZE, anon_idx++) {
				page_t *pp;

				if ((ap = anon_get_ptr(amp->ahp,
				    anon_idx)) != NULL)
					continue;

				/*
				 * Allocate the anon struct now.
				 * Might as well load up translation
				 * to the page while we're at it...
				 */
				pp = anon_zero(seg, addr, &ap, cred);
				if (ap == NULL || pp == NULL) {
					panic("segvn_create anon_zero");
					/*NOTREACHED*/
				}

				/*
				 * Re-acquire the anon_map lock and
				 * initialize the anon array entry.
				 */
				ASSERT(anon_get_ptr(amp->ahp,
				    anon_idx) == NULL);
				(void) anon_set_ptr(amp->ahp, anon_idx, ap,
				    ANON_SLEEP);

				ASSERT(seg->s_szc == 0);
				ASSERT(!IS_VMODSORT(pp->p_vnode));

				ASSERT(use_rgn == 0);
				hat_memload(seg->s_as->a_hat, addr, pp,
				    svd->prot & ~PROT_WRITE, hat_flag);

				page_unlock(pp);
			}
			ASSERT(seg->s_szc == 0);
			anon_dup(amp->ahp, anon_num, svd->amp->ahp,
			    0, seg->s_size);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}

	/*
	 * Set default memory allocation policy for segment
	 *
	 * Always set policy for private memory at least for initialization
	 * even if this is a shared memory segment
	 */
	(void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);

	if (svd->type == MAP_SHARED)
		(void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
		    svd->vp, svd->offset, seg->s_size);

	if (use_rgn) {
		ASSERT(!trok);
		ASSERT(svd->amp == NULL);
		svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
		    seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
		    (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
		    HAT_REGION_TEXT);
	}

	ASSERT(!trok || !(svd->prot & PROT_WRITE));
	svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;

	return (0);
}
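
/*
 * Typical use (illustrative): anonymous zero-fill segments are created
 * by passing one of the canned argument blocks defined near the top of
 * this file, e.g.
 *
 *	error = as_map(as, addr, len, segvn_create, zfod_argsp);
 *
 * which yields a MAP_PRIVATE segment with no vnode, so faults are
 * satisfied from zeroed anonymous pages.
 */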

/*
 * Concatenate two existing segments, if possible.
 * Return 0 on success, -1 if the two segments are not compatible
 * or -2 on memory allocation failure.
 * If amp_cat == 1 then try to concatenate segments with anon maps.
 */
static int
segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
{
	struct segvn_data *svd1 = seg1->s_data;
	struct segvn_data *svd2 = seg2->s_data;
	struct anon_map *amp1 = svd1->amp;
	struct anon_map *amp2 = svd2->amp;
	struct vpage *vpage1 = svd1->vpage;
	struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
	size_t size, nvpsize;
	pgcnt_t npages1, npages2;

	ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
	ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
	ASSERT(seg1->s_ops == seg2->s_ops);

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
	    HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* both segments exist, try to merge them */
#define	incompat(x)	(svd1->x != svd2->x)
	if (incompat(vp) || incompat(maxprot) ||
	    (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
	    (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
	    incompat(type) || incompat(cred) || incompat(flags) ||
	    seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
	    (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
		return (-1);
#undef incompat

	/*
	 * vp == NULL implies zfod, offset doesn't matter
	 */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != svd2->offset) {
		return (-1);
	}

	/*
	 * Don't concatenate if either segment uses text replication.
	 */
	if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	/*
	 * Fail early if we're not supposed to concatenate
	 * segments with non NULL amp.
	 */
	if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
		return (-1);
	}

	if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
		if (amp1 != amp2) {
			return (-1);
		}
		if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
		    svd2->anon_index) {
			return (-1);
		}
		ASSERT(amp1 == NULL || amp1->refcnt >= 2);
	}

	/*
	 * If either seg has vpages, create a new merged vpage array.
	 */
	if (vpage1 != NULL || vpage2 != NULL) {
		struct vpage *vp, *evp;

		npages1 = seg_pages(seg1);
		npages2 = seg_pages(seg2);
		nvpsize = vpgtob(npages1 + npages2);

		if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
			return (-2);
		}

		if (vpage1 != NULL) {
			bcopy(vpage1, nvpage, vpgtob(npages1));
		} else {
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETPROT(vp, svd1->prot);
				VPP_SETADVICE(vp, svd1->advice);
			}
		}

		if (vpage2 != NULL) {
			bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
		} else {
			evp = nvpage + npages1 + npages2;
			for (vp = nvpage + npages1; vp < evp; vp++) {
				VPP_SETPROT(vp, svd2->prot);
				VPP_SETADVICE(vp, svd2->advice);
			}
		}

		if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
			ASSERT(svd1->swresv == seg1->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}

		if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
			ASSERT(svd2->swresv == seg2->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			vp = nvpage + npages1;
			evp = vp + npages2;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT((vpage1 != NULL || vpage2 != NULL) ||
	    (svd1->pageswap == 0 && svd2->pageswap == 0));
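
	/*
	 * The two loops above reconcile mixed accounting: when only one
	 * side used per-page swap accounting, the fully reserved
	 * (non-pageswap) side has every page marked swap-reserved so the
	 * merged segment can be accounted uniformly per page.
	 */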

	/*
	 * If either segment has private pages, create a new merged anon
	 * array.  If merging shared anon segments just decrement the anon
	 * map's refcnt.
	 */
	if (amp1 != NULL && svd1->type == MAP_SHARED) {
		ASSERT(amp1 == amp2 && svd1->vp == NULL);
		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		ASSERT(amp1->refcnt >= 2);
		amp1->refcnt--;
		ANON_LOCK_EXIT(&amp1->a_rwlock);
		svd2->amp = NULL;
	} else if (amp1 != NULL || amp2 != NULL) {
		struct anon_hdr *nahp;
		struct anon_map *namp = NULL;
		size_t asize;

		ASSERT(svd1->type == MAP_PRIVATE);

		asize = seg1->s_size + seg2->s_size;
		if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
			if (nvpage != NULL) {
				kmem_free(nvpage, nvpsize);
			}
			return (-2);
		}
		if (amp1 != NULL) {
			/*
			 * XXX anon rwlock is not really needed because
			 * this is a private segment and we are writers.
			 */
			ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
			ASSERT(amp1->refcnt == 1);
			if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
			    nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp1->a_rwlock);
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp2 != NULL) {
			ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
			ASSERT(amp2->refcnt == 1);
			if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
			    nahp, btop(seg1->s_size), btop(seg2->s_size),
			    ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp2->a_rwlock);
				if (amp1 != NULL) {
					ANON_LOCK_EXIT(&amp1->a_rwlock);
				}
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp1 != NULL) {
			namp = amp1;
			anon_release(amp1->ahp, btop(amp1->size));
		}
		if (amp2 != NULL) {
			if (namp == NULL) {
				ASSERT(amp1 == NULL);
				namp = amp2;
				anon_release(amp2->ahp, btop(amp2->size));
			} else {
				amp2->refcnt--;
				ANON_LOCK_EXIT(&amp2->a_rwlock);
				anonmap_free(amp2);
			}
			svd2->amp = NULL; /* needed for seg_free */
		}
		namp->ahp = nahp;
		namp->size = asize;
		svd1->amp = namp;
		svd1->anon_index = 0;
		ANON_LOCK_EXIT(&namp->a_rwlock);
	}
	/*
	 * Now free the old vpage structures.
	 */
	if (nvpage != NULL) {
		if (vpage1 != NULL) {
			kmem_free(vpage1, vpgtob(npages1));
		}
		if (vpage2 != NULL) {
			svd2->vpage = NULL;
			kmem_free(vpage2, vpgtob(npages2));
		}
		if (svd2->pageprot) {
			svd1->pageprot = 1;
		}
		if (svd2->pageadvice) {
			svd1->pageadvice = 1;
		}
		if (svd2->pageswap) {
			svd1->pageswap = 1;
		}
		svd1->vpage = nvpage;
	}

	/* all looks ok, merge segments */
	svd1->swresv += svd2->swresv;
	svd2->swresv = 0;	/* so seg_free doesn't release swap space */
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	return (0);
}
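
/*
 * The two helpers below are the cheap path taken at segvn_create time:
 * rather than creating a new segment and then merging, an adjacent
 * existing segvn segment is simply grown over the new range (upward via
 * segvn_extend_prev for heap-style growth, downward via
 * segvn_extend_next for stack-style growth).
 */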

/*
 * Extend the previous segment (seg1) to include the
 * new segment (seg2 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_prev(struct seg *seg1, struct seg *seg2, struct segvn_crargs *a,
    size_t swresv)
{
	struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
	size_t size;
	struct anon_map *amp1;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
		return (-1);
	}

	/* second segment is new, try to extend first */
	/* XXX - should also check cred */
	if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
	    (!svd1->pageprot && (svd1->prot != a->prot)) ||
	    svd1->type != a->type || svd1->flags != a->flags ||
	    seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
		return (-1);

	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
		return (-1);

	if (svd1->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp1 = svd1->amp;
	if (amp1) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 *
		 * Can't grow a MAP_SHARED segment with an anonmap because
		 * there may be existing anon slots where we want to extend
		 * the segment and we wouldn't know what to do with them
		 * (e.g., for tmpfs right thing is to just leave them there,
		 * for /dev/zero they should be cleared out).
		 */
		if (svd1->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		if (amp1->refcnt > 1) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
		    btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		amp1->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp1->a_rwlock);
	}
	if (svd1->vpage != NULL) {
		struct vpage *vp, *evp;
		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL)
			return (-1);
		bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
		kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
		svd1->vpage = new_vpage;

		vp = new_vpage + seg_pages(seg1);
		evp = vp + seg_pages(seg2);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
		if (svd1->pageswap && swresv) {
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(swresv == seg2->s_size);
			vp = new_vpage + seg_pages(seg1);
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	svd1->swresv += swresv;
	if (svd1->pageprot && (a->prot & PROT_WRITE) &&
	    svd1->type == MAP_SHARED && svd1->vp != NULL &&
	    (svd1->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
		segvn_inval_trcache(svd1->vp);
	}
	return (0);
}

/*
 * Extend the next segment (seg2) to include the
 * new segment (seg1 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_next(
	struct seg *seg1,
	struct seg *seg2,
	struct segvn_crargs *a,
	size_t swresv)
{
	struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
	size_t size;
	struct anon_map *amp2;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));

	if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* first segment is new, try to extend second */
	/* XXX - should also check cred */
	if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
	    (!svd2->pageprot && (svd2->prot != a->prot)) ||
	    svd2->type != a->type || svd2->flags != a->flags ||
	    seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
		return (-1);
	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd2->vp != NULL &&
	    (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
		return (-1);

	if (svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp2 = svd2->amp;
	if (amp2) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 *
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 */
		if (svd2->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
		if (amp2->refcnt > 1) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
		    btop(seg2->s_size), btop(seg1->s_size),
		    ANON_NOSLEEP | ANON_GROWDOWN);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		amp2->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp2->a_rwlock);
	}
	if (svd2->vpage != NULL) {
		struct vpage *vp, *evp;
		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL) {
			/* Not merging segments so adjust anon_index back */
			if (amp2)
				svd2->anon_index += seg_pages(seg1);
			return (-1);
		}
		bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
		    vpgtob(seg_pages(seg2)));
		kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
		svd2->vpage = new_vpage;

		vp = new_vpage;
		evp = vp + seg_pages(seg1);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
		if (svd2->pageswap && swresv) {
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			ASSERT(swresv == seg1->s_size);
			vp = new_vpage;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
	size = seg1->s_size;
	seg_free(seg1);
	seg2->s_size += size;
	seg2->s_base -= size;
	svd2->offset -= size;
	svd2->swresv += swresv;
	if (svd2->pageprot && (a->prot & PROT_WRITE) &&
	    svd2->type == MAP_SHARED && svd2->vp != NULL &&
	    (svd2->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
		segvn_inval_trcache(svd2->vp);
	}
	return (0);
}
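
/*
 * Note the asymmetry above: growing downward moves the segment base and
 * the backing offset back by the added size (seg2->s_base -= size;
 * svd2->offset -= size), whereas segvn_extend_prev only grows s_size.
 */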

/*
 * Duplicate all the pages in the segment.  This may break COW sharing for a
 * given page.  If the page is marked with inherit zero set, then instead of
 * duplicating the page, we zero the page.
 */
static int
segvn_dup_pages(struct seg *seg, struct seg *newseg)
{
	int error;
	uint_t prot;
	page_t *pp;
	struct anon *ap, *newap;
	size_t i;
	caddr_t addr;

	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data;
	ulong_t old_idx = svd->anon_index;
	ulong_t new_idx = 0;

	i = btopr(seg->s_size);
	addr = seg->s_base;

	/*
	 * XXX break cow sharing using PAGESIZE
	 * pages. They will be relocated into larger
	 * pages at fault time.
	 */
	while (i-- > 0) {
		if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) {
			struct vpage *vpp;

			vpp = &svd->vpage[seg_page(seg, addr)];

			/*
			 * prot need not be computed below because
			 * anon_private is going to ignore it anyway,
			 * as the child doesn't inherit pagelock from
			 * the parent.
			 */
			prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot;

			/*
			 * Check whether we should zero this or dup it.
			 */
			if (svd->svn_inz == SEGVN_INZ_ALL ||
			    (svd->svn_inz == SEGVN_INZ_VPP &&
			    VPP_ISINHZERO(vpp))) {
				pp = anon_zero(newseg, addr, &newap,
				    newsvd->cred);
			} else {
				page_t *anon_pl[1+1];
				uint_t vpprot;
				error = anon_getpage(&ap, &vpprot, anon_pl,
				    PAGESIZE, seg, addr, S_READ, svd->cred);
				if (error != 0)
					return (error);

				pp = anon_private(&newap, newseg, addr, prot,
				    anon_pl[0], 0, newsvd->cred);
			}
			if (pp == NULL) {
				return (ENOMEM);
			}
			(void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap,
			    ANON_SLEEP);
			page_unlock(pp);
		}
		addr += PAGESIZE;
		old_idx++;
		new_idx++;
	}

	return (0);
}
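
/*
 * Background (illustrative): the inherit-zero state consulted above is
 * set via the MC_INHERIT_ZERO memcntl operation; on fork the child then
 * receives fresh zeroed pages for marked ranges instead of COW copies.
 */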

static int
segvn_dup(struct seg *seg, struct seg *newseg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *newsvd;
	pgcnt_t npages = seg_pages(seg);
	int error = 0;
	size_t len;
	struct anon_map *amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(newseg->s_as->a_proc->p_parent == curproc);

	/*
	 * If segment has anon reserved, reserve more for the new seg.
	 * For a MAP_NORESERVE segment swresv will be a count of all the
	 * allocated anon slots; thus we reserve for the child as many slots
	 * as the parent has allocated.  This semantic prevents the child or
	 * parent from dying during a copy-on-write fault caused by trying
	 * to write a shared pre-existing anon page.
	 */
	if ((len = svd->swresv) != 0) {
		if (anon_resv(svd->swresv) == 0)
			return (ENOMEM);

		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, len, 0);
	}

	newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	newseg->s_ops = &segvn_ops;
	newseg->s_data = (void *)newsvd;
	newseg->s_szc = seg->s_szc;

	newsvd->seg = newseg;
	if ((newsvd->vp = svd->vp) != NULL) {
		VN_HOLD(svd->vp);
		if (svd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, svd->vp);
	}
	newsvd->offset = svd->offset;
	newsvd->prot = svd->prot;
	newsvd->maxprot = svd->maxprot;
	newsvd->pageprot = svd->pageprot;
	newsvd->type = svd->type;
	newsvd->cred = svd->cred;
	crhold(newsvd->cred);
	newsvd->advice = svd->advice;
	newsvd->pageadvice = svd->pageadvice;
	newsvd->svn_inz = svd->svn_inz;
	newsvd->swresv = svd->swresv;
	newsvd->pageswap = svd->pageswap;
	newsvd->flags = svd->flags;
	newsvd->softlockcnt = 0;
	newsvd->softlockcnt_sbase = 0;
	newsvd->softlockcnt_send = 0;
	newsvd->policy_info = svd->policy_info;
	newsvd->rcookie = HAT_INVALID_REGION_COOKIE;

	if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
		/*
		 * Not attaching to a shared anon object.
		 */
		ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
		    svd->tr_state == SEGVN_TR_OFF);
		if (svd->tr_state == SEGVN_TR_ON) {
			ASSERT(newsvd->vp != NULL && amp != NULL);
			newsvd->tr_state = SEGVN_TR_INIT;
		} else {
			newsvd->tr_state = svd->tr_state;
		}
		newsvd->amp = NULL;
		newsvd->anon_index = 0;
	} else {
		/* regions for now are only used on pure vnode segments */
		ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		newsvd->tr_state = SEGVN_TR_OFF;
		if (svd->type == MAP_SHARED) {
			ASSERT(svd->svn_inz == SEGVN_INZ_NONE);
			newsvd->amp = amp;
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			ANON_LOCK_EXIT(&amp->a_rwlock);
			newsvd->anon_index = svd->anon_index;
		} else {
			int reclaim = 1;

			/*
			 * Allocate and initialize new anon_map structure.
			 */
			newsvd->amp = anonmap_alloc(newseg->s_size, 0,
			    ANON_SLEEP);
			newsvd->amp->a_szc = newseg->s_szc;
			newsvd->anon_index = 0;
			ASSERT(svd->svn_inz == SEGVN_INZ_NONE ||
			    svd->svn_inz == SEGVN_INZ_ALL ||
			    svd->svn_inz == SEGVN_INZ_VPP);

			/*
			 * We don't have to acquire the anon_map lock
			 * for the new segment (since it belongs to an
			 * address space that is still not associated
			 * with any process), or the segment in the old
			 * address space (since all threads in it
			 * are stopped while duplicating the address space).
			 */

			/*
			 * The goal of the following code is to make sure that
			 * softlocked pages do not end up as copy on write
			 * pages.  This would cause problems where one
			 * thread writes to a page that is COW and a different
			 * thread in the same process has softlocked it.  The
			 * softlock would move away from this process
			 * because the write would cause this process to get
			 * a copy (without the softlock).
			 *
			 * The strategy here is to just break the
			 * sharing on pages that could possibly be
			 * softlocked.
			 *
			 * In addition, if any pages have been marked that they
			 * should be inherited as zero, then we immediately go
			 * ahead and break COW and zero them.  In the case of a
			 * softlocked page that should be inherited zero, we
			 * break COW and just get a zero page.
			 */
retry:
			if (svd->softlockcnt ||
			    svd->svn_inz != SEGVN_INZ_NONE) {
				/*
				 * The softlock count might be non-zero
				 * because some pages are still stuck in the
				 * cache for lazy reclaim.  Flush the cache
				 * now.  This should drop the count to zero
				 * [or there is really I/O going on to these
				 * pages].  Note, we have the writers lock so
				 * nothing gets inserted during the flush.
				 */
				if (svd->softlockcnt && reclaim == 1) {
					segvn_purge(seg);
					reclaim = 0;
					goto retry;
				}

				error = segvn_dup_pages(seg, newseg);
				if (error != 0) {
					newsvd->vpage = NULL;
					goto out;
				}
			} else {	/* common case */
				if (seg->s_szc != 0) {
					/*
					 * If at least one of anon slots of a
					 * large page exists then make sure
					 * all anon slots of a large page
					 * exist to avoid partial cow sharing
					 * of a large page in the future.
					 */
					anon_dup_fill_holes(amp->ahp,
					    svd->anon_index, newsvd->amp->ahp,
					    0, seg->s_size, seg->s_szc,
					    svd->vp != NULL);
				} else {
					anon_dup(amp->ahp, svd->anon_index,
					    newsvd->amp->ahp, 0, seg->s_size);
				}

				hat_clrattr(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, PROT_WRITE);
			}
		}
	}
	/*
	 * If necessary, create a vpage structure for the new segment.
	 * Do not copy any page lock indications.
	 */
	if (svd->vpage != NULL) {
		uint_t i;
		struct vpage *ovp = svd->vpage;
		struct vpage *nvp;

		nvp = newsvd->vpage =
		    kmem_alloc(vpgtob(npages), KM_SLEEP);
		for (i = 0; i < npages; i++) {
			*nvp = *ovp++;
			VPP_CLRPPLOCK(nvp++);
		}
	} else
		newsvd->vpage = NULL;

	/* Inform the vnode of the new mapping */
	if (newsvd->vp != NULL) {
		error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
		    newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
		    newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
	}
out:
	if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(newsvd->amp == NULL);
		ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
		newsvd->rcookie = svd->rcookie;
		hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
	}
	return (error);
}
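
/*
 * Note on the vpage copy above: VPP_CLRPPLOCK is applied to every copied
 * entry because I/O page-lock state belongs to the parent's address
 * space and is deliberately not inherited by the child across fork.
 */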

/*
 * callback function to invoke free_vp_pages() for only those pages actually
 * processed by the HAT when a shared region is destroyed.
 */
extern int free_pages;

static void
segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
    size_t r_size, void *r_obj, u_offset_t r_objoff)
{
	u_offset_t off;
	size_t len;
	vnode_t *vp = (vnode_t *)r_obj;

	ASSERT(eaddr > saddr);
	ASSERT(saddr >= r_saddr);
	ASSERT(saddr < r_saddr + r_size);
	ASSERT(eaddr > r_saddr);
	ASSERT(eaddr <= r_saddr + r_size);
	ASSERT(vp != NULL);

	if (!free_pages) {
		return;
	}

	len = eaddr - saddr;
	off = (saddr - r_saddr) + r_objoff;
	free_vp_pages(vp, off, len);
}

/*
 * callback function used by segvn_unmap to invoke free_vp_pages() for only
 * those pages actually processed by the HAT
 */
static void
segvn_hat_unload_callback(hat_callback_t *cb)
{
	struct seg		*seg = cb->hcb_data;
	struct segvn_data	*svd = (struct segvn_data *)seg->s_data;
	size_t			len;
	u_offset_t		off;

	ASSERT(svd->vp != NULL);
	ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
	ASSERT(cb->hcb_start_addr >= seg->s_base);

	len = cb->hcb_end_addr - cb->hcb_start_addr;
	off = cb->hcb_start_addr - seg->s_base;
	free_vp_pages(svd->vp, svd->offset + off, len);
}

/*
 * This function determines the number of bytes of swap reserved by
 * a segment for which per-page accounting is present.  It is used to
 * calculate the correct value of a segvn_data's swresv.
 */
static size_t
segvn_count_swap_by_vpages(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	size_t nswappages = 0;

	ASSERT(svd->pageswap);
	ASSERT(svd->vpage != NULL);

	evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];

	for (vp = svd->vpage; vp < evp; vp++) {
		if (VPP_ISSWAPRES(vp))
			nswappages++;
	}

	return (nswappages << PAGESHIFT);
}
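
/*
 * Example (illustrative): for a 16-page segment using per-page swap
 * accounting in which 5 vpages have the swap-reserved bit set,
 * segvn_count_swap_by_vpages() returns 5 << PAGESHIFT bytes,
 * independent of the segment's total size.
 */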
1895 */ 1896 if (reclaim == 1) { 1897 segvn_purge(seg); 1898 reclaim = 0; 1899 goto retry; 1900 } 1901 return (EAGAIN); 1902 } 1903 1904 /* 1905 * Check for bad sizes 1906 */ 1907 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size || 1908 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) { 1909 panic("segvn_unmap"); 1910 /*NOTREACHED*/ 1911 } 1912 1913 if (seg->s_szc != 0) { 1914 size_t pgsz = page_get_pagesize(seg->s_szc); 1915 int err; 1916 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 1917 ASSERT(seg->s_base != addr || seg->s_size != len); 1918 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1919 ASSERT(svd->amp == NULL); 1920 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1921 hat_leave_region(seg->s_as->a_hat, 1922 svd->rcookie, HAT_REGION_TEXT); 1923 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1924 /* 1925 * could pass a flag to segvn_demote_range() 1926 * below to tell it not to do any unloads but 1927 * this case is rare enough to not bother for 1928 * now. 1929 */ 1930 } else if (svd->tr_state == SEGVN_TR_INIT) { 1931 svd->tr_state = SEGVN_TR_OFF; 1932 } else if (svd->tr_state == SEGVN_TR_ON) { 1933 ASSERT(svd->amp != NULL); 1934 segvn_textunrepl(seg, 1); 1935 ASSERT(svd->amp == NULL); 1936 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1937 } 1938 VM_STAT_ADD(segvnvmstats.demoterange[0]); 1939 err = segvn_demote_range(seg, addr, len, SDR_END, 0); 1940 if (err == 0) { 1941 return (IE_RETRY); 1942 } 1943 return (err); 1944 } 1945 } 1946 1947 /* Inform the vnode of the unmapping. */ 1948 if (svd->vp) { 1949 int error; 1950 1951 error = VOP_DELMAP(svd->vp, 1952 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base), 1953 seg->s_as, addr, len, svd->prot, svd->maxprot, 1954 svd->type, svd->cred, NULL); 1955 1956 if (error == EAGAIN) 1957 return (error); 1958 } 1959 1960 /* 1961 * Remove any page locks set through this mapping. 1962 * If text replication is not off no page locks could have been 1963 * established via this mapping. 1964 */ 1965 if (svd->tr_state == SEGVN_TR_OFF) { 1966 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0); 1967 } 1968 1969 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1970 ASSERT(svd->amp == NULL); 1971 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1972 ASSERT(svd->type == MAP_PRIVATE); 1973 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 1974 HAT_REGION_TEXT); 1975 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1976 } else if (svd->tr_state == SEGVN_TR_ON) { 1977 ASSERT(svd->amp != NULL); 1978 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE)); 1979 segvn_textunrepl(seg, 1); 1980 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 1981 } else { 1982 if (svd->tr_state != SEGVN_TR_OFF) { 1983 ASSERT(svd->tr_state == SEGVN_TR_INIT); 1984 svd->tr_state = SEGVN_TR_OFF; 1985 } 1986 /* 1987 * Unload any hardware translations in the range to be taken 1988 * out. Use a callback to invoke free_vp_pages() effectively. 
1989 */ 1990 if (svd->vp != NULL && free_pages != 0) { 1991 callback.hcb_data = seg; 1992 callback.hcb_function = segvn_hat_unload_callback; 1993 cbp = &callback; 1994 } 1995 hat_unload_callback(seg->s_as->a_hat, addr, len, 1996 HAT_UNLOAD_UNMAP, cbp); 1997 1998 if (svd->type == MAP_SHARED && svd->vp != NULL && 1999 (svd->vp->v_flag & VVMEXEC) && 2000 ((svd->prot & PROT_WRITE) || svd->pageprot)) { 2001 segvn_inval_trcache(svd->vp); 2002 } 2003 } 2004 2005 /* 2006 * Check for entire segment 2007 */ 2008 if (addr == seg->s_base && len == seg->s_size) { 2009 seg_free(seg); 2010 return (0); 2011 } 2012 2013 opages = seg_pages(seg); 2014 dpages = btop(len); 2015 npages = opages - dpages; 2016 amp = svd->amp; 2017 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc); 2018 2019 /* 2020 * Check for beginning of segment 2021 */ 2022 if (addr == seg->s_base) { 2023 if (svd->vpage != NULL) { 2024 size_t nbytes; 2025 struct vpage *ovpage; 2026 2027 ovpage = svd->vpage; /* keep pointer to vpage */ 2028 2029 nbytes = vpgtob(npages); 2030 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2031 bcopy(&ovpage[dpages], svd->vpage, nbytes); 2032 2033 /* free up old vpage */ 2034 kmem_free(ovpage, vpgtob(opages)); 2035 } 2036 if (amp != NULL) { 2037 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 2038 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2039 /* 2040 * Shared anon map is no longer in use. Before 2041 * freeing its pages purge all entries from 2042 * pcache that belong to this amp. 2043 */ 2044 if (svd->type == MAP_SHARED) { 2045 ASSERT(amp->refcnt == 1); 2046 ASSERT(svd->softlockcnt == 0); 2047 anonmap_purge(amp); 2048 } 2049 /* 2050 * Free up now unused parts of anon_map array. 2051 */ 2052 if (amp->a_szc == seg->s_szc) { 2053 if (seg->s_szc != 0) { 2054 anon_free_pages(amp->ahp, 2055 svd->anon_index, len, 2056 seg->s_szc); 2057 } else { 2058 anon_free(amp->ahp, 2059 svd->anon_index, 2060 len); 2061 } 2062 } else { 2063 ASSERT(svd->type == MAP_SHARED); 2064 ASSERT(amp->a_szc > seg->s_szc); 2065 anon_shmap_free_pages(amp, 2066 svd->anon_index, len); 2067 } 2068 2069 /* 2070 * Unreserve swap space for the 2071 * unmapped chunk of this segment in 2072 * case it's MAP_SHARED 2073 */ 2074 if (svd->type == MAP_SHARED) { 2075 anon_unresv_zone(len, 2076 seg->s_as->a_proc->p_zone); 2077 amp->swresv -= len; 2078 } 2079 } 2080 ANON_LOCK_EXIT(&amp->a_rwlock); 2081 svd->anon_index += dpages; 2082 } 2083 if (svd->vp != NULL) 2084 svd->offset += len; 2085 2086 seg->s_base += len; 2087 seg->s_size -= len; 2088 2089 if (svd->swresv) { 2090 if (svd->flags & MAP_NORESERVE) { 2091 ASSERT(amp); 2092 oswresv = svd->swresv; 2093 2094 svd->swresv = ptob(anon_pages(amp->ahp, 2095 svd->anon_index, npages)); 2096 anon_unresv_zone(oswresv - svd->swresv, 2097 seg->s_as->a_proc->p_zone); 2098 if (SEG_IS_PARTIAL_RESV(seg)) 2099 seg->s_as->a_resvsize -= oswresv - 2100 svd->swresv; 2101 } else { 2102 size_t unlen; 2103 2104 if (svd->pageswap) { 2105 oswresv = svd->swresv; 2106 svd->swresv = 2107 segvn_count_swap_by_vpages(seg); 2108 ASSERT(oswresv >= svd->swresv); 2109 unlen = oswresv - svd->swresv; 2110 } else { 2111 svd->swresv -= len; 2112 ASSERT(svd->swresv == seg->s_size); 2113 unlen = len; 2114 } 2115 anon_unresv_zone(unlen, 2116 seg->s_as->a_proc->p_zone); 2117 } 2118 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2119 seg, len, 0); 2120 } 2121 2122 return (0); 2123 } 2124 2125 /* 2126 * Check for end of segment 2127 */ 2128 if (addr + len == seg->s_base + seg->s_size) { 2129 if (svd->vpage != NULL) { 2130 size_t nbytes; 2131 struct vpage *ovpage;
2132 2133 ovpage = svd->vpage; /* keep pointer to vpage */ 2134 2135 nbytes = vpgtob(npages); 2136 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2137 bcopy(ovpage, svd->vpage, nbytes); 2138 2139 /* free up old vpage */ 2140 kmem_free(ovpage, vpgtob(opages)); 2141 2142 } 2143 if (amp != NULL) { 2144 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 2145 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2146 /* 2147 * Free up now unused parts of anon_map array. 2148 */ 2149 ulong_t an_idx = svd->anon_index + npages; 2150 2151 /* 2152 * Shared anon map is no longer in use. Before 2153 * freeing its pages purge all entries from 2154 * pcache that belong to this amp. 2155 */ 2156 if (svd->type == MAP_SHARED) { 2157 ASSERT(amp->refcnt == 1); 2158 ASSERT(svd->softlockcnt == 0); 2159 anonmap_purge(amp); 2160 } 2161 2162 if (amp->a_szc == seg->s_szc) { 2163 if (seg->s_szc != 0) { 2164 anon_free_pages(amp->ahp, 2165 an_idx, len, 2166 seg->s_szc); 2167 } else { 2168 anon_free(amp->ahp, an_idx, 2169 len); 2170 } 2171 } else { 2172 ASSERT(svd->type == MAP_SHARED); 2173 ASSERT(amp->a_szc > seg->s_szc); 2174 anon_shmap_free_pages(amp, 2175 an_idx, len); 2176 } 2177 2178 /* 2179 * Unreserve swap space for the 2180 * unmapped chunk of this segment in 2181 * case it's MAP_SHARED 2182 */ 2183 if (svd->type == MAP_SHARED) { 2184 anon_unresv_zone(len, 2185 seg->s_as->a_proc->p_zone); 2186 amp->swresv -= len; 2187 } 2188 } 2189 ANON_LOCK_EXIT(&amp->a_rwlock); 2190 } 2191 2192 seg->s_size -= len; 2193 2194 if (svd->swresv) { 2195 if (svd->flags & MAP_NORESERVE) { 2196 ASSERT(amp); 2197 oswresv = svd->swresv; 2198 svd->swresv = ptob(anon_pages(amp->ahp, 2199 svd->anon_index, npages)); 2200 anon_unresv_zone(oswresv - svd->swresv, 2201 seg->s_as->a_proc->p_zone); 2202 if (SEG_IS_PARTIAL_RESV(seg)) 2203 seg->s_as->a_resvsize -= oswresv - 2204 svd->swresv; 2205 } else { 2206 size_t unlen; 2207 2208 if (svd->pageswap) { 2209 oswresv = svd->swresv; 2210 svd->swresv = 2211 segvn_count_swap_by_vpages(seg); 2212 ASSERT(oswresv >= svd->swresv); 2213 unlen = oswresv - svd->swresv; 2214 } else { 2215 svd->swresv -= len; 2216 ASSERT(svd->swresv == seg->s_size); 2217 unlen = len; 2218 } 2219 anon_unresv_zone(unlen, 2220 seg->s_as->a_proc->p_zone); 2221 } 2222 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2223 "anon proc:%p %lu %u", seg, len, 0); 2224 } 2225 2226 return (0); 2227 } 2228 2229 /* 2230 * The section to go is in the middle of the segment, 2231 * have to make it into two segments. nseg is made for 2232 * the high end while seg is cut down at the low end.
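 * For example (editor's illustration with hypothetical numbers): with
 * s_base = 0x10000 and s_size = 0x8000, unmapping addr = 0x12000 with
 * len = 0x2000 gives
 *	nbase = addr + len			= 0x14000
 *	nsize = (s_base + s_size) - nbase	= 0x4000
 *	seg->s_size = addr - s_base		= 0x2000
 * so the old seg keeps [0x10000, 0x12000) and nseg covers
 * [0x14000, 0x18000).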
2233 */ 2234 nbase = addr + len; /* new seg base */ 2235 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */ 2236 seg->s_size = addr - seg->s_base; /* shrink old seg */ 2237 nseg = seg_alloc(seg->s_as, nbase, nsize); 2238 if (nseg == NULL) { 2239 panic("segvn_unmap seg_alloc"); 2240 /*NOTREACHED*/ 2241 } 2242 nseg->s_ops = seg->s_ops; 2243 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 2244 nseg->s_data = (void *)nsvd; 2245 nseg->s_szc = seg->s_szc; 2246 *nsvd = *svd; 2247 nsvd->seg = nseg; 2248 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base); 2249 nsvd->swresv = 0; 2250 nsvd->softlockcnt = 0; 2251 nsvd->softlockcnt_sbase = 0; 2252 nsvd->softlockcnt_send = 0; 2253 nsvd->svn_inz = svd->svn_inz; 2254 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 2255 2256 if (svd->vp != NULL) { 2257 VN_HOLD(nsvd->vp); 2258 if (nsvd->type == MAP_SHARED) 2259 lgrp_shm_policy_init(NULL, nsvd->vp); 2260 } 2261 crhold(svd->cred); 2262 2263 if (svd->vpage == NULL) { 2264 nsvd->vpage = NULL; 2265 } else { 2266 /* need to split vpage into two arrays */ 2267 size_t nbytes; 2268 struct vpage *ovpage; 2269 2270 ovpage = svd->vpage; /* keep pointer to vpage */ 2271 2272 npages = seg_pages(seg); /* seg has shrunk */ 2273 nbytes = vpgtob(npages); 2274 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2275 2276 bcopy(ovpage, svd->vpage, nbytes); 2277 2278 npages = seg_pages(nseg); 2279 nbytes = vpgtob(npages); 2280 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2281 2282 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes); 2283 2284 /* free up old vpage */ 2285 kmem_free(ovpage, vpgtob(opages)); 2286 } 2287 2288 if (amp == NULL) { 2289 nsvd->amp = NULL; 2290 nsvd->anon_index = 0; 2291 } else { 2292 /* 2293 * Need to create a new anon map for the new segment. 2294 * We'll also allocate a new smaller array for the old 2295 * smaller segment to save space. 2296 */ 2297 opages = btop((uintptr_t)(addr - seg->s_base)); 2298 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 2299 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2300 /* 2301 * Free up now unused parts of anon_map array. 2302 */ 2303 ulong_t an_idx = svd->anon_index + opages; 2304 2305 /* 2306 * Shared anon map is no longer in use. Before 2307 * freeing its pages purge all entries from 2308 * pcache that belong to this amp.
2309 */ 2310 if (svd->type == MAP_SHARED) { 2311 ASSERT(amp->refcnt == 1); 2312 ASSERT(svd->softlockcnt == 0); 2313 anonmap_purge(amp); 2314 } 2315 2316 if (amp->a_szc == seg->s_szc) { 2317 if (seg->s_szc != 0) { 2318 anon_free_pages(amp->ahp, an_idx, len, 2319 seg->s_szc); 2320 } else { 2321 anon_free(amp->ahp, an_idx, 2322 len); 2323 } 2324 } else { 2325 ASSERT(svd->type == MAP_SHARED); 2326 ASSERT(amp->a_szc > seg->s_szc); 2327 anon_shmap_free_pages(amp, an_idx, len); 2328 } 2329 2330 /* 2331 * Unreserve swap space for the 2332 * unmapped chunk of this segment in 2333 * case it's MAP_SHARED 2334 */ 2335 if (svd->type == MAP_SHARED) { 2336 anon_unresv_zone(len, 2337 seg->s_as->a_proc->p_zone); 2338 amp->swresv -= len; 2339 } 2340 } 2341 nsvd->anon_index = svd->anon_index + 2342 btop((uintptr_t)(nseg->s_base - seg->s_base)); 2343 if (svd->type == MAP_SHARED) { 2344 amp->refcnt++; 2345 nsvd->amp = amp; 2346 } else { 2347 struct anon_map *namp; 2348 struct anon_hdr *nahp; 2349 2350 ASSERT(svd->type == MAP_PRIVATE); 2351 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 2352 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 2353 namp->a_szc = seg->s_szc; 2354 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp, 2355 0, btop(seg->s_size), ANON_SLEEP); 2356 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index, 2357 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 2358 anon_release(amp->ahp, btop(amp->size)); 2359 svd->anon_index = 0; 2360 nsvd->anon_index = 0; 2361 amp->ahp = nahp; 2362 amp->size = seg->s_size; 2363 nsvd->amp = namp; 2364 } 2365 ANON_LOCK_EXIT(&amp->a_rwlock); 2366 } 2367 if (svd->swresv) { 2368 if (svd->flags & MAP_NORESERVE) { 2369 ASSERT(amp); 2370 oswresv = svd->swresv; 2371 svd->swresv = ptob(anon_pages(amp->ahp, 2372 svd->anon_index, btop(seg->s_size))); 2373 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 2374 nsvd->anon_index, btop(nseg->s_size))); 2375 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2376 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv), 2377 seg->s_as->a_proc->p_zone); 2378 if (SEG_IS_PARTIAL_RESV(seg)) 2379 seg->s_as->a_resvsize -= oswresv - 2380 (svd->swresv + nsvd->swresv); 2381 } else { 2382 size_t unlen; 2383 2384 if (svd->pageswap) { 2385 oswresv = svd->swresv; 2386 svd->swresv = segvn_count_swap_by_vpages(seg); 2387 nsvd->swresv = segvn_count_swap_by_vpages(nseg); 2388 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2389 unlen = oswresv - (svd->swresv + nsvd->swresv); 2390 } else { 2391 if (seg->s_size + nseg->s_size + len != 2392 svd->swresv) { 2393 panic("segvn_unmap: cannot split " 2394 "swap reservation"); 2395 /*NOTREACHED*/ 2396 } 2397 svd->swresv = seg->s_size; 2398 nsvd->swresv = nseg->s_size; 2399 unlen = len; 2400 } 2401 anon_unresv_zone(unlen, 2402 seg->s_as->a_proc->p_zone); 2403 } 2404 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2405 seg, len, 0); 2406 } 2407 2408 return (0); /* I'm glad that's all over with! */ 2409 } 2410 2411 static void 2412 segvn_free(struct seg *seg) 2413 { 2414 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2415 pgcnt_t npages = seg_pages(seg); 2416 struct anon_map *amp; 2417 size_t len; 2418 2419 /* 2420 * We don't need any segment level locks for "segvn" data 2421 * since the address space is "write" locked. 2422 */ 2423 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2424 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2425 2426 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2427 2428 /* 2429 * Be sure to unlock pages.
XXX Why do things get free'ed instead 2430 * of unmapped? XXX 2431 */ 2432 (void) segvn_lockop(seg, seg->s_base, seg->s_size, 2433 0, MC_UNLOCK, NULL, 0); 2434 2435 /* 2436 * Deallocate the vpage and anon pointers if necessary and possible. 2437 */ 2438 if (svd->vpage != NULL) { 2439 kmem_free(svd->vpage, vpgtob(npages)); 2440 svd->vpage = NULL; 2441 } 2442 if ((amp = svd->amp) != NULL) { 2443 /* 2444 * If there are no more references to this anon_map 2445 * structure, then deallocate the structure after freeing 2446 * up all the anon slot pointers that we can. 2447 */ 2448 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 2449 ASSERT(amp->a_szc >= seg->s_szc); 2450 if (--amp->refcnt == 0) { 2451 if (svd->type == MAP_PRIVATE) { 2452 /* 2453 * Private - we only need to anon_free 2454 * the part that this segment refers to. 2455 */ 2456 if (seg->s_szc != 0) { 2457 anon_free_pages(amp->ahp, 2458 svd->anon_index, seg->s_size, 2459 seg->s_szc); 2460 } else { 2461 anon_free(amp->ahp, svd->anon_index, 2462 seg->s_size); 2463 } 2464 } else { 2465 2466 /* 2467 * Shared anon map is no longer in use. Before 2468 * freeing its pages purge all entries from 2469 * pcache that belong to this amp. 2470 */ 2471 ASSERT(svd->softlockcnt == 0); 2472 anonmap_purge(amp); 2473 2474 /* 2475 * Shared - anon_free the entire 2476 * anon_map's worth of stuff and 2477 * release any swap reservation. 2478 */ 2479 if (amp->a_szc != 0) { 2480 anon_shmap_free_pages(amp, 0, 2481 amp->size); 2482 } else { 2483 anon_free(amp->ahp, 0, amp->size); 2484 } 2485 if ((len = amp->swresv) != 0) { 2486 anon_unresv_zone(len, 2487 seg->s_as->a_proc->p_zone); 2488 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2489 "anon proc:%p %lu %u", seg, len, 0); 2490 } 2491 } 2492 svd->amp = NULL; 2493 ANON_LOCK_EXIT(&amp->a_rwlock); 2494 anonmap_free(amp); 2495 } else if (svd->type == MAP_PRIVATE) { 2496 /* 2497 * We had a private mapping which still has 2498 * a held anon_map so just free up all the 2499 * anon slot pointers that we were using. 2500 */ 2501 if (seg->s_szc != 0) { 2502 anon_free_pages(amp->ahp, svd->anon_index, 2503 seg->s_size, seg->s_szc); 2504 } else { 2505 anon_free(amp->ahp, svd->anon_index, 2506 seg->s_size); 2507 } 2508 ANON_LOCK_EXIT(&amp->a_rwlock); 2509 } else { 2510 ANON_LOCK_EXIT(&amp->a_rwlock); 2511 } 2512 } 2513 2514 /* 2515 * Release swap reservation. 2516 */ 2517 if ((len = svd->swresv) != 0) { 2518 anon_unresv_zone(svd->swresv, 2519 seg->s_as->a_proc->p_zone); 2520 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2521 seg, len, 0); 2522 if (SEG_IS_PARTIAL_RESV(seg)) 2523 seg->s_as->a_resvsize -= svd->swresv; 2524 svd->swresv = 0; 2525 } 2526 /* 2527 * Release claim on vnode, credentials, and finally free the 2528 * private data. 2529 */ 2530 if (svd->vp != NULL) { 2531 if (svd->type == MAP_SHARED) 2532 lgrp_shm_policy_fini(NULL, svd->vp); 2533 VN_RELE(svd->vp); 2534 svd->vp = NULL; 2535 } 2536 crfree(svd->cred); 2537 svd->pageprot = 0; 2538 svd->pageadvice = 0; 2539 svd->pageswap = 0; 2540 svd->cred = NULL; 2541 2542 /* 2543 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's 2544 * still working with this segment without holding as lock (in case 2545 * it's called by pcache async thread). 2546 */ 2547 ASSERT(svd->softlockcnt == 0); 2548 mutex_enter(&svd->segfree_syncmtx); 2549 mutex_exit(&svd->segfree_syncmtx); 2550 2551 seg->s_data = NULL; 2552 kmem_cache_free(segvn_cache, svd); 2553 } 2554 2555 /* 2556 * Do a F_SOFTUNLOCK call over the range requested. The range must have 2557 * already been F_SOFTLOCK'ed.
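 * (A hypothetical caller is sketched below.)
 */

/*
 * Editor's illustration, not part of the original source: F_SOFTLOCK and
 * F_SOFTUNLOCK must be paired over an identical range.  A caller pinning
 * pages for I/O would follow this shape (error handling simplified):
 */
static int
segvn_example_softlock_pair(struct hat *hat, struct seg *seg, caddr_t addr,
    size_t len, enum seg_rw rw)
{
	if (segvn_fault(hat, seg, addr, len, F_SOFTLOCK, rw) != 0)
		return (EFAULT);	/* simplified error mapping */
	/* ... pages in [addr, addr + len) are pinned; do the work ... */
	(void) segvn_fault(hat, seg, addr, len, F_SOFTUNLOCK, rw);
	return (0);
}

/*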
2558 * Caller must always match addr and len of a softunlock with a previous 2559 * softlock with exactly the same addr and len. 2560 */ 2561 static void 2562 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) 2563 { 2564 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2565 page_t *pp; 2566 caddr_t adr; 2567 struct vnode *vp; 2568 u_offset_t offset; 2569 ulong_t anon_index; 2570 struct anon_map *amp; 2571 struct anon *ap = NULL; 2572 2573 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2574 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 2575 2576 if ((amp = svd->amp) != NULL) 2577 anon_index = svd->anon_index + seg_page(seg, addr); 2578 2579 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 2580 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2581 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie); 2582 } else { 2583 hat_unlock(seg->s_as->a_hat, addr, len); 2584 } 2585 for (adr = addr; adr < addr + len; adr += PAGESIZE) { 2586 if (amp != NULL) { 2587 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 2588 if ((ap = anon_get_ptr(amp->ahp, anon_index++)) 2589 != NULL) { 2590 swap_xlate(ap, &vp, &offset); 2591 } else { 2592 vp = svd->vp; 2593 offset = svd->offset + 2594 (uintptr_t)(adr - seg->s_base); 2595 } 2596 ANON_LOCK_EXIT(&amp->a_rwlock); 2597 } else { 2598 vp = svd->vp; 2599 offset = svd->offset + 2600 (uintptr_t)(adr - seg->s_base); 2601 } 2602 2603 /* 2604 * Use page_find() instead of page_lookup() to 2605 * find the page since we know that it is locked. 2606 */ 2607 pp = page_find(vp, offset); 2608 if (pp == NULL) { 2609 panic( 2610 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx", 2611 (void *)adr, (void *)ap, (void *)vp, offset); 2612 /*NOTREACHED*/ 2613 } 2614 2615 if (rw == S_WRITE) { 2616 hat_setrefmod(pp); 2617 if (seg->s_as->a_vbits) 2618 hat_setstat(seg->s_as, adr, PAGESIZE, 2619 P_REF | P_MOD); 2620 } else if (rw != S_OTHER) { 2621 hat_setref(pp); 2622 if (seg->s_as->a_vbits) 2623 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF); 2624 } 2625 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2626 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset); 2627 page_unlock(pp); 2628 } 2629 ASSERT(svd->softlockcnt >= btop(len)); 2630 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) { 2631 /* 2632 * All SOFTLOCKS are gone. Wakeup any waiting 2633 * unmappers so they can try again to unmap. 2634 * Check for waiters first without the mutex 2635 * held so we don't always grab the mutex on 2636 * softunlocks. 2637 */ 2638 if (AS_ISUNMAPWAIT(seg->s_as)) { 2639 mutex_enter(&seg->s_as->a_contents); 2640 if (AS_ISUNMAPWAIT(seg->s_as)) { 2641 AS_CLRUNMAPWAIT(seg->s_as); 2642 cv_broadcast(&seg->s_as->a_cv); 2643 } 2644 mutex_exit(&seg->s_as->a_contents); 2645 } 2646 } 2647 } 2648 2649 #define PAGE_HANDLED ((page_t *)-1) 2650 2651 /* 2652 * Release all the pages in the NULL terminated ppp list 2653 * which haven't already been converted to PAGE_HANDLED. 2654 */ 2655 static void 2656 segvn_pagelist_rele(page_t **ppp) 2657 { 2658 for (; *ppp != NULL; ppp++) { 2659 if (*ppp != PAGE_HANDLED) 2660 page_unlock(*ppp); 2661 } 2662 } 2663 2664 static int stealcow = 1; 2665 2666 /* 2667 * Workaround for viking chip bug. See bug id 1220902. 2668 * To fix this down in pagefault() would require importing so 2669 * much as and segvn code as to be unmaintainable. 2670 */ 2671 int enable_mbit_wa = 0; 2672 2673 /* 2674 * Handles all the dirty work of getting the right 2675 * anonymous pages and loading up the translations.
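 * (A condensed sketch of the per-page protection check shared by the
 * fault paths follows.)
 */

/*
 * Editor's illustration, not part of the original source: both
 * segvn_faultpage() and segvn_fault_vnodepages() map the access type of
 * the fault to a protection bit before testing it against the per-page
 * protections.  A hypothetical condensed form of that switch:
 */
static uint_t
segvn_example_protchk(enum seg_rw rw)
{
	switch (rw) {
	case S_READ:
		return (PROT_READ);
	case S_WRITE:
		return (PROT_WRITE);
	case S_EXEC:
		return (PROT_EXEC);
	case S_OTHER:
	default:
		return (PROT_READ | PROT_WRITE | PROT_EXEC);
	}
}

/*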
2676 * This routine is called only from segvn_fault() 2677 * when looping over the range of addresses requested. 2678 * 2679 * The basic algorithm here is: 2680 * If this is an anon_zero case 2681 * Call anon_zero to allocate page 2682 * Load up translation 2683 * Return 2684 * endif 2685 * If this is an anon page 2686 * Use anon_getpage to get the page 2687 * else 2688 * Find page in pl[] list passed in 2689 * endif 2690 * If not a cow 2691 * Load up the translation to the page 2692 * return 2693 * endif 2694 * Call anon_private to handle cow 2695 * Load up (writable) translation to new page 2696 */ 2697 static faultcode_t 2698 segvn_faultpage( 2699 struct hat *hat, /* the hat to use for mapping */ 2700 struct seg *seg, /* seg_vn of interest */ 2701 caddr_t addr, /* address in as */ 2702 u_offset_t off, /* offset in vp */ 2703 struct vpage *vpage, /* pointer to vpage for vp, off */ 2704 page_t *pl[], /* object source page pointer */ 2705 uint_t vpprot, /* access allowed to object pages */ 2706 enum fault_type type, /* type of fault */ 2707 enum seg_rw rw, /* type of access at fault */ 2708 int brkcow) /* we may need to break cow */ 2709 { 2710 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2711 page_t *pp, **ppp; 2712 uint_t pageflags = 0; 2713 page_t *anon_pl[1 + 1]; 2714 page_t *opp = NULL; /* original page */ 2715 uint_t prot; 2716 int err; 2717 int cow; 2718 int claim; 2719 int steal = 0; 2720 ulong_t anon_index; 2721 struct anon *ap, *oldap; 2722 struct anon_map *amp; 2723 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 2724 int anon_lock = 0; 2725 anon_sync_obj_t cookie; 2726 2727 if (svd->flags & MAP_TEXT) { 2728 hat_flag |= HAT_LOAD_TEXT; 2729 } 2730 2731 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 2732 ASSERT(seg->s_szc == 0); 2733 ASSERT(svd->tr_state != SEGVN_TR_INIT); 2734 2735 /* 2736 * Initialize protection value for this page. 2737 * If we have per page protection values check it now. 2738 */ 2739 if (svd->pageprot) { 2740 uint_t protchk; 2741 2742 switch (rw) { 2743 case S_READ: 2744 protchk = PROT_READ; 2745 break; 2746 case S_WRITE: 2747 protchk = PROT_WRITE; 2748 break; 2749 case S_EXEC: 2750 protchk = PROT_EXEC; 2751 break; 2752 case S_OTHER: 2753 default: 2754 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 2755 break; 2756 } 2757 2758 prot = VPP_PROT(vpage); 2759 if ((prot & protchk) == 0) 2760 return (FC_PROT); /* illegal access type */ 2761 } else { 2762 prot = svd->prot; 2763 } 2764 2765 if (type == F_SOFTLOCK) { 2766 atomic_inc_ulong((ulong_t *)&svd->softlockcnt); 2767 } 2768 2769 /* 2770 * Always acquire the anon array lock to prevent 2 threads from 2771 * allocating separate anon slots for the same "addr". 2772 */ 2773 2774 if ((amp = svd->amp) != NULL) { 2775 ASSERT(RW_READ_HELD(&amp->a_rwlock)); 2776 anon_index = svd->anon_index + seg_page(seg, addr); 2777 anon_array_enter(amp, anon_index, &cookie); 2778 anon_lock = 1; 2779 } 2780 2781 if (svd->vp == NULL && amp != NULL) { 2782 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) { 2783 /* 2784 * Allocate a (normally) writable anonymous page of 2785 * zeroes. If no advance reservations, reserve now.
2786 */ 2787 if (svd->flags & MAP_NORESERVE) { 2788 if (anon_resv_zone(ptob(1), 2789 seg->s_as->a_proc->p_zone)) { 2790 atomic_add_long(&svd->swresv, ptob(1)); 2791 atomic_add_long(&seg->s_as->a_resvsize, 2792 ptob(1)); 2793 } else { 2794 err = ENOMEM; 2795 goto out; 2796 } 2797 } 2798 if ((pp = anon_zero(seg, addr, &ap, 2799 svd->cred)) == NULL) { 2800 err = ENOMEM; 2801 goto out; /* out of swap space */ 2802 } 2803 /* 2804 * Re-acquire the anon_map lock and 2805 * initialize the anon array entry. 2806 */ 2807 (void) anon_set_ptr(amp->ahp, anon_index, ap, 2808 ANON_SLEEP); 2809 2810 ASSERT(pp->p_szc == 0); 2811 2812 /* 2813 * Handle pages that have been marked for migration 2814 */ 2815 if (lgrp_optimizations()) 2816 page_migrate(seg, addr, &pp, 1); 2817 2818 if (enable_mbit_wa) { 2819 if (rw == S_WRITE) 2820 hat_setmod(pp); 2821 else if (!hat_ismod(pp)) 2822 prot &= ~PROT_WRITE; 2823 } 2824 /* 2825 * If AS_PAGLCK is set in a_flags (via memcntl(2) 2826 * with MC_LOCKAS, MCL_FUTURE) and this is a 2827 * MAP_NORESERVE segment, we may need to 2828 * permanently lock the page as it is being faulted 2829 * for the first time. The following text applies 2830 * only to MAP_NORESERVE segments: 2831 * 2832 * As per memcntl(2), if this segment was created 2833 * after MCL_FUTURE was applied (a "future" 2834 * segment), its pages must be locked. If this 2835 * segment existed at MCL_FUTURE application (a 2836 * "past" segment), the interface is unclear. 2837 * 2838 * We decide to lock only if vpage is present: 2839 * 2840 * - "future" segments will have a vpage array (see 2841 * as_map), and so will be locked as required 2842 * 2843 * - "past" segments may not have a vpage array, 2844 * depending on whether events (such as 2845 * mprotect) have occurred. Locking if vpage 2846 * exists will preserve legacy behavior. Not 2847 * locking if vpage is absent, will not break 2848 * the interface or legacy behavior. Note that 2849 * allocating vpage here if it's absent requires 2850 * upgrading the segvn reader lock, the cost of 2851 * which does not seem worthwhile. 2852 * 2853 * Usually testing and setting VPP_ISPPLOCK and 2854 * VPP_SETPPLOCK requires holding the segvn lock as 2855 * writer, but in this case all readers are 2856 * serializing on the anon array lock. 2857 */ 2858 if (AS_ISPGLCK(seg->s_as) && vpage != NULL && 2859 (svd->flags & MAP_NORESERVE) && 2860 !VPP_ISPPLOCK(vpage)) { 2861 proc_t *p = seg->s_as->a_proc; 2862 ASSERT(svd->type == MAP_PRIVATE); 2863 mutex_enter(&p->p_lock); 2864 if (rctl_incr_locked_mem(p, NULL, PAGESIZE, 2865 1) == 0) { 2866 claim = VPP_PROT(vpage) & PROT_WRITE; 2867 if (page_pp_lock(pp, claim, 0)) { 2868 VPP_SETPPLOCK(vpage); 2869 } else { 2870 rctl_decr_locked_mem(p, NULL, 2871 PAGESIZE, 1); 2872 } 2873 } 2874 mutex_exit(&p->p_lock); 2875 } 2876 2877 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2878 hat_memload(hat, addr, pp, prot, hat_flag); 2879 2880 if (!(hat_flag & HAT_LOAD_LOCK)) 2881 page_unlock(pp); 2882 2883 anon_array_exit(&cookie); 2884 return (0); 2885 } 2886 } 2887 2888 /* 2889 * Obtain the page structure via anon_getpage() if it is 2890 * a private copy of an object (the result of a previous 2891 * copy-on-write). 
2892 */ 2893 if (amp != NULL) { 2894 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) { 2895 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE, 2896 seg, addr, rw, svd->cred); 2897 if (err) 2898 goto out; 2899 2900 if (svd->type == MAP_SHARED) { 2901 /* 2902 * If this is a shared mapping to an 2903 * anon_map, then ignore the write 2904 * permissions returned by anon_getpage(). 2905 * They apply to the private mappings 2906 * of this anon_map. 2907 */ 2908 vpprot |= PROT_WRITE; 2909 } 2910 opp = anon_pl[0]; 2911 } 2912 } 2913 2914 /* 2915 * Search the pl[] list passed in if it is from the 2916 * original object (i.e., not a private copy). 2917 */ 2918 if (opp == NULL) { 2919 /* 2920 * Find original page. We must be bringing it in 2921 * from the list in pl[]. 2922 */ 2923 for (ppp = pl; (opp = *ppp) != NULL; ppp++) { 2924 if (opp == PAGE_HANDLED) 2925 continue; 2926 ASSERT(opp->p_vnode == svd->vp); /* XXX */ 2927 if (opp->p_offset == off) 2928 break; 2929 } 2930 if (opp == NULL) { 2931 panic("segvn_faultpage not found"); 2932 /*NOTREACHED*/ 2933 } 2934 *ppp = PAGE_HANDLED; 2935 2936 } 2937 2938 ASSERT(PAGE_LOCKED(opp)); 2939 2940 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2941 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0); 2942 2943 /* 2944 * The fault is treated as a copy-on-write fault if a 2945 * write occurs on a private segment and the object 2946 * page (i.e., mapping) is write protected. We assume 2947 * that fatal protection checks have already been made. 2948 */ 2949 2950 if (brkcow) { 2951 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2952 cow = !(vpprot & PROT_WRITE); 2953 } else if (svd->tr_state == SEGVN_TR_ON) { 2954 /* 2955 * If we are doing text replication COW on first touch. 2956 */ 2957 ASSERT(amp != NULL); 2958 ASSERT(svd->vp != NULL); 2959 ASSERT(rw != S_WRITE); 2960 cow = (ap == NULL); 2961 } else { 2962 cow = 0; 2963 } 2964 2965 /* 2966 * If not a copy-on-write case load the translation 2967 * and return. 2968 */ 2969 if (cow == 0) { 2970 2971 /* 2972 * Handle pages that have been marked for migration 2973 */ 2974 if (lgrp_optimizations()) 2975 page_migrate(seg, addr, &opp, 1); 2976 2977 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) { 2978 if (rw == S_WRITE) 2979 hat_setmod(opp); 2980 else if (rw != S_OTHER && !hat_ismod(opp)) 2981 prot &= ~PROT_WRITE; 2982 } 2983 2984 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 2985 (!svd->pageprot && svd->prot == (prot & vpprot))); 2986 ASSERT(amp == NULL || 2987 svd->rcookie == HAT_INVALID_REGION_COOKIE); 2988 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag, 2989 svd->rcookie); 2990 2991 if (!(hat_flag & HAT_LOAD_LOCK)) 2992 page_unlock(opp); 2993 2994 if (anon_lock) { 2995 anon_array_exit(&cookie); 2996 } 2997 return (0); 2998 } 2999 3000 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3001 3002 hat_setref(opp); 3003 3004 ASSERT(amp != NULL && anon_lock); 3005 3006 /* 3007 * Steal the page only if it isn't a private page 3008 * since stealing a private page is not worth the effort. 3009 */ 3010 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) 3011 steal = 1; 3012 3013 /* 3014 * Steal the original page if the following conditions are true: 3015 * 3016 * We are low on memory, the page is not private, page is not large, 3017 * not shared, not modified, not `locked' or if we have it `locked' 3018 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies 3019 * that the page is not shared) and if it doesn't have any 3020 * translations. 
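 * Editor's restatement (illustration only) of the test made below:
 *
 *	stealing is attempted when
 *	    stealcow && freemem < minfree	(system is low on memory)
 *	    && ap == NULL			(not already a private page)
 *	    && opp->p_szc == 0			(small page)
 *	    && page_tryupgrade(opp)		(we get the excl lock)
 *	    && !hat_ismod(opp)			(page is not modified)
 *	    && (p_lckcnt == 0 && (p_cowcnt == 0 ||
 *	    (p_cowcnt == 1 && vpage != NULL && VPP_ISPPLOCK(vpage))))
 *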
page_struct_lock isn't needed to look at p_cowcnt 3021 * and p_lckcnt because we first get exclusive lock on page. 3022 */ 3023 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD); 3024 3025 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 && 3026 page_tryupgrade(opp) && !hat_ismod(opp) && 3027 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) || 3028 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 && 3029 vpage != NULL && VPP_ISPPLOCK(vpage)))) { 3030 /* 3031 * Check if this page has other translations 3032 * after unloading our translation. 3033 */ 3034 if (hat_page_is_mapped(opp)) { 3035 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3036 hat_unload(seg->s_as->a_hat, addr, PAGESIZE, 3037 HAT_UNLOAD); 3038 } 3039 3040 /* 3041 * hat_unload() might sync back someone else's recent 3042 * modification, so check again. 3043 */ 3044 if (!hat_ismod(opp) && !hat_page_is_mapped(opp)) 3045 pageflags |= STEAL_PAGE; 3046 } 3047 3048 /* 3049 * If we have a vpage pointer, see if it indicates that we have 3050 * ``locked'' the page we map -- if so, tell anon_private to 3051 * transfer the locking resource to the new page. 3052 * 3053 * See Statement at the beginning of segvn_lockop regarding 3054 * the way lockcnts/cowcnts are handled during COW. 3055 * 3056 */ 3057 if (vpage != NULL && VPP_ISPPLOCK(vpage)) 3058 pageflags |= LOCK_PAGE; 3059 3060 /* 3061 * Allocate a private page and perform the copy. 3062 * For MAP_NORESERVE reserve swap space now, unless this 3063 * is a cow fault on an existing anon page in which case 3064 * MAP_NORESERVE will have made advance reservations. 3065 */ 3066 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) { 3067 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) { 3068 atomic_add_long(&svd->swresv, ptob(1)); 3069 atomic_add_long(&seg->s_as->a_resvsize, ptob(1)); 3070 } else { 3071 page_unlock(opp); 3072 err = ENOMEM; 3073 goto out; 3074 } 3075 } 3076 oldap = ap; 3077 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred); 3078 if (pp == NULL) { 3079 err = ENOMEM; /* out of swap space */ 3080 goto out; 3081 } 3082 3083 /* 3084 * If we copied away from an anonymous page, then 3085 * we are one step closer to freeing up an anon slot. 3086 * 3087 * NOTE: The original anon slot must be released while 3088 * holding the "anon_map" lock. This is necessary to prevent 3089 * other threads from obtaining a pointer to the anon slot 3090 * which may be freed if its "refcnt" is 1. 3091 */ 3092 if (oldap != NULL) 3093 anon_decref(oldap); 3094 3095 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 3096 3097 /* 3098 * Handle pages that have been marked for migration 3099 */ 3100 if (lgrp_optimizations()) 3101 page_migrate(seg, addr, &pp, 1); 3102 3103 ASSERT(pp->p_szc == 0); 3104 3105 ASSERT(!IS_VMODSORT(pp->p_vnode)); 3106 if (enable_mbit_wa) { 3107 if (rw == S_WRITE) 3108 hat_setmod(pp); 3109 else if (!hat_ismod(pp)) 3110 prot &= ~PROT_WRITE; 3111 } 3112 3113 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3114 hat_memload(hat, addr, pp, prot, hat_flag); 3115 3116 if (!(hat_flag & HAT_LOAD_LOCK)) 3117 page_unlock(pp); 3118 3119 ASSERT(anon_lock); 3120 anon_array_exit(&cookie); 3121 return (0); 3122 out: 3123 if (anon_lock) 3124 anon_array_exit(&cookie); 3125 3126 if (type == F_SOFTLOCK) { 3127 atomic_dec_ulong((ulong_t *)&svd->softlockcnt); 3128 } 3129 return (FC_MAKE_ERR(err)); 3130 } 3131 3132 /* 3133 * relocate a bunch of smaller targ pages into one large repl page. 
all targ 3134 * pages must be complete pages smaller than replacement pages. 3135 * it's assumed that no page's szc can change since they are all PAGESIZE or 3136 * complete large pages locked SHARED. 3137 */ 3138 static void 3139 segvn_relocate_pages(page_t **targ, page_t *replacement) 3140 { 3141 page_t *pp; 3142 pgcnt_t repl_npgs, curnpgs; 3143 pgcnt_t i; 3144 uint_t repl_szc = replacement->p_szc; 3145 page_t *first_repl = replacement; 3146 page_t *repl; 3147 spgcnt_t npgs; 3148 3149 VM_STAT_ADD(segvnvmstats.relocatepages[0]); 3150 3151 ASSERT(repl_szc != 0); 3152 npgs = repl_npgs = page_get_pagecnt(repl_szc); 3153 3154 i = 0; 3155 while (repl_npgs) { 3156 spgcnt_t nreloc; 3157 int err; 3158 ASSERT(replacement != NULL); 3159 pp = targ[i]; 3160 ASSERT(pp->p_szc < repl_szc); 3161 ASSERT(PAGE_EXCL(pp)); 3162 ASSERT(!PP_ISFREE(pp)); 3163 curnpgs = page_get_pagecnt(pp->p_szc); 3164 if (curnpgs == 1) { 3165 VM_STAT_ADD(segvnvmstats.relocatepages[1]); 3166 repl = replacement; 3167 page_sub(&replacement, repl); 3168 ASSERT(PAGE_EXCL(repl)); 3169 ASSERT(!PP_ISFREE(repl)); 3170 ASSERT(repl->p_szc == repl_szc); 3171 } else { 3172 page_t *repl_savepp; 3173 int j; 3174 VM_STAT_ADD(segvnvmstats.relocatepages[2]); 3175 repl_savepp = replacement; 3176 for (j = 0; j < curnpgs; j++) { 3177 repl = replacement; 3178 page_sub(&replacement, repl); 3179 ASSERT(PAGE_EXCL(repl)); 3180 ASSERT(!PP_ISFREE(repl)); 3181 ASSERT(repl->p_szc == repl_szc); 3182 ASSERT(page_pptonum(targ[i + j]) == 3183 page_pptonum(targ[i]) + j); 3184 } 3185 repl = repl_savepp; 3186 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs)); 3187 } 3188 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL); 3189 if (err || nreloc != curnpgs) { 3190 panic("segvn_relocate_pages: " 3191 "page_relocate failed err=%d curnpgs=%ld " 3192 "nreloc=%ld", err, curnpgs, nreloc); 3193 } 3194 ASSERT(curnpgs <= repl_npgs); 3195 repl_npgs -= curnpgs; 3196 i += curnpgs; 3197 } 3198 ASSERT(replacement == NULL); 3199 3200 repl = first_repl; 3201 repl_npgs = npgs; 3202 for (i = 0; i < repl_npgs; i++) { 3203 ASSERT(PAGE_EXCL(repl)); 3204 ASSERT(!PP_ISFREE(repl)); 3205 targ[i] = repl; 3206 page_downgrade(targ[i]); 3207 repl++; 3208 } 3209 } 3210 3211 /* 3212 * Check if all pages in ppa array are complete smaller than szc pages and 3213 * their roots will still be aligned relative to their current size if the 3214 * entire ppa array is relocated into one szc page. If these conditions are 3215 * not met return 0. 3216 * 3217 * If all pages are properly aligned attempt to upgrade their locks 3218 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0. 3219 * upgrdfail was set to 0 by caller. 3220 * 3221 * Return 1 if all pages are aligned and locked exclusively. 3222 * 3223 * If all pages in ppa array happen to be physically contiguous to make one 3224 * szc page and all exclusive locks are successfully obtained promote the page 3225 * size to szc and set *pszc to szc. Return 1 with pages locked shared. 
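 * (An illustrative form of the alignment rule follows.)
 */

/*
 * Editor's illustration, not part of the original source: the core rule
 * checked below -- a run of 2^n constituent pages can only be promoted
 * to one large page if its first pfn is 2^n-aligned and the pfns are
 * consecutive.  A hypothetical stand-alone form:
 */
static int
segvn_example_can_promote(pfn_t first_pfn, const pfn_t *pfns, pgcnt_t npgs)
{
	pgcnt_t i;

	if (!IS_P2ALIGNED(first_pfn, npgs))	/* root must be aligned */
		return (0);
	for (i = 0; i < npgs; i++) {
		if (pfns[i] != first_pfn + i)	/* must be contiguous */
			return (0);
	}
	return (1);
}

/*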
3226 */ 3227 static int 3228 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc) 3229 { 3230 page_t *pp; 3231 pfn_t pfn; 3232 pgcnt_t totnpgs = page_get_pagecnt(szc); 3233 pfn_t first_pfn; 3234 int contig = 1; 3235 pgcnt_t i; 3236 pgcnt_t j; 3237 uint_t curszc; 3238 pgcnt_t curnpgs; 3239 int root = 0; 3240 3241 ASSERT(szc > 0); 3242 3243 VM_STAT_ADD(segvnvmstats.fullszcpages[0]); 3244 3245 for (i = 0; i < totnpgs; i++) { 3246 pp = ppa[i]; 3247 ASSERT(PAGE_SHARED(pp)); 3248 ASSERT(!PP_ISFREE(pp)); 3249 pfn = page_pptonum(pp); 3250 if (i == 0) { 3251 if (!IS_P2ALIGNED(pfn, totnpgs)) { 3252 contig = 0; 3253 } else { 3254 first_pfn = pfn; 3255 } 3256 } else if (contig && pfn != first_pfn + i) { 3257 contig = 0; 3258 } 3259 if (pp->p_szc == 0) { 3260 if (root) { 3261 VM_STAT_ADD(segvnvmstats.fullszcpages[1]); 3262 return (0); 3263 } 3264 } else if (!root) { 3265 if ((curszc = pp->p_szc) >= szc) { 3266 VM_STAT_ADD(segvnvmstats.fullszcpages[2]); 3267 return (0); 3268 } 3269 if (curszc == 0) { 3270 /* 3271 * p_szc changed means we don't have all pages 3272 * locked. return failure. 3273 */ 3274 VM_STAT_ADD(segvnvmstats.fullszcpages[3]); 3275 return (0); 3276 } 3277 curnpgs = page_get_pagecnt(curszc); 3278 if (!IS_P2ALIGNED(pfn, curnpgs) || 3279 !IS_P2ALIGNED(i, curnpgs)) { 3280 VM_STAT_ADD(segvnvmstats.fullszcpages[4]); 3281 return (0); 3282 } 3283 root = 1; 3284 } else { 3285 ASSERT(i > 0); 3286 VM_STAT_ADD(segvnvmstats.fullszcpages[5]); 3287 if (pp->p_szc != curszc) { 3288 VM_STAT_ADD(segvnvmstats.fullszcpages[6]); 3289 return (0); 3290 } 3291 if (pfn - 1 != page_pptonum(ppa[i - 1])) { 3292 panic("segvn_full_szcpages: " 3293 "large page not physically contiguous"); 3294 } 3295 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) { 3296 root = 0; 3297 } 3298 } 3299 } 3300 3301 for (i = 0; i < totnpgs; i++) { 3302 ASSERT(ppa[i]->p_szc < szc); 3303 if (!page_tryupgrade(ppa[i])) { 3304 for (j = 0; j < i; j++) { 3305 page_downgrade(ppa[j]); 3306 } 3307 *pszc = ppa[i]->p_szc; 3308 *upgrdfail = 1; 3309 VM_STAT_ADD(segvnvmstats.fullszcpages[7]); 3310 return (0); 3311 } 3312 } 3313 3314 /* 3315 * When a page is put on a free cachelist its szc is set to 0. If the 3316 * file system reclaimed pages from the cachelist, the targ pages will 3317 * be physically contiguous with 0 p_szc. In this case just upgrade 3318 * the szc of the targ pages without any relocations. 3319 * To avoid any hat issues with previous small mappings 3320 * hat_pageunload() the target pages first. 3321 */ 3322 if (contig) { 3323 VM_STAT_ADD(segvnvmstats.fullszcpages[8]); 3324 for (i = 0; i < totnpgs; i++) { 3325 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD); 3326 } 3327 for (i = 0; i < totnpgs; i++) { 3328 ppa[i]->p_szc = szc; 3329 } 3330 for (i = 0; i < totnpgs; i++) { 3331 ASSERT(PAGE_EXCL(ppa[i])); 3332 page_downgrade(ppa[i]); 3333 } 3334 if (pszc != NULL) { 3335 *pszc = szc; 3336 } 3337 } 3338 VM_STAT_ADD(segvnvmstats.fullszcpages[9]); 3339 return (1); 3340 } 3341 3342 /* 3343 * Create physically contiguous pages for [vp, off] - [vp, off + 3344 * page_size(szc)) range and for private segment return them in ppa array. 3345 * Pages are created either via IO or relocations. 3346 * 3347 * Return 1 on success and 0 on failure. 3348 * 3349 * If physically contiguous pages already exist for this range return 1 without 3350 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa 3351 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE().
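 *
 * Editor's illustration (hypothetical caller, condensed from the use in
 * segvn_fault_vnodepages() below):
 *
 *	ppa[0] = NULL;
 *	physcontig = segvn_fill_vp_pages(svd, vp, off, szc, ppa,
 *	    &pplist, &pszc, &downsize);
 *	if (physcontig && ppa[0] == NULL) {
 *		a contiguous large page already existed; the caller
 *		treats ppa as unfilled and obtains the constituent
 *		pages via VOP_GETPAGE()
 *	}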
3352 */ 3353 3354 static int 3355 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off, 3356 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc, 3357 int *downsize) 3358 3359 { 3360 page_t *pplist = *ppplist; 3361 size_t pgsz = page_get_pagesize(szc); 3362 pgcnt_t pages = btop(pgsz); 3363 ulong_t start_off = off; 3364 u_offset_t eoff = off + pgsz; 3365 spgcnt_t nreloc; 3366 u_offset_t io_off = off; 3367 size_t io_len; 3368 page_t *io_pplist = NULL; 3369 page_t *done_pplist = NULL; 3370 pgcnt_t pgidx = 0; 3371 page_t *pp; 3372 page_t *newpp; 3373 page_t *targpp; 3374 int io_err = 0; 3375 int i; 3376 pfn_t pfn; 3377 ulong_t ppages; 3378 page_t *targ_pplist = NULL; 3379 page_t *repl_pplist = NULL; 3380 page_t *tmp_pplist; 3381 int nios = 0; 3382 uint_t pszc; 3383 struct vattr va; 3384 3385 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]); 3386 3387 ASSERT(szc != 0); 3388 ASSERT(pplist->p_szc == szc); 3389 3390 /* 3391 * downsize will be set to 1 only if we fail to lock pages. this will 3392 * allow subsequent faults to try to relocate the page again. If we 3393 * fail due to misalignment don't downsize and let the caller map the 3394 * whole region with small mappings to avoid more faults into the area 3395 * where we can't get large pages anyway. 3396 */ 3397 *downsize = 0; 3398 3399 while (off < eoff) { 3400 newpp = pplist; 3401 ASSERT(newpp != NULL); 3402 ASSERT(PAGE_EXCL(newpp)); 3403 ASSERT(!PP_ISFREE(newpp)); 3404 /* 3405 * we pass NULL for nrelocp to page_lookup_create() 3406 * so that it doesn't relocate. We relocate here 3407 * later only after we make sure we can lock all 3408 * pages in the range we handle and they are all 3409 * aligned. 3410 */ 3411 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0); 3412 ASSERT(pp != NULL); 3413 ASSERT(!PP_ISFREE(pp)); 3414 ASSERT(pp->p_vnode == vp); 3415 ASSERT(pp->p_offset == off); 3416 if (pp == newpp) { 3417 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]); 3418 page_sub(&pplist, pp); 3419 ASSERT(PAGE_EXCL(pp)); 3420 ASSERT(page_iolock_assert(pp)); 3421 page_list_concat(&io_pplist, &pp); 3422 off += PAGESIZE; 3423 continue; 3424 } 3425 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]); 3426 pfn = page_pptonum(pp); 3427 pszc = pp->p_szc; 3428 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL && 3429 IS_P2ALIGNED(pfn, pages)) { 3430 ASSERT(repl_pplist == NULL); 3431 ASSERT(done_pplist == NULL); 3432 ASSERT(pplist == *ppplist); 3433 page_unlock(pp); 3434 page_free_replacement_page(pplist); 3435 page_create_putback(pages); 3436 *ppplist = NULL; 3437 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]); 3438 return (1); 3439 } 3440 if (pszc >= szc) { 3441 page_unlock(pp); 3442 segvn_faultvnmpss_align_err1++; 3443 goto out; 3444 } 3445 ppages = page_get_pagecnt(pszc); 3446 if (!IS_P2ALIGNED(pfn, ppages)) { 3447 ASSERT(pszc > 0); 3448 /* 3449 * sizing down to pszc won't help. 3450 */ 3451 page_unlock(pp); 3452 segvn_faultvnmpss_align_err2++; 3453 goto out; 3454 } 3455 pfn = page_pptonum(newpp); 3456 if (!IS_P2ALIGNED(pfn, ppages)) { 3457 ASSERT(pszc > 0); 3458 /* 3459 * sizing down to pszc won't help. 
3460 */ 3461 page_unlock(pp); 3462 segvn_faultvnmpss_align_err3++; 3463 goto out; 3464 } 3465 if (!PAGE_EXCL(pp)) { 3466 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]); 3467 page_unlock(pp); 3468 *downsize = 1; 3469 *ret_pszc = pp->p_szc; 3470 goto out; 3471 } 3472 targpp = pp; 3473 if (io_pplist != NULL) { 3474 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]); 3475 io_len = off - io_off; 3476 /* 3477 * Some file systems like NFS don't check EOF 3478 * conditions in VOP_PAGEIO(). Check it here 3479 * now that pages are locked SE_EXCL. Any file 3480 * truncation will wait until the pages are 3481 * unlocked so no need to worry that file will 3482 * be truncated after we check its size here. 3483 * XXX fix NFS to remove this check. 3484 */ 3485 va.va_mask = AT_SIZE; 3486 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) { 3487 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]); 3488 page_unlock(targpp); 3489 goto out; 3490 } 3491 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3492 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]); 3493 *downsize = 1; 3494 *ret_pszc = 0; 3495 page_unlock(targpp); 3496 goto out; 3497 } 3498 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3499 B_READ, svd->cred, NULL); 3500 if (io_err) { 3501 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]); 3502 page_unlock(targpp); 3503 if (io_err == EDEADLK) { 3504 segvn_vmpss_pageio_deadlk_err++; 3505 } 3506 goto out; 3507 } 3508 nios++; 3509 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]); 3510 while (io_pplist != NULL) { 3511 pp = io_pplist; 3512 page_sub(&io_pplist, pp); 3513 ASSERT(page_iolock_assert(pp)); 3514 page_io_unlock(pp); 3515 pgidx = (pp->p_offset - start_off) >> 3516 PAGESHIFT; 3517 ASSERT(pgidx < pages); 3518 ppa[pgidx] = pp; 3519 page_list_concat(&done_pplist, &pp); 3520 } 3521 } 3522 pp = targpp; 3523 ASSERT(PAGE_EXCL(pp)); 3524 ASSERT(pp->p_szc <= pszc); 3525 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) { 3526 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]); 3527 page_unlock(pp); 3528 *downsize = 1; 3529 *ret_pszc = pp->p_szc; 3530 goto out; 3531 } 3532 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]); 3533 /* 3534 * page szc could have changed before the entire group was 3535 * locked. reread page szc.
3536 */ 3537 pszc = pp->p_szc; 3538 ppages = page_get_pagecnt(pszc); 3539 3540 /* link just the roots */ 3541 page_list_concat(&targ_pplist, &pp); 3542 page_sub(&pplist, newpp); 3543 page_list_concat(&repl_pplist, &newpp); 3544 off += PAGESIZE; 3545 while (--ppages != 0) { 3546 newpp = pplist; 3547 page_sub(&pplist, newpp); 3548 off += PAGESIZE; 3549 } 3550 io_off = off; 3551 } 3552 if (io_pplist != NULL) { 3553 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]); 3554 io_len = eoff - io_off; 3555 va.va_mask = AT_SIZE; 3556 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) { 3557 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]); 3558 goto out; 3559 } 3560 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3561 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]); 3562 *downsize = 1; 3563 *ret_pszc = 0; 3564 goto out; 3565 } 3566 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3567 B_READ, svd->cred, NULL); 3568 if (io_err) { 3569 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]); 3570 if (io_err == EDEADLK) { 3571 segvn_vmpss_pageio_deadlk_err++; 3572 } 3573 goto out; 3574 } 3575 nios++; 3576 while (io_pplist != NULL) { 3577 pp = io_pplist; 3578 page_sub(&io_pplist, pp); 3579 ASSERT(page_iolock_assert(pp)); 3580 page_io_unlock(pp); 3581 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3582 ASSERT(pgidx < pages); 3583 ppa[pgidx] = pp; 3584 } 3585 } 3586 /* 3587 * we're now bound to succeed or panic. 3588 * remove pages from done_pplist. it's not needed anymore. 3589 */ 3590 while (done_pplist != NULL) { 3591 pp = done_pplist; 3592 page_sub(&done_pplist, pp); 3593 } 3594 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]); 3595 ASSERT(pplist == NULL); 3596 *ppplist = NULL; 3597 while (targ_pplist != NULL) { 3598 int ret; 3599 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]); 3600 ASSERT(repl_pplist); 3601 pp = targ_pplist; 3602 page_sub(&targ_pplist, pp); 3603 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3604 newpp = repl_pplist; 3605 page_sub(&repl_pplist, newpp); 3606 #ifdef DEBUG 3607 pfn = page_pptonum(pp); 3608 pszc = pp->p_szc; 3609 ppages = page_get_pagecnt(pszc); 3610 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3611 pfn = page_pptonum(newpp); 3612 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3613 ASSERT(P2PHASE(pfn, pages) == pgidx); 3614 #endif 3615 nreloc = 0; 3616 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL); 3617 if (ret != 0 || nreloc == 0) { 3618 panic("segvn_fill_vp_pages: " 3619 "page_relocate failed"); 3620 } 3621 pp = newpp; 3622 while (nreloc-- != 0) { 3623 ASSERT(PAGE_EXCL(pp)); 3624 ASSERT(pp->p_vnode == vp); 3625 ASSERT(pgidx == 3626 ((pp->p_offset - start_off) >> PAGESHIFT)); 3627 ppa[pgidx++] = pp; 3628 pp++; 3629 } 3630 } 3631 3632 if (svd->type == MAP_PRIVATE) { 3633 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]); 3634 for (i = 0; i < pages; i++) { 3635 ASSERT(ppa[i] != NULL); 3636 ASSERT(PAGE_EXCL(ppa[i])); 3637 ASSERT(ppa[i]->p_vnode == vp); 3638 ASSERT(ppa[i]->p_offset == 3639 start_off + (i << PAGESHIFT)); 3640 page_downgrade(ppa[i]); 3641 } 3642 ppa[pages] = NULL; 3643 } else { 3644 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]); 3645 /* 3646 * the caller will still call VOP_GETPAGE() for shared segments 3647 * to check FS write permissions. For private segments we map 3648 * file read only anyway. so no VOP_GETPAGE is needed. 
3649 */ 3650 for (i = 0; i < pages; i++) { 3651 ASSERT(ppa[i] != NULL); 3652 ASSERT(PAGE_EXCL(ppa[i])); 3653 ASSERT(ppa[i]->p_vnode == vp); 3654 ASSERT(ppa[i]->p_offset == 3655 start_off + (i << PAGESHIFT)); 3656 page_unlock(ppa[i]); 3657 } 3658 ppa[0] = NULL; 3659 } 3660 3661 return (1); 3662 out: 3663 /* 3664 * Do the cleanup. Unlock target pages we didn't relocate. They are 3665 * linked on targ_pplist by root pages. reassemble unused replacement 3666 * and io pages back to pplist. 3667 */ 3668 if (io_pplist != NULL) { 3669 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]); 3670 pp = io_pplist; 3671 do { 3672 ASSERT(pp->p_vnode == vp); 3673 ASSERT(pp->p_offset == io_off); 3674 ASSERT(page_iolock_assert(pp)); 3675 page_io_unlock(pp); 3676 page_hashout(pp, NULL); 3677 io_off += PAGESIZE; 3678 } while ((pp = pp->p_next) != io_pplist); 3679 page_list_concat(&io_pplist, &pplist); 3680 pplist = io_pplist; 3681 } 3682 tmp_pplist = NULL; 3683 while (targ_pplist != NULL) { 3684 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]); 3685 pp = targ_pplist; 3686 ASSERT(PAGE_EXCL(pp)); 3687 page_sub(&targ_pplist, pp); 3688 3689 pszc = pp->p_szc; 3690 ppages = page_get_pagecnt(pszc); 3691 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3692 3693 if (pszc != 0) { 3694 group_page_unlock(pp); 3695 } 3696 page_unlock(pp); 3697 3698 pp = repl_pplist; 3699 ASSERT(pp != NULL); 3700 ASSERT(PAGE_EXCL(pp)); 3701 ASSERT(pp->p_szc == szc); 3702 page_sub(&repl_pplist, pp); 3703 3704 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3705 3706 /* relink replacement page */ 3707 page_list_concat(&tmp_pplist, &pp); 3708 while (--ppages != 0) { 3709 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]); 3710 pp++; 3711 ASSERT(PAGE_EXCL(pp)); 3712 ASSERT(pp->p_szc == szc); 3713 page_list_concat(&tmp_pplist, &pp); 3714 } 3715 } 3716 if (tmp_pplist != NULL) { 3717 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]); 3718 page_list_concat(&tmp_pplist, &pplist); 3719 pplist = tmp_pplist; 3720 } 3721 /* 3722 * at this point all pages are either on done_pplist or 3723 * pplist. They can't be all on done_pplist otherwise 3724 * we'd've been done. 3725 */ 3726 ASSERT(pplist != NULL); 3727 if (nios != 0) { 3728 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]); 3729 pp = pplist; 3730 do { 3731 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]); 3732 ASSERT(pp->p_szc == szc); 3733 ASSERT(PAGE_EXCL(pp)); 3734 ASSERT(pp->p_vnode != vp); 3735 pp->p_szc = 0; 3736 } while ((pp = pp->p_next) != pplist); 3737 3738 pp = done_pplist; 3739 do { 3740 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]); 3741 ASSERT(pp->p_szc == szc); 3742 ASSERT(PAGE_EXCL(pp)); 3743 ASSERT(pp->p_vnode == vp); 3744 pp->p_szc = 0; 3745 } while ((pp = pp->p_next) != done_pplist); 3746 3747 while (pplist != NULL) { 3748 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]); 3749 pp = pplist; 3750 page_sub(&pplist, pp); 3751 page_free(pp, 0); 3752 } 3753 3754 while (done_pplist != NULL) { 3755 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]); 3756 pp = done_pplist; 3757 page_sub(&done_pplist, pp); 3758 page_unlock(pp); 3759 } 3760 *ppplist = NULL; 3761 return (0); 3762 } 3763 ASSERT(pplist == *ppplist); 3764 if (io_err) { 3765 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]); 3766 /* 3767 * don't downsize on io error. 3768 * see if vop_getpage succeeds. 3769 * pplist may still be used in this case 3770 * for relocations. 
3771 */ 3772 return (0); 3773 } 3774 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]); 3775 page_free_replacement_page(pplist); 3776 page_create_putback(pages); 3777 *ppplist = NULL; 3778 return (0); 3779 } 3780 3781 int segvn_anypgsz = 0; 3782 3783 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \ 3784 if ((type) == F_SOFTLOCK) { \ 3785 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \ 3786 -(pages)); \ 3787 } 3788 3789 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \ 3790 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \ 3791 if ((rw) == S_WRITE) { \ 3792 for (i = 0; i < (pages); i++) { \ 3793 ASSERT((ppa)[i]->p_vnode == \ 3794 (ppa)[0]->p_vnode); \ 3795 hat_setmod((ppa)[i]); \ 3796 } \ 3797 } else if ((rw) != S_OTHER && \ 3798 ((prot) & (vpprot) & PROT_WRITE)) { \ 3799 for (i = 0; i < (pages); i++) { \ 3800 ASSERT((ppa)[i]->p_vnode == \ 3801 (ppa)[0]->p_vnode); \ 3802 if (!hat_ismod((ppa)[i])) { \ 3803 prot &= ~PROT_WRITE; \ 3804 break; \ 3805 } \ 3806 } \ 3807 } \ 3808 } 3809 3810 #ifdef VM_STATS 3811 3812 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \ 3813 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]); 3814 3815 #else /* VM_STATS */ 3816 3817 #define SEGVN_VMSTAT_FLTVNPAGES(idx) 3818 3819 #endif 3820 3821 static faultcode_t 3822 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 3823 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 3824 caddr_t eaddr, int brkcow) 3825 { 3826 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 3827 struct anon_map *amp = svd->amp; 3828 uchar_t segtype = svd->type; 3829 uint_t szc = seg->s_szc; 3830 size_t pgsz = page_get_pagesize(szc); 3831 size_t maxpgsz = pgsz; 3832 pgcnt_t pages = btop(pgsz); 3833 pgcnt_t maxpages = pages; 3834 size_t ppasize = (pages + 1) * sizeof (page_t *); 3835 caddr_t a = lpgaddr; 3836 caddr_t maxlpgeaddr = lpgeaddr; 3837 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base); 3838 ulong_t aindx = svd->anon_index + seg_page(seg, a); 3839 struct vpage *vpage = (svd->vpage != NULL) ? 3840 &svd->vpage[seg_page(seg, a)] : NULL; 3841 vnode_t *vp = svd->vp; 3842 page_t **ppa; 3843 uint_t pszc; 3844 size_t ppgsz; 3845 pgcnt_t ppages; 3846 faultcode_t err = 0; 3847 int ierr; 3848 int vop_size_err = 0; 3849 uint_t protchk, prot, vpprot; 3850 ulong_t i; 3851 int hat_flag = (type == F_SOFTLOCK) ? 
HAT_LOAD_LOCK : HAT_LOAD; 3852 anon_sync_obj_t an_cookie; 3853 enum seg_rw arw; 3854 int alloc_failed = 0; 3855 int adjszc_chk; 3856 struct vattr va; 3857 int xhat = 0; 3858 page_t *pplist; 3859 pfn_t pfn; 3860 int physcontig; 3861 int upgrdfail; 3862 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */ 3863 int tron = (svd->tr_state == SEGVN_TR_ON); 3864 3865 ASSERT(szc != 0); 3866 ASSERT(vp != NULL); 3867 ASSERT(brkcow == 0 || amp != NULL); 3868 ASSERT(tron == 0 || amp != NULL); 3869 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 3870 ASSERT(!(svd->flags & MAP_NORESERVE)); 3871 ASSERT(type != F_SOFTUNLOCK); 3872 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 3873 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages)); 3874 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 3875 ASSERT(seg->s_szc < NBBY * sizeof (int)); 3876 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz); 3877 ASSERT(svd->tr_state != SEGVN_TR_INIT); 3878 3879 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]); 3880 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]); 3881 3882 if (svd->flags & MAP_TEXT) { 3883 hat_flag |= HAT_LOAD_TEXT; 3884 } 3885 3886 if (svd->pageprot) { 3887 switch (rw) { 3888 case S_READ: 3889 protchk = PROT_READ; 3890 break; 3891 case S_WRITE: 3892 protchk = PROT_WRITE; 3893 break; 3894 case S_EXEC: 3895 protchk = PROT_EXEC; 3896 break; 3897 case S_OTHER: 3898 default: 3899 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 3900 break; 3901 } 3902 } else { 3903 prot = svd->prot; 3904 /* caller has already done segment level protection check. */ 3905 } 3906 3907 if (seg->s_as->a_hat != hat) { 3908 xhat = 1; 3909 } 3910 3911 if (rw == S_WRITE && segtype == MAP_PRIVATE) { 3912 SEGVN_VMSTAT_FLTVNPAGES(2); 3913 arw = S_READ; 3914 } else { 3915 arw = rw; 3916 } 3917 3918 ppa = kmem_alloc(ppasize, KM_SLEEP); 3919 3920 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]); 3921 3922 for (;;) { 3923 adjszc_chk = 0; 3924 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) { 3925 if (adjszc_chk) { 3926 while (szc < seg->s_szc) { 3927 uintptr_t e; 3928 uint_t tszc; 3929 tszc = segvn_anypgsz_vnode ? 
szc + 1 : 3930 seg->s_szc; 3931 ppgsz = page_get_pagesize(tszc); 3932 if (!IS_P2ALIGNED(a, ppgsz) || 3933 ((alloc_failed >> tszc) & 0x1)) { 3934 break; 3935 } 3936 SEGVN_VMSTAT_FLTVNPAGES(4); 3937 szc = tszc; 3938 pgsz = ppgsz; 3939 pages = btop(pgsz); 3940 e = P2ROUNDUP((uintptr_t)eaddr, pgsz); 3941 lpgeaddr = (caddr_t)e; 3942 } 3943 } 3944 3945 again: 3946 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) { 3947 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 3948 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 3949 anon_array_enter(amp, aindx, &an_cookie); 3950 if (anon_get_ptr(amp->ahp, aindx) != NULL) { 3951 SEGVN_VMSTAT_FLTVNPAGES(5); 3952 ASSERT(anon_pages(amp->ahp, aindx, 3953 maxpages) == maxpages); 3954 anon_array_exit(&an_cookie); 3955 ANON_LOCK_EXIT(&amp->a_rwlock); 3956 err = segvn_fault_anonpages(hat, seg, 3957 a, a + maxpgsz, type, rw, 3958 MAX(a, addr), 3959 MIN(a + maxpgsz, eaddr), brkcow); 3960 if (err != 0) { 3961 SEGVN_VMSTAT_FLTVNPAGES(6); 3962 goto out; 3963 } 3964 if (szc < seg->s_szc) { 3965 szc = seg->s_szc; 3966 pgsz = maxpgsz; 3967 pages = maxpages; 3968 lpgeaddr = maxlpgeaddr; 3969 } 3970 goto next; 3971 } else { 3972 ASSERT(anon_pages(amp->ahp, aindx, 3973 maxpages) == 0); 3974 SEGVN_VMSTAT_FLTVNPAGES(7); 3975 anon_array_exit(&an_cookie); 3976 ANON_LOCK_EXIT(&amp->a_rwlock); 3977 } 3978 } 3979 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz)); 3980 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz)); 3981 3982 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 3983 ASSERT(vpage != NULL); 3984 prot = VPP_PROT(vpage); 3985 ASSERT(sameprot(seg, a, maxpgsz)); 3986 if ((prot & protchk) == 0) { 3987 SEGVN_VMSTAT_FLTVNPAGES(8); 3988 err = FC_PROT; 3989 goto out; 3990 } 3991 } 3992 if (type == F_SOFTLOCK) { 3993 atomic_add_long((ulong_t *)&svd->softlockcnt, 3994 pages); 3995 } 3996 3997 pplist = NULL; 3998 physcontig = 0; 3999 ppa[0] = NULL; 4000 if (!brkcow && !tron && szc && 4001 !page_exists_physcontig(vp, off, szc, 4002 segtype == MAP_PRIVATE ?
ppa : NULL)) { 4003 SEGVN_VMSTAT_FLTVNPAGES(9); 4004 if (page_alloc_pages(vp, seg, a, &pplist, NULL, 4005 szc, 0, 0) && type != F_SOFTLOCK) { 4006 SEGVN_VMSTAT_FLTVNPAGES(10); 4007 pszc = 0; 4008 ierr = -1; 4009 alloc_failed |= (1 << szc); 4010 break; 4011 } 4012 if (pplist != NULL && 4013 vp->v_mpssdata == SEGVN_PAGEIO) { 4014 int downsize; 4015 SEGVN_VMSTAT_FLTVNPAGES(11); 4016 physcontig = segvn_fill_vp_pages(svd, 4017 vp, off, szc, ppa, &pplist, 4018 &pszc, &downsize); 4019 ASSERT(!physcontig || pplist == NULL); 4020 if (!physcontig && downsize && 4021 type != F_SOFTLOCK) { 4022 ASSERT(pplist == NULL); 4023 SEGVN_VMSTAT_FLTVNPAGES(12); 4024 ierr = -1; 4025 break; 4026 } 4027 ASSERT(!physcontig || 4028 segtype == MAP_PRIVATE || 4029 ppa[0] == NULL); 4030 if (physcontig && ppa[0] == NULL) { 4031 physcontig = 0; 4032 } 4033 } 4034 } else if (!brkcow && !tron && szc && ppa[0] != NULL) { 4035 SEGVN_VMSTAT_FLTVNPAGES(13); 4036 ASSERT(segtype == MAP_PRIVATE); 4037 physcontig = 1; 4038 } 4039 4040 if (!physcontig) { 4041 SEGVN_VMSTAT_FLTVNPAGES(14); 4042 ppa[0] = NULL; 4043 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz, 4044 &vpprot, ppa, pgsz, seg, a, arw, 4045 svd->cred, NULL); 4046 #ifdef DEBUG 4047 if (ierr == 0) { 4048 for (i = 0; i < pages; i++) { 4049 ASSERT(PAGE_LOCKED(ppa[i])); 4050 ASSERT(!PP_ISFREE(ppa[i])); 4051 ASSERT(ppa[i]->p_vnode == vp); 4052 ASSERT(ppa[i]->p_offset == 4053 off + (i << PAGESHIFT)); 4054 } 4055 } 4056 #endif /* DEBUG */ 4057 if (segtype == MAP_PRIVATE) { 4058 SEGVN_VMSTAT_FLTVNPAGES(15); 4059 vpprot &= ~PROT_WRITE; 4060 } 4061 } else { 4062 ASSERT(segtype == MAP_PRIVATE); 4063 SEGVN_VMSTAT_FLTVNPAGES(16); 4064 vpprot = PROT_ALL & ~PROT_WRITE; 4065 ierr = 0; 4066 } 4067 4068 if (ierr != 0) { 4069 SEGVN_VMSTAT_FLTVNPAGES(17); 4070 if (pplist != NULL) { 4071 SEGVN_VMSTAT_FLTVNPAGES(18); 4072 page_free_replacement_page(pplist); 4073 page_create_putback(pages); 4074 } 4075 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4076 if (a + pgsz <= eaddr) { 4077 SEGVN_VMSTAT_FLTVNPAGES(19); 4078 err = FC_MAKE_ERR(ierr); 4079 goto out; 4080 } 4081 va.va_mask = AT_SIZE; 4082 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) { 4083 SEGVN_VMSTAT_FLTVNPAGES(20); 4084 err = FC_MAKE_ERR(EIO); 4085 goto out; 4086 } 4087 if (btopr(va.va_size) >= btopr(off + pgsz)) { 4088 SEGVN_VMSTAT_FLTVNPAGES(21); 4089 err = FC_MAKE_ERR(ierr); 4090 goto out; 4091 } 4092 if (btopr(va.va_size) < 4093 btopr(off + (eaddr - a))) { 4094 SEGVN_VMSTAT_FLTVNPAGES(22); 4095 err = FC_MAKE_ERR(ierr); 4096 goto out; 4097 } 4098 if (brkcow || tron || type == F_SOFTLOCK) { 4099 /* can't reduce map area */ 4100 SEGVN_VMSTAT_FLTVNPAGES(23); 4101 vop_size_err = 1; 4102 goto out; 4103 } 4104 SEGVN_VMSTAT_FLTVNPAGES(24); 4105 ASSERT(szc != 0); 4106 pszc = 0; 4107 ierr = -1; 4108 break; 4109 } 4110 4111 if (amp != NULL) { 4112 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 4113 anon_array_enter(amp, aindx, &an_cookie); 4114 } 4115 if (amp != NULL && 4116 anon_get_ptr(amp->ahp, aindx) != NULL) { 4117 ulong_t taindx = P2ALIGN(aindx, maxpages); 4118 4119 SEGVN_VMSTAT_FLTVNPAGES(25); 4120 ASSERT(anon_pages(amp->ahp, taindx, 4121 maxpages) == maxpages); 4122 for (i = 0; i < pages; i++) { 4123 page_unlock(ppa[i]); 4124 } 4125 anon_array_exit(&an_cookie); 4126 ANON_LOCK_EXIT(&amp->a_rwlock); 4127 if (pplist != NULL) { 4128 page_free_replacement_page(pplist); 4129 page_create_putback(pages); 4130 } 4131 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4132 if (szc < seg->s_szc) { 4133 SEGVN_VMSTAT_FLTVNPAGES(26); 4134 /* 4135 * For private segments SOFTLOCK
4136 * either always breaks cow (any rw 4137 * type except S_READ_NOCOW) or 4138 * address space is locked as writer 4139 * (S_READ_NOCOW case) and anon slots 4140 * can't show up on second check. 4141 * Therefore if we are here for 4142 * SOFTLOCK case it must be a cow 4143 * break but cow break never reduces 4144 * szc. Text replication (tron) in 4145 * this case works as cow break. 4146 * Thus the assert below. 4147 */ 4148 ASSERT(!brkcow && !tron && 4149 type != F_SOFTLOCK); 4150 pszc = seg->s_szc; 4151 ierr = -2; 4152 break; 4153 } 4154 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4155 goto again; 4156 } 4157 #ifdef DEBUG 4158 if (amp != NULL) { 4159 ulong_t taindx = P2ALIGN(aindx, maxpages); 4160 ASSERT(!anon_pages(amp->ahp, taindx, maxpages)); 4161 } 4162 #endif /* DEBUG */ 4163 4164 if (brkcow || tron) { 4165 ASSERT(amp != NULL); 4166 ASSERT(pplist == NULL); 4167 ASSERT(szc == seg->s_szc); 4168 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4169 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 4170 SEGVN_VMSTAT_FLTVNPAGES(27); 4171 ierr = anon_map_privatepages(amp, aindx, szc, 4172 seg, a, prot, ppa, vpage, segvn_anypgsz, 4173 tron ? PG_LOCAL : 0, svd->cred); 4174 if (ierr != 0) { 4175 SEGVN_VMSTAT_FLTVNPAGES(28); 4176 anon_array_exit(&an_cookie); 4177 ANON_LOCK_EXIT(&amp->a_rwlock); 4178 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4179 err = FC_MAKE_ERR(ierr); 4180 goto out; 4181 } 4182 4183 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4184 /* 4185 * p_szc can't be changed for locked 4186 * swapfs pages. 4187 */ 4188 ASSERT(svd->rcookie == 4189 HAT_INVALID_REGION_COOKIE); 4190 hat_memload_array(hat, a, pgsz, ppa, prot, 4191 hat_flag); 4192 4193 if (!(hat_flag & HAT_LOAD_LOCK)) { 4194 SEGVN_VMSTAT_FLTVNPAGES(29); 4195 for (i = 0; i < pages; i++) { 4196 page_unlock(ppa[i]); 4197 } 4198 } 4199 anon_array_exit(&an_cookie); 4200 ANON_LOCK_EXIT(&amp->a_rwlock); 4201 goto next; 4202 } 4203 4204 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 4205 (!svd->pageprot && svd->prot == (prot & vpprot))); 4206 4207 pfn = page_pptonum(ppa[0]); 4208 /* 4209 * hat_page_demote() needs an SE_EXCL lock on one of 4210 * constituent page_t's and it decreases root's p_szc 4211 * last. This means if root's p_szc is equal to szc and 4212 * all its constituent pages are locked, the 4213 * hat_page_demote() that could have changed p_szc to 4214 * szc is already done and no new hat_page_demote() 4215 * can start for this large page. 4216 */ 4217 4218 /* 4219 * We need to make sure the same mapping size is used for 4220 * the same address range if there's a possibility the 4221 * address is already mapped, because the hat layer panics 4222 * when a translation is loaded for a range already 4223 * mapped with a different page size. We achieve this 4224 * by always using the largest page size possible subject 4225 * to the constraints of page size, segment page size 4226 * and page alignment. Since mappings are invalidated 4227 * when those constraints change, making it 4228 * impossible to use a previously used mapping size, no 4229 * mapping size conflicts should happen.
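 * As an illustration (hypothetical sizes): if one thread
 * maps a file range with 4M translations, another thread
 * faulting the same range must not load 8K translations
 * over it; since both derive the mapping size from the
 * same alignment and segment constraints, they always
 * agree.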
4230 */ 4231 4232 chkszc: 4233 if ((pszc = ppa[0]->p_szc) == szc && 4234 IS_P2ALIGNED(pfn, pages)) { 4235 4236 SEGVN_VMSTAT_FLTVNPAGES(30); 4237 #ifdef DEBUG 4238 for (i = 0; i < pages; i++) { 4239 ASSERT(PAGE_LOCKED(ppa[i])); 4240 ASSERT(!PP_ISFREE(ppa[i])); 4241 ASSERT(page_pptonum(ppa[i]) == 4242 pfn + i); 4243 ASSERT(ppa[i]->p_szc == szc); 4244 ASSERT(ppa[i]->p_vnode == vp); 4245 ASSERT(ppa[i]->p_offset == 4246 off + (i << PAGESHIFT)); 4247 } 4248 #endif /* DEBUG */ 4249 /* 4250 * All pages are of the szc we need and they are 4251 * all locked so they can't change szc. Load 4252 * translations. 4253 * 4254 * If the page got promoted since the last check 4255 * we don't need pplist. 4256 */ 4257 if (pplist != NULL) { 4258 page_free_replacement_page(pplist); 4259 page_create_putback(pages); 4260 } 4261 if (PP_ISMIGRATE(ppa[0])) { 4262 page_migrate(seg, a, ppa, pages); 4263 } 4264 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4265 prot, vpprot); 4266 if (!xhat) { 4267 hat_memload_array_region(hat, a, pgsz, 4268 ppa, prot & vpprot, hat_flag, 4269 svd->rcookie); 4270 } else { 4271 /* 4272 * Avoid large xhat mappings to FS 4273 * pages so that hat_page_demote() 4274 * doesn't need to check for xhat 4275 * large mappings. 4276 * Don't use regions with xhats. 4277 */ 4278 for (i = 0; i < pages; i++) { 4279 hat_memload(hat, 4280 a + (i << PAGESHIFT), 4281 ppa[i], prot & vpprot, 4282 hat_flag); 4283 } 4284 } 4285 4286 if (!(hat_flag & HAT_LOAD_LOCK)) { 4287 for (i = 0; i < pages; i++) { 4288 page_unlock(ppa[i]); 4289 } 4290 } 4291 if (amp != NULL) { 4292 anon_array_exit(&an_cookie); 4293 ANON_LOCK_EXIT(&amp->a_rwlock); 4294 } 4295 goto next; 4296 } 4297 4298 /* 4299 * See if upsize is possible. 4300 */ 4301 if (pszc > szc && szc < seg->s_szc && 4302 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) { 4303 pgcnt_t aphase; 4304 uint_t pszc1 = MIN(pszc, seg->s_szc); 4305 ppgsz = page_get_pagesize(pszc1); 4306 ppages = btop(ppgsz); 4307 aphase = btop(P2PHASE((uintptr_t)a, ppgsz)); 4308 4309 ASSERT(type != F_SOFTLOCK); 4310 4311 SEGVN_VMSTAT_FLTVNPAGES(31); 4312 if (aphase != P2PHASE(pfn, ppages)) { 4313 segvn_faultvnmpss_align_err4++; 4314 } else { 4315 SEGVN_VMSTAT_FLTVNPAGES(32); 4316 if (pplist != NULL) { 4317 page_t *pl = pplist; 4318 page_free_replacement_page(pl); 4319 page_create_putback(pages); 4320 } 4321 for (i = 0; i < pages; i++) { 4322 page_unlock(ppa[i]); 4323 } 4324 if (amp != NULL) { 4325 anon_array_exit(&an_cookie); 4326 ANON_LOCK_EXIT(&amp->a_rwlock); 4327 } 4328 pszc = pszc1; 4329 ierr = -2; 4330 break; 4331 } 4332 } 4333 4334 /* 4335 * Check if we should use the smallest mapping size. 4336 */ 4337 upgrdfail = 0; 4338 if (szc == 0 || xhat || 4339 (pszc >= szc && 4340 !IS_P2ALIGNED(pfn, pages)) || 4341 (pszc < szc && 4342 !segvn_full_szcpages(ppa, szc, &upgrdfail, 4343 &pszc))) { 4344 4345 if (upgrdfail && type != F_SOFTLOCK) { 4346 /* 4347 * segvn_full_szcpages failed to lock 4348 * all pages EXCL. Size down.
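 * Note that for F_SOFTLOCK we never size down here:
 * the softlock accounting and the later F_SOFTUNLOCK
 * expect the region computed from the segment's page
 * size, so the fallback below keeps the region and
 * loads the constituent pages with smaller
 * translations instead.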
4349 */ 4350 ASSERT(pszc < szc); 4351 4352 SEGVN_VMSTAT_FLTVNPAGES(33); 4353 4354 if (pplist != NULL) { 4355 page_t *pl = pplist; 4356 page_free_replacement_page(pl); 4357 page_create_putback(pages); 4358 } 4359 4360 for (i = 0; i < pages; i++) { 4361 page_unlock(ppa[i]); 4362 } 4363 if (amp != NULL) { 4364 anon_array_exit(&an_cookie); 4365 ANON_LOCK_EXIT(&amp->a_rwlock); 4366 } 4367 ierr = -1; 4368 break; 4369 } 4370 if (szc != 0 && !xhat && !upgrdfail) { 4371 segvn_faultvnmpss_align_err5++; 4372 } 4373 SEGVN_VMSTAT_FLTVNPAGES(34); 4374 if (pplist != NULL) { 4375 page_free_replacement_page(pplist); 4376 page_create_putback(pages); 4377 } 4378 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4379 prot, vpprot); 4380 if (upgrdfail && segvn_anypgsz_vnode) { 4381 /* SOFTLOCK case */ 4382 hat_memload_array_region(hat, a, pgsz, 4383 ppa, prot & vpprot, hat_flag, 4384 svd->rcookie); 4385 } else { 4386 for (i = 0; i < pages; i++) { 4387 hat_memload_region(hat, 4388 a + (i << PAGESHIFT), 4389 ppa[i], prot & vpprot, 4390 hat_flag, svd->rcookie); 4391 } 4392 } 4393 if (!(hat_flag & HAT_LOAD_LOCK)) { 4394 for (i = 0; i < pages; i++) { 4395 page_unlock(ppa[i]); 4396 } 4397 } 4398 if (amp != NULL) { 4399 anon_array_exit(&an_cookie); 4400 ANON_LOCK_EXIT(&amp->a_rwlock); 4401 } 4402 goto next; 4403 } 4404 4405 if (pszc == szc) { 4406 /* 4407 * segvn_full_szcpages() upgraded the pages' szc. 4408 */ 4409 ASSERT(pszc == ppa[0]->p_szc); 4410 ASSERT(IS_P2ALIGNED(pfn, pages)); 4411 goto chkszc; 4412 } 4413 4414 if (pszc > szc) { 4415 kmutex_t *szcmtx; 4416 SEGVN_VMSTAT_FLTVNPAGES(35); 4417 /* 4418 * p_szc of ppa[0] can change since we haven't 4419 * locked all constituent pages. Call 4420 * page_szc_lock() to prevent szc changes. 4421 * This should be a rare case that happens when 4422 * multiple segments use a different page size 4423 * to map the same file offsets. 4424 */ 4425 szcmtx = page_szc_lock(ppa[0]); 4426 pszc = ppa[0]->p_szc; 4427 ASSERT(szcmtx != NULL || pszc == 0); 4428 ASSERT(ppa[0]->p_szc <= pszc); 4429 if (pszc <= szc) { 4430 SEGVN_VMSTAT_FLTVNPAGES(36); 4431 if (szcmtx != NULL) { 4432 mutex_exit(szcmtx); 4433 } 4434 goto chkszc; 4435 } 4436 if (pplist != NULL) { 4437 /* 4438 * Page got promoted since the last check. 4439 * We don't need the preallocated large 4440 * page. 4441 */ 4442 SEGVN_VMSTAT_FLTVNPAGES(37); 4443 page_free_replacement_page(pplist); 4444 page_create_putback(pages); 4445 } 4446 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4447 prot, vpprot); 4448 hat_memload_array_region(hat, a, pgsz, ppa, 4449 prot & vpprot, hat_flag, svd->rcookie); 4450 mutex_exit(szcmtx); 4451 if (!(hat_flag & HAT_LOAD_LOCK)) { 4452 for (i = 0; i < pages; i++) { 4453 page_unlock(ppa[i]); 4454 } 4455 } 4456 if (amp != NULL) { 4457 anon_array_exit(&an_cookie); 4458 ANON_LOCK_EXIT(&amp->a_rwlock); 4459 } 4460 goto next; 4461 } 4462 4463 /* 4464 * If the page got demoted since the last check 4465 * we may not have allocated a larger page. 4466 * Allocate now.
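 * page_alloc_pages() here only reserves a replacement
 * large page; segvn_relocate_pages() below copies the
 * existing constituent pages into it so that a
 * physically contiguous large page can be mapped.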
4467 */ 4468 if (pplist == NULL && 4469 page_alloc_pages(vp, seg, a, &pplist, NULL, 4470 szc, 0, 0) && type != F_SOFTLOCK) { 4471 SEGVN_VMSTAT_FLTVNPAGES(38); 4472 for (i = 0; i < pages; i++) { 4473 page_unlock(ppa[i]); 4474 } 4475 if (amp != NULL) { 4476 anon_array_exit(&an_cookie); 4477 ANON_LOCK_EXIT(&amp->a_rwlock); 4478 } 4479 ierr = -1; 4480 alloc_failed |= (1 << szc); 4481 break; 4482 } 4483 4484 SEGVN_VMSTAT_FLTVNPAGES(39); 4485 4486 if (pplist != NULL) { 4487 segvn_relocate_pages(ppa, pplist); 4488 #ifdef DEBUG 4489 } else { 4490 ASSERT(type == F_SOFTLOCK); 4491 SEGVN_VMSTAT_FLTVNPAGES(40); 4492 #endif /* DEBUG */ 4493 } 4494 4495 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot); 4496 4497 if (pplist == NULL && segvn_anypgsz_vnode == 0) { 4498 ASSERT(type == F_SOFTLOCK); 4499 for (i = 0; i < pages; i++) { 4500 ASSERT(ppa[i]->p_szc < szc); 4501 hat_memload_region(hat, 4502 a + (i << PAGESHIFT), 4503 ppa[i], prot & vpprot, hat_flag, 4504 svd->rcookie); 4505 } 4506 } else { 4507 ASSERT(pplist != NULL || type == F_SOFTLOCK); 4508 hat_memload_array_region(hat, a, pgsz, ppa, 4509 prot & vpprot, hat_flag, svd->rcookie); 4510 } 4511 if (!(hat_flag & HAT_LOAD_LOCK)) { 4512 for (i = 0; i < pages; i++) { 4513 ASSERT(PAGE_SHARED(ppa[i])); 4514 page_unlock(ppa[i]); 4515 } 4516 } 4517 if (amp != NULL) { 4518 anon_array_exit(&an_cookie); 4519 ANON_LOCK_EXIT(&amp->a_rwlock); 4520 } 4521 4522 next: 4523 if (vpage != NULL) { 4524 vpage += pages; 4525 } 4526 adjszc_chk = 1; 4527 } 4528 if (a == lpgeaddr) 4529 break; 4530 ASSERT(a < lpgeaddr); 4531 4532 ASSERT(!brkcow && !tron && type != F_SOFTLOCK); 4533 4534 /* 4535 * ierr == -1 means we failed to map with a large page 4536 * (either due to allocation/relocation failures or 4537 * misalignment with other mappings to this file). 4538 * 4539 * ierr == -2 means some other thread allocated a large page 4540 * after we gave up to map with a large page. Retry with a 4541 * larger mapping. 4542 */ 4543 ASSERT(ierr == -1 || ierr == -2); 4544 ASSERT(ierr == -2 || szc != 0); 4545 ASSERT(ierr == -1 || szc < seg->s_szc); 4546 if (ierr == -2) { 4547 SEGVN_VMSTAT_FLTVNPAGES(41); 4548 ASSERT(pszc > szc && pszc <= seg->s_szc); 4549 szc = pszc; 4550 } else if (segvn_anypgsz_vnode) { 4551 SEGVN_VMSTAT_FLTVNPAGES(42); 4552 szc--; 4553 } else { 4554 SEGVN_VMSTAT_FLTVNPAGES(43); 4555 ASSERT(pszc < szc); 4556 /* 4557 * Another process created a pszc large page, 4558 * but we still have to drop to szc 0. 4559 */ 4560 szc = 0; 4561 } 4562 4563 pgsz = page_get_pagesize(szc); 4564 pages = btop(pgsz); 4565 if (ierr == -2) { 4566 /* 4567 * Size up case. Note lpgaddr may only be needed for 4568 * softlock case so we don't adjust it here. 4569 */ 4570 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4571 ASSERT(a >= lpgaddr); 4572 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4573 off = svd->offset + (uintptr_t)(a - seg->s_base); 4574 aindx = svd->anon_index + seg_page(seg, a); 4575 vpage = (svd->vpage != NULL) ? 4576 &svd->vpage[seg_page(seg, a)] : NULL; 4577 } else { 4578 /* 4579 * Size down case. Note lpgaddr may only be needed for 4580 * softlock case so we don't adjust it here. 4581 */ 4582 ASSERT(IS_P2ALIGNED(a, pgsz)); 4583 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4584 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4585 ASSERT(a < lpgeaddr); 4586 if (a < addr) { 4587 SEGVN_VMSTAT_FLTVNPAGES(44); 4588 /* 4589 * The beginning of the large page region can 4590 * be pulled to the right to make a smaller 4591 * region. We haven't yet faulted a single 4592 * page.
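 * For example (hypothetical addresses): with lpgaddr at
 * 0x400000 and a 4M segment page size, a fault at addr
 * 0x4a3000 that sizes down to 64K pages can restart at
 * a = P2ALIGN(0x4a3000, 0x10000) = 0x4a0000 instead of
 * refaulting from 0x400000.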
4593 */ 4594 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4595 ASSERT(a >= lpgaddr); 4596 off = svd->offset + 4597 (uintptr_t)(a - seg->s_base); 4598 aindx = svd->anon_index + seg_page(seg, a); 4599 vpage = (svd->vpage != NULL) ? 4600 &svd->vpage[seg_page(seg, a)] : NULL; 4601 } 4602 } 4603 } 4604 out: 4605 kmem_free(ppa, ppasize); 4606 if (!err && !vop_size_err) { 4607 SEGVN_VMSTAT_FLTVNPAGES(45); 4608 return (0); 4609 } 4610 if (type == F_SOFTLOCK && a > lpgaddr) { 4611 SEGVN_VMSTAT_FLTVNPAGES(46); 4612 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4613 } 4614 if (!vop_size_err) { 4615 SEGVN_VMSTAT_FLTVNPAGES(47); 4616 return (err); 4617 } 4618 ASSERT(brkcow || tron || type == F_SOFTLOCK); 4619 /* 4620 * Large page end is mapped beyond the end of the file and it's a cow 4621 * fault (can be a text replication induced cow) or a softlock, so we 4622 * can't reduce the map area. For now just demote the segment. This 4623 * should really only happen if the end of the file changed after the 4624 * mapping was established, since when large page segments are created 4625 * we make sure they don't extend beyond the end of the file. 4626 */ 4627 SEGVN_VMSTAT_FLTVNPAGES(48); 4628 4629 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4630 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4631 err = 0; 4632 if (seg->s_szc != 0) { 4633 segvn_fltvnpages_clrszc_cnt++; 4634 ASSERT(svd->softlockcnt == 0); 4635 err = segvn_clrszc(seg); 4636 if (err != 0) { 4637 segvn_fltvnpages_clrszc_err++; 4638 } 4639 } 4640 ASSERT(err || seg->s_szc == 0); 4641 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock); 4642 /* segvn_fault will do its job as if szc had been zero to begin with */ 4643 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err)); 4644 } 4645 4646 /* 4647 * This routine will attempt to fault in one large page. 4648 * It will use smaller pages if that fails. 4649 * It should only be called for pure anonymous segments. 4650 */ 4651 static faultcode_t 4652 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 4653 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 4654 caddr_t eaddr, int brkcow) 4655 { 4656 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4657 struct anon_map *amp = svd->amp; 4658 uchar_t segtype = svd->type; 4659 uint_t szc = seg->s_szc; 4660 size_t pgsz = page_get_pagesize(szc); 4661 size_t maxpgsz = pgsz; 4662 pgcnt_t pages = btop(pgsz); 4663 uint_t ppaszc = szc; 4664 caddr_t a = lpgaddr; 4665 ulong_t aindx = svd->anon_index + seg_page(seg, a); 4666 struct vpage *vpage = (svd->vpage != NULL) ? 4667 &svd->vpage[seg_page(seg, a)] : NULL; 4668 page_t **ppa; 4669 uint_t ppa_szc; 4670 faultcode_t err; 4671 int ierr; 4672 uint_t protchk, prot, vpprot; 4673 ulong_t i; 4674 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 4675 anon_sync_obj_t cookie; 4676 int adjszc_chk; 4677 int pgflags = (svd->tr_state == SEGVN_TR_ON) ?
PG_LOCAL : 0; 4678 4679 ASSERT(szc != 0); 4680 ASSERT(amp != NULL); 4681 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 4682 ASSERT(!(svd->flags & MAP_NORESERVE)); 4683 ASSERT(type != F_SOFTUNLOCK); 4684 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4685 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF); 4686 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4687 4688 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 4689 4690 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]); 4691 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]); 4692 4693 if (svd->flags & MAP_TEXT) { 4694 hat_flag |= HAT_LOAD_TEXT; 4695 } 4696 4697 if (svd->pageprot) { 4698 switch (rw) { 4699 case S_READ: 4700 protchk = PROT_READ; 4701 break; 4702 case S_WRITE: 4703 protchk = PROT_WRITE; 4704 break; 4705 case S_EXEC: 4706 protchk = PROT_EXEC; 4707 break; 4708 case S_OTHER: 4709 default: 4710 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4711 break; 4712 } 4713 VM_STAT_ADD(segvnvmstats.fltanpages[2]); 4714 } else { 4715 prot = svd->prot; 4716 /* caller has already done segment level protection check. */ 4717 } 4718 4719 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP); 4720 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 4721 for (;;) { 4722 adjszc_chk = 0; 4723 for (; a < lpgeaddr; a += pgsz, aindx += pages) { 4724 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 4725 VM_STAT_ADD(segvnvmstats.fltanpages[3]); 4726 ASSERT(vpage != NULL); 4727 prot = VPP_PROT(vpage); 4728 ASSERT(sameprot(seg, a, maxpgsz)); 4729 if ((prot & protchk) == 0) { 4730 err = FC_PROT; 4731 goto error; 4732 } 4733 } 4734 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) && 4735 pgsz < maxpgsz) { 4736 ASSERT(a > lpgaddr); 4737 szc = seg->s_szc; 4738 pgsz = maxpgsz; 4739 pages = btop(pgsz); 4740 ASSERT(IS_P2ALIGNED(aindx, pages)); 4741 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, 4742 pgsz); 4743 } 4744 if (type == F_SOFTLOCK) { 4745 atomic_add_long((ulong_t *)&svd->softlockcnt, 4746 pages); 4747 } 4748 anon_array_enter(amp, aindx, &cookie); 4749 ppa_szc = (uint_t)-1; 4750 ierr = anon_map_getpages(amp, aindx, szc, seg, a, 4751 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow, 4752 segvn_anypgsz, pgflags, svd->cred); 4753 if (ierr != 0) { 4754 anon_array_exit(&cookie); 4755 VM_STAT_ADD(segvnvmstats.fltanpages[4]); 4756 if (type == F_SOFTLOCK) { 4757 atomic_add_long( 4758 (ulong_t *)&svd->softlockcnt, 4759 -pages); 4760 } 4761 if (ierr > 0) { 4762 VM_STAT_ADD(segvnvmstats.fltanpages[6]); 4763 err = FC_MAKE_ERR(ierr); 4764 goto error; 4765 } 4766 break; 4767 } 4768 4769 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4770 4771 ASSERT(segtype == MAP_SHARED || 4772 ppa[0]->p_szc <= szc); 4773 ASSERT(segtype == MAP_PRIVATE || 4774 ppa[0]->p_szc >= szc); 4775 4776 /* 4777 * Handle pages that have been marked for migration 4778 */ 4779 if (lgrp_optimizations()) 4780 page_migrate(seg, a, ppa, pages); 4781 4782 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 4783 4784 if (segtype == MAP_SHARED) { 4785 vpprot |= PROT_WRITE; 4786 } 4787 4788 hat_memload_array(hat, a, pgsz, ppa, 4789 prot & vpprot, hat_flag); 4790 4791 if (hat_flag & HAT_LOAD_LOCK) { 4792 VM_STAT_ADD(segvnvmstats.fltanpages[7]); 4793 } else { 4794 VM_STAT_ADD(segvnvmstats.fltanpages[8]); 4795 for (i = 0; i < pages; i++) 4796 page_unlock(ppa[i]); 4797 } 4798 if (vpage != NULL) 4799 vpage += pages; 4800 4801 anon_array_exit(&cookie); 4802 adjszc_chk = 1; 4803 } 4804 if (a == lpgeaddr) 4805 break; 4806 ASSERT(a < lpgeaddr); 4807 /* 4808 * ierr == -1 means we failed to
allocate a large page, 4809 * so do a size down operation. 4810 * 4811 * ierr == -2 means some other process that privately shares 4812 * pages with this process has allocated a larger page and we 4813 * need to retry with larger pages. So do a size up 4814 * operation. This relies on the fact that large pages are 4815 * never partially shared, i.e., if we share any constituent 4816 * page of a large page with another process we must share the 4817 * entire large page. Note this cannot happen for SOFTLOCK 4818 * case, unless current address (a) is at the beginning of the 4819 * next page size boundary because the other process couldn't 4820 * have relocated locked pages. 4821 */ 4822 ASSERT(ierr == -1 || ierr == -2); 4823 4824 if (segvn_anypgsz) { 4825 ASSERT(ierr == -2 || szc != 0); 4826 ASSERT(ierr == -1 || szc < seg->s_szc); 4827 szc = (ierr == -1) ? szc - 1 : szc + 1; 4828 } else { 4829 /* 4830 * For non COW faults and segvn_anypgsz == 0 4831 * we need to be careful not to loop forever 4832 * if existing page is found with szc other 4833 * than 0 or seg->s_szc. This could be due 4834 * to page relocations on behalf of DR or 4835 * more likely large page creation. For this 4836 * case simply re-size to existing page's szc 4837 * if returned by anon_map_getpages(). 4838 */ 4839 if (ppa_szc == (uint_t)-1) { 4840 szc = (ierr == -1) ? 0 : seg->s_szc; 4841 } else { 4842 ASSERT(ppa_szc <= seg->s_szc); 4843 ASSERT(ierr == -2 || ppa_szc < szc); 4844 ASSERT(ierr == -1 || ppa_szc > szc); 4845 szc = ppa_szc; 4846 } 4847 } 4848 4849 pgsz = page_get_pagesize(szc); 4850 pages = btop(pgsz); 4851 ASSERT(type != F_SOFTLOCK || ierr == -1 || 4852 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz))); 4853 if (type == F_SOFTLOCK) { 4854 /* 4855 * For softlocks we cannot reduce the fault area 4856 * (calculated based on the largest page size for this 4857 * segment) for size down and a is already next 4858 * page size aligned as asserted above for size 4859 * ups. Therefore just continue in case of softlock. 4860 */ 4861 VM_STAT_ADD(segvnvmstats.fltanpages[9]); 4862 continue; /* keep lint happy */ 4863 } else if (ierr == -2) { 4864 4865 /* 4866 * Size up case. Note lpgaddr may only be needed for 4867 * softlock case so we don't adjust it here. 4868 */ 4869 VM_STAT_ADD(segvnvmstats.fltanpages[10]); 4870 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4871 ASSERT(a >= lpgaddr); 4872 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4873 aindx = svd->anon_index + seg_page(seg, a); 4874 vpage = (svd->vpage != NULL) ? 4875 &svd->vpage[seg_page(seg, a)] : NULL; 4876 } else { 4877 /* 4878 * Size down case. Note lpgaddr may only be needed for 4879 * softlock case so we don't adjust it here. 4880 */ 4881 VM_STAT_ADD(segvnvmstats.fltanpages[11]); 4882 ASSERT(IS_P2ALIGNED(a, pgsz)); 4883 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4884 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4885 ASSERT(a < lpgeaddr); 4886 if (a < addr) { 4887 /* 4888 * The beginning of the large page region can 4889 * be pulled to the right to make a smaller 4890 * region. We haven't yet faulted a single 4891 * page. 4892 */ 4893 VM_STAT_ADD(segvnvmstats.fltanpages[12]); 4894 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4895 ASSERT(a >= lpgaddr); 4896 aindx = svd->anon_index + seg_page(seg, a); 4897 vpage = (svd->vpage != NULL) ?
4898 &svd->vpage[seg_page(seg, a)] : NULL; 4899 } 4900 } 4901 } 4902 VM_STAT_ADD(segvnvmstats.fltanpages[13]); 4903 ANON_LOCK_EXIT(&amp->a_rwlock); 4904 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4905 return (0); 4906 error: 4907 VM_STAT_ADD(segvnvmstats.fltanpages[14]); 4908 ANON_LOCK_EXIT(&amp->a_rwlock); 4909 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4910 if (type == F_SOFTLOCK && a > lpgaddr) { 4911 VM_STAT_ADD(segvnvmstats.fltanpages[15]); 4912 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4913 } 4914 return (err); 4915 } 4916 4917 int fltadvice = 1; /* set to free behind pages for sequential access */ 4918 4919 /* 4920 * This routine is called via a machine specific fault handling routine. 4921 * It is also called by software routines wishing to lock or unlock 4922 * a range of addresses. 4923 * 4924 * Here is the basic algorithm: 4925 * If unlocking 4926 * Call segvn_softunlock 4927 * Return 4928 * endif 4929 * Checking and set up work 4930 * If we will need some non-anonymous pages 4931 * Call VOP_GETPAGE over the range of non-anonymous pages 4932 * endif 4933 * Loop over all addresses requested 4934 * Call segvn_faultpage passing in page list 4935 * to load up translations and handle anonymous pages 4936 * endloop 4937 * Load up translation to any additional pages in page list not 4938 * already handled that fit into this segment 4939 */ 4940 static faultcode_t 4941 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, 4942 enum fault_type type, enum seg_rw rw) 4943 { 4944 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4945 page_t **plp, **ppp, *pp; 4946 u_offset_t off; 4947 caddr_t a; 4948 struct vpage *vpage; 4949 uint_t vpprot, prot; 4950 int err; 4951 page_t *pl[PVN_GETPAGE_NUM + 1]; 4952 size_t plsz, pl_alloc_sz; 4953 size_t page; 4954 ulong_t anon_index; 4955 struct anon_map *amp; 4956 int dogetpage = 0; 4957 caddr_t lpgaddr, lpgeaddr; 4958 size_t pgsz; 4959 anon_sync_obj_t cookie; 4960 int brkcow = BREAK_COW_SHARE(rw, type, svd->type); 4961 4962 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 4963 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE); 4964 4965 /* 4966 * First handle the easy stuff 4967 */ 4968 if (type == F_SOFTUNLOCK) { 4969 if (rw == S_READ_NOCOW) { 4970 rw = S_READ; 4971 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 4972 } 4973 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4974 pgsz = (seg->s_szc == 0) ?
PAGESIZE : 4975 page_get_pagesize(seg->s_szc); 4976 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]); 4977 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 4978 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw); 4979 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4980 return (0); 4981 } 4982 4983 ASSERT(svd->tr_state == SEGVN_TR_OFF || 4984 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 4985 if (brkcow == 0) { 4986 if (svd->tr_state == SEGVN_TR_INIT) { 4987 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4988 if (svd->tr_state == SEGVN_TR_INIT) { 4989 ASSERT(svd->vp != NULL && svd->amp == NULL); 4990 ASSERT(svd->flags & MAP_TEXT); 4991 ASSERT(svd->type == MAP_PRIVATE); 4992 segvn_textrepl(seg); 4993 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4994 ASSERT(svd->tr_state != SEGVN_TR_ON || 4995 svd->amp != NULL); 4996 } 4997 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4998 } 4999 } else if (svd->tr_state != SEGVN_TR_OFF) { 5000 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5001 5002 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) { 5003 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 5004 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5005 return (FC_PROT); 5006 } 5007 5008 if (svd->tr_state == SEGVN_TR_ON) { 5009 ASSERT(svd->vp != NULL && svd->amp != NULL); 5010 segvn_textunrepl(seg, 0); 5011 ASSERT(svd->amp == NULL && 5012 svd->tr_state == SEGVN_TR_OFF); 5013 } else if (svd->tr_state != SEGVN_TR_OFF) { 5014 svd->tr_state = SEGVN_TR_OFF; 5015 } 5016 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5017 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5018 } 5019 5020 top: 5021 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5022 5023 /* 5024 * If we have the same protections for the entire segment, 5025 * insure that the access being attempted is legitimate. 5026 */ 5027 5028 if (svd->pageprot == 0) { 5029 uint_t protchk; 5030 5031 switch (rw) { 5032 case S_READ: 5033 case S_READ_NOCOW: 5034 protchk = PROT_READ; 5035 break; 5036 case S_WRITE: 5037 protchk = PROT_WRITE; 5038 break; 5039 case S_EXEC: 5040 protchk = PROT_EXEC; 5041 break; 5042 case S_OTHER: 5043 default: 5044 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 5045 break; 5046 } 5047 5048 if ((svd->prot & protchk) == 0) { 5049 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5050 return (FC_PROT); /* illegal access type */ 5051 } 5052 } 5053 5054 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5055 /* this must be SOFTLOCK S_READ fault */ 5056 ASSERT(svd->amp == NULL); 5057 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5058 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5059 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5060 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5061 /* 5062 * this must be the first ever non S_READ_NOCOW 5063 * softlock for this segment. 5064 */ 5065 ASSERT(svd->softlockcnt == 0); 5066 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5067 HAT_REGION_TEXT); 5068 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5069 } 5070 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5071 goto top; 5072 } 5073 5074 /* 5075 * We can't allow the long term use of softlocks for vmpss segments, 5076 * because in some file truncation cases we should be able to demote 5077 * the segment, which requires that there are no softlocks. The 5078 * only case where it's ok to allow a SOFTLOCK fault against a vmpss 5079 * segment is S_READ_NOCOW, where the caller holds the address space 5080 * locked as writer and calls softunlock before dropping the as lock. 5081 * S_READ_NOCOW is used by /proc to read memory from another user. 
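 * In short: a SOFTLOCK fault against a large-page vnode
 * segment is served without demotion only when rw is
 * S_READ_NOCOW and the fault covers at most one large
 * page; every other SOFTLOCK demotes the segment first
 * (see the demote logic below).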
5082 * 5083 * Another deadlock between SOFTLOCK and file truncation can happen 5084 * because segvn_fault_vnodepages() calls the FS one pagesize at 5085 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages() 5086 * can cause a deadlock because the first set of page_t's remain 5087 * locked SE_SHARED. To avoid this, we demote segments on a first 5088 * SOFTLOCK if they have a length greater than the segment's 5089 * page size. 5090 * 5091 * So for now, we only avoid demoting a segment on a SOFTLOCK when 5092 * the access type is S_READ_NOCOW and the fault length is less than 5093 * or equal to the segment's page size. While this is quite restrictive, 5094 * it should be the most common case of SOFTLOCK against a vmpss 5095 * segment. 5096 * 5097 * For S_READ_NOCOW, it's safe not to do a copy on write because the 5098 * caller makes sure no COW will be caused by another thread for a 5099 * softlocked page. 5100 */ 5101 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) { 5102 int demote = 0; 5103 5104 if (rw != S_READ_NOCOW) { 5105 demote = 1; 5106 } 5107 if (!demote && len > PAGESIZE) { 5108 pgsz = page_get_pagesize(seg->s_szc); 5109 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, 5110 lpgeaddr); 5111 if (lpgeaddr - lpgaddr > pgsz) { 5112 demote = 1; 5113 } 5114 } 5115 5116 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5117 5118 if (demote) { 5119 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5120 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5121 if (seg->s_szc != 0) { 5122 segvn_vmpss_clrszc_cnt++; 5123 ASSERT(svd->softlockcnt == 0); 5124 err = segvn_clrszc(seg); 5125 if (err) { 5126 segvn_vmpss_clrszc_err++; 5127 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5128 return (FC_MAKE_ERR(err)); 5129 } 5130 } 5131 ASSERT(seg->s_szc == 0); 5132 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5133 goto top; 5134 } 5135 } 5136 5137 /* 5138 * Check to see if we need to allocate an anon_map structure. 5139 */ 5140 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) { 5141 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5142 /* 5143 * Drop the "read" lock on the segment and acquire 5144 * the "write" version since we have to allocate the 5145 * anon_map. 5146 */ 5147 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5148 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5149 5150 if (svd->amp == NULL) { 5151 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 5152 svd->amp->a_szc = seg->s_szc; 5153 } 5154 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5155 5156 /* 5157 * Start all over again since segment protections 5158 * may have changed after we dropped the "read" lock. 5159 */ 5160 goto top; 5161 } 5162 5163 /* 5164 * S_READ_NOCOW vs S_READ distinction was 5165 * only needed for the code above. After 5166 * that we treat it as S_READ. 5167 */ 5168 if (rw == S_READ_NOCOW) { 5169 ASSERT(type == F_SOFTLOCK); 5170 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5171 rw = S_READ; 5172 } 5173 5174 amp = svd->amp; 5175 5176 /* 5177 * MADV_SEQUENTIAL work is ignored for large page segments. 
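 * (The freebehind loop below walks backward one PAGESIZE
 * page at a time, which doesn't mesh with large-page
 * mappings; large-page faults were already handed off to
 * the *_pages() routines above before this advice is
 * consulted.)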
5178 */ 5179 if (seg->s_szc != 0) { 5180 pgsz = page_get_pagesize(seg->s_szc); 5181 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 5182 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 5183 if (svd->vp == NULL) { 5184 err = segvn_fault_anonpages(hat, seg, lpgaddr, 5185 lpgeaddr, type, rw, addr, addr + len, brkcow); 5186 } else { 5187 err = segvn_fault_vnodepages(hat, seg, lpgaddr, 5188 lpgeaddr, type, rw, addr, addr + len, brkcow); 5189 if (err == IE_RETRY) { 5190 ASSERT(seg->s_szc == 0); 5191 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 5192 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5193 goto top; 5194 } 5195 } 5196 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5197 return (err); 5198 } 5199 5200 page = seg_page(seg, addr); 5201 if (amp != NULL) { 5202 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5203 anon_index = svd->anon_index + page; 5204 5205 if (type == F_PROT && rw == S_READ && 5206 svd->tr_state == SEGVN_TR_OFF && 5207 svd->type == MAP_PRIVATE && svd->pageprot == 0) { 5208 size_t index = anon_index; 5209 struct anon *ap; 5210 5211 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 5212 /* 5213 * The fast path could apply to S_WRITE also, except 5214 * that the protection fault could be caused by lazy 5215 * tlb flush when ro->rw. In this case, the pte is 5216 * RW already. But RO in the other cpu's tlb causes 5217 * the fault. Since hat_chgprot won't do anything if 5218 * pte doesn't change, we may end up faulting 5219 * indefinitely until the RO tlb entry gets replaced. 5220 */ 5221 for (a = addr; a < addr + len; a += PAGESIZE, index++) { 5222 anon_array_enter(amp, index, &cookie); 5223 ap = anon_get_ptr(amp->ahp, index); 5224 anon_array_exit(&cookie); 5225 if ((ap == NULL) || (ap->an_refcnt != 1)) { 5226 ANON_LOCK_EXIT(&amp->a_rwlock); 5227 goto slow; 5228 } 5229 } 5230 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot); 5231 ANON_LOCK_EXIT(&amp->a_rwlock); 5232 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5233 return (0); 5234 } 5235 } 5236 slow: 5237 5238 if (svd->vpage == NULL) 5239 vpage = NULL; 5240 else 5241 vpage = &svd->vpage[page]; 5242 5243 off = svd->offset + (uintptr_t)(addr - seg->s_base); 5244 5245 /* 5246 * If MADV_SEQUENTIAL has been set for the particular page we 5247 * are faulting on, free behind all pages in the segment and put 5248 * them on the free list.
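 * The loop below walks backward from the faulting page,
 * looks up each earlier page and, if it isn't locked,
 * pushes it out with VOP_PUTPAGE(B_DONTNEED|B_FREE|B_ASYNC),
 * stopping at the first page that is absent, locked, or
 * no longer marked MADV_SEQUENTIAL.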
5249 */ 5250 5251 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) { 5252 struct vpage *vpp; 5253 ulong_t fanon_index; 5254 size_t fpage; 5255 u_offset_t pgoff, fpgoff; 5256 struct vnode *fvp; 5257 struct anon *fap = NULL; 5258 5259 if (svd->advice == MADV_SEQUENTIAL || 5260 (svd->pageadvice && 5261 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) { 5262 pgoff = off - PAGESIZE; 5263 fpage = page - 1; 5264 if (vpage != NULL) 5265 vpp = &svd->vpage[fpage]; 5266 if (amp != NULL) 5267 fanon_index = svd->anon_index + fpage; 5268 5269 while (pgoff > svd->offset) { 5270 if (svd->advice != MADV_SEQUENTIAL && 5271 (!svd->pageadvice || (vpage && 5272 VPP_ADVICE(vpp) != MADV_SEQUENTIAL))) 5273 break; 5274 5275 /* 5276 * If this is an anon page, we must find the 5277 * correct <vp, offset> for it 5278 */ 5279 fap = NULL; 5280 if (amp != NULL) { 5281 ANON_LOCK_ENTER(&amp->a_rwlock, 5282 RW_READER); 5283 anon_array_enter(amp, fanon_index, 5284 &cookie); 5285 fap = anon_get_ptr(amp->ahp, 5286 fanon_index); 5287 if (fap != NULL) { 5288 swap_xlate(fap, &fvp, &fpgoff); 5289 } else { 5290 fpgoff = pgoff; 5291 fvp = svd->vp; 5292 } 5293 anon_array_exit(&cookie); 5294 ANON_LOCK_EXIT(&amp->a_rwlock); 5295 } else { 5296 fpgoff = pgoff; 5297 fvp = svd->vp; 5298 } 5299 if (fvp == NULL) 5300 break; /* XXX */ 5301 /* 5302 * Skip pages that are free or have an 5303 * "exclusive" lock. 5304 */ 5305 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED); 5306 if (pp == NULL) 5307 break; 5308 /* 5309 * We don't need the page_struct_lock to test 5310 * as this is only advisory; even if we 5311 * acquire it someone might race in and lock 5312 * the page after we unlock and before the 5313 * PUTPAGE, then VOP_PUTPAGE will do nothing. 5314 */ 5315 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) { 5316 /* 5317 * Hold the vnode before releasing 5318 * the page lock to prevent it from 5319 * being freed and re-used by some 5320 * other thread. 5321 */ 5322 VN_HOLD(fvp); 5323 page_unlock(pp); 5324 /* 5325 * We should build a page list 5326 * to kluster putpages XXX 5327 */ 5328 (void) VOP_PUTPAGE(fvp, 5329 (offset_t)fpgoff, PAGESIZE, 5330 (B_DONTNEED|B_FREE|B_ASYNC), 5331 svd->cred, NULL); 5332 VN_RELE(fvp); 5333 } else { 5334 /* 5335 * XXX - Should the loop terminate if 5336 * the page is `locked'? 5337 */ 5338 page_unlock(pp); 5339 } 5340 --vpp; 5341 --fanon_index; 5342 pgoff -= PAGESIZE; 5343 } 5344 } 5345 } 5346 5347 plp = pl; 5348 *plp = NULL; 5349 pl_alloc_sz = 0; 5350 5351 /* 5352 * See if we need to call VOP_GETPAGE for 5353 * *any* of the range being faulted on. 5354 * We can skip all of this work if there 5355 * was no original vnode. 5356 */ 5357 if (svd->vp != NULL) { 5358 u_offset_t vp_off; 5359 size_t vp_len; 5360 struct anon *ap; 5361 vnode_t *vp; 5362 5363 vp_off = off; 5364 vp_len = len; 5365 5366 if (amp == NULL) 5367 dogetpage = 1; 5368 else { 5369 /* 5370 * Only acquire reader lock to prevent amp->ahp 5371 * from being changed.
It's ok to miss pages, 5372 * hence we don't do anon_array_enter 5373 */ 5374 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 5375 ap = anon_get_ptr(amp->ahp, anon_index); 5376 5377 if (len <= PAGESIZE) 5378 /* inline non_anon() */ 5379 dogetpage = (ap == NULL); 5380 else 5381 dogetpage = non_anon(amp->ahp, anon_index, 5382 &vp_off, &vp_len); 5383 ANON_LOCK_EXIT(&amp->a_rwlock); 5384 } 5385 5386 if (dogetpage) { 5387 enum seg_rw arw; 5388 struct as *as = seg->s_as; 5389 5390 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) { 5391 /* 5392 * Page list won't fit in local array, 5393 * allocate one of the needed size. 5394 */ 5395 pl_alloc_sz = 5396 (btop(len) + 1) * sizeof (page_t *); 5397 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP); 5398 plp[0] = NULL; 5399 plsz = len; 5400 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE || 5401 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER || 5402 (((size_t)(addr + PAGESIZE) < 5403 (size_t)(seg->s_base + seg->s_size)) && 5404 hat_probe(as->a_hat, addr + PAGESIZE))) { 5405 /* 5406 * Ask VOP_GETPAGE to return the exact number 5407 * of pages if 5408 * (a) this is a COW fault, or 5409 * (b) this is a software fault, or 5410 * (c) next page is already mapped. 5411 */ 5412 plsz = len; 5413 } else { 5414 /* 5415 * Ask VOP_GETPAGE to return adjacent pages 5416 * within the segment. 5417 */ 5418 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t) 5419 ((seg->s_base + seg->s_size) - addr)); 5420 ASSERT((addr + plsz) <= 5421 (seg->s_base + seg->s_size)); 5422 } 5423 5424 /* 5425 * Need to get some non-anonymous pages. 5426 * We need to make only one call to GETPAGE to do 5427 * this to prevent certain deadlocking conditions 5428 * when we are doing locking. In this case 5429 * non_anon() should have picked up the smallest 5430 * range which includes all the non-anonymous 5431 * pages in the requested range. We have to 5432 * be careful regarding which rw flag to pass in 5433 * because on a private mapping, the underlying 5434 * object is never allowed to be written. 5435 */ 5436 if (rw == S_WRITE && svd->type == MAP_PRIVATE) { 5437 arw = S_READ; 5438 } else { 5439 arw = rw; 5440 } 5441 vp = svd->vp; 5442 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5443 "segvn_getpage:seg %p addr %p vp %p", 5444 seg, addr, vp); 5445 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len, 5446 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw, 5447 svd->cred, NULL); 5448 if (err) { 5449 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5450 segvn_pagelist_rele(plp); 5451 if (pl_alloc_sz) 5452 kmem_free(plp, pl_alloc_sz); 5453 return (FC_MAKE_ERR(err)); 5454 } 5455 if (svd->type == MAP_PRIVATE) 5456 vpprot &= ~PROT_WRITE; 5457 } 5458 } 5459 5460 /* 5461 * N.B. at this time the plp array has all the needed non-anon 5462 * pages in addition to (possibly) having some adjacent pages. 5463 */ 5464 5465 /* 5466 * Always acquire the anon_array_lock to prevent 5467 * 2 threads from allocating separate anon slots for 5468 * the same "addr". 5469 * 5470 * If this is a copy-on-write fault and we don't already 5471 * have the anon_array_lock, acquire it to prevent the 5472 * fault routine from handling multiple copy-on-write faults 5473 * on the same "addr" in the same address space. 5474 * 5475 * Only one thread should deal with the fault since after 5476 * it is handled, the other threads can acquire a translation 5477 * to the newly created private page. This prevents two or 5478 * more threads from creating different private pages for the 5479 * same fault.
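 * For example, if two threads took a write fault on the
 * same private address at once and neither serialized on
 * anon_array_enter(), each could allocate its own anon
 * slot and private page, and the store made through one
 * thread's mapping would land on a page that is then
 * discarded.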
5480 * 5481 * We grab "serialization" lock here if this is a MAP_PRIVATE segment 5482 * to prevent deadlock between this thread and another thread 5483 * which has soft-locked this page and wants to acquire serial_lock. 5484 * ( bug 4026339 ) 5485 * 5486 * The fix for bug 4026339 becomes unnecessary when using the 5487 * locking scheme with per amp rwlock and a global set of hash 5488 * lock, anon_array_lock. If we steal a vnode page when low 5489 * on memory and upgrade the page lock through page_rename, 5490 * then the page is PAGE_HANDLED, nothing needs to be done 5491 * for this page after returning from segvn_faultpage. 5492 * 5493 * But really, the page lock should be downgraded after 5494 * the stolen page is page_rename'd. 5495 */ 5496 5497 if (amp != NULL) 5498 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 5499 5500 /* 5501 * Ok, now loop over the address range and handle faults 5502 */ 5503 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) { 5504 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot, 5505 type, rw, brkcow); 5506 if (err) { 5507 if (amp != NULL) 5508 ANON_LOCK_EXIT(&amp->a_rwlock); 5509 if (type == F_SOFTLOCK && a > addr) { 5510 segvn_softunlock(seg, addr, (a - addr), 5511 S_OTHER); 5512 } 5513 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5514 segvn_pagelist_rele(plp); 5515 if (pl_alloc_sz) 5516 kmem_free(plp, pl_alloc_sz); 5517 return (err); 5518 } 5519 if (vpage) { 5520 vpage++; 5521 } else if (svd->vpage) { 5522 page = seg_page(seg, addr); 5523 vpage = &svd->vpage[++page]; 5524 } 5525 } 5526 5527 /* Didn't get pages from the underlying fs so we're done */ 5528 if (!dogetpage) 5529 goto done; 5530 5531 /* 5532 * Now handle any other pages in the list returned. 5533 * If the page can be used, load up the translations now. 5534 * Note that the for loop will only be entered if "plp" 5535 * is pointing to a non-NULL page pointer which means that 5536 * VOP_GETPAGE() was called and vpprot has been initialized. 5537 */ 5538 if (svd->pageprot == 0) 5539 prot = svd->prot & vpprot; 5540 5541 5542 /* 5543 * Large Files: diff should be unsigned value because we started 5544 * supporting > 2GB segment sizes from 2.5.1 and when a 5545 * large file of size > 2GB gets mapped to address space 5546 * the diff value can be > 2GB. 5547 */ 5548 5549 for (ppp = plp; (pp = *ppp) != NULL; ppp++) { 5550 size_t diff; 5551 struct anon *ap; 5552 int anon_index; 5553 anon_sync_obj_t cookie; 5554 int hat_flag = HAT_LOAD_ADV; 5555 5556 if (svd->flags & MAP_TEXT) { 5557 hat_flag |= HAT_LOAD_TEXT; 5558 } 5559 5560 if (pp == PAGE_HANDLED) 5561 continue; 5562 5563 if (svd->tr_state != SEGVN_TR_ON && 5564 pp->p_offset >= svd->offset && 5565 pp->p_offset < svd->offset + seg->s_size) { 5566 5567 diff = pp->p_offset - svd->offset; 5568 5569 /* 5570 * Large Files: Following is the assertion 5571 * validating the above cast. 5572 */ 5573 ASSERT(svd->vp == pp->p_vnode); 5574 5575 page = btop(diff); 5576 if (svd->pageprot) 5577 prot = VPP_PROT(&svd->vpage[page]) & vpprot; 5578 5579 /* 5580 * Prevent other threads in the address space from 5581 * creating private pages (i.e., allocating anon slots) 5582 * while we are in the process of loading translations 5583 * to additional pages returned by the underlying 5584 * object.
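 * This is done below by calling anon_array_enter() on
 * each page before checking anon_get_ptr(); the
 * translation is loaded only when no anon slot exists
 * for that index.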
5585 */ 5586 if (amp != NULL) { 5587 anon_index = svd->anon_index + page; 5588 anon_array_enter(amp, anon_index, &cookie); 5589 ap = anon_get_ptr(amp->ahp, anon_index); 5590 } 5591 if ((amp == NULL) || (ap == NULL)) { 5592 if (IS_VMODSORT(pp->p_vnode) || 5593 enable_mbit_wa) { 5594 if (rw == S_WRITE) 5595 hat_setmod(pp); 5596 else if (rw != S_OTHER && 5597 !hat_ismod(pp)) 5598 prot &= ~PROT_WRITE; 5599 } 5600 /* 5601 * Skip mapping read ahead pages marked 5602 * for migration, so they will get migrated 5603 * properly on fault 5604 */ 5605 ASSERT(amp == NULL || 5606 svd->rcookie == HAT_INVALID_REGION_COOKIE); 5607 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) { 5608 hat_memload_region(hat, 5609 seg->s_base + diff, 5610 pp, prot, hat_flag, 5611 svd->rcookie); 5612 } 5613 } 5614 if (amp != NULL) 5615 anon_array_exit(&cookie); 5616 } 5617 page_unlock(pp); 5618 } 5619 done: 5620 if (amp != NULL) 5621 ANON_LOCK_EXIT(&amp->a_rwlock); 5622 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5623 if (pl_alloc_sz) 5624 kmem_free(plp, pl_alloc_sz); 5625 return (0); 5626 } 5627 5628 /* 5629 * This routine is used to start I/O on pages asynchronously. XXX it will 5630 * only create PAGESIZE pages. At fault time they will be relocated into 5631 * larger pages. 5632 */ 5633 static faultcode_t 5634 segvn_faulta(struct seg *seg, caddr_t addr) 5635 { 5636 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5637 int err; 5638 struct anon_map *amp; 5639 vnode_t *vp; 5640 5641 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5642 5643 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5644 if ((amp = svd->amp) != NULL) { 5645 struct anon *ap; 5646 5647 /* 5648 * Reader lock to prevent amp->ahp from being changed. 5649 * This is advisory, it's ok to miss a page, so 5650 * we don't take the anon_array_enter lock.
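 * (segvn_faulta is reached via as_faulta, e.g. for
 * fault-ahead requests such as madvise(MADV_WILLNEED).)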
5651 */ 5652 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 5653 if ((ap = anon_get_ptr(amp->ahp, 5654 svd->anon_index + seg_page(seg, addr))) != NULL) { 5655 5656 err = anon_getpage(&ap, NULL, NULL, 5657 0, seg, addr, S_READ, svd->cred); 5658 5659 ANON_LOCK_EXIT(&amp->a_rwlock); 5660 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5661 if (err) 5662 return (FC_MAKE_ERR(err)); 5663 return (0); 5664 } 5665 ANON_LOCK_EXIT(&amp->a_rwlock); 5666 } 5667 5668 if (svd->vp == NULL) { 5669 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5670 return (0); /* zfod page - do nothing now */ 5671 } 5672 5673 vp = svd->vp; 5674 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5675 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp); 5676 err = VOP_GETPAGE(vp, 5677 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)), 5678 PAGESIZE, NULL, NULL, 0, seg, addr, 5679 S_OTHER, svd->cred, NULL); 5680 5681 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5682 if (err) 5683 return (FC_MAKE_ERR(err)); 5684 return (0); 5685 } 5686 5687 static int 5688 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 5689 { 5690 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5691 struct vpage *cvp, *svp, *evp; 5692 struct vnode *vp; 5693 size_t pgsz; 5694 pgcnt_t pgcnt; 5695 anon_sync_obj_t cookie; 5696 int unload_done = 0; 5697 5698 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5699 5700 if ((svd->maxprot & prot) != prot) 5701 return (EACCES); /* violated maxprot */ 5702 5703 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5704 5705 /* return if prot is the same */ 5706 if (!svd->pageprot && svd->prot == prot) { 5707 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5708 return (0); 5709 } 5710 5711 /* 5712 * Since we change protections we first have to flush the cache. 5713 * This makes sure all the pagelock calls have to recheck 5714 * protections. 5715 */ 5716 if (svd->softlockcnt > 0) { 5717 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5718 5719 /* 5720 * If this is a shared segment, a non-zero softlockcnt 5721 * means locked pages are still in use. 5722 */ 5723 if (svd->type == MAP_SHARED) { 5724 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5725 return (EAGAIN); 5726 } 5727 5728 /* 5729 * Since we do have the segvn writers lock nobody can fill 5730 * the cache with entries belonging to this seg during 5731 * the purge. The flush either succeeds or we still have 5732 * pending I/Os.
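 * segvn_purge() below drops this segment's entries from
 * the pagelock cache; if softlockcnt is still non-zero
 * afterwards, real SOFTLOCK users remain and the caller
 * gets EAGAIN.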
5733 */ 5734 segvn_purge(seg); 5735 if (svd->softlockcnt > 0) { 5736 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5737 return (EAGAIN); 5738 } 5739 } 5740 5741 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5742 ASSERT(svd->amp == NULL); 5743 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5744 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5745 HAT_REGION_TEXT); 5746 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5747 unload_done = 1; 5748 } else if (svd->tr_state == SEGVN_TR_INIT) { 5749 svd->tr_state = SEGVN_TR_OFF; 5750 } else if (svd->tr_state == SEGVN_TR_ON) { 5751 ASSERT(svd->amp != NULL); 5752 segvn_textunrepl(seg, 0); 5753 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5754 unload_done = 1; 5755 } 5756 5757 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED && 5758 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) { 5759 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 5760 segvn_inval_trcache(svd->vp); 5761 } 5762 if (seg->s_szc != 0) { 5763 int err; 5764 pgsz = page_get_pagesize(seg->s_szc); 5765 pgcnt = pgsz >> PAGESHIFT; 5766 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 5767 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 5768 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5769 ASSERT(seg->s_base != addr || seg->s_size != len); 5770 /* 5771 * If we are holding the as lock as a reader then 5772 * we need to return IE_RETRY and let the as 5773 * layer drop and re-acquire the lock as a writer. 5774 */ 5775 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) 5776 return (IE_RETRY); 5777 VM_STAT_ADD(segvnvmstats.demoterange[1]); 5778 if (svd->type == MAP_PRIVATE || svd->vp != NULL) { 5779 err = segvn_demote_range(seg, addr, len, 5780 SDR_END, 0); 5781 } else { 5782 uint_t szcvec = map_pgszcvec(seg->s_base, 5783 pgsz, (uintptr_t)seg->s_base, 5784 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0); 5785 err = segvn_demote_range(seg, addr, len, 5786 SDR_END, szcvec); 5787 } 5788 if (err == 0) 5789 return (IE_RETRY); 5790 if (err == ENOMEM) 5791 return (IE_NOMEM); 5792 return (err); 5793 } 5794 } 5795 5796 5797 /* 5798 * If it's a private mapping and we're making it writable then we 5799 * may have to reserve the additional swap space now. If we are 5800 * making writable only a part of the segment then we use its vpage 5801 * array to keep a record of the pages for which we have reserved 5802 * swap. In this case we set the pageswap field in the segment's 5803 * segvn structure to record this. 5804 * 5805 * If it's a private mapping to a file (i.e., vp != NULL) and we're 5806 * removing write permission on the entire segment and we haven't 5807 * modified any pages, we can release the swap space. 5808 */ 5809 if (svd->type == MAP_PRIVATE) { 5810 if (prot & PROT_WRITE) { 5811 if (!(svd->flags & MAP_NORESERVE) && 5812 !(svd->swresv && svd->pageswap == 0)) { 5813 size_t sz = 0; 5814 5815 /* 5816 * Start by determining how much swap 5817 * space is required. 5818 */ 5819 if (addr == seg->s_base && 5820 len == seg->s_size && 5821 svd->pageswap == 0) { 5822 /* The whole segment */ 5823 sz = seg->s_size; 5824 } else { 5825 /* 5826 * Make sure that the vpage array 5827 * exists, and make a note of the 5828 * range of elements corresponding 5829 * to len. 
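 * For example, if len covers three pages and one of them
 * already has VPP_ISSWAPRES set from an earlier call,
 * only two pages' worth (2 << PAGESHIFT bytes) of swap
 * is reserved below and the two newly covered vpages
 * are marked.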
5830 */ 5831 segvn_vpage(seg); 5832 if (svd->vpage == NULL) { 5833 SEGVN_LOCK_EXIT(seg->s_as, 5834 &svd->lock); 5835 return (ENOMEM); 5836 } 5837 svp = &svd->vpage[seg_page(seg, addr)]; 5838 evp = &svd->vpage[seg_page(seg, 5839 addr + len)]; 5840 5841 if (svd->pageswap == 0) { 5842 /* 5843 * This is the first time we've 5844 * asked for a part of this 5845 * segment, so we need to 5846 * reserve everything we've 5847 * been asked for. 5848 */ 5849 sz = len; 5850 } else { 5851 /* 5852 * We have to count the number 5853 * of pages required. 5854 */ 5855 for (cvp = svp; cvp < evp; 5856 cvp++) { 5857 if (!VPP_ISSWAPRES(cvp)) 5858 sz++; 5859 } 5860 sz <<= PAGESHIFT; 5861 } 5862 } 5863 5864 /* Try to reserve the necessary swap. */ 5865 if (anon_resv_zone(sz, 5866 seg->s_as->a_proc->p_zone) == 0) { 5867 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5868 return (IE_NOMEM); 5869 } 5870 5871 /* 5872 * Make a note of how much swap space 5873 * we've reserved. 5874 */ 5875 if (svd->pageswap == 0 && sz == seg->s_size) { 5876 svd->swresv = sz; 5877 } else { 5878 ASSERT(svd->vpage != NULL); 5879 svd->swresv += sz; 5880 svd->pageswap = 1; 5881 for (cvp = svp; cvp < evp; cvp++) { 5882 if (!VPP_ISSWAPRES(cvp)) 5883 VPP_SETSWAPRES(cvp); 5884 } 5885 } 5886 } 5887 } else { 5888 /* 5889 * Swap space is released only if this segment 5890 * does not map anonymous memory, since read faults 5891 * on such segments still need an anon slot to read 5892 * in the data. 5893 */ 5894 if (svd->swresv != 0 && svd->vp != NULL && 5895 svd->amp == NULL && addr == seg->s_base && 5896 len == seg->s_size && svd->pageprot == 0) { 5897 ASSERT(svd->pageswap == 0); 5898 anon_unresv_zone(svd->swresv, 5899 seg->s_as->a_proc->p_zone); 5900 svd->swresv = 0; 5901 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5902 "anon proc:%p %lu %u", seg, 0, 0); 5903 } 5904 } 5905 } 5906 5907 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) { 5908 if (svd->prot == prot) { 5909 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5910 return (0); /* all done */ 5911 } 5912 svd->prot = (uchar_t)prot; 5913 } else if (svd->type == MAP_PRIVATE) { 5914 struct anon *ap = NULL; 5915 page_t *pp; 5916 u_offset_t offset, off; 5917 struct anon_map *amp; 5918 ulong_t anon_idx = 0; 5919 5920 /* 5921 * A vpage structure exists or else the change does not 5922 * involve the entire segment. Establish a vpage structure 5923 * if none is there. Then, for each page in the range, 5924 * adjust its individual permissions. Note that write- 5925 * enabling a MAP_PRIVATE page can affect the claims for 5926 * locked down memory. Overcommitting memory terminates 5927 * the operation. 5928 */ 5929 segvn_vpage(seg); 5930 if (svd->vpage == NULL) { 5931 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5932 return (ENOMEM); 5933 } 5934 svd->pageprot = 1; 5935 if ((amp = svd->amp) != NULL) { 5936 anon_idx = svd->anon_index + seg_page(seg, addr); 5937 ASSERT(seg->s_szc == 0 || 5938 IS_P2ALIGNED(anon_idx, pgcnt)); 5939 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 5940 } 5941 5942 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 5943 evp = &svd->vpage[seg_page(seg, addr + len)]; 5944 5945 /* 5946 * See Statement at the beginning of segvn_lockop regarding 5947 * the way cowcnts and lckcnts are handled.
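 * In the loop below, write-enabling a locked private
 * page moves its lock claim with page_addclaim() and
 * write-disabling moves it back with page_subclaim();
 * if a claim would overcommit locked memory the call
 * fails and the loop terminates early.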
5948 */ 5949 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5950 5951 if (seg->s_szc != 0) { 5952 if (amp != NULL) { 5953 anon_array_enter(amp, anon_idx, 5954 &cookie); 5955 } 5956 if (IS_P2ALIGNED(anon_idx, pgcnt) && 5957 !segvn_claim_pages(seg, svp, offset, 5958 anon_idx, prot)) { 5959 if (amp != NULL) { 5960 anon_array_exit(&cookie); 5961 } 5962 break; 5963 } 5964 if (amp != NULL) { 5965 anon_array_exit(&cookie); 5966 } 5967 anon_idx++; 5968 } else { 5969 if (amp != NULL) { 5970 anon_array_enter(amp, anon_idx, 5971 &cookie); 5972 ap = anon_get_ptr(amp->ahp, anon_idx++); 5973 } 5974 5975 if (VPP_ISPPLOCK(svp) && 5976 VPP_PROT(svp) != prot) { 5977 5978 if (amp == NULL || ap == NULL) { 5979 vp = svd->vp; 5980 off = offset; 5981 } else 5982 swap_xlate(ap, &vp, &off); 5983 if (amp != NULL) 5984 anon_array_exit(&cookie); 5985 5986 if ((pp = page_lookup(vp, off, 5987 SE_SHARED)) == NULL) { 5988 panic("segvn_setprot: no page"); 5989 /*NOTREACHED*/ 5990 } 5991 ASSERT(seg->s_szc == 0); 5992 if ((VPP_PROT(svp) ^ prot) & 5993 PROT_WRITE) { 5994 if (prot & PROT_WRITE) { 5995 if (!page_addclaim( 5996 pp)) { 5997 page_unlock(pp); 5998 break; 5999 } 6000 } else { 6001 if (!page_subclaim( 6002 pp)) { 6003 page_unlock(pp); 6004 break; 6005 } 6006 } 6007 } 6008 page_unlock(pp); 6009 } else if (amp != NULL) 6010 anon_array_exit(&cookie); 6011 } 6012 VPP_SETPROT(svp, prot); 6013 offset += PAGESIZE; 6014 } 6015 if (amp != NULL) 6016 ANON_LOCK_EXIT(&amp->a_rwlock); 6017 6018 /* 6019 * Did we terminate prematurely? If so, simply unload 6020 * the translations to the things we've updated so far. 6021 */ 6022 if (svp != evp) { 6023 if (unload_done) { 6024 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6025 return (IE_NOMEM); 6026 } 6027 len = (svp - &svd->vpage[seg_page(seg, addr)]) * 6028 PAGESIZE; 6029 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz)); 6030 if (len != 0) 6031 hat_unload(seg->s_as->a_hat, addr, 6032 len, HAT_UNLOAD); 6033 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6034 return (IE_NOMEM); 6035 } 6036 } else { 6037 segvn_vpage(seg); 6038 if (svd->vpage == NULL) { 6039 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6040 return (ENOMEM); 6041 } 6042 svd->pageprot = 1; 6043 evp = &svd->vpage[seg_page(seg, addr + len)]; 6044 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 6045 VPP_SETPROT(svp, prot); 6046 } 6047 } 6048 6049 if (unload_done) { 6050 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6051 return (0); 6052 } 6053 6054 if (((prot & PROT_WRITE) != 0 && 6055 (svd->vp != NULL || svd->type == MAP_PRIVATE)) || 6056 (prot & ~PROT_USER) == PROT_NONE) { 6057 /* 6058 * Either private or shared data with write access (in 6059 * which case we need to throw out all former translations 6060 * so that we get the right translations set up on fault 6061 * and we don't allow write access to any copy-on-write pages 6062 * that might be around, or write access to pages 6063 * representing holes in a file), or we don't have permission 6064 * to access the memory at all (in which case we have to 6065 * unload any current translations that might exist). 6066 */ 6067 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 6068 } else { 6069 /* 6070 * A shared mapping or a private mapping in which write 6071 * protection is going to be denied - just change all the 6072 * protections over the range of addresses in question. 6073 * segvn does not support any attributes other 6074 * than prot, so we can use hat_chgattr.
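 * A sketch of the resulting choice (both calls appear just below):
 *	hat_unload(hat, addr, len, HAT_UNLOAD);  -- rebuild at fault time
 *	hat_chgattr(hat, addr, len, prot);       -- edit translations in place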
6075 */ 6076 hat_chgattr(seg->s_as->a_hat, addr, len, prot); 6077 } 6078 6079 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6080 6081 return (0); 6082 } 6083 6084 /* 6085 * segvn_setpagesize is called via segop_setpagesize from as_setpagesize, 6086 * to determine if the seg is capable of mapping the requested szc. 6087 */ 6088 static int 6089 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 6090 { 6091 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6092 struct segvn_data *nsvd; 6093 struct anon_map *amp = svd->amp; 6094 struct seg *nseg; 6095 caddr_t eaddr = addr + len, a; 6096 size_t pgsz = page_get_pagesize(szc); 6097 pgcnt_t pgcnt = page_get_pagecnt(szc); 6098 int err; 6099 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base); 6100 6101 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6102 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6103 6104 if (seg->s_szc == szc || segvn_lpg_disable != 0) { 6105 return (0); 6106 } 6107 6108 /* 6109 * addr should always be pgsz aligned but eaddr may be misaligned if 6110 * it's at the end of the segment. 6111 * 6112 * XXX we should assert this condition since as_setpagesize() logic 6113 * guarantees it. 6114 */ 6115 if (!IS_P2ALIGNED(addr, pgsz) || 6116 (!IS_P2ALIGNED(eaddr, pgsz) && 6117 eaddr != seg->s_base + seg->s_size)) { 6118 6119 segvn_setpgsz_align_err++; 6120 return (EINVAL); 6121 } 6122 6123 if (amp != NULL && svd->type == MAP_SHARED) { 6124 ulong_t an_idx = svd->anon_index + seg_page(seg, addr); 6125 if (!IS_P2ALIGNED(an_idx, pgcnt)) { 6126 6127 segvn_setpgsz_anon_align_err++; 6128 return (EINVAL); 6129 } 6130 } 6131 6132 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas || 6133 szc > segvn_maxpgszc) { 6134 return (EINVAL); 6135 } 6136 6137 /* paranoid check */ 6138 if (svd->vp != NULL && 6139 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) { 6140 return (EINVAL); 6141 } 6142 6143 if (seg->s_szc == 0 && svd->vp != NULL && 6144 map_addr_vacalign_check(addr, off)) { 6145 return (EINVAL); 6146 } 6147 6148 /* 6149 * Check that protections are the same within new page 6150 * size boundaries. 6151 */ 6152 if (svd->pageprot) { 6153 for (a = addr; a < eaddr; a += pgsz) { 6154 if ((a + pgsz) > eaddr) { 6155 if (!sameprot(seg, a, eaddr - a)) { 6156 return (EINVAL); 6157 } 6158 } else { 6159 if (!sameprot(seg, a, pgsz)) { 6160 return (EINVAL); 6161 } 6162 } 6163 } 6164 } 6165 6166 /* 6167 * Since we are changing the page size we first have to flush 6168 * the cache. This forces all the pagelock calls to 6169 * recheck protections. 6170 */ 6171 if (svd->softlockcnt > 0) { 6172 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6173 6174 /* 6175 * If this is a shared segment, a nonzero softlockcnt 6176 * means locked pages are still in use. 6177 */ 6178 if (svd->type == MAP_SHARED) { 6179 return (EAGAIN); 6180 } 6181 6182 /* 6183 * Since we do have the segvn writer's lock, nobody can fill 6184 * the cache with entries belonging to this seg during 6185 * the purge. The flush either succeeds or we still have 6186 * pending I/Os.
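 * A condensed sketch of the purge-and-recheck pattern used here and
 * elsewhere in this file:
 *	segvn_purge(seg);		-- flush this seg's pcache entries
 *	if (svd->softlockcnt > 0)	-- entries pinned by pending I/O
 *		return (EAGAIN);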
6187 */ 6188 segvn_purge(seg); 6189 if (svd->softlockcnt > 0) { 6190 return (EAGAIN); 6191 } 6192 } 6193 6194 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6195 ASSERT(svd->amp == NULL); 6196 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6197 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6198 HAT_REGION_TEXT); 6199 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6200 } else if (svd->tr_state == SEGVN_TR_INIT) { 6201 svd->tr_state = SEGVN_TR_OFF; 6202 } else if (svd->tr_state == SEGVN_TR_ON) { 6203 ASSERT(svd->amp != NULL); 6204 segvn_textunrepl(seg, 1); 6205 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6206 amp = NULL; 6207 } 6208 6209 /* 6210 * Operation for sub range of existing segment. 6211 */ 6212 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) { 6213 if (szc < seg->s_szc) { 6214 VM_STAT_ADD(segvnvmstats.demoterange[2]); 6215 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0); 6216 if (err == 0) { 6217 return (IE_RETRY); 6218 } 6219 if (err == ENOMEM) { 6220 return (IE_NOMEM); 6221 } 6222 return (err); 6223 } 6224 if (addr != seg->s_base) { 6225 nseg = segvn_split_seg(seg, addr); 6226 if (eaddr != (nseg->s_base + nseg->s_size)) { 6227 /* eaddr is szc aligned */ 6228 (void) segvn_split_seg(nseg, eaddr); 6229 } 6230 return (IE_RETRY); 6231 } 6232 if (eaddr != (seg->s_base + seg->s_size)) { 6233 /* eaddr is szc aligned */ 6234 (void) segvn_split_seg(seg, eaddr); 6235 } 6236 return (IE_RETRY); 6237 } 6238 6239 /* 6240 * Break any low level sharing and reset seg->s_szc to 0. 6241 */ 6242 if ((err = segvn_clrszc(seg)) != 0) { 6243 if (err == ENOMEM) { 6244 err = IE_NOMEM; 6245 } 6246 return (err); 6247 } 6248 ASSERT(seg->s_szc == 0); 6249 6250 /* 6251 * If the end of the current segment is not pgsz aligned 6252 * then attempt to concatenate with the next segment. 6253 */ 6254 if (!IS_P2ALIGNED(eaddr, pgsz)) { 6255 nseg = AS_SEGNEXT(seg->s_as, seg); 6256 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) { 6257 return (ENOMEM); 6258 } 6259 if (nseg->s_ops != &segvn_ops) { 6260 return (EINVAL); 6261 } 6262 nsvd = (struct segvn_data *)nseg->s_data; 6263 if (nsvd->softlockcnt > 0) { 6264 /* 6265 * If this is a shared segment, a nonzero softlockcnt 6266 * means locked pages are still in use. 6267 */ 6268 if (nsvd->type == MAP_SHARED) { 6269 return (EAGAIN); 6270 } 6271 segvn_purge(nseg); 6272 if (nsvd->softlockcnt > 0) { 6273 return (EAGAIN); 6274 } 6275 } 6276 err = segvn_clrszc(nseg); 6277 if (err == ENOMEM) { 6278 err = IE_NOMEM; 6279 } 6280 if (err != 0) { 6281 return (err); 6282 } 6283 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6284 err = segvn_concat(seg, nseg, 1); 6285 if (err == -1) { 6286 return (EINVAL); 6287 } 6288 if (err == -2) { 6289 return (IE_NOMEM); 6290 } 6291 return (IE_RETRY); 6292 } 6293 6294 /* 6295 * May need to re-align anon array to 6296 * new szc.
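 * For example (illustrative values): if the new szc implies pgcnt == 8
 * but svd->anon_index == 3, large-page slot lookups would straddle anon
 * array chunks; the code below creates a fresh anon_hdr, copies the
 * slots over, and resets anon_index to 0.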
6297 */ 6298 if (amp != NULL) { 6299 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) { 6300 struct anon_hdr *nahp; 6301 6302 ASSERT(svd->type == MAP_PRIVATE); 6303 6304 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 6305 ASSERT(amp->refcnt == 1); 6306 nahp = anon_create(btop(amp->size), ANON_NOSLEEP); 6307 if (nahp == NULL) { 6308 ANON_LOCK_EXIT(&amp->a_rwlock); 6309 return (IE_NOMEM); 6310 } 6311 if (anon_copy_ptr(amp->ahp, svd->anon_index, 6312 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) { 6313 anon_release(nahp, btop(amp->size)); 6314 ANON_LOCK_EXIT(&amp->a_rwlock); 6315 return (IE_NOMEM); 6316 } 6317 anon_release(amp->ahp, btop(amp->size)); 6318 amp->ahp = nahp; 6319 svd->anon_index = 0; 6320 ANON_LOCK_EXIT(&amp->a_rwlock); 6321 } 6322 } 6323 if (svd->vp != NULL && szc != 0) { 6324 struct vattr va; 6325 u_offset_t eoffpage = svd->offset; 6326 va.va_mask = AT_SIZE; 6327 eoffpage += seg->s_size; 6328 eoffpage = btopr(eoffpage); 6329 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) { 6330 segvn_setpgsz_getattr_err++; 6331 return (EINVAL); 6332 } 6333 if (btopr(va.va_size) < eoffpage) { 6334 segvn_setpgsz_eof_err++; 6335 return (EINVAL); 6336 } 6337 if (amp != NULL) { 6338 /* 6339 * anon_fill_cow_holes() may call VOP_GETPAGE(). 6340 * don't take the anon map lock here to avoid holding it 6341 * across VOP_GETPAGE() calls that may call back into 6342 * segvn for klustering checks. We don't really need 6343 * the anon map lock here since it's a private segment and 6344 * we hold the as-level lock as writers. 6345 */ 6346 if ((err = anon_fill_cow_holes(seg, seg->s_base, 6347 amp->ahp, svd->anon_index, svd->vp, svd->offset, 6348 seg->s_size, szc, svd->prot, svd->vpage, 6349 svd->cred)) != 0) { 6350 return (EINVAL); 6351 } 6352 } 6353 segvn_setvnode_mpss(svd->vp); 6354 } 6355 6356 if (amp != NULL) { 6357 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 6358 if (svd->type == MAP_PRIVATE) { 6359 amp->a_szc = szc; 6360 } else if (szc > amp->a_szc) { 6361 amp->a_szc = szc; 6362 } 6363 ANON_LOCK_EXIT(&amp->a_rwlock); 6364 } 6365 6366 seg->s_szc = szc; 6367 6368 return (0); 6369 } 6370 6371 static int 6372 segvn_clrszc(struct seg *seg) 6373 { 6374 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6375 struct anon_map *amp = svd->amp; 6376 size_t pgsz; 6377 pgcnt_t pages; 6378 int err = 0; 6379 caddr_t a = seg->s_base; 6380 caddr_t ea = a + seg->s_size; 6381 ulong_t an_idx = svd->anon_index; 6382 vnode_t *vp = svd->vp; 6383 struct vpage *vpage = svd->vpage; 6384 page_t *anon_pl[1 + 1], *pp; 6385 struct anon *ap, *oldap; 6386 uint_t prot = svd->prot, vpprot; 6387 int pageflag = 0; 6388 6389 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6390 SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 6391 ASSERT(svd->softlockcnt == 0); 6392 6393 if (vp == NULL && amp == NULL) { 6394 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6395 seg->s_szc = 0; 6396 return (0); 6397 } 6398 6399 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6400 ASSERT(svd->amp == NULL); 6401 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6402 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6403 HAT_REGION_TEXT); 6404 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6405 } else if (svd->tr_state == SEGVN_TR_ON) { 6406 ASSERT(svd->amp != NULL); 6407 segvn_textunrepl(seg, 1); 6408 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6409 amp = NULL; 6410 } else { 6411 if (svd->tr_state != SEGVN_TR_OFF) { 6412 ASSERT(svd->tr_state == SEGVN_TR_INIT); 6413 svd->tr_state = SEGVN_TR_OFF; 6414 } 6415 6416 /* 6417 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
unload argument is 0 when we are freeing the segment 6419 * and unload was already done. 6420 */ 6421 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size, 6422 HAT_UNLOAD_UNMAP); 6423 } 6424 6425 if (amp == NULL || svd->type == MAP_SHARED) { 6426 seg->s_szc = 0; 6427 return (0); 6428 } 6429 6430 pgsz = page_get_pagesize(seg->s_szc); 6431 pages = btop(pgsz); 6432 6433 /* 6434 * XXX anon rwlock is not really needed because this is a 6435 * private segment and we are writers. 6436 */ 6437 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 6438 6439 for (; a < ea; a += pgsz, an_idx += pages) { 6440 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) { 6441 ASSERT(vpage != NULL || svd->pageprot == 0); 6442 if (vpage != NULL) { 6443 ASSERT(sameprot(seg, a, pgsz)); 6444 prot = VPP_PROT(vpage); 6445 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0; 6446 } 6447 if (seg->s_szc != 0) { 6448 ASSERT(vp == NULL || anon_pages(amp->ahp, 6449 an_idx, pages) == pages); 6450 if ((err = anon_map_demotepages(amp, an_idx, 6451 seg, a, prot, vpage, svd->cred)) != 0) { 6452 goto out; 6453 } 6454 } else { 6455 if (oldap->an_refcnt == 1) { 6456 continue; 6457 } 6458 if ((err = anon_getpage(&oldap, &vpprot, 6459 anon_pl, PAGESIZE, seg, a, S_READ, 6460 svd->cred))) { 6461 goto out; 6462 } 6463 if ((pp = anon_private(&ap, seg, a, prot, 6464 anon_pl[0], pageflag, svd->cred)) == NULL) { 6465 err = ENOMEM; 6466 goto out; 6467 } 6468 anon_decref(oldap); 6469 (void) anon_set_ptr(amp->ahp, an_idx, ap, 6470 ANON_SLEEP); 6471 page_unlock(pp); 6472 } 6473 } 6474 vpage = (vpage == NULL) ? NULL : vpage + pages; 6475 } 6476 6477 amp->a_szc = 0; 6478 seg->s_szc = 0; 6479 out: 6480 ANON_LOCK_EXIT(&amp->a_rwlock); 6481 return (err); 6482 } 6483 6484 static int 6485 segvn_claim_pages( 6486 struct seg *seg, 6487 struct vpage *svp, 6488 u_offset_t off, 6489 ulong_t anon_idx, 6490 uint_t prot) 6491 { 6492 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6493 size_t ppasize = (pgcnt + 1) * sizeof (page_t *); 6494 page_t **ppa; 6495 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6496 struct anon_map *amp = svd->amp; 6497 struct vpage *evp = svp + pgcnt; 6498 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT) 6499 + seg->s_base; 6500 struct anon *ap; 6501 struct vnode *vp = svd->vp; 6502 page_t *pp; 6503 pgcnt_t pg_idx, i; 6504 int err = 0; 6505 anoff_t aoff; 6506 int anon = (amp != NULL) ?
1 : 0; 6507 6508 ASSERT(svd->type == MAP_PRIVATE); 6509 ASSERT(svd->vpage != NULL); 6510 ASSERT(seg->s_szc != 0); 6511 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 6512 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt)); 6513 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT)); 6514 6515 if (VPP_PROT(svp) == prot) 6516 return (1); 6517 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE)) 6518 return (1); 6519 6520 ppa = kmem_alloc(ppasize, KM_SLEEP); 6521 if (anon && vp != NULL) { 6522 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) { 6523 anon = 0; 6524 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt)); 6525 } 6526 ASSERT(!anon || 6527 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt); 6528 } 6529 6530 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) { 6531 if (!VPP_ISPPLOCK(svp)) 6532 continue; 6533 if (anon) { 6534 ap = anon_get_ptr(amp->ahp, anon_idx); 6535 if (ap == NULL) { 6536 panic("segvn_claim_pages: no anon slot"); 6537 } 6538 swap_xlate(ap, &vp, &aoff); 6539 off = (u_offset_t)aoff; 6540 } 6541 ASSERT(vp != NULL); 6542 if ((pp = page_lookup(vp, 6543 (u_offset_t)off, SE_SHARED)) == NULL) { 6544 panic("segvn_claim_pages: no page"); 6545 } 6546 ppa[pg_idx++] = pp; 6547 off += PAGESIZE; 6548 } 6549 6550 if (ppa[0] == NULL) { 6551 kmem_free(ppa, ppasize); 6552 return (1); 6553 } 6554 6555 ASSERT(pg_idx <= pgcnt); 6556 ppa[pg_idx] = NULL; 6557 6558 6559 /* Find each large page within ppa, and adjust its claim */ 6560 6561 /* Does ppa cover a single large page? */ 6562 if (ppa[0]->p_szc == seg->s_szc) { 6563 if (prot & PROT_WRITE) 6564 err = page_addclaim_pages(ppa); 6565 else 6566 err = page_subclaim_pages(ppa); 6567 } else { 6568 for (i = 0; ppa[i]; i += pgcnt) { 6569 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt)); 6570 if (prot & PROT_WRITE) 6571 err = page_addclaim_pages(&ppa[i]); 6572 else 6573 err = page_subclaim_pages(&ppa[i]); 6574 if (err == 0) 6575 break; 6576 } 6577 } 6578 6579 for (i = 0; i < pg_idx; i++) { 6580 ASSERT(ppa[i] != NULL); 6581 page_unlock(ppa[i]); 6582 } 6583 6584 kmem_free(ppa, ppasize); 6585 return (err); 6586 } 6587 6588 /* 6589 * Returns right (upper address) segment if split occurred. 6590 * If the address is equal to the beginning or end of its segment it returns 6591 * the current segment. 
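 * A hedged usage sketch (addr assumed to lie inside the segment):
 *	nseg = segvn_split_seg(seg, addr);
 * leaves seg covering [s_base, addr) and nseg covering [addr, old end);
 * callers needing an interior range split twice, as segvn_demote_range()
 * does below.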
6592 */ 6593 static struct seg * 6594 segvn_split_seg(struct seg *seg, caddr_t addr) 6595 { 6596 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6597 struct seg *nseg; 6598 size_t nsize; 6599 struct segvn_data *nsvd; 6600 6601 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6602 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6603 6604 ASSERT(addr >= seg->s_base); 6605 ASSERT(addr <= seg->s_base + seg->s_size); 6606 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6607 6608 if (addr == seg->s_base || addr == seg->s_base + seg->s_size) 6609 return (seg); 6610 6611 nsize = seg->s_base + seg->s_size - addr; 6612 seg->s_size = addr - seg->s_base; 6613 nseg = seg_alloc(seg->s_as, addr, nsize); 6614 ASSERT(nseg != NULL); 6615 nseg->s_ops = seg->s_ops; 6616 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 6617 nseg->s_data = (void *)nsvd; 6618 nseg->s_szc = seg->s_szc; 6619 *nsvd = *svd; 6620 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6621 nsvd->seg = nseg; 6622 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL); 6623 6624 if (nsvd->vp != NULL) { 6625 VN_HOLD(nsvd->vp); 6626 nsvd->offset = svd->offset + 6627 (uintptr_t)(nseg->s_base - seg->s_base); 6628 if (nsvd->type == MAP_SHARED) 6629 lgrp_shm_policy_init(NULL, nsvd->vp); 6630 } else { 6631 /* 6632 * The offset for an anonymous segment has no significance in 6633 * terms of an offset into a file. If we were to use the above 6634 * calculation instead, the structures read out of 6635 * /proc/<pid>/xmap would be more difficult to decipher since 6636 * it would be unclear whether two seemingly contiguous 6637 * prxmap_t structures represented different segments or a 6638 * single segment that had been split up into multiple prxmap_t 6639 * structures (e.g. if some part of the segment had not yet 6640 * been faulted in).
6641 */ 6642 nsvd->offset = 0; 6643 } 6644 6645 ASSERT(svd->softlockcnt == 0); 6646 ASSERT(svd->softlockcnt_sbase == 0); 6647 ASSERT(svd->softlockcnt_send == 0); 6648 crhold(svd->cred); 6649 6650 if (svd->vpage != NULL) { 6651 size_t bytes = vpgtob(seg_pages(seg)); 6652 size_t nbytes = vpgtob(seg_pages(nseg)); 6653 struct vpage *ovpage = svd->vpage; 6654 6655 svd->vpage = kmem_alloc(bytes, KM_SLEEP); 6656 bcopy(ovpage, svd->vpage, bytes); 6657 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 6658 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes); 6659 kmem_free(ovpage, bytes + nbytes); 6660 } 6661 if (svd->amp != NULL && svd->type == MAP_PRIVATE) { 6662 struct anon_map *oamp = svd->amp, *namp; 6663 struct anon_hdr *nahp; 6664 6665 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER); 6666 ASSERT(oamp->refcnt == 1); 6667 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 6668 (void) anon_copy_ptr(oamp->ahp, svd->anon_index, 6669 nahp, 0, btop(seg->s_size), ANON_SLEEP); 6670 6671 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 6672 namp->a_szc = nseg->s_szc; 6673 (void) anon_copy_ptr(oamp->ahp, 6674 svd->anon_index + btop(seg->s_size), 6675 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 6676 anon_release(oamp->ahp, btop(oamp->size)); 6677 oamp->ahp = nahp; 6678 oamp->size = seg->s_size; 6679 svd->anon_index = 0; 6680 nsvd->amp = namp; 6681 nsvd->anon_index = 0; 6682 ANON_LOCK_EXIT(&oamp->a_rwlock); 6683 } else if (svd->amp != NULL) { 6684 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6685 ASSERT(svd->amp == nsvd->amp); 6686 ASSERT(seg->s_szc <= svd->amp->a_szc); 6687 nsvd->anon_index = svd->anon_index + seg_pages(seg); 6688 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt)); 6689 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER); 6690 svd->amp->refcnt++; 6691 ANON_LOCK_EXIT(&svd->amp->a_rwlock); 6692 } 6693 6694 /* 6695 * Split the amount of swap reserved. 6696 */ 6697 if (svd->swresv) { 6698 /* 6699 * For MAP_NORESERVE, only allocate swap reserve for pages 6700 * being used. Other segments get enough to cover the whole 6701 * segment. 6702 */ 6703 if (svd->flags & MAP_NORESERVE) { 6704 size_t oswresv; 6705 6706 ASSERT(svd->amp); 6707 oswresv = svd->swresv; 6708 svd->swresv = ptob(anon_pages(svd->amp->ahp, 6709 svd->anon_index, btop(seg->s_size))); 6710 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 6711 nsvd->anon_index, btop(nseg->s_size))); 6712 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 6713 } else { 6714 if (svd->pageswap) { 6715 svd->swresv = segvn_count_swap_by_vpages(seg); 6716 ASSERT(nsvd->swresv >= svd->swresv); 6717 nsvd->swresv -= svd->swresv; 6718 } else { 6719 ASSERT(svd->swresv == seg->s_size + 6720 nseg->s_size); 6721 svd->swresv = seg->s_size; 6722 nsvd->swresv = nseg->s_size; 6723 } 6724 } 6725 } 6726 6727 return (nseg); 6728 } 6729 6730 /* 6731 * called on memory operations (unmap, setprot, setpagesize) for a subset 6732 * of a large page segment to either demote the memory range (SDR_RANGE) 6733 * or the ends (SDR_END) by addr/len. 6734 * 6735 * returns 0 on success. returns errno, including ENOMEM, on failure.
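 * For example (illustrative sizes): with 4M large pages, an SDR_END
 * operation on [base + 1M, base + 6M) demotes only the two 4M pages
 * containing the range's ends, while SDR_RANGE would demote every
 * large page overlapping the range.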
6736 */ 6737 static int 6738 segvn_demote_range( 6739 struct seg *seg, 6740 caddr_t addr, 6741 size_t len, 6742 int flag, 6743 uint_t szcvec) 6744 { 6745 caddr_t eaddr = addr + len; 6746 caddr_t lpgaddr, lpgeaddr; 6747 struct seg *nseg; 6748 struct seg *badseg1 = NULL; 6749 struct seg *badseg2 = NULL; 6750 size_t pgsz; 6751 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6752 int err; 6753 uint_t szc = seg->s_szc; 6754 uint_t tszcvec; 6755 6756 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6757 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6758 ASSERT(szc != 0); 6759 pgsz = page_get_pagesize(szc); 6760 ASSERT(seg->s_base != addr || seg->s_size != len); 6761 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6762 ASSERT(svd->softlockcnt == 0); 6763 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6764 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED)); 6765 6766 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 6767 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr); 6768 if (flag == SDR_RANGE) { 6769 /* demote entire range */ 6770 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6771 (void) segvn_split_seg(nseg, lpgeaddr); 6772 ASSERT(badseg1->s_base == lpgaddr); 6773 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr); 6774 } else if (addr != lpgaddr) { 6775 ASSERT(flag == SDR_END); 6776 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6777 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz && 6778 eaddr < lpgaddr + 2 * pgsz) { 6779 (void) segvn_split_seg(nseg, lpgeaddr); 6780 ASSERT(badseg1->s_base == lpgaddr); 6781 ASSERT(badseg1->s_size == 2 * pgsz); 6782 } else { 6783 nseg = segvn_split_seg(nseg, lpgaddr + pgsz); 6784 ASSERT(badseg1->s_base == lpgaddr); 6785 ASSERT(badseg1->s_size == pgsz); 6786 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) { 6787 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz); 6788 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz); 6789 badseg2 = nseg; 6790 (void) segvn_split_seg(nseg, lpgeaddr); 6791 ASSERT(badseg2->s_base == lpgeaddr - pgsz); 6792 ASSERT(badseg2->s_size == pgsz); 6793 } 6794 } 6795 } else { 6796 ASSERT(flag == SDR_END); 6797 ASSERT(eaddr < lpgeaddr); 6798 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz); 6799 (void) segvn_split_seg(nseg, lpgeaddr); 6800 ASSERT(badseg1->s_base == lpgeaddr - pgsz); 6801 ASSERT(badseg1->s_size == pgsz); 6802 } 6803 6804 ASSERT(badseg1 != NULL); 6805 ASSERT(badseg1->s_szc == szc); 6806 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz || 6807 badseg1->s_size == 2 * pgsz); 6808 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz)); 6809 ASSERT(badseg1->s_size == pgsz || 6810 sameprot(badseg1, badseg1->s_base + pgsz, pgsz)); 6811 if (err = segvn_clrszc(badseg1)) { 6812 return (err); 6813 } 6814 ASSERT(badseg1->s_szc == 0); 6815 6816 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6817 uint_t tszc = highbit(tszcvec) - 1; 6818 caddr_t ta = MAX(addr, badseg1->s_base); 6819 caddr_t te; 6820 size_t tpgsz = page_get_pagesize(tszc); 6821 6822 ASSERT(svd->type == MAP_SHARED); 6823 ASSERT(flag == SDR_END); 6824 ASSERT(tszc < szc && tszc > 0); 6825 6826 if (eaddr > badseg1->s_base + badseg1->s_size) { 6827 te = badseg1->s_base + badseg1->s_size; 6828 } else { 6829 te = eaddr; 6830 } 6831 6832 ASSERT(ta <= te); 6833 badseg1->s_szc = tszc; 6834 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) { 6835 if (badseg2 != NULL) { 6836 err = segvn_demote_range(badseg1, ta, te - ta, 6837 SDR_END, tszcvec); 6838 if (err != 0) { 6839 return (err); 6840 } 6841 } else { 6842 
return (segvn_demote_range(badseg1, ta, 6843 te - ta, SDR_END, tszcvec)); 6844 } 6845 } 6846 } 6847 6848 if (badseg2 == NULL) 6849 return (0); 6850 ASSERT(badseg2->s_szc == szc); 6851 ASSERT(badseg2->s_size == pgsz); 6852 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size)); 6853 if (err = segvn_clrszc(badseg2)) { 6854 return (err); 6855 } 6856 ASSERT(badseg2->s_szc == 0); 6857 6858 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6859 uint_t tszc = highbit(tszcvec) - 1; 6860 size_t tpgsz = page_get_pagesize(tszc); 6861 6862 ASSERT(svd->type == MAP_SHARED); 6863 ASSERT(flag == SDR_END); 6864 ASSERT(tszc < szc && tszc > 0); 6865 ASSERT(badseg2->s_base > addr); 6866 ASSERT(eaddr > badseg2->s_base); 6867 ASSERT(eaddr < badseg2->s_base + badseg2->s_size); 6868 6869 badseg2->s_szc = tszc; 6870 if (!IS_P2ALIGNED(eaddr, tpgsz)) { 6871 return (segvn_demote_range(badseg2, badseg2->s_base, 6872 eaddr - badseg2->s_base, SDR_END, tszcvec)); 6873 } 6874 } 6875 6876 return (0); 6877 } 6878 6879 static int 6880 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 6881 { 6882 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6883 struct vpage *vp, *evp; 6884 6885 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6886 6887 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6888 /* 6889 * If the segment protections can be used, simply check against them. 6890 */ 6891 if (svd->pageprot == 0) { 6892 int err; 6893 6894 err = ((svd->prot & prot) != prot) ? EACCES : 0; 6895 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6896 return (err); 6897 } 6898 6899 /* 6900 * Have to check down to the vpage level. 6901 */ 6902 evp = &svd->vpage[seg_page(seg, addr + len)]; 6903 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) { 6904 if ((VPP_PROT(vp) & prot) != prot) { 6905 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6906 return (EACCES); 6907 } 6908 } 6909 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6910 return (0); 6911 } 6912 6913 static int 6914 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 6915 { 6916 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6917 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; 6918 6919 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6920 6921 if (pgno != 0) { 6922 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6923 if (svd->pageprot == 0) { 6924 do { 6925 protv[--pgno] = svd->prot; 6926 } while (pgno != 0); 6927 } else { 6928 size_t pgoff = seg_page(seg, addr); 6929 6930 do { 6931 pgno--; 6932 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]); 6933 } while (pgno != 0); 6934 } 6935 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6936 } 6937 return (0); 6938 } 6939 6940 static u_offset_t 6941 segvn_getoffset(struct seg *seg, caddr_t addr) 6942 { 6943 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6944 6945 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6946 6947 return (svd->offset + (uintptr_t)(addr - seg->s_base)); 6948 } 6949 6950 /*ARGSUSED*/ 6951 static int 6952 segvn_gettype(struct seg *seg, caddr_t addr) 6953 { 6954 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6955 6956 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6957 6958 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT | 6959 MAP_INITDATA))); 6960 } 6961 6962 /*ARGSUSED*/ 6963 static int 6964 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 6965 { 6966 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6967 6968
ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6969 6970 *vpp = svd->vp; 6971 return (0); 6972 } 6973 6974 /* 6975 * Check to see if it makes sense to do kluster/read ahead to 6976 * addr + delta relative to the mapping at addr. We assume here 6977 * that delta is a signed PAGESIZE'd multiple (which can be negative). 6978 * 6979 * For segvn, we currently "approve" of the action if we are 6980 * still in the segment and it maps from the same vp/off, 6981 * or if the advice stored in segvn_data or vpages allows it. 6982 * Klustering is disallowed if MADV_RANDOM is set, or if MADV_SEQUENTIAL is set and delta is negative. 6983 */ 6984 static int 6985 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta) 6986 { 6987 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6988 struct anon *oap, *ap; 6989 ssize_t pd; 6990 size_t page; 6991 struct vnode *vp1, *vp2; 6992 u_offset_t off1, off2; 6993 struct anon_map *amp; 6994 6995 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6996 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6997 SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 6998 6999 if (addr + delta < seg->s_base || 7000 addr + delta >= (seg->s_base + seg->s_size)) 7001 return (-1); /* exceeded segment bounds */ 7002 7003 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */ 7004 page = seg_page(seg, addr); 7005 7006 /* 7007 * Check to see if either of the pages addr or addr + delta 7008 * have advice set that prevents klustering (if MADV_RANDOM advice 7009 * is set for entire segment, or MADV_SEQUENTIAL is set and delta 7010 * is negative). 7011 */ 7012 if (svd->advice == MADV_RANDOM || 7013 svd->advice == MADV_SEQUENTIAL && delta < 0) 7014 return (-1); 7015 else if (svd->pageadvice && svd->vpage) { 7016 struct vpage *bvpp, *evpp; 7017 7018 bvpp = &svd->vpage[page]; 7019 evpp = &svd->vpage[page + pd]; 7020 if (VPP_ADVICE(bvpp) == MADV_RANDOM || 7021 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0) 7022 return (-1); 7023 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) && 7024 VPP_ADVICE(evpp) == MADV_RANDOM) 7025 return (-1); 7026 } 7027 7028 if (svd->type == MAP_SHARED) 7029 return (0); /* shared mapping - all ok */ 7030 7031 if ((amp = svd->amp) == NULL) 7032 return (0); /* off original vnode */ 7033 7034 page += svd->anon_index; 7035 7036 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 7037 7038 oap = anon_get_ptr(amp->ahp, page); 7039 ap = anon_get_ptr(amp->ahp, page + pd); 7040 7041 ANON_LOCK_EXIT(&amp->a_rwlock); 7042 7043 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) { 7044 return (-1); /* one with and one without an anon */ 7045 } 7046 7047 if (oap == NULL) { /* implies that ap == NULL */ 7048 return (0); /* off original vnode */ 7049 } 7050 7051 /* 7052 * Now we know we have two anon pointers - check to 7053 * see if they happen to be properly allocated. 7054 */ 7055 7056 /* 7057 * XXX We cheat here and don't lock the anon slots. We can't because 7058 * we may have been called from the anon layer which might already 7059 * have locked them. We are holding a refcnt on the slots so they 7060 * can't disappear. The worst that will happen is we'll get the wrong 7061 * names (vp, off) for the slots and make a poor klustering decision. 7062 */ 7063 swap_xlate(ap, &vp1, &off1); 7064 swap_xlate(oap, &vp2, &off2); 7065 7066 7067 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta) 7068 return (-1); 7069 return (0); 7070 } 7071 7072 /* 7073 * Swap the pages of seg out to secondary storage, returning the 7074 * number of bytes of storage freed.
7075 * 7076 * The basic idea is first to unload all translations and then to call 7077 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the 7078 * swap device. Pages to which other segments have mappings will remain 7079 * mapped and won't be swapped. Our caller (as_swapout) has already 7080 * performed the unloading step. 7081 * 7082 * The value returned is intended to correlate well with the process's 7083 * memory requirements. However, there are some caveats: 7084 * 1) When given a shared segment as argument, this routine will 7085 * only succeed in swapping out pages for the last sharer of the 7086 * segment. (Previous callers will only have decremented mapping 7087 * reference counts.) 7088 * 2) We assume that the hat layer maintains a large enough translation 7089 * cache to capture process reference patterns. 7090 */ 7091 static size_t 7092 segvn_swapout(struct seg *seg) 7093 { 7094 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7095 struct anon_map *amp; 7096 pgcnt_t pgcnt = 0; 7097 pgcnt_t npages; 7098 pgcnt_t page; 7099 ulong_t anon_index; 7100 7101 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7102 7103 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7104 /* 7105 * Find pages unmapped by our caller and force them 7106 * out to the virtual swap device. 7107 */ 7108 if ((amp = svd->amp) != NULL) 7109 anon_index = svd->anon_index; 7110 npages = seg->s_size >> PAGESHIFT; 7111 for (page = 0; page < npages; page++) { 7112 page_t *pp; 7113 struct anon *ap; 7114 struct vnode *vp; 7115 u_offset_t off; 7116 anon_sync_obj_t cookie; 7117 7118 /* 7119 * Obtain <vp, off> pair for the page, then look it up. 7120 * 7121 * Note that this code is willing to consider regular 7122 * pages as well as anon pages. Is this appropriate here? 7123 */ 7124 ap = NULL; 7125 if (amp != NULL) { 7126 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 7127 if (anon_array_try_enter(amp, anon_index + page, 7128 &cookie)) { 7129 ANON_LOCK_EXIT(&amp->a_rwlock); 7130 continue; 7131 } 7132 ap = anon_get_ptr(amp->ahp, anon_index + page); 7133 if (ap != NULL) { 7134 swap_xlate(ap, &vp, &off); 7135 } else { 7136 vp = svd->vp; 7137 off = svd->offset + ptob(page); 7138 } 7139 anon_array_exit(&cookie); 7140 ANON_LOCK_EXIT(&amp->a_rwlock); 7141 } else { 7142 vp = svd->vp; 7143 off = svd->offset + ptob(page); 7144 } 7145 if (vp == NULL) { /* untouched zfod page */ 7146 ASSERT(ap == NULL); 7147 continue; 7148 } 7149 7150 pp = page_lookup_nowait(vp, off, SE_SHARED); 7151 if (pp == NULL) 7152 continue; 7153 7154 7155 /* 7156 * Examine the page to see whether it can be tossed out, 7157 * keeping track of how many we've found. 7158 */ 7159 if (!page_tryupgrade(pp)) { 7160 /* 7161 * If the page has an i/o lock and no mappings, 7162 * it's very likely that the page is being 7163 * written out as a result of klustering. 7164 * Assume this is so and take credit for it here. 7165 */ 7166 if (!page_io_trylock(pp)) { 7167 if (!hat_page_is_mapped(pp)) 7168 pgcnt++; 7169 } else { 7170 page_io_unlock(pp); 7171 } 7172 page_unlock(pp); 7173 continue; 7174 } 7175 ASSERT(!page_iolock_assert(pp)); 7176 7177 7178 /* 7179 * Skip if page is locked or has mappings. 7180 * We don't need the page_struct_lock to look at lckcnt 7181 * and cowcnt because the page is exclusive locked. 7182 */ 7183 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 || 7184 hat_page_is_mapped(pp)) { 7185 page_unlock(pp); 7186 continue; 7187 } 7188 7189 /* 7190 * dispose skips large pages so try to demote first.
7191 */ 7192 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) { 7193 page_unlock(pp); 7194 /* 7195 * XXX should skip the remaining page_t's of this 7196 * large page. 7197 */ 7198 continue; 7199 } 7200 7201 ASSERT(pp->p_szc == 0); 7202 7203 /* 7204 * No longer mapped -- we can toss it out. How 7205 * we do so depends on whether or not it's dirty. 7206 */ 7207 if (hat_ismod(pp) && pp->p_vnode) { 7208 /* 7209 * We must clean the page before it can be 7210 * freed. Setting B_FREE will cause pvn_done 7211 * to free the page when the i/o completes. 7212 * XXX: This also causes it to be accounted 7213 * as a pageout instead of a swap: need 7214 * B_SWAPOUT bit to use instead of B_FREE. 7215 * 7216 * Hold the vnode before releasing the page lock 7217 * to prevent it from being freed and re-used by 7218 * some other thread. 7219 */ 7220 VN_HOLD(vp); 7221 page_unlock(pp); 7222 7223 /* 7224 * Queue all i/o requests for the pageout thread 7225 * to avoid saturating the pageout devices. 7226 */ 7227 if (!queue_io_request(vp, off)) 7228 VN_RELE(vp); 7229 } else { 7230 /* 7231 * The page was clean, free it. 7232 * 7233 * XXX: Can we ever encounter modified pages 7234 * with no associated vnode here? 7235 */ 7236 ASSERT(pp->p_vnode != NULL); 7237 /*LINTED: constant in conditional context*/ 7238 VN_DISPOSE(pp, B_FREE, 0, kcred); 7239 } 7240 7241 /* 7242 * Credit now even if i/o is in progress. 7243 */ 7244 pgcnt++; 7245 } 7246 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7247 7248 /* 7249 * Wakeup pageout to initiate i/o on all queued requests. 7250 */ 7251 cv_signal_pageout(); 7252 return (ptob(pgcnt)); 7253 } 7254 7255 /* 7256 * Synchronize primary storage cache with real object in virtual memory. 7257 * 7258 * XXX - Anonymous pages should not be sync'ed out at all. 7259 */ 7260 static int 7261 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) 7262 { 7263 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7264 struct vpage *vpp; 7265 page_t *pp; 7266 u_offset_t offset; 7267 struct vnode *vp; 7268 u_offset_t off; 7269 caddr_t eaddr; 7270 int bflags; 7271 int err = 0; 7272 int segtype; 7273 int pageprot; 7274 int prot; 7275 ulong_t anon_index; 7276 struct anon_map *amp; 7277 struct anon *ap; 7278 anon_sync_obj_t cookie; 7279 7280 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7281 7282 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7283 7284 if (svd->softlockcnt > 0) { 7285 /* 7286 * If this is a shared segment, a nonzero softlockcnt 7287 * means locked pages are still in use. 7288 */ 7289 if (svd->type == MAP_SHARED) { 7290 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7291 return (EAGAIN); 7292 } 7293 7294 /* 7295 * Flush all pages from the seg cache, otherwise 7296 * we may deadlock in swap_putpage 7297 * for a B_INVAL page (4175402). 7298 * 7299 * Even if we grab the segvn WRITER's lock 7300 * here, there might be another thread which could've 7301 * successfully performed lookup/insert just before 7302 * we acquired the lock here. So, grabbing either 7303 * lock here is not of much use. Until we devise 7304 * a strategy at upper layers to solve the 7305 * synchronization issues completely, we expect 7306 * applications to handle this appropriately. 7307 */ 7308 segvn_purge(seg); 7309 if (svd->softlockcnt > 0) { 7310 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7311 return (EAGAIN); 7312 } 7313 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 7314 svd->amp->a_softlockcnt > 0) { 7315 /* 7316 * Try to purge this amp's entries from pcache.
It will 7317 * succeed only if other segments that share the amp have no 7318 * outstanding softlocks. 7319 */ 7320 segvn_purge(seg); 7321 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) { 7322 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7323 return (EAGAIN); 7324 } 7325 } 7326 7327 vpp = svd->vpage; 7328 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7329 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) | 7330 ((flags & MS_INVALIDATE) ? B_INVAL : 0); 7331 7332 if (attr) { 7333 pageprot = attr & ~(SHARED|PRIVATE); 7334 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE; 7335 7336 /* 7337 * We are done if the segment types don't match 7338 * or if we have segment level protections and 7339 * they don't match. 7340 */ 7341 if (svd->type != segtype) { 7342 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7343 return (0); 7344 } 7345 if (vpp == NULL) { 7346 if (svd->prot != pageprot) { 7347 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7348 return (0); 7349 } 7350 prot = svd->prot; 7351 } else 7352 vpp = &svd->vpage[seg_page(seg, addr)]; 7353 7354 } else if (svd->vp && svd->amp == NULL && 7355 (flags & MS_INVALIDATE) == 0) { 7356 7357 /* 7358 * No attributes, no anonymous pages, and the MS_INVALIDATE flag 7359 * is not on; just use one big request. 7360 */ 7361 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len, 7362 bflags, svd->cred, NULL); 7363 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7364 return (err); 7365 } 7366 7367 if ((amp = svd->amp) != NULL) 7368 anon_index = svd->anon_index + seg_page(seg, addr); 7369 7370 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) { 7371 ap = NULL; 7372 if (amp != NULL) { 7373 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 7374 anon_array_enter(amp, anon_index, &cookie); 7375 ap = anon_get_ptr(amp->ahp, anon_index++); 7376 if (ap != NULL) { 7377 swap_xlate(ap, &vp, &off); 7378 } else { 7379 vp = svd->vp; 7380 off = offset; 7381 } 7382 anon_array_exit(&cookie); 7383 ANON_LOCK_EXIT(&amp->a_rwlock); 7384 } else { 7385 vp = svd->vp; 7386 off = offset; 7387 } 7388 offset += PAGESIZE; 7389 7390 if (vp == NULL) /* untouched zfod page */ 7391 continue; 7392 7393 if (attr) { 7394 if (vpp) { 7395 prot = VPP_PROT(vpp); 7396 vpp++; 7397 } 7398 if (prot != pageprot) { 7399 continue; 7400 } 7401 } 7402 7403 /* 7404 * See if any of these pages are locked -- if so, then we 7405 * will have to truncate an invalidate request at the first 7406 * locked one. We don't need the page_struct_lock to test 7407 * as this is only advisory; even if we acquire it someone 7408 * might race in and lock the page after we unlock and before 7409 * we do the PUTPAGE, then PUTPAGE simply does nothing. 7410 */ 7411 if (flags & MS_INVALIDATE) { 7412 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) { 7413 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 7414 page_unlock(pp); 7415 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7416 return (EBUSY); 7417 } 7418 if (ap != NULL && pp->p_szc != 0 && 7419 page_tryupgrade(pp)) { 7420 if (pp->p_lckcnt == 0 && 7421 pp->p_cowcnt == 0) { 7422 /* 7423 * swapfs VN_DISPOSE() won't 7424 * invalidate large pages. 7425 * Attempt to demote. 7426 * XXX can't help it if it 7427 * fails. But for swapfs 7428 * pages it is no big deal. 7429 */ 7430 (void) page_try_demote_pages( 7431 pp); 7432 } 7433 } 7434 page_unlock(pp); 7435 } 7436 } else if (svd->type == MAP_SHARED && amp != NULL) { 7437 /* 7438 * Avoid writing ISM's large pages out to disk, because 7439 * segspt_free_pages() relies on the anon slots of such 7440 * pages having a NULL an_pvp.
7441 */ 7442 7443 ASSERT(svd->vp == NULL); 7444 /* 7445 * swapfs uses page_lookup_nowait if not freeing or 7446 * invalidating and skips a page if 7447 * page_lookup_nowait returns NULL. 7448 */ 7449 pp = page_lookup_nowait(vp, off, SE_SHARED); 7450 if (pp == NULL) { 7451 continue; 7452 } 7453 if (pp->p_szc != 0) { 7454 page_unlock(pp); 7455 continue; 7456 } 7457 7458 /* 7459 * Note ISM pages are created large so (vp, off)'s 7460 * page cannot suddenly become large after we unlock 7461 * pp. 7462 */ 7463 page_unlock(pp); 7464 } 7465 /* 7466 * XXX - Should ultimately try to kluster 7467 * calls to VOP_PUTPAGE() for performance. 7468 */ 7469 VN_HOLD(vp); 7470 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE, 7471 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)), 7472 svd->cred, NULL); 7473 7474 VN_RELE(vp); 7475 if (err) 7476 break; 7477 } 7478 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7479 return (err); 7480 } 7481 7482 /* 7483 * Determine if we have data corresponding to pages in the 7484 * primary storage virtual memory cache (i.e., "in core"). 7485 */ 7486 static size_t 7487 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) 7488 { 7489 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7490 struct vnode *vp, *avp; 7491 u_offset_t offset, aoffset; 7492 size_t p, ep; 7493 int ret; 7494 struct vpage *vpp; 7495 page_t *pp; 7496 uint_t start; 7497 struct anon_map *amp; /* XXX - for locknest */ 7498 struct anon *ap; 7499 uint_t attr; 7500 anon_sync_obj_t cookie; 7501 7502 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7503 7504 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7505 if (svd->amp == NULL && svd->vp == NULL) { 7506 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7507 bzero(vec, btopr(len)); 7508 return (len); /* no anonymous pages created yet */ 7509 } 7510 7511 p = seg_page(seg, addr); 7512 ep = seg_page(seg, addr + len); 7513 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0; 7514 7515 amp = svd->amp; 7516 for (; p < ep; p++, addr += PAGESIZE) { 7517 vpp = (svd->vpage) ? &svd->vpage[p]: NULL; 7518 ret = start; 7519 ap = NULL; 7520 avp = NULL; 7521 /* Grab the vnode/offset for the anon slot */ 7522 if (amp != NULL) { 7523 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 7524 anon_array_enter(amp, svd->anon_index + p, &cookie); 7525 ap = anon_get_ptr(amp->ahp, svd->anon_index + p); 7526 if (ap != NULL) { 7527 swap_xlate(ap, &avp, &aoffset); 7528 } 7529 anon_array_exit(&cookie); 7530 ANON_LOCK_EXIT(&amp->a_rwlock); 7531 } 7532 if ((avp != NULL) && page_exists(avp, aoffset)) { 7533 /* A page exists for the anon slot */ 7534 ret |= SEG_PAGE_INCORE; 7535 7536 /* 7537 * If page is mapped and writable 7538 */ 7539 attr = (uint_t)0; 7540 if ((hat_getattr(seg->s_as->a_hat, addr, 7541 &attr) != -1) && (attr & PROT_WRITE)) { 7542 ret |= SEG_PAGE_ANON; 7543 } 7544 /* 7545 * Don't get page_struct lock for lckcnt and cowcnt, 7546 * since this is purely advisory. 7547 */ 7548 if ((pp = page_lookup_nowait(avp, aoffset, 7549 SE_SHARED)) != NULL) { 7550 if (pp->p_lckcnt) 7551 ret |= SEG_PAGE_SOFTLOCK; 7552 if (pp->p_cowcnt) 7553 ret |= SEG_PAGE_HASCOW; 7554 page_unlock(pp); 7555 } 7556 } 7557 7558 /* Gather vnode statistics */ 7559 vp = svd->vp; 7560 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7561 7562 if (vp != NULL) { 7563 /* 7564 * Try to obtain a "shared" lock on the page 7565 * without blocking. If this fails, determine 7566 * if the page is in memory.
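 * A condensed sketch of the probe below (purely advisory, so a race
 * simply yields a slightly stale answer):
 *	pp = page_lookup_nowait(vp, offset, SE_SHARED);
 *	if (pp == NULL && page_exists(vp, offset))
 *		ret |= SEG_PAGE_INCORE | SEG_PAGE_VNODE;  -- incore, but busy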
7567 */ 7568 pp = page_lookup_nowait(vp, offset, SE_SHARED); 7569 if ((pp == NULL) && (page_exists(vp, offset))) { 7570 /* Page is incore, and is named */ 7571 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7572 } 7573 /* 7574 * Don't get page_struct lock for lckcnt and cowcnt, 7575 * since this is purely advisory. 7576 */ 7577 if (pp != NULL) { 7578 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7579 if (pp->p_lckcnt) 7580 ret |= SEG_PAGE_SOFTLOCK; 7581 if (pp->p_cowcnt) 7582 ret |= SEG_PAGE_HASCOW; 7583 page_unlock(pp); 7584 } 7585 } 7586 7587 /* Gather virtual page information */ 7588 if (vpp) { 7589 if (VPP_ISPPLOCK(vpp)) 7590 ret |= SEG_PAGE_LOCKED; 7591 vpp++; 7592 } 7593 7594 *vec++ = (char)ret; 7595 } 7596 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7597 return (len); 7598 } 7599 7600 /* 7601 * Statement for p_cowcnts/p_lckcnts. 7602 * 7603 * p_cowcnt is updated while mlock/munlocking a MAP_PRIVATE and PROT_WRITE region 7604 * irrespective of the following factors or anything else: 7605 * 7606 * (1) anon slots are populated or not 7607 * (2) cow is broken or not 7608 * (3) refcnt on ap is 1 or greater than 1 7609 * 7610 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock 7611 * and munlock. 7612 * 7613 * 7614 * Handling p_cowcnts/p_lckcnts during copy-on-write fault: 7615 * 7616 * if vpage has PROT_WRITE 7617 * transfer cowcnt on the oldpage -> cowcnt on the newpage 7618 * else 7619 * transfer lckcnt on the oldpage -> lckcnt on the newpage 7620 * 7621 * During copy-on-write, decrement p_cowcnt on the oldpage and increment 7622 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE. 7623 * 7624 * We may also break COW if softlocking on read access in the physio case. 7625 * In this case, vpage may not have PROT_WRITE. So, we need to decrement 7626 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the 7627 * vpage doesn't have PROT_WRITE. 7628 * 7629 * 7630 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region: 7631 * 7632 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and 7633 * increment p_lckcnt by calling page_subclaim() which takes care of 7634 * availrmem accounting and p_lckcnt overflow. 7635 * 7636 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and 7637 * increment p_cowcnt by calling page_addclaim() which takes care of 7638 * availrmem availability and p_cowcnt overflow. 7639 */ 7640 7641 /* 7642 * Lock down (or unlock) pages mapped by this segment. 7643 * 7644 * XXX only creates PAGESIZE pages if anon slots are not initialized. 7645 * At fault time they will be relocated into larger pages.
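 * A worked example of the claim accounting described above (hypothetical
 * page): when an mlocked MAP_PRIVATE page gains PROT_WRITE, page_addclaim()
 * moves its claim from p_lckcnt to p_cowcnt; when it loses PROT_WRITE,
 * page_subclaim() moves the claim back. Both calls also settle availrmem
 * accounting.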
7646 */ 7647 static int 7648 segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 7649 int attr, int op, ulong_t *lockmap, size_t pos) 7650 { 7651 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7652 struct vpage *vpp; 7653 struct vpage *evp; 7654 page_t *pp; 7655 u_offset_t offset; 7656 u_offset_t off; 7657 int segtype; 7658 int pageprot; 7659 int claim; 7660 struct vnode *vp; 7661 ulong_t anon_index; 7662 struct anon_map *amp; 7663 struct anon *ap; 7664 struct vattr va; 7665 anon_sync_obj_t cookie; 7666 struct kshmid *sp = NULL; 7667 struct proc *p = curproc; 7668 kproject_t *proj = NULL; 7669 int chargeproc = 1; 7670 size_t locked_bytes = 0; 7671 size_t unlocked_bytes = 0; 7672 int err = 0; 7673 7674 /* 7675 * Hold write lock on address space because we may split or concatenate 7676 * segments 7677 */ 7678 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7679 7680 /* 7681 * If this is a shm, use shm's project and zone, else use 7682 * project and zone of the calling process 7683 */ 7684 7685 /* Determine if this segment backs a sysV shm */ 7686 if (svd->amp != NULL && svd->amp->a_sp != NULL) { 7687 ASSERT(svd->type == MAP_SHARED); 7688 ASSERT(svd->tr_state == SEGVN_TR_OFF); 7689 sp = svd->amp->a_sp; 7690 proj = sp->shm_perm.ipc_proj; 7691 chargeproc = 0; 7692 } 7693 7694 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7695 if (attr) { 7696 pageprot = attr & ~(SHARED|PRIVATE); 7697 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE; 7698 7699 /* 7700 * We are done if the segment types don't match 7701 * or if we have segment level protections and 7702 * they don't match. 7703 */ 7704 if (svd->type != segtype) { 7705 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7706 return (0); 7707 } 7708 if (svd->pageprot == 0 && svd->prot != pageprot) { 7709 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7710 return (0); 7711 } 7712 } 7713 7714 if (op == MC_LOCK) { 7715 if (svd->tr_state == SEGVN_TR_INIT) { 7716 svd->tr_state = SEGVN_TR_OFF; 7717 } else if (svd->tr_state == SEGVN_TR_ON) { 7718 ASSERT(svd->amp != NULL); 7719 segvn_textunrepl(seg, 0); 7720 ASSERT(svd->amp == NULL && 7721 svd->tr_state == SEGVN_TR_OFF); 7722 } 7723 } 7724 7725 /* 7726 * If we're locking, then we must create a vpage structure if 7727 * none exists. If we're unlocking, then check to see if there 7728 * is a vpage -- if not, then we could not have locked anything. 7729 */ 7730 7731 if ((vpp = svd->vpage) == NULL) { 7732 if (op == MC_LOCK) { 7733 segvn_vpage(seg); 7734 if (svd->vpage == NULL) { 7735 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7736 return (ENOMEM); 7737 } 7738 } else { 7739 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7740 return (0); 7741 } 7742 } 7743 7744 /* 7745 * The anonymous data vector (i.e., previously 7746 * unreferenced mapping to swap space) can be allocated 7747 * by lazily testing for its existence.
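 * i.e., a minimal sketch of the lazy allocation performed below:
 *	if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL)
 *		svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);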
7748 */ 7749 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) { 7750 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 7751 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 7752 svd->amp->a_szc = seg->s_szc; 7753 } 7754 7755 if ((amp = svd->amp) != NULL) { 7756 anon_index = svd->anon_index + seg_page(seg, addr); 7757 } 7758 7759 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7760 evp = &svd->vpage[seg_page(seg, addr + len)]; 7761 7762 if (sp != NULL) 7763 mutex_enter(&sp->shm_mlock); 7764 7765 /* determine number of unlocked bytes in range for lock operation */ 7766 if (op == MC_LOCK) { 7767 7768 if (sp == NULL) { 7769 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7770 vpp++) { 7771 if (!VPP_ISPPLOCK(vpp)) 7772 unlocked_bytes += PAGESIZE; 7773 } 7774 } else { 7775 ulong_t i_idx, i_edx; 7776 anon_sync_obj_t i_cookie; 7777 struct anon *i_ap; 7778 struct vnode *i_vp; 7779 u_offset_t i_off; 7780 7781 /* Only count sysV pages once for locked memory */ 7782 i_edx = svd->anon_index + seg_page(seg, addr + len); 7783 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 7784 for (i_idx = anon_index; i_idx < i_edx; i_idx++) { 7785 anon_array_enter(amp, i_idx, &i_cookie); 7786 i_ap = anon_get_ptr(amp->ahp, i_idx); 7787 if (i_ap == NULL) { 7788 unlocked_bytes += PAGESIZE; 7789 anon_array_exit(&i_cookie); 7790 continue; 7791 } 7792 swap_xlate(i_ap, &i_vp, &i_off); 7793 anon_array_exit(&i_cookie); 7794 pp = page_lookup(i_vp, i_off, SE_SHARED); 7795 if (pp == NULL) { 7796 unlocked_bytes += PAGESIZE; 7797 continue; 7798 } else if (pp->p_lckcnt == 0) 7799 unlocked_bytes += PAGESIZE; 7800 page_unlock(pp); 7801 } 7802 ANON_LOCK_EXIT(&amp->a_rwlock); 7803 } 7804 7805 mutex_enter(&p->p_lock); 7806 err = rctl_incr_locked_mem(p, proj, unlocked_bytes, 7807 chargeproc); 7808 mutex_exit(&p->p_lock); 7809 7810 if (err) { 7811 if (sp != NULL) 7812 mutex_exit(&sp->shm_mlock); 7813 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7814 return (err); 7815 } 7816 } 7817 /* 7818 * Loop over all pages in the range. Process if we're locking and 7819 * page has not already been locked in this mapping; or if we're 7820 * unlocking and the page has been locked. 7821 */ 7822 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7823 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) { 7824 if ((attr == 0 || VPP_PROT(vpp) == pageprot) && 7825 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) || 7826 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) { 7827 7828 if (amp != NULL) 7829 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 7830 /* 7831 * If this isn't a MAP_NORESERVE segment and 7832 * we're locking, allocate anon slots if they 7833 * don't exist. The page is brought in later on. 7834 */ 7835 if (op == MC_LOCK && svd->vp == NULL && 7836 ((svd->flags & MAP_NORESERVE) == 0) && 7837 amp != NULL && 7838 ((ap = anon_get_ptr(amp->ahp, anon_index)) 7839 == NULL)) { 7840 anon_array_enter(amp, anon_index, &cookie); 7841 7842 if ((ap = anon_get_ptr(amp->ahp, 7843 anon_index)) == NULL) { 7844 pp = anon_zero(seg, addr, &ap, 7845 svd->cred); 7846 if (pp == NULL) { 7847 anon_array_exit(&cookie); 7848 ANON_LOCK_EXIT(&amp->a_rwlock); 7849 err = ENOMEM; 7850 goto out; 7851 } 7852 ASSERT(anon_get_ptr(amp->ahp, 7853 anon_index) == NULL); 7854 (void) anon_set_ptr(amp->ahp, 7855 anon_index, ap, ANON_SLEEP); 7856 page_unlock(pp); 7857 } 7858 anon_array_exit(&cookie); 7859 } 7860 7861 /* 7862 * Get name for page, accounting for 7863 * existence of private copy.
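 * The <vp, off> naming convention (as used throughout this file): if an
 * anon slot exists, swap_xlate(ap, &vp, &off) names the page by its swap
 * backing; otherwise the page is named by the mapped file, with
 * vp = svd->vp and off = the offset within the segment's mapping.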
7864 */ 7865 ap = NULL; 7866 if (amp != NULL) { 7867 anon_array_enter(amp, anon_index, &cookie); 7868 ap = anon_get_ptr(amp->ahp, anon_index); 7869 if (ap != NULL) { 7870 swap_xlate(ap, &vp, &off); 7871 } else { 7872 if (svd->vp == NULL && 7873 (svd->flags & MAP_NORESERVE)) { 7874 anon_array_exit(&cookie); 7875 ANON_LOCK_EXIT(&amp->a_rwlock); 7876 continue; 7877 } 7878 vp = svd->vp; 7879 off = offset; 7880 } 7881 if (op != MC_LOCK || ap == NULL) { 7882 anon_array_exit(&cookie); 7883 ANON_LOCK_EXIT(&amp->a_rwlock); 7884 } 7885 } else { 7886 vp = svd->vp; 7887 off = offset; 7888 } 7889 7890 /* 7891 * Get page frame. It's ok if the page is 7892 * not available when we're unlocking, as this 7893 * may simply mean that a page we locked got 7894 * truncated out of existence after we locked it. 7895 * 7896 * Invoke VOP_GETPAGE() to obtain the page struct 7897 * since we may need to read it from disk if it's 7898 * been paged out. 7899 */ 7900 if (op != MC_LOCK) 7901 pp = page_lookup(vp, off, SE_SHARED); 7902 else { 7903 page_t *pl[1 + 1]; 7904 int error; 7905 7906 ASSERT(vp != NULL); 7907 7908 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 7909 (uint_t *)NULL, pl, PAGESIZE, seg, addr, 7910 S_OTHER, svd->cred, NULL); 7911 7912 if (error && ap != NULL) { 7913 anon_array_exit(&cookie); 7914 ANON_LOCK_EXIT(&amp->a_rwlock); 7915 } 7916 7917 /* 7918 * If the error is EDEADLK then we must bounce 7919 * up and drop all vm subsystem locks and then 7920 * retry the operation later. 7921 * This behavior is a temporary measure because 7922 * ufs/sds logging is badly designed and will 7923 * deadlock if we don't allow this bounce to 7924 * happen. The real solution is to re-design 7925 * the logging code to work properly. See bug 7926 * 4125102 for details of the problem. 7927 */ 7928 if (error == EDEADLK) { 7929 err = error; 7930 goto out; 7931 } 7932 /* 7933 * Quit if we fail to fault in the page. Treat 7934 * the failure as an error, unless the addr 7935 * is mapped beyond the end of a file. 7936 */ 7937 if (error && svd->vp) { 7938 va.va_mask = AT_SIZE; 7939 if (VOP_GETATTR(svd->vp, &va, 0, 7940 svd->cred, NULL) != 0) { 7941 err = EIO; 7942 goto out; 7943 } 7944 if (btopr(va.va_size) >= 7945 btopr(off + 1)) { 7946 err = EIO; 7947 goto out; 7948 } 7949 goto out; 7950 7951 } else if (error) { 7952 err = EIO; 7953 goto out; 7954 } 7955 pp = pl[0]; 7956 ASSERT(pp != NULL); 7957 } 7958 7959 /* 7960 * See Statement at the beginning of this routine. 7961 * 7962 * claim is always set if MAP_PRIVATE and PROT_WRITE 7963 * irrespective of the following factors: 7964 * 7965 * (1) anon slots are populated or not 7966 * (2) cow is broken or not 7967 * (3) refcnt on ap is 1 or greater than 1 7968 * 7969 * See 4140683 for details 7970 */ 7971 claim = ((VPP_PROT(vpp) & PROT_WRITE) && 7972 (svd->type == MAP_PRIVATE)); 7973 7974 /* 7975 * Perform page-level operation appropriate to 7976 * operation. If locking, undo the SOFTLOCK 7977 * performed to bring the page into memory 7978 * after setting the lock. If unlocking, 7979 * and no page was found, account for the claim 7980 * separately.
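 * A sketch of the lock path below: page_pp_lock(pp, claim, 0) bumps
 * p_lckcnt or p_cowcnt (per the claim rule above) and returns 0 on
 * overflow or availrmem shortage, which this routine maps to EAGAIN.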
7981 */ 7982 if (op == MC_LOCK) { 7983 int ret = 1; /* Assume success */ 7984 7985 ASSERT(!VPP_ISPPLOCK(vpp)); 7986 7987 ret = page_pp_lock(pp, claim, 0); 7988 if (ap != NULL) { 7989 if (ap->an_pvp != NULL) { 7990 anon_swap_free(ap, pp); 7991 } 7992 anon_array_exit(&cookie); 7993 ANON_LOCK_EXIT(&amp->a_rwlock); 7994 } 7995 if (ret == 0) { 7996 /* locking page failed */ 7997 page_unlock(pp); 7998 err = EAGAIN; 7999 goto out; 8000 } 8001 VPP_SETPPLOCK(vpp); 8002 if (sp != NULL) { 8003 if (pp->p_lckcnt == 1) 8004 locked_bytes += PAGESIZE; 8005 } else 8006 locked_bytes += PAGESIZE; 8007 8008 if (lockmap != (ulong_t *)NULL) 8009 BT_SET(lockmap, pos); 8010 8011 page_unlock(pp); 8012 } else { 8013 ASSERT(VPP_ISPPLOCK(vpp)); 8014 if (pp != NULL) { 8015 /* sysV pages should be locked */ 8016 ASSERT(sp == NULL || pp->p_lckcnt > 0); 8017 page_pp_unlock(pp, claim, 0); 8018 if (sp != NULL) { 8019 if (pp->p_lckcnt == 0) 8020 unlocked_bytes 8021 += PAGESIZE; 8022 } else 8023 unlocked_bytes += PAGESIZE; 8024 page_unlock(pp); 8025 } else { 8026 ASSERT(sp == NULL); 8027 unlocked_bytes += PAGESIZE; 8028 } 8029 VPP_CLRPPLOCK(vpp); 8030 } 8031 } 8032 } 8033 out: 8034 if (op == MC_LOCK) { 8035 /* Credit back bytes that did not get locked */ 8036 if ((unlocked_bytes - locked_bytes) > 0) { 8037 if (proj == NULL) 8038 mutex_enter(&p->p_lock); 8039 rctl_decr_locked_mem(p, proj, 8040 (unlocked_bytes - locked_bytes), chargeproc); 8041 if (proj == NULL) 8042 mutex_exit(&p->p_lock); 8043 } 8044 8045 } else { 8046 /* Account bytes that were unlocked */ 8047 if (unlocked_bytes > 0) { 8048 if (proj == NULL) 8049 mutex_enter(&p->p_lock); 8050 rctl_decr_locked_mem(p, proj, unlocked_bytes, 8051 chargeproc); 8052 if (proj == NULL) 8053 mutex_exit(&p->p_lock); 8054 } 8055 } 8056 if (sp != NULL) 8057 mutex_exit(&sp->shm_mlock); 8058 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8059 8060 return (err); 8061 } 8062 8063 /* 8064 * Set advice from user for specified pages 8065 * There are 9 types of advice: 8066 * MADV_NORMAL - Normal (default) behavior (whatever that is) 8067 * MADV_RANDOM - Random page references 8068 * do not allow readahead or 'klustering' 8069 * MADV_SEQUENTIAL - Sequential page references 8070 * Pages previous to the one currently being 8071 * accessed (determined by fault) are 'not needed' 8072 * and are freed immediately 8073 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl) 8074 * MADV_DONTNEED - Pages are not needed (synced out in mctl) 8075 * MADV_FREE - Contents can be discarded 8076 * MADV_ACCESS_DEFAULT- Default access 8077 * MADV_ACCESS_LWP - Next LWP will access heavily 8078 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily 8079 */ 8080 static int 8081 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 8082 { 8083 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8084 size_t page; 8085 int err = 0; 8086 int already_set; 8087 struct anon_map *amp; 8088 ulong_t anon_index; 8089 struct seg *next; 8090 lgrp_mem_policy_t policy; 8091 struct seg *prev; 8092 struct vnode *vp; 8093 8094 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8095 8096 /* 8097 * In case of MADV_FREE, we won't be modifying any segment private 8098 * data structures, so we only need to grab the READER's lock 8099 */ 8100 if (behav != MADV_FREE) { 8101 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 8102 if (svd->tr_state != SEGVN_TR_OFF) { 8103 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8104 return (0); 8105 } 8106 } else { 8107 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock,
RW_READER); 8108 } 8109 8110 /* 8111 * Large pages are assumed to be only turned on when accesses to the 8112 * segment's address range have spatial and temporal locality. That 8113 * justifies ignoring MADV_SEQUENTIAL for large page segments. 8114 * Also, ignore advice affecting lgroup memory allocation 8115 * if we don't need to do lgroup optimizations on this system 8116 */ 8117 8118 if ((behav == MADV_SEQUENTIAL && 8119 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) || 8120 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT || 8121 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) { 8122 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8123 return (0); 8124 } 8125 8126 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT || 8127 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) { 8128 /* 8129 * Since we are going to unload hat mappings 8130 * we first have to flush the cache. Otherwise 8131 * this might lead to system panic if another 8132 * thread is doing physio on the range whose 8133 * mappings are unloaded by madvise(3C). 8134 */ 8135 if (svd->softlockcnt > 0) { 8136 /* 8137 * If this is a shared segment, non 0 softlockcnt 8138 * means locked pages are still in use. 8139 */ 8140 if (svd->type == MAP_SHARED) { 8141 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8142 return (EAGAIN); 8143 } 8144 /* 8145 * Since we do have the segvn writers lock 8146 * nobody can fill the cache with entries 8147 * belonging to this seg during the purge. 8148 * The flush either succeeds or we still 8149 * have pending I/Os. In the latter case, 8150 * madvise(3C) fails. 8151 */ 8152 segvn_purge(seg); 8153 if (svd->softlockcnt > 0) { 8154 /* 8155 * Since madvise(3C) is advisory and 8156 * it's not part of UNIX98, madvise(3C) 8157 * failure here doesn't cause any hardship. 8158 * Note that we don't block in "as" layer. 8159 */ 8160 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8161 return (EAGAIN); 8162 } 8163 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 8164 svd->amp->a_softlockcnt > 0) { 8165 /* 8166 * Try to purge this amp's entries from pcache. It 8167 * will succeed only if other segments that share the 8168 * amp have no outstanding softlocks. 8169 */ 8170 segvn_purge(seg); 8171 } 8172 } 8173 8174 amp = svd->amp; 8175 vp = svd->vp; 8176 if (behav == MADV_FREE) { 8177 /* 8178 * MADV_FREE is not supported for segments with an 8179 * underlying object; if anonmap is NULL, anon slots 8180 * are not yet populated and there is nothing for 8181 * us to do. As MADV_FREE is advisory, we don't 8182 * return an error in either case. 8183 */ 8184 if (vp != NULL || amp == NULL) { 8185 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8186 return (0); 8187 } 8188 8189 segvn_purge(seg); 8190 8191 page = seg_page(seg, addr); 8192 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8193 anon_disclaim(amp, svd->anon_index + page, len); 8194 ANON_LOCK_EXIT(&amp->a_rwlock); 8195 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8196 return (0); 8197 } 8198 8199 /* 8200 * If advice is to be applied to the entire segment, 8201 * use the advice field in the seg_data structure; 8202 * otherwise use the appropriate vpage entry.
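 * For example (illustrative call, not taken from this file): a
 * madvise(seg->s_base, seg->s_size, MADV_RANDOM) request covering the
 * whole segment just updates svd->advice, while a request covering
 * only part of the segment forces allocation of the per-page vpage
 * array below so the advice can be recorded page by page.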
8203 */ 8204 if ((addr == seg->s_base) && (len == seg->s_size)) { 8205 switch (behav) { 8206 case MADV_ACCESS_LWP: 8207 case MADV_ACCESS_MANY: 8208 case MADV_ACCESS_DEFAULT: 8209 /* 8210 * Set memory allocation policy for this segment 8211 */ 8212 policy = lgrp_madv_to_policy(behav, len, svd->type); 8213 if (svd->type == MAP_SHARED) 8214 already_set = lgrp_shm_policy_set(policy, amp, 8215 svd->anon_index, vp, svd->offset, len); 8216 else { 8217 /* 8218 * For private memory, need writers lock on 8219 * address space because the segment may be 8220 * split or concatenated when changing policy 8221 */ 8222 if (AS_READ_HELD(seg->s_as, 8223 &seg->s_as->a_lock)) { 8224 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8225 return (IE_RETRY); 8226 } 8227 8228 already_set = lgrp_privm_policy_set(policy, 8229 &svd->policy_info, len); 8230 } 8231 8232 /* 8233 * If policy set already and it shouldn't be reapplied, 8234 * don't do anything. 8235 */ 8236 if (already_set && 8237 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8238 break; 8239 8240 /* 8241 * Mark any existing pages in given range for 8242 * migration 8243 */ 8244 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8245 vp, svd->offset, 1); 8246 8247 /* 8248 * If same policy set already or this is a shared 8249 * memory segment, don't need to try to concatenate 8250 * segment with adjacent ones. 8251 */ 8252 if (already_set || svd->type == MAP_SHARED) 8253 break; 8254 8255 /* 8256 * Try to concatenate this segment with previous 8257 * one and next one, since we changed policy for 8258 * this one and it may be compatible with adjacent 8259 * ones now. 8260 */ 8261 prev = AS_SEGPREV(seg->s_as, seg); 8262 next = AS_SEGNEXT(seg->s_as, seg); 8263 8264 if (next && next->s_ops == &segvn_ops && 8265 addr + len == next->s_base) 8266 (void) segvn_concat(seg, next, 1); 8267 8268 if (prev && prev->s_ops == &segvn_ops && 8269 addr == prev->s_base + prev->s_size) { 8270 /* 8271 * Drop lock for private data of current 8272 * segment before concatenating (deleting) it 8273 * and return IE_REATTACH to tell as_ctl() that 8274 * current segment has changed 8275 */ 8276 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8277 if (!segvn_concat(prev, seg, 1)) 8278 err = IE_REATTACH; 8279 8280 return (err); 8281 } 8282 break; 8283 8284 case MADV_SEQUENTIAL: 8285 /* 8286 * unloading mapping guarantees 8287 * detection in segvn_fault 8288 */ 8289 ASSERT(seg->s_szc == 0); 8290 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8291 hat_unload(seg->s_as->a_hat, addr, len, 8292 HAT_UNLOAD); 8293 /* FALLTHROUGH */ 8294 case MADV_NORMAL: 8295 case MADV_RANDOM: 8296 svd->advice = (uchar_t)behav; 8297 svd->pageadvice = 0; 8298 break; 8299 case MADV_WILLNEED: /* handled in memcntl */ 8300 case MADV_DONTNEED: /* handled in memcntl */ 8301 case MADV_FREE: /* handled above */ 8302 break; 8303 default: 8304 err = EINVAL; 8305 } 8306 } else { 8307 caddr_t eaddr; 8308 struct seg *new_seg; 8309 struct segvn_data *new_svd; 8310 u_offset_t off; 8311 caddr_t oldeaddr; 8312 8313 page = seg_page(seg, addr); 8314 8315 segvn_vpage(seg); 8316 if (svd->vpage == NULL) { 8317 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8318 return (ENOMEM); 8319 } 8320 8321 switch (behav) { 8322 struct vpage *bvpp, *evpp; 8323 8324 case MADV_ACCESS_LWP: 8325 case MADV_ACCESS_MANY: 8326 case MADV_ACCESS_DEFAULT: 8327 /* 8328 * Set memory allocation policy for portion of this 8329 * segment 8330 */ 8331 8332 /* 8333 * Align address and length of advice to page 8334 * boundaries for large pages 8335 */ 8336 if (seg->s_szc != 0) { 8337 
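/*
 * Example with illustrative numbers: for a 4M large page size,
 * the P2ALIGN/P2ROUNDUP pair below widens a request for
 * [base + 1M, base + 3M) to [base, base + 4M), so the policy is
 * applied to whole large pages only.
 */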
size_t pgsz; 8338 8339 pgsz = page_get_pagesize(seg->s_szc); 8340 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 8341 len = P2ROUNDUP(len, pgsz); 8342 } 8343 8344 /* 8345 * Check to see whether policy is set already 8346 */ 8347 policy = lgrp_madv_to_policy(behav, len, svd->type); 8348 8349 anon_index = svd->anon_index + page; 8350 off = svd->offset + (uintptr_t)(addr - seg->s_base); 8351 8352 if (svd->type == MAP_SHARED) 8353 already_set = lgrp_shm_policy_set(policy, amp, 8354 anon_index, vp, off, len); 8355 else 8356 already_set = 8357 (policy == svd->policy_info.mem_policy); 8358 8359 /* 8360 * If policy set already and it shouldn't be reapplied, 8361 * don't do anything. 8362 */ 8363 if (already_set && 8364 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8365 break; 8366 8367 /* 8368 * For private memory, need writers lock on 8369 * address space because the segment may be 8370 * split or concatenated when changing policy 8371 */ 8372 if (svd->type == MAP_PRIVATE && 8373 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) { 8374 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8375 return (IE_RETRY); 8376 } 8377 8378 /* 8379 * Mark any existing pages in given range for 8380 * migration 8381 */ 8382 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8383 vp, svd->offset, 1); 8384 8385 /* 8386 * Don't need to try to split or concatenate 8387 * segments, since policy is same or this is a shared 8388 * memory segment 8389 */ 8390 if (already_set || svd->type == MAP_SHARED) 8391 break; 8392 8393 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 8394 ASSERT(svd->amp == NULL); 8395 ASSERT(svd->tr_state == SEGVN_TR_OFF); 8396 ASSERT(svd->softlockcnt == 0); 8397 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 8398 HAT_REGION_TEXT); 8399 svd->rcookie = HAT_INVALID_REGION_COOKIE; 8400 } 8401 8402 /* 8403 * Split off new segment if advice only applies to a 8404 * portion of existing segment starting in middle 8405 */ 8406 new_seg = NULL; 8407 eaddr = addr + len; 8408 oldeaddr = seg->s_base + seg->s_size; 8409 if (addr > seg->s_base) { 8410 /* 8411 * Must flush I/O page cache 8412 * before splitting segment 8413 */ 8414 if (svd->softlockcnt > 0) 8415 segvn_purge(seg); 8416 8417 /* 8418 * Split segment and return IE_REATTACH to tell 8419 * as_ctl() that current segment changed 8420 */ 8421 new_seg = segvn_split_seg(seg, addr); 8422 new_svd = (struct segvn_data *)new_seg->s_data; 8423 err = IE_REATTACH; 8424 8425 /* 8426 * If new segment ends where old one 8427 * did, try to concatenate the new 8428 * segment with next one. 8429 */ 8430 if (eaddr == oldeaddr) { 8431 /* 8432 * Set policy for new segment 8433 */ 8434 (void) lgrp_privm_policy_set(policy, 8435 &new_svd->policy_info, 8436 new_seg->s_size); 8437 8438 next = AS_SEGNEXT(new_seg->s_as, 8439 new_seg); 8440 8441 if (next && 8442 next->s_ops == &segvn_ops && 8443 eaddr == next->s_base) 8444 (void) segvn_concat(new_seg, 8445 next, 1); 8446 } 8447 } 8448 8449 /* 8450 * Split off end of existing segment if advice only 8451 * applies to a portion of segment ending before 8452 * end of the existing segment 8453 */ 8454 if (eaddr < oldeaddr) { 8455 /* 8456 * Must flush I/O page cache 8457 * before splitting segment 8458 */ 8459 if (svd->softlockcnt > 0) 8460 segvn_purge(seg); 8461 8462 /* 8463 * If beginning of old segment was already 8464 * split off, use new segment to split end off 8465 * from. 
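 * (Illustration with hypothetical sizes: advising [base + 8K,
 * base + 16K) of a 32K segment [base, base + 32K) first split off
 * new_seg = [base + 8K, base + 32K) above; here new_seg is split
 * again at base + 16K, leaving 8K, 8K and 16K segments with the new
 * policy set only on the middle one.)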
8466 */ 8467 if (new_seg != NULL && new_seg != seg) { 8468 /* 8469 * Split segment 8470 */ 8471 (void) segvn_split_seg(new_seg, eaddr); 8472 8473 /* 8474 * Set policy for new segment 8475 */ 8476 (void) lgrp_privm_policy_set(policy, 8477 &new_svd->policy_info, 8478 new_seg->s_size); 8479 } else { 8480 /* 8481 * Split segment and return IE_REATTACH 8482 * to tell as_ctl() that current 8483 * segment changed 8484 */ 8485 (void) segvn_split_seg(seg, eaddr); 8486 err = IE_REATTACH; 8487 8488 (void) lgrp_privm_policy_set(policy, 8489 &svd->policy_info, seg->s_size); 8490 8491 /* 8492 * If new segment starts where old one 8493 * did, try to concatenate it with 8494 * previous segment. 8495 */ 8496 if (addr == seg->s_base) { 8497 prev = AS_SEGPREV(seg->s_as, 8498 seg); 8499 8500 /* 8501 * Drop lock for private data 8502 * of current segment before 8503 * concatenating (deleting) it 8504 */ 8505 if (prev && 8506 prev->s_ops == 8507 &segvn_ops && 8508 addr == prev->s_base + 8509 prev->s_size) { 8510 SEGVN_LOCK_EXIT( 8511 seg->s_as, 8512 &svd->lock); 8513 (void) segvn_concat( 8514 prev, seg, 1); 8515 return (err); 8516 } 8517 } 8518 } 8519 } 8520 break; 8521 case MADV_SEQUENTIAL: 8522 ASSERT(seg->s_szc == 0); 8523 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8524 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 8525 /* FALLTHROUGH */ 8526 case MADV_NORMAL: 8527 case MADV_RANDOM: 8528 bvpp = &svd->vpage[page]; 8529 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8530 for (; bvpp < evpp; bvpp++) 8531 VPP_SETADVICE(bvpp, behav); 8532 svd->advice = MADV_NORMAL; 8533 break; 8534 case MADV_WILLNEED: /* handled in memcntl */ 8535 case MADV_DONTNEED: /* handled in memcntl */ 8536 case MADV_FREE: /* handled above */ 8537 break; 8538 default: 8539 err = EINVAL; 8540 } 8541 } 8542 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8543 return (err); 8544 } 8545 8546 /* 8547 * There is one kind of inheritance that can be specified for pages: 8548 * 8549 * SEGP_INH_ZERO - Pages should be zeroed in the child 8550 */ 8551 static int 8552 segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 8553 { 8554 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8555 struct vpage *bvpp, *evpp; 8556 size_t page; 8557 int ret = 0; 8558 8559 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8560 8561 /* Can't support something we don't know about */ 8562 if (behav != SEGP_INH_ZERO) 8563 return (ENOTSUP); 8564 8565 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 8566 8567 /* 8568 * This must be a straightforward anonymous segment that is mapped 8569 * privately and is not backed by a vnode. 8570 */ 8571 if (svd->tr_state != SEGVN_TR_OFF || 8572 svd->type != MAP_PRIVATE || 8573 svd->vp != NULL) { 8574 ret = EINVAL; 8575 goto out; 8576 } 8577 8578 /* 8579 * If the entire segment has been marked as inherit zero, then there is 8580 * no reason to do anything else. 8581 */ 8582 if (svd->svn_inz == SEGVN_INZ_ALL) { 8583 ret = 0; 8584 goto out; 8585 } 8586 8587 /* 8588 * If this applies to the entire segment, simply mark it and we're done. 8589 */ 8590 if ((addr == seg->s_base) && (len == seg->s_size)) { 8591 svd->svn_inz = SEGVN_INZ_ALL; 8592 ret = 0; 8593 goto out; 8594 } 8595 8596 /* 8597 * We've been asked to mark a subset of this segment as inherit zero, 8598 * therefore we need to manipulate its vpages.
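 * Note that segvn_vpage() can refuse the allocation for extremely
 * large segments: it caps the memory consumed by vpage structs at
 * roughly 5/8 of total_pages, computed with shifts as
 * (total_pages >> 1) + (total_pages >> 3) = (1/2 + 1/8) * total_pages.
 * We turn that refusal into ENOMEM below.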
8599 */ 8600 if (svd->vpage == NULL) { 8601 segvn_vpage(seg); 8602 if (svd->vpage == NULL) { 8603 ret = ENOMEM; 8604 goto out; 8605 } 8606 } 8607 8608 svd->svn_inz = SEGVN_INZ_VPP; 8609 page = seg_page(seg, addr); 8610 bvpp = &svd->vpage[page]; 8611 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8612 for (; bvpp < evpp; bvpp++) 8613 VPP_SETINHZERO(bvpp); 8614 ret = 0; 8615 8616 out: 8617 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8618 return (ret); 8619 } 8620 8621 /* 8622 * Create a vpage structure for this seg. 8623 */ 8624 static void 8625 segvn_vpage(struct seg *seg) 8626 { 8627 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8628 struct vpage *vp, *evp; 8629 static pgcnt_t page_limit = 0; 8630 8631 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 8632 8633 /* 8634 * If no vpage structure exists, allocate one. Copy the protections 8635 * and the advice from the segment itself to the individual pages. 8636 */ 8637 if (svd->vpage == NULL) { 8638 /* 8639 * Start by calculating the number of pages we must allocate to 8640 * track the per-page vpage structs needed for this entire 8641 * segment. If we know now that it will require more than our 8642 * heuristic for the maximum amount of kmem we can consume, then 8643 * fail. We do this here, instead of trying to detect this deep 8644 * in page_resv and propagating the error up, since the entire 8645 * memory allocation stack is not amenable to passing this 8646 * back. Instead, it wants to keep trying. 8647 * 8648 * As a heuristic we set a page limit of 5/8s of total_pages 8649 * for this allocation. We use shifts so that no floating 8650 * point conversion takes place and only need to do the 8651 * calculation once. 8652 */ 8653 ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage); 8654 pgcnt_t npages = mem_needed >> PAGESHIFT; 8655 8656 if (page_limit == 0) 8657 page_limit = (total_pages >> 1) + (total_pages >> 3); 8658 8659 if (npages > page_limit) 8660 return; 8661 8662 svd->pageadvice = 1; 8663 svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP); 8664 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 8665 for (vp = svd->vpage; vp < evp; vp++) { 8666 VPP_SETPROT(vp, svd->prot); 8667 VPP_SETADVICE(vp, svd->advice); 8668 } 8669 } 8670 } 8671 8672 /* 8673 * Dump the pages belonging to this segvn segment. 8674 */ 8675 static void 8676 segvn_dump(struct seg *seg) 8677 { 8678 struct segvn_data *svd; 8679 page_t *pp; 8680 struct anon_map *amp; 8681 ulong_t anon_index; 8682 struct vnode *vp; 8683 u_offset_t off, offset; 8684 pfn_t pfn; 8685 pgcnt_t page, npages; 8686 caddr_t addr; 8687 8688 npages = seg_pages(seg); 8689 svd = (struct segvn_data *)seg->s_data; 8690 vp = svd->vp; 8691 off = offset = svd->offset; 8692 addr = seg->s_base; 8693 8694 if ((amp = svd->amp) != NULL) { 8695 anon_index = svd->anon_index; 8696 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8697 } 8698 8699 for (page = 0; page < npages; page++, offset += PAGESIZE) { 8700 struct anon *ap; 8701 int we_own_it = 0; 8702 8703 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) { 8704 swap_xlate_nopanic(ap, &vp, &off); 8705 } else { 8706 vp = svd->vp; 8707 off = offset; 8708 } 8709 8710 /* 8711 * If pp == NULL, the page either does not exist 8712 * or is exclusively locked. So determine if it 8713 * exists before searching for it.
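 * (page_lookup_nowait() returns the page SE_SHARED locked when the
 * lock can be taken without waiting, which we record in we_own_it so
 * it is dropped afterwards; page_exists() merely confirms the page's
 * identity without locking it, which is enough for dump_addpage().)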
8714 */ 8715 8716 if ((pp = page_lookup_nowait(vp, off, SE_SHARED))) 8717 we_own_it = 1; 8718 else 8719 pp = page_exists(vp, off); 8720 8721 if (pp) { 8722 pfn = page_pptonum(pp); 8723 dump_addpage(seg->s_as, addr, pfn); 8724 if (we_own_it) 8725 page_unlock(pp); 8726 } 8727 addr += PAGESIZE; 8728 dump_timeleft = dump_timeout; 8729 } 8730 8731 if (amp != NULL) 8732 ANON_LOCK_EXIT(&amp->a_rwlock); 8733 } 8734 8735 #ifdef DEBUG 8736 static uint32_t segvn_pglock_mtbf = 0; 8737 #endif 8738 8739 #define PCACHE_SHWLIST ((page_t *)-2) 8740 #define NOPCACHE_SHWLIST ((page_t *)-1) 8741 8742 /* 8743 * Lock/Unlock anon pages over a given range. Return shadow list. This routine 8744 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages 8745 * to avoid the overhead of per page locking, unlocking for subsequent IOs to 8746 * the same parts of the segment. Currently shadow list creation is only 8747 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are 8748 * tagged with segment pointer, starting virtual address and length. This 8749 * approach for MAP_SHARED segments may add many pcache entries for the same 8750 * set of pages and lead to long hash chains that decrease pcache lookup 8751 * performance. To avoid this issue for shared segments shared anon map and 8752 * starting anon index are used for pcache entry tagging. This allows all 8753 * segments to share pcache entries for the same anon range and reduces pcache 8754 * chain's length as well as memory overhead from duplicate shadow lists and 8755 * pcache entries. 8756 * 8757 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd 8758 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock 8759 * part of softlockcnt accounting is done differently for private and shared 8760 * segments. In private segment case softlockcnt is only incremented when a new 8761 * shadow list is created but not when an existing one is found via 8762 * seg_plookup(). pcache entries have reference count incremented/decremented 8763 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0 8764 * reference count can be purged (and purging is needed before segment can be 8765 * freed). When a private segment pcache entry is purged segvn_reclaim() will 8766 * decrement softlockcnt. Since in private segment case each of its pcache 8767 * entries only belongs to this segment we can expect that when 8768 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8769 * segment purge will succeed and softlockcnt will drop to 0. In shared 8770 * segment case reference count in pcache entry counts active locks from many 8771 * different segments so we can't expect segment purging to succeed even when 8772 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8773 * segment. To be able to determine when there are no pending pagelocks in 8774 * shared segment case we don't rely on purging to make softlockcnt drop to 0 8775 * but instead softlockcnt is incremented and decremented for every 8776 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless of whether a new 8777 * shadow list was created or an existing one was found. When softlockcnt 8778 * drops to 0 this segment no longer has any claims for pcached shadow lists 8779 * and the segment can be freed even if there are still active pcache entries 8780 * shared by this segment anon map.
Shared segment pcache entries belong to 8781 * anon map and are typically removed when anon map is freed after all 8782 * processes destroy the segments that use this anon map. 8783 */ 8784 static int 8785 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, 8786 enum lock_type type, enum seg_rw rw) 8787 { 8788 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8789 size_t np; 8790 pgcnt_t adjustpages; 8791 pgcnt_t npages; 8792 ulong_t anon_index; 8793 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE; 8794 uint_t error; 8795 struct anon_map *amp; 8796 pgcnt_t anpgcnt; 8797 struct page **pplist, **pl, *pp; 8798 caddr_t a; 8799 size_t page; 8800 caddr_t lpgaddr, lpgeaddr; 8801 anon_sync_obj_t cookie; 8802 int anlock; 8803 struct anon_map *pamp; 8804 caddr_t paddr; 8805 seg_preclaim_cbfunc_t preclaim_callback; 8806 size_t pgsz; 8807 int use_pcache; 8808 size_t wlen; 8809 uint_t pflags = 0; 8810 int sftlck_sbase = 0; 8811 int sftlck_send = 0; 8812 8813 #ifdef DEBUG 8814 if (type == L_PAGELOCK && segvn_pglock_mtbf) { 8815 hrtime_t ts = gethrtime(); 8816 if ((ts % segvn_pglock_mtbf) == 0) { 8817 return (ENOTSUP); 8818 } 8819 if ((ts % segvn_pglock_mtbf) == 1) { 8820 return (EFAULT); 8821 } 8822 } 8823 #endif 8824 8825 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START, 8826 "segvn_pagelock: start seg %p addr %p", seg, addr); 8827 8828 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8829 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); 8830 8831 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8832 8833 /* 8834 * for now we only support pagelock to anon memory. We would have to 8835 * check protections for vnode objects and call into the vnode driver. 8836 * That's too much for a fast path. Let the fault entry point handle 8837 * it. 8838 */ 8839 if (svd->vp != NULL) { 8840 if (type == L_PAGELOCK) { 8841 error = ENOTSUP; 8842 goto out; 8843 } 8844 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL"); 8845 } 8846 if ((amp = svd->amp) == NULL) { 8847 if (type == L_PAGELOCK) { 8848 error = EFAULT; 8849 goto out; 8850 } 8851 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL"); 8852 } 8853 if (rw != S_READ && rw != S_WRITE) { 8854 if (type == L_PAGELOCK) { 8855 error = ENOTSUP; 8856 goto out; 8857 } 8858 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw"); 8859 } 8860 8861 if (seg->s_szc != 0) { 8862 /* 8863 * We are adjusting the pagelock region to the large page size 8864 * boundary because the unlocked part of a large page cannot 8865 * be freed anyway unless all constituent pages of a large 8866 * page are locked. Bigger regions reduce pcache chain length 8867 * and improve lookup performance. The tradeoff is that the 8868 * very first segvn_pagelock() call for a given page is more 8869 * expensive if only 1 page_t is needed for IO. This is only 8870 * an issue if pcache entry doesn't get reused by several 8871 * subsequent calls. We optimize here for the case when pcache 8872 * is heavily used by repeated IOs to the same address range. 8873 * 8874 * Note segment's page size cannot change while we are holding 8875 * as lock. And then it cannot change while softlockcnt is 8876 * not 0. This will allow us to correctly recalculate large 8877 * page size region for the matching pageunlock/reclaim call 8878 * since as_pageunlock() caller must always match 8879 * as_pagelock() call's addr and len. 8880 * 8881 * For pageunlock *ppp points to the pointer of page_t that 8882 * corresponds to the real unadjusted start address. 
Similar 8883 * for pagelock *ppp must point to the pointer of page_t that 8884 * corresponds to the real unadjusted start address. 8885 */ 8886 pgsz = page_get_pagesize(seg->s_szc); 8887 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 8888 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8889 } else if (len < segvn_pglock_comb_thrshld) { 8890 lpgaddr = addr; 8891 lpgeaddr = addr + len; 8892 adjustpages = 0; 8893 pgsz = PAGESIZE; 8894 } else { 8895 /* 8896 * Align the address range of large enough requests to allow 8897 * combining of different shadow lists into 1 to reduce memory 8898 * overhead from potentially overlapping large shadow lists 8899 * (worst case is we have a 1MB IO into buffers with start 8900 * addresses separated by 4K). Alignment is only possible if 8901 * padded chunks have sufficient access permissions. Note 8902 * permissions won't change between L_PAGELOCK and 8903 * L_PAGEUNLOCK calls since non 0 softlockcnt will force 8904 * segvn_setprot() to wait until softlockcnt drops to 0. This 8905 * allows us to determine in L_PAGEUNLOCK the same range we 8906 * computed in L_PAGELOCK. 8907 * 8908 * If alignment is limited by segment ends set 8909 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when 8910 * these flags are set bump softlockcnt_sbase/softlockcnt_send 8911 * per segment counters. In L_PAGEUNLOCK case decrease 8912 * softlockcnt_sbase/softlockcnt_send counters if 8913 * sftlck_sbase/sftlck_send flags are set. When 8914 * softlockcnt_sbase/softlockcnt_send are non 0 8915 * segvn_concat()/segvn_extend_prev()/segvn_extend_next() 8916 * won't merge the segments. This restriction combined with 8917 * restriction on segment unmapping and splitting for segments 8918 * that have non 0 softlockcnt allows L_PAGEUNLOCK to 8919 * correctly determine the same range that was previously 8920 * locked by matching L_PAGELOCK. 
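 * Illustration (hypothetical values, assuming seg->s_base is 1M
 * aligned and segvn_pglock_comb_balign is 1M): a MAP_PRIVATE request
 * for [s_base + 0x145000, len 0x100000) is padded below to
 * [s_base + 0x100000, s_base + 0x300000), so a later IO whose buffer
 * starts a few KB away in the same region reuses the identical pcache
 * entry instead of creating an overlapping shadow list.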
8921 */ 8922 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16); 8923 pgsz = PAGESIZE; 8924 if (svd->type == MAP_PRIVATE) { 8925 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr, 8926 segvn_pglock_comb_balign); 8927 if (lpgaddr < seg->s_base) { 8928 lpgaddr = seg->s_base; 8929 sftlck_sbase = 1; 8930 } 8931 } else { 8932 ulong_t aix = svd->anon_index + seg_page(seg, addr); 8933 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign); 8934 if (aaix < svd->anon_index) { 8935 lpgaddr = seg->s_base; 8936 sftlck_sbase = 1; 8937 } else { 8938 lpgaddr = addr - ptob(aix - aaix); 8939 ASSERT(lpgaddr >= seg->s_base); 8940 } 8941 } 8942 if (svd->pageprot && lpgaddr != addr) { 8943 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)]; 8944 struct vpage *evp = &svd->vpage[seg_page(seg, addr)]; 8945 while (vp < evp) { 8946 if ((VPP_PROT(vp) & protchk) == 0) { 8947 break; 8948 } 8949 vp++; 8950 } 8951 if (vp < evp) { 8952 lpgaddr = addr; 8953 pflags = 0; 8954 } 8955 } 8956 lpgeaddr = addr + len; 8957 if (pflags) { 8958 if (svd->type == MAP_PRIVATE) { 8959 lpgeaddr = (caddr_t)P2ROUNDUP( 8960 (uintptr_t)lpgeaddr, 8961 segvn_pglock_comb_balign); 8962 } else { 8963 ulong_t aix = svd->anon_index + 8964 seg_page(seg, lpgeaddr); 8965 ulong_t aaix = P2ROUNDUP(aix, 8966 segvn_pglock_comb_palign); 8967 if (aaix < aix) { 8968 lpgeaddr = 0; 8969 } else { 8970 lpgeaddr += ptob(aaix - aix); 8971 } 8972 } 8973 if (lpgeaddr == 0 || 8974 lpgeaddr > seg->s_base + seg->s_size) { 8975 lpgeaddr = seg->s_base + seg->s_size; 8976 sftlck_send = 1; 8977 } 8978 } 8979 if (svd->pageprot && lpgeaddr != addr + len) { 8980 struct vpage *vp; 8981 struct vpage *evp; 8982 8983 vp = &svd->vpage[seg_page(seg, addr + len)]; 8984 evp = &svd->vpage[seg_page(seg, lpgeaddr)]; 8985 8986 while (vp < evp) { 8987 if ((VPP_PROT(vp) & protchk) == 0) { 8988 break; 8989 } 8990 vp++; 8991 } 8992 if (vp < evp) { 8993 lpgeaddr = addr + len; 8994 } 8995 } 8996 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8997 } 8998 8999 /* 9000 * For MAP_SHARED segments we create pcache entries tagged by amp and 9001 * anon index so that we can share pcache entries with other segments 9002 * that map this amp. For private segments pcache entries are tagged 9003 * with segment and virtual address. 9004 */ 9005 if (svd->type == MAP_SHARED) { 9006 pamp = amp; 9007 paddr = (caddr_t)((lpgaddr - seg->s_base) + 9008 ptob(svd->anon_index)); 9009 preclaim_callback = shamp_reclaim; 9010 } else { 9011 pamp = NULL; 9012 paddr = lpgaddr; 9013 preclaim_callback = segvn_reclaim; 9014 } 9015 9016 if (type == L_PAGEUNLOCK) { 9017 VM_STAT_ADD(segvnvmstats.pagelock[0]); 9018 9019 /* 9020 * update hat ref bits for /proc. We need to make sure 9021 * that threads tracing the ref and mod bits of the 9022 * address space get the right data. 9023 * Note: page ref and mod bits are updated at reclaim time 9024 */ 9025 if (seg->s_as->a_vbits) { 9026 for (a = addr; a < addr + len; a += PAGESIZE) { 9027 if (rw == S_WRITE) { 9028 hat_setstat(seg->s_as, a, 9029 PAGESIZE, P_REF | P_MOD); 9030 } else { 9031 hat_setstat(seg->s_as, a, 9032 PAGESIZE, P_REF); 9033 } 9034 } 9035 } 9036 9037 /* 9038 * Check the shadow list entry after the last page used in 9039 * this IO request. If it's NOPCACHE_SHWLIST the shadow list 9040 * was not inserted into pcache and is not large page 9041 * adjusted. In this case call reclaim callback directly and 9042 * don't adjust the shadow list start and size for large 9043 * pages. 
9044 */ 9045 npages = btop(len); 9046 if ((*ppp)[npages] == NOPCACHE_SHWLIST) { 9047 void *ptag; 9048 if (pamp != NULL) { 9049 ASSERT(svd->type == MAP_SHARED); 9050 ptag = (void *)pamp; 9051 paddr = (caddr_t)((addr - seg->s_base) + 9052 ptob(svd->anon_index)); 9053 } else { 9054 ptag = (void *)seg; 9055 paddr = addr; 9056 } 9057 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0); 9058 } else { 9059 ASSERT((*ppp)[npages] == PCACHE_SHWLIST || 9060 IS_SWAPFSVP((*ppp)[npages]->p_vnode)); 9061 len = lpgeaddr - lpgaddr; 9062 npages = btop(len); 9063 seg_pinactive(seg, pamp, paddr, len, 9064 *ppp - adjustpages, rw, pflags, preclaim_callback); 9065 } 9066 9067 if (pamp != NULL) { 9068 ASSERT(svd->type == MAP_SHARED); 9069 ASSERT(svd->softlockcnt >= npages); 9070 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages); 9071 } 9072 9073 if (sftlck_sbase) { 9074 ASSERT(svd->softlockcnt_sbase > 0); 9075 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase); 9076 } 9077 if (sftlck_send) { 9078 ASSERT(svd->softlockcnt_send > 0); 9079 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send); 9080 } 9081 9082 /* 9083 * If someone is blocked while unmapping, we purge 9084 * segment page cache and thus reclaim pplist synchronously 9085 * without waiting for seg_pasync_thread. This speeds up 9086 * unmapping in cases where munmap(2) is called, while 9087 * raw async i/o is still in progress or where a thread 9088 * exits on data fault in a multithreaded application. 9089 */ 9090 if (AS_ISUNMAPWAIT(seg->s_as)) { 9091 if (svd->softlockcnt == 0) { 9092 mutex_enter(&seg->s_as->a_contents); 9093 if (AS_ISUNMAPWAIT(seg->s_as)) { 9094 AS_CLRUNMAPWAIT(seg->s_as); 9095 cv_broadcast(&seg->s_as->a_cv); 9096 } 9097 mutex_exit(&seg->s_as->a_contents); 9098 } else if (pamp == NULL) { 9099 /* 9100 * softlockcnt is not 0 and this is a 9101 * MAP_PRIVATE segment. Try to purge its 9102 * pcache entries to reduce softlockcnt. 9103 * If it drops to 0 segvn_reclaim() 9104 * will wake up a thread waiting on 9105 * unmapwait flag. 9106 * 9107 * We don't purge MAP_SHARED segments with non 9108 * 0 softlockcnt since IO is still in progress 9109 * for such segments. 9110 */ 9111 ASSERT(svd->type == MAP_PRIVATE); 9112 segvn_purge(seg); 9113 } 9114 } 9115 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9116 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 9117 "segvn_pagelock: unlock seg %p addr %p", seg, addr); 9118 return (0); 9119 } 9120 9121 /* The L_PAGELOCK case ... */ 9122 9123 VM_STAT_ADD(segvnvmstats.pagelock[1]); 9124 9125 /* 9126 * For MAP_SHARED segments we have to check protections before 9127 * seg_plookup() since pcache entries may be shared by many segments 9128 * with potentially different page protections. 
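 * For example, one process can map a shared anon segment
 * PROT_READ|PROT_WRITE while another maps the same amp range
 * PROT_READ only; without this check a shadow list cached under the
 * shared amp tag by the writer could satisfy an S_WRITE lookup issued
 * through the read-only mapping.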
9129 */ 9130 if (pamp != NULL) { 9131 ASSERT(svd->type == MAP_SHARED); 9132 if (svd->pageprot == 0) { 9133 if ((svd->prot & protchk) == 0) { 9134 error = EACCES; 9135 goto out; 9136 } 9137 } else { 9138 /* 9139 * check page protections 9140 */ 9141 caddr_t ea; 9142 9143 if (seg->s_szc) { 9144 a = lpgaddr; 9145 ea = lpgeaddr; 9146 } else { 9147 a = addr; 9148 ea = addr + len; 9149 } 9150 for (; a < ea; a += pgsz) { 9151 struct vpage *vp; 9152 9153 ASSERT(seg->s_szc == 0 || 9154 sameprot(seg, a, pgsz)); 9155 vp = &svd->vpage[seg_page(seg, a)]; 9156 if ((VPP_PROT(vp) & protchk) == 0) { 9157 error = EACCES; 9158 goto out; 9159 } 9160 } 9161 } 9162 } 9163 9164 /* 9165 * try to find pages in segment page cache 9166 */ 9167 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags); 9168 if (pplist != NULL) { 9169 if (pamp != NULL) { 9170 npages = btop((uintptr_t)(lpgeaddr - lpgaddr)); 9171 ASSERT(svd->type == MAP_SHARED); 9172 atomic_add_long((ulong_t *)&svd->softlockcnt, 9173 npages); 9174 } 9175 if (sftlck_sbase) { 9176 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase); 9177 } 9178 if (sftlck_send) { 9179 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send); 9180 } 9181 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9182 *ppp = pplist + adjustpages; 9183 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END, 9184 "segvn_pagelock: cache hit seg %p addr %p", seg, addr); 9185 return (0); 9186 } 9187 9188 /* 9189 * For MAP_SHARED segments we already verified above that segment 9190 * protections allow this pagelock operation. 9191 */ 9192 if (pamp == NULL) { 9193 ASSERT(svd->type == MAP_PRIVATE); 9194 if (svd->pageprot == 0) { 9195 if ((svd->prot & protchk) == 0) { 9196 error = EACCES; 9197 goto out; 9198 } 9199 if (svd->prot & PROT_WRITE) { 9200 wlen = lpgeaddr - lpgaddr; 9201 } else { 9202 wlen = 0; 9203 ASSERT(rw == S_READ); 9204 } 9205 } else { 9206 int wcont = 1; 9207 /* 9208 * check page protections 9209 */ 9210 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) { 9211 struct vpage *vp; 9212 9213 ASSERT(seg->s_szc == 0 || 9214 sameprot(seg, a, pgsz)); 9215 vp = &svd->vpage[seg_page(seg, a)]; 9216 if ((VPP_PROT(vp) & protchk) == 0) { 9217 error = EACCES; 9218 goto out; 9219 } 9220 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) { 9221 wlen += pgsz; 9222 } else { 9223 wcont = 0; 9224 ASSERT(rw == S_READ); 9225 } 9226 } 9227 } 9228 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr); 9229 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr); 9230 } 9231 9232 /* 9233 * Only build large page adjusted shadow list if we expect to insert 9234 * it into pcache. For large enough pages it's a big overhead to 9235 * create a shadow list of the entire large page. But this overhead 9236 * should be amortized over repeated pcache hits on subsequent reuse 9237 * of this shadow list (IO into any range within this shadow list will 9238 * find it in pcache since we large page align the request for pcache 9239 * lookups). pcache performance is improved with bigger shadow lists 9240 * as it reduces the time to pcache the entire big segment and reduces 9241 * pcache chain length. 9242 */ 9243 if (seg_pinsert_check(seg, pamp, paddr, 9244 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) { 9245 addr = lpgaddr; 9246 len = lpgeaddr - lpgaddr; 9247 use_pcache = 1; 9248 } else { 9249 use_pcache = 0; 9250 /* 9251 * Since this entry will not be inserted into the pcache, we 9252 * will not do any adjustments to the starting address or 9253 * size of the memory to be locked. 
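 * Layout of the shadow list allocated below: pl[0 .. npages - 1]
 * hold the locked pages and the extra slot pl[npages] is a sentinel,
 * PCACHE_SHWLIST or NOPCACHE_SHWLIST, that lets the matching
 * L_PAGEUNLOCK call decide whether the list was large page adjusted
 * and inserted into pcache.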
9254 */ 9255 adjustpages = 0; 9256 } 9257 npages = btop(len); 9258 9259 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP); 9260 pl = pplist; 9261 *ppp = pplist + adjustpages; 9262 /* 9263 * If use_pcache is 0 this shadow list is not large page adjusted. 9264 * Record this info in the last entry of shadow array so that 9265 * L_PAGEUNLOCK can determine if it should large page adjust the 9266 * address range to find the real range that was locked. 9267 */ 9268 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST; 9269 9270 page = seg_page(seg, addr); 9271 anon_index = svd->anon_index + page; 9272 9273 anlock = 0; 9274 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 9275 ASSERT(amp->a_szc >= seg->s_szc); 9276 anpgcnt = page_get_pagecnt(amp->a_szc); 9277 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) { 9278 struct anon *ap; 9279 struct vnode *vp; 9280 u_offset_t off; 9281 9282 /* 9283 * Lock and unlock anon array only once per large page. 9284 * anon_array_enter() locks the root anon slot according to 9285 * a_szc which can't change while anon map is locked. We lock 9286 * anon the first time through this loop and each time we 9287 * reach anon index that corresponds to a root of a large 9288 * page. 9289 */ 9290 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) { 9291 ASSERT(anlock == 0); 9292 anon_array_enter(amp, anon_index, &cookie); 9293 anlock = 1; 9294 } 9295 ap = anon_get_ptr(amp->ahp, anon_index); 9296 9297 /* 9298 * We must never use seg_pcache for COW pages 9299 * because we might end up with original page still 9300 * lying in seg_pcache even after private page is 9301 * created. This leads to data corruption as 9302 * aio_write refers to the page still in cache 9303 * while all other accesses refer to the private 9304 * page. 9305 */ 9306 if (ap == NULL || ap->an_refcnt != 1) { 9307 struct vpage *vpage; 9308 9309 if (seg->s_szc) { 9310 error = EFAULT; 9311 break; 9312 } 9313 if (svd->vpage != NULL) { 9314 vpage = &svd->vpage[seg_page(seg, a)]; 9315 } else { 9316 vpage = NULL; 9317 } 9318 ASSERT(anlock); 9319 anon_array_exit(&cookie); 9320 anlock = 0; 9321 pp = NULL; 9322 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0, 9323 vpage, &pp, 0, F_INVAL, rw, 1); 9324 if (error) { 9325 error = fc_decode(error); 9326 break; 9327 } 9328 anon_array_enter(amp, anon_index, &cookie); 9329 anlock = 1; 9330 ap = anon_get_ptr(amp->ahp, anon_index); 9331 if (ap == NULL || ap->an_refcnt != 1) { 9332 error = EFAULT; 9333 break; 9334 } 9335 } 9336 swap_xlate(ap, &vp, &off); 9337 pp = page_lookup_nowait(vp, off, SE_SHARED); 9338 if (pp == NULL) { 9339 error = EFAULT; 9340 break; 9341 } 9342 if (ap->an_pvp != NULL) { 9343 anon_swap_free(ap, pp); 9344 } 9345 /* 9346 * Unlock anon if this is the last slot in a large page.
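 * (E.g., if amp->a_szc corresponds to 4 constituent pages, anpgcnt
 * is 4: the anon array lock is entered at the loop start and at every
 * anon index that is a multiple of 4, and exited below after the last
 * slot of each large page, i.e. once per large page rather than once
 * per base page. Illustrative count only.)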
9347 */ 9348 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) { 9349 ASSERT(anlock); 9350 anon_array_exit(&cookie); 9351 anlock = 0; 9352 } 9353 *pplist++ = pp; 9354 } 9355 if (anlock) { /* Ensure the lock is dropped */ 9356 anon_array_exit(&cookie); 9357 } 9358 ANON_LOCK_EXIT(&amp->a_rwlock); 9359 9360 if (a >= addr + len) { 9361 atomic_add_long((ulong_t *)&svd->softlockcnt, npages); 9362 if (pamp != NULL) { 9363 ASSERT(svd->type == MAP_SHARED); 9364 atomic_add_long((ulong_t *)&pamp->a_softlockcnt, 9365 npages); 9366 wlen = len; 9367 } 9368 if (sftlck_sbase) { 9369 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase); 9370 } 9371 if (sftlck_send) { 9372 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send); 9373 } 9374 if (use_pcache) { 9375 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl, 9376 rw, pflags, preclaim_callback); 9377 } 9378 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9379 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END, 9380 "segvn_pagelock: cache fill seg %p addr %p", seg, addr); 9381 return (0); 9382 } 9383 9384 pplist = pl; 9385 np = ((uintptr_t)(a - addr)) >> PAGESHIFT; 9386 while (np > (uint_t)0) { 9387 ASSERT(PAGE_LOCKED(*pplist)); 9388 page_unlock(*pplist); 9389 np--; 9390 pplist++; 9391 } 9392 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9393 out: 9394 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9395 *ppp = NULL; 9396 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 9397 "segvn_pagelock: cache miss seg %p addr %p", seg, addr); 9398 return (error); 9399 } 9400 9401 /* 9402 * purge any cached pages in the I/O page cache 9403 */ 9404 static void 9405 segvn_purge(struct seg *seg) 9406 { 9407 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9408 9409 /* 9410 * pcache is only used by pure anon segments. 9411 */ 9412 if (svd->amp == NULL || svd->vp != NULL) { 9413 return; 9414 } 9415 9416 /* 9417 * For MAP_SHARED segments non 0 segment's softlockcnt means 9418 * active IO is still in progress via this segment. So we only 9419 * purge MAP_SHARED segments when their softlockcnt is 0. 9420 */ 9421 if (svd->type == MAP_PRIVATE) { 9422 if (svd->softlockcnt) { 9423 seg_ppurge(seg, NULL, 0); 9424 } 9425 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) { 9426 seg_ppurge(seg, svd->amp, 0); 9427 } 9428 } 9429 9430 /* 9431 * If async argument is not 0 we are called from pcache async thread and don't 9432 * hold AS lock. 9433 */ 9434 9435 /*ARGSUSED*/ 9436 static int 9437 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9438 enum seg_rw rw, int async) 9439 { 9440 struct seg *seg = (struct seg *)ptag; 9441 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9442 pgcnt_t np, npages; 9443 struct page **pl; 9444 9445 npages = np = btop(len); 9446 ASSERT(npages); 9447 9448 ASSERT(svd->vp == NULL && svd->amp != NULL); 9449 ASSERT(svd->softlockcnt >= npages); 9450 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9451 9452 pl = pplist; 9453 9454 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9455 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9456 9457 while (np > (uint_t)0) { 9458 if (rw == S_WRITE) { 9459 hat_setrefmod(*pplist); 9460 } else { 9461 hat_setref(*pplist); 9462 } 9463 page_unlock(*pplist); 9464 np--; 9465 pplist++; 9466 } 9467 9468 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9469 9470 /* 9471 * If we are pcache async thread we don't hold AS lock. This means if 9472 * softlockcnt drops to 0 after the decrement below address space may 9473 * get freed.
We can't allow it since after softlock decrement to 0 we 9474 * still need to access as structure for possible wakeup of unmap 9475 * waiters. To prevent the disappearance of as we take this segment's 9476 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to 9477 * make sure this routine completes before segment is freed. 9478 * 9479 * The second complication we have to deal with in async case is a 9480 * possibility of missed wake up of unmap wait thread. When we don't 9481 * hold as lock here we may take a_contents lock before unmap wait 9482 * thread that was first to see softlockcnt was still not 0. As a 9483 * result we'll fail to wake up an unmap wait thread. To avoid this 9484 * race we set nounmapwait flag in as structure if we drop softlockcnt 9485 * to 0 when we were called by pcache async thread. unmapwait thread 9486 * will not block if this flag is set. 9487 */ 9488 if (async) { 9489 mutex_enter(&svd->segfree_syncmtx); 9490 } 9491 9492 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) { 9493 if (async || AS_ISUNMAPWAIT(seg->s_as)) { 9494 mutex_enter(&seg->s_as->a_contents); 9495 if (async) { 9496 AS_SETNOUNMAPWAIT(seg->s_as); 9497 } 9498 if (AS_ISUNMAPWAIT(seg->s_as)) { 9499 AS_CLRUNMAPWAIT(seg->s_as); 9500 cv_broadcast(&seg->s_as->a_cv); 9501 } 9502 mutex_exit(&seg->s_as->a_contents); 9503 } 9504 } 9505 9506 if (async) { 9507 mutex_exit(&svd->segfree_syncmtx); 9508 } 9509 return (0); 9510 } 9511 9512 /*ARGSUSED*/ 9513 static int 9514 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9515 enum seg_rw rw, int async) 9516 { 9517 amp_t *amp = (amp_t *)ptag; 9518 pgcnt_t np, npages; 9519 struct page **pl; 9520 9521 npages = np = btop(len); 9522 ASSERT(npages); 9523 ASSERT(amp->a_softlockcnt >= npages); 9524 9525 pl = pplist; 9526 9527 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9528 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9529 9530 while (np > (uint_t)0) { 9531 if (rw == S_WRITE) { 9532 hat_setrefmod(*pplist); 9533 } else { 9534 hat_setref(*pplist); 9535 } 9536 page_unlock(*pplist); 9537 np--; 9538 pplist++; 9539 } 9540 9541 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9542 9543 /* 9544 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt 9545 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0 9546 * and anonmap_purge() acquires a_purgemtx. 9547 */ 9548 mutex_enter(&amp->a_purgemtx); 9549 if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) && 9550 amp->a_purgewait) { 9551 amp->a_purgewait = 0; 9552 cv_broadcast(&amp->a_purgecv); 9553 } 9554 mutex_exit(&amp->a_purgemtx); 9555 return (0); 9556 } 9557 9558 /* 9559 * get a memory ID for an addr in a given segment 9560 * 9561 * XXX only creates PAGESIZE pages if anon slots are not initialized. 9562 * At fault time they will be relocated into larger pages.
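 * (A memid gives a stable identity for the memory backing an
 * address: as the code below shows, it is (as, vaddr) for
 * MAP_PRIVATE, (vp, file offset) for MAP_SHARED vnode mappings and
 * (anon ptr, page offset) for shared anonymous memory.)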
9563 */ 9564 static int 9565 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 9566 { 9567 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9568 struct anon *ap = NULL; 9569 ulong_t anon_index; 9570 struct anon_map *amp; 9571 anon_sync_obj_t cookie; 9572 9573 if (svd->type == MAP_PRIVATE) { 9574 memidp->val[0] = (uintptr_t)seg->s_as; 9575 memidp->val[1] = (uintptr_t)addr; 9576 return (0); 9577 } 9578 9579 if (svd->type == MAP_SHARED) { 9580 if (svd->vp) { 9581 memidp->val[0] = (uintptr_t)svd->vp; 9582 memidp->val[1] = (u_longlong_t)svd->offset + 9583 (uintptr_t)(addr - seg->s_base); 9584 return (0); 9585 } else { 9586 9587 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 9588 if ((amp = svd->amp) != NULL) { 9589 anon_index = svd->anon_index + 9590 seg_page(seg, addr); 9591 } 9592 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9593 9594 ASSERT(amp != NULL); 9595 9596 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 9597 anon_array_enter(amp, anon_index, &cookie); 9598 ap = anon_get_ptr(amp->ahp, anon_index); 9599 if (ap == NULL) { 9600 page_t *pp; 9601 9602 pp = anon_zero(seg, addr, &ap, svd->cred); 9603 if (pp == NULL) { 9604 anon_array_exit(&cookie); 9605 ANON_LOCK_EXIT(&amp->a_rwlock); 9606 return (ENOMEM); 9607 } 9608 ASSERT(anon_get_ptr(amp->ahp, anon_index) 9609 == NULL); 9610 (void) anon_set_ptr(amp->ahp, anon_index, 9611 ap, ANON_SLEEP); 9612 page_unlock(pp); 9613 } 9614 9615 anon_array_exit(&cookie); 9616 ANON_LOCK_EXIT(&amp->a_rwlock); 9617 9618 memidp->val[0] = (uintptr_t)ap; 9619 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 9620 return (0); 9621 } 9622 } 9623 return (EINVAL); 9624 } 9625 9626 static int 9627 sameprot(struct seg *seg, caddr_t a, size_t len) 9628 { 9629 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9630 struct vpage *vpage; 9631 spgcnt_t pages = btop(len); 9632 uint_t prot; 9633 9634 if (svd->pageprot == 0) 9635 return (1); 9636 9637 ASSERT(svd->vpage != NULL); 9638 9639 vpage = &svd->vpage[seg_page(seg, a)]; 9640 prot = VPP_PROT(vpage); 9641 vpage++; 9642 pages--; 9643 while (pages-- > 0) { 9644 if (prot != VPP_PROT(vpage)) 9645 return (0); 9646 vpage++; 9647 } 9648 return (1); 9649 } 9650 9651 /* 9652 * Get memory allocation policy info for specified address in given segment 9653 */ 9654 static lgrp_mem_policy_info_t * 9655 segvn_getpolicy(struct seg *seg, caddr_t addr) 9656 { 9657 struct anon_map *amp; 9658 ulong_t anon_index; 9659 lgrp_mem_policy_info_t *policy_info; 9660 struct segvn_data *svn_data; 9661 u_offset_t vn_off; 9662 vnode_t *vp; 9663 9664 ASSERT(seg != NULL); 9665 9666 svn_data = (struct segvn_data *)seg->s_data; 9667 if (svn_data == NULL) 9668 return (NULL); 9669 9670 /* 9671 * Get policy info for private or shared memory 9672 */ 9673 if (svn_data->type != MAP_SHARED) { 9674 if (svn_data->tr_state != SEGVN_TR_ON) { 9675 policy_info = &svn_data->policy_info; 9676 } else { 9677 policy_info = &svn_data->tr_policy_info; 9678 ASSERT(policy_info->mem_policy == 9679 LGRP_MEM_POLICY_NEXT_SEG); 9680 } 9681 } else { 9682 amp = svn_data->amp; 9683 anon_index = svn_data->anon_index + seg_page(seg, addr); 9684 vp = svn_data->vp; 9685 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base); 9686 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off); 9687 } 9688 9689 return (policy_info); 9690 } 9691 9692 /* 9693 * Bind text vnode segment to an amp. If we bind successfully mappings will be 9694 * established to per vnode mapping per lgroup amp pages instead of to vnode 9695 * pages.
There's one amp per vnode text mapping per lgroup. Many processes 9696 * may share the same text replication amp. If a suitable amp doesn't already 9697 * exist in svntr hash table create a new one. We may fail to bind to amp if 9698 * segment is not eligible for text replication. Code below first checks for 9699 * these conditions. If binding is successful segment tr_state is set to on 9700 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and 9701 * svd->amp remains as NULL. 9702 */ 9703 static void 9704 segvn_textrepl(struct seg *seg) 9705 { 9706 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9707 vnode_t *vp = svd->vp; 9708 u_offset_t off = svd->offset; 9709 size_t size = seg->s_size; 9710 u_offset_t eoff = off + size; 9711 uint_t szc = seg->s_szc; 9712 ulong_t hash = SVNTR_HASH_FUNC(vp); 9713 svntr_t *svntrp; 9714 struct vattr va; 9715 proc_t *p = seg->s_as->a_proc; 9716 lgrp_id_t lgrp_id; 9717 lgrp_id_t olid; 9718 int first; 9719 struct anon_map *amp; 9720 9721 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9722 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9723 ASSERT(p != NULL); 9724 ASSERT(svd->tr_state == SEGVN_TR_INIT); 9725 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9726 ASSERT(svd->flags & MAP_TEXT); 9727 ASSERT(svd->type == MAP_PRIVATE); 9728 ASSERT(vp != NULL && svd->amp == NULL); 9729 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 9730 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0); 9731 ASSERT(seg->s_as != &kas); 9732 ASSERT(off < eoff); 9733 ASSERT(svntr_hashtab != NULL); 9734 9735 /* 9736 * If numa optimizations are no longer desired bail out. 9737 */ 9738 if (!lgrp_optimizations()) { 9739 svd->tr_state = SEGVN_TR_OFF; 9740 return; 9741 } 9742 9743 /* 9744 * Avoid creating anon maps with size bigger than the file size. 9745 * If VOP_GETATTR() call fails bail out. 9746 */ 9747 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME; 9748 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) { 9749 svd->tr_state = SEGVN_TR_OFF; 9750 SEGVN_TR_ADDSTAT(gaerr); 9751 return; 9752 } 9753 if (btopr(va.va_size) < btopr(eoff)) { 9754 svd->tr_state = SEGVN_TR_OFF; 9755 SEGVN_TR_ADDSTAT(overmap); 9756 return; 9757 } 9758 9759 /* 9760 * VVMEXEC may not be set yet if exec() prefaults text segment. Set 9761 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED 9762 * mapping that checks if trcache for this vnode needs to be 9763 * invalidated can't miss us. 9764 */ 9765 if (!(vp->v_flag & VVMEXEC)) { 9766 mutex_enter(&vp->v_lock); 9767 vp->v_flag |= VVMEXEC; 9768 mutex_exit(&vp->v_lock); 9769 } 9770 mutex_enter(&svntr_hashtab[hash].tr_lock); 9771 /* 9772 * Bail out if potentially MAP_SHARED writable mappings exist to this 9773 * vnode. We don't want to use old file contents from existing 9774 * replicas if this mapping was established after the original file 9775 * was changed. 9776 */ 9777 if (vn_is_mapped(vp, V_WRITE)) { 9778 mutex_exit(&svntr_hashtab[hash].tr_lock); 9779 svd->tr_state = SEGVN_TR_OFF; 9780 SEGVN_TR_ADDSTAT(wrcnt); 9781 return; 9782 } 9783 svntrp = svntr_hashtab[hash].tr_head; 9784 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9785 ASSERT(svntrp->tr_refcnt != 0); 9786 if (svntrp->tr_vp != vp) { 9787 continue; 9788 } 9789 9790 /* 9791 * Bail out if the file or its attributes were changed after 9792 * this replication entry was created since we need to use the 9793 * latest file contents. 
Note that mtime test alone is not 9794 * sufficient because a user can explicitly change mtime via 9795 * utimes(2) interfaces back to the old value after modifiying 9796 * the file contents. To detect this case we also have to test 9797 * ctime which among other things records the time of the last 9798 * mtime change by utimes(2). ctime is not changed when the file 9799 * is only read or executed so we expect that typically existing 9800 * replication amp's can be used most of the time. 9801 */ 9802 if (!svntrp->tr_valid || 9803 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec || 9804 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec || 9805 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec || 9806 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) { 9807 mutex_exit(&svntr_hashtab[hash].tr_lock); 9808 svd->tr_state = SEGVN_TR_OFF; 9809 SEGVN_TR_ADDSTAT(stale); 9810 return; 9811 } 9812 /* 9813 * if off, eoff and szc match current segment we found the 9814 * existing entry we can use. 9815 */ 9816 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff && 9817 svntrp->tr_szc == szc) { 9818 break; 9819 } 9820 /* 9821 * Don't create different but overlapping in file offsets 9822 * entries to avoid replication of the same file pages more 9823 * than once per lgroup. 9824 */ 9825 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) || 9826 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) { 9827 mutex_exit(&svntr_hashtab[hash].tr_lock); 9828 svd->tr_state = SEGVN_TR_OFF; 9829 SEGVN_TR_ADDSTAT(overlap); 9830 return; 9831 } 9832 } 9833 /* 9834 * If we didn't find existing entry create a new one. 9835 */ 9836 if (svntrp == NULL) { 9837 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP); 9838 if (svntrp == NULL) { 9839 mutex_exit(&svntr_hashtab[hash].tr_lock); 9840 svd->tr_state = SEGVN_TR_OFF; 9841 SEGVN_TR_ADDSTAT(nokmem); 9842 return; 9843 } 9844 #ifdef DEBUG 9845 { 9846 lgrp_id_t i; 9847 for (i = 0; i < NLGRPS_MAX; i++) { 9848 ASSERT(svntrp->tr_amp[i] == NULL); 9849 } 9850 } 9851 #endif /* DEBUG */ 9852 svntrp->tr_vp = vp; 9853 svntrp->tr_off = off; 9854 svntrp->tr_eoff = eoff; 9855 svntrp->tr_szc = szc; 9856 svntrp->tr_valid = 1; 9857 svntrp->tr_mtime = va.va_mtime; 9858 svntrp->tr_ctime = va.va_ctime; 9859 svntrp->tr_refcnt = 0; 9860 svntrp->tr_next = svntr_hashtab[hash].tr_head; 9861 svntr_hashtab[hash].tr_head = svntrp; 9862 } 9863 first = 1; 9864 again: 9865 /* 9866 * We want to pick a replica with pages on main thread's (t_tid = 1, 9867 * aka T1) lgrp. Currently text replication is only optimized for 9868 * workloads that either have all threads of a process on the same 9869 * lgrp or execute their large text primarily on main thread. 9870 */ 9871 lgrp_id = p->p_t1_lgrpid; 9872 if (lgrp_id == LGRP_NONE) { 9873 /* 9874 * In case exec() prefaults text on non main thread use 9875 * current thread lgrpid. It will become main thread anyway 9876 * soon. 9877 */ 9878 lgrp_id = lgrp_home_id(curthread); 9879 } 9880 /* 9881 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise 9882 * just set it to NLGRPS_MAX if it's different from current process T1 9883 * home lgrp. p_tr_lgrpid is used to detect if process uses text 9884 * replication and T1 new home is different from lgrp used for text 9885 * replication. When this happens asyncronous segvn thread rechecks if 9886 * segments should change lgrps used for text replication. If we fail 9887 * to set p_tr_lgrpid with atomic_cas_32 then set it to NLGRPS_MAX 9888 * without cas if it's not already NLGRPS_MAX and not equal lgrp_id 9889 * we want to use. 
        /*
         * If no amp has been created yet for lgrp_id, create one as long as
         * we have enough memory to afford it. The memory accounting is an
         * optimistic charge/rollback (see the sketch after this function).
         */
        if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
                size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
                if (trmem > segvn_textrepl_max_bytes) {
                        SEGVN_TR_ADDSTAT(normem);
                        goto fail;
                }
                if (anon_try_resv_zone(size, NULL) == 0) {
                        SEGVN_TR_ADDSTAT(noanon);
                        goto fail;
                }
                amp = anonmap_alloc(size, size, ANON_NOSLEEP);
                if (amp == NULL) {
                        anon_unresv_zone(size, NULL);
                        SEGVN_TR_ADDSTAT(nokmem);
                        goto fail;
                }
                ASSERT(amp->refcnt == 1);
                amp->a_szc = szc;
                svntrp->tr_amp[lgrp_id] = amp;
                SEGVN_TR_ADDSTAT(newamp);
        }
        svntrp->tr_refcnt++;
        ASSERT(svd->svn_trnext == NULL);
        ASSERT(svd->svn_trprev == NULL);
        svd->svn_trnext = svntrp->tr_svnhead;
        svd->svn_trprev = NULL;
        if (svntrp->tr_svnhead != NULL) {
                svntrp->tr_svnhead->svn_trprev = svd;
        }
        svntrp->tr_svnhead = svd;
        ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
        ASSERT(amp->refcnt >= 1);
        svd->amp = amp;
        svd->anon_index = 0;
        svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
        svd->tr_policy_info.mem_lgrpid = lgrp_id;
        svd->tr_state = SEGVN_TR_ON;
        mutex_exit(&svntr_hashtab[hash].tr_lock);
        SEGVN_TR_ADDSTAT(repl);
        return;
fail:
        ASSERT(segvn_textrepl_bytes >= size);
        atomic_add_long(&segvn_textrepl_bytes, -size);
        ASSERT(svntrp != NULL);
        ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
        if (svntrp->tr_refcnt == 0) {
                ASSERT(svntrp == svntr_hashtab[hash].tr_head);
                svntr_hashtab[hash].tr_head = svntrp->tr_next;
                mutex_exit(&svntr_hashtab[hash].tr_lock);
                kmem_cache_free(svntr_cache, svntrp);
        } else {
                mutex_exit(&svntr_hashtab[hash].tr_lock);
        }
        svd->tr_state = SEGVN_TR_OFF;
}
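/*
 * The segvn_textrepl_bytes accounting above is an optimistic
 * charge/rollback: the counter is charged before any allocation is
 * attempted and backed out on the fail: path. A minimal stand-alone sketch
 * of the same pattern (the helper name is hypothetical, not part of the
 * original code):
 */
static int
segvn_trmem_try_charge(size_t size)
{
        /* Optimistically charge the text replication memory cap. */
        if (atomic_add_long_nv(&segvn_textrepl_bytes, size) >
            segvn_textrepl_max_bytes) {
                /* Over the cap: back the charge out and fail. */
                atomic_add_long(&segvn_textrepl_bytes, -size);
                return (0);
        }
        return (1);
}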
/*
 * Convert the seg back to a regular vnode mapping seg by unbinding it from
 * its text replication amp. This routine is most typically called when the
 * segment is unmapped, but it can also be called when the segment no longer
 * qualifies for text replication (e.g. due to protection changes). If
 * unload_unmap is set, use the HAT_UNLOAD_UNMAP flag in
 * hat_unload_callback(). If we are the last user of the svntr entry, free
 * all its anon maps and remove it from the hash table.
 */
static void
segvn_textunrepl(struct seg *seg, int unload_unmap)
{
        struct segvn_data *svd = (struct segvn_data *)seg->s_data;
        vnode_t *vp = svd->vp;
        u_offset_t off = svd->offset;
        size_t size = seg->s_size;
        u_offset_t eoff = off + size;
        uint_t szc = seg->s_szc;
        ulong_t hash = SVNTR_HASH_FUNC(vp);
        svntr_t *svntrp;
        svntr_t **prv_svntrp;
        lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
        lgrp_id_t i;

        ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
        ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
            SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
        ASSERT(svd->tr_state == SEGVN_TR_ON);
        ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
        ASSERT(svd->amp != NULL);
        ASSERT(svd->amp->refcnt >= 1);
        ASSERT(svd->anon_index == 0);
        ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
        ASSERT(svntr_hashtab != NULL);

        mutex_enter(&svntr_hashtab[hash].tr_lock);
        prv_svntrp = &svntr_hashtab[hash].tr_head;
        for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
                ASSERT(svntrp->tr_refcnt != 0);
                if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
                    svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
                        break;
                }
        }
        if (svntrp == NULL) {
                panic("segvn_textunrepl: svntr record not found");
        }
        if (svntrp->tr_amp[lgrp_id] != svd->amp) {
                panic("segvn_textunrepl: amp mismatch");
        }
        svd->tr_state = SEGVN_TR_OFF;
        svd->amp = NULL;
        /*
         * Unlink svd from the svntr entry's list of segments (a generic
         * sketch of this unlink follows this function).
         */
        if (svd->svn_trprev == NULL) {
                ASSERT(svntrp->tr_svnhead == svd);
                svntrp->tr_svnhead = svd->svn_trnext;
                if (svntrp->tr_svnhead != NULL) {
                        svntrp->tr_svnhead->svn_trprev = NULL;
                }
                svd->svn_trnext = NULL;
        } else {
                svd->svn_trprev->svn_trnext = svd->svn_trnext;
                if (svd->svn_trnext != NULL) {
                        svd->svn_trnext->svn_trprev = svd->svn_trprev;
                        svd->svn_trnext = NULL;
                }
                svd->svn_trprev = NULL;
        }
        if (--svntrp->tr_refcnt) {
                mutex_exit(&svntr_hashtab[hash].tr_lock);
                goto done;
        }
        *prv_svntrp = svntrp->tr_next;
        mutex_exit(&svntr_hashtab[hash].tr_lock);
        for (i = 0; i < NLGRPS_MAX; i++) {
                struct anon_map *amp = svntrp->tr_amp[i];
                if (amp == NULL) {
                        continue;
                }
                ASSERT(amp->refcnt == 1);
                ASSERT(amp->swresv == size);
                ASSERT(amp->size == size);
                ASSERT(amp->a_szc == szc);
                if (amp->a_szc != 0) {
                        anon_free_pages(amp->ahp, 0, size, szc);
                } else {
                        anon_free(amp->ahp, 0, size);
                }
                svntrp->tr_amp[i] = NULL;
                ASSERT(segvn_textrepl_bytes >= size);
                atomic_add_long(&segvn_textrepl_bytes, -size);
                anon_unresv_zone(amp->swresv, NULL);
                amp->refcnt = 0;
                anonmap_free(amp);
        }
        kmem_cache_free(svntr_cache, svntrp);
done:
        hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
            unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
}
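/*
 * The svn_trprev/svn_trnext manipulation above is a conventional unlink
 * from a doubly-linked list whose head pointer lives in the svntr entry.
 * A minimal stand-alone sketch of the same operation (the type and names
 * are hypothetical, for illustration only):
 */
typedef struct trnode {
        struct trnode *prev;
        struct trnode *next;
} trnode_t;

static void
trnode_unlink(trnode_t **headp, trnode_t *n)
{
        if (n->prev == NULL) {
                /* n is the head of the list */
                ASSERT(*headp == n);
                *headp = n->next;
        } else {
                n->prev->next = n->next;
        }
        if (n->next != NULL) {
                n->next->prev = n->prev;
        }
        n->prev = n->next = NULL;
}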
/*
 * This is called when a MAP_SHARED writable mapping is created to a vnode
 * that is currently used for execution (the VVMEXEC flag is set). In this
 * case we need to prevent further use of the existing replicas.
 */
static void
segvn_inval_trcache(vnode_t *vp)
{
        ulong_t hash = SVNTR_HASH_FUNC(vp);
        svntr_t *svntrp;

        ASSERT(vp->v_flag & VVMEXEC);

        if (svntr_hashtab == NULL) {
                return;
        }

        mutex_enter(&svntr_hashtab[hash].tr_lock);
        svntrp = svntr_hashtab[hash].tr_head;
        for (; svntrp != NULL; svntrp = svntrp->tr_next) {
                ASSERT(svntrp->tr_refcnt != 0);
                if (svntrp->tr_vp == vp && svntrp->tr_valid) {
                        svntrp->tr_valid = 0;
                }
        }
        mutex_exit(&svntr_hashtab[hash].tr_lock);
}

static void
segvn_trasync_thread(void)
{
        callb_cpr_t cpr_info;
        kmutex_t cpr_lock;      /* just for CPR stuff */

        mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);

        CALLB_CPR_INIT(&cpr_info, &cpr_lock,
            callb_generic_cpr, "segvn_async");

        if (segvn_update_textrepl_interval == 0) {
                segvn_update_textrepl_interval = segvn_update_tr_time * hz;
        } else {
                segvn_update_textrepl_interval *= hz;
        }
        (void) timeout(segvn_trupdate_wakeup, NULL,
            segvn_update_textrepl_interval);

        for (;;) {
                mutex_enter(&cpr_lock);
                CALLB_CPR_SAFE_BEGIN(&cpr_info);
                mutex_exit(&cpr_lock);
                sema_p(&segvn_trasync_sem);
                mutex_enter(&cpr_lock);
                CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
                mutex_exit(&cpr_lock);
                segvn_trupdate();
        }
}

static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;

static void
segvn_trupdate_wakeup(void *dummy)
{
        uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();

        if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
                segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
                sema_v(&segvn_trasync_sem);
        }

        if (!segvn_disable_textrepl_update &&
            segvn_update_textrepl_interval != 0) {
                /* re-arm; a generic sketch follows segvn_trupdate() below */
                (void) timeout(segvn_trupdate_wakeup, dummy,
                    segvn_update_textrepl_interval);
        }
}

static void
segvn_trupdate(void)
{
        ulong_t hash;
        svntr_t *svntrp;
        segvn_data_t *svd;

        ASSERT(svntr_hashtab != NULL);

        for (hash = 0; hash < svntr_hashtab_sz; hash++) {
                mutex_enter(&svntr_hashtab[hash].tr_lock);
                svntrp = svntr_hashtab[hash].tr_head;
                for (; svntrp != NULL; svntrp = svntrp->tr_next) {
                        ASSERT(svntrp->tr_refcnt != 0);
                        svd = svntrp->tr_svnhead;
                        for (; svd != NULL; svd = svd->svn_trnext) {
                                segvn_trupdate_seg(svd->seg, svd, svntrp,
                                    hash);
                        }
                }
                mutex_exit(&svntr_hashtab[hash].tr_lock);
        }
}
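/*
 * segvn_trupdate_wakeup() above is an instance of the self-rearming
 * timeout(9F) pattern: the callback re-registers itself for the next
 * interval unless updates have been disabled. A minimal generic sketch
 * (the names below are hypothetical, for illustration only):
 */
static volatile int example_tick_disabled;

static void
example_tick(void *arg)
{
        /* ... one round of periodic work goes here ... */
        if (!example_tick_disabled) {
                /* re-arm ourselves for the next tick */
                (void) timeout(example_tick, arg, hz);
        }
}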
static void
segvn_trupdate_seg(struct seg *seg, segvn_data_t *svd, svntr_t *svntrp,
    ulong_t hash)
{
        proc_t *p;
        lgrp_id_t lgrp_id;
        struct as *as;
        size_t size;
        struct anon_map *amp;

        ASSERT(svd->vp != NULL);
        ASSERT(svd->vp == svntrp->tr_vp);
        ASSERT(svd->offset == svntrp->tr_off);
        ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
        ASSERT(seg != NULL);
        ASSERT(svd->seg == seg);
        ASSERT(seg->s_data == (void *)svd);
        ASSERT(seg->s_szc == svntrp->tr_szc);
        ASSERT(svd->tr_state == SEGVN_TR_ON);
        ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
        ASSERT(svd->amp != NULL);
        ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
        ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
        ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
        ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
        ASSERT(svntrp->tr_refcnt != 0);
        ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));

        as = seg->s_as;
        ASSERT(as != NULL && as != &kas);
        p = as->a_proc;
        ASSERT(p != NULL);
        ASSERT(p->p_tr_lgrpid != LGRP_NONE);
        lgrp_id = p->p_t1_lgrpid;
        if (lgrp_id == LGRP_NONE) {
                return;
        }
        ASSERT(lgrp_id < NLGRPS_MAX);
        if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
                return;
        }

        /*
         * Use tryenter locking since we are taking the as/seg locks and the
         * svntr hash lock in the reverse order from the synchronous thread
         * (a sketch of this pattern follows this function).
         */
        if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
                SEGVN_TR_ADDSTAT(nolock);
                if (segvn_lgrp_trthr_migrs_snpsht) {
                        segvn_lgrp_trthr_migrs_snpsht = 0;
                }
                return;
        }
        if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
                AS_LOCK_EXIT(as, &as->a_lock);
                SEGVN_TR_ADDSTAT(nolock);
                if (segvn_lgrp_trthr_migrs_snpsht) {
                        segvn_lgrp_trthr_migrs_snpsht = 0;
                }
                return;
        }
        size = seg->s_size;
        if (svntrp->tr_amp[lgrp_id] == NULL) {
                size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
                if (trmem > segvn_textrepl_max_bytes) {
                        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
                        AS_LOCK_EXIT(as, &as->a_lock);
                        atomic_add_long(&segvn_textrepl_bytes, -size);
                        SEGVN_TR_ADDSTAT(normem);
                        return;
                }
                if (anon_try_resv_zone(size, NULL) == 0) {
                        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
                        AS_LOCK_EXIT(as, &as->a_lock);
                        atomic_add_long(&segvn_textrepl_bytes, -size);
                        SEGVN_TR_ADDSTAT(noanon);
                        return;
                }
                amp = anonmap_alloc(size, size, ANON_NOSLEEP);
                if (amp == NULL) {
                        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
                        AS_LOCK_EXIT(as, &as->a_lock);
                        atomic_add_long(&segvn_textrepl_bytes, -size);
                        anon_unresv_zone(size, NULL);
                        SEGVN_TR_ADDSTAT(nokmem);
                        return;
                }
                ASSERT(amp->refcnt == 1);
                amp->a_szc = seg->s_szc;
                svntrp->tr_amp[lgrp_id] = amp;
        }
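        /*
         * Any amp created above was installed in tr_amp[lgrp_id] with the
         * bucket lock held, mirroring segvn_textrepl(), so other segments
         * replicating this range can find and share it.
         */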
        /*
         * We don't have to drop the bucket lock, but we do so here to give
         * other threads a chance. svntr and svd can't be unlinked as long
         * as the segment lock is held as a writer and the AS is held as
         * well. After we retake the bucket lock we'll continue from where
         * we left off. We'll be able to reach the end of either list since
         * new entries are always added to the beginning of the lists.
         */
        mutex_exit(&svntr_hashtab[hash].tr_lock);
        hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
        mutex_enter(&svntr_hashtab[hash].tr_lock);

        ASSERT(svd->tr_state == SEGVN_TR_ON);
        ASSERT(svd->amp != NULL);
        ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
        ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
        ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);

        svd->tr_policy_info.mem_lgrpid = lgrp_id;
        svd->amp = svntrp->tr_amp[lgrp_id];
        p->p_tr_lgrpid = NLGRPS_MAX;
        SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
        AS_LOCK_EXIT(as, &as->a_lock);

        ASSERT(svntrp->tr_refcnt != 0);
        ASSERT(svd->vp == svntrp->tr_vp);
        ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
        ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
        ASSERT(svd->seg == seg);
        ASSERT(svd->tr_state == SEGVN_TR_ON);

        SEGVN_TR_ADDSTAT(asyncrepl);
}
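/*
 * segvn_trupdate_seg() above takes the as and segment locks while already
 * holding the svntr bucket mutex, the reverse of the synchronous fault
 * path's lock order. Deadlock is avoided by never blocking on the inner
 * locks: if a tryenter fails, the update is skipped and retried on a later
 * pass of the async thread. A minimal sketch of the pattern with generic
 * locks (the names are hypothetical, not part of the original code):
 */
static int
example_reverse_order_update(kmutex_t *outer, krwlock_t *inner)
{
        ASSERT(mutex_owned(outer));
        if (rw_tryenter(inner, RW_WRITER) == 0) {
                return (0);     /* lost the race; retry on a later pass */
        }
        /* ... perform the update under both locks ... */
        rw_exit(inner);
        return (1);
}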