/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - shared or copy-on-write from a vnode/anonymous memory.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/debug.h>
#include <sys/cred.h>
#include <sys/vmsystm.h>
#include <sys/tuneable.h>
#include <sys/bitmap.h>
#include <sys/swap.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/callb.h>
#include <sys/vm.h>
#include <sys/dumphdr.h>
#include <sys/lgrp.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_vn.h>
#include <vm/pvn.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/vpage.h>
#include <sys/proc.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/zone.h>
#include <sys/shm_impl.h>
/*
 * Private seg op routines.
 */
static int	segvn_dup(struct seg *seg, struct seg *newseg);
static int	segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
static void	segvn_free(struct seg *seg);
static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
		    caddr_t addr, size_t len, enum fault_type type,
		    enum seg_rw rw);
static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
static int	segvn_setprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_checkprot(struct seg *seg, caddr_t addr,
		    size_t len, uint_t prot);
static int	segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segvn_sync(struct seg *seg, caddr_t addr, size_t len,
		    int attr, uint_t flags);
static size_t	segvn_incore(struct seg *seg, caddr_t addr, size_t len,
		    char *vec);
static int	segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
		    int attr, int op, ulong_t *lockmap, size_t pos);
static int	segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
		    uint_t *protv);
static u_offset_t	segvn_getoffset(struct seg *seg, caddr_t addr);
static int	segvn_gettype(struct seg *seg, caddr_t addr);
static int	segvn_getvp(struct seg *seg, caddr_t addr,
		    struct vnode **vpp);
static int	segvn_advise(struct seg *seg, caddr_t addr, size_t len,
		    uint_t behav);
static void	segvn_dump(struct seg *seg);
static int	segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
		    struct page ***ppp, enum lock_type type, enum seg_rw rw);
static int	segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
		    uint_t szc);
static int	segvn_getmemid(struct seg *seg, caddr_t addr,
		    memid_t *memidp);
static lgrp_mem_policy_info_t	*segvn_getpolicy(struct seg *, caddr_t);
static int	segvn_capable(struct seg *seg, segcapability_t capable);

struct seg_ops segvn_ops = {
	segvn_dup,
	segvn_unmap,
	segvn_free,
	segvn_fault,
	segvn_faulta,
	segvn_setprot,
	segvn_checkprot,
	segvn_kluster,
	segvn_sync,
	segvn_incore,
	segvn_lockop,
	segvn_getprot,
	segvn_getoffset,
	segvn_gettype,
	segvn_getvp,
	segvn_advise,
	segvn_dump,
	segvn_pagelock,
	segvn_setpagesize,
	segvn_getmemid,
	segvn_getpolicy,
	segvn_capable,
};

/*
 * Common zfod structures, provided as a shorthand for others to use.
 */
static segvn_crargs_t zfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
static segvn_crargs_t kzfod_segvn_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
	PROT_ALL & ~PROT_USER);
static segvn_crargs_t stack_noexec_crargs =
	SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);

caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs;	/* user zfod argsp */
caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs;	/* kernel zfod argsp */
caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs;	/* executable stack */
caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */

#define	vpgtob(n)	((n) * sizeof (struct vpage))	/* For brevity */

size_t	segvn_comb_thrshld = UINT_MAX;	/* patchable -- see 1196681 */

size_t	segvn_pglock_comb_thrshld = (1UL << 16);	/* 64K */
size_t	segvn_pglock_comb_balign = (1UL << 16);		/* 64K */
uint_t	segvn_pglock_comb_bshift;
size_t	segvn_pglock_comb_palign;

static int	segvn_concat(struct seg *, struct seg *, int);
static int	segvn_extend_prev(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static int	segvn_extend_next(struct seg *, struct seg *,
		    struct segvn_crargs *, size_t);
static void	segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
static void	segvn_pagelist_rele(page_t **);
static void	segvn_setvnode_mpss(vnode_t *);
static void	segvn_relocate_pages(page_t **, page_t *);
static int	segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
static int	segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
    uint_t, page_t **, page_t **, uint_t *, int *);
static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
    caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
    u_offset_t, struct vpage *, page_t **, uint_t,
    enum fault_type, enum seg_rw, int);
static void	segvn_vpage(struct seg *);
static size_t	segvn_count_swap_by_vpages(struct seg *);

static void segvn_purge(struct seg *seg);
static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);
static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
    enum seg_rw, int);

static int sameprot(struct seg *, caddr_t, size_t);

static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
static int segvn_clrszc(struct seg *);
static struct seg *segvn_split_seg(struct seg *, caddr_t);
static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
    ulong_t, uint_t);

static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
    size_t, void *, u_offset_t);

static struct kmem_cache *segvn_cache;
static struct kmem_cache **segvn_szc_cache;

#ifdef VM_STATS
static struct segvnvmstats_str {
	ulong_t	fill_vp_pages[31];
	ulong_t fltvnpages[49];
	ulong_t	fullszcpages[10];
	ulong_t	relocatepages[3];
	ulong_t	fltanpages[17];
	ulong_t pagelock[2];
	ulong_t	demoterange[3];
} segvnvmstats;
#endif /* VM_STATS */

#define	SDR_RANGE	1		/* demote entire range */
#define	SDR_END		2		/* demote non aligned ends only */

#define	CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) {	\
	if ((len) != 0) {						\
		lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);	\
		ASSERT(lpgaddr >= (seg)->s_base);			\
		lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) +	\
		    (len)), pgsz);					\
		ASSERT(lpgeaddr > lpgaddr);				\
		ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size);	\
	} else {							\
		lpgeaddr = lpgaddr = (addr);				\
	}								\
}

/*ARGSUSED*/
static int
segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct segvn_data *svd = buf;

	rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
	svd->svn_trnext = svd->svn_trprev = NULL;
	return (0);
}

/*ARGSUSED1*/
static void
segvn_cache_destructor(void *buf, void *cdrarg)
{
	struct segvn_data *svd = buf;

	rw_destroy(&svd->lock);
	mutex_destroy(&svd->segfree_syncmtx);
}

/*ARGSUSED*/
static int
svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (svntr_t));
	return (0);
}

/*
 * Patching this variable to non-zero allows the system to run with
 * stacks marked as "not executable".  It's a bit of a kludge, but is
 * provided as a tweakable for platforms that export those ABIs
 * (e.g. sparc V8) that have executable stacks enabled by default.
 * There are also some restrictions for platforms that don't actually
 * implement 'noexec' protections.
 *
 * Once enabled, the system is (therefore) unable to provide a fully
 * ABI-compliant execution environment, though practically speaking,
 * most everything works.  The exceptions are generally some interpreters
 * and debuggers that create executable code on the stack and jump
 * into it (without explicitly mprotecting the address range to include
 * PROT_EXEC).
 *
 * One important class of applications that are disabled are those
 * that have been transformed into malicious agents using one of the
 * numerous "buffer overflow" attacks.  See 4007890.
 */
int noexec_user_stack = 0;
int noexec_user_stack_log = 1;

int segvn_lpg_disable = 0;
uint_t segvn_maxpgszc = 0;

ulong_t segvn_vmpss_clrszc_cnt;
ulong_t segvn_vmpss_clrszc_err;
ulong_t segvn_fltvnpages_clrszc_cnt;
ulong_t segvn_fltvnpages_clrszc_err;
ulong_t segvn_setpgsz_align_err;
ulong_t segvn_setpgsz_anon_align_err;
ulong_t segvn_setpgsz_getattr_err;
ulong_t segvn_setpgsz_eof_err;
ulong_t segvn_faultvnmpss_align_err1;
ulong_t segvn_faultvnmpss_align_err2;
ulong_t segvn_faultvnmpss_align_err3;
ulong_t segvn_faultvnmpss_align_err4;
ulong_t segvn_faultvnmpss_align_err5;
ulong_t	segvn_vmpss_pageio_deadlk_err;

int segvn_use_regions = 1;

/*
 * Segvn supports text replication optimization for NUMA platforms. Text
 * replicas are represented by anon maps (amp). There's one amp per text file
 * region per lgroup. A process chooses the amp for each of its text mappings
 * based on the lgroup assignment of its main thread (t_tid = 1). All
 * processes that want a replica on a particular lgroup for the same text file
 * mapping share the same amp. amps are looked up in the svntr_hashtab hash
 * table with vp,off,size,szc used as a key. Text replication segments are
 * read only MAP_PRIVATE|MAP_TEXT segments that map a vnode. Replication is
 * achieved by forcing COW faults from vnode to amp and mapping amp pages
 * instead of vnode pages.
 * A replication amp is assigned to a segment when it gets its first
 * pagefault. To handle main thread lgroup rehoming, segvn_trasync_thread
 * periodically rechecks whether the process still maps an amp local to the
 * main thread. If not, the async thread forces the process to remap to an
 * amp in the new home lgroup of the main thread. The current text replication
 * implementation only benefits workloads that do most of their work in the
 * main thread of a process, or whose threads all run in the same lgroup. To
 * extend the text replication benefit to other types of multithreaded
 * workloads, further work would be needed in the hat layer to allow the same
 * virtual address in the same hat to simultaneously map different physical
 * addresses (i.e. page table replication would be needed for x86).
 *
 * amp pages are used instead of vnode pages as long as the segment has a very
 * simple life cycle. It's created via segvn_create(), handles S_EXEC
 * (S_READ) pagefaults and is fully unmapped. If anything more complicated
 * happens, such as protection being changed, a real COW fault occurring,
 * pagesize being changed, MC_LOCK being requested or the segment being
 * partially unmapped, we turn off text replication by converting the segment
 * back to a vnode-only segment (unmap the segment's address range and set
 * svd->amp to NULL).
 *
 * The original file can be changed after an amp is inserted into
 * svntr_hashtab. Processes that are launched after the file has already
 * changed can't use the replicas created prior to the file change. To
 * implement this functionality hash entries are timestamped. Replicas can
 * only be used if the current file modification time is the same as the
 * timestamp saved when the hash entry was created. However, timestamps alone
 * are not sufficient to detect file modification via mmap(MAP_SHARED)
 * mappings, so we deal with file changes via MAP_SHARED mappings differently.
 * When writable MAP_SHARED mappings are created to vnodes marked as
 * executable we mark all existing replicas for this vnode as not usable for
 * future text mappings. And we don't create new replicas for files that
 * currently have potentially writable MAP_SHARED mappings (i.e.
 * vn_is_mapped(V_WRITE) is true).
 */

#define	SEGVN_TEXTREPL_MAXBYTES_FACTOR	(20)
size_t	segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;

static ulong_t			svntr_hashtab_sz = 512;
static svntr_bucket_t		*svntr_hashtab = NULL;
static struct kmem_cache	*svntr_cache;
static svntr_stats_t		*segvn_textrepl_stats;
static ksema_t			segvn_trasync_sem;

int				segvn_disable_textrepl = 1;
size_t				textrepl_size_thresh = (size_t)-1;
size_t				segvn_textrepl_bytes = 0;
size_t				segvn_textrepl_max_bytes = 0;
clock_t				segvn_update_textrepl_interval = 0;
int				segvn_update_tr_time = 10;
int				segvn_disable_textrepl_update = 0;

static void segvn_textrepl(struct seg *);
static void segvn_textunrepl(struct seg *, int);
static void segvn_inval_trcache(vnode_t *);
static void segvn_trasync_thread(void);
static void segvn_trupdate_wakeup(void *);
static void segvn_trupdate(void);
static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
    ulong_t);

/*
 * Initialize segvn data structures
 */
void
segvn_init(void)
{
	uint_t maxszc;
	uint_t szc;
	size_t pgsz;

	segvn_cache = kmem_cache_create("segvn_cache",
	    sizeof (struct segvn_data), 0,
	    segvn_cache_constructor, segvn_cache_destructor, NULL,
	    NULL, NULL, 0);

	if (segvn_lpg_disable == 0) {
		szc = maxszc = page_num_pagesizes() - 1;
		if (szc == 0) {
			segvn_lpg_disable = 1;
		}
		if (page_get_pagesize(0) != PAGESIZE) {
			panic("segvn_init: bad szc 0");
			/*NOTREACHED*/
		}
		while (szc != 0) {
			pgsz = page_get_pagesize(szc);
			if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
				panic("segvn_init: bad szc %d", szc);
				/*NOTREACHED*/
			}
			szc--;
		}
		if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
			segvn_maxpgszc = maxszc;
	}

	if (segvn_maxpgszc) {
		segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
		    (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
		    KM_SLEEP);
	}

	for (szc = 1; szc <= segvn_maxpgszc; szc++) {
		char	str[32];

		(void) sprintf(str, "segvn_szc_cache%d", szc);
		segvn_szc_cache[szc] = kmem_cache_create(str,
		    page_get_pagecnt(szc) * sizeof (page_t *), 0,
		    NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
	}


	if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
		segvn_use_regions = 0;

	/*
	 * For now shared regions and text replication segvn support
	 * are mutually exclusive. This is acceptable because
	 * currently significant benefit from text replication was
	 * only observed on AMD64 NUMA platforms (due to relatively
	 * small L2$ size) and currently we don't support shared
	 * regions on x86.
	 */
	if (segvn_use_regions && !segvn_disable_textrepl) {
		segvn_disable_textrepl = 1;
	}

#if defined(_LP64)
	if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
	    !segvn_disable_textrepl) {
		ulong_t i;
		size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);

		svntr_cache = kmem_cache_create("svntr_cache",
		    sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
		    NULL, NULL, NULL, 0);
		svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
		for (i = 0; i < svntr_hashtab_sz; i++) {
			mutex_init(&svntr_hashtab[i].tr_lock, NULL,
			    MUTEX_DEFAULT, NULL);
		}
		segvn_textrepl_max_bytes = ptob(physmem) /
		    segvn_textrepl_max_bytes_factor;
		segvn_textrepl_stats = kmem_zalloc(NCPU *
		    sizeof (svntr_stats_t), KM_SLEEP);
		sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
		(void) thread_create(NULL, 0, segvn_trasync_thread,
		    NULL, 0, &p0, TS_RUN, minclsyspri);
	}
#endif

	if (!ISP2(segvn_pglock_comb_balign) ||
	    segvn_pglock_comb_balign < PAGESIZE) {
		segvn_pglock_comb_balign = 1UL << 16; /* 64K */
	}
	segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
	segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
}

#define	SEGVN_PAGEIO	((void *)0x1)
#define	SEGVN_NOPAGEIO	((void *)0x2)

static void
segvn_setvnode_mpss(vnode_t *vp)
{
	int err;

	ASSERT(vp->v_mpssdata == NULL ||
	    vp->v_mpssdata == SEGVN_PAGEIO ||
	    vp->v_mpssdata == SEGVN_NOPAGEIO);

	if (vp->v_mpssdata == NULL) {
		if (vn_vmpss_usepageio(vp)) {
			err = VOP_PAGEIO(vp, (page_t *)NULL,
			    (u_offset_t)0, 0, 0, CRED(), NULL);
		} else {
			err = ENOSYS;
		}
		/*
		 * set v_mpssdata just once per vnode life
		 * so that it never changes.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_mpssdata == NULL) {
			if (err == EINVAL) {
				vp->v_mpssdata = SEGVN_PAGEIO;
			} else {
				vp->v_mpssdata = SEGVN_NOPAGEIO;
			}
		}
		mutex_exit(&vp->v_lock);
	}
}

int
segvn_create(struct seg *seg, void *argsp)
{
	struct segvn_crargs *a = (struct segvn_crargs *)argsp;
	struct segvn_data *svd;
	size_t swresv = 0;
	struct cred *cred;
	struct anon_map *amp;
	int error = 0;
	size_t pgsz;
	lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
	int use_rgn = 0;
	int trok = 0;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
		panic("segvn_create type");
		/*NOTREACHED*/
	}

	/*
	 * Check arguments.  If a shared anon structure is given then
	 * it is illegal to also specify a vp.
	 */
	if (a->amp != NULL && a->vp != NULL) {
		panic("segvn_create anon_map");
		/*NOTREACHED*/
	}

	if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
	    a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
	    segvn_use_regions) {
		use_rgn = 1;
	}

	/* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
	if (a->type == MAP_SHARED)
		a->flags &= ~MAP_NORESERVE;

	if (a->szc != 0) {
		if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
		    (a->amp != NULL && a->type == MAP_PRIVATE) ||
		    (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
			a->szc = 0;
		} else {
			if (a->szc > segvn_maxpgszc)
				a->szc = segvn_maxpgszc;
			pgsz = page_get_pagesize(a->szc);
			if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
			    !IS_P2ALIGNED(seg->s_size, pgsz)) {
				a->szc = 0;
			} else if (a->vp != NULL) {
				if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
					/*
					 * paranoid check.
					 * hat_page_demote() is not supported
					 * on swapfs pages.
					 */
					a->szc = 0;
				} else if (map_addr_vacalign_check(seg->s_base,
				    a->offset & PAGEMASK)) {
					a->szc = 0;
				}
			} else if (a->amp != NULL) {
				pgcnt_t anum = btopr(a->offset);
				pgcnt_t pgcnt = page_get_pagecnt(a->szc);
				if (!IS_P2ALIGNED(anum, pgcnt)) {
					a->szc = 0;
				}
			}
		}
	}

	/*
	 * If segment may need private pages, reserve them now.
	 */
	if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
	    (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
		if (anon_resv_zone(seg->s_size,
		    seg->s_as->a_proc->p_zone) == 0)
			return (EAGAIN);
		swresv = seg->s_size;
		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, swresv, 1);
	}

	/*
	 * Reserve any mapping structures that may be required.
	 *
	 * Don't do it for segments that may use regions. It's currently a
	 * noop in the hat implementations anyway.
	 */
	if (!use_rgn) {
		hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
	}

	if (a->cred) {
		cred = a->cred;
		crhold(cred);
	} else {
		crhold(cred = CRED());
	}

	/* Inform the vnode of the new mapping */
	if (a->vp != NULL) {
		error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
		    seg->s_as, seg->s_base, seg->s_size, a->prot,
		    a->maxprot, a->type, cred, NULL);
		if (error) {
			if (swresv != 0) {
				anon_unresv_zone(swresv,
				    seg->s_as->a_proc->p_zone);
				TRACE_3(TR_FAC_VM, TR_ANON_PROC,
				    "anon proc:%p %lu %u", seg, swresv, 0);
			}
			crfree(cred);
			if (!use_rgn) {
				hat_unload(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, HAT_UNLOAD_UNMAP);
			}
			return (error);
		}
		/*
		 * svntr_hashtab will be NULL if we support shared regions.
		 */
		trok = ((a->flags & MAP_TEXT) &&
		    (seg->s_size > textrepl_size_thresh ||
		    (a->flags & _MAP_TEXTREPL)) &&
		    lgrp_optimizations() && svntr_hashtab != NULL &&
		    a->type == MAP_PRIVATE && swresv == 0 &&
		    !(a->flags & MAP_NORESERVE) &&
		    seg->s_as != &kas && a->vp->v_type == VREG);

		ASSERT(!trok || !use_rgn);
	}

	/*
	 * MAP_NORESERVE mappings don't count towards the VSZ of a process
	 * until we fault the pages in.
	 */
	if ((a->vp == NULL || a->vp->v_type != VREG) &&
	    a->flags & MAP_NORESERVE) {
		seg->s_as->a_resvsize -= seg->s_size;
	}

	/*
	 * If more than one segment in the address space, and they're adjacent
	 * virtually, try to concatenate them.  Don't concatenate if an
	 * explicit anon_map structure was supplied (e.g., SystemV shared
	 * memory) or if we'll use text replication for this segment.
	 */
	if (a->amp == NULL && !use_rgn && !trok) {
		struct seg *pseg, *nseg;
		struct segvn_data *psvd, *nsvd;
		lgrp_mem_policy_t ppolicy, npolicy;
		uint_t	lgrp_mem_policy_flags = 0;
		extern lgrp_mem_policy_t lgrp_mem_default_policy;

		/*
		 * Memory policy flags (lgrp_mem_policy_flags) is valid when
		 * extending stack/heap segments.
		 */
		if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
		    !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
			lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
		} else {
			/*
			 * Get policy when not extending it from another segment
			 */
			mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
		}

		/*
		 * First, try to concatenate the previous and new segments
		 */
		pseg = AS_SEGPREV(seg->s_as, seg);
		if (pseg != NULL &&
		    pseg->s_base + pseg->s_size == seg->s_base &&
		    pseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from previous segment.
			 * When extension is specified (e.g. for heap) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			psvd = (struct segvn_data *)pseg->s_data;
			ppolicy = psvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_UP) {
				if (ppolicy != lgrp_mem_default_policy) {
					mpolicy = ppolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    pseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == ppolicy &&
			    (pseg->s_size + seg->s_size <=
			    segvn_comb_thrshld || psvd->amp == NULL) &&
			    segvn_extend_prev(pseg, seg, a, swresv) == 0) {
				/*
				 * success! now try to concatenate
				 * with following seg
				 */
				crfree(cred);
				nseg = AS_SEGNEXT(pseg->s_as, pseg);
				if (nseg != NULL &&
				    nseg != pseg &&
				    nseg->s_ops == &segvn_ops &&
				    pseg->s_base + pseg->s_size ==
				    nseg->s_base)
					(void) segvn_concat(pseg, nseg, 0);
				ASSERT(pseg->s_szc == 0 ||
				    (a->szc == pseg->s_szc &&
				    IS_P2ALIGNED(pseg->s_base, pgsz) &&
				    IS_P2ALIGNED(pseg->s_size, pgsz)));
				return (0);
			}
		}

		/*
		 * Failed, so try to concatenate with following seg
		 */
		nseg = AS_SEGNEXT(seg->s_as, seg);
		if (nseg != NULL &&
		    seg->s_base + seg->s_size == nseg->s_base &&
		    nseg->s_ops == &segvn_ops) {
			/*
			 * Get memory allocation policy from next segment.
			 * When extension is specified (e.g. for stack) apply
			 * this policy to the new segment regardless of the
			 * outcome of segment concatenation.  Extension occurs
			 * for non-default policy otherwise default policy is
			 * used and is based on extended segment size.
			 */
			nsvd = (struct segvn_data *)nseg->s_data;
			npolicy = nsvd->policy_info.mem_policy;
			if (lgrp_mem_policy_flags ==
			    LGRP_MP_FLAG_EXTEND_DOWN) {
				if (npolicy != lgrp_mem_default_policy) {
					mpolicy = npolicy;
				} else {
					mpolicy = lgrp_mem_policy_default(
					    nseg->s_size + seg->s_size,
					    a->type);
				}
			}

			if (mpolicy == npolicy &&
			    segvn_extend_next(seg, nseg, a, swresv) == 0) {
				crfree(cred);
				ASSERT(nseg->s_szc == 0 ||
				    (a->szc == nseg->s_szc &&
				    IS_P2ALIGNED(nseg->s_base, pgsz) &&
				    IS_P2ALIGNED(nseg->s_size, pgsz)));
				return (0);
			}
		}
	}

	if (a->vp != NULL) {
		VN_HOLD(a->vp);
		if (a->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, a->vp);
	}
	svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	seg->s_ops = &segvn_ops;
	seg->s_data = (void *)svd;
	seg->s_szc = a->szc;

	svd->seg = seg;
	svd->vp = a->vp;
	/*
	 * Anonymous mappings have no backing file so the offset is
	 * meaningless.
	 */
	svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
	svd->prot = a->prot;
	svd->maxprot = a->maxprot;
	svd->pageprot = 0;
	svd->type = a->type;
	svd->vpage = NULL;
	svd->cred = cred;
	svd->advice = MADV_NORMAL;
	svd->pageadvice = 0;
	svd->flags = (ushort_t)a->flags;
	svd->softlockcnt = 0;
	svd->softlockcnt_sbase = 0;
	svd->softlockcnt_send = 0;
	svd->rcookie = HAT_INVALID_REGION_COOKIE;
	svd->pageswap = 0;

	if (a->szc != 0 && a->vp != NULL) {
		segvn_setvnode_mpss(a->vp);
	}
	if (svd->type == MAP_SHARED && svd->vp != NULL &&
	    (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
		ASSERT(vn_is_mapped(svd->vp, V_WRITE));
		segvn_inval_trcache(svd->vp);
	}

	amp = a->amp;
	if ((svd->amp = amp) == NULL) {
		svd->anon_index = 0;
		if (svd->type == MAP_SHARED) {
			svd->swresv = 0;
			/*
			 * Shared mappings to a vp need no other setup.
			 * If we have a shared mapping to an anon_map object
			 * which hasn't been allocated yet,  allocate the
			 * struct now so that it will be properly shared
			 * by remembering the swap reservation there.
			 */
			if (a->vp == NULL) {
				svd->amp = anonmap_alloc(seg->s_size, swresv,
				    ANON_SLEEP);
				svd->amp->a_szc = seg->s_szc;
			}
		} else {
			/*
			 * Private mapping (with or without a vp).
			 * Allocate anon_map when needed.
			 */
			svd->swresv = swresv;
		}
	} else {
		pgcnt_t anon_num;

		/*
		 * Mapping to an existing anon_map structure without a vp.
		 * For now we will insure that the segment size isn't larger
		 * than the size - offset gives us.  Later on we may wish to
		 * have the anon array dynamically allocated itself so that
		 * we don't always have to allocate all the anon pointer slots.
		 * This of course involves adding extra code to check that we
		 * aren't trying to use an anon pointer slot beyond the end
		 * of the currently allocated anon array.
		 */
		if ((amp->size - a->offset) < seg->s_size) {
			panic("segvn_create anon_map size");
			/*NOTREACHED*/
		}

		anon_num = btopr(a->offset);

		if (a->type == MAP_SHARED) {
			/*
			 * SHARED mapping to a given anon_map.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			if (a->szc > amp->a_szc) {
				amp->a_szc = a->szc;
			}
			ANON_LOCK_EXIT(&amp->a_rwlock);
			svd->anon_index = anon_num;
			svd->swresv = 0;
		} else {
			/*
			 * PRIVATE mapping to a given anon_map.
			 * Make sure that all the needed anon
			 * structures are created (so that we will
			 * share the underlying pages if nothing
			 * is written by this mapping) and then
			 * duplicate the anon array as is done
			 * when a privately mapped segment is dup'ed.
			 */
			struct anon *ap;
			caddr_t addr;
			caddr_t eaddr;
			ulong_t anon_idx;
			int hat_flag = HAT_LOAD;

			if (svd->flags & MAP_TEXT) {
				hat_flag |= HAT_LOAD_TEXT;
			}

			svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
			svd->amp->a_szc = seg->s_szc;
			svd->anon_index = 0;
			svd->swresv = swresv;

			/*
			 * Prevent 2 threads from allocating anon
			 * slots simultaneously.
			 */
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			eaddr = seg->s_base + seg->s_size;

			for (anon_idx = anon_num, addr = seg->s_base;
			    addr < eaddr; addr += PAGESIZE, anon_idx++) {
				page_t *pp;

				if ((ap = anon_get_ptr(amp->ahp,
				    anon_idx)) != NULL)
					continue;

				/*
				 * Allocate the anon struct now.
				 * Might as well load up translation
				 * to the page while we're at it...
				 */
				pp = anon_zero(seg, addr, &ap, cred);
				if (ap == NULL || pp == NULL) {
					panic("segvn_create anon_zero");
					/*NOTREACHED*/
				}

				/*
				 * Re-acquire the anon_map lock and
				 * initialize the anon array entry.
				 */
				ASSERT(anon_get_ptr(amp->ahp,
				    anon_idx) == NULL);
				(void) anon_set_ptr(amp->ahp, anon_idx, ap,
				    ANON_SLEEP);

				ASSERT(seg->s_szc == 0);
				ASSERT(!IS_VMODSORT(pp->p_vnode));

				ASSERT(use_rgn == 0);
				hat_memload(seg->s_as->a_hat, addr, pp,
				    svd->prot & ~PROT_WRITE, hat_flag);

				page_unlock(pp);
			}
			ASSERT(seg->s_szc == 0);
			anon_dup(amp->ahp, anon_num, svd->amp->ahp,
			    0, seg->s_size);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}
	}

	/*
	 * Set default memory allocation policy for segment
	 *
	 * Always set policy for private memory at least for initialization
	 * even if this is a shared memory segment
	 */
	(void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);

	if (svd->type == MAP_SHARED)
		(void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
		    svd->vp, svd->offset, seg->s_size);

	if (use_rgn) {
		ASSERT(!trok);
		ASSERT(svd->amp == NULL);
		svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
		    seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
		    (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
		    HAT_REGION_TEXT);
	}

	ASSERT(!trok || !(svd->prot & PROT_WRITE));
	svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;

	return (0);
}

/*
 * Concatenate two existing segments, if possible.
 * Return 0 on success, -1 if two segments are not compatible
 * or -2 on memory allocation failure.
 * If amp_cat == 1 then try and concat segments with anon maps
 */
static int
segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
{
	struct segvn_data *svd1 = seg1->s_data;
	struct segvn_data *svd2 = seg2->s_data;
	struct anon_map *amp1 = svd1->amp;
	struct anon_map *amp2 = svd2->amp;
	struct vpage *vpage1 = svd1->vpage;
	struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
	size_t size, nvpsize;
	pgcnt_t npages1, npages2;

	ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
	ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
	ASSERT(seg1->s_ops == seg2->s_ops);

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
	    HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* both segments exist, try to merge them */
#define	incompat(x)	(svd1->x != svd2->x)
	if (incompat(vp) || incompat(maxprot) ||
	    (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
	    (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
	    incompat(type) || incompat(cred) || incompat(flags) ||
	    seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
	    (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
		return (-1);
#undef incompat

	/*
	 * vp == NULL implies zfod, offset doesn't matter
	 */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != svd2->offset) {
		return (-1);
	}

	/*
	 * Don't concatenate if either segment uses text replication.
	 */
	if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	/*
	 * Fail early if we're not supposed to concatenate
	 * segments with non NULL amp.
	 */
	if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
		return (-1);
	}

	if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
		if (amp1 != amp2) {
			return (-1);
		}
		if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
		    svd2->anon_index) {
			return (-1);
		}
		ASSERT(amp1 == NULL || amp1->refcnt >= 2);
	}

	/*
	 * If either seg has vpages, create a new merged vpage array.
	 */
	if (vpage1 != NULL || vpage2 != NULL) {
		struct vpage *vp, *evp;

		npages1 = seg_pages(seg1);
		npages2 = seg_pages(seg2);
		nvpsize = vpgtob(npages1 + npages2);

		if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
			return (-2);
		}

		if (vpage1 != NULL) {
			bcopy(vpage1, nvpage, vpgtob(npages1));
		} else {
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETPROT(vp, svd1->prot);
				VPP_SETADVICE(vp, svd1->advice);
			}
		}

		if (vpage2 != NULL) {
			bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
		} else {
			evp = nvpage + npages1 + npages2;
			for (vp = nvpage + npages1; vp < evp; vp++) {
				VPP_SETPROT(vp, svd2->prot);
				VPP_SETADVICE(vp, svd2->advice);
			}
		}

		if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
			ASSERT(svd1->swresv == seg1->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			evp = nvpage + npages1;
			for (vp = nvpage; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}

		if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
			ASSERT(svd2->swresv == seg2->s_size);
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			vp = nvpage + npages1;
			evp = vp + npages2;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT((vpage1 != NULL || vpage2 != NULL) ||
	    (svd1->pageswap == 0 && svd2->pageswap == 0));

	/*
	 * If either segment has private pages, create a new merged anon
	 * array. If merging shared anon segments just decrement anon map's
	 * refcnt.
	 */
	if (amp1 != NULL && svd1->type == MAP_SHARED) {
		ASSERT(amp1 == amp2 && svd1->vp == NULL);
		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		ASSERT(amp1->refcnt >= 2);
		amp1->refcnt--;
		ANON_LOCK_EXIT(&amp1->a_rwlock);
		svd2->amp = NULL;
	} else if (amp1 != NULL || amp2 != NULL) {
		struct anon_hdr *nahp;
		struct anon_map *namp = NULL;
		size_t asize;

		ASSERT(svd1->type == MAP_PRIVATE);

		asize = seg1->s_size + seg2->s_size;
		if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
			if (nvpage != NULL) {
				kmem_free(nvpage, nvpsize);
			}
			return (-2);
		}
		if (amp1 != NULL) {
			/*
			 * XXX anon rwlock is not really needed because
			 * this is a private segment and we are writers.
			 */
			ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
			ASSERT(amp1->refcnt == 1);
			if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
			    nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp1->a_rwlock);
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp2 != NULL) {
			ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
			ASSERT(amp2->refcnt == 1);
			if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
			    nahp, btop(seg1->s_size), btop(seg2->s_size),
			    ANON_NOSLEEP)) {
				anon_release(nahp, btop(asize));
				ANON_LOCK_EXIT(&amp2->a_rwlock);
				if (amp1 != NULL) {
					ANON_LOCK_EXIT(&amp1->a_rwlock);
				}
				if (nvpage != NULL) {
					kmem_free(nvpage, nvpsize);
				}
				return (-2);
			}
		}
		if (amp1 != NULL) {
			namp = amp1;
			anon_release(amp1->ahp, btop(amp1->size));
		}
		if (amp2 != NULL) {
			if (namp == NULL) {
				ASSERT(amp1 == NULL);
				namp = amp2;
				anon_release(amp2->ahp, btop(amp2->size));
			} else {
				amp2->refcnt--;
				ANON_LOCK_EXIT(&amp2->a_rwlock);
				anonmap_free(amp2);
			}
			svd2->amp = NULL; /* needed for seg_free */
		}
		namp->ahp = nahp;
		namp->size = asize;
		svd1->amp = namp;
		svd1->anon_index = 0;
		ANON_LOCK_EXIT(&namp->a_rwlock);
	}
	/*
	 * Now free the old vpage structures.
	 */
	if (nvpage != NULL) {
		if (vpage1 != NULL) {
			kmem_free(vpage1, vpgtob(npages1));
		}
		if (vpage2 != NULL) {
			svd2->vpage = NULL;
			kmem_free(vpage2, vpgtob(npages2));
		}
		if (svd2->pageprot) {
			svd1->pageprot = 1;
		}
		if (svd2->pageadvice) {
			svd1->pageadvice = 1;
		}
		if (svd2->pageswap) {
			svd1->pageswap = 1;
		}
		svd1->vpage = nvpage;
	}

	/* all looks ok, merge segments */
	svd1->swresv += svd2->swresv;
	svd2->swresv = 0;  /* so seg_free doesn't release swap space */
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	return (0);
}

/*
 * Extend the previous segment (seg1) to include the
 * new segment (seg2 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_prev(seg1, seg2, a, swresv)
	struct seg *seg1, *seg2;
	struct segvn_crargs *a;
	size_t swresv;
{
	struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
	size_t size;
	struct anon_map *amp1;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));

	if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
		return (-1);
	}

	/* second segment is new, try to extend first */
	/* XXX - should also check cred */
	if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
	    (!svd1->pageprot && (svd1->prot != a->prot)) ||
	    svd1->type != a->type || svd1->flags != a->flags ||
	    seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
		return (-1);

	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd1->vp != NULL &&
	    svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
		return (-1);

	if (svd1->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp1 = svd1->amp;
	if (amp1) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 *
		 * Can't grow a MAP_SHARED segment with an anonmap because
		 * there may be existing anon slots where we want to extend
		 * the segment and we wouldn't know what to do with them
		 * (e.g., for tmpfs right thing is to just leave them there,
		 * for /dev/zero they should be cleared out).
		 */
		if (svd1->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
		if (amp1->refcnt > 1) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
		    btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp1->a_rwlock);
			return (-1);
		}
		amp1->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp1->a_rwlock);
	}
	if (svd1->vpage != NULL) {
		struct vpage *vp, *evp;
		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL)
			return (-1);
		bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
		kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
		svd1->vpage = new_vpage;

		vp = new_vpage + seg_pages(seg1);
		evp = vp + seg_pages(seg2);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
		if (svd1->pageswap && swresv) {
			ASSERT(!(svd1->flags & MAP_NORESERVE));
			ASSERT(swresv == seg2->s_size);
			vp = new_vpage + seg_pages(seg1);
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
	size = seg2->s_size;
	seg_free(seg2);
	seg1->s_size += size;
	svd1->swresv += swresv;
	if (svd1->pageprot && (a->prot & PROT_WRITE) &&
	    svd1->type == MAP_SHARED && svd1->vp != NULL &&
	    (svd1->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
		segvn_inval_trcache(svd1->vp);
	}
	return (0);
}

/*
 * Extend the next segment (seg2) to include the
 * new segment (seg1 + a), if possible.
 * Return 0 on success.
 */
static int
segvn_extend_next(
	struct seg *seg1,
	struct seg *seg2,
	struct segvn_crargs *a,
	size_t swresv)
{
	struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
	size_t size;
	struct anon_map *amp2;
	struct vpage *new_vpage;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));

	if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
		return (-1);
	}

	/* first segment is new, try to extend second */
	/* XXX - should also check cred */
	if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
	    (!svd2->pageprot && (svd2->prot != a->prot)) ||
	    svd2->type != a->type || svd2->flags != a->flags ||
	    seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
		return (-1);
	/* vp == NULL implies zfod, offset doesn't matter */
	if (svd2->vp != NULL &&
	    (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
		return (-1);

	if (svd2->tr_state != SEGVN_TR_OFF) {
		return (-1);
	}

	amp2 = svd2->amp;
	if (amp2) {
		pgcnt_t newpgs;

		/*
		 * Segment has private pages, can data structures
		 * be expanded?
		 *
		 * Acquire the anon_map lock to prevent it from changing,
		 * if it is shared.  This ensures that the anon_map
		 * will not change while a thread which has a read/write
		 * lock on an address space references it.
		 *
		 * XXX - Don't need the anon_map lock at all if "refcnt"
		 * is 1.
		 */
		if (svd2->type == MAP_SHARED)
			return (-1);

		ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
		if (amp2->refcnt > 1) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
		    btop(seg2->s_size), btop(seg1->s_size),
		    ANON_NOSLEEP | ANON_GROWDOWN);

		if (newpgs == 0) {
			ANON_LOCK_EXIT(&amp2->a_rwlock);
			return (-1);
		}
		amp2->size = ptob(newpgs);
		ANON_LOCK_EXIT(&amp2->a_rwlock);
	}
	if (svd2->vpage != NULL) {
		struct vpage *vp, *evp;
		new_vpage =
		    kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
		    KM_NOSLEEP);
		if (new_vpage == NULL) {
			/* Not merging segments so adjust anon_index back */
			if (amp2)
				svd2->anon_index += seg_pages(seg1);
			return (-1);
		}
		bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
		    vpgtob(seg_pages(seg2)));
		kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
		svd2->vpage = new_vpage;

		vp = new_vpage;
		evp = vp + seg_pages(seg1);
		for (; vp < evp; vp++)
			VPP_SETPROT(vp, a->prot);
		if (svd2->pageswap && swresv) {
			ASSERT(!(svd2->flags & MAP_NORESERVE));
			ASSERT(swresv == seg1->s_size);
			vp = new_vpage;
			for (; vp < evp; vp++) {
				VPP_SETSWAPRES(vp);
			}
		}
	}
	ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
	size = seg1->s_size;
	seg_free(seg1);
	seg2->s_size += size;
	seg2->s_base -= size;
	svd2->offset -= size;
	svd2->swresv += swresv;
	if (svd2->pageprot && (a->prot & PROT_WRITE) &&
	    svd2->type == MAP_SHARED && svd2->vp != NULL &&
	    (svd2->vp->v_flag & VVMEXEC)) {
		ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
		segvn_inval_trcache(svd2->vp);
	}
	return (0);
}

static int
segvn_dup(struct seg *seg, struct seg *newseg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *newsvd;
	pgcnt_t npages = seg_pages(seg);
	int error = 0;
	uint_t prot;
	size_t len;
	struct anon_map *amp;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(newseg->s_as->a_proc->p_parent == curproc);

	/*
	 * If segment has anon reserved, reserve more for the new seg.
	 * For a MAP_NORESERVE segment swresv will be a count of all the
	 * allocated anon slots; thus we reserve for the child as many slots
	 * as the parent has allocated. This semantic prevents the child or
	 * parent from dying during a copy-on-write fault caused by trying
	 * to write a shared pre-existing anon page.
	 */
	if ((len = svd->swresv) != 0) {
		if (anon_resv(svd->swresv) == 0)
			return (ENOMEM);

		TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
		    seg, len, 0);
	}

	newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);

	newseg->s_ops = &segvn_ops;
	newseg->s_data = (void *)newsvd;
	newseg->s_szc = seg->s_szc;

	newsvd->seg = newseg;
	if ((newsvd->vp = svd->vp) != NULL) {
		VN_HOLD(svd->vp);
		if (svd->type == MAP_SHARED)
			lgrp_shm_policy_init(NULL, svd->vp);
	}
	newsvd->offset = svd->offset;
	newsvd->prot = svd->prot;
	newsvd->maxprot = svd->maxprot;
	newsvd->pageprot = svd->pageprot;
	newsvd->type = svd->type;
	newsvd->cred = svd->cred;
	crhold(newsvd->cred);
	newsvd->advice = svd->advice;
	newsvd->pageadvice = svd->pageadvice;
	newsvd->swresv = svd->swresv;
	newsvd->pageswap = svd->pageswap;
	newsvd->flags = svd->flags;
	newsvd->softlockcnt = 0;
	newsvd->softlockcnt_sbase = 0;
	newsvd->softlockcnt_send = 0;
	newsvd->policy_info = svd->policy_info;
	newsvd->rcookie = HAT_INVALID_REGION_COOKIE;

	if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
		/*
		 * Not attaching to a shared anon object.
		 */
		ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
		    svd->tr_state == SEGVN_TR_OFF);
		if (svd->tr_state == SEGVN_TR_ON) {
			ASSERT(newsvd->vp != NULL && amp != NULL);
			newsvd->tr_state = SEGVN_TR_INIT;
		} else {
			newsvd->tr_state = svd->tr_state;
		}
		newsvd->amp = NULL;
		newsvd->anon_index = 0;
	} else {
		/* regions for now are only used on pure vnode segments */
		ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
		ASSERT(svd->tr_state == SEGVN_TR_OFF);
		newsvd->tr_state = SEGVN_TR_OFF;
		if (svd->type == MAP_SHARED) {
			newsvd->amp = amp;
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			amp->refcnt++;
			ANON_LOCK_EXIT(&amp->a_rwlock);
			newsvd->anon_index = svd->anon_index;
		} else {
			int reclaim = 1;

			/*
			 * Allocate and initialize new anon_map structure.
			 */
			newsvd->amp = anonmap_alloc(newseg->s_size, 0,
			    ANON_SLEEP);
			newsvd->amp->a_szc = newseg->s_szc;
			newsvd->anon_index = 0;

			/*
			 * We don't have to acquire the anon_map lock
			 * for the new segment (since it belongs to an
			 * address space that is still not associated
			 * with any process), or the segment in the old
			 * address space (since all threads in it
			 * are stopped while duplicating the address space).
			 */

			/*
			 * The goal of the following code is to make sure that
			 * softlocked pages do not end up as copy on write
			 * pages.
			 * This would cause problems where one
			 * thread writes to a page that is COW and a different
			 * thread in the same process has softlocked it. The
			 * softlock lock would move away from this process
			 * because the write would cause this process to get
			 * a copy (without the softlock).
			 *
			 * The strategy here is to just break the
			 * sharing on pages that could possibly be
			 * softlocked.
			 */
retry:
			if (svd->softlockcnt) {
				struct anon *ap, *newap;
				size_t i;
				uint_t vpprot;
				page_t *anon_pl[1+1], *pp;
				caddr_t addr;
				ulong_t old_idx = svd->anon_index;
				ulong_t new_idx = 0;

				/*
				 * The softlock count might be non zero
				 * because some pages are still stuck in the
				 * cache for lazy reclaim. Flush the cache
				 * now. This should drop the count to zero.
				 * [or there is really I/O going on to these
				 * pages]. Note, we have the writers lock so
				 * nothing gets inserted during the flush.
				 */
				if (reclaim == 1) {
					segvn_purge(seg);
					reclaim = 0;
					goto retry;
				}
				i = btopr(seg->s_size);
				addr = seg->s_base;
				/*
				 * XXX break cow sharing using PAGESIZE
				 * pages. They will be relocated into larger
				 * pages at fault time.
				 */
				while (i-- > 0) {
					if (ap = anon_get_ptr(amp->ahp,
					    old_idx)) {
						error = anon_getpage(&ap,
						    &vpprot, anon_pl, PAGESIZE,
						    seg, addr, S_READ,
						    svd->cred);
						if (error) {
							newsvd->vpage = NULL;
							goto out;
						}
						/*
						 * prot need not be computed
						 * below 'cause anon_private is
						 * going to ignore it anyway
						 * as child doesn't inherit
						 * pagelock from parent.
						 */
						prot = svd->pageprot ?
						    VPP_PROT(
						    &svd->vpage[
						    seg_page(seg, addr)])
						    : svd->prot;
						pp = anon_private(&newap,
						    newseg, addr, prot,
						    anon_pl[0], 0,
						    newsvd->cred);
						if (pp == NULL) {
							/* no mem abort */
							newsvd->vpage = NULL;
							error = ENOMEM;
							goto out;
						}
						(void) anon_set_ptr(
						    newsvd->amp->ahp, new_idx,
						    newap, ANON_SLEEP);
						page_unlock(pp);
					}
					addr += PAGESIZE;
					old_idx++;
					new_idx++;
				}
			} else {	/* common case */
				if (seg->s_szc != 0) {
					/*
					 * If at least one of anon slots of a
					 * large page exists then make sure
					 * all anon slots of a large page
					 * exist to avoid partial cow sharing
					 * of a large page in the future.
					 */
					anon_dup_fill_holes(amp->ahp,
					    svd->anon_index, newsvd->amp->ahp,
					    0, seg->s_size, seg->s_szc,
					    svd->vp != NULL);
				} else {
					anon_dup(amp->ahp, svd->anon_index,
					    newsvd->amp->ahp, 0, seg->s_size);
				}

				hat_clrattr(seg->s_as->a_hat, seg->s_base,
				    seg->s_size, PROT_WRITE);
			}
		}
	}
	/*
	 * If necessary, create a vpage structure for the new segment.
	 * Do not copy any page lock indications.
	 */
	if (svd->vpage != NULL) {
		uint_t i;
		struct vpage *ovp = svd->vpage;
		struct vpage *nvp;

		nvp = newsvd->vpage =
		    kmem_alloc(vpgtob(npages), KM_SLEEP);
		for (i = 0; i < npages; i++) {
			*nvp = *ovp++;
			VPP_CLRPPLOCK(nvp++);
		}
	} else
		newsvd->vpage = NULL;

	/* Inform the vnode of the new mapping */
	if (newsvd->vp != NULL) {
		error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
		    newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
		    newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
	}
out:
	if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
		ASSERT(newsvd->amp == NULL);
		ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
		newsvd->rcookie = svd->rcookie;
		hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
	}
	return (error);
}


/*
 * callback function to invoke free_vp_pages() for only those pages actually
 * processed by the HAT when a shared region is destroyed.
 */
extern int free_pages;

static void
segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
    size_t r_size, void *r_obj, u_offset_t r_objoff)
{
	u_offset_t off;
	size_t len;
	vnode_t *vp = (vnode_t *)r_obj;

	ASSERT(eaddr > saddr);
	ASSERT(saddr >= r_saddr);
	ASSERT(saddr < r_saddr + r_size);
	ASSERT(eaddr > r_saddr);
	ASSERT(eaddr <= r_saddr + r_size);
	ASSERT(vp != NULL);

	if (!free_pages) {
		return;
	}

	len = eaddr - saddr;
	off = (saddr - r_saddr) + r_objoff;
	free_vp_pages(vp, off, len);
}

/*
 * callback function used by segvn_unmap to invoke free_vp_pages() for only
 * those pages actually processed by the HAT
 */
static void
segvn_hat_unload_callback(hat_callback_t *cb)
{
	struct seg		*seg = cb->hcb_data;
	struct segvn_data	*svd = (struct segvn_data *)seg->s_data;
	size_t			len;
	u_offset_t		off;

	ASSERT(svd->vp != NULL);
	ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
	ASSERT(cb->hcb_start_addr >= seg->s_base);

	len = cb->hcb_end_addr - cb->hcb_start_addr;
	off = cb->hcb_start_addr - seg->s_base;
	free_vp_pages(svd->vp, svd->offset + off, len);
}

/*
 * This function determines the number of bytes of swap reserved by
 * a segment for which per-page accounting is present. It is used to
 * calculate the correct value of a segvn_data's swresv.
 */
static size_t
segvn_count_swap_by_vpages(struct seg *seg)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct vpage *vp, *evp;
	size_t nswappages = 0;

	ASSERT(svd->pageswap);
	ASSERT(svd->vpage != NULL);

	evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];

	for (vp = svd->vpage; vp < evp; vp++) {
		if (VPP_ISSWAPRES(vp))
			nswappages++;
	}

	return (nswappages << PAGESHIFT);
}

static int
segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	struct segvn_data *nsvd;
	struct seg *nseg;
	struct anon_map *amp;
	pgcnt_t	opages;		/* old segment size in pages */
	pgcnt_t	npages;		/* new segment size in pages */
	pgcnt_t	dpages;		/* pages being deleted (unmapped) */
	hat_callback_t callback;	/* used for free_vp_pages() */
	hat_callback_t *cbp = NULL;
	caddr_t nbase;
	size_t nsize;
	size_t oswresv;
	int reclaim = 1;

	/*
	 * We don't need any segment level locks for "segvn" data
	 * since the address space is "write" locked.
	 */
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * Fail the unmap if pages are SOFTLOCKed through this mapping.
	 * softlockcnt is protected from change by the as write lock.
	 */
retry:
	if (svd->softlockcnt > 0) {
		ASSERT(svd->tr_state == SEGVN_TR_OFF);

		/*
		 * If this is shared segment non 0 softlockcnt
		 * means locked pages are still in use.
		 */
		if (svd->type == MAP_SHARED) {
			return (EAGAIN);
		}

		/*
		 * since we do have the writers lock nobody can fill
		 * the cache during the purge. The flush either succeeds
		 * or we still have pending I/Os.
		 */
		if (reclaim == 1) {
			segvn_purge(seg);
			reclaim = 0;
			goto retry;
		}
		return (EAGAIN);
	}

	/*
	 * Check for bad sizes
	 */
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
		panic("segvn_unmap");
		/*NOTREACHED*/
	}

	if (seg->s_szc != 0) {
		size_t pgsz = page_get_pagesize(seg->s_szc);
		int err;
		if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
			ASSERT(seg->s_base != addr || seg->s_size != len);
			if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
				hat_leave_region(seg->s_as->a_hat,
				    svd->rcookie, HAT_REGION_TEXT);
				svd->rcookie = HAT_INVALID_REGION_COOKIE;
				/*
				 * could pass a flag to segvn_demote_range()
				 * below to tell it not to do any unloads but
				 * this case is rare enough to not bother for
				 * now.
				 */
			} else if (svd->tr_state == SEGVN_TR_INIT) {
				svd->tr_state = SEGVN_TR_OFF;
			} else if (svd->tr_state == SEGVN_TR_ON) {
				ASSERT(svd->amp != NULL);
				segvn_textunrepl(seg, 1);
				ASSERT(svd->amp == NULL);
				ASSERT(svd->tr_state == SEGVN_TR_OFF);
			}
			VM_STAT_ADD(segvnvmstats.demoterange[0]);
			err = segvn_demote_range(seg, addr, len, SDR_END, 0);
			if (err == 0) {
				return (IE_RETRY);
			}
			return (err);
		}
	}

	/* Inform the vnode of the unmapping. */
*/ 1887 if (svd->vp) { 1888 int error; 1889 1890 error = VOP_DELMAP(svd->vp, 1891 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base), 1892 seg->s_as, addr, len, svd->prot, svd->maxprot, 1893 svd->type, svd->cred, NULL); 1894 1895 if (error == EAGAIN) 1896 return (error); 1897 } 1898 1899 /* 1900 * Remove any page locks set through this mapping. 1901 * If text replication is not off no page locks could have been 1902 * established via this mapping. 1903 */ 1904 if (svd->tr_state == SEGVN_TR_OFF) { 1905 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0); 1906 } 1907 1908 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1909 ASSERT(svd->amp == NULL); 1910 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1911 ASSERT(svd->type == MAP_PRIVATE); 1912 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 1913 HAT_REGION_TEXT); 1914 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1915 } else if (svd->tr_state == SEGVN_TR_ON) { 1916 ASSERT(svd->amp != NULL); 1917 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE)); 1918 segvn_textunrepl(seg, 1); 1919 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 1920 } else { 1921 if (svd->tr_state != SEGVN_TR_OFF) { 1922 ASSERT(svd->tr_state == SEGVN_TR_INIT); 1923 svd->tr_state = SEGVN_TR_OFF; 1924 } 1925 /* 1926 * Unload any hardware translations in the range to be taken 1927 * out. Use a callback to invoke free_vp_pages() effectively. 1928 */ 1929 if (svd->vp != NULL && free_pages != 0) { 1930 callback.hcb_data = seg; 1931 callback.hcb_function = segvn_hat_unload_callback; 1932 cbp = &callback; 1933 } 1934 hat_unload_callback(seg->s_as->a_hat, addr, len, 1935 HAT_UNLOAD_UNMAP, cbp); 1936 1937 if (svd->type == MAP_SHARED && svd->vp != NULL && 1938 (svd->vp->v_flag & VVMEXEC) && 1939 ((svd->prot & PROT_WRITE) || svd->pageprot)) { 1940 segvn_inval_trcache(svd->vp); 1941 } 1942 } 1943 1944 /* 1945 * Check for entire segment 1946 */ 1947 if (addr == seg->s_base && len == seg->s_size) { 1948 seg_free(seg); 1949 return (0); 1950 } 1951 1952 opages = seg_pages(seg); 1953 dpages = btop(len); 1954 npages = opages - dpages; 1955 amp = svd->amp; 1956 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc); 1957 1958 /* 1959 * Check for beginning of segment 1960 */ 1961 if (addr == seg->s_base) { 1962 if (svd->vpage != NULL) { 1963 size_t nbytes; 1964 struct vpage *ovpage; 1965 1966 ovpage = svd->vpage; /* keep pointer to vpage */ 1967 1968 nbytes = vpgtob(npages); 1969 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 1970 bcopy(&ovpage[dpages], svd->vpage, nbytes); 1971 1972 /* free up old vpage */ 1973 kmem_free(ovpage, vpgtob(opages)); 1974 } 1975 if (amp != NULL) { 1976 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1977 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 1978 /* 1979 * Shared anon map is no longer in use. Before 1980 * freeing its pages purge all entries from 1981 * pcache that belong to this amp. 1982 */ 1983 if (svd->type == MAP_SHARED) { 1984 ASSERT(amp->refcnt == 1); 1985 ASSERT(svd->softlockcnt == 0); 1986 anonmap_purge(amp); 1987 } 1988 /* 1989 * Free up now unused parts of anon_map array. 
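 * (When amp->a_szc matches the segment's szc, large mappings are freed with
 * anon_free_pages() and the szc == 0 case with anon_free(); a shared amp
 * whose szc is larger goes through anon_shmap_free_pages() instead.)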
1990 */ 1991 if (amp->a_szc == seg->s_szc) { 1992 if (seg->s_szc != 0) { 1993 anon_free_pages(amp->ahp, 1994 svd->anon_index, len, 1995 seg->s_szc); 1996 } else { 1997 anon_free(amp->ahp, 1998 svd->anon_index, 1999 len); 2000 } 2001 } else { 2002 ASSERT(svd->type == MAP_SHARED); 2003 ASSERT(amp->a_szc > seg->s_szc); 2004 anon_shmap_free_pages(amp, 2005 svd->anon_index, len); 2006 } 2007 2008 /* 2009 * Unreserve swap space for the 2010 * unmapped chunk of this segment in 2011 * case it's MAP_SHARED 2012 */ 2013 if (svd->type == MAP_SHARED) { 2014 anon_unresv_zone(len, 2015 seg->s_as->a_proc->p_zone); 2016 amp->swresv -= len; 2017 } 2018 } 2019 ANON_LOCK_EXIT(&->a_rwlock); 2020 svd->anon_index += dpages; 2021 } 2022 if (svd->vp != NULL) 2023 svd->offset += len; 2024 2025 seg->s_base += len; 2026 seg->s_size -= len; 2027 2028 if (svd->swresv) { 2029 if (svd->flags & MAP_NORESERVE) { 2030 ASSERT(amp); 2031 oswresv = svd->swresv; 2032 2033 svd->swresv = ptob(anon_pages(amp->ahp, 2034 svd->anon_index, npages)); 2035 anon_unresv_zone(oswresv - svd->swresv, 2036 seg->s_as->a_proc->p_zone); 2037 if (SEG_IS_PARTIAL_RESV(seg)) 2038 seg->s_as->a_resvsize -= oswresv - 2039 svd->swresv; 2040 } else { 2041 size_t unlen; 2042 2043 if (svd->pageswap) { 2044 oswresv = svd->swresv; 2045 svd->swresv = 2046 segvn_count_swap_by_vpages(seg); 2047 ASSERT(oswresv >= svd->swresv); 2048 unlen = oswresv - svd->swresv; 2049 } else { 2050 svd->swresv -= len; 2051 ASSERT(svd->swresv == seg->s_size); 2052 unlen = len; 2053 } 2054 anon_unresv_zone(unlen, 2055 seg->s_as->a_proc->p_zone); 2056 } 2057 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2058 seg, len, 0); 2059 } 2060 2061 return (0); 2062 } 2063 2064 /* 2065 * Check for end of segment 2066 */ 2067 if (addr + len == seg->s_base + seg->s_size) { 2068 if (svd->vpage != NULL) { 2069 size_t nbytes; 2070 struct vpage *ovpage; 2071 2072 ovpage = svd->vpage; /* keep pointer to vpage */ 2073 2074 nbytes = vpgtob(npages); 2075 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2076 bcopy(ovpage, svd->vpage, nbytes); 2077 2078 /* free up old vpage */ 2079 kmem_free(ovpage, vpgtob(opages)); 2080 2081 } 2082 if (amp != NULL) { 2083 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2084 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2085 /* 2086 * Free up now unused parts of anon_map array. 2087 */ 2088 ulong_t an_idx = svd->anon_index + npages; 2089 2090 /* 2091 * Shared anon map is no longer in use. Before 2092 * freeing its pages purge all entries from 2093 * pcache that belong to this amp. 
2094 */ 2095 if (svd->type == MAP_SHARED) { 2096 ASSERT(amp->refcnt == 1); 2097 ASSERT(svd->softlockcnt == 0); 2098 anonmap_purge(amp); 2099 } 2100 2101 if (amp->a_szc == seg->s_szc) { 2102 if (seg->s_szc != 0) { 2103 anon_free_pages(amp->ahp, 2104 an_idx, len, 2105 seg->s_szc); 2106 } else { 2107 anon_free(amp->ahp, an_idx, 2108 len); 2109 } 2110 } else { 2111 ASSERT(svd->type == MAP_SHARED); 2112 ASSERT(amp->a_szc > seg->s_szc); 2113 anon_shmap_free_pages(amp, 2114 an_idx, len); 2115 } 2116 2117 /* 2118 * Unreserve swap space for the 2119 * unmapped chunk of this segment in 2120 * case it's MAP_SHARED 2121 */ 2122 if (svd->type == MAP_SHARED) { 2123 anon_unresv_zone(len, 2124 seg->s_as->a_proc->p_zone); 2125 amp->swresv -= len; 2126 } 2127 } 2128 ANON_LOCK_EXIT(&->a_rwlock); 2129 } 2130 2131 seg->s_size -= len; 2132 2133 if (svd->swresv) { 2134 if (svd->flags & MAP_NORESERVE) { 2135 ASSERT(amp); 2136 oswresv = svd->swresv; 2137 svd->swresv = ptob(anon_pages(amp->ahp, 2138 svd->anon_index, npages)); 2139 anon_unresv_zone(oswresv - svd->swresv, 2140 seg->s_as->a_proc->p_zone); 2141 if (SEG_IS_PARTIAL_RESV(seg)) 2142 seg->s_as->a_resvsize -= oswresv - 2143 svd->swresv; 2144 } else { 2145 size_t unlen; 2146 2147 if (svd->pageswap) { 2148 oswresv = svd->swresv; 2149 svd->swresv = 2150 segvn_count_swap_by_vpages(seg); 2151 ASSERT(oswresv >= svd->swresv); 2152 unlen = oswresv - svd->swresv; 2153 } else { 2154 svd->swresv -= len; 2155 ASSERT(svd->swresv == seg->s_size); 2156 unlen = len; 2157 } 2158 anon_unresv_zone(unlen, 2159 seg->s_as->a_proc->p_zone); 2160 } 2161 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2162 "anon proc:%p %lu %u", seg, len, 0); 2163 } 2164 2165 return (0); 2166 } 2167 2168 /* 2169 * The section to go is in the middle of the segment, 2170 * have to make it into two segments. nseg is made for 2171 * the high end while seg is cut down at the low end. 
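 * For example (hypothetical addresses): unmapping 0x2000 bytes starting at
 * s_base + 0x2000 from a 0x8000-byte segment leaves seg covering
 * [s_base, s_base + 0x2000) and nseg covering the remaining
 * [s_base + 0x4000, s_base + 0x8000); below, nbase = addr + len,
 * nsize = (s_base + s_size) - nbase, and seg->s_size shrinks to
 * addr - s_base.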
2172 */ 2173 nbase = addr + len; /* new seg base */ 2174 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */ 2175 seg->s_size = addr - seg->s_base; /* shrink old seg */ 2176 nseg = seg_alloc(seg->s_as, nbase, nsize); 2177 if (nseg == NULL) { 2178 panic("segvn_unmap seg_alloc"); 2179 /*NOTREACHED*/ 2180 } 2181 nseg->s_ops = seg->s_ops; 2182 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 2183 nseg->s_data = (void *)nsvd; 2184 nseg->s_szc = seg->s_szc; 2185 *nsvd = *svd; 2186 nsvd->seg = nseg; 2187 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base); 2188 nsvd->swresv = 0; 2189 nsvd->softlockcnt = 0; 2190 nsvd->softlockcnt_sbase = 0; 2191 nsvd->softlockcnt_send = 0; 2192 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 2193 2194 if (svd->vp != NULL) { 2195 VN_HOLD(nsvd->vp); 2196 if (nsvd->type == MAP_SHARED) 2197 lgrp_shm_policy_init(NULL, nsvd->vp); 2198 } 2199 crhold(svd->cred); 2200 2201 if (svd->vpage == NULL) { 2202 nsvd->vpage = NULL; 2203 } else { 2204 /* need to split vpage into two arrays */ 2205 size_t nbytes; 2206 struct vpage *ovpage; 2207 2208 ovpage = svd->vpage; /* keep pointer to vpage */ 2209 2210 npages = seg_pages(seg); /* seg has shrunk */ 2211 nbytes = vpgtob(npages); 2212 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2213 2214 bcopy(ovpage, svd->vpage, nbytes); 2215 2216 npages = seg_pages(nseg); 2217 nbytes = vpgtob(npages); 2218 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2219 2220 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes); 2221 2222 /* free up old vpage */ 2223 kmem_free(ovpage, vpgtob(opages)); 2224 } 2225 2226 if (amp == NULL) { 2227 nsvd->amp = NULL; 2228 nsvd->anon_index = 0; 2229 } else { 2230 /* 2231 * Need to create a new anon map for the new segment. 2232 * We'll also allocate a new smaller array for the old 2233 * smaller segment to save space. 2234 */ 2235 opages = btop((uintptr_t)(addr - seg->s_base)); 2236 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 2237 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2238 /* 2239 * Free up now unused parts of anon_map array. 2240 */ 2241 ulong_t an_idx = svd->anon_index + opages; 2242 2243 /* 2244 * Shared anon map is no longer in use. Before 2245 * freeing its pages purge all entries from 2246 * pcache that belong to this amp.
2247 */ 2248 if (svd->type == MAP_SHARED) { 2249 ASSERT(amp->refcnt == 1); 2250 ASSERT(svd->softlockcnt == 0); 2251 anonmap_purge(amp); 2252 } 2253 2254 if (amp->a_szc == seg->s_szc) { 2255 if (seg->s_szc != 0) { 2256 anon_free_pages(amp->ahp, an_idx, len, 2257 seg->s_szc); 2258 } else { 2259 anon_free(amp->ahp, an_idx, 2260 len); 2261 } 2262 } else { 2263 ASSERT(svd->type == MAP_SHARED); 2264 ASSERT(amp->a_szc > seg->s_szc); 2265 anon_shmap_free_pages(amp, an_idx, len); 2266 } 2267 2268 /* 2269 * Unreserve swap space for the 2270 * unmapped chunk of this segment in 2271 * case it's MAP_SHARED 2272 */ 2273 if (svd->type == MAP_SHARED) { 2274 anon_unresv_zone(len, 2275 seg->s_as->a_proc->p_zone); 2276 amp->swresv -= len; 2277 } 2278 } 2279 nsvd->anon_index = svd->anon_index + 2280 btop((uintptr_t)(nseg->s_base - seg->s_base)); 2281 if (svd->type == MAP_SHARED) { 2282 amp->refcnt++; 2283 nsvd->amp = amp; 2284 } else { 2285 struct anon_map *namp; 2286 struct anon_hdr *nahp; 2287 2288 ASSERT(svd->type == MAP_PRIVATE); 2289 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 2290 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 2291 namp->a_szc = seg->s_szc; 2292 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp, 2293 0, btop(seg->s_size), ANON_SLEEP); 2294 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index, 2295 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 2296 anon_release(amp->ahp, btop(amp->size)); 2297 svd->anon_index = 0; 2298 nsvd->anon_index = 0; 2299 amp->ahp = nahp; 2300 amp->size = seg->s_size; 2301 nsvd->amp = namp; 2302 } 2303 ANON_LOCK_EXIT(&->a_rwlock); 2304 } 2305 if (svd->swresv) { 2306 if (svd->flags & MAP_NORESERVE) { 2307 ASSERT(amp); 2308 oswresv = svd->swresv; 2309 svd->swresv = ptob(anon_pages(amp->ahp, 2310 svd->anon_index, btop(seg->s_size))); 2311 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 2312 nsvd->anon_index, btop(nseg->s_size))); 2313 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2314 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv), 2315 seg->s_as->a_proc->p_zone); 2316 if (SEG_IS_PARTIAL_RESV(seg)) 2317 seg->s_as->a_resvsize -= oswresv - 2318 (svd->swresv + nsvd->swresv); 2319 } else { 2320 size_t unlen; 2321 2322 if (svd->pageswap) { 2323 oswresv = svd->swresv; 2324 svd->swresv = segvn_count_swap_by_vpages(seg); 2325 nsvd->swresv = segvn_count_swap_by_vpages(nseg); 2326 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2327 unlen = oswresv - (svd->swresv + nsvd->swresv); 2328 } else { 2329 if (seg->s_size + nseg->s_size + len != 2330 svd->swresv) { 2331 panic("segvn_unmap: cannot split " 2332 "swap reservation"); 2333 /*NOTREACHED*/ 2334 } 2335 svd->swresv = seg->s_size; 2336 nsvd->swresv = nseg->s_size; 2337 unlen = len; 2338 } 2339 anon_unresv_zone(unlen, 2340 seg->s_as->a_proc->p_zone); 2341 } 2342 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2343 seg, len, 0); 2344 } 2345 2346 return (0); /* I'm glad that's all over with! */ 2347 } 2348 2349 static void 2350 segvn_free(struct seg *seg) 2351 { 2352 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2353 pgcnt_t npages = seg_pages(seg); 2354 struct anon_map *amp; 2355 size_t len; 2356 2357 /* 2358 * We don't need any segment level locks for "segvn" data 2359 * since the address space is "write" locked. 2360 */ 2361 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2362 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2363 2364 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2365 2366 /* 2367 * Be sure to unlock pages. 
XXX Why do things get free'ed instead 2368 * of unmapped? XXX 2369 */ 2370 (void) segvn_lockop(seg, seg->s_base, seg->s_size, 2371 0, MC_UNLOCK, NULL, 0); 2372 2373 /* 2374 * Deallocate the vpage and anon pointers if necessary and possible. 2375 */ 2376 if (svd->vpage != NULL) { 2377 kmem_free(svd->vpage, vpgtob(npages)); 2378 svd->vpage = NULL; 2379 } 2380 if ((amp = svd->amp) != NULL) { 2381 /* 2382 * If there are no more references to this anon_map 2383 * structure, then deallocate the structure after freeing 2384 * up all the anon slot pointers that we can. 2385 */ 2386 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2387 ASSERT(amp->a_szc >= seg->s_szc); 2388 if (--amp->refcnt == 0) { 2389 if (svd->type == MAP_PRIVATE) { 2390 /* 2391 * Private - we only need to anon_free 2392 * the part that this segment refers to. 2393 */ 2394 if (seg->s_szc != 0) { 2395 anon_free_pages(amp->ahp, 2396 svd->anon_index, seg->s_size, 2397 seg->s_szc); 2398 } else { 2399 anon_free(amp->ahp, svd->anon_index, 2400 seg->s_size); 2401 } 2402 } else { 2403 2404 /* 2405 * Shared anon map is no longer in use. Before 2406 * freeing its pages purge all entries from 2407 * pcache that belong to this amp. 2408 */ 2409 ASSERT(svd->softlockcnt == 0); 2410 anonmap_purge(amp); 2411 2412 /* 2413 * Shared - anon_free the entire 2414 * anon_map's worth of stuff and 2415 * release any swap reservation. 2416 */ 2417 if (amp->a_szc != 0) { 2418 anon_shmap_free_pages(amp, 0, 2419 amp->size); 2420 } else { 2421 anon_free(amp->ahp, 0, amp->size); 2422 } 2423 if ((len = amp->swresv) != 0) { 2424 anon_unresv_zone(len, 2425 seg->s_as->a_proc->p_zone); 2426 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2427 "anon proc:%p %lu %u", seg, len, 0); 2428 } 2429 } 2430 svd->amp = NULL; 2431 ANON_LOCK_EXIT(&->a_rwlock); 2432 anonmap_free(amp); 2433 } else if (svd->type == MAP_PRIVATE) { 2434 /* 2435 * We had a private mapping which still has 2436 * a held anon_map so just free up all the 2437 * anon slot pointers that we were using. 2438 */ 2439 if (seg->s_szc != 0) { 2440 anon_free_pages(amp->ahp, svd->anon_index, 2441 seg->s_size, seg->s_szc); 2442 } else { 2443 anon_free(amp->ahp, svd->anon_index, 2444 seg->s_size); 2445 } 2446 ANON_LOCK_EXIT(&->a_rwlock); 2447 } else { 2448 ANON_LOCK_EXIT(&->a_rwlock); 2449 } 2450 } 2451 2452 /* 2453 * Release swap reservation. 2454 */ 2455 if ((len = svd->swresv) != 0) { 2456 anon_unresv_zone(svd->swresv, 2457 seg->s_as->a_proc->p_zone); 2458 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2459 seg, len, 0); 2460 if (SEG_IS_PARTIAL_RESV(seg)) 2461 seg->s_as->a_resvsize -= svd->swresv; 2462 svd->swresv = 0; 2463 } 2464 /* 2465 * Release claim on vnode, credentials, and finally free the 2466 * private data. 2467 */ 2468 if (svd->vp != NULL) { 2469 if (svd->type == MAP_SHARED) 2470 lgrp_shm_policy_fini(NULL, svd->vp); 2471 VN_RELE(svd->vp); 2472 svd->vp = NULL; 2473 } 2474 crfree(svd->cred); 2475 svd->pageprot = 0; 2476 svd->pageadvice = 0; 2477 svd->pageswap = 0; 2478 svd->cred = NULL; 2479 2480 /* 2481 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's 2482 * still working with this segment without holding as lock (in case 2483 * it's called by pcache async thread). 2484 */ 2485 ASSERT(svd->softlockcnt == 0); 2486 mutex_enter(&svd->segfree_syncmtx); 2487 mutex_exit(&svd->segfree_syncmtx); 2488 2489 seg->s_data = NULL; 2490 kmem_cache_free(segvn_cache, svd); 2491 } 2492 2493 /* 2494 * Do a F_SOFTUNLOCK call over the range requested. The range must have 2495 * already been F_SOFTLOCK'ed. 
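 * Each page in the range is looked up with page_find(), its ref/mod bits
 * are updated to reflect the access type, and it is then unlocked;
 * softlockcnt is decremented by btop(len) and any waiting unmappers are
 * woken once it reaches zero.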
2496 * Caller must always match addr and len of a softunlock with a previous 2497 * softlock with exactly the same addr and len. 2498 */ 2499 static void 2500 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) 2501 { 2502 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2503 page_t *pp; 2504 caddr_t adr; 2505 struct vnode *vp; 2506 u_offset_t offset; 2507 ulong_t anon_index; 2508 struct anon_map *amp; 2509 struct anon *ap = NULL; 2510 2511 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2512 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 2513 2514 if ((amp = svd->amp) != NULL) 2515 anon_index = svd->anon_index + seg_page(seg, addr); 2516 2517 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 2518 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2519 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie); 2520 } else { 2521 hat_unlock(seg->s_as->a_hat, addr, len); 2522 } 2523 for (adr = addr; adr < addr + len; adr += PAGESIZE) { 2524 if (amp != NULL) { 2525 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2526 if ((ap = anon_get_ptr(amp->ahp, anon_index++)) 2527 != NULL) { 2528 swap_xlate(ap, &vp, &offset); 2529 } else { 2530 vp = svd->vp; 2531 offset = svd->offset + 2532 (uintptr_t)(adr - seg->s_base); 2533 } 2534 ANON_LOCK_EXIT(&->a_rwlock); 2535 } else { 2536 vp = svd->vp; 2537 offset = svd->offset + 2538 (uintptr_t)(adr - seg->s_base); 2539 } 2540 2541 /* 2542 * Use page_find() instead of page_lookup() to 2543 * find the page since we know that it is locked. 2544 */ 2545 pp = page_find(vp, offset); 2546 if (pp == NULL) { 2547 panic( 2548 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx", 2549 (void *)adr, (void *)ap, (void *)vp, offset); 2550 /*NOTREACHED*/ 2551 } 2552 2553 if (rw == S_WRITE) { 2554 hat_setrefmod(pp); 2555 if (seg->s_as->a_vbits) 2556 hat_setstat(seg->s_as, adr, PAGESIZE, 2557 P_REF | P_MOD); 2558 } else if (rw != S_OTHER) { 2559 hat_setref(pp); 2560 if (seg->s_as->a_vbits) 2561 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF); 2562 } 2563 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2564 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset); 2565 page_unlock(pp); 2566 } 2567 ASSERT(svd->softlockcnt >= btop(len)); 2568 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) { 2569 /* 2570 * All SOFTLOCKS are gone. Wakeup any waiting 2571 * unmappers so they can try again to unmap. 2572 * Check for waiters first without the mutex 2573 * held so we don't always grab the mutex on 2574 * softunlocks. 2575 */ 2576 if (AS_ISUNMAPWAIT(seg->s_as)) { 2577 mutex_enter(&seg->s_as->a_contents); 2578 if (AS_ISUNMAPWAIT(seg->s_as)) { 2579 AS_CLRUNMAPWAIT(seg->s_as); 2580 cv_broadcast(&seg->s_as->a_cv); 2581 } 2582 mutex_exit(&seg->s_as->a_contents); 2583 } 2584 } 2585 } 2586 2587 #define PAGE_HANDLED ((page_t *)-1) 2588 2589 /* 2590 * Release all the pages in the NULL terminated ppp list 2591 * which haven't already been converted to PAGE_HANDLED. 2592 */ 2593 static void 2594 segvn_pagelist_rele(page_t **ppp) 2595 { 2596 for (; *ppp != NULL; ppp++) { 2597 if (*ppp != PAGE_HANDLED) 2598 page_unlock(*ppp); 2599 } 2600 } 2601 2602 static int stealcow = 1; 2603 2604 /* 2605 * Workaround for viking chip bug. See bug id 1220902. 2606 * To fix this down in pagefault() would require importing so 2607 * much as and segvn code as to be unmaintainable. 2608 */ 2609 int enable_mbit_wa = 0; 2610 2611 /* 2612 * Handles all the dirty work of getting the right 2613 * anonymous pages and loading up the translations. 
2614 * This routine is called only from segvn_fault() 2615 * when looping over the range of addresses requested. 2616 * 2617 * The basic algorithm here is: 2618 * If this is an anon_zero case 2619 * Call anon_zero to allocate page 2620 * Load up translation 2621 * Return 2622 * endif 2623 * If this is an anon page 2624 * Use anon_getpage to get the page 2625 * else 2626 * Find page in pl[] list passed in 2627 * endif 2628 * If not a cow 2629 * Load up the translation to the page 2630 * return 2631 * endif 2632 * Call anon_private to handle cow 2633 * Load up (writable) translation to new page 2634 */ 2635 static faultcode_t 2636 segvn_faultpage( 2637 struct hat *hat, /* the hat to use for mapping */ 2638 struct seg *seg, /* seg_vn of interest */ 2639 caddr_t addr, /* address in as */ 2640 u_offset_t off, /* offset in vp */ 2641 struct vpage *vpage, /* pointer to vpage for vp, off */ 2642 page_t *pl[], /* object source page pointer */ 2643 uint_t vpprot, /* access allowed to object pages */ 2644 enum fault_type type, /* type of fault */ 2645 enum seg_rw rw, /* type of access at fault */ 2646 int brkcow) /* we may need to break cow */ 2647 { 2648 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2649 page_t *pp, **ppp; 2650 uint_t pageflags = 0; 2651 page_t *anon_pl[1 + 1]; 2652 page_t *opp = NULL; /* original page */ 2653 uint_t prot; 2654 int err; 2655 int cow; 2656 int claim; 2657 int steal = 0; 2658 ulong_t anon_index; 2659 struct anon *ap, *oldap; 2660 struct anon_map *amp; 2661 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 2662 int anon_lock = 0; 2663 anon_sync_obj_t cookie; 2664 2665 if (svd->flags & MAP_TEXT) { 2666 hat_flag |= HAT_LOAD_TEXT; 2667 } 2668 2669 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 2670 ASSERT(seg->s_szc == 0); 2671 ASSERT(svd->tr_state != SEGVN_TR_INIT); 2672 2673 /* 2674 * Initialize protection value for this page. 2675 * If we have per page protection values check it now. 2676 */ 2677 if (svd->pageprot) { 2678 uint_t protchk; 2679 2680 switch (rw) { 2681 case S_READ: 2682 protchk = PROT_READ; 2683 break; 2684 case S_WRITE: 2685 protchk = PROT_WRITE; 2686 break; 2687 case S_EXEC: 2688 protchk = PROT_EXEC; 2689 break; 2690 case S_OTHER: 2691 default: 2692 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 2693 break; 2694 } 2695 2696 prot = VPP_PROT(vpage); 2697 if ((prot & protchk) == 0) 2698 return (FC_PROT); /* illegal access type */ 2699 } else { 2700 prot = svd->prot; 2701 } 2702 2703 if (type == F_SOFTLOCK) { 2704 atomic_add_long((ulong_t *)&svd->softlockcnt, 1); 2705 } 2706 2707 /* 2708 * Always acquire the anon array lock to prevent 2 threads from 2709 * allocating separate anon slots for the same "addr". 2710 */ 2711 2712 if ((amp = svd->amp) != NULL) { 2713 ASSERT(RW_READ_HELD(&->a_rwlock)); 2714 anon_index = svd->anon_index + seg_page(seg, addr); 2715 anon_array_enter(amp, anon_index, &cookie); 2716 anon_lock = 1; 2717 } 2718 2719 if (svd->vp == NULL && amp != NULL) { 2720 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) { 2721 /* 2722 * Allocate a (normally) writable anonymous page of 2723 * zeroes. If no advance reservations, reserve now. 
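 * For MAP_NORESERVE the reservation is made one page at a time as each
 * zero-fill page is first touched: ptob(1) bytes are charged against the
 * zone and added to svd->swresv and the address space's a_resvsize.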
2724 */ 2725 if (svd->flags & MAP_NORESERVE) { 2726 if (anon_resv_zone(ptob(1), 2727 seg->s_as->a_proc->p_zone)) { 2728 atomic_add_long(&svd->swresv, ptob(1)); 2729 atomic_add_long(&seg->s_as->a_resvsize, 2730 ptob(1)); 2731 } else { 2732 err = ENOMEM; 2733 goto out; 2734 } 2735 } 2736 if ((pp = anon_zero(seg, addr, &ap, 2737 svd->cred)) == NULL) { 2738 err = ENOMEM; 2739 goto out; /* out of swap space */ 2740 } 2741 /* 2742 * Re-acquire the anon_map lock and 2743 * initialize the anon array entry. 2744 */ 2745 (void) anon_set_ptr(amp->ahp, anon_index, ap, 2746 ANON_SLEEP); 2747 2748 ASSERT(pp->p_szc == 0); 2749 2750 /* 2751 * Handle pages that have been marked for migration 2752 */ 2753 if (lgrp_optimizations()) 2754 page_migrate(seg, addr, &pp, 1); 2755 2756 if (enable_mbit_wa) { 2757 if (rw == S_WRITE) 2758 hat_setmod(pp); 2759 else if (!hat_ismod(pp)) 2760 prot &= ~PROT_WRITE; 2761 } 2762 /* 2763 * If AS_PAGLCK is set in a_flags (via memcntl(2) 2764 * with MC_LOCKAS, MCL_FUTURE) and this is a 2765 * MAP_NORESERVE segment, we may need to 2766 * permanently lock the page as it is being faulted 2767 * for the first time. The following text applies 2768 * only to MAP_NORESERVE segments: 2769 * 2770 * As per memcntl(2), if this segment was created 2771 * after MCL_FUTURE was applied (a "future" 2772 * segment), its pages must be locked. If this 2773 * segment existed at MCL_FUTURE application (a 2774 * "past" segment), the interface is unclear. 2775 * 2776 * We decide to lock only if vpage is present: 2777 * 2778 * - "future" segments will have a vpage array (see 2779 * as_map), and so will be locked as required 2780 * 2781 * - "past" segments may not have a vpage array, 2782 * depending on whether events (such as 2783 * mprotect) have occurred. Locking if vpage 2784 * exists will preserve legacy behavior. Not 2785 * locking if vpage is absent, will not break 2786 * the interface or legacy behavior. Note that 2787 * allocating vpage here if it's absent requires 2788 * upgrading the segvn reader lock, the cost of 2789 * which does not seem worthwhile. 2790 * 2791 * Usually testing and setting VPP_ISPPLOCK and 2792 * VPP_SETPPLOCK requires holding the segvn lock as 2793 * writer, but in this case all readers are 2794 * serializing on the anon array lock. 2795 */ 2796 if (AS_ISPGLCK(seg->s_as) && vpage != NULL && 2797 (svd->flags & MAP_NORESERVE) && 2798 !VPP_ISPPLOCK(vpage)) { 2799 proc_t *p = seg->s_as->a_proc; 2800 ASSERT(svd->type == MAP_PRIVATE); 2801 mutex_enter(&p->p_lock); 2802 if (rctl_incr_locked_mem(p, NULL, PAGESIZE, 2803 1) == 0) { 2804 claim = VPP_PROT(vpage) & PROT_WRITE; 2805 if (page_pp_lock(pp, claim, 0)) { 2806 VPP_SETPPLOCK(vpage); 2807 } else { 2808 rctl_decr_locked_mem(p, NULL, 2809 PAGESIZE, 1); 2810 } 2811 } 2812 mutex_exit(&p->p_lock); 2813 } 2814 2815 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2816 hat_memload(hat, addr, pp, prot, hat_flag); 2817 2818 if (!(hat_flag & HAT_LOAD_LOCK)) 2819 page_unlock(pp); 2820 2821 anon_array_exit(&cookie); 2822 return (0); 2823 } 2824 } 2825 2826 /* 2827 * Obtain the page structure via anon_getpage() if it is 2828 * a private copy of an object (the result of a previous 2829 * copy-on-write). 
2830 */ 2831 if (amp != NULL) { 2832 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) { 2833 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE, 2834 seg, addr, rw, svd->cred); 2835 if (err) 2836 goto out; 2837 2838 if (svd->type == MAP_SHARED) { 2839 /* 2840 * If this is a shared mapping to an 2841 * anon_map, then ignore the write 2842 * permissions returned by anon_getpage(). 2843 * They apply to the private mappings 2844 * of this anon_map. 2845 */ 2846 vpprot |= PROT_WRITE; 2847 } 2848 opp = anon_pl[0]; 2849 } 2850 } 2851 2852 /* 2853 * Search the pl[] list passed in if it is from the 2854 * original object (i.e., not a private copy). 2855 */ 2856 if (opp == NULL) { 2857 /* 2858 * Find original page. We must be bringing it in 2859 * from the list in pl[]. 2860 */ 2861 for (ppp = pl; (opp = *ppp) != NULL; ppp++) { 2862 if (opp == PAGE_HANDLED) 2863 continue; 2864 ASSERT(opp->p_vnode == svd->vp); /* XXX */ 2865 if (opp->p_offset == off) 2866 break; 2867 } 2868 if (opp == NULL) { 2869 panic("segvn_faultpage not found"); 2870 /*NOTREACHED*/ 2871 } 2872 *ppp = PAGE_HANDLED; 2873 2874 } 2875 2876 ASSERT(PAGE_LOCKED(opp)); 2877 2878 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2879 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0); 2880 2881 /* 2882 * The fault is treated as a copy-on-write fault if a 2883 * write occurs on a private segment and the object 2884 * page (i.e., mapping) is write protected. We assume 2885 * that fatal protection checks have already been made. 2886 */ 2887 2888 if (brkcow) { 2889 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2890 cow = !(vpprot & PROT_WRITE); 2891 } else if (svd->tr_state == SEGVN_TR_ON) { 2892 /* 2893 * If we are doing text replication COW on first touch. 2894 */ 2895 ASSERT(amp != NULL); 2896 ASSERT(svd->vp != NULL); 2897 ASSERT(rw != S_WRITE); 2898 cow = (ap == NULL); 2899 } else { 2900 cow = 0; 2901 } 2902 2903 /* 2904 * If not a copy-on-write case load the translation 2905 * and return. 2906 */ 2907 if (cow == 0) { 2908 2909 /* 2910 * Handle pages that have been marked for migration 2911 */ 2912 if (lgrp_optimizations()) 2913 page_migrate(seg, addr, &opp, 1); 2914 2915 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) { 2916 if (rw == S_WRITE) 2917 hat_setmod(opp); 2918 else if (rw != S_OTHER && !hat_ismod(opp)) 2919 prot &= ~PROT_WRITE; 2920 } 2921 2922 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 2923 (!svd->pageprot && svd->prot == (prot & vpprot))); 2924 ASSERT(amp == NULL || 2925 svd->rcookie == HAT_INVALID_REGION_COOKIE); 2926 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag, 2927 svd->rcookie); 2928 2929 if (!(hat_flag & HAT_LOAD_LOCK)) 2930 page_unlock(opp); 2931 2932 if (anon_lock) { 2933 anon_array_exit(&cookie); 2934 } 2935 return (0); 2936 } 2937 2938 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2939 2940 hat_setref(opp); 2941 2942 ASSERT(amp != NULL && anon_lock); 2943 2944 /* 2945 * Steal the page only if it isn't a private page 2946 * since stealing a private page is not worth the effort. 2947 */ 2948 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) 2949 steal = 1; 2950 2951 /* 2952 * Steal the original page if the following conditions are true: 2953 * 2954 * We are low on memory, the page is not private, page is not large, 2955 * not shared, not modified, not `locked' or if we have it `locked' 2956 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies 2957 * that the page is not shared) and if it doesn't have any 2958 * translations. 
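 * In code form the test below is roughly:
 *	stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
 *	page_tryupgrade(opp) && !hat_ismod(opp) &&
 *	(page unlocked, or locked solely by our COW claim).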
page_struct_lock isn't needed to look at p_cowcnt 2959 * and p_lckcnt because we first get exclusive lock on page. 2960 */ 2961 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD); 2962 2963 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 && 2964 page_tryupgrade(opp) && !hat_ismod(opp) && 2965 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) || 2966 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 && 2967 vpage != NULL && VPP_ISPPLOCK(vpage)))) { 2968 /* 2969 * Check if this page has other translations 2970 * after unloading our translation. 2971 */ 2972 if (hat_page_is_mapped(opp)) { 2973 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2974 hat_unload(seg->s_as->a_hat, addr, PAGESIZE, 2975 HAT_UNLOAD); 2976 } 2977 2978 /* 2979 * hat_unload() might sync back someone else's recent 2980 * modification, so check again. 2981 */ 2982 if (!hat_ismod(opp) && !hat_page_is_mapped(opp)) 2983 pageflags |= STEAL_PAGE; 2984 } 2985 2986 /* 2987 * If we have a vpage pointer, see if it indicates that we have 2988 * ``locked'' the page we map -- if so, tell anon_private to 2989 * transfer the locking resource to the new page. 2990 * 2991 * See Statement at the beginning of segvn_lockop regarding 2992 * the way lockcnts/cowcnts are handled during COW. 2993 * 2994 */ 2995 if (vpage != NULL && VPP_ISPPLOCK(vpage)) 2996 pageflags |= LOCK_PAGE; 2997 2998 /* 2999 * Allocate a private page and perform the copy. 3000 * For MAP_NORESERVE reserve swap space now, unless this 3001 * is a cow fault on an existing anon page in which case 3002 * MAP_NORESERVE will have made advance reservations. 3003 */ 3004 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) { 3005 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) { 3006 atomic_add_long(&svd->swresv, ptob(1)); 3007 atomic_add_long(&seg->s_as->a_resvsize, ptob(1)); 3008 } else { 3009 page_unlock(opp); 3010 err = ENOMEM; 3011 goto out; 3012 } 3013 } 3014 oldap = ap; 3015 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred); 3016 if (pp == NULL) { 3017 err = ENOMEM; /* out of swap space */ 3018 goto out; 3019 } 3020 3021 /* 3022 * If we copied away from an anonymous page, then 3023 * we are one step closer to freeing up an anon slot. 3024 * 3025 * NOTE: The original anon slot must be released while 3026 * holding the "anon_map" lock. This is necessary to prevent 3027 * other threads from obtaining a pointer to the anon slot 3028 * which may be freed if its "refcnt" is 1. 3029 */ 3030 if (oldap != NULL) 3031 anon_decref(oldap); 3032 3033 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 3034 3035 /* 3036 * Handle pages that have been marked for migration 3037 */ 3038 if (lgrp_optimizations()) 3039 page_migrate(seg, addr, &pp, 1); 3040 3041 ASSERT(pp->p_szc == 0); 3042 3043 ASSERT(!IS_VMODSORT(pp->p_vnode)); 3044 if (enable_mbit_wa) { 3045 if (rw == S_WRITE) 3046 hat_setmod(pp); 3047 else if (!hat_ismod(pp)) 3048 prot &= ~PROT_WRITE; 3049 } 3050 3051 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3052 hat_memload(hat, addr, pp, prot, hat_flag); 3053 3054 if (!(hat_flag & HAT_LOAD_LOCK)) 3055 page_unlock(pp); 3056 3057 ASSERT(anon_lock); 3058 anon_array_exit(&cookie); 3059 return (0); 3060 out: 3061 if (anon_lock) 3062 anon_array_exit(&cookie); 3063 3064 if (type == F_SOFTLOCK) { 3065 atomic_add_long((ulong_t *)&svd->softlockcnt, -1); 3066 } 3067 return (FC_MAKE_ERR(err)); 3068 } 3069 3070 /* 3071 * relocate a bunch of smaller targ pages into one large repl page. 
all targ 3072 * pages must be complete pages smaller than replacement pages. 3073 * it's assumed that no page's szc can change since they are all PAGESIZE or 3074 * complete large pages locked SHARED. 3075 */ 3076 static void 3077 segvn_relocate_pages(page_t **targ, page_t *replacement) 3078 { 3079 page_t *pp; 3080 pgcnt_t repl_npgs, curnpgs; 3081 pgcnt_t i; 3082 uint_t repl_szc = replacement->p_szc; 3083 page_t *first_repl = replacement; 3084 page_t *repl; 3085 spgcnt_t npgs; 3086 3087 VM_STAT_ADD(segvnvmstats.relocatepages[0]); 3088 3089 ASSERT(repl_szc != 0); 3090 npgs = repl_npgs = page_get_pagecnt(repl_szc); 3091 3092 i = 0; 3093 while (repl_npgs) { 3094 spgcnt_t nreloc; 3095 int err; 3096 ASSERT(replacement != NULL); 3097 pp = targ[i]; 3098 ASSERT(pp->p_szc < repl_szc); 3099 ASSERT(PAGE_EXCL(pp)); 3100 ASSERT(!PP_ISFREE(pp)); 3101 curnpgs = page_get_pagecnt(pp->p_szc); 3102 if (curnpgs == 1) { 3103 VM_STAT_ADD(segvnvmstats.relocatepages[1]); 3104 repl = replacement; 3105 page_sub(&replacement, repl); 3106 ASSERT(PAGE_EXCL(repl)); 3107 ASSERT(!PP_ISFREE(repl)); 3108 ASSERT(repl->p_szc == repl_szc); 3109 } else { 3110 page_t *repl_savepp; 3111 int j; 3112 VM_STAT_ADD(segvnvmstats.relocatepages[2]); 3113 repl_savepp = replacement; 3114 for (j = 0; j < curnpgs; j++) { 3115 repl = replacement; 3116 page_sub(&replacement, repl); 3117 ASSERT(PAGE_EXCL(repl)); 3118 ASSERT(!PP_ISFREE(repl)); 3119 ASSERT(repl->p_szc == repl_szc); 3120 ASSERT(page_pptonum(targ[i + j]) == 3121 page_pptonum(targ[i]) + j); 3122 } 3123 repl = repl_savepp; 3124 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs)); 3125 } 3126 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL); 3127 if (err || nreloc != curnpgs) { 3128 panic("segvn_relocate_pages: " 3129 "page_relocate failed err=%d curnpgs=%ld " 3130 "nreloc=%ld", err, curnpgs, nreloc); 3131 } 3132 ASSERT(curnpgs <= repl_npgs); 3133 repl_npgs -= curnpgs; 3134 i += curnpgs; 3135 } 3136 ASSERT(replacement == NULL); 3137 3138 repl = first_repl; 3139 repl_npgs = npgs; 3140 for (i = 0; i < repl_npgs; i++) { 3141 ASSERT(PAGE_EXCL(repl)); 3142 ASSERT(!PP_ISFREE(repl)); 3143 targ[i] = repl; 3144 page_downgrade(targ[i]); 3145 repl++; 3146 } 3147 } 3148 3149 /* 3150 * Check if all pages in ppa array are complete smaller than szc pages and 3151 * their roots will still be aligned relative to their current size if the 3152 * entire ppa array is relocated into one szc page. If these conditions are 3153 * not met return 0. 3154 * 3155 * If all pages are properly aligned attempt to upgrade their locks 3156 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0. 3157 * upgrdfail was set to 0 by caller. 3158 * 3159 * Return 1 if all pages are aligned and locked exclusively. 3160 * 3161 * If all pages in ppa array happen to be physically contiguous to make one 3162 * szc page and all exclusive locks are successfully obtained promote the page 3163 * size to szc and set *pszc to szc. Return 1 with pages locked shared. 
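 * A rough summary of the outcomes (the code below is authoritative):
 * misaligned roots or a constituent szc >= szc: return 0;
 * a failed page_tryupgrade(): *upgrdfail = 1, return 0;
 * aligned and upgraded but not contiguous: return 1 with pages held SE_EXCL;
 * aligned and physically contiguous: p_szc promoted to szc, *pszc = szc,
 * return 1 with the pages downgraded to shared.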
3164 */ 3165 static int 3166 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc) 3167 { 3168 page_t *pp; 3169 pfn_t pfn; 3170 pgcnt_t totnpgs = page_get_pagecnt(szc); 3171 pfn_t first_pfn; 3172 int contig = 1; 3173 pgcnt_t i; 3174 pgcnt_t j; 3175 uint_t curszc; 3176 pgcnt_t curnpgs; 3177 int root = 0; 3178 3179 ASSERT(szc > 0); 3180 3181 VM_STAT_ADD(segvnvmstats.fullszcpages[0]); 3182 3183 for (i = 0; i < totnpgs; i++) { 3184 pp = ppa[i]; 3185 ASSERT(PAGE_SHARED(pp)); 3186 ASSERT(!PP_ISFREE(pp)); 3187 pfn = page_pptonum(pp); 3188 if (i == 0) { 3189 if (!IS_P2ALIGNED(pfn, totnpgs)) { 3190 contig = 0; 3191 } else { 3192 first_pfn = pfn; 3193 } 3194 } else if (contig && pfn != first_pfn + i) { 3195 contig = 0; 3196 } 3197 if (pp->p_szc == 0) { 3198 if (root) { 3199 VM_STAT_ADD(segvnvmstats.fullszcpages[1]); 3200 return (0); 3201 } 3202 } else if (!root) { 3203 if ((curszc = pp->p_szc) >= szc) { 3204 VM_STAT_ADD(segvnvmstats.fullszcpages[2]); 3205 return (0); 3206 } 3207 if (curszc == 0) { 3208 /* 3209 * p_szc changed means we don't have all pages 3210 * locked. return failure. 3211 */ 3212 VM_STAT_ADD(segvnvmstats.fullszcpages[3]); 3213 return (0); 3214 } 3215 curnpgs = page_get_pagecnt(curszc); 3216 if (!IS_P2ALIGNED(pfn, curnpgs) || 3217 !IS_P2ALIGNED(i, curnpgs)) { 3218 VM_STAT_ADD(segvnvmstats.fullszcpages[4]); 3219 return (0); 3220 } 3221 root = 1; 3222 } else { 3223 ASSERT(i > 0); 3224 VM_STAT_ADD(segvnvmstats.fullszcpages[5]); 3225 if (pp->p_szc != curszc) { 3226 VM_STAT_ADD(segvnvmstats.fullszcpages[6]); 3227 return (0); 3228 } 3229 if (pfn - 1 != page_pptonum(ppa[i - 1])) { 3230 panic("segvn_full_szcpages: " 3231 "large page not physically contiguous"); 3232 } 3233 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) { 3234 root = 0; 3235 } 3236 } 3237 } 3238 3239 for (i = 0; i < totnpgs; i++) { 3240 ASSERT(ppa[i]->p_szc < szc); 3241 if (!page_tryupgrade(ppa[i])) { 3242 for (j = 0; j < i; j++) { 3243 page_downgrade(ppa[j]); 3244 } 3245 *pszc = ppa[i]->p_szc; 3246 *upgrdfail = 1; 3247 VM_STAT_ADD(segvnvmstats.fullszcpages[7]); 3248 return (0); 3249 } 3250 } 3251 3252 /* 3253 * When a page is put a free cachelist its szc is set to 0. if file 3254 * system reclaimed pages from cachelist targ pages will be physically 3255 * contiguous with 0 p_szc. in this case just upgrade szc of targ 3256 * pages without any relocations. 3257 * To avoid any hat issues with previous small mappings 3258 * hat_pageunload() the target pages first. 3259 */ 3260 if (contig) { 3261 VM_STAT_ADD(segvnvmstats.fullszcpages[8]); 3262 for (i = 0; i < totnpgs; i++) { 3263 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD); 3264 } 3265 for (i = 0; i < totnpgs; i++) { 3266 ppa[i]->p_szc = szc; 3267 } 3268 for (i = 0; i < totnpgs; i++) { 3269 ASSERT(PAGE_EXCL(ppa[i])); 3270 page_downgrade(ppa[i]); 3271 } 3272 if (pszc != NULL) { 3273 *pszc = szc; 3274 } 3275 } 3276 VM_STAT_ADD(segvnvmstats.fullszcpages[9]); 3277 return (1); 3278 } 3279 3280 /* 3281 * Create physically contiguous pages for [vp, off] - [vp, off + 3282 * page_size(szc)) range and for private segment return them in ppa array. 3283 * Pages are created either via IO or relocations. 3284 * 3285 * Return 1 on success and 0 on failure. 3286 * 3287 * If physically contiguous pages already exist for this range return 1 without 3288 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa 3289 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE(). 
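 * A sketch of the calling convention as used by segvn_fault_vnodepages()
 * (see that function for the real code):
 *
 *	ppa[0] = NULL;
 *	physcontig = segvn_fill_vp_pages(svd, vp, off, szc, ppa, &pplist,
 *	    &pszc, &downsize);
 *	if (physcontig && ppa[0] == NULL)
 *		physcontig = 0;		(contiguous pages already existed)
 *	if (!physcontig)
 *		... fall back to VOP_GETPAGE() to fill ppa ...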
3290 */ 3291 3292 static int 3293 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off, 3294 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc, 3295 int *downsize) 3296 3297 { 3298 page_t *pplist = *ppplist; 3299 size_t pgsz = page_get_pagesize(szc); 3300 pgcnt_t pages = btop(pgsz); 3301 ulong_t start_off = off; 3302 u_offset_t eoff = off + pgsz; 3303 spgcnt_t nreloc; 3304 u_offset_t io_off = off; 3305 size_t io_len; 3306 page_t *io_pplist = NULL; 3307 page_t *done_pplist = NULL; 3308 pgcnt_t pgidx = 0; 3309 page_t *pp; 3310 page_t *newpp; 3311 page_t *targpp; 3312 int io_err = 0; 3313 int i; 3314 pfn_t pfn; 3315 ulong_t ppages; 3316 page_t *targ_pplist = NULL; 3317 page_t *repl_pplist = NULL; 3318 page_t *tmp_pplist; 3319 int nios = 0; 3320 uint_t pszc; 3321 struct vattr va; 3322 3323 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]); 3324 3325 ASSERT(szc != 0); 3326 ASSERT(pplist->p_szc == szc); 3327 3328 /* 3329 * downsize will be set to 1 only if we fail to lock pages. this will 3330 * allow subsequent faults to try to relocate the page again. If we 3331 * fail due to misalignment don't downsize and let the caller map the 3332 * whole region with small mappings to avoid more faults into the area 3333 * where we can't get large pages anyway. 3334 */ 3335 *downsize = 0; 3336 3337 while (off < eoff) { 3338 newpp = pplist; 3339 ASSERT(newpp != NULL); 3340 ASSERT(PAGE_EXCL(newpp)); 3341 ASSERT(!PP_ISFREE(newpp)); 3342 /* 3343 * we pass NULL for nrelocp to page_lookup_create() 3344 * so that it doesn't relocate. We relocate here 3345 * later only after we make sure we can lock all 3346 * pages in the range we handle and they are all 3347 * aligned. 3348 */ 3349 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0); 3350 ASSERT(pp != NULL); 3351 ASSERT(!PP_ISFREE(pp)); 3352 ASSERT(pp->p_vnode == vp); 3353 ASSERT(pp->p_offset == off); 3354 if (pp == newpp) { 3355 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]); 3356 page_sub(&pplist, pp); 3357 ASSERT(PAGE_EXCL(pp)); 3358 ASSERT(page_iolock_assert(pp)); 3359 page_list_concat(&io_pplist, &pp); 3360 off += PAGESIZE; 3361 continue; 3362 } 3363 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]); 3364 pfn = page_pptonum(pp); 3365 pszc = pp->p_szc; 3366 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL && 3367 IS_P2ALIGNED(pfn, pages)) { 3368 ASSERT(repl_pplist == NULL); 3369 ASSERT(done_pplist == NULL); 3370 ASSERT(pplist == *ppplist); 3371 page_unlock(pp); 3372 page_free_replacement_page(pplist); 3373 page_create_putback(pages); 3374 *ppplist = NULL; 3375 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]); 3376 return (1); 3377 } 3378 if (pszc >= szc) { 3379 page_unlock(pp); 3380 segvn_faultvnmpss_align_err1++; 3381 goto out; 3382 } 3383 ppages = page_get_pagecnt(pszc); 3384 if (!IS_P2ALIGNED(pfn, ppages)) { 3385 ASSERT(pszc > 0); 3386 /* 3387 * sizing down to pszc won't help. 3388 */ 3389 page_unlock(pp); 3390 segvn_faultvnmpss_align_err2++; 3391 goto out; 3392 } 3393 pfn = page_pptonum(newpp); 3394 if (!IS_P2ALIGNED(pfn, ppages)) { 3395 ASSERT(pszc > 0); 3396 /* 3397 * sizing down to pszc won't help. 
3398 */ 3399 page_unlock(pp); 3400 segvn_faultvnmpss_align_err3++; 3401 goto out; 3402 } 3403 if (!PAGE_EXCL(pp)) { 3404 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]); 3405 page_unlock(pp); 3406 *downsize = 1; 3407 *ret_pszc = pp->p_szc; 3408 goto out; 3409 } 3410 targpp = pp; 3411 if (io_pplist != NULL) { 3412 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]); 3413 io_len = off - io_off; 3414 /* 3415 * Some file systems like NFS don't check EOF 3416 * conditions in VOP_PAGEIO(). Check it here 3417 * now that pages are locked SE_EXCL. Any file 3418 * truncation will wait until the pages are 3419 * unlocked so no need to worry that file will 3420 * be truncated after we check its size here. 3421 * XXX fix NFS to remove this check. 3422 */ 3423 va.va_mask = AT_SIZE; 3424 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) { 3425 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]); 3426 page_unlock(targpp); 3427 goto out; 3428 } 3429 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3430 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]); 3431 *downsize = 1; 3432 *ret_pszc = 0; 3433 page_unlock(targpp); 3434 goto out; 3435 } 3436 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3437 B_READ, svd->cred, NULL); 3438 if (io_err) { 3439 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]); 3440 page_unlock(targpp); 3441 if (io_err == EDEADLK) { 3442 segvn_vmpss_pageio_deadlk_err++; 3443 } 3444 goto out; 3445 } 3446 nios++; 3447 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]); 3448 while (io_pplist != NULL) { 3449 pp = io_pplist; 3450 page_sub(&io_pplist, pp); 3451 ASSERT(page_iolock_assert(pp)); 3452 page_io_unlock(pp); 3453 pgidx = (pp->p_offset - start_off) >> 3454 PAGESHIFT; 3455 ASSERT(pgidx < pages); 3456 ppa[pgidx] = pp; 3457 page_list_concat(&done_pplist, &pp); 3458 } 3459 } 3460 pp = targpp; 3461 ASSERT(PAGE_EXCL(pp)); 3462 ASSERT(pp->p_szc <= pszc); 3463 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) { 3464 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]); 3465 page_unlock(pp); 3466 *downsize = 1; 3467 *ret_pszc = pp->p_szc; 3468 goto out; 3469 } 3470 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]); 3471 /* 3472 * page szc chould have changed before the entire group was 3473 * locked. reread page szc. 
3474 */ 3475 pszc = pp->p_szc; 3476 ppages = page_get_pagecnt(pszc); 3477 3478 /* link just the roots */ 3479 page_list_concat(&targ_pplist, &pp); 3480 page_sub(&pplist, newpp); 3481 page_list_concat(&repl_pplist, &newpp); 3482 off += PAGESIZE; 3483 while (--ppages != 0) { 3484 newpp = pplist; 3485 page_sub(&pplist, newpp); 3486 off += PAGESIZE; 3487 } 3488 io_off = off; 3489 } 3490 if (io_pplist != NULL) { 3491 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]); 3492 io_len = eoff - io_off; 3493 va.va_mask = AT_SIZE; 3494 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) { 3495 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]); 3496 goto out; 3497 } 3498 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3499 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]); 3500 *downsize = 1; 3501 *ret_pszc = 0; 3502 goto out; 3503 } 3504 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3505 B_READ, svd->cred, NULL); 3506 if (io_err) { 3507 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]); 3508 if (io_err == EDEADLK) { 3509 segvn_vmpss_pageio_deadlk_err++; 3510 } 3511 goto out; 3512 } 3513 nios++; 3514 while (io_pplist != NULL) { 3515 pp = io_pplist; 3516 page_sub(&io_pplist, pp); 3517 ASSERT(page_iolock_assert(pp)); 3518 page_io_unlock(pp); 3519 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3520 ASSERT(pgidx < pages); 3521 ppa[pgidx] = pp; 3522 } 3523 } 3524 /* 3525 * we're now bound to succeed or panic. 3526 * remove pages from done_pplist. it's not needed anymore. 3527 */ 3528 while (done_pplist != NULL) { 3529 pp = done_pplist; 3530 page_sub(&done_pplist, pp); 3531 } 3532 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]); 3533 ASSERT(pplist == NULL); 3534 *ppplist = NULL; 3535 while (targ_pplist != NULL) { 3536 int ret; 3537 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]); 3538 ASSERT(repl_pplist); 3539 pp = targ_pplist; 3540 page_sub(&targ_pplist, pp); 3541 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3542 newpp = repl_pplist; 3543 page_sub(&repl_pplist, newpp); 3544 #ifdef DEBUG 3545 pfn = page_pptonum(pp); 3546 pszc = pp->p_szc; 3547 ppages = page_get_pagecnt(pszc); 3548 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3549 pfn = page_pptonum(newpp); 3550 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3551 ASSERT(P2PHASE(pfn, pages) == pgidx); 3552 #endif 3553 nreloc = 0; 3554 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL); 3555 if (ret != 0 || nreloc == 0) { 3556 panic("segvn_fill_vp_pages: " 3557 "page_relocate failed"); 3558 } 3559 pp = newpp; 3560 while (nreloc-- != 0) { 3561 ASSERT(PAGE_EXCL(pp)); 3562 ASSERT(pp->p_vnode == vp); 3563 ASSERT(pgidx == 3564 ((pp->p_offset - start_off) >> PAGESHIFT)); 3565 ppa[pgidx++] = pp; 3566 pp++; 3567 } 3568 } 3569 3570 if (svd->type == MAP_PRIVATE) { 3571 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]); 3572 for (i = 0; i < pages; i++) { 3573 ASSERT(ppa[i] != NULL); 3574 ASSERT(PAGE_EXCL(ppa[i])); 3575 ASSERT(ppa[i]->p_vnode == vp); 3576 ASSERT(ppa[i]->p_offset == 3577 start_off + (i << PAGESHIFT)); 3578 page_downgrade(ppa[i]); 3579 } 3580 ppa[pages] = NULL; 3581 } else { 3582 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]); 3583 /* 3584 * the caller will still call VOP_GETPAGE() for shared segments 3585 * to check FS write permissions. For private segments we map 3586 * file read only anyway. so no VOP_GETPAGE is needed. 
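 * In other words, the private case above returns the pages SE_SHARED in a
 * NULL-terminated ppa[], while the shared case below unlocks them and
 * resets ppa[0] to NULL so the caller goes through VOP_GETPAGE().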
3587 */ 3588 for (i = 0; i < pages; i++) { 3589 ASSERT(ppa[i] != NULL); 3590 ASSERT(PAGE_EXCL(ppa[i])); 3591 ASSERT(ppa[i]->p_vnode == vp); 3592 ASSERT(ppa[i]->p_offset == 3593 start_off + (i << PAGESHIFT)); 3594 page_unlock(ppa[i]); 3595 } 3596 ppa[0] = NULL; 3597 } 3598 3599 return (1); 3600 out: 3601 /* 3602 * Do the cleanup. Unlock target pages we didn't relocate. They are 3603 * linked on targ_pplist by root pages. reassemble unused replacement 3604 * and io pages back to pplist. 3605 */ 3606 if (io_pplist != NULL) { 3607 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]); 3608 pp = io_pplist; 3609 do { 3610 ASSERT(pp->p_vnode == vp); 3611 ASSERT(pp->p_offset == io_off); 3612 ASSERT(page_iolock_assert(pp)); 3613 page_io_unlock(pp); 3614 page_hashout(pp, NULL); 3615 io_off += PAGESIZE; 3616 } while ((pp = pp->p_next) != io_pplist); 3617 page_list_concat(&io_pplist, &pplist); 3618 pplist = io_pplist; 3619 } 3620 tmp_pplist = NULL; 3621 while (targ_pplist != NULL) { 3622 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]); 3623 pp = targ_pplist; 3624 ASSERT(PAGE_EXCL(pp)); 3625 page_sub(&targ_pplist, pp); 3626 3627 pszc = pp->p_szc; 3628 ppages = page_get_pagecnt(pszc); 3629 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3630 3631 if (pszc != 0) { 3632 group_page_unlock(pp); 3633 } 3634 page_unlock(pp); 3635 3636 pp = repl_pplist; 3637 ASSERT(pp != NULL); 3638 ASSERT(PAGE_EXCL(pp)); 3639 ASSERT(pp->p_szc == szc); 3640 page_sub(&repl_pplist, pp); 3641 3642 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3643 3644 /* relink replacement page */ 3645 page_list_concat(&tmp_pplist, &pp); 3646 while (--ppages != 0) { 3647 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]); 3648 pp++; 3649 ASSERT(PAGE_EXCL(pp)); 3650 ASSERT(pp->p_szc == szc); 3651 page_list_concat(&tmp_pplist, &pp); 3652 } 3653 } 3654 if (tmp_pplist != NULL) { 3655 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]); 3656 page_list_concat(&tmp_pplist, &pplist); 3657 pplist = tmp_pplist; 3658 } 3659 /* 3660 * at this point all pages are either on done_pplist or 3661 * pplist. They can't be all on done_pplist otherwise 3662 * we'd've been done. 3663 */ 3664 ASSERT(pplist != NULL); 3665 if (nios != 0) { 3666 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]); 3667 pp = pplist; 3668 do { 3669 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]); 3670 ASSERT(pp->p_szc == szc); 3671 ASSERT(PAGE_EXCL(pp)); 3672 ASSERT(pp->p_vnode != vp); 3673 pp->p_szc = 0; 3674 } while ((pp = pp->p_next) != pplist); 3675 3676 pp = done_pplist; 3677 do { 3678 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]); 3679 ASSERT(pp->p_szc == szc); 3680 ASSERT(PAGE_EXCL(pp)); 3681 ASSERT(pp->p_vnode == vp); 3682 pp->p_szc = 0; 3683 } while ((pp = pp->p_next) != done_pplist); 3684 3685 while (pplist != NULL) { 3686 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]); 3687 pp = pplist; 3688 page_sub(&pplist, pp); 3689 page_free(pp, 0); 3690 } 3691 3692 while (done_pplist != NULL) { 3693 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]); 3694 pp = done_pplist; 3695 page_sub(&done_pplist, pp); 3696 page_unlock(pp); 3697 } 3698 *ppplist = NULL; 3699 return (0); 3700 } 3701 ASSERT(pplist == *ppplist); 3702 if (io_err) { 3703 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]); 3704 /* 3705 * don't downsize on io error. 3706 * see if vop_getpage succeeds. 3707 * pplist may still be used in this case 3708 * for relocations. 
3709 */ 3710 return (0); 3711 } 3712 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]); 3713 page_free_replacement_page(pplist); 3714 page_create_putback(pages); 3715 *ppplist = NULL; 3716 return (0); 3717 } 3718 3719 int segvn_anypgsz = 0; 3720 3721 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \ 3722 if ((type) == F_SOFTLOCK) { \ 3723 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \ 3724 -(pages)); \ 3725 } 3726 3727 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \ 3728 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \ 3729 if ((rw) == S_WRITE) { \ 3730 for (i = 0; i < (pages); i++) { \ 3731 ASSERT((ppa)[i]->p_vnode == \ 3732 (ppa)[0]->p_vnode); \ 3733 hat_setmod((ppa)[i]); \ 3734 } \ 3735 } else if ((rw) != S_OTHER && \ 3736 ((prot) & (vpprot) & PROT_WRITE)) { \ 3737 for (i = 0; i < (pages); i++) { \ 3738 ASSERT((ppa)[i]->p_vnode == \ 3739 (ppa)[0]->p_vnode); \ 3740 if (!hat_ismod((ppa)[i])) { \ 3741 prot &= ~PROT_WRITE; \ 3742 break; \ 3743 } \ 3744 } \ 3745 } \ 3746 } 3747 3748 #ifdef VM_STATS 3749 3750 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \ 3751 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]); 3752 3753 #else /* VM_STATS */ 3754 3755 #define SEGVN_VMSTAT_FLTVNPAGES(idx) 3756 3757 #endif 3758 3759 static faultcode_t 3760 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 3761 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 3762 caddr_t eaddr, int brkcow) 3763 { 3764 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 3765 struct anon_map *amp = svd->amp; 3766 uchar_t segtype = svd->type; 3767 uint_t szc = seg->s_szc; 3768 size_t pgsz = page_get_pagesize(szc); 3769 size_t maxpgsz = pgsz; 3770 pgcnt_t pages = btop(pgsz); 3771 pgcnt_t maxpages = pages; 3772 size_t ppasize = (pages + 1) * sizeof (page_t *); 3773 caddr_t a = lpgaddr; 3774 caddr_t maxlpgeaddr = lpgeaddr; 3775 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base); 3776 ulong_t aindx = svd->anon_index + seg_page(seg, a); 3777 struct vpage *vpage = (svd->vpage != NULL) ? 3778 &svd->vpage[seg_page(seg, a)] : NULL; 3779 vnode_t *vp = svd->vp; 3780 page_t **ppa; 3781 uint_t pszc; 3782 size_t ppgsz; 3783 pgcnt_t ppages; 3784 faultcode_t err = 0; 3785 int ierr; 3786 int vop_size_err = 0; 3787 uint_t protchk, prot, vpprot; 3788 ulong_t i; 3789 int hat_flag = (type == F_SOFTLOCK) ? 
HAT_LOAD_LOCK : HAT_LOAD; 3790 anon_sync_obj_t an_cookie; 3791 enum seg_rw arw; 3792 int alloc_failed = 0; 3793 int adjszc_chk; 3794 struct vattr va; 3795 int xhat = 0; 3796 page_t *pplist; 3797 pfn_t pfn; 3798 int physcontig; 3799 int upgrdfail; 3800 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */ 3801 int tron = (svd->tr_state == SEGVN_TR_ON); 3802 3803 ASSERT(szc != 0); 3804 ASSERT(vp != NULL); 3805 ASSERT(brkcow == 0 || amp != NULL); 3806 ASSERT(tron == 0 || amp != NULL); 3807 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 3808 ASSERT(!(svd->flags & MAP_NORESERVE)); 3809 ASSERT(type != F_SOFTUNLOCK); 3810 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 3811 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages)); 3812 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 3813 ASSERT(seg->s_szc < NBBY * sizeof (int)); 3814 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz); 3815 ASSERT(svd->tr_state != SEGVN_TR_INIT); 3816 3817 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]); 3818 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]); 3819 3820 if (svd->flags & MAP_TEXT) { 3821 hat_flag |= HAT_LOAD_TEXT; 3822 } 3823 3824 if (svd->pageprot) { 3825 switch (rw) { 3826 case S_READ: 3827 protchk = PROT_READ; 3828 break; 3829 case S_WRITE: 3830 protchk = PROT_WRITE; 3831 break; 3832 case S_EXEC: 3833 protchk = PROT_EXEC; 3834 break; 3835 case S_OTHER: 3836 default: 3837 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 3838 break; 3839 } 3840 } else { 3841 prot = svd->prot; 3842 /* caller has already done segment level protection check. */ 3843 } 3844 3845 if (seg->s_as->a_hat != hat) { 3846 xhat = 1; 3847 } 3848 3849 if (rw == S_WRITE && segtype == MAP_PRIVATE) { 3850 SEGVN_VMSTAT_FLTVNPAGES(2); 3851 arw = S_READ; 3852 } else { 3853 arw = rw; 3854 } 3855 3856 ppa = kmem_alloc(ppasize, KM_SLEEP); 3857 3858 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]); 3859 3860 for (;;) { 3861 adjszc_chk = 0; 3862 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) { 3863 if (adjszc_chk) { 3864 while (szc < seg->s_szc) { 3865 uintptr_t e; 3866 uint_t tszc; 3867 tszc = segvn_anypgsz_vnode ? 
szc + 1 : 3868 seg->s_szc; 3869 ppgsz = page_get_pagesize(tszc); 3870 if (!IS_P2ALIGNED(a, ppgsz) || 3871 ((alloc_failed >> tszc) & 0x1)) { 3872 break; 3873 } 3874 SEGVN_VMSTAT_FLTVNPAGES(4); 3875 szc = tszc; 3876 pgsz = ppgsz; 3877 pages = btop(pgsz); 3878 e = P2ROUNDUP((uintptr_t)eaddr, pgsz); 3879 lpgeaddr = (caddr_t)e; 3880 } 3881 } 3882 3883 again: 3884 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) { 3885 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 3886 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 3887 anon_array_enter(amp, aindx, &an_cookie); 3888 if (anon_get_ptr(amp->ahp, aindx) != NULL) { 3889 SEGVN_VMSTAT_FLTVNPAGES(5); 3890 ASSERT(anon_pages(amp->ahp, aindx, 3891 maxpages) == maxpages); 3892 anon_array_exit(&an_cookie); 3893 ANON_LOCK_EXIT(&amp->a_rwlock); 3894 err = segvn_fault_anonpages(hat, seg, 3895 a, a + maxpgsz, type, rw, 3896 MAX(a, addr), 3897 MIN(a + maxpgsz, eaddr), brkcow); 3898 if (err != 0) { 3899 SEGVN_VMSTAT_FLTVNPAGES(6); 3900 goto out; 3901 } 3902 if (szc < seg->s_szc) { 3903 szc = seg->s_szc; 3904 pgsz = maxpgsz; 3905 pages = maxpages; 3906 lpgeaddr = maxlpgeaddr; 3907 } 3908 goto next; 3909 } else { 3910 ASSERT(anon_pages(amp->ahp, aindx, 3911 maxpages) == 0); 3912 SEGVN_VMSTAT_FLTVNPAGES(7); 3913 anon_array_exit(&an_cookie); 3914 ANON_LOCK_EXIT(&amp->a_rwlock); 3915 } 3916 } 3917 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz)); 3918 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz)); 3919 3920 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 3921 ASSERT(vpage != NULL); 3922 prot = VPP_PROT(vpage); 3923 ASSERT(sameprot(seg, a, maxpgsz)); 3924 if ((prot & protchk) == 0) { 3925 SEGVN_VMSTAT_FLTVNPAGES(8); 3926 err = FC_PROT; 3927 goto out; 3928 } 3929 } 3930 if (type == F_SOFTLOCK) { 3931 atomic_add_long((ulong_t *)&svd->softlockcnt, 3932 pages); 3933 } 3934 3935 pplist = NULL; 3936 physcontig = 0; 3937 ppa[0] = NULL; 3938 if (!brkcow && !tron && szc && 3939 !page_exists_physcontig(vp, off, szc, 3940 segtype == MAP_PRIVATE ?
ppa : NULL)) { 3941 SEGVN_VMSTAT_FLTVNPAGES(9); 3942 if (page_alloc_pages(vp, seg, a, &pplist, NULL, 3943 szc, 0, 0) && type != F_SOFTLOCK) { 3944 SEGVN_VMSTAT_FLTVNPAGES(10); 3945 pszc = 0; 3946 ierr = -1; 3947 alloc_failed |= (1 << szc); 3948 break; 3949 } 3950 if (pplist != NULL && 3951 vp->v_mpssdata == SEGVN_PAGEIO) { 3952 int downsize; 3953 SEGVN_VMSTAT_FLTVNPAGES(11); 3954 physcontig = segvn_fill_vp_pages(svd, 3955 vp, off, szc, ppa, &pplist, 3956 &pszc, &downsize); 3957 ASSERT(!physcontig || pplist == NULL); 3958 if (!physcontig && downsize && 3959 type != F_SOFTLOCK) { 3960 ASSERT(pplist == NULL); 3961 SEGVN_VMSTAT_FLTVNPAGES(12); 3962 ierr = -1; 3963 break; 3964 } 3965 ASSERT(!physcontig || 3966 segtype == MAP_PRIVATE || 3967 ppa[0] == NULL); 3968 if (physcontig && ppa[0] == NULL) { 3969 physcontig = 0; 3970 } 3971 } 3972 } else if (!brkcow && !tron && szc && ppa[0] != NULL) { 3973 SEGVN_VMSTAT_FLTVNPAGES(13); 3974 ASSERT(segtype == MAP_PRIVATE); 3975 physcontig = 1; 3976 } 3977 3978 if (!physcontig) { 3979 SEGVN_VMSTAT_FLTVNPAGES(14); 3980 ppa[0] = NULL; 3981 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz, 3982 &vpprot, ppa, pgsz, seg, a, arw, 3983 svd->cred, NULL); 3984 #ifdef DEBUG 3985 if (ierr == 0) { 3986 for (i = 0; i < pages; i++) { 3987 ASSERT(PAGE_LOCKED(ppa[i])); 3988 ASSERT(!PP_ISFREE(ppa[i])); 3989 ASSERT(ppa[i]->p_vnode == vp); 3990 ASSERT(ppa[i]->p_offset == 3991 off + (i << PAGESHIFT)); 3992 } 3993 } 3994 #endif /* DEBUG */ 3995 if (segtype == MAP_PRIVATE) { 3996 SEGVN_VMSTAT_FLTVNPAGES(15); 3997 vpprot &= ~PROT_WRITE; 3998 } 3999 } else { 4000 ASSERT(segtype == MAP_PRIVATE); 4001 SEGVN_VMSTAT_FLTVNPAGES(16); 4002 vpprot = PROT_ALL & ~PROT_WRITE; 4003 ierr = 0; 4004 } 4005 4006 if (ierr != 0) { 4007 SEGVN_VMSTAT_FLTVNPAGES(17); 4008 if (pplist != NULL) { 4009 SEGVN_VMSTAT_FLTVNPAGES(18); 4010 page_free_replacement_page(pplist); 4011 page_create_putback(pages); 4012 } 4013 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4014 if (a + pgsz <= eaddr) { 4015 SEGVN_VMSTAT_FLTVNPAGES(19); 4016 err = FC_MAKE_ERR(ierr); 4017 goto out; 4018 } 4019 va.va_mask = AT_SIZE; 4020 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) { 4021 SEGVN_VMSTAT_FLTVNPAGES(20); 4022 err = FC_MAKE_ERR(EIO); 4023 goto out; 4024 } 4025 if (btopr(va.va_size) >= btopr(off + pgsz)) { 4026 SEGVN_VMSTAT_FLTVNPAGES(21); 4027 err = FC_MAKE_ERR(ierr); 4028 goto out; 4029 } 4030 if (btopr(va.va_size) < 4031 btopr(off + (eaddr - a))) { 4032 SEGVN_VMSTAT_FLTVNPAGES(22); 4033 err = FC_MAKE_ERR(ierr); 4034 goto out; 4035 } 4036 if (brkcow || tron || type == F_SOFTLOCK) { 4037 /* can't reduce map area */ 4038 SEGVN_VMSTAT_FLTVNPAGES(23); 4039 vop_size_err = 1; 4040 goto out; 4041 } 4042 SEGVN_VMSTAT_FLTVNPAGES(24); 4043 ASSERT(szc != 0); 4044 pszc = 0; 4045 ierr = -1; 4046 break; 4047 } 4048 4049 if (amp != NULL) { 4050 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4051 anon_array_enter(amp, aindx, &an_cookie); 4052 } 4053 if (amp != NULL && 4054 anon_get_ptr(amp->ahp, aindx) != NULL) { 4055 ulong_t taindx = P2ALIGN(aindx, maxpages); 4056 4057 SEGVN_VMSTAT_FLTVNPAGES(25); 4058 ASSERT(anon_pages(amp->ahp, taindx, 4059 maxpages) == maxpages); 4060 for (i = 0; i < pages; i++) { 4061 page_unlock(ppa[i]); 4062 } 4063 anon_array_exit(&an_cookie); 4064 ANON_LOCK_EXIT(&->a_rwlock); 4065 if (pplist != NULL) { 4066 page_free_replacement_page(pplist); 4067 page_create_putback(pages); 4068 } 4069 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4070 if (szc < seg->s_szc) { 4071 SEGVN_VMSTAT_FLTVNPAGES(26); 4072 /* 4073 * For private segments SOFTLOCK 
4074 * either always breaks cow (any rw 4075 * type except S_READ_NOCOW) or 4076 * the address space is locked as writer 4077 * (S_READ_NOCOW case) and anon slots 4078 * can't show up on second check. 4079 * Therefore if we are here for the 4080 * SOFTLOCK case it must be a cow 4081 * break, but a cow break never reduces 4082 * szc. Text replication (tron) in 4083 * this case works as a cow break. 4084 * Thus the assert below. 4085 */ 4086 ASSERT(!brkcow && !tron && 4087 type != F_SOFTLOCK); 4088 pszc = seg->s_szc; 4089 ierr = -2; 4090 break; 4091 } 4092 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4093 goto again; 4094 } 4095 #ifdef DEBUG 4096 if (amp != NULL) { 4097 ulong_t taindx = P2ALIGN(aindx, maxpages); 4098 ASSERT(!anon_pages(amp->ahp, taindx, maxpages)); 4099 } 4100 #endif /* DEBUG */ 4101 4102 if (brkcow || tron) { 4103 ASSERT(amp != NULL); 4104 ASSERT(pplist == NULL); 4105 ASSERT(szc == seg->s_szc); 4106 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4107 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 4108 SEGVN_VMSTAT_FLTVNPAGES(27); 4109 ierr = anon_map_privatepages(amp, aindx, szc, 4110 seg, a, prot, ppa, vpage, segvn_anypgsz, 4111 tron ? PG_LOCAL : 0, svd->cred); 4112 if (ierr != 0) { 4113 SEGVN_VMSTAT_FLTVNPAGES(28); 4114 anon_array_exit(&an_cookie); 4115 ANON_LOCK_EXIT(&amp->a_rwlock); 4116 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4117 err = FC_MAKE_ERR(ierr); 4118 goto out; 4119 } 4120 4121 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4122 /* 4123 * p_szc can't be changed for locked 4124 * swapfs pages. 4125 */ 4126 ASSERT(svd->rcookie == 4127 HAT_INVALID_REGION_COOKIE); 4128 hat_memload_array(hat, a, pgsz, ppa, prot, 4129 hat_flag); 4130 4131 if (!(hat_flag & HAT_LOAD_LOCK)) { 4132 SEGVN_VMSTAT_FLTVNPAGES(29); 4133 for (i = 0; i < pages; i++) { 4134 page_unlock(ppa[i]); 4135 } 4136 } 4137 anon_array_exit(&an_cookie); 4138 ANON_LOCK_EXIT(&amp->a_rwlock); 4139 goto next; 4140 } 4141 4142 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 4143 (!svd->pageprot && svd->prot == (prot & vpprot))); 4144 4145 pfn = page_pptonum(ppa[0]); 4146 /* 4147 * hat_page_demote() needs an SE_EXCL lock on one of 4148 * constituent page_t's and it decreases root's p_szc 4149 * last. This means that if root's p_szc is equal to szc and 4150 * all its constituent pages are locked, any 4151 * hat_page_demote() that could have changed p_szc to 4152 * szc is already done and no new hat_page_demote() 4153 * can start for this large page. 4154 */ 4155 4156 /* 4157 * We need to make sure the same mapping size is used for 4158 * the same address range if there's a possibility the 4159 * address is already mapped, because the hat layer panics 4160 * when a translation is loaded for a range already 4161 * mapped with a different page size. We achieve this 4162 * by always using the largest page size possible subject 4163 * to the constraints of page size, segment page size 4164 * and page alignment. Since mappings are invalidated 4165 * when those constraints change and make it 4166 * impossible to use the previously used mapping size, no 4167 * mapping size conflicts should happen.
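 *
 * As a rough illustration only (sizes are hypothetical): with
 * an 8K base page and a 4M seg->s_szc, a fault address a is
 * mapped with the largest size s for which
 * IS_P2ALIGNED(a, page_get_pagesize(s)) holds, s does not
 * exceed seg->s_szc, and bit s of alloc_failed is not set;
 * the adjszc_chk block at the top of the loop grows szc back
 * up under exactly these conditions after a size down.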
4168 */ 4169 4170 chkszc: 4171 if ((pszc = ppa[0]->p_szc) == szc && 4172 IS_P2ALIGNED(pfn, pages)) { 4173 4174 SEGVN_VMSTAT_FLTVNPAGES(30); 4175 #ifdef DEBUG 4176 for (i = 0; i < pages; i++) { 4177 ASSERT(PAGE_LOCKED(ppa[i])); 4178 ASSERT(!PP_ISFREE(ppa[i])); 4179 ASSERT(page_pptonum(ppa[i]) == 4180 pfn + i); 4181 ASSERT(ppa[i]->p_szc == szc); 4182 ASSERT(ppa[i]->p_vnode == vp); 4183 ASSERT(ppa[i]->p_offset == 4184 off + (i << PAGESHIFT)); 4185 } 4186 #endif /* DEBUG */ 4187 /* 4188 * All pages are of szc we need and they are 4189 * all locked so they can't change szc. load 4190 * translations. 4191 * 4192 * if page got promoted since last check 4193 * we don't need pplist. 4194 */ 4195 if (pplist != NULL) { 4196 page_free_replacement_page(pplist); 4197 page_create_putback(pages); 4198 } 4199 if (PP_ISMIGRATE(ppa[0])) { 4200 page_migrate(seg, a, ppa, pages); 4201 } 4202 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4203 prot, vpprot); 4204 if (!xhat) { 4205 hat_memload_array_region(hat, a, pgsz, 4206 ppa, prot & vpprot, hat_flag, 4207 svd->rcookie); 4208 } else { 4209 /* 4210 * avoid large xhat mappings to FS 4211 * pages so that hat_page_demote() 4212 * doesn't need to check for xhat 4213 * large mappings. 4214 * Don't use regions with xhats. 4215 */ 4216 for (i = 0; i < pages; i++) { 4217 hat_memload(hat, 4218 a + (i << PAGESHIFT), 4219 ppa[i], prot & vpprot, 4220 hat_flag); 4221 } 4222 } 4223 4224 if (!(hat_flag & HAT_LOAD_LOCK)) { 4225 for (i = 0; i < pages; i++) { 4226 page_unlock(ppa[i]); 4227 } 4228 } 4229 if (amp != NULL) { 4230 anon_array_exit(&an_cookie); 4231 ANON_LOCK_EXIT(&->a_rwlock); 4232 } 4233 goto next; 4234 } 4235 4236 /* 4237 * See if upsize is possible. 4238 */ 4239 if (pszc > szc && szc < seg->s_szc && 4240 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) { 4241 pgcnt_t aphase; 4242 uint_t pszc1 = MIN(pszc, seg->s_szc); 4243 ppgsz = page_get_pagesize(pszc1); 4244 ppages = btop(ppgsz); 4245 aphase = btop(P2PHASE((uintptr_t)a, ppgsz)); 4246 4247 ASSERT(type != F_SOFTLOCK); 4248 4249 SEGVN_VMSTAT_FLTVNPAGES(31); 4250 if (aphase != P2PHASE(pfn, ppages)) { 4251 segvn_faultvnmpss_align_err4++; 4252 } else { 4253 SEGVN_VMSTAT_FLTVNPAGES(32); 4254 if (pplist != NULL) { 4255 page_t *pl = pplist; 4256 page_free_replacement_page(pl); 4257 page_create_putback(pages); 4258 } 4259 for (i = 0; i < pages; i++) { 4260 page_unlock(ppa[i]); 4261 } 4262 if (amp != NULL) { 4263 anon_array_exit(&an_cookie); 4264 ANON_LOCK_EXIT(&->a_rwlock); 4265 } 4266 pszc = pszc1; 4267 ierr = -2; 4268 break; 4269 } 4270 } 4271 4272 /* 4273 * check if we should use smallest mapping size. 4274 */ 4275 upgrdfail = 0; 4276 if (szc == 0 || xhat || 4277 (pszc >= szc && 4278 !IS_P2ALIGNED(pfn, pages)) || 4279 (pszc < szc && 4280 !segvn_full_szcpages(ppa, szc, &upgrdfail, 4281 &pszc))) { 4282 4283 if (upgrdfail && type != F_SOFTLOCK) { 4284 /* 4285 * segvn_full_szcpages failed to lock 4286 * all pages EXCL. Size down. 
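 *
 * (Sizing down is only possible for faults that may shrink
 * the mapped area; for F_SOFTLOCK the range is fixed, so the
 * fallback below keeps the pages as they are and loads them
 * with hat_memload_region() one base page at a time when a
 * single large mapping cannot be used.)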
4287 */ 4288 ASSERT(pszc < szc); 4289 4290 SEGVN_VMSTAT_FLTVNPAGES(33); 4291 4292 if (pplist != NULL) { 4293 page_t *pl = pplist; 4294 page_free_replacement_page(pl); 4295 page_create_putback(pages); 4296 } 4297 4298 for (i = 0; i < pages; i++) { 4299 page_unlock(ppa[i]); 4300 } 4301 if (amp != NULL) { 4302 anon_array_exit(&an_cookie); 4303 ANON_LOCK_EXIT(&amp->a_rwlock); 4304 } 4305 ierr = -1; 4306 break; 4307 } 4308 if (szc != 0 && !xhat && !upgrdfail) { 4309 segvn_faultvnmpss_align_err5++; 4310 } 4311 SEGVN_VMSTAT_FLTVNPAGES(34); 4312 if (pplist != NULL) { 4313 page_free_replacement_page(pplist); 4314 page_create_putback(pages); 4315 } 4316 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4317 prot, vpprot); 4318 if (upgrdfail && segvn_anypgsz_vnode) { 4319 /* SOFTLOCK case */ 4320 hat_memload_array_region(hat, a, pgsz, 4321 ppa, prot & vpprot, hat_flag, 4322 svd->rcookie); 4323 } else { 4324 for (i = 0; i < pages; i++) { 4325 hat_memload_region(hat, 4326 a + (i << PAGESHIFT), 4327 ppa[i], prot & vpprot, 4328 hat_flag, svd->rcookie); 4329 } 4330 } 4331 if (!(hat_flag & HAT_LOAD_LOCK)) { 4332 for (i = 0; i < pages; i++) { 4333 page_unlock(ppa[i]); 4334 } 4335 } 4336 if (amp != NULL) { 4337 anon_array_exit(&an_cookie); 4338 ANON_LOCK_EXIT(&amp->a_rwlock); 4339 } 4340 goto next; 4341 } 4342 4343 if (pszc == szc) { 4344 /* 4345 * segvn_full_szcpages() upgraded the pages' szc. 4346 */ 4347 ASSERT(pszc == ppa[0]->p_szc); 4348 ASSERT(IS_P2ALIGNED(pfn, pages)); 4349 goto chkszc; 4350 } 4351 4352 if (pszc > szc) { 4353 kmutex_t *szcmtx; 4354 SEGVN_VMSTAT_FLTVNPAGES(35); 4355 /* 4356 * p_szc of ppa[0] can change since we haven't 4357 * locked all constituent pages. Call 4358 * page_szc_lock() to prevent szc changes. 4359 * This should be a rare case that happens when 4360 * multiple segments use a different page size 4361 * to map the same file offsets. 4362 */ 4363 szcmtx = page_szc_lock(ppa[0]); 4364 pszc = ppa[0]->p_szc; 4365 ASSERT(szcmtx != NULL || pszc == 0); 4366 ASSERT(ppa[0]->p_szc <= pszc); 4367 if (pszc <= szc) { 4368 SEGVN_VMSTAT_FLTVNPAGES(36); 4369 if (szcmtx != NULL) { 4370 mutex_exit(szcmtx); 4371 } 4372 goto chkszc; 4373 } 4374 if (pplist != NULL) { 4375 /* 4376 * Page got promoted since last check; 4377 * we don't need the preallocated large 4378 * page. 4379 */ 4380 SEGVN_VMSTAT_FLTVNPAGES(37); 4381 page_free_replacement_page(pplist); 4382 page_create_putback(pages); 4383 } 4384 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4385 prot, vpprot); 4386 hat_memload_array_region(hat, a, pgsz, ppa, 4387 prot & vpprot, hat_flag, svd->rcookie); 4388 mutex_exit(szcmtx); 4389 if (!(hat_flag & HAT_LOAD_LOCK)) { 4390 for (i = 0; i < pages; i++) { 4391 page_unlock(ppa[i]); 4392 } 4393 } 4394 if (amp != NULL) { 4395 anon_array_exit(&an_cookie); 4396 ANON_LOCK_EXIT(&amp->a_rwlock); 4397 } 4398 goto next; 4399 } 4400 4401 /* 4402 * If the page got demoted since the last check 4403 * we may not have allocated a larger page. 4404 * Allocate now.
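 *
 * (If this late allocation fails and the fault is not
 * F_SOFTLOCK, the failed szc is recorded in alloc_failed and
 * the outer loop retries the range with a smaller page size,
 * just as in the initial allocation path above.)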
4405 */ 4406 if (pplist == NULL && 4407 page_alloc_pages(vp, seg, a, &pplist, NULL, 4408 szc, 0, 0) && type != F_SOFTLOCK) { 4409 SEGVN_VMSTAT_FLTVNPAGES(38); 4410 for (i = 0; i < pages; i++) { 4411 page_unlock(ppa[i]); 4412 } 4413 if (amp != NULL) { 4414 anon_array_exit(&an_cookie); 4415 ANON_LOCK_EXIT(&->a_rwlock); 4416 } 4417 ierr = -1; 4418 alloc_failed |= (1 << szc); 4419 break; 4420 } 4421 4422 SEGVN_VMSTAT_FLTVNPAGES(39); 4423 4424 if (pplist != NULL) { 4425 segvn_relocate_pages(ppa, pplist); 4426 #ifdef DEBUG 4427 } else { 4428 ASSERT(type == F_SOFTLOCK); 4429 SEGVN_VMSTAT_FLTVNPAGES(40); 4430 #endif /* DEBUG */ 4431 } 4432 4433 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot); 4434 4435 if (pplist == NULL && segvn_anypgsz_vnode == 0) { 4436 ASSERT(type == F_SOFTLOCK); 4437 for (i = 0; i < pages; i++) { 4438 ASSERT(ppa[i]->p_szc < szc); 4439 hat_memload_region(hat, 4440 a + (i << PAGESHIFT), 4441 ppa[i], prot & vpprot, hat_flag, 4442 svd->rcookie); 4443 } 4444 } else { 4445 ASSERT(pplist != NULL || type == F_SOFTLOCK); 4446 hat_memload_array_region(hat, a, pgsz, ppa, 4447 prot & vpprot, hat_flag, svd->rcookie); 4448 } 4449 if (!(hat_flag & HAT_LOAD_LOCK)) { 4450 for (i = 0; i < pages; i++) { 4451 ASSERT(PAGE_SHARED(ppa[i])); 4452 page_unlock(ppa[i]); 4453 } 4454 } 4455 if (amp != NULL) { 4456 anon_array_exit(&an_cookie); 4457 ANON_LOCK_EXIT(&->a_rwlock); 4458 } 4459 4460 next: 4461 if (vpage != NULL) { 4462 vpage += pages; 4463 } 4464 adjszc_chk = 1; 4465 } 4466 if (a == lpgeaddr) 4467 break; 4468 ASSERT(a < lpgeaddr); 4469 4470 ASSERT(!brkcow && !tron && type != F_SOFTLOCK); 4471 4472 /* 4473 * ierr == -1 means we failed to map with a large page. 4474 * (either due to allocation/relocation failures or 4475 * misalignment with other mappings to this file. 4476 * 4477 * ierr == -2 means some other thread allocated a large page 4478 * after we gave up tp map with a large page. retry with 4479 * larger mapping. 4480 */ 4481 ASSERT(ierr == -1 || ierr == -2); 4482 ASSERT(ierr == -2 || szc != 0); 4483 ASSERT(ierr == -1 || szc < seg->s_szc); 4484 if (ierr == -2) { 4485 SEGVN_VMSTAT_FLTVNPAGES(41); 4486 ASSERT(pszc > szc && pszc <= seg->s_szc); 4487 szc = pszc; 4488 } else if (segvn_anypgsz_vnode) { 4489 SEGVN_VMSTAT_FLTVNPAGES(42); 4490 szc--; 4491 } else { 4492 SEGVN_VMSTAT_FLTVNPAGES(43); 4493 ASSERT(pszc < szc); 4494 /* 4495 * other process created pszc large page. 4496 * but we still have to drop to 0 szc. 4497 */ 4498 szc = 0; 4499 } 4500 4501 pgsz = page_get_pagesize(szc); 4502 pages = btop(pgsz); 4503 if (ierr == -2) { 4504 /* 4505 * Size up case. Note lpgaddr may only be needed for 4506 * softlock case so we don't adjust it here. 4507 */ 4508 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4509 ASSERT(a >= lpgaddr); 4510 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4511 off = svd->offset + (uintptr_t)(a - seg->s_base); 4512 aindx = svd->anon_index + seg_page(seg, a); 4513 vpage = (svd->vpage != NULL) ? 4514 &svd->vpage[seg_page(seg, a)] : NULL; 4515 } else { 4516 /* 4517 * Size down case. Note lpgaddr may only be needed for 4518 * softlock case so we don't adjust it here. 4519 */ 4520 ASSERT(IS_P2ALIGNED(a, pgsz)); 4521 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4522 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4523 ASSERT(a < lpgeaddr); 4524 if (a < addr) { 4525 SEGVN_VMSTAT_FLTVNPAGES(44); 4526 /* 4527 * The beginning of the large page region can 4528 * be pulled to the right to make a smaller 4529 * region. We haven't yet faulted a single 4530 * page. 
4531 */ 4532 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4533 ASSERT(a >= lpgaddr); 4534 off = svd->offset + 4535 (uintptr_t)(a - seg->s_base); 4536 aindx = svd->anon_index + seg_page(seg, a); 4537 vpage = (svd->vpage != NULL) ? 4538 &svd->vpage[seg_page(seg, a)] : NULL; 4539 } 4540 } 4541 } 4542 out: 4543 kmem_free(ppa, ppasize); 4544 if (!err && !vop_size_err) { 4545 SEGVN_VMSTAT_FLTVNPAGES(45); 4546 return (0); 4547 } 4548 if (type == F_SOFTLOCK && a > lpgaddr) { 4549 SEGVN_VMSTAT_FLTVNPAGES(46); 4550 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4551 } 4552 if (!vop_size_err) { 4553 SEGVN_VMSTAT_FLTVNPAGES(47); 4554 return (err); 4555 } 4556 ASSERT(brkcow || tron || type == F_SOFTLOCK); 4557 /* 4558 * Large page end is mapped beyond the end of file and it's a cow 4559 * fault (can be a text replication induced cow) or softlock so we can't 4560 * reduce the map area. For now just demote the segment. This should 4561 * really only happen if the end of the file changed after the mapping 4562 * was established since when large page segments are created we make 4563 * sure they don't extend beyond the end of the file. 4564 */ 4565 SEGVN_VMSTAT_FLTVNPAGES(48); 4566 4567 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4568 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4569 err = 0; 4570 if (seg->s_szc != 0) { 4571 segvn_fltvnpages_clrszc_cnt++; 4572 ASSERT(svd->softlockcnt == 0); 4573 err = segvn_clrszc(seg); 4574 if (err != 0) { 4575 segvn_fltvnpages_clrszc_err++; 4576 } 4577 } 4578 ASSERT(err || seg->s_szc == 0); 4579 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock); 4580 /* segvn_fault will do its job as if szc had been zero to begin with */ 4581 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err)); 4582 } 4583 4584 /* 4585 * This routine will attempt to fault in one large page. 4586 * it will use smaller pages if that fails. 4587 * It should only be called for pure anonymous segments. 4588 */ 4589 static faultcode_t 4590 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 4591 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 4592 caddr_t eaddr, int brkcow) 4593 { 4594 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4595 struct anon_map *amp = svd->amp; 4596 uchar_t segtype = svd->type; 4597 uint_t szc = seg->s_szc; 4598 size_t pgsz = page_get_pagesize(szc); 4599 size_t maxpgsz = pgsz; 4600 pgcnt_t pages = btop(pgsz); 4601 uint_t ppaszc = szc; 4602 caddr_t a = lpgaddr; 4603 ulong_t aindx = svd->anon_index + seg_page(seg, a); 4604 struct vpage *vpage = (svd->vpage != NULL) ? 4605 &svd->vpage[seg_page(seg, a)] : NULL; 4606 page_t **ppa; 4607 uint_t ppa_szc; 4608 faultcode_t err; 4609 int ierr; 4610 uint_t protchk, prot, vpprot; 4611 ulong_t i; 4612 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 4613 anon_sync_obj_t cookie; 4614 int adjszc_chk; 4615 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? 
PG_LOCAL : 0; 4616 4617 ASSERT(szc != 0); 4618 ASSERT(amp != NULL); 4619 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 4620 ASSERT(!(svd->flags & MAP_NORESERVE)); 4621 ASSERT(type != F_SOFTUNLOCK); 4622 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4623 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF); 4624 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4625 4626 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 4627 4628 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]); 4629 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]); 4630 4631 if (svd->flags & MAP_TEXT) { 4632 hat_flag |= HAT_LOAD_TEXT; 4633 } 4634 4635 if (svd->pageprot) { 4636 switch (rw) { 4637 case S_READ: 4638 protchk = PROT_READ; 4639 break; 4640 case S_WRITE: 4641 protchk = PROT_WRITE; 4642 break; 4643 case S_EXEC: 4644 protchk = PROT_EXEC; 4645 break; 4646 case S_OTHER: 4647 default: 4648 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4649 break; 4650 } 4651 VM_STAT_ADD(segvnvmstats.fltanpages[2]); 4652 } else { 4653 prot = svd->prot; 4654 /* caller has already done segment level protection check. */ 4655 } 4656 4657 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP); 4658 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4659 for (;;) { 4660 adjszc_chk = 0; 4661 for (; a < lpgeaddr; a += pgsz, aindx += pages) { 4662 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 4663 VM_STAT_ADD(segvnvmstats.fltanpages[3]); 4664 ASSERT(vpage != NULL); 4665 prot = VPP_PROT(vpage); 4666 ASSERT(sameprot(seg, a, maxpgsz)); 4667 if ((prot & protchk) == 0) { 4668 err = FC_PROT; 4669 goto error; 4670 } 4671 } 4672 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) && 4673 pgsz < maxpgsz) { 4674 ASSERT(a > lpgaddr); 4675 szc = seg->s_szc; 4676 pgsz = maxpgsz; 4677 pages = btop(pgsz); 4678 ASSERT(IS_P2ALIGNED(aindx, pages)); 4679 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, 4680 pgsz); 4681 } 4682 if (type == F_SOFTLOCK) { 4683 atomic_add_long((ulong_t *)&svd->softlockcnt, 4684 pages); 4685 } 4686 anon_array_enter(amp, aindx, &cookie); 4687 ppa_szc = (uint_t)-1; 4688 ierr = anon_map_getpages(amp, aindx, szc, seg, a, 4689 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow, 4690 segvn_anypgsz, pgflags, svd->cred); 4691 if (ierr != 0) { 4692 anon_array_exit(&cookie); 4693 VM_STAT_ADD(segvnvmstats.fltanpages[4]); 4694 if (type == F_SOFTLOCK) { 4695 atomic_add_long( 4696 (ulong_t *)&svd->softlockcnt, 4697 -pages); 4698 } 4699 if (ierr > 0) { 4700 VM_STAT_ADD(segvnvmstats.fltanpages[6]); 4701 err = FC_MAKE_ERR(ierr); 4702 goto error; 4703 } 4704 break; 4705 } 4706 4707 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4708 4709 ASSERT(segtype == MAP_SHARED || 4710 ppa[0]->p_szc <= szc); 4711 ASSERT(segtype == MAP_PRIVATE || 4712 ppa[0]->p_szc >= szc); 4713 4714 /* 4715 * Handle pages that have been marked for migration 4716 */ 4717 if (lgrp_optimizations()) 4718 page_migrate(seg, a, ppa, pages); 4719 4720 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 4721 4722 if (segtype == MAP_SHARED) { 4723 vpprot |= PROT_WRITE; 4724 } 4725 4726 hat_memload_array(hat, a, pgsz, ppa, 4727 prot & vpprot, hat_flag); 4728 4729 if (hat_flag & HAT_LOAD_LOCK) { 4730 VM_STAT_ADD(segvnvmstats.fltanpages[7]); 4731 } else { 4732 VM_STAT_ADD(segvnvmstats.fltanpages[8]); 4733 for (i = 0; i < pages; i++) 4734 page_unlock(ppa[i]); 4735 } 4736 if (vpage != NULL) 4737 vpage += pages; 4738 4739 anon_array_exit(&cookie); 4740 adjszc_chk = 1; 4741 } 4742 if (a == lpgeaddr) 4743 break; 4744 ASSERT(a < lpgeaddr); 4745 /* 4746 * ierr == -1 means we failed to 
allocate a large page. 4747 * so do a size down operation. 4748 * 4749 * ierr == -2 means some other process that privately shares 4750 * pages with this process has allocated a larger page and we 4751 * need to retry with larger pages. So do a size up 4752 * operation. This relies on the fact that large pages are 4753 * never partially shared i.e. if we share any constituent 4754 * page of a large page with another process we must share the 4755 * entire large page. Note this cannot happen for SOFTLOCK 4756 * case, unless current address (a) is at the beginning of the 4757 * next page size boundary because the other process couldn't 4758 * have relocated locked pages. 4759 */ 4760 ASSERT(ierr == -1 || ierr == -2); 4761 4762 if (segvn_anypgsz) { 4763 ASSERT(ierr == -2 || szc != 0); 4764 ASSERT(ierr == -1 || szc < seg->s_szc); 4765 szc = (ierr == -1) ? szc - 1 : szc + 1; 4766 } else { 4767 /* 4768 * For non COW faults and segvn_anypgsz == 0 4769 * we need to be careful not to loop forever 4770 * if existing page is found with szc other 4771 * than 0 or seg->s_szc. This could be due 4772 * to page relocations on behalf of DR or 4773 * more likely large page creation. For this 4774 * case simply re-size to existing page's szc 4775 * if returned by anon_map_getpages(). 4776 */ 4777 if (ppa_szc == (uint_t)-1) { 4778 szc = (ierr == -1) ? 0 : seg->s_szc; 4779 } else { 4780 ASSERT(ppa_szc <= seg->s_szc); 4781 ASSERT(ierr == -2 || ppa_szc < szc); 4782 ASSERT(ierr == -1 || ppa_szc > szc); 4783 szc = ppa_szc; 4784 } 4785 } 4786 4787 pgsz = page_get_pagesize(szc); 4788 pages = btop(pgsz); 4789 ASSERT(type != F_SOFTLOCK || ierr == -1 || 4790 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz))); 4791 if (type == F_SOFTLOCK) { 4792 /* 4793 * For softlocks we cannot reduce the fault area 4794 * (calculated based on the largest page size for this 4795 * segment) for size down and a is already next 4796 * page size aligned as assertted above for size 4797 * ups. Therefore just continue in case of softlock. 4798 */ 4799 VM_STAT_ADD(segvnvmstats.fltanpages[9]); 4800 continue; /* keep lint happy */ 4801 } else if (ierr == -2) { 4802 4803 /* 4804 * Size up case. Note lpgaddr may only be needed for 4805 * softlock case so we don't adjust it here. 4806 */ 4807 VM_STAT_ADD(segvnvmstats.fltanpages[10]); 4808 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4809 ASSERT(a >= lpgaddr); 4810 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4811 aindx = svd->anon_index + seg_page(seg, a); 4812 vpage = (svd->vpage != NULL) ? 4813 &svd->vpage[seg_page(seg, a)] : NULL; 4814 } else { 4815 /* 4816 * Size down case. Note lpgaddr may only be needed for 4817 * softlock case so we don't adjust it here. 4818 */ 4819 VM_STAT_ADD(segvnvmstats.fltanpages[11]); 4820 ASSERT(IS_P2ALIGNED(a, pgsz)); 4821 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4822 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4823 ASSERT(a < lpgeaddr); 4824 if (a < addr) { 4825 /* 4826 * The beginning of the large page region can 4827 * be pulled to the right to make a smaller 4828 * region. We haven't yet faulted a single 4829 * page. 4830 */ 4831 VM_STAT_ADD(segvnvmstats.fltanpages[12]); 4832 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4833 ASSERT(a >= lpgaddr); 4834 aindx = svd->anon_index + seg_page(seg, a); 4835 vpage = (svd->vpage != NULL) ? 
4836 &svd->vpage[seg_page(seg, a)] : NULL; 4837 } 4838 } 4839 } 4840 VM_STAT_ADD(segvnvmstats.fltanpages[13]); 4841 ANON_LOCK_EXIT(&->a_rwlock); 4842 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4843 return (0); 4844 error: 4845 VM_STAT_ADD(segvnvmstats.fltanpages[14]); 4846 ANON_LOCK_EXIT(&->a_rwlock); 4847 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4848 if (type == F_SOFTLOCK && a > lpgaddr) { 4849 VM_STAT_ADD(segvnvmstats.fltanpages[15]); 4850 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4851 } 4852 return (err); 4853 } 4854 4855 int fltadvice = 1; /* set to free behind pages for sequential access */ 4856 4857 /* 4858 * This routine is called via a machine specific fault handling routine. 4859 * It is also called by software routines wishing to lock or unlock 4860 * a range of addresses. 4861 * 4862 * Here is the basic algorithm: 4863 * If unlocking 4864 * Call segvn_softunlock 4865 * Return 4866 * endif 4867 * Checking and set up work 4868 * If we will need some non-anonymous pages 4869 * Call VOP_GETPAGE over the range of non-anonymous pages 4870 * endif 4871 * Loop over all addresses requested 4872 * Call segvn_faultpage passing in page list 4873 * to load up translations and handle anonymous pages 4874 * endloop 4875 * Load up translation to any additional pages in page list not 4876 * already handled that fit into this segment 4877 */ 4878 static faultcode_t 4879 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, 4880 enum fault_type type, enum seg_rw rw) 4881 { 4882 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4883 page_t **plp, **ppp, *pp; 4884 u_offset_t off; 4885 caddr_t a; 4886 struct vpage *vpage; 4887 uint_t vpprot, prot; 4888 int err; 4889 page_t *pl[PVN_GETPAGE_NUM + 1]; 4890 size_t plsz, pl_alloc_sz; 4891 size_t page; 4892 ulong_t anon_index; 4893 struct anon_map *amp; 4894 int dogetpage = 0; 4895 caddr_t lpgaddr, lpgeaddr; 4896 size_t pgsz; 4897 anon_sync_obj_t cookie; 4898 int brkcow = BREAK_COW_SHARE(rw, type, svd->type); 4899 4900 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 4901 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE); 4902 4903 /* 4904 * First handle the easy stuff 4905 */ 4906 if (type == F_SOFTUNLOCK) { 4907 if (rw == S_READ_NOCOW) { 4908 rw = S_READ; 4909 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 4910 } 4911 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4912 pgsz = (seg->s_szc == 0) ? 
PAGESIZE : 4913 page_get_pagesize(seg->s_szc); 4914 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]); 4915 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 4916 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw); 4917 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4918 return (0); 4919 } 4920 4921 ASSERT(svd->tr_state == SEGVN_TR_OFF || 4922 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 4923 if (brkcow == 0) { 4924 if (svd->tr_state == SEGVN_TR_INIT) { 4925 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4926 if (svd->tr_state == SEGVN_TR_INIT) { 4927 ASSERT(svd->vp != NULL && svd->amp == NULL); 4928 ASSERT(svd->flags & MAP_TEXT); 4929 ASSERT(svd->type == MAP_PRIVATE); 4930 segvn_textrepl(seg); 4931 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4932 ASSERT(svd->tr_state != SEGVN_TR_ON || 4933 svd->amp != NULL); 4934 } 4935 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4936 } 4937 } else if (svd->tr_state != SEGVN_TR_OFF) { 4938 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4939 4940 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) { 4941 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 4942 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4943 return (FC_PROT); 4944 } 4945 4946 if (svd->tr_state == SEGVN_TR_ON) { 4947 ASSERT(svd->vp != NULL && svd->amp != NULL); 4948 segvn_textunrepl(seg, 0); 4949 ASSERT(svd->amp == NULL && 4950 svd->tr_state == SEGVN_TR_OFF); 4951 } else if (svd->tr_state != SEGVN_TR_OFF) { 4952 svd->tr_state = SEGVN_TR_OFF; 4953 } 4954 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 4955 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4956 } 4957 4958 top: 4959 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4960 4961 /* 4962 * If we have the same protections for the entire segment, 4963 * insure that the access being attempted is legitimate. 4964 */ 4965 4966 if (svd->pageprot == 0) { 4967 uint_t protchk; 4968 4969 switch (rw) { 4970 case S_READ: 4971 case S_READ_NOCOW: 4972 protchk = PROT_READ; 4973 break; 4974 case S_WRITE: 4975 protchk = PROT_WRITE; 4976 break; 4977 case S_EXEC: 4978 protchk = PROT_EXEC; 4979 break; 4980 case S_OTHER: 4981 default: 4982 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4983 break; 4984 } 4985 4986 if ((svd->prot & protchk) == 0) { 4987 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4988 return (FC_PROT); /* illegal access type */ 4989 } 4990 } 4991 4992 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 4993 /* this must be SOFTLOCK S_READ fault */ 4994 ASSERT(svd->amp == NULL); 4995 ASSERT(svd->tr_state == SEGVN_TR_OFF); 4996 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4997 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4998 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 4999 /* 5000 * this must be the first ever non S_READ_NOCOW 5001 * softlock for this segment. 5002 */ 5003 ASSERT(svd->softlockcnt == 0); 5004 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5005 HAT_REGION_TEXT); 5006 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5007 } 5008 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5009 goto top; 5010 } 5011 5012 /* 5013 * We can't allow the long term use of softlocks for vmpss segments, 5014 * because in some file truncation cases we should be able to demote 5015 * the segment, which requires that there are no softlocks. The 5016 * only case where it's ok to allow a SOFTLOCK fault against a vmpss 5017 * segment is S_READ_NOCOW, where the caller holds the address space 5018 * locked as writer and calls softunlock before dropping the as lock. 5019 * S_READ_NOCOW is used by /proc to read memory from another user. 
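 * (Such S_READ_NOCOW faults are always F_SOFTLOCK faults taken
 * with the address space held as writer; this is asserted
 * again below before rw is folded back into S_READ.)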
5020 * 5021 * Another deadlock between SOFTLOCK and file truncation can happen 5022 * because segvn_fault_vnodepages() calls the FS one pagesize at 5023 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages() 5024 * can cause a deadlock because the first set of page_t's remain 5025 * locked SE_SHARED. To avoid this, we demote segments on a first 5026 * SOFTLOCK if they have a length greater than the segment's 5027 * page size. 5028 * 5029 * So for now, we only avoid demoting a segment on a SOFTLOCK when 5030 * the access type is S_READ_NOCOW and the fault length is less than 5031 * or equal to the segment's page size. While this is quite restrictive, 5032 * it should be the most common case of SOFTLOCK against a vmpss 5033 * segment. 5034 * 5035 * For S_READ_NOCOW, it's safe not to do a copy on write because the 5036 * caller makes sure no COW will be caused by another thread for a 5037 * softlocked page. 5038 */ 5039 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) { 5040 int demote = 0; 5041 5042 if (rw != S_READ_NOCOW) { 5043 demote = 1; 5044 } 5045 if (!demote && len > PAGESIZE) { 5046 pgsz = page_get_pagesize(seg->s_szc); 5047 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, 5048 lpgeaddr); 5049 if (lpgeaddr - lpgaddr > pgsz) { 5050 demote = 1; 5051 } 5052 } 5053 5054 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5055 5056 if (demote) { 5057 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5058 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5059 if (seg->s_szc != 0) { 5060 segvn_vmpss_clrszc_cnt++; 5061 ASSERT(svd->softlockcnt == 0); 5062 err = segvn_clrszc(seg); 5063 if (err) { 5064 segvn_vmpss_clrszc_err++; 5065 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5066 return (FC_MAKE_ERR(err)); 5067 } 5068 } 5069 ASSERT(seg->s_szc == 0); 5070 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5071 goto top; 5072 } 5073 } 5074 5075 /* 5076 * Check to see if we need to allocate an anon_map structure. 5077 */ 5078 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) { 5079 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5080 /* 5081 * Drop the "read" lock on the segment and acquire 5082 * the "write" version since we have to allocate the 5083 * anon_map. 5084 */ 5085 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5086 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5087 5088 if (svd->amp == NULL) { 5089 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 5090 svd->amp->a_szc = seg->s_szc; 5091 } 5092 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5093 5094 /* 5095 * Start all over again since segment protections 5096 * may have changed after we dropped the "read" lock. 5097 */ 5098 goto top; 5099 } 5100 5101 /* 5102 * S_READ_NOCOW vs S_READ distinction was 5103 * only needed for the code above. After 5104 * that we treat it as S_READ. 5105 */ 5106 if (rw == S_READ_NOCOW) { 5107 ASSERT(type == F_SOFTLOCK); 5108 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5109 rw = S_READ; 5110 } 5111 5112 amp = svd->amp; 5113 5114 /* 5115 * MADV_SEQUENTIAL work is ignored for large page segments. 
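 * (The large page paths return from this function before the
 * free-behind code further below is reached, so the advice
 * only takes effect for segments with s_szc == 0.)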
5116 */ 5117 if (seg->s_szc != 0) { 5118 pgsz = page_get_pagesize(seg->s_szc); 5119 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 5120 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 5121 if (svd->vp == NULL) { 5122 err = segvn_fault_anonpages(hat, seg, lpgaddr, 5123 lpgeaddr, type, rw, addr, addr + len, brkcow); 5124 } else { 5125 err = segvn_fault_vnodepages(hat, seg, lpgaddr, 5126 lpgeaddr, type, rw, addr, addr + len, brkcow); 5127 if (err == IE_RETRY) { 5128 ASSERT(seg->s_szc == 0); 5129 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 5130 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5131 goto top; 5132 } 5133 } 5134 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5135 return (err); 5136 } 5137 5138 page = seg_page(seg, addr); 5139 if (amp != NULL) { 5140 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5141 anon_index = svd->anon_index + page; 5142 5143 if (type == F_PROT && rw == S_READ && 5144 svd->tr_state == SEGVN_TR_OFF && 5145 svd->type == MAP_PRIVATE && svd->pageprot == 0) { 5146 size_t index = anon_index; 5147 struct anon *ap; 5148 5149 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5150 /* 5151 * The fast path could apply to S_WRITE also, except 5152 * that the protection fault could be caused by lazy 5153 * tlb flush when ro->rw. In this case, the pte is 5154 * RW already. But RO in the other cpu's tlb causes 5155 * the fault. Since hat_chgprot won't do anything if 5156 * pte doesn't change, we may end up faulting 5157 * indefinitely until the RO tlb entry gets replaced. 5158 */ 5159 for (a = addr; a < addr + len; a += PAGESIZE, index++) { 5160 anon_array_enter(amp, index, &cookie); 5161 ap = anon_get_ptr(amp->ahp, index); 5162 anon_array_exit(&cookie); 5163 if ((ap == NULL) || (ap->an_refcnt != 1)) { 5164 ANON_LOCK_EXIT(&->a_rwlock); 5165 goto slow; 5166 } 5167 } 5168 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot); 5169 ANON_LOCK_EXIT(&->a_rwlock); 5170 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5171 return (0); 5172 } 5173 } 5174 slow: 5175 5176 if (svd->vpage == NULL) 5177 vpage = NULL; 5178 else 5179 vpage = &svd->vpage[page]; 5180 5181 off = svd->offset + (uintptr_t)(addr - seg->s_base); 5182 5183 /* 5184 * If MADV_SEQUENTIAL has been set for the particular page we 5185 * are faulting on, free behind all pages in the segment and put 5186 * them on the free list. 
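 *
 * Roughly, the loop below walks backwards from the faulting
 * page, looks each preceding page up SE_SHARED and, if it is
 * neither locked down nor claimed, starts an asynchronous
 * free-behind write, e.g.:
 *
 *	(void) VOP_PUTPAGE(fvp, (offset_t)fpgoff, PAGESIZE,
 *	    (B_DONTNEED|B_FREE|B_ASYNC), svd->cred, NULL);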
5187 */ 5188 5189 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) { 5190 struct vpage *vpp; 5191 ulong_t fanon_index; 5192 size_t fpage; 5193 u_offset_t pgoff, fpgoff; 5194 struct vnode *fvp; 5195 struct anon *fap = NULL; 5196 5197 if (svd->advice == MADV_SEQUENTIAL || 5198 (svd->pageadvice && 5199 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) { 5200 pgoff = off - PAGESIZE; 5201 fpage = page - 1; 5202 if (vpage != NULL) 5203 vpp = &svd->vpage[fpage]; 5204 if (amp != NULL) 5205 fanon_index = svd->anon_index + fpage; 5206 5207 while (pgoff > svd->offset) { 5208 if (svd->advice != MADV_SEQUENTIAL && 5209 (!svd->pageadvice || (vpage && 5210 VPP_ADVICE(vpp) != MADV_SEQUENTIAL))) 5211 break; 5212 5213 /* 5214 * If this is an anon page, we must find the 5215 * correct <vp, offset> for it 5216 */ 5217 fap = NULL; 5218 if (amp != NULL) { 5219 ANON_LOCK_ENTER(&->a_rwlock, 5220 RW_READER); 5221 anon_array_enter(amp, fanon_index, 5222 &cookie); 5223 fap = anon_get_ptr(amp->ahp, 5224 fanon_index); 5225 if (fap != NULL) { 5226 swap_xlate(fap, &fvp, &fpgoff); 5227 } else { 5228 fpgoff = pgoff; 5229 fvp = svd->vp; 5230 } 5231 anon_array_exit(&cookie); 5232 ANON_LOCK_EXIT(&->a_rwlock); 5233 } else { 5234 fpgoff = pgoff; 5235 fvp = svd->vp; 5236 } 5237 if (fvp == NULL) 5238 break; /* XXX */ 5239 /* 5240 * Skip pages that are free or have an 5241 * "exclusive" lock. 5242 */ 5243 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED); 5244 if (pp == NULL) 5245 break; 5246 /* 5247 * We don't need the page_struct_lock to test 5248 * as this is only advisory; even if we 5249 * acquire it someone might race in and lock 5250 * the page after we unlock and before the 5251 * PUTPAGE, then VOP_PUTPAGE will do nothing. 5252 */ 5253 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) { 5254 /* 5255 * Hold the vnode before releasing 5256 * the page lock to prevent it from 5257 * being freed and re-used by some 5258 * other thread. 5259 */ 5260 VN_HOLD(fvp); 5261 page_unlock(pp); 5262 /* 5263 * We should build a page list 5264 * to kluster putpages XXX 5265 */ 5266 (void) VOP_PUTPAGE(fvp, 5267 (offset_t)fpgoff, PAGESIZE, 5268 (B_DONTNEED|B_FREE|B_ASYNC), 5269 svd->cred, NULL); 5270 VN_RELE(fvp); 5271 } else { 5272 /* 5273 * XXX - Should the loop terminate if 5274 * the page is `locked'? 5275 */ 5276 page_unlock(pp); 5277 } 5278 --vpp; 5279 --fanon_index; 5280 pgoff -= PAGESIZE; 5281 } 5282 } 5283 } 5284 5285 plp = pl; 5286 *plp = NULL; 5287 pl_alloc_sz = 0; 5288 5289 /* 5290 * See if we need to call VOP_GETPAGE for 5291 * *any* of the range being faulted on. 5292 * We can skip all of this work if there 5293 * was no original vnode. 5294 */ 5295 if (svd->vp != NULL) { 5296 u_offset_t vp_off; 5297 size_t vp_len; 5298 struct anon *ap; 5299 vnode_t *vp; 5300 5301 vp_off = off; 5302 vp_len = len; 5303 5304 if (amp == NULL) 5305 dogetpage = 1; 5306 else { 5307 /* 5308 * Only acquire reader lock to prevent amp->ahp 5309 * from being changed. 
It's ok to miss pages, 5310 * hence we don't do anon_array_enter 5311 */ 5312 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5313 ap = anon_get_ptr(amp->ahp, anon_index); 5314 5315 if (len <= PAGESIZE) 5316 /* inline non_anon() */ 5317 dogetpage = (ap == NULL); 5318 else 5319 dogetpage = non_anon(amp->ahp, anon_index, 5320 &vp_off, &vp_len); 5321 ANON_LOCK_EXIT(&->a_rwlock); 5322 } 5323 5324 if (dogetpage) { 5325 enum seg_rw arw; 5326 struct as *as = seg->s_as; 5327 5328 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) { 5329 /* 5330 * Page list won't fit in local array, 5331 * allocate one of the needed size. 5332 */ 5333 pl_alloc_sz = 5334 (btop(len) + 1) * sizeof (page_t *); 5335 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP); 5336 plp[0] = NULL; 5337 plsz = len; 5338 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE || 5339 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER || 5340 (((size_t)(addr + PAGESIZE) < 5341 (size_t)(seg->s_base + seg->s_size)) && 5342 hat_probe(as->a_hat, addr + PAGESIZE))) { 5343 /* 5344 * Ask VOP_GETPAGE to return the exact number 5345 * of pages if 5346 * (a) this is a COW fault, or 5347 * (b) this is a software fault, or 5348 * (c) next page is already mapped. 5349 */ 5350 plsz = len; 5351 } else { 5352 /* 5353 * Ask VOP_GETPAGE to return adjacent pages 5354 * within the segment. 5355 */ 5356 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t) 5357 ((seg->s_base + seg->s_size) - addr)); 5358 ASSERT((addr + plsz) <= 5359 (seg->s_base + seg->s_size)); 5360 } 5361 5362 /* 5363 * Need to get some non-anonymous pages. 5364 * We need to make only one call to GETPAGE to do 5365 * this to prevent certain deadlocking conditions 5366 * when we are doing locking. In this case 5367 * non_anon() should have picked up the smallest 5368 * range which includes all the non-anonymous 5369 * pages in the requested range. We have to 5370 * be careful regarding which rw flag to pass in 5371 * because on a private mapping, the underlying 5372 * object is never allowed to be written. 5373 */ 5374 if (rw == S_WRITE && svd->type == MAP_PRIVATE) { 5375 arw = S_READ; 5376 } else { 5377 arw = rw; 5378 } 5379 vp = svd->vp; 5380 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5381 "segvn_getpage:seg %p addr %p vp %p", 5382 seg, addr, vp); 5383 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len, 5384 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw, 5385 svd->cred, NULL); 5386 if (err) { 5387 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5388 segvn_pagelist_rele(plp); 5389 if (pl_alloc_sz) 5390 kmem_free(plp, pl_alloc_sz); 5391 return (FC_MAKE_ERR(err)); 5392 } 5393 if (svd->type == MAP_PRIVATE) 5394 vpprot &= ~PROT_WRITE; 5395 } 5396 } 5397 5398 /* 5399 * N.B. at this time the plp array has all the needed non-anon 5400 * pages in addition to (possibly) having some adjacent pages. 5401 */ 5402 5403 /* 5404 * Always acquire the anon_array_lock to prevent 5405 * 2 threads from allocating separate anon slots for 5406 * the same "addr". 5407 * 5408 * If this is a copy-on-write fault and we don't already 5409 * have the anon_array_lock, acquire it to prevent the 5410 * fault routine from handling multiple copy-on-write faults 5411 * on the same "addr" in the same address space. 5412 * 5413 * Only one thread should deal with the fault since after 5414 * it is handled, the other threads can acquire a translation 5415 * to the newly created private page. This prevents two or 5416 * more threads from creating different private pages for the 5417 * same fault. 
5418 * 5419 * We grab "serialization" lock here if this is a MAP_PRIVATE segment 5420 * to prevent deadlock between this thread and another thread 5421 * which has soft-locked this page and wants to acquire serial_lock. 5422 * ( bug 4026339 ) 5423 * 5424 * The fix for bug 4026339 becomes unnecessary when using the 5425 * locking scheme with per amp rwlock and a global set of hash 5426 * lock, anon_array_lock. If we steal a vnode page when low 5427 * on memory and upgrad the page lock through page_rename, 5428 * then the page is PAGE_HANDLED, nothing needs to be done 5429 * for this page after returning from segvn_faultpage. 5430 * 5431 * But really, the page lock should be downgraded after 5432 * the stolen page is page_rename'd. 5433 */ 5434 5435 if (amp != NULL) 5436 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5437 5438 /* 5439 * Ok, now loop over the address range and handle faults 5440 */ 5441 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) { 5442 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot, 5443 type, rw, brkcow); 5444 if (err) { 5445 if (amp != NULL) 5446 ANON_LOCK_EXIT(&->a_rwlock); 5447 if (type == F_SOFTLOCK && a > addr) { 5448 segvn_softunlock(seg, addr, (a - addr), 5449 S_OTHER); 5450 } 5451 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5452 segvn_pagelist_rele(plp); 5453 if (pl_alloc_sz) 5454 kmem_free(plp, pl_alloc_sz); 5455 return (err); 5456 } 5457 if (vpage) { 5458 vpage++; 5459 } else if (svd->vpage) { 5460 page = seg_page(seg, addr); 5461 vpage = &svd->vpage[++page]; 5462 } 5463 } 5464 5465 /* Didn't get pages from the underlying fs so we're done */ 5466 if (!dogetpage) 5467 goto done; 5468 5469 /* 5470 * Now handle any other pages in the list returned. 5471 * If the page can be used, load up the translations now. 5472 * Note that the for loop will only be entered if "plp" 5473 * is pointing to a non-NULL page pointer which means that 5474 * VOP_GETPAGE() was called and vpprot has been initialized. 5475 */ 5476 if (svd->pageprot == 0) 5477 prot = svd->prot & vpprot; 5478 5479 5480 /* 5481 * Large Files: diff should be unsigned value because we started 5482 * supporting > 2GB segment sizes from 2.5.1 and when a 5483 * large file of size > 2GB gets mapped to address space 5484 * the diff value can be > 2GB. 5485 */ 5486 5487 for (ppp = plp; (pp = *ppp) != NULL; ppp++) { 5488 size_t diff; 5489 struct anon *ap; 5490 int anon_index; 5491 anon_sync_obj_t cookie; 5492 int hat_flag = HAT_LOAD_ADV; 5493 5494 if (svd->flags & MAP_TEXT) { 5495 hat_flag |= HAT_LOAD_TEXT; 5496 } 5497 5498 if (pp == PAGE_HANDLED) 5499 continue; 5500 5501 if (svd->tr_state != SEGVN_TR_ON && 5502 pp->p_offset >= svd->offset && 5503 pp->p_offset < svd->offset + seg->s_size) { 5504 5505 diff = pp->p_offset - svd->offset; 5506 5507 /* 5508 * Large Files: Following is the assertion 5509 * validating the above cast. 5510 */ 5511 ASSERT(svd->vp == pp->p_vnode); 5512 5513 page = btop(diff); 5514 if (svd->pageprot) 5515 prot = VPP_PROT(&svd->vpage[page]) & vpprot; 5516 5517 /* 5518 * Prevent other threads in the address space from 5519 * creating private pages (i.e., allocating anon slots) 5520 * while we are in the process of loading translations 5521 * to additional pages returned by the underlying 5522 * object. 
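 * (This is why anon_array_enter() is taken below before the
 * translation is loaded with hat_memload_region() and only
 * dropped again with anon_array_exit() afterwards.)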
5523 */ 5524 if (amp != NULL) { 5525 anon_index = svd->anon_index + page; 5526 anon_array_enter(amp, anon_index, &cookie); 5527 ap = anon_get_ptr(amp->ahp, anon_index); 5528 } 5529 if ((amp == NULL) || (ap == NULL)) { 5530 if (IS_VMODSORT(pp->p_vnode) || 5531 enable_mbit_wa) { 5532 if (rw == S_WRITE) 5533 hat_setmod(pp); 5534 else if (rw != S_OTHER && 5535 !hat_ismod(pp)) 5536 prot &= ~PROT_WRITE; 5537 } 5538 /* 5539 * Skip mapping read ahead pages marked 5540 * for migration, so they will get migrated 5541 * properly on fault 5542 */ 5543 ASSERT(amp == NULL || 5544 svd->rcookie == HAT_INVALID_REGION_COOKIE); 5545 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) { 5546 hat_memload_region(hat, 5547 seg->s_base + diff, 5548 pp, prot, hat_flag, 5549 svd->rcookie); 5550 } 5551 } 5552 if (amp != NULL) 5553 anon_array_exit(&cookie); 5554 } 5555 page_unlock(pp); 5556 } 5557 done: 5558 if (amp != NULL) 5559 ANON_LOCK_EXIT(&->a_rwlock); 5560 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5561 if (pl_alloc_sz) 5562 kmem_free(plp, pl_alloc_sz); 5563 return (0); 5564 } 5565 5566 /* 5567 * This routine is used to start I/O on pages asynchronously. XXX it will 5568 * only create PAGESIZE pages. At fault time they will be relocated into 5569 * larger pages. 5570 */ 5571 static faultcode_t 5572 segvn_faulta(struct seg *seg, caddr_t addr) 5573 { 5574 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5575 int err; 5576 struct anon_map *amp; 5577 vnode_t *vp; 5578 5579 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5580 5581 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5582 if ((amp = svd->amp) != NULL) { 5583 struct anon *ap; 5584 5585 /* 5586 * Reader lock to prevent amp->ahp from being changed. 5587 * This is advisory, it's ok to miss a page, so 5588 * we don't do anon_array_enter lock. 
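 *
 * If an anon slot exists the read is started through
 * anon_getpage(); otherwise we fall through to a
 * VOP_GETPAGE() call with a NULL page list, which asks the
 * file system to start the I/O without handing any pages
 * back.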
5589 */ 5590 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 5591 if ((ap = anon_get_ptr(amp->ahp, 5592 svd->anon_index + seg_page(seg, addr))) != NULL) { 5593 5594 err = anon_getpage(&ap, NULL, NULL, 5595 0, seg, addr, S_READ, svd->cred); 5596 5597 ANON_LOCK_EXIT(&amp->a_rwlock); 5598 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5599 if (err) 5600 return (FC_MAKE_ERR(err)); 5601 return (0); 5602 } 5603 ANON_LOCK_EXIT(&amp->a_rwlock); 5604 } 5605 5606 if (svd->vp == NULL) { 5607 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5608 return (0); /* zfod page - do nothing now */ 5609 } 5610 5611 vp = svd->vp; 5612 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5613 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp); 5614 err = VOP_GETPAGE(vp, 5615 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)), 5616 PAGESIZE, NULL, NULL, 0, seg, addr, 5617 S_OTHER, svd->cred, NULL); 5618 5619 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5620 if (err) 5621 return (FC_MAKE_ERR(err)); 5622 return (0); 5623 } 5624 5625 static int 5626 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 5627 { 5628 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5629 struct vpage *cvp, *svp, *evp; 5630 struct vnode *vp; 5631 size_t pgsz; 5632 pgcnt_t pgcnt; 5633 anon_sync_obj_t cookie; 5634 int unload_done = 0; 5635 5636 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5637 5638 if ((svd->maxprot & prot) != prot) 5639 return (EACCES); /* violated maxprot */ 5640 5641 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5642 5643 /* return if prot is the same */ 5644 if (!svd->pageprot && svd->prot == prot) { 5645 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5646 return (0); 5647 } 5648 5649 /* 5650 * Since we change protections we first have to flush the cache. 5651 * This makes sure all the pagelock calls have to recheck 5652 * protections. 5653 */ 5654 if (svd->softlockcnt > 0) { 5655 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5656 5657 /* 5658 * If this is a shared segment, a non-zero softlockcnt 5659 * means locked pages are still in use. 5660 */ 5661 if (svd->type == MAP_SHARED) { 5662 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5663 return (EAGAIN); 5664 } 5665 5666 /* 5667 * Since we hold the segvn writer's lock nobody can fill 5668 * the cache with entries belonging to this seg during 5669 * the purge. The flush either succeeds or we still have 5670 * pending I/Os.
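 * (If softlockcnt is still non-zero after segvn_purge() below,
 * outstanding I/O is holding pages locked and the protection
 * change is rejected with EAGAIN.)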
5671 */ 5672 segvn_purge(seg); 5673 if (svd->softlockcnt > 0) { 5674 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5675 return (EAGAIN); 5676 } 5677 } 5678 5679 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5680 ASSERT(svd->amp == NULL); 5681 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5682 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5683 HAT_REGION_TEXT); 5684 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5685 unload_done = 1; 5686 } else if (svd->tr_state == SEGVN_TR_INIT) { 5687 svd->tr_state = SEGVN_TR_OFF; 5688 } else if (svd->tr_state == SEGVN_TR_ON) { 5689 ASSERT(svd->amp != NULL); 5690 segvn_textunrepl(seg, 0); 5691 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5692 unload_done = 1; 5693 } 5694 5695 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED && 5696 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) { 5697 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 5698 segvn_inval_trcache(svd->vp); 5699 } 5700 if (seg->s_szc != 0) { 5701 int err; 5702 pgsz = page_get_pagesize(seg->s_szc); 5703 pgcnt = pgsz >> PAGESHIFT; 5704 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 5705 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 5706 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5707 ASSERT(seg->s_base != addr || seg->s_size != len); 5708 /* 5709 * If we are holding the as lock as a reader then 5710 * we need to return IE_RETRY and let the as 5711 * layer drop and re-acquire the lock as a writer. 5712 */ 5713 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) 5714 return (IE_RETRY); 5715 VM_STAT_ADD(segvnvmstats.demoterange[1]); 5716 if (svd->type == MAP_PRIVATE || svd->vp != NULL) { 5717 err = segvn_demote_range(seg, addr, len, 5718 SDR_END, 0); 5719 } else { 5720 uint_t szcvec = map_pgszcvec(seg->s_base, 5721 pgsz, (uintptr_t)seg->s_base, 5722 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0); 5723 err = segvn_demote_range(seg, addr, len, 5724 SDR_END, szcvec); 5725 } 5726 if (err == 0) 5727 return (IE_RETRY); 5728 if (err == ENOMEM) 5729 return (IE_NOMEM); 5730 return (err); 5731 } 5732 } 5733 5734 5735 /* 5736 * If it's a private mapping and we're making it writable then we 5737 * may have to reserve the additional swap space now. If we are 5738 * making writable only a part of the segment then we use its vpage 5739 * array to keep a record of the pages for which we have reserved 5740 * swap. In this case we set the pageswap field in the segment's 5741 * segvn structure to record this. 5742 * 5743 * If it's a private mapping to a file (i.e., vp != NULL) and we're 5744 * removing write permission on the entire segment and we haven't 5745 * modified any pages, we can release the swap space. 5746 */ 5747 if (svd->type == MAP_PRIVATE) { 5748 if (prot & PROT_WRITE) { 5749 if (!(svd->flags & MAP_NORESERVE) && 5750 !(svd->swresv && svd->pageswap == 0)) { 5751 size_t sz = 0; 5752 5753 /* 5754 * Start by determining how much swap 5755 * space is required. 5756 */ 5757 if (addr == seg->s_base && 5758 len == seg->s_size && 5759 svd->pageswap == 0) { 5760 /* The whole segment */ 5761 sz = seg->s_size; 5762 } else { 5763 /* 5764 * Make sure that the vpage array 5765 * exists, and make a note of the 5766 * range of elements corresponding 5767 * to len. 5768 */ 5769 segvn_vpage(seg); 5770 svp = &svd->vpage[seg_page(seg, addr)]; 5771 evp = &svd->vpage[seg_page(seg, 5772 addr + len)]; 5773 5774 if (svd->pageswap == 0) { 5775 /* 5776 * This is the first time we've 5777 * asked for a part of this 5778 * segment, so we need to 5779 * reserve everything we've 5780 * been asked for. 
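 *
 * (Otherwise only the pages in the range that are not yet
 * marked VPP_ISSWAPRES() are counted, and the count is
 * converted to bytes with a PAGESHIFT shift, as done just
 * below.)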
5781 */ 5782 sz = len; 5783 } else { 5784 /* 5785 * We have to count the number 5786 * of pages required. 5787 */ 5788 for (cvp = svp; cvp < evp; 5789 cvp++) { 5790 if (!VPP_ISSWAPRES(cvp)) 5791 sz++; 5792 } 5793 sz <<= PAGESHIFT; 5794 } 5795 } 5796 5797 /* Try to reserve the necessary swap. */ 5798 if (anon_resv_zone(sz, 5799 seg->s_as->a_proc->p_zone) == 0) { 5800 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5801 return (IE_NOMEM); 5802 } 5803 5804 /* 5805 * Make a note of how much swap space 5806 * we've reserved. 5807 */ 5808 if (svd->pageswap == 0 && sz == seg->s_size) { 5809 svd->swresv = sz; 5810 } else { 5811 ASSERT(svd->vpage != NULL); 5812 svd->swresv += sz; 5813 svd->pageswap = 1; 5814 for (cvp = svp; cvp < evp; cvp++) { 5815 if (!VPP_ISSWAPRES(cvp)) 5816 VPP_SETSWAPRES(cvp); 5817 } 5818 } 5819 } 5820 } else { 5821 /* 5822 * Swap space is released only if this segment 5823 * does not map anonymous memory, since read faults 5824 * on such segments still need an anon slot to read 5825 * in the data. 5826 */ 5827 if (svd->swresv != 0 && svd->vp != NULL && 5828 svd->amp == NULL && addr == seg->s_base && 5829 len == seg->s_size && svd->pageprot == 0) { 5830 ASSERT(svd->pageswap == 0); 5831 anon_unresv_zone(svd->swresv, 5832 seg->s_as->a_proc->p_zone); 5833 svd->swresv = 0; 5834 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5835 "anon proc:%p %lu %u", seg, 0, 0); 5836 } 5837 } 5838 } 5839 5840 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) { 5841 if (svd->prot == prot) { 5842 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5843 return (0); /* all done */ 5844 } 5845 svd->prot = (uchar_t)prot; 5846 } else if (svd->type == MAP_PRIVATE) { 5847 struct anon *ap = NULL; 5848 page_t *pp; 5849 u_offset_t offset, off; 5850 struct anon_map *amp; 5851 ulong_t anon_idx = 0; 5852 5853 /* 5854 * A vpage structure exists or else the change does not 5855 * involve the entire segment. Establish a vpage structure 5856 * if none is there. Then, for each page in the range, 5857 * adjust its individual permissions. Note that write- 5858 * enabling a MAP_PRIVATE page can affect the claims for 5859 * locked down memory. Overcommitting memory terminates 5860 * the operation. 5861 */ 5862 segvn_vpage(seg); 5863 svd->pageprot = 1; 5864 if ((amp = svd->amp) != NULL) { 5865 anon_idx = svd->anon_index + seg_page(seg, addr); 5866 ASSERT(seg->s_szc == 0 || 5867 IS_P2ALIGNED(anon_idx, pgcnt)); 5868 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5869 } 5870 5871 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 5872 evp = &svd->vpage[seg_page(seg, addr + len)]; 5873 5874 /* 5875 * See Statement at the beginning of segvn_lockop regarding 5876 * the way cowcnts and lckcnts are handled. 
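 *
 * (In short: when the PROT_WRITE bit changes on a page that is
 * locked down, VPP_ISPPLOCK(), the loop below calls
 * page_addclaim() or page_subclaim() to adjust the claim on
 * locked-down memory; if the claim cannot be granted, i.e.
 * memory would be overcommitted, the loop stops early and the
 * partially updated range is unloaded with hat_unload().)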
5877 */ 5878 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5879 5880 if (seg->s_szc != 0) { 5881 if (amp != NULL) { 5882 anon_array_enter(amp, anon_idx, 5883 &cookie); 5884 } 5885 if (IS_P2ALIGNED(anon_idx, pgcnt) && 5886 !segvn_claim_pages(seg, svp, offset, 5887 anon_idx, prot)) { 5888 if (amp != NULL) { 5889 anon_array_exit(&cookie); 5890 } 5891 break; 5892 } 5893 if (amp != NULL) { 5894 anon_array_exit(&cookie); 5895 } 5896 anon_idx++; 5897 } else { 5898 if (amp != NULL) { 5899 anon_array_enter(amp, anon_idx, 5900 &cookie); 5901 ap = anon_get_ptr(amp->ahp, anon_idx++); 5902 } 5903 5904 if (VPP_ISPPLOCK(svp) && 5905 VPP_PROT(svp) != prot) { 5906 5907 if (amp == NULL || ap == NULL) { 5908 vp = svd->vp; 5909 off = offset; 5910 } else 5911 swap_xlate(ap, &vp, &off); 5912 if (amp != NULL) 5913 anon_array_exit(&cookie); 5914 5915 if ((pp = page_lookup(vp, off, 5916 SE_SHARED)) == NULL) { 5917 panic("segvn_setprot: no page"); 5918 /*NOTREACHED*/ 5919 } 5920 ASSERT(seg->s_szc == 0); 5921 if ((VPP_PROT(svp) ^ prot) & 5922 PROT_WRITE) { 5923 if (prot & PROT_WRITE) { 5924 if (!page_addclaim( 5925 pp)) { 5926 page_unlock(pp); 5927 break; 5928 } 5929 } else { 5930 if (!page_subclaim( 5931 pp)) { 5932 page_unlock(pp); 5933 break; 5934 } 5935 } 5936 } 5937 page_unlock(pp); 5938 } else if (amp != NULL) 5939 anon_array_exit(&cookie); 5940 } 5941 VPP_SETPROT(svp, prot); 5942 offset += PAGESIZE; 5943 } 5944 if (amp != NULL) 5945 ANON_LOCK_EXIT(&->a_rwlock); 5946 5947 /* 5948 * Did we terminate prematurely? If so, simply unload 5949 * the translations to the things we've updated so far. 5950 */ 5951 if (svp != evp) { 5952 if (unload_done) { 5953 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5954 return (IE_NOMEM); 5955 } 5956 len = (svp - &svd->vpage[seg_page(seg, addr)]) * 5957 PAGESIZE; 5958 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz)); 5959 if (len != 0) 5960 hat_unload(seg->s_as->a_hat, addr, 5961 len, HAT_UNLOAD); 5962 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5963 return (IE_NOMEM); 5964 } 5965 } else { 5966 segvn_vpage(seg); 5967 svd->pageprot = 1; 5968 evp = &svd->vpage[seg_page(seg, addr + len)]; 5969 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5970 VPP_SETPROT(svp, prot); 5971 } 5972 } 5973 5974 if (unload_done) { 5975 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5976 return (0); 5977 } 5978 5979 if (((prot & PROT_WRITE) != 0 && 5980 (svd->vp != NULL || svd->type == MAP_PRIVATE)) || 5981 (prot & ~PROT_USER) == PROT_NONE) { 5982 /* 5983 * Either private or shared data with write access (in 5984 * which case we need to throw out all former translations 5985 * so that we get the right translations set up on fault 5986 * and we don't allow write access to any copy-on-write pages 5987 * that might be around or to prevent write access to pages 5988 * representing holes in a file), or we don't have permission 5989 * to access the memory at all (in which case we have to 5990 * unload any current translations that might exist). 5991 */ 5992 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 5993 } else { 5994 /* 5995 * A shared mapping or a private mapping in which write 5996 * protection is going to be denied - just change all the 5997 * protections over the range of addresses in question. 5998 * segvn does not support any other attributes other 5999 * than prot so we can use hat_chgattr. 
6000 */ 6001 hat_chgattr(seg->s_as->a_hat, addr, len, prot); 6002 } 6003 6004 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6005 6006 return (0); 6007 } 6008 6009 /* 6010 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize, 6011 * to determine if the seg is capable of mapping the requested szc. 6012 */ 6013 static int 6014 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 6015 { 6016 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6017 struct segvn_data *nsvd; 6018 struct anon_map *amp = svd->amp; 6019 struct seg *nseg; 6020 caddr_t eaddr = addr + len, a; 6021 size_t pgsz = page_get_pagesize(szc); 6022 pgcnt_t pgcnt = page_get_pagecnt(szc); 6023 int err; 6024 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base); 6025 6026 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6027 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6028 6029 if (seg->s_szc == szc || segvn_lpg_disable != 0) { 6030 return (0); 6031 } 6032 6033 /* 6034 * addr should always be pgsz aligned but eaddr may be misaligned if 6035 * it's at the end of the segment. 6036 * 6037 * XXX we should assert this condition since as_setpagesize() logic 6038 * guarantees it. 6039 */ 6040 if (!IS_P2ALIGNED(addr, pgsz) || 6041 (!IS_P2ALIGNED(eaddr, pgsz) && 6042 eaddr != seg->s_base + seg->s_size)) { 6043 6044 segvn_setpgsz_align_err++; 6045 return (EINVAL); 6046 } 6047 6048 if (amp != NULL && svd->type == MAP_SHARED) { 6049 ulong_t an_idx = svd->anon_index + seg_page(seg, addr); 6050 if (!IS_P2ALIGNED(an_idx, pgcnt)) { 6051 6052 segvn_setpgsz_anon_align_err++; 6053 return (EINVAL); 6054 } 6055 } 6056 6057 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas || 6058 szc > segvn_maxpgszc) { 6059 return (EINVAL); 6060 } 6061 6062 /* paranoid check */ 6063 if (svd->vp != NULL && 6064 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) { 6065 return (EINVAL); 6066 } 6067 6068 if (seg->s_szc == 0 && svd->vp != NULL && 6069 map_addr_vacalign_check(addr, off)) { 6070 return (EINVAL); 6071 } 6072 6073 /* 6074 * Check that protections are the same within new page 6075 * size boundaries. 6076 */ 6077 if (svd->pageprot) { 6078 for (a = addr; a < eaddr; a += pgsz) { 6079 if ((a + pgsz) > eaddr) { 6080 if (!sameprot(seg, a, eaddr - a)) { 6081 return (EINVAL); 6082 } 6083 } else { 6084 if (!sameprot(seg, a, pgsz)) { 6085 return (EINVAL); 6086 } 6087 } 6088 } 6089 } 6090 6091 /* 6092 * Since we are changing page size we first have to flush 6093 * the cache. This makes sure all the pagelock calls have 6094 * to recheck protections. 6095 */ 6096 if (svd->softlockcnt > 0) { 6097 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6098 6099 /* 6100 * If this is shared segment non 0 softlockcnt 6101 * means locked pages are still in use. 6102 */ 6103 if (svd->type == MAP_SHARED) { 6104 return (EAGAIN); 6105 } 6106 6107 /* 6108 * Since we do have the segvn writers lock nobody can fill 6109 * the cache with entries belonging to this seg during 6110 * the purge. The flush either succeeds or we still have 6111 * pending I/Os. 
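 * In the latter case we fail with EAGAIN and the caller may retry.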
6112 */ 6113 segvn_purge(seg); 6114 if (svd->softlockcnt > 0) { 6115 return (EAGAIN); 6116 } 6117 } 6118 6119 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6120 ASSERT(svd->amp == NULL); 6121 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6122 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6123 HAT_REGION_TEXT); 6124 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6125 } else if (svd->tr_state == SEGVN_TR_INIT) { 6126 svd->tr_state = SEGVN_TR_OFF; 6127 } else if (svd->tr_state == SEGVN_TR_ON) { 6128 ASSERT(svd->amp != NULL); 6129 segvn_textunrepl(seg, 1); 6130 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6131 amp = NULL; 6132 } 6133 6134 /* 6135 * Operation for sub range of existing segment. 6136 */ 6137 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) { 6138 if (szc < seg->s_szc) { 6139 VM_STAT_ADD(segvnvmstats.demoterange[2]); 6140 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0); 6141 if (err == 0) { 6142 return (IE_RETRY); 6143 } 6144 if (err == ENOMEM) { 6145 return (IE_NOMEM); 6146 } 6147 return (err); 6148 } 6149 if (addr != seg->s_base) { 6150 nseg = segvn_split_seg(seg, addr); 6151 if (eaddr != (nseg->s_base + nseg->s_size)) { 6152 /* eaddr is szc aligned */ 6153 (void) segvn_split_seg(nseg, eaddr); 6154 } 6155 return (IE_RETRY); 6156 } 6157 if (eaddr != (seg->s_base + seg->s_size)) { 6158 /* eaddr is szc aligned */ 6159 (void) segvn_split_seg(seg, eaddr); 6160 } 6161 return (IE_RETRY); 6162 } 6163 6164 /* 6165 * Break any low level sharing and reset seg->s_szc to 0. 6166 */ 6167 if ((err = segvn_clrszc(seg)) != 0) { 6168 if (err == ENOMEM) { 6169 err = IE_NOMEM; 6170 } 6171 return (err); 6172 } 6173 ASSERT(seg->s_szc == 0); 6174 6175 /* 6176 * If the end of the current segment is not pgsz aligned 6177 * then attempt to concatenate with the next segment. 6178 */ 6179 if (!IS_P2ALIGNED(eaddr, pgsz)) { 6180 nseg = AS_SEGNEXT(seg->s_as, seg); 6181 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) { 6182 return (ENOMEM); 6183 } 6184 if (nseg->s_ops != &segvn_ops) { 6185 return (EINVAL); 6186 } 6187 nsvd = (struct segvn_data *)nseg->s_data; 6188 if (nsvd->softlockcnt > 0) { 6189 /* 6190 * If this is shared segment non 0 softlockcnt 6191 * means locked pages are still in use. 6192 */ 6193 if (nsvd->type == MAP_SHARED) { 6194 return (EAGAIN); 6195 } 6196 segvn_purge(nseg); 6197 if (nsvd->softlockcnt > 0) { 6198 return (EAGAIN); 6199 } 6200 } 6201 err = segvn_clrszc(nseg); 6202 if (err == ENOMEM) { 6203 err = IE_NOMEM; 6204 } 6205 if (err != 0) { 6206 return (err); 6207 } 6208 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6209 err = segvn_concat(seg, nseg, 1); 6210 if (err == -1) { 6211 return (EINVAL); 6212 } 6213 if (err == -2) { 6214 return (IE_NOMEM); 6215 } 6216 return (IE_RETRY); 6217 } 6218 6219 /* 6220 * May need to re-align anon array to 6221 * new szc. 
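 * If anon_index is not aligned to the new large page count (only possible
 * for MAP_PRIVATE), a fresh anon header is allocated and the existing slots
 * are copied over so that slot 0 again corresponds to the segment base.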
6222 */ 6223 if (amp != NULL) { 6224 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) { 6225 struct anon_hdr *nahp; 6226 6227 ASSERT(svd->type == MAP_PRIVATE); 6228 6229 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6230 ASSERT(amp->refcnt == 1); 6231 nahp = anon_create(btop(amp->size), ANON_NOSLEEP); 6232 if (nahp == NULL) { 6233 ANON_LOCK_EXIT(&->a_rwlock); 6234 return (IE_NOMEM); 6235 } 6236 if (anon_copy_ptr(amp->ahp, svd->anon_index, 6237 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) { 6238 anon_release(nahp, btop(amp->size)); 6239 ANON_LOCK_EXIT(&->a_rwlock); 6240 return (IE_NOMEM); 6241 } 6242 anon_release(amp->ahp, btop(amp->size)); 6243 amp->ahp = nahp; 6244 svd->anon_index = 0; 6245 ANON_LOCK_EXIT(&->a_rwlock); 6246 } 6247 } 6248 if (svd->vp != NULL && szc != 0) { 6249 struct vattr va; 6250 u_offset_t eoffpage = svd->offset; 6251 va.va_mask = AT_SIZE; 6252 eoffpage += seg->s_size; 6253 eoffpage = btopr(eoffpage); 6254 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) { 6255 segvn_setpgsz_getattr_err++; 6256 return (EINVAL); 6257 } 6258 if (btopr(va.va_size) < eoffpage) { 6259 segvn_setpgsz_eof_err++; 6260 return (EINVAL); 6261 } 6262 if (amp != NULL) { 6263 /* 6264 * anon_fill_cow_holes() may call VOP_GETPAGE(). 6265 * don't take anon map lock here to avoid holding it 6266 * across VOP_GETPAGE() calls that may call back into 6267 * segvn for klsutering checks. We don't really need 6268 * anon map lock here since it's a private segment and 6269 * we hold as level lock as writers. 6270 */ 6271 if ((err = anon_fill_cow_holes(seg, seg->s_base, 6272 amp->ahp, svd->anon_index, svd->vp, svd->offset, 6273 seg->s_size, szc, svd->prot, svd->vpage, 6274 svd->cred)) != 0) { 6275 return (EINVAL); 6276 } 6277 } 6278 segvn_setvnode_mpss(svd->vp); 6279 } 6280 6281 if (amp != NULL) { 6282 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6283 if (svd->type == MAP_PRIVATE) { 6284 amp->a_szc = szc; 6285 } else if (szc > amp->a_szc) { 6286 amp->a_szc = szc; 6287 } 6288 ANON_LOCK_EXIT(&->a_rwlock); 6289 } 6290 6291 seg->s_szc = szc; 6292 6293 return (0); 6294 } 6295 6296 static int 6297 segvn_clrszc(struct seg *seg) 6298 { 6299 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6300 struct anon_map *amp = svd->amp; 6301 size_t pgsz; 6302 pgcnt_t pages; 6303 int err = 0; 6304 caddr_t a = seg->s_base; 6305 caddr_t ea = a + seg->s_size; 6306 ulong_t an_idx = svd->anon_index; 6307 vnode_t *vp = svd->vp; 6308 struct vpage *vpage = svd->vpage; 6309 page_t *anon_pl[1 + 1], *pp; 6310 struct anon *ap, *oldap; 6311 uint_t prot = svd->prot, vpprot; 6312 int pageflag = 0; 6313 6314 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6315 SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 6316 ASSERT(svd->softlockcnt == 0); 6317 6318 if (vp == NULL && amp == NULL) { 6319 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6320 seg->s_szc = 0; 6321 return (0); 6322 } 6323 6324 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6325 ASSERT(svd->amp == NULL); 6326 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6327 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6328 HAT_REGION_TEXT); 6329 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6330 } else if (svd->tr_state == SEGVN_TR_ON) { 6331 ASSERT(svd->amp != NULL); 6332 segvn_textunrepl(seg, 1); 6333 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6334 amp = NULL; 6335 } else { 6336 if (svd->tr_state != SEGVN_TR_OFF) { 6337 ASSERT(svd->tr_state == SEGVN_TR_INIT); 6338 svd->tr_state = SEGVN_TR_OFF; 6339 } 6340 6341 /* 6342 * do HAT_UNLOAD_UNMAP since we are changing the pagesize. 
6343 * unload argument is 0 when we are freeing the segment 6344 * and unload was already done. 6345 */ 6346 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size, 6347 HAT_UNLOAD_UNMAP); 6348 } 6349 6350 if (amp == NULL || svd->type == MAP_SHARED) { 6351 seg->s_szc = 0; 6352 return (0); 6353 } 6354 6355 pgsz = page_get_pagesize(seg->s_szc); 6356 pages = btop(pgsz); 6357 6358 /* 6359 * XXX anon rwlock is not really needed because this is a 6360 * private segment and we are writers. 6361 */ 6362 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6363 6364 for (; a < ea; a += pgsz, an_idx += pages) { 6365 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) { 6366 ASSERT(vpage != NULL || svd->pageprot == 0); 6367 if (vpage != NULL) { 6368 ASSERT(sameprot(seg, a, pgsz)); 6369 prot = VPP_PROT(vpage); 6370 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0; 6371 } 6372 if (seg->s_szc != 0) { 6373 ASSERT(vp == NULL || anon_pages(amp->ahp, 6374 an_idx, pages) == pages); 6375 if ((err = anon_map_demotepages(amp, an_idx, 6376 seg, a, prot, vpage, svd->cred)) != 0) { 6377 goto out; 6378 } 6379 } else { 6380 if (oldap->an_refcnt == 1) { 6381 continue; 6382 } 6383 if ((err = anon_getpage(&oldap, &vpprot, 6384 anon_pl, PAGESIZE, seg, a, S_READ, 6385 svd->cred))) { 6386 goto out; 6387 } 6388 if ((pp = anon_private(&ap, seg, a, prot, 6389 anon_pl[0], pageflag, svd->cred)) == NULL) { 6390 err = ENOMEM; 6391 goto out; 6392 } 6393 anon_decref(oldap); 6394 (void) anon_set_ptr(amp->ahp, an_idx, ap, 6395 ANON_SLEEP); 6396 page_unlock(pp); 6397 } 6398 } 6399 vpage = (vpage == NULL) ? NULL : vpage + pages; 6400 } 6401 6402 amp->a_szc = 0; 6403 seg->s_szc = 0; 6404 out: 6405 ANON_LOCK_EXIT(&->a_rwlock); 6406 return (err); 6407 } 6408 6409 static int 6410 segvn_claim_pages( 6411 struct seg *seg, 6412 struct vpage *svp, 6413 u_offset_t off, 6414 ulong_t anon_idx, 6415 uint_t prot) 6416 { 6417 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6418 size_t ppasize = (pgcnt + 1) * sizeof (page_t *); 6419 page_t **ppa; 6420 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6421 struct anon_map *amp = svd->amp; 6422 struct vpage *evp = svp + pgcnt; 6423 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT) 6424 + seg->s_base; 6425 struct anon *ap; 6426 struct vnode *vp = svd->vp; 6427 page_t *pp; 6428 pgcnt_t pg_idx, i; 6429 int err = 0; 6430 anoff_t aoff; 6431 int anon = (amp != NULL) ? 
1 : 0; 6432 6433 ASSERT(svd->type == MAP_PRIVATE); 6434 ASSERT(svd->vpage != NULL); 6435 ASSERT(seg->s_szc != 0); 6436 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 6437 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt)); 6438 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT)); 6439 6440 if (VPP_PROT(svp) == prot) 6441 return (1); 6442 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE)) 6443 return (1); 6444 6445 ppa = kmem_alloc(ppasize, KM_SLEEP); 6446 if (anon && vp != NULL) { 6447 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) { 6448 anon = 0; 6449 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt)); 6450 } 6451 ASSERT(!anon || 6452 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt); 6453 } 6454 6455 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) { 6456 if (!VPP_ISPPLOCK(svp)) 6457 continue; 6458 if (anon) { 6459 ap = anon_get_ptr(amp->ahp, anon_idx); 6460 if (ap == NULL) { 6461 panic("segvn_claim_pages: no anon slot"); 6462 } 6463 swap_xlate(ap, &vp, &aoff); 6464 off = (u_offset_t)aoff; 6465 } 6466 ASSERT(vp != NULL); 6467 if ((pp = page_lookup(vp, 6468 (u_offset_t)off, SE_SHARED)) == NULL) { 6469 panic("segvn_claim_pages: no page"); 6470 } 6471 ppa[pg_idx++] = pp; 6472 off += PAGESIZE; 6473 } 6474 6475 if (ppa[0] == NULL) { 6476 kmem_free(ppa, ppasize); 6477 return (1); 6478 } 6479 6480 ASSERT(pg_idx <= pgcnt); 6481 ppa[pg_idx] = NULL; 6482 6483 6484 /* Find each large page within ppa, and adjust its claim */ 6485 6486 /* Does ppa cover a single large page? */ 6487 if (ppa[0]->p_szc == seg->s_szc) { 6488 if (prot & PROT_WRITE) 6489 err = page_addclaim_pages(ppa); 6490 else 6491 err = page_subclaim_pages(ppa); 6492 } else { 6493 for (i = 0; ppa[i]; i += pgcnt) { 6494 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt)); 6495 if (prot & PROT_WRITE) 6496 err = page_addclaim_pages(&ppa[i]); 6497 else 6498 err = page_subclaim_pages(&ppa[i]); 6499 if (err == 0) 6500 break; 6501 } 6502 } 6503 6504 for (i = 0; i < pg_idx; i++) { 6505 ASSERT(ppa[i] != NULL); 6506 page_unlock(ppa[i]); 6507 } 6508 6509 kmem_free(ppa, ppasize); 6510 return (err); 6511 } 6512 6513 /* 6514 * Returns right (upper address) segment if split occurred. 6515 * If the address is equal to the beginning or end of its segment it returns 6516 * the current segment. 
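 * The caller must hold the address space lock as a writer. The new
 * (right hand) segment inherits the vnode, credentials and protections of
 * the original; for a MAP_PRIVATE anon map the slots are carved in two,
 * and any swap reservation is split between the halves.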
*/ 6518 static struct seg * 6519 segvn_split_seg(struct seg *seg, caddr_t addr) 6520 { 6521 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6522 struct seg *nseg; 6523 size_t nsize; 6524 struct segvn_data *nsvd; 6525 6526 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6527 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6528 6529 ASSERT(addr >= seg->s_base); 6530 ASSERT(addr <= seg->s_base + seg->s_size); 6531 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6532 6533 if (addr == seg->s_base || addr == seg->s_base + seg->s_size) 6534 return (seg); 6535 6536 nsize = seg->s_base + seg->s_size - addr; 6537 seg->s_size = addr - seg->s_base; 6538 nseg = seg_alloc(seg->s_as, addr, nsize); 6539 ASSERT(nseg != NULL); 6540 nseg->s_ops = seg->s_ops; 6541 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 6542 nseg->s_data = (void *)nsvd; 6543 nseg->s_szc = seg->s_szc; 6544 *nsvd = *svd; 6545 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6546 nsvd->seg = nseg; 6547 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL); 6548 6549 if (nsvd->vp != NULL) { 6550 VN_HOLD(nsvd->vp); 6551 nsvd->offset = svd->offset + 6552 (uintptr_t)(nseg->s_base - seg->s_base); 6553 if (nsvd->type == MAP_SHARED) 6554 lgrp_shm_policy_init(NULL, nsvd->vp); 6555 } else { 6556 /* 6557 * The offset for an anonymous segment has no significance in 6558 * terms of an offset into a file. If we were to use the above 6559 * calculation instead, the structures read out of 6560 * /proc/<pid>/xmap would be more difficult to decipher since 6561 * it would be unclear whether two seemingly contiguous 6562 * prxmap_t structures represented different segments or a 6563 * single segment that had been split up into multiple prxmap_t 6564 * structures (e.g. if some part of the segment had not yet 6565 * been faulted in).
6566 */ 6567 nsvd->offset = 0; 6568 } 6569 6570 ASSERT(svd->softlockcnt == 0); 6571 ASSERT(svd->softlockcnt_sbase == 0); 6572 ASSERT(svd->softlockcnt_send == 0); 6573 crhold(svd->cred); 6574 6575 if (svd->vpage != NULL) { 6576 size_t bytes = vpgtob(seg_pages(seg)); 6577 size_t nbytes = vpgtob(seg_pages(nseg)); 6578 struct vpage *ovpage = svd->vpage; 6579 6580 svd->vpage = kmem_alloc(bytes, KM_SLEEP); 6581 bcopy(ovpage, svd->vpage, bytes); 6582 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 6583 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes); 6584 kmem_free(ovpage, bytes + nbytes); 6585 } 6586 if (svd->amp != NULL && svd->type == MAP_PRIVATE) { 6587 struct anon_map *oamp = svd->amp, *namp; 6588 struct anon_hdr *nahp; 6589 6590 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER); 6591 ASSERT(oamp->refcnt == 1); 6592 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 6593 (void) anon_copy_ptr(oamp->ahp, svd->anon_index, 6594 nahp, 0, btop(seg->s_size), ANON_SLEEP); 6595 6596 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 6597 namp->a_szc = nseg->s_szc; 6598 (void) anon_copy_ptr(oamp->ahp, 6599 svd->anon_index + btop(seg->s_size), 6600 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 6601 anon_release(oamp->ahp, btop(oamp->size)); 6602 oamp->ahp = nahp; 6603 oamp->size = seg->s_size; 6604 svd->anon_index = 0; 6605 nsvd->amp = namp; 6606 nsvd->anon_index = 0; 6607 ANON_LOCK_EXIT(&oamp->a_rwlock); 6608 } else if (svd->amp != NULL) { 6609 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6610 ASSERT(svd->amp == nsvd->amp); 6611 ASSERT(seg->s_szc <= svd->amp->a_szc); 6612 nsvd->anon_index = svd->anon_index + seg_pages(seg); 6613 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt)); 6614 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER); 6615 svd->amp->refcnt++; 6616 ANON_LOCK_EXIT(&svd->amp->a_rwlock); 6617 } 6618 6619 /* 6620 * Split the amount of swap reserved. 6621 */ 6622 if (svd->swresv) { 6623 /* 6624 * For MAP_NORESERVE, only allocate swap reserve for pages 6625 * being used. Other segments get enough to cover whole 6626 * segment. 6627 */ 6628 if (svd->flags & MAP_NORESERVE) { 6629 size_t oswresv; 6630 6631 ASSERT(svd->amp); 6632 oswresv = svd->swresv; 6633 svd->swresv = ptob(anon_pages(svd->amp->ahp, 6634 svd->anon_index, btop(seg->s_size))); 6635 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 6636 nsvd->anon_index, btop(nseg->s_size))); 6637 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 6638 } else { 6639 if (svd->pageswap) { 6640 svd->swresv = segvn_count_swap_by_vpages(seg); 6641 ASSERT(nsvd->swresv >= svd->swresv); 6642 nsvd->swresv -= svd->swresv; 6643 } else { 6644 ASSERT(svd->swresv == seg->s_size + 6645 nseg->s_size); 6646 svd->swresv = seg->s_size; 6647 nsvd->swresv = nseg->s_size; 6648 } 6649 } 6650 } 6651 6652 return (nseg); 6653 } 6654 6655 /* 6656 * called on memory operations (unmap, setprot, setpagesize) for a subset 6657 * of a large page segment to either demote the memory range (SDR_RANGE) 6658 * or the ends (SDR_END) by addr/len. 6659 * 6660 * returns 0 on success. returns errno, including ENOMEM, on failure. 
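 * The affected range is first carved off into separate segments along
 * large page boundaries with segvn_split_seg(); segvn_clrszc() is then
 * applied to the resulting "bad" segment(s) (the whole aligned range for
 * SDR_RANGE, just the end pages for SDR_END). With a non-zero szcvec the
 * SDR_END pieces may be re-demoted to a smaller large page size rather
 * than all the way down to PAGESIZE.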
6661 */ 6662 static int 6663 segvn_demote_range( 6664 struct seg *seg, 6665 caddr_t addr, 6666 size_t len, 6667 int flag, 6668 uint_t szcvec) 6669 { 6670 caddr_t eaddr = addr + len; 6671 caddr_t lpgaddr, lpgeaddr; 6672 struct seg *nseg; 6673 struct seg *badseg1 = NULL; 6674 struct seg *badseg2 = NULL; 6675 size_t pgsz; 6676 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6677 int err; 6678 uint_t szc = seg->s_szc; 6679 uint_t tszcvec; 6680 6681 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6682 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6683 ASSERT(szc != 0); 6684 pgsz = page_get_pagesize(szc); 6685 ASSERT(seg->s_base != addr || seg->s_size != len); 6686 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6687 ASSERT(svd->softlockcnt == 0); 6688 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6689 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED)); 6690 6691 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 6692 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr); 6693 if (flag == SDR_RANGE) { 6694 /* demote entire range */ 6695 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6696 (void) segvn_split_seg(nseg, lpgeaddr); 6697 ASSERT(badseg1->s_base == lpgaddr); 6698 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr); 6699 } else if (addr != lpgaddr) { 6700 ASSERT(flag == SDR_END); 6701 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6702 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz && 6703 eaddr < lpgaddr + 2 * pgsz) { 6704 (void) segvn_split_seg(nseg, lpgeaddr); 6705 ASSERT(badseg1->s_base == lpgaddr); 6706 ASSERT(badseg1->s_size == 2 * pgsz); 6707 } else { 6708 nseg = segvn_split_seg(nseg, lpgaddr + pgsz); 6709 ASSERT(badseg1->s_base == lpgaddr); 6710 ASSERT(badseg1->s_size == pgsz); 6711 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) { 6712 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz); 6713 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz); 6714 badseg2 = nseg; 6715 (void) segvn_split_seg(nseg, lpgeaddr); 6716 ASSERT(badseg2->s_base == lpgeaddr - pgsz); 6717 ASSERT(badseg2->s_size == pgsz); 6718 } 6719 } 6720 } else { 6721 ASSERT(flag == SDR_END); 6722 ASSERT(eaddr < lpgeaddr); 6723 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz); 6724 (void) segvn_split_seg(nseg, lpgeaddr); 6725 ASSERT(badseg1->s_base == lpgeaddr - pgsz); 6726 ASSERT(badseg1->s_size == pgsz); 6727 } 6728 6729 ASSERT(badseg1 != NULL); 6730 ASSERT(badseg1->s_szc == szc); 6731 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz || 6732 badseg1->s_size == 2 * pgsz); 6733 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz)); 6734 ASSERT(badseg1->s_size == pgsz || 6735 sameprot(badseg1, badseg1->s_base + pgsz, pgsz)); 6736 if (err = segvn_clrszc(badseg1)) { 6737 return (err); 6738 } 6739 ASSERT(badseg1->s_szc == 0); 6740 6741 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6742 uint_t tszc = highbit(tszcvec) - 1; 6743 caddr_t ta = MAX(addr, badseg1->s_base); 6744 caddr_t te; 6745 size_t tpgsz = page_get_pagesize(tszc); 6746 6747 ASSERT(svd->type == MAP_SHARED); 6748 ASSERT(flag == SDR_END); 6749 ASSERT(tszc < szc && tszc > 0); 6750 6751 if (eaddr > badseg1->s_base + badseg1->s_size) { 6752 te = badseg1->s_base + badseg1->s_size; 6753 } else { 6754 te = eaddr; 6755 } 6756 6757 ASSERT(ta <= te); 6758 badseg1->s_szc = tszc; 6759 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) { 6760 if (badseg2 != NULL) { 6761 err = segvn_demote_range(badseg1, ta, te - ta, 6762 SDR_END, tszcvec); 6763 if (err != 0) { 6764 return (err); 6765 } 6766 } else { 6767 
return (segvn_demote_range(badseg1, ta, 6768 te - ta, SDR_END, tszcvec)); 6769 } 6770 } 6771 } 6772 6773 if (badseg2 == NULL) 6774 return (0); 6775 ASSERT(badseg2->s_szc == szc); 6776 ASSERT(badseg2->s_size == pgsz); 6777 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size)); 6778 if (err = segvn_clrszc(badseg2)) { 6779 return (err); 6780 } 6781 ASSERT(badseg2->s_szc == 0); 6782 6783 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6784 uint_t tszc = highbit(tszcvec) - 1; 6785 size_t tpgsz = page_get_pagesize(tszc); 6786 6787 ASSERT(svd->type == MAP_SHARED); 6788 ASSERT(flag == SDR_END); 6789 ASSERT(tszc < szc && tszc > 0); 6790 ASSERT(badseg2->s_base > addr); 6791 ASSERT(eaddr > badseg2->s_base); 6792 ASSERT(eaddr < badseg2->s_base + badseg2->s_size); 6793 6794 badseg2->s_szc = tszc; 6795 if (!IS_P2ALIGNED(eaddr, tpgsz)) { 6796 return (segvn_demote_range(badseg2, badseg2->s_base, 6797 eaddr - badseg2->s_base, SDR_END, tszcvec)); 6798 } 6799 } 6800 6801 return (0); 6802 } 6803 6804 static int 6805 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 6806 { 6807 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6808 struct vpage *vp, *evp; 6809 6810 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6811 6812 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6813 /* 6814 * If segment protection can be used, simply check against them. 6815 */ 6816 if (svd->pageprot == 0) { 6817 int err; 6818 6819 err = ((svd->prot & prot) != prot) ? EACCES : 0; 6820 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6821 return (err); 6822 } 6823 6824 /* 6825 * Have to check down to the vpage level. 6826 */ 6827 evp = &svd->vpage[seg_page(seg, addr + len)]; 6828 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) { 6829 if ((VPP_PROT(vp) & prot) != prot) { 6830 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6831 return (EACCES); 6832 } 6833 } 6834 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6835 return (0); 6836 } 6837 6838 static int 6839 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 6840 { 6841 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6842 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; 6843 6844 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6845 6846 if (pgno != 0) { 6847 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6848 if (svd->pageprot == 0) { 6849 do { 6850 protv[--pgno] = svd->prot; 6851 } while (pgno != 0); 6852 } else { 6853 size_t pgoff = seg_page(seg, addr); 6854 6855 do { 6856 pgno--; 6857 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]); 6858 } while (pgno != 0); 6859 } 6860 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6861 } 6862 return (0); 6863 } 6864 6865 static u_offset_t 6866 segvn_getoffset(struct seg *seg, caddr_t addr) 6867 { 6868 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6869 6870 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6871 6872 return (svd->offset + (uintptr_t)(addr - seg->s_base)); 6873 } 6874 6875 /*ARGSUSED*/ 6876 static int 6877 segvn_gettype(struct seg *seg, caddr_t addr) 6878 { 6879 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6880 6881 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6882 6883 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT | 6884 MAP_INITDATA))); 6885 } 6886 6887 /*ARGSUSED*/ 6888 static int 6889 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 6890 { 6891 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6892 6893 
ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6894 6895 *vpp = svd->vp; 6896 return (0); 6897 } 6898 6899 /* 6900 * Check to see if it makes sense to do kluster/read ahead to 6901 * addr + delta relative to the mapping at addr. We assume here 6902 * that delta is a signed PAGESIZE'd multiple (which can be negative). 6903 * 6904 * For segvn, we currently "approve" of the action if we are 6905 * still in the segment and it maps from the same vp/off, 6906 * or if the advice stored in segvn_data or vpages allows it. 6907 * Currently, klustering is not allowed only if MADV_RANDOM is set. 6908 */ 6909 static int 6910 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta) 6911 { 6912 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6913 struct anon *oap, *ap; 6914 ssize_t pd; 6915 size_t page; 6916 struct vnode *vp1, *vp2; 6917 u_offset_t off1, off2; 6918 struct anon_map *amp; 6919 6920 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6921 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6922 SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 6923 6924 if (addr + delta < seg->s_base || 6925 addr + delta >= (seg->s_base + seg->s_size)) 6926 return (-1); /* exceeded segment bounds */ 6927 6928 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */ 6929 page = seg_page(seg, addr); 6930 6931 /* 6932 * Check to see if either of the pages addr or addr + delta 6933 * have advice set that prevents klustering (if MADV_RANDOM advice 6934 * is set for entire segment, or MADV_SEQUENTIAL is set and delta 6935 * is negative). 6936 */ 6937 if (svd->advice == MADV_RANDOM || 6938 svd->advice == MADV_SEQUENTIAL && delta < 0) 6939 return (-1); 6940 else if (svd->pageadvice && svd->vpage) { 6941 struct vpage *bvpp, *evpp; 6942 6943 bvpp = &svd->vpage[page]; 6944 evpp = &svd->vpage[page + pd]; 6945 if (VPP_ADVICE(bvpp) == MADV_RANDOM || 6946 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0) 6947 return (-1); 6948 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) && 6949 VPP_ADVICE(evpp) == MADV_RANDOM) 6950 return (-1); 6951 } 6952 6953 if (svd->type == MAP_SHARED) 6954 return (0); /* shared mapping - all ok */ 6955 6956 if ((amp = svd->amp) == NULL) 6957 return (0); /* off original vnode */ 6958 6959 page += svd->anon_index; 6960 6961 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 6962 6963 oap = anon_get_ptr(amp->ahp, page); 6964 ap = anon_get_ptr(amp->ahp, page + pd); 6965 6966 ANON_LOCK_EXIT(&->a_rwlock); 6967 6968 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) { 6969 return (-1); /* one with and one without an anon */ 6970 } 6971 6972 if (oap == NULL) { /* implies that ap == NULL */ 6973 return (0); /* off original vnode */ 6974 } 6975 6976 /* 6977 * Now we know we have two anon pointers - check to 6978 * see if they happen to be properly allocated. 6979 */ 6980 6981 /* 6982 * XXX We cheat here and don't lock the anon slots. We can't because 6983 * we may have been called from the anon layer which might already 6984 * have locked them. We are holding a refcnt on the slots so they 6985 * can't disappear. The worst that will happen is we'll get the wrong 6986 * names (vp, off) for the slots and make a poor klustering decision. 6987 */ 6988 swap_xlate(ap, &vp1, &off1); 6989 swap_xlate(oap, &vp2, &off2); 6990 6991 6992 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta) 6993 return (-1); 6994 return (0); 6995 } 6996 6997 /* 6998 * Synchronize primary storage cache with real object in virtual memory. 
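 * This backs msync(3C): each page in the range is pushed to its backing
 * store with VOP_PUTPAGE(), with MS_ASYNC and MS_INVALIDATE mapped to the
 * B_ASYNC and B_INVAL b_flags respectively.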
6999 * 7000 * XXX - Anonymous pages should not be sync'ed out at all. 7001 */ 7002 static int 7003 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) 7004 { 7005 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7006 struct vpage *vpp; 7007 page_t *pp; 7008 u_offset_t offset; 7009 struct vnode *vp; 7010 u_offset_t off; 7011 caddr_t eaddr; 7012 int bflags; 7013 int err = 0; 7014 int segtype; 7015 int pageprot; 7016 int prot; 7017 ulong_t anon_index; 7018 struct anon_map *amp; 7019 struct anon *ap; 7020 anon_sync_obj_t cookie; 7021 7022 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7023 7024 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7025 7026 if (svd->softlockcnt > 0) { 7027 /* 7028 * If this is shared segment non 0 softlockcnt 7029 * means locked pages are still in use. 7030 */ 7031 if (svd->type == MAP_SHARED) { 7032 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7033 return (EAGAIN); 7034 } 7035 7036 /* 7037 * flush all pages from seg cache 7038 * otherwise we may deadlock in swap_putpage 7039 * for B_INVAL page (4175402). 7040 * 7041 * Even if we grab segvn WRITER's lock 7042 * here, there might be another thread which could've 7043 * successfully performed lookup/insert just before 7044 * we acquired the lock here. So, grabbing either 7045 * lock here is of not much use. Until we devise 7046 * a strategy at upper layers to solve the 7047 * synchronization issues completely, we expect 7048 * applications to handle this appropriately. 7049 */ 7050 segvn_purge(seg); 7051 if (svd->softlockcnt > 0) { 7052 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7053 return (EAGAIN); 7054 } 7055 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 7056 svd->amp->a_softlockcnt > 0) { 7057 /* 7058 * Try to purge this amp's entries from pcache. It will 7059 * succeed only if other segments that share the amp have no 7060 * outstanding softlock's. 7061 */ 7062 segvn_purge(seg); 7063 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) { 7064 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7065 return (EAGAIN); 7066 } 7067 } 7068 7069 vpp = svd->vpage; 7070 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7071 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) | 7072 ((flags & MS_INVALIDATE) ? B_INVAL : 0); 7073 7074 if (attr) { 7075 pageprot = attr & ~(SHARED|PRIVATE); 7076 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE; 7077 7078 /* 7079 * We are done if the segment types don't match 7080 * or if we have segment level protections and 7081 * they don't match. 7082 */ 7083 if (svd->type != segtype) { 7084 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7085 return (0); 7086 } 7087 if (vpp == NULL) { 7088 if (svd->prot != pageprot) { 7089 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7090 return (0); 7091 } 7092 prot = svd->prot; 7093 } else 7094 vpp = &svd->vpage[seg_page(seg, addr)]; 7095 7096 } else if (svd->vp && svd->amp == NULL && 7097 (flags & MS_INVALIDATE) == 0) { 7098 7099 /* 7100 * No attributes, no anonymous pages and MS_INVALIDATE flag 7101 * is not on, just use one big request. 
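 * A single VOP_PUTPAGE() over the whole range lets the filesystem cluster
 * the write-back instead of being asked one page at a time.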
7102 */ 7103 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len, 7104 bflags, svd->cred, NULL); 7105 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7106 return (err); 7107 } 7108 7109 if ((amp = svd->amp) != NULL) 7110 anon_index = svd->anon_index + seg_page(seg, addr); 7111 7112 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) { 7113 ap = NULL; 7114 if (amp != NULL) { 7115 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7116 anon_array_enter(amp, anon_index, &cookie); 7117 ap = anon_get_ptr(amp->ahp, anon_index++); 7118 if (ap != NULL) { 7119 swap_xlate(ap, &vp, &off); 7120 } else { 7121 vp = svd->vp; 7122 off = offset; 7123 } 7124 anon_array_exit(&cookie); 7125 ANON_LOCK_EXIT(&->a_rwlock); 7126 } else { 7127 vp = svd->vp; 7128 off = offset; 7129 } 7130 offset += PAGESIZE; 7131 7132 if (vp == NULL) /* untouched zfod page */ 7133 continue; 7134 7135 if (attr) { 7136 if (vpp) { 7137 prot = VPP_PROT(vpp); 7138 vpp++; 7139 } 7140 if (prot != pageprot) { 7141 continue; 7142 } 7143 } 7144 7145 /* 7146 * See if any of these pages are locked -- if so, then we 7147 * will have to truncate an invalidate request at the first 7148 * locked one. We don't need the page_struct_lock to test 7149 * as this is only advisory; even if we acquire it someone 7150 * might race in and lock the page after we unlock and before 7151 * we do the PUTPAGE, then PUTPAGE simply does nothing. 7152 */ 7153 if (flags & MS_INVALIDATE) { 7154 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) { 7155 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 7156 page_unlock(pp); 7157 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7158 return (EBUSY); 7159 } 7160 if (ap != NULL && pp->p_szc != 0 && 7161 page_tryupgrade(pp)) { 7162 if (pp->p_lckcnt == 0 && 7163 pp->p_cowcnt == 0) { 7164 /* 7165 * swapfs VN_DISPOSE() won't 7166 * invalidate large pages. 7167 * Attempt to demote. 7168 * XXX can't help it if it 7169 * fails. But for swapfs 7170 * pages it is no big deal. 7171 */ 7172 (void) page_try_demote_pages( 7173 pp); 7174 } 7175 } 7176 page_unlock(pp); 7177 } 7178 } else if (svd->type == MAP_SHARED && amp != NULL) { 7179 /* 7180 * Avoid writing out to disk ISM's large pages 7181 * because segspt_free_pages() relies on NULL an_pvp 7182 * of anon slots of such pages. 7183 */ 7184 7185 ASSERT(svd->vp == NULL); 7186 /* 7187 * swapfs uses page_lookup_nowait if not freeing or 7188 * invalidating and skips a page if 7189 * page_lookup_nowait returns NULL. 7190 */ 7191 pp = page_lookup_nowait(vp, off, SE_SHARED); 7192 if (pp == NULL) { 7193 continue; 7194 } 7195 if (pp->p_szc != 0) { 7196 page_unlock(pp); 7197 continue; 7198 } 7199 7200 /* 7201 * Note ISM pages are created large so (vp, off)'s 7202 * page cannot suddenly become large after we unlock 7203 * pp. 7204 */ 7205 page_unlock(pp); 7206 } 7207 /* 7208 * XXX - Should ultimately try to kluster 7209 * calls to VOP_PUTPAGE() for performance. 7210 */ 7211 VN_HOLD(vp); 7212 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE, 7213 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)), 7214 svd->cred, NULL); 7215 7216 VN_RELE(vp); 7217 if (err) 7218 break; 7219 } 7220 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7221 return (err); 7222 } 7223 7224 /* 7225 * Determine if we have data corresponding to pages in the 7226 * primary storage virtual memory cache (i.e., "in core"). 
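 * This backs mincore(2): one status byte is produced per page, built from
 * the SEG_PAGE_* flags (INCORE, ANON, VNODE, SOFTLOCK, HASCOW, LOCKED).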
7227 */ 7228 static size_t 7229 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) 7230 { 7231 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7232 struct vnode *vp, *avp; 7233 u_offset_t offset, aoffset; 7234 size_t p, ep; 7235 int ret; 7236 struct vpage *vpp; 7237 page_t *pp; 7238 uint_t start; 7239 struct anon_map *amp; /* XXX - for locknest */ 7240 struct anon *ap; 7241 uint_t attr; 7242 anon_sync_obj_t cookie; 7243 7244 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7245 7246 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7247 if (svd->amp == NULL && svd->vp == NULL) { 7248 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7249 bzero(vec, btopr(len)); 7250 return (len); /* no anonymous pages created yet */ 7251 } 7252 7253 p = seg_page(seg, addr); 7254 ep = seg_page(seg, addr + len); 7255 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0; 7256 7257 amp = svd->amp; 7258 for (; p < ep; p++, addr += PAGESIZE) { 7259 vpp = (svd->vpage) ? &svd->vpage[p]: NULL; 7260 ret = start; 7261 ap = NULL; 7262 avp = NULL; 7263 /* Grab the vnode/offset for the anon slot */ 7264 if (amp != NULL) { 7265 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7266 anon_array_enter(amp, svd->anon_index + p, &cookie); 7267 ap = anon_get_ptr(amp->ahp, svd->anon_index + p); 7268 if (ap != NULL) { 7269 swap_xlate(ap, &avp, &aoffset); 7270 } 7271 anon_array_exit(&cookie); 7272 ANON_LOCK_EXIT(&->a_rwlock); 7273 } 7274 if ((avp != NULL) && page_exists(avp, aoffset)) { 7275 /* A page exists for the anon slot */ 7276 ret |= SEG_PAGE_INCORE; 7277 7278 /* 7279 * If page is mapped and writable 7280 */ 7281 attr = (uint_t)0; 7282 if ((hat_getattr(seg->s_as->a_hat, addr, 7283 &attr) != -1) && (attr & PROT_WRITE)) { 7284 ret |= SEG_PAGE_ANON; 7285 } 7286 /* 7287 * Don't get page_struct lock for lckcnt and cowcnt, 7288 * since this is purely advisory. 7289 */ 7290 if ((pp = page_lookup_nowait(avp, aoffset, 7291 SE_SHARED)) != NULL) { 7292 if (pp->p_lckcnt) 7293 ret |= SEG_PAGE_SOFTLOCK; 7294 if (pp->p_cowcnt) 7295 ret |= SEG_PAGE_HASCOW; 7296 page_unlock(pp); 7297 } 7298 } 7299 7300 /* Gather vnode statistics */ 7301 vp = svd->vp; 7302 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7303 7304 if (vp != NULL) { 7305 /* 7306 * Try to obtain a "shared" lock on the page 7307 * without blocking. If this fails, determine 7308 * if the page is in memory. 7309 */ 7310 pp = page_lookup_nowait(vp, offset, SE_SHARED); 7311 if ((pp == NULL) && (page_exists(vp, offset))) { 7312 /* Page is incore, and is named */ 7313 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7314 } 7315 /* 7316 * Don't get page_struct lock for lckcnt and cowcnt, 7317 * since this is purely advisory. 7318 */ 7319 if (pp != NULL) { 7320 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7321 if (pp->p_lckcnt) 7322 ret |= SEG_PAGE_SOFTLOCK; 7323 if (pp->p_cowcnt) 7324 ret |= SEG_PAGE_HASCOW; 7325 page_unlock(pp); 7326 } 7327 } 7328 7329 /* Gather virtual page information */ 7330 if (vpp) { 7331 if (VPP_ISPPLOCK(vpp)) 7332 ret |= SEG_PAGE_LOCKED; 7333 vpp++; 7334 } 7335 7336 *vec++ = (char)ret; 7337 } 7338 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7339 return (len); 7340 } 7341 7342 /* 7343 * Statement for p_cowcnts/p_lckcnts. 
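 * (Short form: a locked page is counted in p_cowcnt when it is mapped
 * MAP_PRIVATE with PROT_WRITE, i.e. a private copy may still have to be
 * made; all other locked pages are counted in p_lckcnt.)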
7344 * 7345 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region 7346 * irrespective of the following factors or anything else: 7347 * 7348 * (1) anon slots are populated or not 7349 * (2) cow is broken or not 7350 * (3) refcnt on ap is 1 or greater than 1 7351 * 7352 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock 7353 * and munlock. 7354 * 7355 * 7356 * Handling p_cowcnts/p_lckcnts during copy-on-write fault: 7357 * 7358 * if vpage has PROT_WRITE 7359 * transfer cowcnt on the oldpage -> cowcnt on the newpage 7360 * else 7361 * transfer lckcnt on the oldpage -> lckcnt on the newpage 7362 * 7363 * During copy-on-write, decrement p_cowcnt on the oldpage and increment 7364 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE. 7365 * 7366 * We may also break COW if softlocking on read access in the physio case. 7367 * In this case, vpage may not have PROT_WRITE. So, we need to decrement 7368 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the 7369 * vpage doesn't have PROT_WRITE. 7370 * 7371 * 7372 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region: 7373 * 7374 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and 7375 * increment p_lckcnt by calling page_subclaim() which takes care of 7376 * availrmem accounting and p_lckcnt overflow. 7377 * 7378 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and 7379 * increment p_cowcnt by calling page_addclaim() which takes care of 7380 * availrmem availability and p_cowcnt overflow. 7381 */ 7382 7383 /* 7384 * Lock down (or unlock) pages mapped by this segment. 7385 * 7386 * XXX only creates PAGESIZE pages if anon slots are not initialized. 7387 * At fault time they will be relocated into larger pages. 7388 */ 7389 static int 7390 segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 7391 int attr, int op, ulong_t *lockmap, size_t pos) 7392 { 7393 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7394 struct vpage *vpp; 7395 struct vpage *evp; 7396 page_t *pp; 7397 u_offset_t offset; 7398 u_offset_t off; 7399 int segtype; 7400 int pageprot; 7401 int claim; 7402 struct vnode *vp; 7403 ulong_t anon_index; 7404 struct anon_map *amp; 7405 struct anon *ap; 7406 struct vattr va; 7407 anon_sync_obj_t cookie; 7408 struct kshmid *sp = NULL; 7409 struct proc *p = curproc; 7410 kproject_t *proj = NULL; 7411 int chargeproc = 1; 7412 size_t locked_bytes = 0; 7413 size_t unlocked_bytes = 0; 7414 int err = 0; 7415 7416 /* 7417 * Hold write lock on address space because may split or concatenate 7418 * segments 7419 */ 7420 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7421 7422 /* 7423 * If this is a shm, use shm's project and zone, else use 7424 * project and zone of calling process 7425 */ 7426 7427 /* Determine if this segment backs a sysV shm */ 7428 if (svd->amp != NULL && svd->amp->a_sp != NULL) { 7429 ASSERT(svd->type == MAP_SHARED); 7430 ASSERT(svd->tr_state == SEGVN_TR_OFF); 7431 sp = svd->amp->a_sp; 7432 proj = sp->shm_perm.ipc_proj; 7433 chargeproc = 0; 7434 } 7435 7436 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7437 if (attr) { 7438 pageprot = attr & ~(SHARED|PRIVATE); 7439 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE; 7440 7441 /* 7442 * We are done if the segment types don't match 7443 * or if we have segment level protections and 7444 * they don't match. 
7445 */ 7446 if (svd->type != segtype) { 7447 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7448 return (0); 7449 } 7450 if (svd->pageprot == 0 && svd->prot != pageprot) { 7451 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7452 return (0); 7453 } 7454 } 7455 7456 if (op == MC_LOCK) { 7457 if (svd->tr_state == SEGVN_TR_INIT) { 7458 svd->tr_state = SEGVN_TR_OFF; 7459 } else if (svd->tr_state == SEGVN_TR_ON) { 7460 ASSERT(svd->amp != NULL); 7461 segvn_textunrepl(seg, 0); 7462 ASSERT(svd->amp == NULL && 7463 svd->tr_state == SEGVN_TR_OFF); 7464 } 7465 } 7466 7467 /* 7468 * If we're locking, then we must create a vpage structure if 7469 * none exists. If we're unlocking, then check to see if there 7470 * is a vpage -- if not, then we could not have locked anything. 7471 */ 7472 7473 if ((vpp = svd->vpage) == NULL) { 7474 if (op == MC_LOCK) 7475 segvn_vpage(seg); 7476 else { 7477 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7478 return (0); 7479 } 7480 } 7481 7482 /* 7483 * The anonymous data vector (i.e., previously 7484 * unreferenced mapping to swap space) can be allocated 7485 * by lazily testing for its existence. 7486 */ 7487 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) { 7488 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 7489 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 7490 svd->amp->a_szc = seg->s_szc; 7491 } 7492 7493 if ((amp = svd->amp) != NULL) { 7494 anon_index = svd->anon_index + seg_page(seg, addr); 7495 } 7496 7497 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7498 evp = &svd->vpage[seg_page(seg, addr + len)]; 7499 7500 if (sp != NULL) 7501 mutex_enter(&sp->shm_mlock); 7502 7503 /* determine number of unlocked bytes in range for lock operation */ 7504 if (op == MC_LOCK) { 7505 7506 if (sp == NULL) { 7507 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7508 vpp++) { 7509 if (!VPP_ISPPLOCK(vpp)) 7510 unlocked_bytes += PAGESIZE; 7511 } 7512 } else { 7513 ulong_t i_idx, i_edx; 7514 anon_sync_obj_t i_cookie; 7515 struct anon *i_ap; 7516 struct vnode *i_vp; 7517 u_offset_t i_off; 7518 7519 /* Only count sysV pages once for locked memory */ 7520 i_edx = svd->anon_index + seg_page(seg, addr + len); 7521 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7522 for (i_idx = anon_index; i_idx < i_edx; i_idx++) { 7523 anon_array_enter(amp, i_idx, &i_cookie); 7524 i_ap = anon_get_ptr(amp->ahp, i_idx); 7525 if (i_ap == NULL) { 7526 unlocked_bytes += PAGESIZE; 7527 anon_array_exit(&i_cookie); 7528 continue; 7529 } 7530 swap_xlate(i_ap, &i_vp, &i_off); 7531 anon_array_exit(&i_cookie); 7532 pp = page_lookup(i_vp, i_off, SE_SHARED); 7533 if (pp == NULL) { 7534 unlocked_bytes += PAGESIZE; 7535 continue; 7536 } else if (pp->p_lckcnt == 0) 7537 unlocked_bytes += PAGESIZE; 7538 page_unlock(pp); 7539 } 7540 ANON_LOCK_EXIT(&->a_rwlock); 7541 } 7542 7543 mutex_enter(&p->p_lock); 7544 err = rctl_incr_locked_mem(p, proj, unlocked_bytes, 7545 chargeproc); 7546 mutex_exit(&p->p_lock); 7547 7548 if (err) { 7549 if (sp != NULL) 7550 mutex_exit(&sp->shm_mlock); 7551 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7552 return (err); 7553 } 7554 } 7555 /* 7556 * Loop over all pages in the range. Process if we're locking and 7557 * page has not already been locked in this mapping; or if we're 7558 * unlocking and the page has been locked. 
7559 */ 7560 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7561 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) { 7562 if ((attr == 0 || VPP_PROT(vpp) == pageprot) && 7563 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) || 7564 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) { 7565 7566 if (amp != NULL) 7567 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7568 /* 7569 * If this isn't a MAP_NORESERVE segment and 7570 * we're locking, allocate anon slots if they 7571 * don't exist. The page is brought in later on. 7572 */ 7573 if (op == MC_LOCK && svd->vp == NULL && 7574 ((svd->flags & MAP_NORESERVE) == 0) && 7575 amp != NULL && 7576 ((ap = anon_get_ptr(amp->ahp, anon_index)) 7577 == NULL)) { 7578 anon_array_enter(amp, anon_index, &cookie); 7579 7580 if ((ap = anon_get_ptr(amp->ahp, 7581 anon_index)) == NULL) { 7582 pp = anon_zero(seg, addr, &ap, 7583 svd->cred); 7584 if (pp == NULL) { 7585 anon_array_exit(&cookie); 7586 ANON_LOCK_EXIT(&->a_rwlock); 7587 err = ENOMEM; 7588 goto out; 7589 } 7590 ASSERT(anon_get_ptr(amp->ahp, 7591 anon_index) == NULL); 7592 (void) anon_set_ptr(amp->ahp, 7593 anon_index, ap, ANON_SLEEP); 7594 page_unlock(pp); 7595 } 7596 anon_array_exit(&cookie); 7597 } 7598 7599 /* 7600 * Get name for page, accounting for 7601 * existence of private copy. 7602 */ 7603 ap = NULL; 7604 if (amp != NULL) { 7605 anon_array_enter(amp, anon_index, &cookie); 7606 ap = anon_get_ptr(amp->ahp, anon_index); 7607 if (ap != NULL) { 7608 swap_xlate(ap, &vp, &off); 7609 } else { 7610 if (svd->vp == NULL && 7611 (svd->flags & MAP_NORESERVE)) { 7612 anon_array_exit(&cookie); 7613 ANON_LOCK_EXIT(&->a_rwlock); 7614 continue; 7615 } 7616 vp = svd->vp; 7617 off = offset; 7618 } 7619 if (op != MC_LOCK || ap == NULL) { 7620 anon_array_exit(&cookie); 7621 ANON_LOCK_EXIT(&->a_rwlock); 7622 } 7623 } else { 7624 vp = svd->vp; 7625 off = offset; 7626 } 7627 7628 /* 7629 * Get page frame. It's ok if the page is 7630 * not available when we're unlocking, as this 7631 * may simply mean that a page we locked got 7632 * truncated out of existence after we locked it. 7633 * 7634 * Invoke VOP_GETPAGE() to obtain the page struct 7635 * since we may need to read it from disk if its 7636 * been paged out. 7637 */ 7638 if (op != MC_LOCK) 7639 pp = page_lookup(vp, off, SE_SHARED); 7640 else { 7641 page_t *pl[1 + 1]; 7642 int error; 7643 7644 ASSERT(vp != NULL); 7645 7646 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 7647 (uint_t *)NULL, pl, PAGESIZE, seg, addr, 7648 S_OTHER, svd->cred, NULL); 7649 7650 if (error && ap != NULL) { 7651 anon_array_exit(&cookie); 7652 ANON_LOCK_EXIT(&->a_rwlock); 7653 } 7654 7655 /* 7656 * If the error is EDEADLK then we must bounce 7657 * up and drop all vm subsystem locks and then 7658 * retry the operation later 7659 * This behavior is a temporary measure because 7660 * ufs/sds logging is badly designed and will 7661 * deadlock if we don't allow this bounce to 7662 * happen. The real solution is to re-design 7663 * the logging code to work properly. See bug 7664 * 4125102 for details of the problem. 7665 */ 7666 if (error == EDEADLK) { 7667 err = error; 7668 goto out; 7669 } 7670 /* 7671 * Quit if we fail to fault in the page. Treat 7672 * the failure as an error, unless the addr 7673 * is mapped beyond the end of a file. 
7674 */ 7675 if (error && svd->vp) { 7676 va.va_mask = AT_SIZE; 7677 if (VOP_GETATTR(svd->vp, &va, 0, 7678 svd->cred, NULL) != 0) { 7679 err = EIO; 7680 goto out; 7681 } 7682 if (btopr(va.va_size) >= 7683 btopr(off + 1)) { 7684 err = EIO; 7685 goto out; 7686 } 7687 goto out; 7688 7689 } else if (error) { 7690 err = EIO; 7691 goto out; 7692 } 7693 pp = pl[0]; 7694 ASSERT(pp != NULL); 7695 } 7696 7697 /* 7698 * See Statement at the beginning of this routine. 7699 * 7700 * claim is always set if MAP_PRIVATE and PROT_WRITE 7701 * irrespective of following factors: 7702 * 7703 * (1) anon slots are populated or not 7704 * (2) cow is broken or not 7705 * (3) refcnt on ap is 1 or greater than 1 7706 * 7707 * See 4140683 for details 7708 */ 7709 claim = ((VPP_PROT(vpp) & PROT_WRITE) && 7710 (svd->type == MAP_PRIVATE)); 7711 7712 /* 7713 * Perform page-level operation appropriate to 7714 * operation. If locking, undo the SOFTLOCK 7715 * performed to bring the page into memory 7716 * after setting the lock. If unlocking, 7717 * and no page was found, account for the claim 7718 * separately. 7719 */ 7720 if (op == MC_LOCK) { 7721 int ret = 1; /* Assume success */ 7722 7723 ASSERT(!VPP_ISPPLOCK(vpp)); 7724 7725 ret = page_pp_lock(pp, claim, 0); 7726 if (ap != NULL) { 7727 if (ap->an_pvp != NULL) { 7728 anon_swap_free(ap, pp); 7729 } 7730 anon_array_exit(&cookie); 7731 ANON_LOCK_EXIT(&->a_rwlock); 7732 } 7733 if (ret == 0) { 7734 /* locking page failed */ 7735 page_unlock(pp); 7736 err = EAGAIN; 7737 goto out; 7738 } 7739 VPP_SETPPLOCK(vpp); 7740 if (sp != NULL) { 7741 if (pp->p_lckcnt == 1) 7742 locked_bytes += PAGESIZE; 7743 } else 7744 locked_bytes += PAGESIZE; 7745 7746 if (lockmap != (ulong_t *)NULL) 7747 BT_SET(lockmap, pos); 7748 7749 page_unlock(pp); 7750 } else { 7751 ASSERT(VPP_ISPPLOCK(vpp)); 7752 if (pp != NULL) { 7753 /* sysV pages should be locked */ 7754 ASSERT(sp == NULL || pp->p_lckcnt > 0); 7755 page_pp_unlock(pp, claim, 0); 7756 if (sp != NULL) { 7757 if (pp->p_lckcnt == 0) 7758 unlocked_bytes 7759 += PAGESIZE; 7760 } else 7761 unlocked_bytes += PAGESIZE; 7762 page_unlock(pp); 7763 } else { 7764 ASSERT(sp == NULL); 7765 unlocked_bytes += PAGESIZE; 7766 } 7767 VPP_CLRPPLOCK(vpp); 7768 } 7769 } 7770 } 7771 out: 7772 if (op == MC_LOCK) { 7773 /* Credit back bytes that did not get locked */ 7774 if ((unlocked_bytes - locked_bytes) > 0) { 7775 if (proj == NULL) 7776 mutex_enter(&p->p_lock); 7777 rctl_decr_locked_mem(p, proj, 7778 (unlocked_bytes - locked_bytes), chargeproc); 7779 if (proj == NULL) 7780 mutex_exit(&p->p_lock); 7781 } 7782 7783 } else { 7784 /* Account bytes that were unlocked */ 7785 if (unlocked_bytes > 0) { 7786 if (proj == NULL) 7787 mutex_enter(&p->p_lock); 7788 rctl_decr_locked_mem(p, proj, unlocked_bytes, 7789 chargeproc); 7790 if (proj == NULL) 7791 mutex_exit(&p->p_lock); 7792 } 7793 } 7794 if (sp != NULL) 7795 mutex_exit(&sp->shm_mlock); 7796 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7797 7798 return (err); 7799 } 7800 7801 /* 7802 * Set advice from user for specified pages 7803 * There are 5 types of advice: 7804 * MADV_NORMAL - Normal (default) behavior (whatever that is) 7805 * MADV_RANDOM - Random page references 7806 * do not allow readahead or 'klustering' 7807 * MADV_SEQUENTIAL - Sequential page references 7808 * Pages previous to the one currently being 7809 * accessed (determined by fault) are 'not needed' 7810 * and are freed immediately 7811 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl) 7812 * MADV_DONTNEED - Pages are not needed 
(synced out in mctl) 7813 * MADV_FREE - Contents can be discarded 7814 * MADV_ACCESS_DEFAULT- Default access 7815 * MADV_ACCESS_LWP - Next LWP will access heavily 7816 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily 7817 */ 7818 static int 7819 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 7820 { 7821 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7822 size_t page; 7823 int err = 0; 7824 int already_set; 7825 struct anon_map *amp; 7826 ulong_t anon_index; 7827 struct seg *next; 7828 lgrp_mem_policy_t policy; 7829 struct seg *prev; 7830 struct vnode *vp; 7831 7832 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7833 7834 /* 7835 * In case of MADV_FREE, we won't be modifying any segment private 7836 * data structures; so, we only need to grab READER's lock 7837 */ 7838 if (behav != MADV_FREE) { 7839 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7840 if (svd->tr_state != SEGVN_TR_OFF) { 7841 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7842 return (0); 7843 } 7844 } else { 7845 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7846 } 7847 7848 /* 7849 * Large pages are assumed to be only turned on when accesses to the 7850 * segment's address range have spatial and temporal locality. That 7851 * justifies ignoring MADV_SEQUENTIAL for large page segments. 7852 * Also, ignore advice affecting lgroup memory allocation 7853 * if don't need to do lgroup optimizations on this system 7854 */ 7855 7856 if ((behav == MADV_SEQUENTIAL && 7857 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) || 7858 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT || 7859 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) { 7860 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7861 return (0); 7862 } 7863 7864 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT || 7865 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) { 7866 /* 7867 * Since we are going to unload hat mappings 7868 * we first have to flush the cache. Otherwise 7869 * this might lead to system panic if another 7870 * thread is doing physio on the range whose 7871 * mappings are unloaded by madvise(3C). 7872 */ 7873 if (svd->softlockcnt > 0) { 7874 /* 7875 * If this is shared segment non 0 softlockcnt 7876 * means locked pages are still in use. 7877 */ 7878 if (svd->type == MAP_SHARED) { 7879 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7880 return (EAGAIN); 7881 } 7882 /* 7883 * Since we do have the segvn writers lock 7884 * nobody can fill the cache with entries 7885 * belonging to this seg during the purge. 7886 * The flush either succeeds or we still 7887 * have pending I/Os. In the later case, 7888 * madvise(3C) fails. 7889 */ 7890 segvn_purge(seg); 7891 if (svd->softlockcnt > 0) { 7892 /* 7893 * Since madvise(3C) is advisory and 7894 * it's not part of UNIX98, madvise(3C) 7895 * failure here doesn't cause any hardship. 7896 * Note that we don't block in "as" layer. 7897 */ 7898 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7899 return (EAGAIN); 7900 } 7901 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 7902 svd->amp->a_softlockcnt > 0) { 7903 /* 7904 * Try to purge this amp's entries from pcache. It 7905 * will succeed only if other segments that share the 7906 * amp have no outstanding softlock's. 
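 * Unlike the private case above, a failed purge is not fatal here; the
 * advice is still applied.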
7907 */ 7908 segvn_purge(seg); 7909 } 7910 } 7911 7912 amp = svd->amp; 7913 vp = svd->vp; 7914 if (behav == MADV_FREE) { 7915 /* 7916 * MADV_FREE is not supported for segments with 7917 * underlying object; if anonmap is NULL, anon slots 7918 * are not yet populated and there is nothing for 7919 * us to do. As MADV_FREE is advisory, we don't 7920 * return error in either case. 7921 */ 7922 if (vp != NULL || amp == NULL) { 7923 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7924 return (0); 7925 } 7926 7927 segvn_purge(seg); 7928 7929 page = seg_page(seg, addr); 7930 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 7931 anon_disclaim(amp, svd->anon_index + page, len); 7932 ANON_LOCK_EXIT(&amp->a_rwlock); 7933 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7934 return (0); 7935 } 7936 7937 /* 7938 * If advice is to be applied to entire segment, 7939 * use advice field in segvn_data structure 7940 * otherwise use appropriate vpage entry. 7941 */ 7942 if ((addr == seg->s_base) && (len == seg->s_size)) { 7943 switch (behav) { 7944 case MADV_ACCESS_LWP: 7945 case MADV_ACCESS_MANY: 7946 case MADV_ACCESS_DEFAULT: 7947 /* 7948 * Set memory allocation policy for this segment 7949 */ 7950 policy = lgrp_madv_to_policy(behav, len, svd->type); 7951 if (svd->type == MAP_SHARED) 7952 already_set = lgrp_shm_policy_set(policy, amp, 7953 svd->anon_index, vp, svd->offset, len); 7954 else { 7955 /* 7956 * For private memory, need writers lock on 7957 * address space because the segment may be 7958 * split or concatenated when changing policy 7959 */ 7960 if (AS_READ_HELD(seg->s_as, 7961 &seg->s_as->a_lock)) { 7962 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7963 return (IE_RETRY); 7964 } 7965 7966 already_set = lgrp_privm_policy_set(policy, 7967 &svd->policy_info, len); 7968 } 7969 7970 /* 7971 * If policy set already and it shouldn't be reapplied, 7972 * don't do anything. 7973 */ 7974 if (already_set && 7975 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 7976 break; 7977 7978 /* 7979 * Mark any existing pages in given range for 7980 * migration 7981 */ 7982 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 7983 vp, svd->offset, 1); 7984 7985 /* 7986 * If same policy set already or this is a shared 7987 * memory segment, don't need to try to concatenate 7988 * segment with adjacent ones. 7989 */ 7990 if (already_set || svd->type == MAP_SHARED) 7991 break; 7992 7993 /* 7994 * Try to concatenate this segment with previous 7995 * one and next one, since we changed policy for 7996 * this one and it may be compatible with adjacent 7997 * ones now.
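 * Concatenation is only an optimization that keeps the address space
 * segment list short; segvn_concat() is expected to merge a neighbour
 * only when everything else about it (vnode, protections, flags, etc.)
 * is compatible, so a refusal to merge here is harmless.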
7998 */ 7999 prev = AS_SEGPREV(seg->s_as, seg); 8000 next = AS_SEGNEXT(seg->s_as, seg); 8001 8002 if (next && next->s_ops == &segvn_ops && 8003 addr + len == next->s_base) 8004 (void) segvn_concat(seg, next, 1); 8005 8006 if (prev && prev->s_ops == &segvn_ops && 8007 addr == prev->s_base + prev->s_size) { 8008 /* 8009 * Drop lock for private data of current 8010 * segment before concatenating (deleting) it 8011 * and return IE_REATTACH to tell as_ctl() that 8012 * current segment has changed 8013 */ 8014 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8015 if (!segvn_concat(prev, seg, 1)) 8016 err = IE_REATTACH; 8017 8018 return (err); 8019 } 8020 break; 8021 8022 case MADV_SEQUENTIAL: 8023 /* 8024 * unloading mapping guarantees 8025 * detection in segvn_fault 8026 */ 8027 ASSERT(seg->s_szc == 0); 8028 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8029 hat_unload(seg->s_as->a_hat, addr, len, 8030 HAT_UNLOAD); 8031 /* FALLTHROUGH */ 8032 case MADV_NORMAL: 8033 case MADV_RANDOM: 8034 svd->advice = (uchar_t)behav; 8035 svd->pageadvice = 0; 8036 break; 8037 case MADV_WILLNEED: /* handled in memcntl */ 8038 case MADV_DONTNEED: /* handled in memcntl */ 8039 case MADV_FREE: /* handled above */ 8040 break; 8041 default: 8042 err = EINVAL; 8043 } 8044 } else { 8045 caddr_t eaddr; 8046 struct seg *new_seg; 8047 struct segvn_data *new_svd; 8048 u_offset_t off; 8049 caddr_t oldeaddr; 8050 8051 page = seg_page(seg, addr); 8052 8053 segvn_vpage(seg); 8054 8055 switch (behav) { 8056 struct vpage *bvpp, *evpp; 8057 8058 case MADV_ACCESS_LWP: 8059 case MADV_ACCESS_MANY: 8060 case MADV_ACCESS_DEFAULT: 8061 /* 8062 * Set memory allocation policy for portion of this 8063 * segment 8064 */ 8065 8066 /* 8067 * Align address and length of advice to page 8068 * boundaries for large pages 8069 */ 8070 if (seg->s_szc != 0) { 8071 size_t pgsz; 8072 8073 pgsz = page_get_pagesize(seg->s_szc); 8074 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 8075 len = P2ROUNDUP(len, pgsz); 8076 } 8077 8078 /* 8079 * Check to see whether policy is set already 8080 */ 8081 policy = lgrp_madv_to_policy(behav, len, svd->type); 8082 8083 anon_index = svd->anon_index + page; 8084 off = svd->offset + (uintptr_t)(addr - seg->s_base); 8085 8086 if (svd->type == MAP_SHARED) 8087 already_set = lgrp_shm_policy_set(policy, amp, 8088 anon_index, vp, off, len); 8089 else 8090 already_set = 8091 (policy == svd->policy_info.mem_policy); 8092 8093 /* 8094 * If policy set already and it shouldn't be reapplied, 8095 * don't do anything. 
8096 */ 8097 if (already_set && 8098 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8099 break; 8100 8101 /* 8102 * For private memory, need writers lock on 8103 * address space because the segment may be 8104 * split or concatenated when changing policy 8105 */ 8106 if (svd->type == MAP_PRIVATE && 8107 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) { 8108 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8109 return (IE_RETRY); 8110 } 8111 8112 /* 8113 * Mark any existing pages in given range for 8114 * migration 8115 */ 8116 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8117 vp, svd->offset, 1); 8118 8119 /* 8120 * Don't need to try to split or concatenate 8121 * segments, since policy is same or this is a shared 8122 * memory segment 8123 */ 8124 if (already_set || svd->type == MAP_SHARED) 8125 break; 8126 8127 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 8128 ASSERT(svd->amp == NULL); 8129 ASSERT(svd->tr_state == SEGVN_TR_OFF); 8130 ASSERT(svd->softlockcnt == 0); 8131 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 8132 HAT_REGION_TEXT); 8133 svd->rcookie = HAT_INVALID_REGION_COOKIE; 8134 } 8135 8136 /* 8137 * Split off new segment if advice only applies to a 8138 * portion of existing segment starting in middle 8139 */ 8140 new_seg = NULL; 8141 eaddr = addr + len; 8142 oldeaddr = seg->s_base + seg->s_size; 8143 if (addr > seg->s_base) { 8144 /* 8145 * Must flush I/O page cache 8146 * before splitting segment 8147 */ 8148 if (svd->softlockcnt > 0) 8149 segvn_purge(seg); 8150 8151 /* 8152 * Split segment and return IE_REATTACH to tell 8153 * as_ctl() that current segment changed 8154 */ 8155 new_seg = segvn_split_seg(seg, addr); 8156 new_svd = (struct segvn_data *)new_seg->s_data; 8157 err = IE_REATTACH; 8158 8159 /* 8160 * If new segment ends where old one 8161 * did, try to concatenate the new 8162 * segment with next one. 8163 */ 8164 if (eaddr == oldeaddr) { 8165 /* 8166 * Set policy for new segment 8167 */ 8168 (void) lgrp_privm_policy_set(policy, 8169 &new_svd->policy_info, 8170 new_seg->s_size); 8171 8172 next = AS_SEGNEXT(new_seg->s_as, 8173 new_seg); 8174 8175 if (next && 8176 next->s_ops == &segvn_ops && 8177 eaddr == next->s_base) 8178 (void) segvn_concat(new_seg, 8179 next, 1); 8180 } 8181 } 8182 8183 /* 8184 * Split off end of existing segment if advice only 8185 * applies to a portion of segment ending before 8186 * end of the existing segment 8187 */ 8188 if (eaddr < oldeaddr) { 8189 /* 8190 * Must flush I/O page cache 8191 * before splitting segment 8192 */ 8193 if (svd->softlockcnt > 0) 8194 segvn_purge(seg); 8195 8196 /* 8197 * If beginning of old segment was already 8198 * split off, use new segment to split end off 8199 * from. 8200 */ 8201 if (new_seg != NULL && new_seg != seg) { 8202 /* 8203 * Split segment 8204 */ 8205 (void) segvn_split_seg(new_seg, eaddr); 8206 8207 /* 8208 * Set policy for new segment 8209 */ 8210 (void) lgrp_privm_policy_set(policy, 8211 &new_svd->policy_info, 8212 new_seg->s_size); 8213 } else { 8214 /* 8215 * Split segment and return IE_REATTACH 8216 * to tell as_ctl() that current 8217 * segment changed 8218 */ 8219 (void) segvn_split_seg(seg, eaddr); 8220 err = IE_REATTACH; 8221 8222 (void) lgrp_privm_policy_set(policy, 8223 &svd->policy_info, seg->s_size); 8224 8225 /* 8226 * If new segment starts where old one 8227 * did, try to concatenate it with 8228 * previous segment. 
8229 */ 8230 if (addr == seg->s_base) { 8231 prev = AS_SEGPREV(seg->s_as, 8232 seg); 8233 8234 /* 8235 * Drop lock for private data 8236 * of current segment before 8237 * concatenating (deleting) it 8238 */ 8239 if (prev && 8240 prev->s_ops == 8241 &segvn_ops && 8242 addr == prev->s_base + 8243 prev->s_size) { 8244 SEGVN_LOCK_EXIT( 8245 seg->s_as, 8246 &svd->lock); 8247 (void) segvn_concat( 8248 prev, seg, 1); 8249 return (err); 8250 } 8251 } 8252 } 8253 } 8254 break; 8255 case MADV_SEQUENTIAL: 8256 ASSERT(seg->s_szc == 0); 8257 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8258 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 8259 /* FALLTHROUGH */ 8260 case MADV_NORMAL: 8261 case MADV_RANDOM: 8262 bvpp = &svd->vpage[page]; 8263 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8264 for (; bvpp < evpp; bvpp++) 8265 VPP_SETADVICE(bvpp, behav); 8266 svd->advice = MADV_NORMAL; 8267 break; 8268 case MADV_WILLNEED: /* handled in memcntl */ 8269 case MADV_DONTNEED: /* handled in memcntl */ 8270 case MADV_FREE: /* handled above */ 8271 break; 8272 default: 8273 err = EINVAL; 8274 } 8275 } 8276 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8277 return (err); 8278 } 8279 8280 /* 8281 * Create a vpage structure for this seg. 8282 */ 8283 static void 8284 segvn_vpage(struct seg *seg) 8285 { 8286 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8287 struct vpage *vp, *evp; 8288 8289 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 8290 8291 /* 8292 * If no vpage structure exists, allocate one. Copy the protections 8293 * and the advice from the segment itself to the individual pages. 8294 */ 8295 if (svd->vpage == NULL) { 8296 svd->pageadvice = 1; 8297 svd->vpage = kmem_zalloc(seg_pages(seg) * sizeof (struct vpage), 8298 KM_SLEEP); 8299 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 8300 for (vp = svd->vpage; vp < evp; vp++) { 8301 VPP_SETPROT(vp, svd->prot); 8302 VPP_SETADVICE(vp, svd->advice); 8303 } 8304 } 8305 } 8306 8307 /* 8308 * Dump the pages belonging to this segvn segment. 8309 */ 8310 static void 8311 segvn_dump(struct seg *seg) 8312 { 8313 struct segvn_data *svd; 8314 page_t *pp; 8315 struct anon_map *amp; 8316 ulong_t anon_index; 8317 struct vnode *vp; 8318 u_offset_t off, offset; 8319 pfn_t pfn; 8320 pgcnt_t page, npages; 8321 caddr_t addr; 8322 8323 npages = seg_pages(seg); 8324 svd = (struct segvn_data *)seg->s_data; 8325 vp = svd->vp; 8326 off = offset = svd->offset; 8327 addr = seg->s_base; 8328 8329 if ((amp = svd->amp) != NULL) { 8330 anon_index = svd->anon_index; 8331 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8332 } 8333 8334 for (page = 0; page < npages; page++, offset += PAGESIZE) { 8335 struct anon *ap; 8336 int we_own_it = 0; 8337 8338 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) { 8339 swap_xlate_nopanic(ap, &vp, &off); 8340 } else { 8341 vp = svd->vp; 8342 off = offset; 8343 } 8344 8345 /* 8346 * If pp == NULL, the page either does not exist 8347 * or is exclusively locked. So determine if it 8348 * exists before searching for it.
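 * Note that page_lookup_nowait() returns with the page held SE_SHARED,
 * so we_own_it records that the lock must be dropped after
 * dump_addpage(), while page_exists() takes no page lock at all.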
8349 */ 8350 8351 if ((pp = page_lookup_nowait(vp, off, SE_SHARED))) 8352 we_own_it = 1; 8353 else 8354 pp = page_exists(vp, off); 8355 8356 if (pp) { 8357 pfn = page_pptonum(pp); 8358 dump_addpage(seg->s_as, addr, pfn); 8359 if (we_own_it) 8360 page_unlock(pp); 8361 } 8362 addr += PAGESIZE; 8363 dump_timeleft = dump_timeout; 8364 } 8365 8366 if (amp != NULL) 8367 ANON_LOCK_EXIT(&amp->a_rwlock); 8368 } 8369 8370 #ifdef DEBUG 8371 static uint32_t segvn_pglock_mtbf = 0; 8372 #endif 8373 8374 #define PCACHE_SHWLIST ((page_t *)-2) 8375 #define NOPCACHE_SHWLIST ((page_t *)-1) 8376 8377 /* 8378 * Lock/Unlock anon pages over a given range. Return shadow list. This routine 8379 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages 8380 * to avoid the overhead of per page locking, unlocking for subsequent IOs to 8381 * the same parts of the segment. Currently shadow list creation is only 8382 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are 8383 * tagged with segment pointer, starting virtual address and length. This 8384 * approach for MAP_SHARED segments may add many pcache entries for the same 8385 * set of pages and lead to long hash chains that decrease pcache lookup 8386 * performance. To avoid this issue for shared segments shared anon map and 8387 * starting anon index are used for pcache entry tagging. This allows all 8388 * segments to share pcache entries for the same anon range and reduces pcache 8389 * chain's length as well as memory overhead from duplicate shadow lists and 8390 * pcache entries. 8391 * 8392 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd 8393 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock 8394 * part of softlockcnt accounting is done differently for private and shared 8395 * segments. In private segment case softlock is only incremented when a new 8396 * shadow list is created but not when an existing one is found via 8397 * seg_plookup(). pcache entries have reference count incremented/decremented 8398 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0 8399 * reference count can be purged (and purging is needed before segment can be 8400 * freed). When a private segment pcache entry is purged segvn_reclaim() will 8401 * decrement softlockcnt. Since in private segment case each of its pcache 8402 * entries only belongs to this segment we can expect that when 8403 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8404 * segment purge will succeed and softlockcnt will drop to 0. In shared 8405 * segment case reference count in pcache entry counts active locks from many 8406 * different segments so we can't expect segment purging to succeed even when 8407 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8408 * segment. To be able to determine when there're no pending pagelocks in 8409 * shared segment case we don't rely on purging to make softlockcnt drop to 0 8410 * but instead softlockcnt is incremented and decremented for every 8411 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow 8412 * list was created or an existing one was found. When softlockcnt drops to 0 8413 * this segment no longer has any claims for pcached shadow lists and the 8414 * segment can be freed even if there're still active pcache entries 8415 * shared by this segment anon map.
Shared segment pcache entries belong to 8416 * anon map and are typically removed when anon map is freed after all 8417 * processes destroy the segments that use this anon map. 8418 */ 8419 static int 8420 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, 8421 enum lock_type type, enum seg_rw rw) 8422 { 8423 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8424 size_t np; 8425 pgcnt_t adjustpages; 8426 pgcnt_t npages; 8427 ulong_t anon_index; 8428 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE; 8429 uint_t error; 8430 struct anon_map *amp; 8431 pgcnt_t anpgcnt; 8432 struct page **pplist, **pl, *pp; 8433 caddr_t a; 8434 size_t page; 8435 caddr_t lpgaddr, lpgeaddr; 8436 anon_sync_obj_t cookie; 8437 int anlock; 8438 struct anon_map *pamp; 8439 caddr_t paddr; 8440 seg_preclaim_cbfunc_t preclaim_callback; 8441 size_t pgsz; 8442 int use_pcache; 8443 size_t wlen; 8444 uint_t pflags = 0; 8445 int sftlck_sbase = 0; 8446 int sftlck_send = 0; 8447 8448 #ifdef DEBUG 8449 if (type == L_PAGELOCK && segvn_pglock_mtbf) { 8450 hrtime_t ts = gethrtime(); 8451 if ((ts % segvn_pglock_mtbf) == 0) { 8452 return (ENOTSUP); 8453 } 8454 if ((ts % segvn_pglock_mtbf) == 1) { 8455 return (EFAULT); 8456 } 8457 } 8458 #endif 8459 8460 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START, 8461 "segvn_pagelock: start seg %p addr %p", seg, addr); 8462 8463 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8464 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); 8465 8466 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8467 8468 /* 8469 * for now we only support pagelock to anon memory. We would have to 8470 * check protections for vnode objects and call into the vnode driver. 8471 * That's too much for a fast path. Let the fault entry point handle 8472 * it. 8473 */ 8474 if (svd->vp != NULL) { 8475 if (type == L_PAGELOCK) { 8476 error = ENOTSUP; 8477 goto out; 8478 } 8479 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL"); 8480 } 8481 if ((amp = svd->amp) == NULL) { 8482 if (type == L_PAGELOCK) { 8483 error = EFAULT; 8484 goto out; 8485 } 8486 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL"); 8487 } 8488 if (rw != S_READ && rw != S_WRITE) { 8489 if (type == L_PAGELOCK) { 8490 error = ENOTSUP; 8491 goto out; 8492 } 8493 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw"); 8494 } 8495 8496 if (seg->s_szc != 0) { 8497 /* 8498 * We are adjusting the pagelock region to the large page size 8499 * boundary because the unlocked part of a large page cannot 8500 * be freed anyway unless all constituent pages of a large 8501 * page are locked. Bigger regions reduce pcache chain length 8502 * and improve lookup performance. The tradeoff is that the 8503 * very first segvn_pagelock() call for a given page is more 8504 * expensive if only 1 page_t is needed for IO. This is only 8505 * an issue if pcache entry doesn't get reused by several 8506 * subsequent calls. We optimize here for the case when pcache 8507 * is heavily used by repeated IOs to the same address range. 8508 * 8509 * Note segment's page size cannot change while we are holding 8510 * as lock. And then it cannot change while softlockcnt is 8511 * not 0. This will allow us to correctly recalculate large 8512 * page size region for the matching pageunlock/reclaim call 8513 * since as_pageunlock() caller must always match 8514 * as_pagelock() call's addr and len. 8515 * 8516 * For pageunlock *ppp points to the pointer of page_t that 8517 * corresponds to the real unadjusted start address. 
Similar 8518 * for pagelock *ppp must point to the pointer of page_t that 8519 * corresponds to the real unadjusted start address. 8520 */ 8521 pgsz = page_get_pagesize(seg->s_szc); 8522 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 8523 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8524 } else if (len < segvn_pglock_comb_thrshld) { 8525 lpgaddr = addr; 8526 lpgeaddr = addr + len; 8527 adjustpages = 0; 8528 pgsz = PAGESIZE; 8529 } else { 8530 /* 8531 * Align the address range of large enough requests to allow 8532 * combining of different shadow lists into 1 to reduce memory 8533 * overhead from potentially overlapping large shadow lists 8534 * (worst case is we have a 1MB IO into buffers with start 8535 * addresses separated by 4K). Alignment is only possible if 8536 * padded chunks have sufficient access permissions. Note 8537 * permissions won't change between L_PAGELOCK and 8538 * L_PAGEUNLOCK calls since non 0 softlockcnt will force 8539 * segvn_setprot() to wait until softlockcnt drops to 0. This 8540 * allows us to determine in L_PAGEUNLOCK the same range we 8541 * computed in L_PAGELOCK. 8542 * 8543 * If alignment is limited by segment ends set 8544 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when 8545 * these flags are set bump softlockcnt_sbase/softlockcnt_send 8546 * per segment counters. In L_PAGEUNLOCK case decrease 8547 * softlockcnt_sbase/softlockcnt_send counters if 8548 * sftlck_sbase/sftlck_send flags are set. When 8549 * softlockcnt_sbase/softlockcnt_send are non 0 8550 * segvn_concat()/segvn_extend_prev()/segvn_extend_next() 8551 * won't merge the segments. This restriction combined with 8552 * restriction on segment unmapping and splitting for segments 8553 * that have non 0 softlockcnt allows L_PAGEUNLOCK to 8554 * correctly determine the same range that was previously 8555 * locked by matching L_PAGELOCK. 
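 * As an illustrative sketch of the intended arithmetic (the real
 * tunables are segvn_pglock_comb_balign/segvn_pglock_comb_bshift; a
 * 64K combining alignment is assumed here purely for the example):
 *
 *	lpgaddr  = (caddr_t)P2ALIGN((uintptr_t)addr, 64 * 1024);
 *	lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)(addr + len), 64 * 1024);
 *
 * With this widening, two 1MB IOs whose buffers start only 4K apart
 * resolve to the same [lpgaddr, lpgeaddr) range, so the second request
 * reuses the pcache entry built by the first instead of creating a
 * nearly identical shadow list.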
8556 */ 8557 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16); 8558 pgsz = PAGESIZE; 8559 if (svd->type == MAP_PRIVATE) { 8560 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr, 8561 segvn_pglock_comb_balign); 8562 if (lpgaddr < seg->s_base) { 8563 lpgaddr = seg->s_base; 8564 sftlck_sbase = 1; 8565 } 8566 } else { 8567 ulong_t aix = svd->anon_index + seg_page(seg, addr); 8568 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign); 8569 if (aaix < svd->anon_index) { 8570 lpgaddr = seg->s_base; 8571 sftlck_sbase = 1; 8572 } else { 8573 lpgaddr = addr - ptob(aix - aaix); 8574 ASSERT(lpgaddr >= seg->s_base); 8575 } 8576 } 8577 if (svd->pageprot && lpgaddr != addr) { 8578 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)]; 8579 struct vpage *evp = &svd->vpage[seg_page(seg, addr)]; 8580 while (vp < evp) { 8581 if ((VPP_PROT(vp) & protchk) == 0) { 8582 break; 8583 } 8584 vp++; 8585 } 8586 if (vp < evp) { 8587 lpgaddr = addr; 8588 pflags = 0; 8589 } 8590 } 8591 lpgeaddr = addr + len; 8592 if (pflags) { 8593 if (svd->type == MAP_PRIVATE) { 8594 lpgeaddr = (caddr_t)P2ROUNDUP( 8595 (uintptr_t)lpgeaddr, 8596 segvn_pglock_comb_balign); 8597 } else { 8598 ulong_t aix = svd->anon_index + 8599 seg_page(seg, lpgeaddr); 8600 ulong_t aaix = P2ROUNDUP(aix, 8601 segvn_pglock_comb_palign); 8602 if (aaix < aix) { 8603 lpgeaddr = 0; 8604 } else { 8605 lpgeaddr += ptob(aaix - aix); 8606 } 8607 } 8608 if (lpgeaddr == 0 || 8609 lpgeaddr > seg->s_base + seg->s_size) { 8610 lpgeaddr = seg->s_base + seg->s_size; 8611 sftlck_send = 1; 8612 } 8613 } 8614 if (svd->pageprot && lpgeaddr != addr + len) { 8615 struct vpage *vp; 8616 struct vpage *evp; 8617 8618 vp = &svd->vpage[seg_page(seg, addr + len)]; 8619 evp = &svd->vpage[seg_page(seg, lpgeaddr)]; 8620 8621 while (vp < evp) { 8622 if ((VPP_PROT(vp) & protchk) == 0) { 8623 break; 8624 } 8625 vp++; 8626 } 8627 if (vp < evp) { 8628 lpgeaddr = addr + len; 8629 } 8630 } 8631 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8632 } 8633 8634 /* 8635 * For MAP_SHARED segments we create pcache entries tagged by amp and 8636 * anon index so that we can share pcache entries with other segments 8637 * that map this amp. For private segments pcache entries are tagged 8638 * with segment and virtual address. 8639 */ 8640 if (svd->type == MAP_SHARED) { 8641 pamp = amp; 8642 paddr = (caddr_t)((lpgaddr - seg->s_base) + 8643 ptob(svd->anon_index)); 8644 preclaim_callback = shamp_reclaim; 8645 } else { 8646 pamp = NULL; 8647 paddr = lpgaddr; 8648 preclaim_callback = segvn_reclaim; 8649 } 8650 8651 if (type == L_PAGEUNLOCK) { 8652 VM_STAT_ADD(segvnvmstats.pagelock[0]); 8653 8654 /* 8655 * update hat ref bits for /proc. We need to make sure 8656 * that threads tracing the ref and mod bits of the 8657 * address space get the right data. 8658 * Note: page ref and mod bits are updated at reclaim time 8659 */ 8660 if (seg->s_as->a_vbits) { 8661 for (a = addr; a < addr + len; a += PAGESIZE) { 8662 if (rw == S_WRITE) { 8663 hat_setstat(seg->s_as, a, 8664 PAGESIZE, P_REF | P_MOD); 8665 } else { 8666 hat_setstat(seg->s_as, a, 8667 PAGESIZE, P_REF); 8668 } 8669 } 8670 } 8671 8672 /* 8673 * Check the shadow list entry after the last page used in 8674 * this IO request. If it's NOPCACHE_SHWLIST the shadow list 8675 * was not inserted into pcache and is not large page 8676 * adjusted. In this case call reclaim callback directly and 8677 * don't adjust the shadow list start and size for large 8678 * pages. 
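 * The slot checked here is the extra entry allocated as pl[npages] at
 * L_PAGELOCK time. For a list that went into pcache it is either the
 * PCACHE_SHWLIST marker (when no large page adjustment moved the end)
 * or one of the adjusted list's own anon pages, which is why the
 * ASSERT below also accepts a page with a swapfs vnode.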
8679 */ 8680 npages = btop(len); 8681 if ((*ppp)[npages] == NOPCACHE_SHWLIST) { 8682 void *ptag; 8683 if (pamp != NULL) { 8684 ASSERT(svd->type == MAP_SHARED); 8685 ptag = (void *)pamp; 8686 paddr = (caddr_t)((addr - seg->s_base) + 8687 ptob(svd->anon_index)); 8688 } else { 8689 ptag = (void *)seg; 8690 paddr = addr; 8691 } 8692 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0); 8693 } else { 8694 ASSERT((*ppp)[npages] == PCACHE_SHWLIST || 8695 IS_SWAPFSVP((*ppp)[npages]->p_vnode)); 8696 len = lpgeaddr - lpgaddr; 8697 npages = btop(len); 8698 seg_pinactive(seg, pamp, paddr, len, 8699 *ppp - adjustpages, rw, pflags, preclaim_callback); 8700 } 8701 8702 if (pamp != NULL) { 8703 ASSERT(svd->type == MAP_SHARED); 8704 ASSERT(svd->softlockcnt >= npages); 8705 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages); 8706 } 8707 8708 if (sftlck_sbase) { 8709 ASSERT(svd->softlockcnt_sbase > 0); 8710 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, -1); 8711 } 8712 if (sftlck_send) { 8713 ASSERT(svd->softlockcnt_send > 0); 8714 atomic_add_long((ulong_t *)&svd->softlockcnt_send, -1); 8715 } 8716 8717 /* 8718 * If someone is blocked while unmapping, we purge 8719 * segment page cache and thus reclaim pplist synchronously 8720 * without waiting for seg_pasync_thread. This speeds up 8721 * unmapping in cases where munmap(2) is called, while 8722 * raw async i/o is still in progress or where a thread 8723 * exits on data fault in a multithreaded application. 8724 */ 8725 if (AS_ISUNMAPWAIT(seg->s_as)) { 8726 if (svd->softlockcnt == 0) { 8727 mutex_enter(&seg->s_as->a_contents); 8728 if (AS_ISUNMAPWAIT(seg->s_as)) { 8729 AS_CLRUNMAPWAIT(seg->s_as); 8730 cv_broadcast(&seg->s_as->a_cv); 8731 } 8732 mutex_exit(&seg->s_as->a_contents); 8733 } else if (pamp == NULL) { 8734 /* 8735 * softlockcnt is not 0 and this is a 8736 * MAP_PRIVATE segment. Try to purge its 8737 * pcache entries to reduce softlockcnt. 8738 * If it drops to 0 segvn_reclaim() 8739 * will wake up a thread waiting on 8740 * unmapwait flag. 8741 * 8742 * We don't purge MAP_SHARED segments with non 8743 * 0 softlockcnt since IO is still in progress 8744 * for such segments. 8745 */ 8746 ASSERT(svd->type == MAP_PRIVATE); 8747 segvn_purge(seg); 8748 } 8749 } 8750 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8751 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 8752 "segvn_pagelock: unlock seg %p addr %p", seg, addr); 8753 return (0); 8754 } 8755 8756 /* The L_PAGELOCK case ... */ 8757 8758 VM_STAT_ADD(segvnvmstats.pagelock[1]); 8759 8760 /* 8761 * For MAP_SHARED segments we have to check protections before 8762 * seg_plookup() since pcache entries may be shared by many segments 8763 * with potentially different page protections. 
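 * For MAP_PRIVATE segments this check can be deferred until after a
 * pcache miss (below): a hit means this segment itself created the
 * entry with sufficient protections, and those protections cannot have
 * changed since a non 0 softlockcnt makes segvn_setprot() wait.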
8764 */ 8765 if (pamp != NULL) { 8766 ASSERT(svd->type == MAP_SHARED); 8767 if (svd->pageprot == 0) { 8768 if ((svd->prot & protchk) == 0) { 8769 error = EACCES; 8770 goto out; 8771 } 8772 } else { 8773 /* 8774 * check page protections 8775 */ 8776 caddr_t ea; 8777 8778 if (seg->s_szc) { 8779 a = lpgaddr; 8780 ea = lpgeaddr; 8781 } else { 8782 a = addr; 8783 ea = addr + len; 8784 } 8785 for (; a < ea; a += pgsz) { 8786 struct vpage *vp; 8787 8788 ASSERT(seg->s_szc == 0 || 8789 sameprot(seg, a, pgsz)); 8790 vp = &svd->vpage[seg_page(seg, a)]; 8791 if ((VPP_PROT(vp) & protchk) == 0) { 8792 error = EACCES; 8793 goto out; 8794 } 8795 } 8796 } 8797 } 8798 8799 /* 8800 * try to find pages in segment page cache 8801 */ 8802 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags); 8803 if (pplist != NULL) { 8804 if (pamp != NULL) { 8805 npages = btop((uintptr_t)(lpgeaddr - lpgaddr)); 8806 ASSERT(svd->type == MAP_SHARED); 8807 atomic_add_long((ulong_t *)&svd->softlockcnt, 8808 npages); 8809 } 8810 if (sftlck_sbase) { 8811 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1); 8812 } 8813 if (sftlck_send) { 8814 atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1); 8815 } 8816 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8817 *ppp = pplist + adjustpages; 8818 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END, 8819 "segvn_pagelock: cache hit seg %p addr %p", seg, addr); 8820 return (0); 8821 } 8822 8823 /* 8824 * For MAP_SHARED segments we already verified above that segment 8825 * protections allow this pagelock operation. 8826 */ 8827 if (pamp == NULL) { 8828 ASSERT(svd->type == MAP_PRIVATE); 8829 if (svd->pageprot == 0) { 8830 if ((svd->prot & protchk) == 0) { 8831 error = EACCES; 8832 goto out; 8833 } 8834 if (svd->prot & PROT_WRITE) { 8835 wlen = lpgeaddr - lpgaddr; 8836 } else { 8837 wlen = 0; 8838 ASSERT(rw == S_READ); 8839 } 8840 } else { 8841 int wcont = 1; 8842 /* 8843 * check page protections 8844 */ 8845 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) { 8846 struct vpage *vp; 8847 8848 ASSERT(seg->s_szc == 0 || 8849 sameprot(seg, a, pgsz)); 8850 vp = &svd->vpage[seg_page(seg, a)]; 8851 if ((VPP_PROT(vp) & protchk) == 0) { 8852 error = EACCES; 8853 goto out; 8854 } 8855 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) { 8856 wlen += pgsz; 8857 } else { 8858 wcont = 0; 8859 ASSERT(rw == S_READ); 8860 } 8861 } 8862 } 8863 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr); 8864 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr); 8865 } 8866 8867 /* 8868 * Only build large page adjusted shadow list if we expect to insert 8869 * it into pcache. For large enough pages it's a big overhead to 8870 * create a shadow list of the entire large page. But this overhead 8871 * should be amortized over repeated pcache hits on subsequent reuse 8872 * of this shadow list (IO into any range within this shadow list will 8873 * find it in pcache since we large page align the request for pcache 8874 * lookups). pcache performance is improved with bigger shadow lists 8875 * as it reduces the time to pcache the entire big segment and reduces 8876 * pcache chain length. 8877 */ 8878 if (seg_pinsert_check(seg, pamp, paddr, 8879 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) { 8880 addr = lpgaddr; 8881 len = lpgeaddr - lpgaddr; 8882 use_pcache = 1; 8883 } else { 8884 use_pcache = 0; 8885 /* 8886 * Since this entry will not be inserted into the pcache, we 8887 * will not do any adjustments to the starting address or 8888 * size of the memory to be locked. 
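 * adjustpages is cleared below so that *ppp points at the first entry
 * of the shadow list rather than at an offset into a large page
 * aligned one.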
8889 */ 8890 adjustpages = 0; 8891 } 8892 npages = btop(len); 8893 8894 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP); 8895 pl = pplist; 8896 *ppp = pplist + adjustpages; 8897 /* 8898 * If use_pcache is 0 this shadow list is not large page adjusted. 8899 * Record this info in the last entry of shadow array so that 8900 * L_PAGEUNLOCK can determine if it should large page adjust the 8901 * address range to find the real range that was locked. 8902 */ 8903 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST; 8904 8905 page = seg_page(seg, addr); 8906 anon_index = svd->anon_index + page; 8907 8908 anlock = 0; 8909 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8910 ASSERT(amp->a_szc >= seg->s_szc); 8911 anpgcnt = page_get_pagecnt(amp->a_szc); 8912 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) { 8913 struct anon *ap; 8914 struct vnode *vp; 8915 u_offset_t off; 8916 8917 /* 8918 * Lock and unlock anon array only once per large page. 8919 * anon_array_enter() locks the root anon slot according to 8920 * a_szc which can't change while anon map is locked. We lock 8921 * anon the first time through this loop and each time we 8922 * reach anon index that corresponds to a root of a large 8923 * page. 8924 */ 8925 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) { 8926 ASSERT(anlock == 0); 8927 anon_array_enter(amp, anon_index, &cookie); 8928 anlock = 1; 8929 } 8930 ap = anon_get_ptr(amp->ahp, anon_index); 8931 8932 /* 8933 * We must never use seg_pcache for COW pages 8934 * because we might end up with original page still 8935 * lying in seg_pcache even after private page is 8936 * created. This leads to data corruption as 8937 * aio_write refers to the page still in cache 8938 * while all other accesses refer to the private 8939 * page. 8940 */ 8941 if (ap == NULL || ap->an_refcnt != 1) { 8942 struct vpage *vpage; 8943 8944 if (seg->s_szc) { 8945 error = EFAULT; 8946 break; 8947 } 8948 if (svd->vpage != NULL) { 8949 vpage = &svd->vpage[seg_page(seg, a)]; 8950 } else { 8951 vpage = NULL; 8952 } 8953 ASSERT(anlock); 8954 anon_array_exit(&cookie); 8955 anlock = 0; 8956 pp = NULL; 8957 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0, 8958 vpage, &pp, 0, F_INVAL, rw, 1); 8959 if (error) { 8960 error = fc_decode(error); 8961 break; 8962 } 8963 anon_array_enter(amp, anon_index, &cookie); 8964 anlock = 1; 8965 ap = anon_get_ptr(amp->ahp, anon_index); 8966 if (ap == NULL || ap->an_refcnt != 1) { 8967 error = EFAULT; 8968 break; 8969 } 8970 } 8971 swap_xlate(ap, &vp, &off); 8972 pp = page_lookup_nowait(vp, off, SE_SHARED); 8973 if (pp == NULL) { 8974 error = EFAULT; 8975 break; 8976 } 8977 if (ap->an_pvp != NULL) { 8978 anon_swap_free(ap, pp); 8979 } 8980 /* 8981 * Unlock anon if this is the last slot in a large page.
8982 */ 8983 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) { 8984 ASSERT(anlock); 8985 anon_array_exit(&cookie); 8986 anlock = 0; 8987 } 8988 *pplist++ = pp; 8989 } 8990 if (anlock) { /* Ensure the lock is dropped */ 8991 anon_array_exit(&cookie); 8992 } 8993 ANON_LOCK_EXIT(&amp->a_rwlock); 8994 8995 if (a >= addr + len) { 8996 atomic_add_long((ulong_t *)&svd->softlockcnt, npages); 8997 if (pamp != NULL) { 8998 ASSERT(svd->type == MAP_SHARED); 8999 atomic_add_long((ulong_t *)&pamp->a_softlockcnt, 9000 npages); 9001 wlen = len; 9002 } 9003 if (sftlck_sbase) { 9004 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1); 9005 } 9006 if (sftlck_send) { 9007 atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1); 9008 } 9009 if (use_pcache) { 9010 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl, 9011 rw, pflags, preclaim_callback); 9012 } 9013 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9014 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END, 9015 "segvn_pagelock: cache fill seg %p addr %p", seg, addr); 9016 return (0); 9017 } 9018 9019 pplist = pl; 9020 np = ((uintptr_t)(a - addr)) >> PAGESHIFT; 9021 while (np > (uint_t)0) { 9022 ASSERT(PAGE_LOCKED(*pplist)); 9023 page_unlock(*pplist); 9024 np--; 9025 pplist++; 9026 } 9027 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9028 out: 9029 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9030 *ppp = NULL; 9031 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 9032 "segvn_pagelock: cache miss seg %p addr %p", seg, addr); 9033 return (error); 9034 } 9035 9036 /* 9037 * purge any cached pages in the I/O page cache 9038 */ 9039 static void 9040 segvn_purge(struct seg *seg) 9041 { 9042 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9043 9044 /* 9045 * pcache is only used by pure anon segments. 9046 */ 9047 if (svd->amp == NULL || svd->vp != NULL) { 9048 return; 9049 } 9050 9051 /* 9052 * For MAP_SHARED segments non 0 segment's softlockcnt means 9053 * active IO is still in progress via this segment. So we only 9054 * purge MAP_SHARED segments when their softlockcnt is 0. 9055 */ 9056 if (svd->type == MAP_PRIVATE) { 9057 if (svd->softlockcnt) { 9058 seg_ppurge(seg, NULL, 0); 9059 } 9060 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) { 9061 seg_ppurge(seg, svd->amp, 0); 9062 } 9063 } 9064 9065 /* 9066 * If async argument is not 0 we are called from pcache async thread and don't 9067 * hold AS lock. 9068 */ 9069 9070 /*ARGSUSED*/ 9071 static int 9072 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9073 enum seg_rw rw, int async) 9074 { 9075 struct seg *seg = (struct seg *)ptag; 9076 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9077 pgcnt_t np, npages; 9078 struct page **pl; 9079 9080 npages = np = btop(len); 9081 ASSERT(npages); 9082 9083 ASSERT(svd->vp == NULL && svd->amp != NULL); 9084 ASSERT(svd->softlockcnt >= npages); 9085 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9086 9087 pl = pplist; 9088 9089 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9090 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9091 9092 while (np > (uint_t)0) { 9093 if (rw == S_WRITE) { 9094 hat_setrefmod(*pplist); 9095 } else { 9096 hat_setref(*pplist); 9097 } 9098 page_unlock(*pplist); 9099 np--; 9100 pplist++; 9101 } 9102 9103 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9104 9105 /* 9106 * If we are pcache async thread we don't hold AS lock. This means if 9107 * softlockcnt drops to 0 after the decrement below address space may 9108 * get freed.
We can't allow it since after softlock decrement to 0 we 9109 * still need to access as structure for possible wakeup of unmap 9110 * waiters. To prevent the disappearance of as we take this segment 9111 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to 9112 * make sure this routine completes before segment is freed. 9113 * 9114 * The second complication we have to deal with in async case is a 9115 * possibility of missed wake up of unmap wait thread. When we don't 9116 * hold as lock here we may take a_contents lock before unmap wait 9117 * thread that was first to see softlockcnt was still not 0. As a 9118 * result we'll fail to wake up an unmap wait thread. To avoid this 9119 * race we set nounmapwait flag in as structure if we drop softlockcnt 9120 * to 0 when we were called by pcache async thread. unmapwait thread 9121 * will not block if this flag is set. 9122 */ 9123 if (async) { 9124 mutex_enter(&svd->segfree_syncmtx); 9125 } 9126 9127 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) { 9128 if (async || AS_ISUNMAPWAIT(seg->s_as)) { 9129 mutex_enter(&seg->s_as->a_contents); 9130 if (async) { 9131 AS_SETNOUNMAPWAIT(seg->s_as); 9132 } 9133 if (AS_ISUNMAPWAIT(seg->s_as)) { 9134 AS_CLRUNMAPWAIT(seg->s_as); 9135 cv_broadcast(&seg->s_as->a_cv); 9136 } 9137 mutex_exit(&seg->s_as->a_contents); 9138 } 9139 } 9140 9141 if (async) { 9142 mutex_exit(&svd->segfree_syncmtx); 9143 } 9144 return (0); 9145 } 9146 9147 /*ARGSUSED*/ 9148 static int 9149 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9150 enum seg_rw rw, int async) 9151 { 9152 amp_t *amp = (amp_t *)ptag; 9153 pgcnt_t np, npages; 9154 struct page **pl; 9155 9156 npages = np = btop(len); 9157 ASSERT(npages); 9158 ASSERT(amp->a_softlockcnt >= npages); 9159 9160 pl = pplist; 9161 9162 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9163 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9164 9165 while (np > (uint_t)0) { 9166 if (rw == S_WRITE) { 9167 hat_setrefmod(*pplist); 9168 } else { 9169 hat_setref(*pplist); 9170 } 9171 page_unlock(*pplist); 9172 np--; 9173 pplist++; 9174 } 9175 9176 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9177 9178 /* 9179 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt 9180 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0 9181 * and anonmap_purge() acquires a_purgemtx. 9182 */ 9183 mutex_enter(&amp->a_purgemtx); 9184 if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) && 9185 amp->a_purgewait) { 9186 amp->a_purgewait = 0; 9187 cv_broadcast(&amp->a_purgecv); 9188 } 9189 mutex_exit(&amp->a_purgemtx); 9190 return (0); 9191 } 9192 9193 /* 9194 * get a memory ID for an addr in a given segment 9195 * 9196 * XXX only creates PAGESIZE pages if anon slots are not initialized. 9197 * At fault time they will be relocated into larger pages.
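 * The resulting memid encodes (as, vaddr) for MAP_PRIVATE mappings,
 * (vnode, file offset) for MAP_SHARED vnode mappings, and
 * (anon pointer, page offset) for MAP_SHARED anonymous memory, where a
 * zero filled anon page is allocated on demand so that an id always
 * exists.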
9198 */ 9199 static int 9200 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 9201 { 9202 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9203 struct anon *ap = NULL; 9204 ulong_t anon_index; 9205 struct anon_map *amp; 9206 anon_sync_obj_t cookie; 9207 9208 if (svd->type == MAP_PRIVATE) { 9209 memidp->val[0] = (uintptr_t)seg->s_as; 9210 memidp->val[1] = (uintptr_t)addr; 9211 return (0); 9212 } 9213 9214 if (svd->type == MAP_SHARED) { 9215 if (svd->vp) { 9216 memidp->val[0] = (uintptr_t)svd->vp; 9217 memidp->val[1] = (u_longlong_t)svd->offset + 9218 (uintptr_t)(addr - seg->s_base); 9219 return (0); 9220 } else { 9221 9222 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 9223 if ((amp = svd->amp) != NULL) { 9224 anon_index = svd->anon_index + 9225 seg_page(seg, addr); 9226 } 9227 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9228 9229 ASSERT(amp != NULL); 9230 9231 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 9232 anon_array_enter(amp, anon_index, &cookie); 9233 ap = anon_get_ptr(amp->ahp, anon_index); 9234 if (ap == NULL) { 9235 page_t *pp; 9236 9237 pp = anon_zero(seg, addr, &ap, svd->cred); 9238 if (pp == NULL) { 9239 anon_array_exit(&cookie); 9240 ANON_LOCK_EXIT(&amp->a_rwlock); 9241 return (ENOMEM); 9242 } 9243 ASSERT(anon_get_ptr(amp->ahp, anon_index) 9244 == NULL); 9245 (void) anon_set_ptr(amp->ahp, anon_index, 9246 ap, ANON_SLEEP); 9247 page_unlock(pp); 9248 } 9249 9250 anon_array_exit(&cookie); 9251 ANON_LOCK_EXIT(&amp->a_rwlock); 9252 9253 memidp->val[0] = (uintptr_t)ap; 9254 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 9255 return (0); 9256 } 9257 } 9258 return (EINVAL); 9259 } 9260 9261 static int 9262 sameprot(struct seg *seg, caddr_t a, size_t len) 9263 { 9264 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9265 struct vpage *vpage; 9266 spgcnt_t pages = btop(len); 9267 uint_t prot; 9268 9269 if (svd->pageprot == 0) 9270 return (1); 9271 9272 ASSERT(svd->vpage != NULL); 9273 9274 vpage = &svd->vpage[seg_page(seg, a)]; 9275 prot = VPP_PROT(vpage); 9276 vpage++; 9277 pages--; 9278 while (pages-- > 0) { 9279 if (prot != VPP_PROT(vpage)) 9280 return (0); 9281 vpage++; 9282 } 9283 return (1); 9284 } 9285 9286 /* 9287 * Get memory allocation policy info for specified address in given segment 9288 */ 9289 static lgrp_mem_policy_info_t * 9290 segvn_getpolicy(struct seg *seg, caddr_t addr) 9291 { 9292 struct anon_map *amp; 9293 ulong_t anon_index; 9294 lgrp_mem_policy_info_t *policy_info; 9295 struct segvn_data *svn_data; 9296 u_offset_t vn_off; 9297 vnode_t *vp; 9298 9299 ASSERT(seg != NULL); 9300 9301 svn_data = (struct segvn_data *)seg->s_data; 9302 if (svn_data == NULL) 9303 return (NULL); 9304 9305 /* 9306 * Get policy info for private or shared memory 9307 */ 9308 if (svn_data->type != MAP_SHARED) { 9309 if (svn_data->tr_state != SEGVN_TR_ON) { 9310 policy_info = &svn_data->policy_info; 9311 } else { 9312 policy_info = &svn_data->tr_policy_info; 9313 ASSERT(policy_info->mem_policy == 9314 LGRP_MEM_POLICY_NEXT_SEG); 9315 } 9316 } else { 9317 amp = svn_data->amp; 9318 anon_index = svn_data->anon_index + seg_page(seg, addr); 9319 vp = svn_data->vp; 9320 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base); 9321 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off); 9322 } 9323 9324 return (policy_info); 9325 } 9326 9327 /*ARGSUSED*/ 9328 static int 9329 segvn_capable(struct seg *seg, segcapability_t capability) 9330 { 9331 return (0); 9332 } 9333 9334 /* 9335 * Bind text vnode segment to an amp.
If we bind successfully mappings will be 9336 * established to per vnode mapping per lgroup amp pages instead of to vnode 9337 * pages. There's one amp per vnode text mapping per lgroup. Many processes 9338 * may share the same text replication amp. If a suitable amp doesn't already 9339 * exist in svntr hash table create a new one. We may fail to bind to amp if 9340 * segment is not eligible for text replication. Code below first checks for 9341 * these conditions. If binding is successful segment tr_state is set to on 9342 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and 9343 * svd->amp remains as NULL. 9344 */ 9345 static void 9346 segvn_textrepl(struct seg *seg) 9347 { 9348 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9349 vnode_t *vp = svd->vp; 9350 u_offset_t off = svd->offset; 9351 size_t size = seg->s_size; 9352 u_offset_t eoff = off + size; 9353 uint_t szc = seg->s_szc; 9354 ulong_t hash = SVNTR_HASH_FUNC(vp); 9355 svntr_t *svntrp; 9356 struct vattr va; 9357 proc_t *p = seg->s_as->a_proc; 9358 lgrp_id_t lgrp_id; 9359 lgrp_id_t olid; 9360 int first; 9361 struct anon_map *amp; 9362 9363 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9364 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9365 ASSERT(p != NULL); 9366 ASSERT(svd->tr_state == SEGVN_TR_INIT); 9367 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9368 ASSERT(svd->flags & MAP_TEXT); 9369 ASSERT(svd->type == MAP_PRIVATE); 9370 ASSERT(vp != NULL && svd->amp == NULL); 9371 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 9372 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0); 9373 ASSERT(seg->s_as != &kas); 9374 ASSERT(off < eoff); 9375 ASSERT(svntr_hashtab != NULL); 9376 9377 /* 9378 * If numa optimizations are no longer desired bail out. 9379 */ 9380 if (!lgrp_optimizations()) { 9381 svd->tr_state = SEGVN_TR_OFF; 9382 return; 9383 } 9384 9385 /* 9386 * Avoid creating anon maps with size bigger than the file size. 9387 * If VOP_GETATTR() call fails bail out. 9388 */ 9389 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME; 9390 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) { 9391 svd->tr_state = SEGVN_TR_OFF; 9392 SEGVN_TR_ADDSTAT(gaerr); 9393 return; 9394 } 9395 if (btopr(va.va_size) < btopr(eoff)) { 9396 svd->tr_state = SEGVN_TR_OFF; 9397 SEGVN_TR_ADDSTAT(overmap); 9398 return; 9399 } 9400 9401 /* 9402 * VVMEXEC may not be set yet if exec() prefaults text segment. Set 9403 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED 9404 * mapping that checks if trcache for this vnode needs to be 9405 * invalidated can't miss us. 9406 */ 9407 if (!(vp->v_flag & VVMEXEC)) { 9408 mutex_enter(&vp->v_lock); 9409 vp->v_flag |= VVMEXEC; 9410 mutex_exit(&vp->v_lock); 9411 } 9412 mutex_enter(&svntr_hashtab[hash].tr_lock); 9413 /* 9414 * Bail out if potentially MAP_SHARED writable mappings exist to this 9415 * vnode. We don't want to use old file contents from existing 9416 * replicas if this mapping was established after the original file 9417 * was changed. 
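 * The converse case, where a writable MAP_SHARED mapping is created
 * after replicas already exist, is handled by segvn_inval_trcache()
 * below, which marks the matching svntr entries invalid.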
9418 */ 9419 if (vn_is_mapped(vp, V_WRITE)) { 9420 mutex_exit(&svntr_hashtab[hash].tr_lock); 9421 svd->tr_state = SEGVN_TR_OFF; 9422 SEGVN_TR_ADDSTAT(wrcnt); 9423 return; 9424 } 9425 svntrp = svntr_hashtab[hash].tr_head; 9426 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9427 ASSERT(svntrp->tr_refcnt != 0); 9428 if (svntrp->tr_vp != vp) { 9429 continue; 9430 } 9431 9432 /* 9433 * Bail out if the file or its attributes were changed after 9434 * this replication entry was created since we need to use the 9435 * latest file contents. Note that mtime test alone is not 9436 * sufficient because a user can explicitly change mtime via 9437 * utimes(2) interfaces back to the old value after modifiying 9438 * the file contents. To detect this case we also have to test 9439 * ctime which among other things records the time of the last 9440 * mtime change by utimes(2). ctime is not changed when the file 9441 * is only read or executed so we expect that typically existing 9442 * replication amp's can be used most of the time. 9443 */ 9444 if (!svntrp->tr_valid || 9445 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec || 9446 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec || 9447 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec || 9448 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) { 9449 mutex_exit(&svntr_hashtab[hash].tr_lock); 9450 svd->tr_state = SEGVN_TR_OFF; 9451 SEGVN_TR_ADDSTAT(stale); 9452 return; 9453 } 9454 /* 9455 * if off, eoff and szc match current segment we found the 9456 * existing entry we can use. 9457 */ 9458 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff && 9459 svntrp->tr_szc == szc) { 9460 break; 9461 } 9462 /* 9463 * Don't create different but overlapping in file offsets 9464 * entries to avoid replication of the same file pages more 9465 * than once per lgroup. 9466 */ 9467 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) || 9468 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) { 9469 mutex_exit(&svntr_hashtab[hash].tr_lock); 9470 svd->tr_state = SEGVN_TR_OFF; 9471 SEGVN_TR_ADDSTAT(overlap); 9472 return; 9473 } 9474 } 9475 /* 9476 * If we didn't find existing entry create a new one. 9477 */ 9478 if (svntrp == NULL) { 9479 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP); 9480 if (svntrp == NULL) { 9481 mutex_exit(&svntr_hashtab[hash].tr_lock); 9482 svd->tr_state = SEGVN_TR_OFF; 9483 SEGVN_TR_ADDSTAT(nokmem); 9484 return; 9485 } 9486 #ifdef DEBUG 9487 { 9488 lgrp_id_t i; 9489 for (i = 0; i < NLGRPS_MAX; i++) { 9490 ASSERT(svntrp->tr_amp[i] == NULL); 9491 } 9492 } 9493 #endif /* DEBUG */ 9494 svntrp->tr_vp = vp; 9495 svntrp->tr_off = off; 9496 svntrp->tr_eoff = eoff; 9497 svntrp->tr_szc = szc; 9498 svntrp->tr_valid = 1; 9499 svntrp->tr_mtime = va.va_mtime; 9500 svntrp->tr_ctime = va.va_ctime; 9501 svntrp->tr_refcnt = 0; 9502 svntrp->tr_next = svntr_hashtab[hash].tr_head; 9503 svntr_hashtab[hash].tr_head = svntrp; 9504 } 9505 first = 1; 9506 again: 9507 /* 9508 * We want to pick a replica with pages on main thread's (t_tid = 1, 9509 * aka T1) lgrp. Currently text replication is only optimized for 9510 * workloads that either have all threads of a process on the same 9511 * lgrp or execute their large text primarily on main thread. 9512 */ 9513 lgrp_id = p->p_t1_lgrpid; 9514 if (lgrp_id == LGRP_NONE) { 9515 /* 9516 * In case exec() prefaults text on non main thread use 9517 * current thread lgrpid. It will become main thread anyway 9518 * soon. 9519 */ 9520 lgrp_id = lgrp_home_id(curthread); 9521 } 9522 /* 9523 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. 
Otherwise 9524 * just set it to NLGRPS_MAX if it's different from current process T1 9525 * home lgrp. p_tr_lgrpid is used to detect if process uses text 9526 * replication and T1 new home is different from lgrp used for text 9527 * replication. When this happens asyncronous segvn thread rechecks if 9528 * segments should change lgrps used for text replication. If we fail 9529 * to set p_tr_lgrpid with cas32 then set it to NLGRPS_MAX without cas 9530 * if it's not already NLGRPS_MAX and not equal lgrp_id we want to 9531 * use. We don't need to use cas in this case because another thread 9532 * that races in between our non atomic check and set may only change 9533 * p_tr_lgrpid to NLGRPS_MAX at this point. 9534 */ 9535 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX); 9536 olid = p->p_tr_lgrpid; 9537 if (lgrp_id != olid && olid != NLGRPS_MAX) { 9538 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX; 9539 if (cas32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != olid) { 9540 olid = p->p_tr_lgrpid; 9541 ASSERT(olid != LGRP_NONE); 9542 if (olid != lgrp_id && olid != NLGRPS_MAX) { 9543 p->p_tr_lgrpid = NLGRPS_MAX; 9544 } 9545 } 9546 ASSERT(p->p_tr_lgrpid != LGRP_NONE); 9547 membar_producer(); 9548 /* 9549 * lgrp_move_thread() won't schedule async recheck after 9550 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not 9551 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid 9552 * is not LGRP_NONE. 9553 */ 9554 if (first && p->p_t1_lgrpid != LGRP_NONE && 9555 p->p_t1_lgrpid != lgrp_id) { 9556 first = 0; 9557 goto again; 9558 } 9559 } 9560 /* 9561 * If no amp was created yet for lgrp_id create a new one as long as 9562 * we have enough memory to afford it. 9563 */ 9564 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) { 9565 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); 9566 if (trmem > segvn_textrepl_max_bytes) { 9567 SEGVN_TR_ADDSTAT(normem); 9568 goto fail; 9569 } 9570 if (anon_try_resv_zone(size, NULL) == 0) { 9571 SEGVN_TR_ADDSTAT(noanon); 9572 goto fail; 9573 } 9574 amp = anonmap_alloc(size, size, ANON_NOSLEEP); 9575 if (amp == NULL) { 9576 anon_unresv_zone(size, NULL); 9577 SEGVN_TR_ADDSTAT(nokmem); 9578 goto fail; 9579 } 9580 ASSERT(amp->refcnt == 1); 9581 amp->a_szc = szc; 9582 svntrp->tr_amp[lgrp_id] = amp; 9583 SEGVN_TR_ADDSTAT(newamp); 9584 } 9585 svntrp->tr_refcnt++; 9586 ASSERT(svd->svn_trnext == NULL); 9587 ASSERT(svd->svn_trprev == NULL); 9588 svd->svn_trnext = svntrp->tr_svnhead; 9589 svd->svn_trprev = NULL; 9590 if (svntrp->tr_svnhead != NULL) { 9591 svntrp->tr_svnhead->svn_trprev = svd; 9592 } 9593 svntrp->tr_svnhead = svd; 9594 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size); 9595 ASSERT(amp->refcnt >= 1); 9596 svd->amp = amp; 9597 svd->anon_index = 0; 9598 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG; 9599 svd->tr_policy_info.mem_lgrpid = lgrp_id; 9600 svd->tr_state = SEGVN_TR_ON; 9601 mutex_exit(&svntr_hashtab[hash].tr_lock); 9602 SEGVN_TR_ADDSTAT(repl); 9603 return; 9604 fail: 9605 ASSERT(segvn_textrepl_bytes >= size); 9606 atomic_add_long(&segvn_textrepl_bytes, -size); 9607 ASSERT(svntrp != NULL); 9608 ASSERT(svntrp->tr_amp[lgrp_id] == NULL); 9609 if (svntrp->tr_refcnt == 0) { 9610 ASSERT(svntrp == svntr_hashtab[hash].tr_head); 9611 svntr_hashtab[hash].tr_head = svntrp->tr_next; 9612 mutex_exit(&svntr_hashtab[hash].tr_lock); 9613 kmem_cache_free(svntr_cache, svntrp); 9614 } else { 9615 mutex_exit(&svntr_hashtab[hash].tr_lock); 9616 } 9617 svd->tr_state = SEGVN_TR_OFF; 9618 } 9619 9620 /* 9621 * Convert 
seg back to regular vnode mapping seg by unbinding it from its text 9622 * replication amp. This routine is most typically called when segment is 9623 * unmapped but can also be called when segment no longer qualifies for text 9624 * replication (e.g. due to protection changes). If unload_unmap is set use 9625 * HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user of 9626 * svntr free all its anon maps and remove it from the hash table. 9627 */ 9628 static void 9629 segvn_textunrepl(struct seg *seg, int unload_unmap) 9630 { 9631 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9632 vnode_t *vp = svd->vp; 9633 u_offset_t off = svd->offset; 9634 size_t size = seg->s_size; 9635 u_offset_t eoff = off + size; 9636 uint_t szc = seg->s_szc; 9637 ulong_t hash = SVNTR_HASH_FUNC(vp); 9638 svntr_t *svntrp; 9639 svntr_t **prv_svntrp; 9640 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid; 9641 lgrp_id_t i; 9642 9643 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9644 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 9645 SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9646 ASSERT(svd->tr_state == SEGVN_TR_ON); 9647 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9648 ASSERT(svd->amp != NULL); 9649 ASSERT(svd->amp->refcnt >= 1); 9650 ASSERT(svd->anon_index == 0); 9651 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX); 9652 ASSERT(svntr_hashtab != NULL); 9653 9654 mutex_enter(&svntr_hashtab[hash].tr_lock); 9655 prv_svntrp = &svntr_hashtab[hash].tr_head; 9656 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) { 9657 ASSERT(svntrp->tr_refcnt != 0); 9658 if (svntrp->tr_vp == vp && svntrp->tr_off == off && 9659 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) { 9660 break; 9661 } 9662 } 9663 if (svntrp == NULL) { 9664 panic("segvn_textunrepl: svntr record not found"); 9665 } 9666 if (svntrp->tr_amp[lgrp_id] != svd->amp) { 9667 panic("segvn_textunrepl: amp mismatch"); 9668 } 9669 svd->tr_state = SEGVN_TR_OFF; 9670 svd->amp = NULL; 9671 if (svd->svn_trprev == NULL) { 9672 ASSERT(svntrp->tr_svnhead == svd); 9673 svntrp->tr_svnhead = svd->svn_trnext; 9674 if (svntrp->tr_svnhead != NULL) { 9675 svntrp->tr_svnhead->svn_trprev = NULL; 9676 } 9677 svd->svn_trnext = NULL; 9678 } else { 9679 svd->svn_trprev->svn_trnext = svd->svn_trnext; 9680 if (svd->svn_trnext != NULL) { 9681 svd->svn_trnext->svn_trprev = svd->svn_trprev; 9682 svd->svn_trnext = NULL; 9683 } 9684 svd->svn_trprev = NULL; 9685 } 9686 if (--svntrp->tr_refcnt) { 9687 mutex_exit(&svntr_hashtab[hash].tr_lock); 9688 goto done; 9689 } 9690 *prv_svntrp = svntrp->tr_next; 9691 mutex_exit(&svntr_hashtab[hash].tr_lock); 9692 for (i = 0; i < NLGRPS_MAX; i++) { 9693 struct anon_map *amp = svntrp->tr_amp[i]; 9694 if (amp == NULL) { 9695 continue; 9696 } 9697 ASSERT(amp->refcnt == 1); 9698 ASSERT(amp->swresv == size); 9699 ASSERT(amp->size == size); 9700 ASSERT(amp->a_szc == szc); 9701 if (amp->a_szc != 0) { 9702 anon_free_pages(amp->ahp, 0, size, szc); 9703 } else { 9704 anon_free(amp->ahp, 0, size); 9705 } 9706 svntrp->tr_amp[i] = NULL; 9707 ASSERT(segvn_textrepl_bytes >= size); 9708 atomic_add_long(&segvn_textrepl_bytes, -size); 9709 anon_unresv_zone(amp->swresv, NULL); 9710 amp->refcnt = 0; 9711 anonmap_free(amp); 9712 } 9713 kmem_cache_free(svntr_cache, svntrp); 9714 done: 9715 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size, 9716 unload_unmap ? 
HAT_UNLOAD_UNMAP : 0, NULL); 9717 } 9718 9719 /* 9720 * This is called when a MAP_SHARED writable mapping is created to a vnode 9721 * that is currently used for execution (VVMEXEC flag is set). In this case we 9722 * need to prevent further use of existing replicas. 9723 */ 9724 static void 9725 segvn_inval_trcache(vnode_t *vp) 9726 { 9727 ulong_t hash = SVNTR_HASH_FUNC(vp); 9728 svntr_t *svntrp; 9729 9730 ASSERT(vp->v_flag & VVMEXEC); 9731 9732 if (svntr_hashtab == NULL) { 9733 return; 9734 } 9735 9736 mutex_enter(&svntr_hashtab[hash].tr_lock); 9737 svntrp = svntr_hashtab[hash].tr_head; 9738 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9739 ASSERT(svntrp->tr_refcnt != 0); 9740 if (svntrp->tr_vp == vp && svntrp->tr_valid) { 9741 svntrp->tr_valid = 0; 9742 } 9743 } 9744 mutex_exit(&svntr_hashtab[hash].tr_lock); 9745 } 9746 9747 static void 9748 segvn_trasync_thread(void) 9749 { 9750 callb_cpr_t cpr_info; 9751 kmutex_t cpr_lock; /* just for CPR stuff */ 9752 9753 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL); 9754 9755 CALLB_CPR_INIT(&cpr_info, &cpr_lock, 9756 callb_generic_cpr, "segvn_async"); 9757 9758 if (segvn_update_textrepl_interval == 0) { 9759 segvn_update_textrepl_interval = segvn_update_tr_time * hz; 9760 } else { 9761 segvn_update_textrepl_interval *= hz; 9762 } 9763 (void) timeout(segvn_trupdate_wakeup, NULL, 9764 segvn_update_textrepl_interval); 9765 9766 for (;;) { 9767 mutex_enter(&cpr_lock); 9768 CALLB_CPR_SAFE_BEGIN(&cpr_info); 9769 mutex_exit(&cpr_lock); 9770 sema_p(&segvn_trasync_sem); 9771 mutex_enter(&cpr_lock); 9772 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock); 9773 mutex_exit(&cpr_lock); 9774 segvn_trupdate(); 9775 } 9776 } 9777 9778 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0; 9779 9780 static void 9781 segvn_trupdate_wakeup(void *dummy) 9782 { 9783 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations(); 9784 9785 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) { 9786 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs; 9787 sema_v(&segvn_trasync_sem); 9788 } 9789 9790 if (!segvn_disable_textrepl_update && 9791 segvn_update_textrepl_interval != 0) { 9792 (void) timeout(segvn_trupdate_wakeup, dummy, 9793 segvn_update_textrepl_interval); 9794 } 9795 } 9796 9797 static void 9798 segvn_trupdate(void) 9799 { 9800 ulong_t hash; 9801 svntr_t *svntrp; 9802 segvn_data_t *svd; 9803 9804 ASSERT(svntr_hashtab != NULL); 9805 9806 for (hash = 0; hash < svntr_hashtab_sz; hash++) { 9807 mutex_enter(&svntr_hashtab[hash].tr_lock); 9808 svntrp = svntr_hashtab[hash].tr_head; 9809 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9810 ASSERT(svntrp->tr_refcnt != 0); 9811 svd = svntrp->tr_svnhead; 9812 for (; svd != NULL; svd = svd->svn_trnext) { 9813 segvn_trupdate_seg(svd->seg, svd, svntrp, 9814 hash); 9815 } 9816 } 9817 mutex_exit(&svntr_hashtab[hash].tr_lock); 9818 } 9819 } 9820 9821 static void 9822 segvn_trupdate_seg(struct seg *seg, 9823 segvn_data_t *svd, 9824 svntr_t *svntrp, 9825 ulong_t hash) 9826 { 9827 proc_t *p; 9828 lgrp_id_t lgrp_id; 9829 struct as *as; 9830 size_t size; 9831 struct anon_map *amp; 9832 9833 ASSERT(svd->vp != NULL); 9834 ASSERT(svd->vp == svntrp->tr_vp); 9835 ASSERT(svd->offset == svntrp->tr_off); 9836 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff); 9837 ASSERT(seg != NULL); 9838 ASSERT(svd->seg == seg); 9839 ASSERT(seg->s_data == (void *)svd); 9840 ASSERT(seg->s_szc == svntrp->tr_szc); 9841 ASSERT(svd->tr_state == SEGVN_TR_ON); 9842 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9843 ASSERT(svd->amp != NULL); 9844 

static void
segvn_trupdate_seg(struct seg *seg, segvn_data_t *svd, svntr_t *svntrp,
    ulong_t hash)
{
	proc_t *p;
	lgrp_id_t lgrp_id;
	struct as *as;
	size_t size;
	struct anon_map *amp;

	ASSERT(svd->vp != NULL);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->offset == svntrp->tr_off);
	ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
	ASSERT(seg != NULL);
	ASSERT(svd->seg == seg);
	ASSERT(seg->s_data == (void *)svd);
	ASSERT(seg->s_szc == svntrp->tr_szc);
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
	ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
	ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));

	as = seg->s_as;
	ASSERT(as != NULL && as != &kas);
	p = as->a_proc;
	ASSERT(p != NULL);
	ASSERT(p->p_tr_lgrpid != LGRP_NONE);
	lgrp_id = p->p_t1_lgrpid;
	if (lgrp_id == LGRP_NONE) {
		return;
	}
	ASSERT(lgrp_id < NLGRPS_MAX);
	if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
		return;
	}

	/*
	 * Use tryenter locking since we are locking the as/seg and svntr hash
	 * locks in the reverse of the synchronous thread order.
	 */
	if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	size = seg->s_size;
	if (svntrp->tr_amp[lgrp_id] == NULL) {
		size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
		if (trmem > segvn_textrepl_max_bytes) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as, &as->a_lock);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(normem);
			return;
		}
		if (anon_try_resv_zone(size, NULL) == 0) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as, &as->a_lock);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(noanon);
			return;
		}
		amp = anonmap_alloc(size, size, KM_NOSLEEP);
		if (amp == NULL) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as, &as->a_lock);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			anon_unresv_zone(size, NULL);
			SEGVN_TR_ADDSTAT(nokmem);
			return;
		}
		ASSERT(amp->refcnt == 1);
		amp->a_szc = seg->s_szc;
		svntrp->tr_amp[lgrp_id] = amp;
	}
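
	/*
	 * Illustration only, not compiled as part of this file: the error
	 * paths above follow an "optimistically charge, then roll back"
	 * pattern on the global segvn_textrepl_bytes counter. A hypothetical
	 * stand-alone equivalent of that accounting step:
	 *
	 *	#include <sys/types.h>
	 *	#include <atomic.h>
	 *
	 *	static volatile ulong_t bytes_used;
	 *
	 *	static int
	 *	try_charge(size_t size, size_t limit)
	 *	{
	 *		if (atomic_add_long_nv(&bytes_used, size) > limit) {
	 *			atomic_add_long(&bytes_used, -size);	// undo
	 *			return (0);
	 *		}
	 *		return (1);
	 *	}
	 */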
	/*
	 * We don't need to drop the bucket lock, but here we give other
	 * threads a chance. svntr and svd can't be unlinked as long as the
	 * segment lock is held as a writer and the AS lock is held as well.
	 * After we retake the bucket lock we'll continue from where we left
	 * off. We'll be able to reach the end of either list since new
	 * entries are always added to the beginning of the lists.
	 */
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
	mutex_enter(&svntr_hashtab[hash].tr_lock);

	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
	ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);

	svd->tr_policy_info.mem_lgrpid = lgrp_id;
	svd->amp = svntrp->tr_amp[lgrp_id];
	p->p_tr_lgrpid = NLGRPS_MAX;
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	AS_LOCK_EXIT(as, &as->a_lock);

	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
	ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
	ASSERT(svd->seg == seg);
	ASSERT(svd->tr_state == SEGVN_TR_ON);

	SEGVN_TR_ADDSTAT(asyncrepl);
}
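
/*
 * Illustration only, not compiled as part of this file: every svntr lookup
 * in the routines above follows the same pattern -- hash the vnode pointer
 * to a bucket, take that bucket's lock, and walk a singly linked chain of
 * records while the lock is held. A minimal userland model of that bucket
 * layout (rec, bucket, lookup() and NBUCKETS are hypothetical stand-ins for
 * svntr_t, the svntr_hashtab bucket and SVNTR_HASH_FUNC()):
 *
 *	#include <pthread.h>
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	#define	NBUCKETS	64
 *
 *	struct rec {
 *		const void	*key;		// cf. tr_vp
 *		struct rec	*next;		// cf. tr_next
 *	};
 *
 *	static struct bucket {
 *		pthread_mutex_t	lock;		// cf. tr_lock
 *		struct rec	*head;		// cf. tr_head
 *	} table[NBUCKETS];	// each lock set up with pthread_mutex_init()
 *
 *	static size_t
 *	hash_key(const void *key)
 *	{
 *		return (((uintptr_t)key >> 4) % NBUCKETS);
 *	}
 *
 *	static struct rec *
 *	lookup(const void *key)
 *	{
 *		struct bucket *b = &table[hash_key(key)];
 *		struct rec *r;
 *
 *		(void) pthread_mutex_lock(&b->lock);
 *		for (r = b->head; r != NULL; r = r->next) {
 *			if (r->key == key)
 *				break;
 *		}
 *		(void) pthread_mutex_unlock(&b->lock);
 *		return (r);
 *	}
 *
 * Unlike this simplified lookup(), the kernel code above keeps the bucket
 * lock held (or a reference elevated) while the record is used, so a found
 * record cannot be freed out from under the caller.
 */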