patch SEGOP_SWAPOUT-delete
--- old/usr/src/uts/common/vm/seg_vn.c
+++ new/usr/src/uts/common/vm/seg_vn.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 26 /* All Rights Reserved */
27 27
28 28 /*
29 29 * University Copyright- Copyright (c) 1982, 1986, 1988
30 30 * The Regents of the University of California
31 31 * All Rights Reserved
32 32 *
33 33 * University Acknowledgment- Portions of this document are derived from
34 34 * software developed by the University of California, Berkeley, and its
35 35 * contributors.
36 36 */
37 37
38 38 /*
39 39 * VM - shared or copy-on-write from a vnode/anonymous memory.
40 40 */
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/param.h>
44 44 #include <sys/t_lock.h>
45 45 #include <sys/errno.h>
46 46 #include <sys/systm.h>
47 47 #include <sys/mman.h>
48 48 #include <sys/debug.h>
49 49 #include <sys/cred.h>
50 50 #include <sys/vmsystm.h>
51 51 #include <sys/tuneable.h>
52 52 #include <sys/bitmap.h>
53 53 #include <sys/swap.h>
54 54 #include <sys/kmem.h>
55 55 #include <sys/sysmacros.h>
56 56 #include <sys/vtrace.h>
57 57 #include <sys/cmn_err.h>
58 58 #include <sys/callb.h>
59 59 #include <sys/vm.h>
60 60 #include <sys/dumphdr.h>
61 61 #include <sys/lgrp.h>
62 62
63 63 #include <vm/hat.h>
64 64 #include <vm/as.h>
65 65 #include <vm/seg.h>
66 66 #include <vm/seg_vn.h>
67 67 #include <vm/pvn.h>
68 68 #include <vm/anon.h>
69 69 #include <vm/page.h>
70 70 #include <vm/vpage.h>
71 71 #include <sys/proc.h>
72 72 #include <sys/task.h>
73 73 #include <sys/project.h>
74 74 #include <sys/zone.h>
75 75 #include <sys/shm_impl.h>
76 76 /*
77 77 * Private seg op routines.
78 78 */
79 79 static int segvn_dup(struct seg *seg, struct seg *newseg);
80 80 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
81 81 static void segvn_free(struct seg *seg);
82 82 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
83 83 caddr_t addr, size_t len, enum fault_type type,
84 84 enum seg_rw rw);
85 85 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
86 86 static int segvn_setprot(struct seg *seg, caddr_t addr,
87 87 size_t len, uint_t prot);
88 88 static int segvn_checkprot(struct seg *seg, caddr_t addr,
89 89 size_t len, uint_t prot);
90 90 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
91 -static size_t segvn_swapout(struct seg *seg);
92 91 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len,
93 92 int attr, uint_t flags);
94 93 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len,
95 94 char *vec);
96 95 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
97 96 int attr, int op, ulong_t *lockmap, size_t pos);
98 97 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
99 98 uint_t *protv);
100 99 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr);
101 100 static int segvn_gettype(struct seg *seg, caddr_t addr);
102 101 static int segvn_getvp(struct seg *seg, caddr_t addr,
103 102 struct vnode **vpp);
104 103 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len,
105 104 uint_t behav);
106 105 static void segvn_dump(struct seg *seg);
107 106 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
108 107 struct page ***ppp, enum lock_type type, enum seg_rw rw);
109 108 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
110 109 uint_t szc);
111 110 static int segvn_getmemid(struct seg *seg, caddr_t addr,
112 111 memid_t *memidp);
113 112 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t);
114 113 static int segvn_capable(struct seg *seg, segcapability_t capable);
115 114
116 115 struct seg_ops segvn_ops = {
117 116 segvn_dup,
118 117 segvn_unmap,
119 118 segvn_free,
120 119 segvn_fault,
121 120 segvn_faulta,
122 121 segvn_setprot,
123 122 segvn_checkprot,
124 123 segvn_kluster,
125 - segvn_swapout,
126 124 segvn_sync,
127 125 segvn_incore,
128 126 segvn_lockop,
129 127 segvn_getprot,
130 128 segvn_getoffset,
131 129 segvn_gettype,
132 130 segvn_getvp,
133 131 segvn_advise,
134 132 segvn_dump,
135 133 segvn_pagelock,
136 134 segvn_setpagesize,
137 135 segvn_getmemid,
138 136 segvn_getpolicy,
139 137 segvn_capable,
140 138 };
141 139
142 140 /*
143 141 * Common zfod structures, provided as a shorthand for others to use.
144 142 */
145 143 static segvn_crargs_t zfod_segvn_crargs =
146 144 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
147 145 static segvn_crargs_t kzfod_segvn_crargs =
148 146 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
149 147 PROT_ALL & ~PROT_USER);
150 148 static segvn_crargs_t stack_noexec_crargs =
151 149 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);
152 150
153 151 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */
154 152 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */
155 153 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */
156 154 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
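
These argsp handles are meant to be passed straight to as_map() as the create arguments for segvn_create(). A minimal hedged sketch of that usage follows; the helper name is hypothetical, and the caller, target address, and error handling are assumed:

	/* Hypothetical helper: map "len" bytes of user zero-fill-on-demand */
	/* (zfod) memory at "addr" in address space "as". */
	static int
	map_zfod_sketch(struct as *as, caddr_t addr, size_t len)
	{
		return (as_map(as, addr, len, segvn_create, zfod_argsp));
	}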
157 155
158 156 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
159 157
160 158 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */
161 159
162 160 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */
163 161 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */
164 162 uint_t segvn_pglock_comb_bshift;
165 163 size_t segvn_pglock_comb_palign;
166 164
167 165 static int segvn_concat(struct seg *, struct seg *, int);
168 166 static int segvn_extend_prev(struct seg *, struct seg *,
169 167 struct segvn_crargs *, size_t);
170 168 static int segvn_extend_next(struct seg *, struct seg *,
171 169 struct segvn_crargs *, size_t);
172 170 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
173 171 static void segvn_pagelist_rele(page_t **);
174 172 static void segvn_setvnode_mpss(vnode_t *);
175 173 static void segvn_relocate_pages(page_t **, page_t *);
176 174 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
177 175 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
178 176 uint_t, page_t **, page_t **, uint_t *, int *);
179 177 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
180 178 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
181 179 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
182 180 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
183 181 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
184 182 u_offset_t, struct vpage *, page_t **, uint_t,
185 183 enum fault_type, enum seg_rw, int);
186 184 static void segvn_vpage(struct seg *);
187 185 static size_t segvn_count_swap_by_vpages(struct seg *);
188 186
189 187 static void segvn_purge(struct seg *seg);
190 188 static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
191 189 enum seg_rw, int);
192 190 static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
193 191 enum seg_rw, int);
194 192
195 193 static int sameprot(struct seg *, caddr_t, size_t);
196 194
197 195 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
198 196 static int segvn_clrszc(struct seg *);
199 197 static struct seg *segvn_split_seg(struct seg *, caddr_t);
200 198 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
201 199 ulong_t, uint_t);
202 200
203 201 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
204 202 size_t, void *, u_offset_t);
205 203
206 204 static struct kmem_cache *segvn_cache;
207 205 static struct kmem_cache **segvn_szc_cache;
208 206
209 207 #ifdef VM_STATS
210 208 static struct segvnvmstats_str {
211 209 ulong_t fill_vp_pages[31];
212 210 ulong_t fltvnpages[49];
213 211 ulong_t fullszcpages[10];
214 212 ulong_t relocatepages[3];
215 213 ulong_t fltanpages[17];
216 214 ulong_t pagelock[2];
217 215 ulong_t demoterange[3];
218 216 } segvnvmstats;
219 217 #endif /* VM_STATS */
220 218
221 219 #define SDR_RANGE 1 /* demote entire range */
222 220 #define SDR_END 2 /* demote non aligned ends only */
223 221
224 222 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \
225 223 if ((len) != 0) { \
226 224 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \
227 225 ASSERT(lpgaddr >= (seg)->s_base); \
228 226 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \
229 227 (len)), pgsz); \
230 228 ASSERT(lpgeaddr > lpgaddr); \
231 229 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \
232 230 } else { \
233 231 lpgeaddr = lpgaddr = (addr); \
234 232 } \
235 233 }
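
CALC_LPG_REGION simply widens the requested range [addr, addr + len) outward to the enclosing large-page boundaries. A minimal user-level sketch of the arithmetic, using the standard P2ALIGN/P2ROUNDUP definitions from sys/sysmacros.h (the input values are made up for illustration):

	#include <stdio.h>
	#include <stdint.h>

	/* Local stand-ins for the sys/sysmacros.h power-of-two helpers. */
	#define P2ALIGN(x, a)   ((x) & -(a))
	#define P2ROUNDUP(x, a) (-(-(x) & -(a)))

	int
	main(void)
	{
		uintptr_t addr = 0x12345000, len = 0x2000;
		uintptr_t pgsz = 0x400000;	/* 4M large page */
		uintptr_t lpgaddr = P2ALIGN(addr, pgsz);
		uintptr_t lpgeaddr = P2ROUNDUP(addr + len, pgsz);

		/* Prints "12000000 .. 12400000": the smallest 4M-aligned */
		/* region covering [addr, addr + len). */
		printf("%lx .. %lx\n", (unsigned long)lpgaddr,
		    (unsigned long)lpgeaddr);
		return (0);
	}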
236 234
237 235 /*ARGSUSED*/
238 236 static int
239 237 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
240 238 {
241 239 struct segvn_data *svd = buf;
242 240
243 241 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
244 242 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
245 243 svd->svn_trnext = svd->svn_trprev = NULL;
246 244 return (0);
247 245 }
248 246
249 247 /*ARGSUSED1*/
250 248 static void
251 249 segvn_cache_destructor(void *buf, void *cdrarg)
252 250 {
253 251 struct segvn_data *svd = buf;
254 252
255 253 rw_destroy(&svd->lock);
256 254 mutex_destroy(&svd->segfree_syncmtx);
257 255 }
258 256
259 257 /*ARGSUSED*/
260 258 static int
261 259 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
262 260 {
263 261 bzero(buf, sizeof (svntr_t));
264 262 return (0);
265 263 }
266 264
267 265 /*
268 266 * Patching this variable to non-zero allows the system to run with
269 267 * stacks marked as "not executable". It's a bit of a kludge, but is
270 268 * provided as a tweakable for platforms that export those ABIs
271 269 * (e.g. sparc V8) that have executable stacks enabled by default.
272 270 * There are also some restrictions for platforms that don't actually
273 271 * implement 'noexec' protections.
274 272 *
275 273 * Once enabled, the system is (therefore) unable to provide a fully
276 274 * ABI-compliant execution environment, though practically speaking,
277 275 * most everything works. The exceptions are generally some interpreters
278 276 * and debuggers that create executable code on the stack and jump
279 277 * into it (without explicitly mprotecting the address range to include
280 278 * PROT_EXEC).
281 279 *
282 280 * One important class of applications that this disables is those
283 281 * that have been transformed into malicious agents using one of the
284 282 * numerous "buffer overflow" attacks. See 4007890.
285 283 */
286 284 int noexec_user_stack = 0;
287 285 int noexec_user_stack_log = 1;
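
On platforms where this is wanted, these two tunables are conventionally set from /etc/system (followed by a reboot), per long-standing Solaris practice:

	* Mark userland stacks non-executable and log attempted
	* stack executions:
	set noexec_user_stack = 1
	set noexec_user_stack_log = 1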
288 286
289 287 int segvn_lpg_disable = 0;
290 288 uint_t segvn_maxpgszc = 0;
291 289
292 290 ulong_t segvn_vmpss_clrszc_cnt;
293 291 ulong_t segvn_vmpss_clrszc_err;
294 292 ulong_t segvn_fltvnpages_clrszc_cnt;
295 293 ulong_t segvn_fltvnpages_clrszc_err;
296 294 ulong_t segvn_setpgsz_align_err;
297 295 ulong_t segvn_setpgsz_anon_align_err;
298 296 ulong_t segvn_setpgsz_getattr_err;
299 297 ulong_t segvn_setpgsz_eof_err;
300 298 ulong_t segvn_faultvnmpss_align_err1;
301 299 ulong_t segvn_faultvnmpss_align_err2;
302 300 ulong_t segvn_faultvnmpss_align_err3;
303 301 ulong_t segvn_faultvnmpss_align_err4;
304 302 ulong_t segvn_faultvnmpss_align_err5;
305 303 ulong_t segvn_vmpss_pageio_deadlk_err;
306 304
307 305 int segvn_use_regions = 1;
308 306
309 307 /*
310 308 * Segvn supports text replication optimization for NUMA platforms. Text
311 309 * replicas are represented by anon maps (amp). There's one amp per text file
312 310 * region per lgroup. A process chooses the amp for each of its text mappings
313 311 * based on the lgroup assignment of its main thread (t_tid = 1). All
314 312 * processes that want a replica on a particular lgroup for the same text file
315 313 * mapping share the same amp. amps are looked up in the svntr_hashtab hash
316 314 * table with vp,off,size,szc used as the key. Text replication segments are
317 315 * read-only MAP_PRIVATE|MAP_TEXT segments that map a vnode. Replication is achieved by
318 316 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode
319 317 * pages. Replication amp is assigned to a segment when it gets its first
320 318 * pagefault. To handle main thread lgroup rehoming, segvn_trasync_thread
321 319 * periodically rechecks whether the process still maps an amp local to the main
322 320 * thread. If not, the async thread forces the process to remap to an amp in the new
323 321 * home lgroup of the main thread. Current text replication implementation
324 322 * only provides the benefit to workloads that do most of their work in the
325 323 * main thread of a process or all the threads of a process run in the same
326 324 * lgroup. To extend text replication benefit to different types of
327 325 * multithreaded workloads further work would be needed in the hat layer to
328 326 * allow the same virtual address in the same hat to simultaneously map
329 327 * different physical addresses (i.e. page table replication would be needed
330 328 * for x86).
331 329 *
332 330 * amp pages are used instead of vnode pages as long as segment has a very
333 331 * simple life cycle. It's created via segvn_create(), handles S_EXEC
334 332 * (S_READ) pagefaults and is fully unmapped. If anything more complicated
335 333 * happens such as protection is changed, real COW fault happens, pagesize is
336 334 * changed, MC_LOCK is requested or segment is partially unmapped we turn off
337 335 * text replication by converting the segment back to vnode only segment
338 336 * (unmap segment's address range and set svd->amp to NULL).
339 337 *
340 338 * The original file can be changed after amp is inserted into
341 339 * svntr_hashtab. Processes that are launched after the file is already
342 340 * changed can't use the replicas created prior to the file change. To
343 341 * implement this functionality, hash entries are timestamped. Replicas can
344 342 * only be used if the current file modification time is the same as the timestamp
345 343 * saved when the hash entry was created. However, timestamps alone are not
346 344 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We
347 345 * deal with file changes via MAP_SHARED mappings differently. When writable
348 346 * MAP_SHARED mappings are created to vnodes marked as executable we mark all
349 347 * existing replicas for this vnode as not usable for future text
350 348 * mappings, and we don't create new replicas for files that currently have
351 349 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
352 350 * true).
353 351 */
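
As a rough illustration of the lookup rule described above, a hedged sketch follows. The field and list names used here (tr_head, tr_next, tr_vp, tr_off, tr_eoff, tr_szc, tr_mtime) are hypothetical stand-ins and need not match the real svntr_t layout:

	/*
	 * Sketch: find a usable replica entry in one hash bucket. Entries
	 * are shared only on an exact (vp, off, size, szc) match, and only
	 * if the file's modification time still equals the timestamp saved
	 * when the entry was created.
	 */
	static svntr_t *
	svntr_lookup_sketch(svntr_bucket_t *bp, vnode_t *vp, u_offset_t off,
	    size_t size, uint_t szc, timestruc_t mtime)
	{
		svntr_t *svntrp;

		ASSERT(MUTEX_HELD(&bp->tr_lock));
		for (svntrp = bp->tr_head; svntrp != NULL;
		    svntrp = svntrp->tr_next) {
			if (svntrp->tr_vp != vp || svntrp->tr_off != off ||
			    svntrp->tr_eoff != off + size ||
			    svntrp->tr_szc != szc)
				continue;
			if (svntrp->tr_mtime.tv_sec == mtime.tv_sec &&
			    svntrp->tr_mtime.tv_nsec == mtime.tv_nsec)
				return (svntrp);
		}
		return (NULL);
	}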
354 352
355 353 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20)
356 354 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;
357 355
358 356 static ulong_t svntr_hashtab_sz = 512;
359 357 static svntr_bucket_t *svntr_hashtab = NULL;
360 358 static struct kmem_cache *svntr_cache;
361 359 static svntr_stats_t *segvn_textrepl_stats;
362 360 static ksema_t segvn_trasync_sem;
363 361
364 362 int segvn_disable_textrepl = 1;
365 363 size_t textrepl_size_thresh = (size_t)-1;
366 364 size_t segvn_textrepl_bytes = 0;
367 365 size_t segvn_textrepl_max_bytes = 0;
368 366 clock_t segvn_update_textrepl_interval = 0;
369 367 int segvn_update_tr_time = 10;
370 368 int segvn_disable_textrepl_update = 0;
371 369
372 370 static void segvn_textrepl(struct seg *);
373 371 static void segvn_textunrepl(struct seg *, int);
374 372 static void segvn_inval_trcache(vnode_t *);
375 373 static void segvn_trasync_thread(void);
376 374 static void segvn_trupdate_wakeup(void *);
377 375 static void segvn_trupdate(void);
378 376 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
379 377 ulong_t);
380 378
381 379 /*
382 380 * Initialize segvn data structures
383 381 */
384 382 void
385 383 segvn_init(void)
386 384 {
387 385 uint_t maxszc;
388 386 uint_t szc;
389 387 size_t pgsz;
390 388
391 389 segvn_cache = kmem_cache_create("segvn_cache",
392 390 sizeof (struct segvn_data), 0,
393 391 segvn_cache_constructor, segvn_cache_destructor, NULL,
394 392 NULL, NULL, 0);
395 393
396 394 if (segvn_lpg_disable == 0) {
397 395 szc = maxszc = page_num_pagesizes() - 1;
398 396 if (szc == 0) {
399 397 segvn_lpg_disable = 1;
400 398 }
401 399 if (page_get_pagesize(0) != PAGESIZE) {
402 400 panic("segvn_init: bad szc 0");
403 401 /*NOTREACHED*/
404 402 }
405 403 while (szc != 0) {
406 404 pgsz = page_get_pagesize(szc);
407 405 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
408 406 panic("segvn_init: bad szc %d", szc);
409 407 /*NOTREACHED*/
410 408 }
411 409 szc--;
412 410 }
413 411 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
414 412 segvn_maxpgszc = maxszc;
415 413 }
416 414
417 415 if (segvn_maxpgszc) {
418 416 segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
419 417 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
420 418 KM_SLEEP);
421 419 }
422 420
423 421 for (szc = 1; szc <= segvn_maxpgszc; szc++) {
424 422 char str[32];
425 423
426 424 (void) sprintf(str, "segvn_szc_cache%d", szc);
427 425 segvn_szc_cache[szc] = kmem_cache_create(str,
428 426 page_get_pagecnt(szc) * sizeof (page_t *), 0,
429 427 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
430 428 }
431 429
432 430
433 431 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
434 432 segvn_use_regions = 0;
435 433
436 434 /*
437 435 * For now shared regions and text replication segvn support
438 436 * are mutually exclusive. This is acceptable because
439 437 * currently significant benefit from text replication was
440 438 * only observed on AMD64 NUMA platforms (due to relatively
441 439 * small L2$ size) and currently we don't support shared
442 440 * regions on x86.
443 441 */
444 442 if (segvn_use_regions && !segvn_disable_textrepl) {
445 443 segvn_disable_textrepl = 1;
446 444 }
447 445
448 446 #if defined(_LP64)
449 447 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
450 448 !segvn_disable_textrepl) {
451 449 ulong_t i;
452 450 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);
453 451
454 452 svntr_cache = kmem_cache_create("svntr_cache",
455 453 sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
456 454 NULL, NULL, NULL, 0);
457 455 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
458 456 for (i = 0; i < svntr_hashtab_sz; i++) {
459 457 mutex_init(&svntr_hashtab[i].tr_lock, NULL,
460 458 MUTEX_DEFAULT, NULL);
461 459 }
462 460 segvn_textrepl_max_bytes = ptob(physmem) /
463 461 segvn_textrepl_max_bytes_factor;
464 462 segvn_textrepl_stats = kmem_zalloc(NCPU *
465 463 sizeof (svntr_stats_t), KM_SLEEP);
466 464 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
467 465 (void) thread_create(NULL, 0, segvn_trasync_thread,
468 466 NULL, 0, &p0, TS_RUN, minclsyspri);
469 467 }
470 468 #endif
471 469
472 470 if (!ISP2(segvn_pglock_comb_balign) ||
473 471 segvn_pglock_comb_balign < PAGESIZE) {
474 472 segvn_pglock_comb_balign = 1UL << 16; /* 64K */
475 473 }
476 474 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
477 475 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
478 476 }
479 477
480 478 #define SEGVN_PAGEIO ((void *)0x1)
481 479 #define SEGVN_NOPAGEIO ((void *)0x2)
482 480
483 481 static void
484 482 segvn_setvnode_mpss(vnode_t *vp)
485 483 {
486 484 int err;
487 485
488 486 ASSERT(vp->v_mpssdata == NULL ||
489 487 vp->v_mpssdata == SEGVN_PAGEIO ||
490 488 vp->v_mpssdata == SEGVN_NOPAGEIO);
491 489
492 490 if (vp->v_mpssdata == NULL) {
493 491 if (vn_vmpss_usepageio(vp)) {
494 492 err = VOP_PAGEIO(vp, (page_t *)NULL,
495 493 (u_offset_t)0, 0, 0, CRED(), NULL);
496 494 } else {
497 495 err = ENOSYS;
498 496 }
499 497 /*
500 498 * set v_mpssdata just once per vnode life
501 499 * so that it never changes.
502 500 */
503 501 mutex_enter(&vp->v_lock);
504 502 if (vp->v_mpssdata == NULL) {
505 503 if (err == EINVAL) {
506 504 vp->v_mpssdata = SEGVN_PAGEIO;
507 505 } else {
508 506 vp->v_mpssdata = SEGVN_NOPAGEIO;
509 507 }
510 508 }
511 509 mutex_exit(&vp->v_lock);
512 510 }
513 511 }
514 512
515 513 int
516 514 segvn_create(struct seg *seg, void *argsp)
517 515 {
518 516 struct segvn_crargs *a = (struct segvn_crargs *)argsp;
519 517 struct segvn_data *svd;
520 518 size_t swresv = 0;
521 519 struct cred *cred;
522 520 struct anon_map *amp;
523 521 int error = 0;
524 522 size_t pgsz;
525 523 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
526 524 int use_rgn = 0;
527 525 int trok = 0;
528 526
529 527 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
530 528
531 529 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
532 530 panic("segvn_create type");
533 531 /*NOTREACHED*/
534 532 }
535 533
536 534 /*
537 535 * Check arguments. If a shared anon structure is given then
538 536 * it is illegal to also specify a vp.
539 537 */
540 538 if (a->amp != NULL && a->vp != NULL) {
541 539 panic("segvn_create anon_map");
542 540 /*NOTREACHED*/
543 541 }
544 542
545 543 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
546 544 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
547 545 segvn_use_regions) {
548 546 use_rgn = 1;
549 547 }
550 548
551 549 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
552 550 if (a->type == MAP_SHARED)
553 551 a->flags &= ~MAP_NORESERVE;
554 552
555 553 if (a->szc != 0) {
556 554 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
557 555 (a->amp != NULL && a->type == MAP_PRIVATE) ||
558 556 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
559 557 a->szc = 0;
560 558 } else {
561 559 if (a->szc > segvn_maxpgszc)
562 560 a->szc = segvn_maxpgszc;
563 561 pgsz = page_get_pagesize(a->szc);
564 562 if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
565 563 !IS_P2ALIGNED(seg->s_size, pgsz)) {
566 564 a->szc = 0;
567 565 } else if (a->vp != NULL) {
568 566 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
569 567 /*
570 568 * paranoid check.
571 569 * hat_page_demote() is not supported
572 570 * on swapfs pages.
573 571 */
574 572 a->szc = 0;
575 573 } else if (map_addr_vacalign_check(seg->s_base,
576 574 a->offset & PAGEMASK)) {
577 575 a->szc = 0;
578 576 }
579 577 } else if (a->amp != NULL) {
580 578 pgcnt_t anum = btopr(a->offset);
581 579 pgcnt_t pgcnt = page_get_pagecnt(a->szc);
582 580 if (!IS_P2ALIGNED(anum, pgcnt)) {
583 581 a->szc = 0;
584 582 }
585 583 }
586 584 }
587 585 }
588 586
589 587 /*
590 588 * If segment may need private pages, reserve them now.
591 589 */
592 590 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
593 591 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
594 592 if (anon_resv_zone(seg->s_size,
595 593 seg->s_as->a_proc->p_zone) == 0)
596 594 return (EAGAIN);
597 595 swresv = seg->s_size;
598 596 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
599 597 seg, swresv, 1);
600 598 }
601 599
602 600 /*
603 601 * Reserve any mapping structures that may be required.
604 602 *
605 603 * Don't do it for segments that may use regions. It's currently a
606 604 * noop in the hat implementations anyway.
607 605 */
608 606 if (!use_rgn) {
609 607 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
610 608 }
611 609
612 610 if (a->cred) {
613 611 cred = a->cred;
614 612 crhold(cred);
615 613 } else {
616 614 crhold(cred = CRED());
617 615 }
618 616
619 617 /* Inform the vnode of the new mapping */
620 618 if (a->vp != NULL) {
621 619 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
622 620 seg->s_as, seg->s_base, seg->s_size, a->prot,
623 621 a->maxprot, a->type, cred, NULL);
624 622 if (error) {
625 623 if (swresv != 0) {
626 624 anon_unresv_zone(swresv,
627 625 seg->s_as->a_proc->p_zone);
628 626 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
629 627 "anon proc:%p %lu %u", seg, swresv, 0);
630 628 }
631 629 crfree(cred);
632 630 if (!use_rgn) {
633 631 hat_unload(seg->s_as->a_hat, seg->s_base,
634 632 seg->s_size, HAT_UNLOAD_UNMAP);
635 633 }
636 634 return (error);
637 635 }
638 636 /*
639 637 * svntr_hashtab will be NULL if we support shared regions.
640 638 */
641 639 trok = ((a->flags & MAP_TEXT) &&
642 640 (seg->s_size > textrepl_size_thresh ||
643 641 (a->flags & _MAP_TEXTREPL)) &&
644 642 lgrp_optimizations() && svntr_hashtab != NULL &&
645 643 a->type == MAP_PRIVATE && swresv == 0 &&
646 644 !(a->flags & MAP_NORESERVE) &&
647 645 seg->s_as != &kas && a->vp->v_type == VREG);
648 646
649 647 ASSERT(!trok || !use_rgn);
650 648 }
651 649
652 650 /*
653 651 * MAP_NORESERVE mappings don't count towards the VSZ of a process
654 652 * until we fault the pages in.
655 653 */
656 654 if ((a->vp == NULL || a->vp->v_type != VREG) &&
657 655 a->flags & MAP_NORESERVE) {
658 656 seg->s_as->a_resvsize -= seg->s_size;
659 657 }
660 658
661 659 /*
662 660 * If more than one segment in the address space, and they're adjacent
663 661 * virtually, try to concatenate them. Don't concatenate if an
664 662 * explicit anon_map structure was supplied (e.g., SystemV shared
665 663 * memory) or if we'll use text replication for this segment.
666 664 */
667 665 if (a->amp == NULL && !use_rgn && !trok) {
668 666 struct seg *pseg, *nseg;
669 667 struct segvn_data *psvd, *nsvd;
670 668 lgrp_mem_policy_t ppolicy, npolicy;
671 669 uint_t lgrp_mem_policy_flags = 0;
672 670 extern lgrp_mem_policy_t lgrp_mem_default_policy;
673 671
674 672 /*
675 673 * Memory policy flags (lgrp_mem_policy_flags) is valid when
676 674 * extending stack/heap segments.
677 675 */
678 676 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
679 677 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
680 678 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
681 679 } else {
682 680 /*
683 681 * Get policy when not extending it from another segment
684 682 */
685 683 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
686 684 }
687 685
688 686 /*
689 687 * First, try to concatenate the previous and new segments
690 688 */
691 689 pseg = AS_SEGPREV(seg->s_as, seg);
692 690 if (pseg != NULL &&
693 691 pseg->s_base + pseg->s_size == seg->s_base &&
694 692 pseg->s_ops == &segvn_ops) {
695 693 /*
696 694 * Get memory allocation policy from previous segment.
697 695 * When extension is specified (e.g. for heap) apply
698 696 * this policy to the new segment regardless of the
699 697 * outcome of segment concatenation. Extension occurs
700 698 * for non-default policy otherwise default policy is
701 699 * used and is based on extended segment size.
702 700 */
703 701 psvd = (struct segvn_data *)pseg->s_data;
704 702 ppolicy = psvd->policy_info.mem_policy;
705 703 if (lgrp_mem_policy_flags ==
706 704 LGRP_MP_FLAG_EXTEND_UP) {
707 705 if (ppolicy != lgrp_mem_default_policy) {
708 706 mpolicy = ppolicy;
709 707 } else {
710 708 mpolicy = lgrp_mem_policy_default(
711 709 pseg->s_size + seg->s_size,
712 710 a->type);
713 711 }
714 712 }
715 713
716 714 if (mpolicy == ppolicy &&
717 715 (pseg->s_size + seg->s_size <=
718 716 segvn_comb_thrshld || psvd->amp == NULL) &&
719 717 segvn_extend_prev(pseg, seg, a, swresv) == 0) {
720 718 /*
721 719 * success! now try to concatenate
722 720 * with following seg
723 721 */
724 722 crfree(cred);
725 723 nseg = AS_SEGNEXT(pseg->s_as, pseg);
726 724 if (nseg != NULL &&
727 725 nseg != pseg &&
728 726 nseg->s_ops == &segvn_ops &&
729 727 pseg->s_base + pseg->s_size ==
730 728 nseg->s_base)
731 729 (void) segvn_concat(pseg, nseg, 0);
732 730 ASSERT(pseg->s_szc == 0 ||
733 731 (a->szc == pseg->s_szc &&
734 732 IS_P2ALIGNED(pseg->s_base, pgsz) &&
735 733 IS_P2ALIGNED(pseg->s_size, pgsz)));
736 734 return (0);
737 735 }
738 736 }
739 737
740 738 /*
741 739 * Failed, so try to concatenate with following seg
742 740 */
743 741 nseg = AS_SEGNEXT(seg->s_as, seg);
744 742 if (nseg != NULL &&
745 743 seg->s_base + seg->s_size == nseg->s_base &&
746 744 nseg->s_ops == &segvn_ops) {
747 745 /*
748 746 * Get memory allocation policy from next segment.
749 747 * When extension is specified (e.g. for stack) apply
750 748 * this policy to the new segment regardless of the
751 749 * outcome of segment concatenation. Extension occurs
752 750 * for non-default policy otherwise default policy is
753 751 * used and is based on extended segment size.
754 752 */
755 753 nsvd = (struct segvn_data *)nseg->s_data;
756 754 npolicy = nsvd->policy_info.mem_policy;
757 755 if (lgrp_mem_policy_flags ==
758 756 LGRP_MP_FLAG_EXTEND_DOWN) {
759 757 if (npolicy != lgrp_mem_default_policy) {
760 758 mpolicy = npolicy;
761 759 } else {
762 760 mpolicy = lgrp_mem_policy_default(
763 761 nseg->s_size + seg->s_size,
764 762 a->type);
765 763 }
766 764 }
767 765
768 766 if (mpolicy == npolicy &&
769 767 segvn_extend_next(seg, nseg, a, swresv) == 0) {
770 768 crfree(cred);
771 769 ASSERT(nseg->s_szc == 0 ||
772 770 (a->szc == nseg->s_szc &&
773 771 IS_P2ALIGNED(nseg->s_base, pgsz) &&
774 772 IS_P2ALIGNED(nseg->s_size, pgsz)));
775 773 return (0);
776 774 }
777 775 }
778 776 }
779 777
780 778 if (a->vp != NULL) {
781 779 VN_HOLD(a->vp);
782 780 if (a->type == MAP_SHARED)
783 781 lgrp_shm_policy_init(NULL, a->vp);
784 782 }
785 783 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
786 784
787 785 seg->s_ops = &segvn_ops;
788 786 seg->s_data = (void *)svd;
789 787 seg->s_szc = a->szc;
790 788
791 789 svd->seg = seg;
792 790 svd->vp = a->vp;
793 791 /*
794 792 * Anonymous mappings have no backing file so the offset is meaningless.
795 793 */
796 794 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
797 795 svd->prot = a->prot;
798 796 svd->maxprot = a->maxprot;
799 797 svd->pageprot = 0;
800 798 svd->type = a->type;
801 799 svd->vpage = NULL;
802 800 svd->cred = cred;
803 801 svd->advice = MADV_NORMAL;
804 802 svd->pageadvice = 0;
805 803 svd->flags = (ushort_t)a->flags;
806 804 svd->softlockcnt = 0;
807 805 svd->softlockcnt_sbase = 0;
808 806 svd->softlockcnt_send = 0;
809 807 svd->rcookie = HAT_INVALID_REGION_COOKIE;
810 808 svd->pageswap = 0;
811 809
812 810 if (a->szc != 0 && a->vp != NULL) {
813 811 segvn_setvnode_mpss(a->vp);
814 812 }
815 813 if (svd->type == MAP_SHARED && svd->vp != NULL &&
816 814 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
817 815 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
818 816 segvn_inval_trcache(svd->vp);
819 817 }
820 818
821 819 amp = a->amp;
822 820 if ((svd->amp = amp) == NULL) {
823 821 svd->anon_index = 0;
824 822 if (svd->type == MAP_SHARED) {
825 823 svd->swresv = 0;
826 824 /*
827 825 * Shared mappings to a vp need no other setup.
828 826 * If we have a shared mapping to an anon_map object
829 827 * which hasn't been allocated yet, allocate the
830 828 * struct now so that it will be properly shared
831 829 * by remembering the swap reservation there.
832 830 */
833 831 if (a->vp == NULL) {
834 832 svd->amp = anonmap_alloc(seg->s_size, swresv,
835 833 ANON_SLEEP);
836 834 svd->amp->a_szc = seg->s_szc;
837 835 }
838 836 } else {
839 837 /*
840 838 * Private mapping (with or without a vp).
841 839 * Allocate anon_map when needed.
842 840 */
843 841 svd->swresv = swresv;
844 842 }
845 843 } else {
846 844 pgcnt_t anon_num;
847 845
848 846 /*
849 847 * Mapping to an existing anon_map structure without a vp.
850 848 * For now we will ensure that the segment size isn't larger
851 849 * than the size - offset gives us. Later on we may wish to
852 850 * have the anon array dynamically allocated itself so that
853 851 * we don't always have to allocate all the anon pointer slots.
854 852 * This of course involves adding extra code to check that we
855 853 * aren't trying to use an anon pointer slot beyond the end
856 854 * of the currently allocated anon array.
857 855 */
858 856 if ((amp->size - a->offset) < seg->s_size) {
859 857 panic("segvn_create anon_map size");
860 858 /*NOTREACHED*/
861 859 }
862 860
863 861 anon_num = btopr(a->offset);
864 862
865 863 if (a->type == MAP_SHARED) {
866 864 /*
867 865 * SHARED mapping to a given anon_map.
868 866 */
869 867 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
870 868 amp->refcnt++;
871 869 if (a->szc > amp->a_szc) {
872 870 amp->a_szc = a->szc;
873 871 }
874 872 ANON_LOCK_EXIT(&amp->a_rwlock);
875 873 svd->anon_index = anon_num;
876 874 svd->swresv = 0;
877 875 } else {
878 876 /*
879 877 * PRIVATE mapping to a given anon_map.
880 878 * Make sure that all the needed anon
881 879 * structures are created (so that we will
882 880 * share the underlying pages if nothing
883 881 * is written by this mapping) and then
884 882 * duplicate the anon array as is done
885 883 * when a privately mapped segment is dup'ed.
886 884 */
887 885 struct anon *ap;
888 886 caddr_t addr;
889 887 caddr_t eaddr;
890 888 ulong_t anon_idx;
891 889 int hat_flag = HAT_LOAD;
892 890
893 891 if (svd->flags & MAP_TEXT) {
894 892 hat_flag |= HAT_LOAD_TEXT;
895 893 }
896 894
897 895 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
898 896 svd->amp->a_szc = seg->s_szc;
899 897 svd->anon_index = 0;
900 898 svd->swresv = swresv;
901 899
902 900 /*
903 901 * Prevent 2 threads from allocating anon
904 902 * slots simultaneously.
905 903 */
906 904 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
907 905 eaddr = seg->s_base + seg->s_size;
908 906
909 907 for (anon_idx = anon_num, addr = seg->s_base;
910 908 addr < eaddr; addr += PAGESIZE, anon_idx++) {
911 909 page_t *pp;
912 910
913 911 if ((ap = anon_get_ptr(amp->ahp,
914 912 anon_idx)) != NULL)
915 913 continue;
916 914
917 915 /*
918 916 * Allocate the anon struct now.
919 917 * Might as well load up translation
920 918 * to the page while we're at it...
921 919 */
922 920 pp = anon_zero(seg, addr, &ap, cred);
923 921 if (ap == NULL || pp == NULL) {
924 922 panic("segvn_create anon_zero");
925 923 /*NOTREACHED*/
926 924 }
927 925
928 926 /*
929 927 * Re-acquire the anon_map lock and
930 928 * initialize the anon array entry.
931 929 */
932 930 ASSERT(anon_get_ptr(amp->ahp,
933 931 anon_idx) == NULL);
934 932 (void) anon_set_ptr(amp->ahp, anon_idx, ap,
935 933 ANON_SLEEP);
936 934
937 935 ASSERT(seg->s_szc == 0);
938 936 ASSERT(!IS_VMODSORT(pp->p_vnode));
939 937
940 938 ASSERT(use_rgn == 0);
941 939 hat_memload(seg->s_as->a_hat, addr, pp,
942 940 svd->prot & ~PROT_WRITE, hat_flag);
943 941
944 942 page_unlock(pp);
945 943 }
946 944 ASSERT(seg->s_szc == 0);
947 945 anon_dup(amp->ahp, anon_num, svd->amp->ahp,
948 946 0, seg->s_size);
949 947 ANON_LOCK_EXIT(&amp->a_rwlock);
950 948 }
951 949 }
952 950
953 951 /*
954 952 * Set default memory allocation policy for segment
955 953 *
956 954 * Always set policy for private memory at least for initialization
957 955 * even if this is a shared memory segment
958 956 */
959 957 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);
960 958
961 959 if (svd->type == MAP_SHARED)
962 960 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
963 961 svd->vp, svd->offset, seg->s_size);
964 962
965 963 if (use_rgn) {
966 964 ASSERT(!trok);
967 965 ASSERT(svd->amp == NULL);
968 966 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
969 967 seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
970 968 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
971 969 HAT_REGION_TEXT);
972 970 }
973 971
974 972 ASSERT(!trok || !(svd->prot & PROT_WRITE));
975 973 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;
976 974
977 975 return (0);
978 976 }
979 977
980 978 /*
981 979 * Concatenate two existing segments, if possible.
982 980 * Return 0 on success, -1 if two segments are not compatible
983 981 * or -2 on memory allocation failure.
984 982 * If amp_cat == 1 then try and concat segments with anon maps
985 983 */
986 984 static int
987 985 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
988 986 {
989 987 struct segvn_data *svd1 = seg1->s_data;
990 988 struct segvn_data *svd2 = seg2->s_data;
991 989 struct anon_map *amp1 = svd1->amp;
992 990 struct anon_map *amp2 = svd2->amp;
993 991 struct vpage *vpage1 = svd1->vpage;
994 992 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
995 993 size_t size, nvpsize;
996 994 pgcnt_t npages1, npages2;
997 995
998 996 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
999 997 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1000 998 ASSERT(seg1->s_ops == seg2->s_ops);
1001 999
1002 1000 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
1003 1001 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1004 1002 return (-1);
1005 1003 }
1006 1004
1007 1005 /* both segments exist, try to merge them */
1008 1006 #define incompat(x) (svd1->x != svd2->x)
1009 1007 if (incompat(vp) || incompat(maxprot) ||
1010 1008 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
1011 1009 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
1012 1010 incompat(type) || incompat(cred) || incompat(flags) ||
1013 1011 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
1014 1012 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
1015 1013 return (-1);
1016 1014 #undef incompat
1017 1015
1018 1016 /*
1019 1017 * vp == NULL implies zfod, offset doesn't matter
1020 1018 */
1021 1019 if (svd1->vp != NULL &&
1022 1020 svd1->offset + seg1->s_size != svd2->offset) {
1023 1021 return (-1);
1024 1022 }
1025 1023
1026 1024 /*
1027 1025 * Don't concatenate if either segment uses text replication.
1028 1026 */
1029 1027 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
1030 1028 return (-1);
1031 1029 }
1032 1030
1033 1031 /*
1034 1032 * Fail early if we're not supposed to concatenate
1035 1033 * segments with non NULL amp.
1036 1034 */
1037 1035 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
1038 1036 return (-1);
1039 1037 }
1040 1038
1041 1039 if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
1042 1040 if (amp1 != amp2) {
1043 1041 return (-1);
1044 1042 }
1045 1043 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
1046 1044 svd2->anon_index) {
1047 1045 return (-1);
1048 1046 }
1049 1047 ASSERT(amp1 == NULL || amp1->refcnt >= 2);
1050 1048 }
1051 1049
1052 1050 /*
1053 1051 * If either seg has vpages, create a new merged vpage array.
1054 1052 */
1055 1053 if (vpage1 != NULL || vpage2 != NULL) {
1056 1054 struct vpage *vp, *evp;
1057 1055
1058 1056 npages1 = seg_pages(seg1);
1059 1057 npages2 = seg_pages(seg2);
1060 1058 nvpsize = vpgtob(npages1 + npages2);
1061 1059
1062 1060 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
1063 1061 return (-2);
1064 1062 }
1065 1063
1066 1064 if (vpage1 != NULL) {
1067 1065 bcopy(vpage1, nvpage, vpgtob(npages1));
1068 1066 } else {
1069 1067 evp = nvpage + npages1;
1070 1068 for (vp = nvpage; vp < evp; vp++) {
1071 1069 VPP_SETPROT(vp, svd1->prot);
1072 1070 VPP_SETADVICE(vp, svd1->advice);
1073 1071 }
1074 1072 }
1075 1073
1076 1074 if (vpage2 != NULL) {
1077 1075 bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
1078 1076 } else {
1079 1077 evp = nvpage + npages1 + npages2;
1080 1078 for (vp = nvpage + npages1; vp < evp; vp++) {
1081 1079 VPP_SETPROT(vp, svd2->prot);
1082 1080 VPP_SETADVICE(vp, svd2->advice);
1083 1081 }
1084 1082 }
1085 1083
1086 1084 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
1087 1085 ASSERT(svd1->swresv == seg1->s_size);
1088 1086 ASSERT(!(svd1->flags & MAP_NORESERVE));
1089 1087 ASSERT(!(svd2->flags & MAP_NORESERVE));
1090 1088 evp = nvpage + npages1;
1091 1089 for (vp = nvpage; vp < evp; vp++) {
1092 1090 VPP_SETSWAPRES(vp);
1093 1091 }
1094 1092 }
1095 1093
1096 1094 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
1097 1095 ASSERT(svd2->swresv == seg2->s_size);
1098 1096 ASSERT(!(svd1->flags & MAP_NORESERVE));
1099 1097 ASSERT(!(svd2->flags & MAP_NORESERVE));
1100 1098 vp = nvpage + npages1;
1101 1099 evp = vp + npages2;
1102 1100 for (; vp < evp; vp++) {
1103 1101 VPP_SETSWAPRES(vp);
1104 1102 }
1105 1103 }
1106 1104 }
1107 1105 ASSERT((vpage1 != NULL || vpage2 != NULL) ||
1108 1106 (svd1->pageswap == 0 && svd2->pageswap == 0));
1109 1107
1110 1108 /*
1111 1109 * If either segment has private pages, create a new merged anon
1112 1110 * array. If merging shared anon segments, just decrement the anon map's
1113 1111 * refcnt.
1114 1112 */
1115 1113 if (amp1 != NULL && svd1->type == MAP_SHARED) {
1116 1114 ASSERT(amp1 == amp2 && svd1->vp == NULL);
1117 1115 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1118 1116 ASSERT(amp1->refcnt >= 2);
1119 1117 amp1->refcnt--;
1120 1118 ANON_LOCK_EXIT(&amp1->a_rwlock);
1121 1119 svd2->amp = NULL;
1122 1120 } else if (amp1 != NULL || amp2 != NULL) {
1123 1121 struct anon_hdr *nahp;
1124 1122 struct anon_map *namp = NULL;
1125 1123 size_t asize;
1126 1124
1127 1125 ASSERT(svd1->type == MAP_PRIVATE);
1128 1126
1129 1127 asize = seg1->s_size + seg2->s_size;
1130 1128 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
1131 1129 if (nvpage != NULL) {
1132 1130 kmem_free(nvpage, nvpsize);
1133 1131 }
1134 1132 return (-2);
1135 1133 }
1136 1134 if (amp1 != NULL) {
1137 1135 /*
1138 1136 * XXX anon rwlock is not really needed because
1139 1137 * this is a private segment and we are writers.
1140 1138 */
1141 1139 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1142 1140 ASSERT(amp1->refcnt == 1);
1143 1141 if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
1144 1142 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
1145 1143 anon_release(nahp, btop(asize));
1146 1144 ANON_LOCK_EXIT(&amp1->a_rwlock);
1147 1145 if (nvpage != NULL) {
1148 1146 kmem_free(nvpage, nvpsize);
1149 1147 }
1150 1148 return (-2);
1151 1149 }
1152 1150 }
1153 1151 if (amp2 != NULL) {
1154 1152 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1155 1153 ASSERT(amp2->refcnt == 1);
1156 1154 if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
1157 1155 nahp, btop(seg1->s_size), btop(seg2->s_size),
1158 1156 ANON_NOSLEEP)) {
1159 1157 anon_release(nahp, btop(asize));
1160 1158 ANON_LOCK_EXIT(&amp2->a_rwlock);
1161 1159 if (amp1 != NULL) {
1162 1160 ANON_LOCK_EXIT(&amp1->a_rwlock);
1163 1161 }
1164 1162 if (nvpage != NULL) {
1165 1163 kmem_free(nvpage, nvpsize);
1166 1164 }
1167 1165 return (-2);
1168 1166 }
1169 1167 }
1170 1168 if (amp1 != NULL) {
1171 1169 namp = amp1;
1172 1170 anon_release(amp1->ahp, btop(amp1->size));
1173 1171 }
1174 1172 if (amp2 != NULL) {
1175 1173 if (namp == NULL) {
1176 1174 ASSERT(amp1 == NULL);
1177 1175 namp = amp2;
1178 1176 anon_release(amp2->ahp, btop(amp2->size));
1179 1177 } else {
1180 1178 amp2->refcnt--;
1181 1179 ANON_LOCK_EXIT(&amp2->a_rwlock);
1182 1180 anonmap_free(amp2);
1183 1181 }
1184 1182 svd2->amp = NULL; /* needed for seg_free */
1185 1183 }
1186 1184 namp->ahp = nahp;
1187 1185 namp->size = asize;
1188 1186 svd1->amp = namp;
1189 1187 svd1->anon_index = 0;
1190 1188 ANON_LOCK_EXIT(&namp->a_rwlock);
1191 1189 }
1192 1190 /*
1193 1191 * Now free the old vpage structures.
1194 1192 */
1195 1193 if (nvpage != NULL) {
1196 1194 if (vpage1 != NULL) {
1197 1195 kmem_free(vpage1, vpgtob(npages1));
1198 1196 }
1199 1197 if (vpage2 != NULL) {
1200 1198 svd2->vpage = NULL;
1201 1199 kmem_free(vpage2, vpgtob(npages2));
1202 1200 }
1203 1201 if (svd2->pageprot) {
1204 1202 svd1->pageprot = 1;
1205 1203 }
1206 1204 if (svd2->pageadvice) {
1207 1205 svd1->pageadvice = 1;
1208 1206 }
1209 1207 if (svd2->pageswap) {
1210 1208 svd1->pageswap = 1;
1211 1209 }
1212 1210 svd1->vpage = nvpage;
1213 1211 }
1214 1212
1215 1213 /* all looks ok, merge segments */
1216 1214 svd1->swresv += svd2->swresv;
1217 1215 svd2->swresv = 0; /* so seg_free doesn't release swap space */
1218 1216 size = seg2->s_size;
1219 1217 seg_free(seg2);
1220 1218 seg1->s_size += size;
1221 1219 return (0);
1222 1220 }
1223 1221
1224 1222 /*
1225 1223 * Extend the previous segment (seg1) to include the
1226 1224 * new segment (seg2 + a), if possible.
1227 1225 * Return 0 on success.
1228 1226 */
1229 1227 static int
1230 1228 segvn_extend_prev(
1231 1229     struct seg *seg1,
1232 1230     struct seg *seg2,
1233 1231     struct segvn_crargs *a, size_t swresv)
1234 1232 {
1235 1233 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
1236 1234 size_t size;
1237 1235 struct anon_map *amp1;
1238 1236 struct vpage *new_vpage;
1239 1237
1240 1238 /*
1241 1239 * We don't need any segment level locks for "segvn" data
1242 1240 * since the address space is "write" locked.
1243 1241 */
1244 1242 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1245 1243
1246 1244 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
1247 1245 return (-1);
1248 1246 }
1249 1247
1250 1248 /* second segment is new, try to extend first */
1251 1249 /* XXX - should also check cred */
1252 1250 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
1253 1251 (!svd1->pageprot && (svd1->prot != a->prot)) ||
1254 1252 svd1->type != a->type || svd1->flags != a->flags ||
1255 1253 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
1256 1254 return (-1);
1257 1255
1258 1256 /* vp == NULL implies zfod, offset doesn't matter */
1259 1257 if (svd1->vp != NULL &&
1260 1258 svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
1261 1259 return (-1);
1262 1260
1263 1261 if (svd1->tr_state != SEGVN_TR_OFF) {
1264 1262 return (-1);
1265 1263 }
1266 1264
1267 1265 amp1 = svd1->amp;
1268 1266 if (amp1) {
1269 1267 pgcnt_t newpgs;
1270 1268
1271 1269 /*
1272 1270 * Segment has private pages, can data structures
1273 1271 * be expanded?
1274 1272 *
1275 1273 * Acquire the anon_map lock to prevent it from changing,
1276 1274 * if it is shared. This ensures that the anon_map
1277 1275 * will not change while a thread which has a read/write
1278 1276 * lock on an address space references it.
1279 1277 * XXX - Don't need the anon_map lock at all if "refcnt"
1280 1278 * is 1.
1281 1279 *
1282 1280 * Can't grow a MAP_SHARED segment with an anonmap because
1283 1281 * there may be existing anon slots where we want to extend
1284 1282 * the segment and we wouldn't know what to do with them
1285 1283 * (e.g., for tmpfs right thing is to just leave them there,
1286 1284 * for /dev/zero they should be cleared out).
1287 1285 */
1288 1286 if (svd1->type == MAP_SHARED)
1289 1287 return (-1);
1290 1288
1291 1289 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1292 1290 if (amp1->refcnt > 1) {
1293 1291 ANON_LOCK_EXIT(&amp1->a_rwlock);
1294 1292 return (-1);
1295 1293 }
1296 1294 newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
1297 1295 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);
1298 1296
1299 1297 if (newpgs == 0) {
1300 1298 ANON_LOCK_EXIT(&amp1->a_rwlock);
1301 1299 return (-1);
1302 1300 }
1303 1301 amp1->size = ptob(newpgs);
1304 1302 ANON_LOCK_EXIT(&amp1->a_rwlock);
1305 1303 }
1306 1304 if (svd1->vpage != NULL) {
1307 1305 struct vpage *vp, *evp;
1308 1306 new_vpage =
1309 1307 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1310 1308 KM_NOSLEEP);
1311 1309 if (new_vpage == NULL)
1312 1310 return (-1);
1313 1311 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
1314 1312 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
1315 1313 svd1->vpage = new_vpage;
1316 1314
1317 1315 vp = new_vpage + seg_pages(seg1);
1318 1316 evp = vp + seg_pages(seg2);
1319 1317 for (; vp < evp; vp++)
1320 1318 VPP_SETPROT(vp, a->prot);
1321 1319 if (svd1->pageswap && swresv) {
1322 1320 ASSERT(!(svd1->flags & MAP_NORESERVE));
1323 1321 ASSERT(swresv == seg2->s_size);
1324 1322 vp = new_vpage + seg_pages(seg1);
1325 1323 for (; vp < evp; vp++) {
1326 1324 VPP_SETSWAPRES(vp);
1327 1325 }
1328 1326 }
1329 1327 }
1330 1328 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
1331 1329 size = seg2->s_size;
1332 1330 seg_free(seg2);
1333 1331 seg1->s_size += size;
1334 1332 svd1->swresv += swresv;
1335 1333 if (svd1->pageprot && (a->prot & PROT_WRITE) &&
1336 1334 svd1->type == MAP_SHARED && svd1->vp != NULL &&
1337 1335 (svd1->vp->v_flag & VVMEXEC)) {
1338 1336 ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
1339 1337 segvn_inval_trcache(svd1->vp);
1340 1338 }
1341 1339 return (0);
1342 1340 }
1343 1341
1344 1342 /*
1345 1343 * Extend the next segment (seg2) to include the
1346 1344 * new segment (seg1 + a), if possible.
1347 1345 * Return 0 on success.
1348 1346 */
1349 1347 static int
1350 1348 segvn_extend_next(
1351 1349 struct seg *seg1,
1352 1350 struct seg *seg2,
1353 1351 struct segvn_crargs *a,
1354 1352 size_t swresv)
1355 1353 {
1356 1354 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
1357 1355 size_t size;
1358 1356 struct anon_map *amp2;
1359 1357 struct vpage *new_vpage;
1360 1358
1361 1359 /*
1362 1360 * We don't need any segment level locks for "segvn" data
1363 1361 * since the address space is "write" locked.
1364 1362 */
1365 1363 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));
1366 1364
1367 1365 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1368 1366 return (-1);
1369 1367 }
1370 1368
1371 1369 /* first segment is new, try to extend second */
1372 1370 /* XXX - should also check cred */
1373 1371 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
1374 1372 (!svd2->pageprot && (svd2->prot != a->prot)) ||
1375 1373 svd2->type != a->type || svd2->flags != a->flags ||
1376 1374 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
1377 1375 return (-1);
1378 1376 /* vp == NULL implies zfod, offset doesn't matter */
1379 1377 if (svd2->vp != NULL &&
1380 1378 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
1381 1379 return (-1);
1382 1380
1383 1381 if (svd2->tr_state != SEGVN_TR_OFF) {
1384 1382 return (-1);
1385 1383 }
1386 1384
1387 1385 amp2 = svd2->amp;
1388 1386 if (amp2) {
1389 1387 pgcnt_t newpgs;
1390 1388
1391 1389 /*
1392 1390 * Segment has private pages, can data structures
1393 1391 * be expanded?
1394 1392 *
1395 1393 * Acquire the anon_map lock to prevent it from changing,
1396 1394 * if it is shared. This ensures that the anon_map
1397 1395 * will not change while a thread which has a read/write
1398 1396 * lock on an address space references it.
1399 1397 *
1400 1398 * XXX - Don't need the anon_map lock at all if "refcnt"
1401 1399 * is 1.
1402 1400 */
1403 1401 if (svd2->type == MAP_SHARED)
1404 1402 return (-1);
1405 1403
1406 1404 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1407 1405 if (amp2->refcnt > 1) {
1408 1406 ANON_LOCK_EXIT(&amp2->a_rwlock);
1409 1407 return (-1);
1410 1408 }
1411 1409 newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
1412 1410 btop(seg2->s_size), btop(seg1->s_size),
1413 1411 ANON_NOSLEEP | ANON_GROWDOWN);
1414 1412
1415 1413 if (newpgs == 0) {
1416 1414 ANON_LOCK_EXIT(&amp2->a_rwlock);
1417 1415 return (-1);
1418 1416 }
1419 1417 amp2->size = ptob(newpgs);
1420 1418 ANON_LOCK_EXIT(&amp2->a_rwlock);
1421 1419 }
1422 1420 if (svd2->vpage != NULL) {
1423 1421 struct vpage *vp, *evp;
1424 1422 new_vpage =
1425 1423 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1426 1424 KM_NOSLEEP);
1427 1425 if (new_vpage == NULL) {
1428 1426 /* Not merging segments so adjust anon_index back */
1429 1427 if (amp2)
1430 1428 svd2->anon_index += seg_pages(seg1);
1431 1429 return (-1);
1432 1430 }
1433 1431 bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
1434 1432 vpgtob(seg_pages(seg2)));
1435 1433 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
1436 1434 svd2->vpage = new_vpage;
1437 1435
1438 1436 vp = new_vpage;
1439 1437 evp = vp + seg_pages(seg1);
1440 1438 for (; vp < evp; vp++)
1441 1439 VPP_SETPROT(vp, a->prot);
1442 1440 if (svd2->pageswap && swresv) {
1443 1441 ASSERT(!(svd2->flags & MAP_NORESERVE));
1444 1442 ASSERT(swresv == seg1->s_size);
1445 1443 vp = new_vpage;
1446 1444 for (; vp < evp; vp++) {
1447 1445 VPP_SETSWAPRES(vp);
1448 1446 }
1449 1447 }
1450 1448 }
1451 1449 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
1452 1450 size = seg1->s_size;
1453 1451 seg_free(seg1);
1454 1452 seg2->s_size += size;
1455 1453 seg2->s_base -= size;
1456 1454 svd2->offset -= size;
1457 1455 svd2->swresv += swresv;
1458 1456 if (svd2->pageprot && (a->prot & PROT_WRITE) &&
1459 1457 svd2->type == MAP_SHARED && svd2->vp != NULL &&
1460 1458 (svd2->vp->v_flag & VVMEXEC)) {
1461 1459 ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
1462 1460 segvn_inval_trcache(svd2->vp);
1463 1461 }
1464 1462 return (0);
1465 1463 }
1466 1464
1467 1465 static int
1468 1466 segvn_dup(struct seg *seg, struct seg *newseg)
1469 1467 {
1470 1468 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1471 1469 struct segvn_data *newsvd;
1472 1470 pgcnt_t npages = seg_pages(seg);
1473 1471 int error = 0;
1474 1472 uint_t prot;
1475 1473 size_t len;
1476 1474 struct anon_map *amp;
1477 1475
1478 1476 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1479 1477 ASSERT(newseg->s_as->a_proc->p_parent == curproc);
1480 1478
1481 1479 /*
1482 1480 * If segment has anon reserved, reserve more for the new seg.
1483 1481 * For a MAP_NORESERVE segment swresv will be a count of all the
1484 1482 * allocated anon slots; thus we reserve for the child as many slots
1485 1483 * as the parent has allocated. This semantic prevents the child or
1486 1484 * parent from dying during a copy-on-write fault caused by trying
1487 1485 * to write a shared pre-existing anon page.
1488 1486 */
1489 1487 if ((len = svd->swresv) != 0) {
1490 1488 if (anon_resv(svd->swresv) == 0)
1491 1489 return (ENOMEM);
1492 1490
1493 1491 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
1494 1492 seg, len, 0);
1495 1493 }
1496 1494
1497 1495 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
1498 1496
1499 1497 newseg->s_ops = &segvn_ops;
1500 1498 newseg->s_data = (void *)newsvd;
1501 1499 newseg->s_szc = seg->s_szc;
1502 1500
1503 1501 newsvd->seg = newseg;
1504 1502 if ((newsvd->vp = svd->vp) != NULL) {
1505 1503 VN_HOLD(svd->vp);
1506 1504 if (svd->type == MAP_SHARED)
1507 1505 lgrp_shm_policy_init(NULL, svd->vp);
1508 1506 }
1509 1507 newsvd->offset = svd->offset;
1510 1508 newsvd->prot = svd->prot;
1511 1509 newsvd->maxprot = svd->maxprot;
1512 1510 newsvd->pageprot = svd->pageprot;
1513 1511 newsvd->type = svd->type;
1514 1512 newsvd->cred = svd->cred;
1515 1513 crhold(newsvd->cred);
1516 1514 newsvd->advice = svd->advice;
1517 1515 newsvd->pageadvice = svd->pageadvice;
1518 1516 newsvd->swresv = svd->swresv;
1519 1517 newsvd->pageswap = svd->pageswap;
1520 1518 newsvd->flags = svd->flags;
1521 1519 newsvd->softlockcnt = 0;
1522 1520 newsvd->softlockcnt_sbase = 0;
1523 1521 newsvd->softlockcnt_send = 0;
1524 1522 newsvd->policy_info = svd->policy_info;
1525 1523 newsvd->rcookie = HAT_INVALID_REGION_COOKIE;
1526 1524
1527 1525 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
1528 1526 /*
1529 1527 * Not attaching to a shared anon object.
1530 1528 */
1531 1529 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
1532 1530 svd->tr_state == SEGVN_TR_OFF);
1533 1531 if (svd->tr_state == SEGVN_TR_ON) {
1534 1532 ASSERT(newsvd->vp != NULL && amp != NULL);
1535 1533 newsvd->tr_state = SEGVN_TR_INIT;
1536 1534 } else {
1537 1535 newsvd->tr_state = svd->tr_state;
1538 1536 }
1539 1537 newsvd->amp = NULL;
1540 1538 newsvd->anon_index = 0;
1541 1539 } else {
1542 1540 /* regions for now are only used on pure vnode segments */
1543 1541 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
1544 1542 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1545 1543 newsvd->tr_state = SEGVN_TR_OFF;
1546 1544 if (svd->type == MAP_SHARED) {
1547 1545 newsvd->amp = amp;
1548 1546 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1549 1547 amp->refcnt++;
1550 1548 ANON_LOCK_EXIT(&amp->a_rwlock);
1551 1549 newsvd->anon_index = svd->anon_index;
1552 1550 } else {
1553 1551 int reclaim = 1;
1554 1552
1555 1553 /*
1556 1554 * Allocate and initialize new anon_map structure.
1557 1555 */
1558 1556 newsvd->amp = anonmap_alloc(newseg->s_size, 0,
1559 1557 ANON_SLEEP);
1560 1558 newsvd->amp->a_szc = newseg->s_szc;
1561 1559 newsvd->anon_index = 0;
1562 1560
1563 1561 /*
1564 1562 * We don't have to acquire the anon_map lock
1565 1563 * for the new segment (since it belongs to an
1566 1564 * address space that is still not associated
1567 1565 * with any process), or the segment in the old
1568 1566 * address space (since all threads in it
1569 1567 * are stopped while duplicating the address space).
1570 1568 */
1571 1569
1572 1570 /*
1573 1571 * The goal of the following code is to make sure that
1574 1572 * softlocked pages do not end up as copy on write
1575 1573 * pages. This would cause problems where one
1576 1574 * thread writes to a page that is COW and a different
1577 1575 * thread in the same process has softlocked it. The
1578 1576 * softlock lock would move away from this process
1579 1577 * because the write would cause this process to get
1580 1578 * a copy (without the softlock).
1581 1579 *
1582 1580 * The strategy here is to just break the
1583 1581 * sharing on pages that could possibly be
1584 1582 * softlocked.
1585 1583 */
1586 1584 retry:
1587 1585 if (svd->softlockcnt) {
1588 1586 struct anon *ap, *newap;
1589 1587 size_t i;
1590 1588 uint_t vpprot;
1591 1589 page_t *anon_pl[1+1], *pp;
1592 1590 caddr_t addr;
1593 1591 ulong_t old_idx = svd->anon_index;
1594 1592 ulong_t new_idx = 0;
1595 1593
1596 1594 /*
1597 1595 * The softlock count might be non-zero
1598 1596 * because some pages are still stuck in the
1599 1597 * cache for lazy reclaim. Flush the cache
1600 1598 * now; this should drop the count to zero
1601 1599 * (unless there really is I/O in progress on
1602 1600 * these pages). Note that we hold the writer's
1603 1601 * lock, so nothing gets inserted during the flush.
1604 1602 */
1605 1603 if (reclaim == 1) {
1606 1604 segvn_purge(seg);
1607 1605 reclaim = 0;
1608 1606 goto retry;
1609 1607 }
1610 1608 i = btopr(seg->s_size);
1611 1609 addr = seg->s_base;
1612 1610 /*
1613 1611 * XXX break cow sharing using PAGESIZE
1614 1612 * pages. They will be relocated into larger
1615 1613 * pages at fault time.
1616 1614 */
1617 1615 while (i-- > 0) {
1618 1616 if (ap = anon_get_ptr(amp->ahp,
1619 1617 old_idx)) {
1620 1618 error = anon_getpage(&ap,
1621 1619 &vpprot, anon_pl, PAGESIZE,
1622 1620 seg, addr, S_READ,
1623 1621 svd->cred);
1624 1622 if (error) {
1625 1623 newsvd->vpage = NULL;
1626 1624 goto out;
1627 1625 }
1628 1626 /*
1629 1627 * prot need not be computed
1630 1628 * below because anon_private is
1631 1629 * going to ignore it anyway,
1632 1630 * as the child doesn't inherit
1633 1631 * the pagelock from the parent.
1634 1632 */
1635 1633 prot = svd->pageprot ?
1636 1634 VPP_PROT(
1637 1635 &svd->vpage[
1638 1636 seg_page(seg, addr)])
1639 1637 : svd->prot;
1640 1638 pp = anon_private(&newap,
1641 1639 newseg, addr, prot,
1642 1640 anon_pl[0], 0,
1643 1641 newsvd->cred);
1644 1642 if (pp == NULL) {
1645 1643 /* no mem abort */
1646 1644 newsvd->vpage = NULL;
1647 1645 error = ENOMEM;
1648 1646 goto out;
1649 1647 }
1650 1648 (void) anon_set_ptr(
1651 1649 newsvd->amp->ahp, new_idx,
1652 1650 newap, ANON_SLEEP);
1653 1651 page_unlock(pp);
1654 1652 }
1655 1653 addr += PAGESIZE;
1656 1654 old_idx++;
1657 1655 new_idx++;
1658 1656 }
1659 1657 } else { /* common case */
1660 1658 if (seg->s_szc != 0) {
1661 1659 /*
1662 1660 * If at least one of anon slots of a
1663 1661 * large page exists then make sure
1664 1662 * all anon slots of a large page
1665 1663 * exist to avoid partial cow sharing
1666 1664 * of a large page in the future.
1667 1665 */
1668 1666 anon_dup_fill_holes(amp->ahp,
1669 1667 svd->anon_index, newsvd->amp->ahp,
1670 1668 0, seg->s_size, seg->s_szc,
1671 1669 svd->vp != NULL);
1672 1670 } else {
1673 1671 anon_dup(amp->ahp, svd->anon_index,
1674 1672 newsvd->amp->ahp, 0, seg->s_size);
1675 1673 }
1676 1674
1677 1675 hat_clrattr(seg->s_as->a_hat, seg->s_base,
1678 1676 seg->s_size, PROT_WRITE);
1679 1677 }
1680 1678 }
1681 1679 }
1682 1680 /*
1683 1681 * If necessary, create a vpage structure for the new segment.
1684 1682 * Do not copy any page lock indications.
1685 1683 */
1686 1684 if (svd->vpage != NULL) {
1687 1685 uint_t i;
1688 1686 struct vpage *ovp = svd->vpage;
1689 1687 struct vpage *nvp;
1690 1688
1691 1689 nvp = newsvd->vpage =
1692 1690 kmem_alloc(vpgtob(npages), KM_SLEEP);
1693 1691 for (i = 0; i < npages; i++) {
1694 1692 *nvp = *ovp++;
1695 1693 VPP_CLRPPLOCK(nvp++);
1696 1694 }
1697 1695 } else
1698 1696 newsvd->vpage = NULL;
1699 1697
1700 1698 /* Inform the vnode of the new mapping */
1701 1699 if (newsvd->vp != NULL) {
1702 1700 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
1703 1701 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
1704 1702 newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
1705 1703 }
1706 1704 out:
1707 1705 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1708 1706 ASSERT(newsvd->amp == NULL);
1709 1707 ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
1710 1708 newsvd->rcookie = svd->rcookie;
1711 1709 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
1712 1710 }
1713 1711 return (error);
1714 1712 }
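/*
 * Hedged summary of the amp handling in segvn_dup() above, derived
 * from the code rather than taken from the original comments:
 *
 *	if (svd->amp == NULL || svd->tr_state == SEGVN_TR_ON)
 *		child gets no amp; a TR segment is marked SEGVN_TR_INIT
 *		so replication is redone on the child's first fault;
 *	else if (svd->type == MAP_SHARED)
 *		child shares the parent's amp (amp->refcnt++);
 *	else if (svd->softlockcnt != 0)
 *		break COW immediately via anon_getpage()/anon_private();
 *	else
 *		anon_dup() or anon_dup_fill_holes(), then hat_clrattr()
 *		strips PROT_WRITE so both sides fault on the next write.
 */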
1715 1713
1716 1714
1717 1715 /*
1718 1716 * callback function to invoke free_vp_pages() for only those pages actually
1719 1717 * processed by the HAT when a shared region is destroyed.
1720 1718 */
1721 1719 extern int free_pages;
1722 1720
1723 1721 static void
1724 1722 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
1725 1723 size_t r_size, void *r_obj, u_offset_t r_objoff)
1726 1724 {
1727 1725 u_offset_t off;
1728 1726 size_t len;
1729 1727 vnode_t *vp = (vnode_t *)r_obj;
1730 1728
1731 1729 ASSERT(eaddr > saddr);
1732 1730 ASSERT(saddr >= r_saddr);
1733 1731 ASSERT(saddr < r_saddr + r_size);
1734 1732 ASSERT(eaddr > r_saddr);
1735 1733 ASSERT(eaddr <= r_saddr + r_size);
1736 1734 ASSERT(vp != NULL);
1737 1735
1738 1736 if (!free_pages) {
1739 1737 return;
1740 1738 }
1741 1739
1742 1740 len = eaddr - saddr;
1743 1741 off = (saddr - r_saddr) + r_objoff;
1744 1742 free_vp_pages(vp, off, len);
1745 1743 }
1746 1744
1747 1745 /*
1748 1746 * callback function used by segvn_unmap to invoke free_vp_pages() for only
1749 1747 * those pages actually processed by the HAT
1750 1748 */
1751 1749 static void
1752 1750 segvn_hat_unload_callback(hat_callback_t *cb)
1753 1751 {
1754 1752 struct seg *seg = cb->hcb_data;
1755 1753 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1756 1754 size_t len;
1757 1755 u_offset_t off;
1758 1756
1759 1757 ASSERT(svd->vp != NULL);
1760 1758 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
1761 1759 ASSERT(cb->hcb_start_addr >= seg->s_base);
1762 1760
1763 1761 len = cb->hcb_end_addr - cb->hcb_start_addr;
1764 1762 off = cb->hcb_start_addr - seg->s_base;
1765 1763 free_vp_pages(svd->vp, svd->offset + off, len);
1766 1764 }
1767 1765
1768 1766 /*
1769 1767 * This function determines the number of bytes of swap reserved by
1770 1768 * a segment for which per-page accounting is present. It is used to
1771 1769 * calculate the correct value of a segvn_data's swresv.
1772 1770 */
1773 1771 static size_t
1774 1772 segvn_count_swap_by_vpages(struct seg *seg)
1775 1773 {
1776 1774 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1777 1775 struct vpage *vp, *evp;
1778 1776 size_t nswappages = 0;
1779 1777
1780 1778 ASSERT(svd->pageswap);
1781 1779 ASSERT(svd->vpage != NULL);
1782 1780
1783 1781 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
1784 1782
1785 1783 for (vp = svd->vpage; vp < evp; vp++) {
1786 1784 if (VPP_ISSWAPRES(vp))
1787 1785 nswappages++;
1788 1786 }
1789 1787
1790 1788 return (nswappages << PAGESHIFT);
1791 1789 }
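/*
 * Worked example, purely illustrative: for an 8-page segment whose
 * vpage array has VPP_ISSWAPRES() set on three entries, the loop above
 * returns 3 << PAGESHIFT -- 12K with 4K pages. segvn_unmap() below
 * assigns exactly this value to svd->swresv when svd->pageswap is set.
 */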
1792 1790
1793 1791 static int
1794 1792 segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
1795 1793 {
1796 1794 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1797 1795 struct segvn_data *nsvd;
1798 1796 struct seg *nseg;
1799 1797 struct anon_map *amp;
1800 1798 pgcnt_t opages; /* old segment size in pages */
1801 1799 pgcnt_t npages; /* new segment size in pages */
1802 1800 pgcnt_t dpages; /* pages being deleted (unmapped) */
1803 1801 hat_callback_t callback; /* used for free_vp_pages() */
1804 1802 hat_callback_t *cbp = NULL;
1805 1803 caddr_t nbase;
1806 1804 size_t nsize;
1807 1805 size_t oswresv;
1808 1806 int reclaim = 1;
1809 1807
1810 1808 /*
1811 1809 * We don't need any segment level locks for "segvn" data
1812 1810 * since the address space is "write" locked.
1813 1811 */
1814 1812 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1815 1813
1816 1814 /*
1817 1815 * Fail the unmap if pages are SOFTLOCKed through this mapping.
1818 1816 * softlockcnt is protected from change by the as write lock.
1819 1817 */
1820 1818 retry:
1821 1819 if (svd->softlockcnt > 0) {
1822 1820 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1823 1821
1824 1822 /*
1825 1823 * If this is a shared segment, a non-zero softlockcnt
1826 1824 * means locked pages are still in use.
1827 1825 */
1828 1826 if (svd->type == MAP_SHARED) {
1829 1827 return (EAGAIN);
1830 1828 }
1831 1829
1832 1830 /*
1833 1831 * Since we hold the writer's lock, nobody can fill
1834 1832 * the cache during the purge. The flush either succeeds
1835 1833 * or we still have pending I/Os.
1836 1834 */
1837 1835 if (reclaim == 1) {
1838 1836 segvn_purge(seg);
1839 1837 reclaim = 0;
1840 1838 goto retry;
1841 1839 }
1842 1840 return (EAGAIN);
1843 1841 }
1844 1842
1845 1843 /*
1846 1844 * Check for bad sizes
1847 1845 */
1848 1846 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
1849 1847 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
1850 1848 panic("segvn_unmap");
1851 1849 /*NOTREACHED*/
1852 1850 }
1853 1851
1854 1852 if (seg->s_szc != 0) {
1855 1853 size_t pgsz = page_get_pagesize(seg->s_szc);
1856 1854 int err;
1857 1855 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
1858 1856 ASSERT(seg->s_base != addr || seg->s_size != len);
1859 1857 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1860 1858 ASSERT(svd->amp == NULL);
1861 1859 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1862 1860 hat_leave_region(seg->s_as->a_hat,
1863 1861 svd->rcookie, HAT_REGION_TEXT);
1864 1862 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1865 1863 /*
1866 1864 * could pass a flag to segvn_demote_range()
1867 1865 * below to tell it not to do any unloads but
1868 1866 * this case is rare enough to not bother for
1869 1867 * now.
1870 1868 */
1871 1869 } else if (svd->tr_state == SEGVN_TR_INIT) {
1872 1870 svd->tr_state = SEGVN_TR_OFF;
1873 1871 } else if (svd->tr_state == SEGVN_TR_ON) {
1874 1872 ASSERT(svd->amp != NULL);
1875 1873 segvn_textunrepl(seg, 1);
1876 1874 ASSERT(svd->amp == NULL);
1877 1875 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1878 1876 }
1879 1877 VM_STAT_ADD(segvnvmstats.demoterange[0]);
1880 1878 err = segvn_demote_range(seg, addr, len, SDR_END, 0);
1881 1879 if (err == 0) {
1882 1880 return (IE_RETRY);
1883 1881 }
1884 1882 return (err);
1885 1883 }
1886 1884 }
1887 1885
1888 1886 /* Inform the vnode of the unmapping. */
1889 1887 if (svd->vp) {
1890 1888 int error;
1891 1889
1892 1890 error = VOP_DELMAP(svd->vp,
1893 1891 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
1894 1892 seg->s_as, addr, len, svd->prot, svd->maxprot,
1895 1893 svd->type, svd->cred, NULL);
1896 1894
1897 1895 if (error == EAGAIN)
1898 1896 return (error);
1899 1897 }
1900 1898
1901 1899 /*
1902 1900 * Remove any page locks set through this mapping.
1903 1901 * If text replication is not off, no page locks could have been
1904 1902 * established via this mapping.
1905 1903 */
1906 1904 if (svd->tr_state == SEGVN_TR_OFF) {
1907 1905 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
1908 1906 }
1909 1907
1910 1908 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1911 1909 ASSERT(svd->amp == NULL);
1912 1910 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1913 1911 ASSERT(svd->type == MAP_PRIVATE);
1914 1912 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
1915 1913 HAT_REGION_TEXT);
1916 1914 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1917 1915 } else if (svd->tr_state == SEGVN_TR_ON) {
1918 1916 ASSERT(svd->amp != NULL);
1919 1917 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
1920 1918 segvn_textunrepl(seg, 1);
1921 1919 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
1922 1920 } else {
1923 1921 if (svd->tr_state != SEGVN_TR_OFF) {
1924 1922 ASSERT(svd->tr_state == SEGVN_TR_INIT);
1925 1923 svd->tr_state = SEGVN_TR_OFF;
1926 1924 }
1927 1925 /*
1928 1926 * Unload any hardware translations in the range to be taken
1929 1927 * out. Use a callback to invoke free_vp_pages() effectively.
1930 1928 */
1931 1929 if (svd->vp != NULL && free_pages != 0) {
1932 1930 callback.hcb_data = seg;
1933 1931 callback.hcb_function = segvn_hat_unload_callback;
1934 1932 cbp = &callback;
1935 1933 }
1936 1934 hat_unload_callback(seg->s_as->a_hat, addr, len,
1937 1935 HAT_UNLOAD_UNMAP, cbp);
1938 1936
1939 1937 if (svd->type == MAP_SHARED && svd->vp != NULL &&
1940 1938 (svd->vp->v_flag & VVMEXEC) &&
1941 1939 ((svd->prot & PROT_WRITE) || svd->pageprot)) {
1942 1940 segvn_inval_trcache(svd->vp);
1943 1941 }
1944 1942 }
1945 1943
1946 1944 /*
1947 1945 * Check for entire segment
1948 1946 */
1949 1947 if (addr == seg->s_base && len == seg->s_size) {
1950 1948 seg_free(seg);
1951 1949 return (0);
1952 1950 }
1953 1951
1954 1952 opages = seg_pages(seg);
1955 1953 dpages = btop(len);
1956 1954 npages = opages - dpages;
1957 1955 amp = svd->amp;
1958 1956 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);
1959 1957
1960 1958 /*
1961 1959 * Check for beginning of segment
1962 1960 */
1963 1961 if (addr == seg->s_base) {
1964 1962 if (svd->vpage != NULL) {
1965 1963 size_t nbytes;
1966 1964 struct vpage *ovpage;
1967 1965
1968 1966 ovpage = svd->vpage; /* keep pointer to vpage */
1969 1967
1970 1968 nbytes = vpgtob(npages);
1971 1969 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
1972 1970 bcopy(&ovpage[dpages], svd->vpage, nbytes);
1973 1971
1974 1972 /* free up old vpage */
1975 1973 kmem_free(ovpage, vpgtob(opages));
1976 1974 }
1977 1975 if (amp != NULL) {
1978 1976 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1979 1977 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
1980 1978 /*
1981 1979 * Shared anon map is no longer in use. Before
1982 1980 * freeing its pages purge all entries from
1983 1981 * pcache that belong to this amp.
1984 1982 */
1985 1983 if (svd->type == MAP_SHARED) {
1986 1984 ASSERT(amp->refcnt == 1);
1987 1985 ASSERT(svd->softlockcnt == 0);
1988 1986 anonmap_purge(amp);
1989 1987 }
1990 1988 /*
1991 1989 * Free up now unused parts of anon_map array.
1992 1990 */
1993 1991 if (amp->a_szc == seg->s_szc) {
1994 1992 if (seg->s_szc != 0) {
1995 1993 anon_free_pages(amp->ahp,
1996 1994 svd->anon_index, len,
1997 1995 seg->s_szc);
1998 1996 } else {
1999 1997 anon_free(amp->ahp,
2000 1998 svd->anon_index,
2001 1999 len);
2002 2000 }
2003 2001 } else {
2004 2002 ASSERT(svd->type == MAP_SHARED);
2005 2003 ASSERT(amp->a_szc > seg->s_szc);
2006 2004 anon_shmap_free_pages(amp,
2007 2005 svd->anon_index, len);
2008 2006 }
2009 2007
2010 2008 /*
2011 2009 * Unreserve swap space for the
2012 2010 * unmapped chunk of this segment in
2013 2011 * case it's MAP_SHARED
2014 2012 */
2015 2013 if (svd->type == MAP_SHARED) {
2016 2014 anon_unresv_zone(len,
2017 2015 seg->s_as->a_proc->p_zone);
2018 2016 amp->swresv -= len;
2019 2017 }
2020 2018 }
2021 2019 ANON_LOCK_EXIT(&amp->a_rwlock);
2022 2020 svd->anon_index += dpages;
2023 2021 }
2024 2022 if (svd->vp != NULL)
2025 2023 svd->offset += len;
2026 2024
2027 2025 seg->s_base += len;
2028 2026 seg->s_size -= len;
2029 2027
2030 2028 if (svd->swresv) {
2031 2029 if (svd->flags & MAP_NORESERVE) {
2032 2030 ASSERT(amp);
2033 2031 oswresv = svd->swresv;
2034 2032
2035 2033 svd->swresv = ptob(anon_pages(amp->ahp,
2036 2034 svd->anon_index, npages));
2037 2035 anon_unresv_zone(oswresv - svd->swresv,
2038 2036 seg->s_as->a_proc->p_zone);
2039 2037 if (SEG_IS_PARTIAL_RESV(seg))
2040 2038 seg->s_as->a_resvsize -= oswresv -
2041 2039 svd->swresv;
2042 2040 } else {
2043 2041 size_t unlen;
2044 2042
2045 2043 if (svd->pageswap) {
2046 2044 oswresv = svd->swresv;
2047 2045 svd->swresv =
2048 2046 segvn_count_swap_by_vpages(seg);
2049 2047 ASSERT(oswresv >= svd->swresv);
2050 2048 unlen = oswresv - svd->swresv;
2051 2049 } else {
2052 2050 svd->swresv -= len;
2053 2051 ASSERT(svd->swresv == seg->s_size);
2054 2052 unlen = len;
2055 2053 }
2056 2054 anon_unresv_zone(unlen,
2057 2055 seg->s_as->a_proc->p_zone);
2058 2056 }
2059 2057 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2060 2058 seg, len, 0);
2061 2059 }
2062 2060
2063 2061 return (0);
2064 2062 }
2065 2063
2066 2064 /*
2067 2065 * Check for end of segment
2068 2066 */
2069 2067 if (addr + len == seg->s_base + seg->s_size) {
2070 2068 if (svd->vpage != NULL) {
2071 2069 size_t nbytes;
2072 2070 struct vpage *ovpage;
2073 2071
2074 2072 ovpage = svd->vpage; /* keep pointer to vpage */
2075 2073
2076 2074 nbytes = vpgtob(npages);
2077 2075 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2078 2076 bcopy(ovpage, svd->vpage, nbytes);
2079 2077
2080 2078 /* free up old vpage */
2081 2079 kmem_free(ovpage, vpgtob(opages));
2082 2080
2083 2081 }
2084 2082 if (amp != NULL) {
2085 2083 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2086 2084 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2087 2085 /*
2088 2086 * Free up now unused parts of anon_map array.
2089 2087 */
2090 2088 ulong_t an_idx = svd->anon_index + npages;
2091 2089
2092 2090 /*
2093 2091 * Shared anon map is no longer in use. Before
2094 2092 * freeing its pages purge all entries from
2095 2093 * pcache that belong to this amp.
2096 2094 */
2097 2095 if (svd->type == MAP_SHARED) {
2098 2096 ASSERT(amp->refcnt == 1);
2099 2097 ASSERT(svd->softlockcnt == 0);
2100 2098 anonmap_purge(amp);
2101 2099 }
2102 2100
2103 2101 if (amp->a_szc == seg->s_szc) {
2104 2102 if (seg->s_szc != 0) {
2105 2103 anon_free_pages(amp->ahp,
2106 2104 an_idx, len,
2107 2105 seg->s_szc);
2108 2106 } else {
2109 2107 anon_free(amp->ahp, an_idx,
2110 2108 len);
2111 2109 }
2112 2110 } else {
2113 2111 ASSERT(svd->type == MAP_SHARED);
2114 2112 ASSERT(amp->a_szc > seg->s_szc);
2115 2113 anon_shmap_free_pages(amp,
2116 2114 an_idx, len);
2117 2115 }
2118 2116
2119 2117 /*
2120 2118 * Unreserve swap space for the
2121 2119 * unmapped chunk of this segment in
2122 2120 * case it's MAP_SHARED
2123 2121 */
2124 2122 if (svd->type == MAP_SHARED) {
2125 2123 anon_unresv_zone(len,
2126 2124 seg->s_as->a_proc->p_zone);
2127 2125 amp->swresv -= len;
2128 2126 }
2129 2127 }
2130 2128 ANON_LOCK_EXIT(&amp->a_rwlock);
2131 2129 }
2132 2130
2133 2131 seg->s_size -= len;
2134 2132
2135 2133 if (svd->swresv) {
2136 2134 if (svd->flags & MAP_NORESERVE) {
2137 2135 ASSERT(amp);
2138 2136 oswresv = svd->swresv;
2139 2137 svd->swresv = ptob(anon_pages(amp->ahp,
2140 2138 svd->anon_index, npages));
2141 2139 anon_unresv_zone(oswresv - svd->swresv,
2142 2140 seg->s_as->a_proc->p_zone);
2143 2141 if (SEG_IS_PARTIAL_RESV(seg))
2144 2142 seg->s_as->a_resvsize -= oswresv -
2145 2143 svd->swresv;
2146 2144 } else {
2147 2145 size_t unlen;
2148 2146
2149 2147 if (svd->pageswap) {
2150 2148 oswresv = svd->swresv;
2151 2149 svd->swresv =
2152 2150 segvn_count_swap_by_vpages(seg);
2153 2151 ASSERT(oswresv >= svd->swresv);
2154 2152 unlen = oswresv - svd->swresv;
2155 2153 } else {
2156 2154 svd->swresv -= len;
2157 2155 ASSERT(svd->swresv == seg->s_size);
2158 2156 unlen = len;
2159 2157 }
2160 2158 anon_unresv_zone(unlen,
2161 2159 seg->s_as->a_proc->p_zone);
2162 2160 }
2163 2161 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2164 2162 "anon proc:%p %lu %u", seg, len, 0);
2165 2163 }
2166 2164
2167 2165 return (0);
2168 2166 }
2169 2167
2170 2168 /*
2171 2169 * The section to go is in the middle of the segment,
2172 2170 * so we have to make it into two segments. nseg is made for
2173 2171 * the high end while seg is cut down at the low end.
2174 2172 */
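/*
 * Hedged picture of the split, with purely illustrative addresses:
 *
 *	s_base              addr    addr + len == nbase        old end
 *	|----- seg (kept) -----|-- unmapped --|----- nseg (new) -----|
 *
 * e.g. unmapping [0x14000, 0x18000) from a segment covering
 * [0x10000, 0x20000) shrinks seg to [0x10000, 0x14000) and creates
 * nseg covering [0x18000, 0x20000).
 */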
2175 2173 nbase = addr + len; /* new seg base */
2176 2174 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
2177 2175 seg->s_size = addr - seg->s_base; /* shrink old seg */
2178 2176 nseg = seg_alloc(seg->s_as, nbase, nsize);
2179 2177 if (nseg == NULL) {
2180 2178 panic("segvn_unmap seg_alloc");
2181 2179 /*NOTREACHED*/
2182 2180 }
2183 2181 nseg->s_ops = seg->s_ops;
2184 2182 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
2185 2183 nseg->s_data = (void *)nsvd;
2186 2184 nseg->s_szc = seg->s_szc;
2187 2185 *nsvd = *svd;
2188 2186 nsvd->seg = nseg;
2189 2187 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
2190 2188 nsvd->swresv = 0;
2191 2189 nsvd->softlockcnt = 0;
2192 2190 nsvd->softlockcnt_sbase = 0;
2193 2191 nsvd->softlockcnt_send = 0;
2194 2192 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
2195 2193
2196 2194 if (svd->vp != NULL) {
2197 2195 VN_HOLD(nsvd->vp);
2198 2196 if (nsvd->type == MAP_SHARED)
2199 2197 lgrp_shm_policy_init(NULL, nsvd->vp);
2200 2198 }
2201 2199 crhold(svd->cred);
2202 2200
2203 2201 if (svd->vpage == NULL) {
2204 2202 nsvd->vpage = NULL;
2205 2203 } else {
2206 2204 /* need to split vpage into two arrays */
2207 2205 size_t nbytes;
2208 2206 struct vpage *ovpage;
2209 2207
2210 2208 ovpage = svd->vpage; /* keep pointer to vpage */
2211 2209
2212 2210 npages = seg_pages(seg); /* seg has shrunk */
2213 2211 nbytes = vpgtob(npages);
2214 2212 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2215 2213
2216 2214 bcopy(ovpage, svd->vpage, nbytes);
2217 2215
2218 2216 npages = seg_pages(nseg);
2219 2217 nbytes = vpgtob(npages);
2220 2218 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2221 2219
2222 2220 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);
2223 2221
2224 2222 /* free up old vpage */
2225 2223 kmem_free(ovpage, vpgtob(opages));
2226 2224 }
2227 2225
2228 2226 if (amp == NULL) {
2229 2227 nsvd->amp = NULL;
2230 2228 nsvd->anon_index = 0;
2231 2229 } else {
2232 2230 /*
2233 2231 * Need to create a new anon map for the new segment.
2234 2232 * We'll also allocate a new smaller array for the old
2235 2233 * smaller segment to save space.
2236 2234 */
2237 2235 opages = btop((uintptr_t)(addr - seg->s_base));
2238 2236 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2239 2237 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2240 2238 /*
2241 2239 * Free up now unused parts of anon_map array.
2242 2240 */
2243 2241 ulong_t an_idx = svd->anon_index + opages;
2244 2242
2245 2243 /*
2246 2244 * Shared anon map is no longer in use. Before
2247 2245 * freeing its pages purge all entries from
2248 2246 * pcache that belong to this amp.
2249 2247 */
2250 2248 if (svd->type == MAP_SHARED) {
2251 2249 ASSERT(amp->refcnt == 1);
2252 2250 ASSERT(svd->softlockcnt == 0);
2253 2251 anonmap_purge(amp);
2254 2252 }
2255 2253
2256 2254 if (amp->a_szc == seg->s_szc) {
2257 2255 if (seg->s_szc != 0) {
2258 2256 anon_free_pages(amp->ahp, an_idx, len,
2259 2257 seg->s_szc);
2260 2258 } else {
2261 2259 anon_free(amp->ahp, an_idx,
2262 2260 len);
2263 2261 }
2264 2262 } else {
2265 2263 ASSERT(svd->type == MAP_SHARED);
2266 2264 ASSERT(amp->a_szc > seg->s_szc);
2267 2265 anon_shmap_free_pages(amp, an_idx, len);
2268 2266 }
2269 2267
2270 2268 /*
2271 2269 * Unreserve swap space for the
2272 2270 * unmapped chunk of this segment in
2273 2271 * case it's MAP_SHARED
2274 2272 */
2275 2273 if (svd->type == MAP_SHARED) {
2276 2274 anon_unresv_zone(len,
2277 2275 seg->s_as->a_proc->p_zone);
2278 2276 amp->swresv -= len;
2279 2277 }
2280 2278 }
2281 2279 nsvd->anon_index = svd->anon_index +
2282 2280 btop((uintptr_t)(nseg->s_base - seg->s_base));
2283 2281 if (svd->type == MAP_SHARED) {
2284 2282 amp->refcnt++;
2285 2283 nsvd->amp = amp;
2286 2284 } else {
2287 2285 struct anon_map *namp;
2288 2286 struct anon_hdr *nahp;
2289 2287
2290 2288 ASSERT(svd->type == MAP_PRIVATE);
2291 2289 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
2292 2290 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
2293 2291 namp->a_szc = seg->s_szc;
2294 2292 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
2295 2293 0, btop(seg->s_size), ANON_SLEEP);
2296 2294 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index,
2297 2295 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
2298 2296 anon_release(amp->ahp, btop(amp->size));
2299 2297 svd->anon_index = 0;
2300 2298 nsvd->anon_index = 0;
2301 2299 amp->ahp = nahp;
2302 2300 amp->size = seg->s_size;
2303 2301 nsvd->amp = namp;
2304 2302 }
2305 2303 ANON_LOCK_EXIT(&amp->a_rwlock);
2306 2304 }
2307 2305 if (svd->swresv) {
2308 2306 if (svd->flags & MAP_NORESERVE) {
2309 2307 ASSERT(amp);
2310 2308 oswresv = svd->swresv;
2311 2309 svd->swresv = ptob(anon_pages(amp->ahp,
2312 2310 svd->anon_index, btop(seg->s_size)));
2313 2311 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
2314 2312 nsvd->anon_index, btop(nseg->s_size)));
2315 2313 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2316 2314 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
2317 2315 seg->s_as->a_proc->p_zone);
2318 2316 if (SEG_IS_PARTIAL_RESV(seg))
2319 2317 seg->s_as->a_resvsize -= oswresv -
2320 2318 (svd->swresv + nsvd->swresv);
2321 2319 } else {
2322 2320 size_t unlen;
2323 2321
2324 2322 if (svd->pageswap) {
2325 2323 oswresv = svd->swresv;
2326 2324 svd->swresv = segvn_count_swap_by_vpages(seg);
2327 2325 nsvd->swresv = segvn_count_swap_by_vpages(nseg);
2328 2326 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2329 2327 unlen = oswresv - (svd->swresv + nsvd->swresv);
2330 2328 } else {
2331 2329 if (seg->s_size + nseg->s_size + len !=
2332 2330 svd->swresv) {
2333 2331 panic("segvn_unmap: cannot split "
2334 2332 "swap reservation");
2335 2333 /*NOTREACHED*/
2336 2334 }
2337 2335 svd->swresv = seg->s_size;
2338 2336 nsvd->swresv = nseg->s_size;
2339 2337 unlen = len;
2340 2338 }
2341 2339 anon_unresv_zone(unlen,
2342 2340 seg->s_as->a_proc->p_zone);
2343 2341 }
2344 2342 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2345 2343 seg, len, 0);
2346 2344 }
2347 2345
2348 2346 return (0); /* I'm glad that's all over with! */
2349 2347 }
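/*
 * Illustrative userland sketch, not from the source, of the three
 * partial-unmap cases handled above for a hypothetical 8-page
 * MAP_PRIVATE | MAP_ANON mapping p:
 *
 *	long pg = sysconf(_SC_PAGESIZE);
 *	(void) munmap(p, pg);		-- front: s_base/offset slide up
 *	(void) munmap(p + 7 * pg, pg);	-- back: s_size shrinks
 *	(void) munmap(p + 3 * pg, pg);	-- middle: segment split in two
 */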
2350 2348
2351 2349 static void
2352 2350 segvn_free(struct seg *seg)
2353 2351 {
2354 2352 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2355 2353 pgcnt_t npages = seg_pages(seg);
2356 2354 struct anon_map *amp;
2357 2355 size_t len;
2358 2356
2359 2357 /*
2360 2358 * We don't need any segment level locks for "segvn" data
2361 2359 * since the address space is "write" locked.
2362 2360 */
2363 2361 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2364 2362 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2365 2363
2366 2364 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2367 2365
2368 2366 /*
2369 2367 * Be sure to unlock pages. XXX Why do things get free'ed instead
2370 2368 * of unmapped? XXX
2371 2369 */
2372 2370 (void) segvn_lockop(seg, seg->s_base, seg->s_size,
2373 2371 0, MC_UNLOCK, NULL, 0);
2374 2372
2375 2373 /*
2376 2374 * Deallocate the vpage and anon pointers if necessary and possible.
2377 2375 */
2378 2376 if (svd->vpage != NULL) {
2379 2377 kmem_free(svd->vpage, vpgtob(npages));
2380 2378 svd->vpage = NULL;
2381 2379 }
2382 2380 if ((amp = svd->amp) != NULL) {
2383 2381 /*
2384 2382 * If there are no more references to this anon_map
2385 2383 * structure, then deallocate the structure after freeing
2386 2384 * up all the anon slot pointers that we can.
2387 2385 */
2388 2386 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2389 2387 ASSERT(amp->a_szc >= seg->s_szc);
2390 2388 if (--amp->refcnt == 0) {
2391 2389 if (svd->type == MAP_PRIVATE) {
2392 2390 /*
2393 2391 * Private - we only need to anon_free
2394 2392 * the part that this segment refers to.
2395 2393 */
2396 2394 if (seg->s_szc != 0) {
2397 2395 anon_free_pages(amp->ahp,
2398 2396 svd->anon_index, seg->s_size,
2399 2397 seg->s_szc);
2400 2398 } else {
2401 2399 anon_free(amp->ahp, svd->anon_index,
2402 2400 seg->s_size);
2403 2401 }
2404 2402 } else {
2405 2403
2406 2404 /*
2407 2405 * Shared anon map is no longer in use. Before
2408 2406 * freeing its pages purge all entries from
2409 2407 * pcache that belong to this amp.
2410 2408 */
2411 2409 ASSERT(svd->softlockcnt == 0);
2412 2410 anonmap_purge(amp);
2413 2411
2414 2412 /*
2415 2413 * Shared - anon_free the entire
2416 2414 * anon_map's worth of stuff and
2417 2415 * release any swap reservation.
2418 2416 */
2419 2417 if (amp->a_szc != 0) {
2420 2418 anon_shmap_free_pages(amp, 0,
2421 2419 amp->size);
2422 2420 } else {
2423 2421 anon_free(amp->ahp, 0, amp->size);
2424 2422 }
2425 2423 if ((len = amp->swresv) != 0) {
2426 2424 anon_unresv_zone(len,
2427 2425 seg->s_as->a_proc->p_zone);
2428 2426 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2429 2427 "anon proc:%p %lu %u", seg, len, 0);
2430 2428 }
2431 2429 }
2432 2430 svd->amp = NULL;
2433 2431 ANON_LOCK_EXIT(&amp->a_rwlock);
2434 2432 anonmap_free(amp);
2435 2433 } else if (svd->type == MAP_PRIVATE) {
2436 2434 /*
2437 2435 * We had a private mapping which still has
2438 2436 * a held anon_map so just free up all the
2439 2437 * anon slot pointers that we were using.
2440 2438 */
2441 2439 if (seg->s_szc != 0) {
2442 2440 anon_free_pages(amp->ahp, svd->anon_index,
2443 2441 seg->s_size, seg->s_szc);
2444 2442 } else {
2445 2443 anon_free(amp->ahp, svd->anon_index,
2446 2444 seg->s_size);
2447 2445 }
2448 2446 ANON_LOCK_EXIT(&amp->a_rwlock);
2449 2447 } else {
2450 2448 ANON_LOCK_EXIT(&amp->a_rwlock);
2451 2449 }
2452 2450 }
2453 2451
2454 2452 /*
2455 2453 * Release swap reservation.
2456 2454 */
2457 2455 if ((len = svd->swresv) != 0) {
2458 2456 anon_unresv_zone(svd->swresv,
2459 2457 seg->s_as->a_proc->p_zone);
2460 2458 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2461 2459 seg, len, 0);
2462 2460 if (SEG_IS_PARTIAL_RESV(seg))
2463 2461 seg->s_as->a_resvsize -= svd->swresv;
2464 2462 svd->swresv = 0;
2465 2463 }
2466 2464 /*
2467 2465 * Release claim on vnode, credentials, and finally free the
2468 2466 * private data.
2469 2467 */
2470 2468 if (svd->vp != NULL) {
2471 2469 if (svd->type == MAP_SHARED)
2472 2470 lgrp_shm_policy_fini(NULL, svd->vp);
2473 2471 VN_RELE(svd->vp);
2474 2472 svd->vp = NULL;
2475 2473 }
2476 2474 crfree(svd->cred);
2477 2475 svd->pageprot = 0;
2478 2476 svd->pageadvice = 0;
2479 2477 svd->pageswap = 0;
2480 2478 svd->cred = NULL;
2481 2479
2482 2480 /*
2483 2481 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
2484 2482 * still working with this segment without holding as lock (in case
2485 2483 * it's called by pcache async thread).
2486 2484 */
2487 2485 ASSERT(svd->softlockcnt == 0);
2488 2486 mutex_enter(&svd->segfree_syncmtx);
2489 2487 mutex_exit(&svd->segfree_syncmtx);
2490 2488
2491 2489 seg->s_data = NULL;
2492 2490 kmem_cache_free(segvn_cache, svd);
2493 2491 }
2494 2492
2495 2493 /*
2496 2494 * Do an F_SOFTUNLOCK call over the range requested. The range must have
2497 2495 * already been F_SOFTLOCK'ed.
2498 2496 * Caller must always match addr and len of a softunlock with a previous
2499 2497 * softlock with exactly the same addr and len.
2500 2498 */
2501 2499 static void
2502 2500 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
2503 2501 {
2504 2502 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2505 2503 page_t *pp;
2506 2504 caddr_t adr;
2507 2505 struct vnode *vp;
2508 2506 u_offset_t offset;
2509 2507 ulong_t anon_index;
2510 2508 struct anon_map *amp;
2511 2509 struct anon *ap = NULL;
2512 2510
2513 2511 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2514 2512 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2515 2513
2516 2514 if ((amp = svd->amp) != NULL)
2517 2515 anon_index = svd->anon_index + seg_page(seg, addr);
2518 2516
2519 2517 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2520 2518 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2521 2519 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2522 2520 } else {
2523 2521 hat_unlock(seg->s_as->a_hat, addr, len);
2524 2522 }
2525 2523 for (adr = addr; adr < addr + len; adr += PAGESIZE) {
2526 2524 if (amp != NULL) {
2527 2525 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2528 2526 if ((ap = anon_get_ptr(amp->ahp, anon_index++))
2529 2527 != NULL) {
2530 2528 swap_xlate(ap, &vp, &offset);
2531 2529 } else {
2532 2530 vp = svd->vp;
2533 2531 offset = svd->offset +
2534 2532 (uintptr_t)(adr - seg->s_base);
2535 2533 }
2536 2534 ANON_LOCK_EXIT(&amp->a_rwlock);
2537 2535 } else {
2538 2536 vp = svd->vp;
2539 2537 offset = svd->offset +
2540 2538 (uintptr_t)(adr - seg->s_base);
2541 2539 }
2542 2540
2543 2541 /*
2544 2542 * Use page_find() instead of page_lookup() to
2545 2543 * find the page since we know that it is locked.
2546 2544 */
2547 2545 pp = page_find(vp, offset);
2548 2546 if (pp == NULL) {
2549 2547 panic(
2550 2548 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
2551 2549 (void *)adr, (void *)ap, (void *)vp, offset);
2552 2550 /*NOTREACHED*/
2553 2551 }
2554 2552
2555 2553 if (rw == S_WRITE) {
2556 2554 hat_setrefmod(pp);
2557 2555 if (seg->s_as->a_vbits)
2558 2556 hat_setstat(seg->s_as, adr, PAGESIZE,
2559 2557 P_REF | P_MOD);
2560 2558 } else if (rw != S_OTHER) {
2561 2559 hat_setref(pp);
2562 2560 if (seg->s_as->a_vbits)
2563 2561 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
2564 2562 }
2565 2563 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2566 2564 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
2567 2565 page_unlock(pp);
2568 2566 }
2569 2567 ASSERT(svd->softlockcnt >= btop(len));
2570 2568 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
2571 2569 /*
2572 2570 * All SOFTLOCKS are gone. Wakeup any waiting
2573 2571 * unmappers so they can try again to unmap.
2574 2572 * Check for waiters first without the mutex
2575 2573 * held so we don't always grab the mutex on
2576 2574 * softunlocks.
2577 2575 */
2578 2576 if (AS_ISUNMAPWAIT(seg->s_as)) {
2579 2577 mutex_enter(&seg->s_as->a_contents);
2580 2578 if (AS_ISUNMAPWAIT(seg->s_as)) {
2581 2579 AS_CLRUNMAPWAIT(seg->s_as);
2582 2580 cv_broadcast(&seg->s_as->a_cv);
2583 2581 }
2584 2582 mutex_exit(&seg->s_as->a_contents);
2585 2583 }
2586 2584 }
2587 2585 }
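/*
 * Hedged usage sketch: a typical kernel caller brackets its access
 * with matching ranges, e.g. (as_fault() signature assumed):
 *
 *	if (as_fault(as->a_hat, as, addr, len, F_SOFTLOCK, S_READ) == 0) {
 *		-- pages are locked in memory; safe to access --
 *		(void) as_fault(as->a_hat, as, addr, len,
 *		    F_SOFTUNLOCK, S_READ);
 *	}
 */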
2588 2586
2589 2587 #define PAGE_HANDLED ((page_t *)-1)
2590 2588
2591 2589 /*
2592 2590 * Release all the pages in the NULL-terminated ppp list
2593 2591 * which haven't already been converted to PAGE_HANDLED.
2594 2592 */
2595 2593 static void
2596 2594 segvn_pagelist_rele(page_t **ppp)
2597 2595 {
2598 2596 for (; *ppp != NULL; ppp++) {
2599 2597 if (*ppp != PAGE_HANDLED)
2600 2598 page_unlock(*ppp);
2601 2599 }
2602 2600 }
2603 2601
2604 2602 static int stealcow = 1;
2605 2603
2606 2604 /*
2607 2605 * Workaround for the Viking chip bug. See bug id 1220902.
2608 2606 * To fix this down in pagefault() would require importing so
2609 2607 * much as-layer and segvn code as to be unmaintainable.
2610 2608 */
2611 2609 int enable_mbit_wa = 0;
2612 2610
2613 2611 /*
2614 2612 * Handles all the dirty work of getting the right
2615 2613 * anonymous pages and loading up the translations.
2616 2614 * This routine is called only from segvn_fault()
2617 2615 * when looping over the range of addresses requested.
2618 2616 *
2619 2617 * The basic algorithm here is:
2620 2618 * If this is an anon_zero case
2621 2619 * Call anon_zero to allocate page
2622 2620 * Load up translation
2623 2621 * Return
2624 2622 * endif
2625 2623 * If this is an anon page
2626 2624 * Use anon_getpage to get the page
2627 2625 * else
2628 2626 * Find page in pl[] list passed in
2629 2627 * endif
2630 2628 * If not a cow
2631 2629 * Load up the translation to the page
2632 2630 * return
2633 2631 * endif
2634 2632 * Call anon_private to handle cow
2635 2633 * Load up (writable) translation to new page
2636 2634 */
2637 2635 static faultcode_t
2638 2636 segvn_faultpage(
2639 2637 struct hat *hat, /* the hat to use for mapping */
2640 2638 struct seg *seg, /* seg_vn of interest */
2641 2639 caddr_t addr, /* address in as */
2642 2640 u_offset_t off, /* offset in vp */
2643 2641 struct vpage *vpage, /* pointer to vpage for vp, off */
2644 2642 page_t *pl[], /* object source page pointer */
2645 2643 uint_t vpprot, /* access allowed to object pages */
2646 2644 enum fault_type type, /* type of fault */
2647 2645 enum seg_rw rw, /* type of access at fault */
2648 2646 int brkcow) /* we may need to break cow */
2649 2647 {
2650 2648 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2651 2649 page_t *pp, **ppp;
2652 2650 uint_t pageflags = 0;
2653 2651 page_t *anon_pl[1 + 1];
2654 2652 page_t *opp = NULL; /* original page */
2655 2653 uint_t prot;
2656 2654 int err;
2657 2655 int cow;
2658 2656 int claim;
2659 2657 int steal = 0;
2660 2658 ulong_t anon_index;
2661 2659 struct anon *ap, *oldap;
2662 2660 struct anon_map *amp;
2663 2661 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
2664 2662 int anon_lock = 0;
2665 2663 anon_sync_obj_t cookie;
2666 2664
2667 2665 if (svd->flags & MAP_TEXT) {
2668 2666 hat_flag |= HAT_LOAD_TEXT;
2669 2667 }
2670 2668
2671 2669 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
2672 2670 ASSERT(seg->s_szc == 0);
2673 2671 ASSERT(svd->tr_state != SEGVN_TR_INIT);
2674 2672
2675 2673 /*
2676 2674 * Initialize protection value for this page.
2677 2675 * If we have per page protection values check it now.
2678 2676 */
2679 2677 if (svd->pageprot) {
2680 2678 uint_t protchk;
2681 2679
2682 2680 switch (rw) {
2683 2681 case S_READ:
2684 2682 protchk = PROT_READ;
2685 2683 break;
2686 2684 case S_WRITE:
2687 2685 protchk = PROT_WRITE;
2688 2686 break;
2689 2687 case S_EXEC:
2690 2688 protchk = PROT_EXEC;
2691 2689 break;
2692 2690 case S_OTHER:
2693 2691 default:
2694 2692 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
2695 2693 break;
2696 2694 }
2697 2695
2698 2696 prot = VPP_PROT(vpage);
2699 2697 if ((prot & protchk) == 0)
2700 2698 return (FC_PROT); /* illegal access type */
2701 2699 } else {
2702 2700 prot = svd->prot;
2703 2701 }
2704 2702
2705 2703 if (type == F_SOFTLOCK) {
2706 2704 atomic_add_long((ulong_t *)&svd->softlockcnt, 1);
2707 2705 }
2708 2706
2709 2707 /*
2710 2708 * Always acquire the anon array lock to prevent two threads from
2711 2709 * allocating separate anon slots for the same "addr".
2712 2710 */
2713 2711
2714 2712 if ((amp = svd->amp) != NULL) {
2715 2713 ASSERT(RW_READ_HELD(&amp->a_rwlock));
2716 2714 anon_index = svd->anon_index + seg_page(seg, addr);
2717 2715 anon_array_enter(amp, anon_index, &cookie);
2718 2716 anon_lock = 1;
2719 2717 }
2720 2718
2721 2719 if (svd->vp == NULL && amp != NULL) {
2722 2720 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
2723 2721 /*
2724 2722 * Allocate a (normally) writable anonymous page of
2725 2723 * zeroes. If no advance reservations, reserve now.
2726 2724 */
2727 2725 if (svd->flags & MAP_NORESERVE) {
2728 2726 if (anon_resv_zone(ptob(1),
2729 2727 seg->s_as->a_proc->p_zone)) {
2730 2728 atomic_add_long(&svd->swresv, ptob(1));
2731 2729 atomic_add_long(&seg->s_as->a_resvsize,
2732 2730 ptob(1));
2733 2731 } else {
2734 2732 err = ENOMEM;
2735 2733 goto out;
2736 2734 }
2737 2735 }
2738 2736 if ((pp = anon_zero(seg, addr, &ap,
2739 2737 svd->cred)) == NULL) {
2740 2738 err = ENOMEM;
2741 2739 goto out; /* out of swap space */
2742 2740 }
2743 2741 /*
2744 2742 * Re-acquire the anon_map lock and
2745 2743 * initialize the anon array entry.
2746 2744 */
2747 2745 (void) anon_set_ptr(amp->ahp, anon_index, ap,
2748 2746 ANON_SLEEP);
2749 2747
2750 2748 ASSERT(pp->p_szc == 0);
2751 2749
2752 2750 /*
2753 2751 * Handle pages that have been marked for migration
2754 2752 */
2755 2753 if (lgrp_optimizations())
2756 2754 page_migrate(seg, addr, &pp, 1);
2757 2755
2758 2756 if (enable_mbit_wa) {
2759 2757 if (rw == S_WRITE)
2760 2758 hat_setmod(pp);
2761 2759 else if (!hat_ismod(pp))
2762 2760 prot &= ~PROT_WRITE;
2763 2761 }
2764 2762 /*
2765 2763 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2766 2764 * with MC_LOCKAS, MCL_FUTURE) and this is a
2767 2765 * MAP_NORESERVE segment, we may need to
2768 2766 * permanently lock the page as it is being faulted
2769 2767 * for the first time. The following text applies
2770 2768 * only to MAP_NORESERVE segments:
2771 2769 *
2772 2770 * As per memcntl(2), if this segment was created
2773 2771 * after MCL_FUTURE was applied (a "future"
2774 2772 * segment), its pages must be locked. If this
2775 2773 * segment existed at MCL_FUTURE application (a
2776 2774 * "past" segment), the interface is unclear.
2777 2775 *
2778 2776 * We decide to lock only if vpage is present:
2779 2777 *
2780 2778 * - "future" segments will have a vpage array (see
2781 2779 * as_map), and so will be locked as required
2782 2780 *
2783 2781 * - "past" segments may not have a vpage array,
2784 2782 * depending on whether events (such as
2785 2783 * mprotect) have occurred. Locking if vpage
2786 2784 * exists will preserve legacy behavior. Not
2787 2785 * locking if vpage is absent, will not break
2788 2786 * the interface or legacy behavior. Note that
2789 2787 * allocating vpage here if it's absent requires
2790 2788 * upgrading the segvn reader lock, the cost of
2791 2789 * which does not seem worthwhile.
2792 2790 *
2793 2791 * Usually testing and setting VPP_ISPPLOCK and
2794 2792 * VPP_SETPPLOCK requires holding the segvn lock as
2795 2793 * writer, but in this case all readers are
2796 2794 * serializing on the anon array lock.
2797 2795 */
2798 2796 if (AS_ISPGLCK(seg->s_as) && vpage != NULL &&
2799 2797 (svd->flags & MAP_NORESERVE) &&
2800 2798 !VPP_ISPPLOCK(vpage)) {
2801 2799 proc_t *p = seg->s_as->a_proc;
2802 2800 ASSERT(svd->type == MAP_PRIVATE);
2803 2801 mutex_enter(&p->p_lock);
2804 2802 if (rctl_incr_locked_mem(p, NULL, PAGESIZE,
2805 2803 1) == 0) {
2806 2804 claim = VPP_PROT(vpage) & PROT_WRITE;
2807 2805 if (page_pp_lock(pp, claim, 0)) {
2808 2806 VPP_SETPPLOCK(vpage);
2809 2807 } else {
2810 2808 rctl_decr_locked_mem(p, NULL,
2811 2809 PAGESIZE, 1);
2812 2810 }
2813 2811 }
2814 2812 mutex_exit(&p->p_lock);
2815 2813 }
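/*
 * Hedged userland sequence that reaches the locking above; flags and
 * sizes are illustrative:
 *
 *	(void) mlockall(MCL_FUTURE);	-- sets AS_PAGLCK on the as
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
 *	p[0] = 1;	-- first-touch fault lands here; the new anon
 *			   page is permanently locked via page_pp_lock()
 */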
2816 2814
2817 2815 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2818 2816 hat_memload(hat, addr, pp, prot, hat_flag);
2819 2817
2820 2818 if (!(hat_flag & HAT_LOAD_LOCK))
2821 2819 page_unlock(pp);
2822 2820
2823 2821 anon_array_exit(&cookie);
2824 2822 return (0);
2825 2823 }
2826 2824 }
2827 2825
2828 2826 /*
2829 2827 * Obtain the page structure via anon_getpage() if it is
2830 2828 * a private copy of an object (the result of a previous
2831 2829 * copy-on-write).
2832 2830 */
2833 2831 if (amp != NULL) {
2834 2832 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) {
2835 2833 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE,
2836 2834 seg, addr, rw, svd->cred);
2837 2835 if (err)
2838 2836 goto out;
2839 2837
2840 2838 if (svd->type == MAP_SHARED) {
2841 2839 /*
2842 2840 * If this is a shared mapping to an
2843 2841 * anon_map, then ignore the write
2844 2842 * permissions returned by anon_getpage().
2845 2843 * They apply to the private mappings
2846 2844 * of this anon_map.
2847 2845 */
2848 2846 vpprot |= PROT_WRITE;
2849 2847 }
2850 2848 opp = anon_pl[0];
2851 2849 }
2852 2850 }
2853 2851
2854 2852 /*
2855 2853 * Search the pl[] list passed in if it is from the
2856 2854 * original object (i.e., not a private copy).
2857 2855 */
2858 2856 if (opp == NULL) {
2859 2857 /*
2860 2858 * Find original page. We must be bringing it in
2861 2859 * from the list in pl[].
2862 2860 */
2863 2861 for (ppp = pl; (opp = *ppp) != NULL; ppp++) {
2864 2862 if (opp == PAGE_HANDLED)
2865 2863 continue;
2866 2864 ASSERT(opp->p_vnode == svd->vp); /* XXX */
2867 2865 if (opp->p_offset == off)
2868 2866 break;
2869 2867 }
2870 2868 if (opp == NULL) {
2871 2869 panic("segvn_faultpage not found");
2872 2870 /*NOTREACHED*/
2873 2871 }
2874 2872 *ppp = PAGE_HANDLED;
2875 2873
2876 2874 }
2877 2875
2878 2876 ASSERT(PAGE_LOCKED(opp));
2879 2877
2880 2878 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2881 2879 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0);
2882 2880
2883 2881 /*
2884 2882 * The fault is treated as a copy-on-write fault if a
2885 2883 * write occurs on a private segment and the object
2886 2884 * page (i.e., mapping) is write protected. We assume
2887 2885 * that fatal protection checks have already been made.
2888 2886 */
2889 2887
2890 2888 if (brkcow) {
2891 2889 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2892 2890 cow = !(vpprot & PROT_WRITE);
2893 2891 } else if (svd->tr_state == SEGVN_TR_ON) {
2894 2892 /*
2895 2893 * If we are doing text replication, COW on first touch.
2896 2894 */
2897 2895 ASSERT(amp != NULL);
2898 2896 ASSERT(svd->vp != NULL);
2899 2897 ASSERT(rw != S_WRITE);
2900 2898 cow = (ap == NULL);
2901 2899 } else {
2902 2900 cow = 0;
2903 2901 }
2904 2902
2905 2903 /*
2906 2904 * If not a copy-on-write case load the translation
2907 2905 * and return.
2908 2906 */
2909 2907 if (cow == 0) {
2910 2908
2911 2909 /*
2912 2910 * Handle pages that have been marked for migration
2913 2911 */
2914 2912 if (lgrp_optimizations())
2915 2913 page_migrate(seg, addr, &opp, 1);
2916 2914
2917 2915 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) {
2918 2916 if (rw == S_WRITE)
2919 2917 hat_setmod(opp);
2920 2918 else if (rw != S_OTHER && !hat_ismod(opp))
2921 2919 prot &= ~PROT_WRITE;
2922 2920 }
2923 2921
2924 2922 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
2925 2923 (!svd->pageprot && svd->prot == (prot & vpprot)));
2926 2924 ASSERT(amp == NULL ||
2927 2925 svd->rcookie == HAT_INVALID_REGION_COOKIE);
2928 2926 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag,
2929 2927 svd->rcookie);
2930 2928
2931 2929 if (!(hat_flag & HAT_LOAD_LOCK))
2932 2930 page_unlock(opp);
2933 2931
2934 2932 if (anon_lock) {
2935 2933 anon_array_exit(&cookie);
2936 2934 }
2937 2935 return (0);
2938 2936 }
2939 2937
2940 2938 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2941 2939
2942 2940 hat_setref(opp);
2943 2941
2944 2942 ASSERT(amp != NULL && anon_lock);
2945 2943
2946 2944 /*
2947 2945 * Steal the page only if it isn't a private page
2948 2946 * since stealing a private page is not worth the effort.
2949 2947 */
2950 2948 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL)
2951 2949 steal = 1;
2952 2950
2953 2951 /*
2954 2952 * Steal the original page if the following conditions are true:
2955 2953 *
2956 2954 * We are low on memory, the page is not private, page is not large,
2957 2955 * not shared, not modified, not `locked' or if we have it `locked'
2958 2956 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
2959 2957 * that the page is not shared) and if it doesn't have any
2960 2958 * translations. page_struct_lock isn't needed to look at p_cowcnt
2961 2959 * and p_lckcnt because we first get exclusive lock on page.
2962 2960 */
2963 2961 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
2964 2962
2965 2963 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
2966 2964 page_tryupgrade(opp) && !hat_ismod(opp) &&
2967 2965 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) ||
2968 2966 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 &&
2969 2967 vpage != NULL && VPP_ISPPLOCK(vpage)))) {
2970 2968 /*
2971 2969 * Check if this page has other translations
2972 2970 * after unloading our translation.
2973 2971 */
2974 2972 if (hat_page_is_mapped(opp)) {
2975 2973 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2976 2974 hat_unload(seg->s_as->a_hat, addr, PAGESIZE,
2977 2975 HAT_UNLOAD);
2978 2976 }
2979 2977
2980 2978 /*
2981 2979 * hat_unload() might sync back someone else's recent
2982 2980 * modification, so check again.
2983 2981 */
2984 2982 if (!hat_ismod(opp) && !hat_page_is_mapped(opp))
2985 2983 pageflags |= STEAL_PAGE;
2986 2984 }
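/*
 * Net effect of the block above: STEAL_PAGE is set only when memory is
 * low (freemem < minfree), the original is not already a private anon
 * page, it is small (p_szc == 0), we hold it exclusively, it is clean
 * even after unloading our translation, and either nobody has it
 * locked or we are the lone locker (p_cowcnt == 1, p_lckcnt == 0).
 */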
2987 2985
2988 2986 /*
2989 2987 * If we have a vpage pointer, see if it indicates that we have
2990 2988 * ``locked'' the page we map -- if so, tell anon_private to
2991 2989 * transfer the locking resource to the new page.
2992 2990 *
2993 2991 * See Statement at the beginning of segvn_lockop regarding
2994 2992 * the way lockcnts/cowcnts are handled during COW.
2995 2993 *
2996 2994 */
2997 2995 if (vpage != NULL && VPP_ISPPLOCK(vpage))
2998 2996 pageflags |= LOCK_PAGE;
2999 2997
3000 2998 /*
3001 2999 * Allocate a private page and perform the copy.
3002 3000 * For MAP_NORESERVE reserve swap space now, unless this
3003 3001 * is a cow fault on an existing anon page in which case
3004 3002 * MAP_NORESERVE will have made advance reservations.
3005 3003 */
3006 3004 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) {
3007 3005 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) {
3008 3006 atomic_add_long(&svd->swresv, ptob(1));
3009 3007 atomic_add_long(&seg->s_as->a_resvsize, ptob(1));
3010 3008 } else {
3011 3009 page_unlock(opp);
3012 3010 err = ENOMEM;
3013 3011 goto out;
3014 3012 }
3015 3013 }
3016 3014 oldap = ap;
3017 3015 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3018 3016 if (pp == NULL) {
3019 3017 err = ENOMEM; /* out of swap space */
3020 3018 goto out;
3021 3019 }
3022 3020
3023 3021 /*
3024 3022 * If we copied away from an anonymous page, then
3025 3023 * we are one step closer to freeing up an anon slot.
3026 3024 *
3027 3025 * NOTE: The original anon slot must be released while
3028 3026 * holding the "anon_map" lock. This is necessary to prevent
3029 3027 * other threads from obtaining a pointer to the anon slot
3030 3028 * which may be freed if its "refcnt" is 1.
3031 3029 */
3032 3030 if (oldap != NULL)
3033 3031 anon_decref(oldap);
3034 3032
3035 3033 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3036 3034
3037 3035 /*
3038 3036 * Handle pages that have been marked for migration
3039 3037 */
3040 3038 if (lgrp_optimizations())
3041 3039 page_migrate(seg, addr, &pp, 1);
3042 3040
3043 3041 ASSERT(pp->p_szc == 0);
3044 3042
3045 3043 ASSERT(!IS_VMODSORT(pp->p_vnode));
3046 3044 if (enable_mbit_wa) {
3047 3045 if (rw == S_WRITE)
3048 3046 hat_setmod(pp);
3049 3047 else if (!hat_ismod(pp))
3050 3048 prot &= ~PROT_WRITE;
3051 3049 }
3052 3050
3053 3051 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3054 3052 hat_memload(hat, addr, pp, prot, hat_flag);
3055 3053
3056 3054 if (!(hat_flag & HAT_LOAD_LOCK))
3057 3055 page_unlock(pp);
3058 3056
3059 3057 ASSERT(anon_lock);
3060 3058 anon_array_exit(&cookie);
3061 3059 return (0);
3062 3060 out:
3063 3061 if (anon_lock)
3064 3062 anon_array_exit(&cookie);
3065 3063
3066 3064 if (type == F_SOFTLOCK) {
3067 3065 atomic_add_long((ulong_t *)&svd->softlockcnt, -1);
3068 3066 }
3069 3067 return (FC_MAKE_ERR(err));
3070 3068 }
3071 3069
3072 3070 /*
3073 3071 * Relocate a bunch of smaller targ pages into one large repl page. All targ
3074 3072 * pages must be complete pages smaller than the replacement pages.
3075 3073 * It's assumed that no page's szc can change since they are all PAGESIZE or
3076 3074 * complete large pages locked SHARED.
3077 3075 */
3078 3076 static void
3079 3077 segvn_relocate_pages(page_t **targ, page_t *replacement)
3080 3078 {
3081 3079 page_t *pp;
3082 3080 pgcnt_t repl_npgs, curnpgs;
3083 3081 pgcnt_t i;
3084 3082 uint_t repl_szc = replacement->p_szc;
3085 3083 page_t *first_repl = replacement;
3086 3084 page_t *repl;
3087 3085 spgcnt_t npgs;
3088 3086
3089 3087 VM_STAT_ADD(segvnvmstats.relocatepages[0]);
3090 3088
3091 3089 ASSERT(repl_szc != 0);
3092 3090 npgs = repl_npgs = page_get_pagecnt(repl_szc);
3093 3091
3094 3092 i = 0;
3095 3093 while (repl_npgs) {
3096 3094 spgcnt_t nreloc;
3097 3095 int err;
3098 3096 ASSERT(replacement != NULL);
3099 3097 pp = targ[i];
3100 3098 ASSERT(pp->p_szc < repl_szc);
3101 3099 ASSERT(PAGE_EXCL(pp));
3102 3100 ASSERT(!PP_ISFREE(pp));
3103 3101 curnpgs = page_get_pagecnt(pp->p_szc);
3104 3102 if (curnpgs == 1) {
3105 3103 VM_STAT_ADD(segvnvmstats.relocatepages[1]);
3106 3104 repl = replacement;
3107 3105 page_sub(&replacement, repl);
3108 3106 ASSERT(PAGE_EXCL(repl));
3109 3107 ASSERT(!PP_ISFREE(repl));
3110 3108 ASSERT(repl->p_szc == repl_szc);
3111 3109 } else {
3112 3110 page_t *repl_savepp;
3113 3111 int j;
3114 3112 VM_STAT_ADD(segvnvmstats.relocatepages[2]);
3115 3113 repl_savepp = replacement;
3116 3114 for (j = 0; j < curnpgs; j++) {
3117 3115 repl = replacement;
3118 3116 page_sub(&replacement, repl);
3119 3117 ASSERT(PAGE_EXCL(repl));
3120 3118 ASSERT(!PP_ISFREE(repl));
3121 3119 ASSERT(repl->p_szc == repl_szc);
3122 3120 ASSERT(page_pptonum(targ[i + j]) ==
3123 3121 page_pptonum(targ[i]) + j);
3124 3122 }
3125 3123 repl = repl_savepp;
3126 3124 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs));
3127 3125 }
3128 3126 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL);
3129 3127 if (err || nreloc != curnpgs) {
3130 3128 panic("segvn_relocate_pages: "
3131 3129 "page_relocate failed err=%d curnpgs=%ld "
3132 3130 "nreloc=%ld", err, curnpgs, nreloc);
3133 3131 }
3134 3132 ASSERT(curnpgs <= repl_npgs);
3135 3133 repl_npgs -= curnpgs;
3136 3134 i += curnpgs;
3137 3135 }
3138 3136 ASSERT(replacement == NULL);
3139 3137
3140 3138 repl = first_repl;
3141 3139 repl_npgs = npgs;
3142 3140 for (i = 0; i < repl_npgs; i++) {
3143 3141 ASSERT(PAGE_EXCL(repl));
3144 3142 ASSERT(!PP_ISFREE(repl));
3145 3143 targ[i] = repl;
3146 3144 page_downgrade(targ[i]);
3147 3145 repl++;
3148 3146 }
3149 3147 }
3150 3148
3151 3149 /*
3152 3150 * Check if all pages in the ppa array are complete pages smaller than szc and
3153 3151 * their roots will still be aligned relative to their current size if the
3154 3152 * entire ppa array is relocated into one szc page. If these conditions are
3155 3153 * not met return 0.
3156 3154 *
3157 3155 * If all pages are properly aligned attempt to upgrade their locks
3158 3156 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
3159 3157 * upgrdfail was set to 0 by caller.
3160 3158 *
3161 3159 * Return 1 if all pages are aligned and locked exclusively.
3162 3160 *
3163 3161 * If all pages in ppa array happen to be physically contiguous to make one
3164 3162 * szc page and all exclusive locks are successfully obtained promote the page
3165 3163 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3166 3164 */
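/*
 * Worked example under a hypothetical geometry where szc corresponds
 * to an 8-constituent large page: ppa[0..7] must all have p_szc < szc,
 * any constituent with p_szc > 0 must lie root-aligned and entirely
 * within the array, and the contiguous fast path requires ppa[0]'s pfn
 * to satisfy IS_P2ALIGNED(pfn, 8).
 */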
3167 3165 static int
3168 3166 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3169 3167 {
3170 3168 page_t *pp;
3171 3169 pfn_t pfn;
3172 3170 pgcnt_t totnpgs = page_get_pagecnt(szc);
3173 3171 pfn_t first_pfn;
3174 3172 int contig = 1;
3175 3173 pgcnt_t i;
3176 3174 pgcnt_t j;
3177 3175 uint_t curszc;
3178 3176 pgcnt_t curnpgs;
3179 3177 int root = 0;
3180 3178
3181 3179 ASSERT(szc > 0);
3182 3180
3183 3181 VM_STAT_ADD(segvnvmstats.fullszcpages[0]);
3184 3182
3185 3183 for (i = 0; i < totnpgs; i++) {
3186 3184 pp = ppa[i];
3187 3185 ASSERT(PAGE_SHARED(pp));
3188 3186 ASSERT(!PP_ISFREE(pp));
3189 3187 pfn = page_pptonum(pp);
3190 3188 if (i == 0) {
3191 3189 if (!IS_P2ALIGNED(pfn, totnpgs)) {
3192 3190 contig = 0;
3193 3191 } else {
3194 3192 first_pfn = pfn;
3195 3193 }
3196 3194 } else if (contig && pfn != first_pfn + i) {
3197 3195 contig = 0;
3198 3196 }
3199 3197 if (pp->p_szc == 0) {
3200 3198 if (root) {
3201 3199 VM_STAT_ADD(segvnvmstats.fullszcpages[1]);
3202 3200 return (0);
3203 3201 }
3204 3202 } else if (!root) {
3205 3203 if ((curszc = pp->p_szc) >= szc) {
3206 3204 VM_STAT_ADD(segvnvmstats.fullszcpages[2]);
3207 3205 return (0);
3208 3206 }
3209 3207 if (curszc == 0) {
3210 3208 /*
3211 3209 * A changed p_szc means we don't have all pages
3212 3210 * locked; return failure.
3213 3211 */
3214 3212 VM_STAT_ADD(segvnvmstats.fullszcpages[3]);
3215 3213 return (0);
3216 3214 }
3217 3215 curnpgs = page_get_pagecnt(curszc);
3218 3216 if (!IS_P2ALIGNED(pfn, curnpgs) ||
3219 3217 !IS_P2ALIGNED(i, curnpgs)) {
3220 3218 VM_STAT_ADD(segvnvmstats.fullszcpages[4]);
3221 3219 return (0);
3222 3220 }
3223 3221 root = 1;
3224 3222 } else {
3225 3223 ASSERT(i > 0);
3226 3224 VM_STAT_ADD(segvnvmstats.fullszcpages[5]);
3227 3225 if (pp->p_szc != curszc) {
3228 3226 VM_STAT_ADD(segvnvmstats.fullszcpages[6]);
3229 3227 return (0);
3230 3228 }
3231 3229 if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3232 3230 panic("segvn_full_szcpages: "
3233 3231 "large page not physically contiguous");
3234 3232 }
3235 3233 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) {
3236 3234 root = 0;
3237 3235 }
3238 3236 }
3239 3237 }
3240 3238
3241 3239 for (i = 0; i < totnpgs; i++) {
3242 3240 ASSERT(ppa[i]->p_szc < szc);
3243 3241 if (!page_tryupgrade(ppa[i])) {
3244 3242 for (j = 0; j < i; j++) {
3245 3243 page_downgrade(ppa[j]);
3246 3244 }
3247 3245 *pszc = ppa[i]->p_szc;
3248 3246 *upgrdfail = 1;
3249 3247 VM_STAT_ADD(segvnvmstats.fullszcpages[7]);
3250 3248 return (0);
3251 3249 }
3252 3250 }
3253 3251
3254 3252 /*
3255 3253 * When a page is put on a free cachelist its szc is set to 0. If the
3256 3254 * file system reclaimed pages from the cachelist, the targ pages will
3257 3255 * be physically contiguous with 0 p_szc. In this case just upgrade the
3258 3256 * szc of the targ pages without any relocations.
3259 3257 * To avoid any hat issues with previous small mappings
3260 3258 * hat_pageunload() the target pages first.
3261 3259 */
3262 3260 if (contig) {
3263 3261 VM_STAT_ADD(segvnvmstats.fullszcpages[8]);
3264 3262 for (i = 0; i < totnpgs; i++) {
3265 3263 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3266 3264 }
3267 3265 for (i = 0; i < totnpgs; i++) {
3268 3266 ppa[i]->p_szc = szc;
3269 3267 }
3270 3268 for (i = 0; i < totnpgs; i++) {
3271 3269 ASSERT(PAGE_EXCL(ppa[i]));
3272 3270 page_downgrade(ppa[i]);
3273 3271 }
3274 3272 if (pszc != NULL) {
3275 3273 *pszc = szc;
3276 3274 }
3277 3275 }
3278 3276 VM_STAT_ADD(segvnvmstats.fullszcpages[9]);
3279 3277 return (1);
3280 3278 }
3281 3279
3282 3280 /*
3283 3281 * Create physically contiguous pages for the [vp, off] - [vp, off +
3284 3282 * page_size(szc)) range and for a private segment return them in the ppa array.
3285 3283 * Pages are created either via IO or relocations.
3286 3284 *
3287 3285 * Return 1 on success and 0 on failure.
3288 3286 *
3289 3287 * If physically contiguous pages already exist for this range return 1 without
3290 3288 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
3291 3289 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE().
3292 3290 */
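/*
 * Sketch of the resulting contract as seen by the caller (illustrative
 * only):
 *
 *	ppa[0] = NULL;
 *	if (segvn_fill_vp_pages(svd, vp, off, szc, ppa, &pplist,
 *	    &pszc, &downsize)) {
 *		if (ppa[0] != NULL)
 *			use the locked pages in ppa;
 *		else
 *			pages were already contiguous; fill ppa via
 *			VOP_GETPAGE();
 *	}
 */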
3293 3291
3294 3292 static int
3295 3293 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
3296 3294 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
3297 3295 int *downsize)
3298 3296
3299 3297 {
3300 3298 page_t *pplist = *ppplist;
3301 3299 size_t pgsz = page_get_pagesize(szc);
3302 3300 pgcnt_t pages = btop(pgsz);
3303 3301 ulong_t start_off = off;
3304 3302 u_offset_t eoff = off + pgsz;
3305 3303 spgcnt_t nreloc;
3306 3304 u_offset_t io_off = off;
3307 3305 size_t io_len;
3308 3306 page_t *io_pplist = NULL;
3309 3307 page_t *done_pplist = NULL;
3310 3308 pgcnt_t pgidx = 0;
3311 3309 page_t *pp;
3312 3310 page_t *newpp;
3313 3311 page_t *targpp;
3314 3312 int io_err = 0;
3315 3313 int i;
3316 3314 pfn_t pfn;
3317 3315 ulong_t ppages;
3318 3316 page_t *targ_pplist = NULL;
3319 3317 page_t *repl_pplist = NULL;
3320 3318 page_t *tmp_pplist;
3321 3319 int nios = 0;
3322 3320 uint_t pszc;
3323 3321 struct vattr va;
3324 3322
3325 3323 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]);
3326 3324
3327 3325 ASSERT(szc != 0);
3328 3326 ASSERT(pplist->p_szc == szc);
3329 3327
3330 3328 /*
3331 3329 * downsize will be set to 1 only if we fail to lock pages. This will
3332 3330 * allow subsequent faults to try to relocate the page again. If we
3333 3331 * fail due to misalignment don't downsize and let the caller map the
3334 3332 * whole region with small mappings to avoid more faults into the area
3335 3333 * where we can't get large pages anyway.
3336 3334 */
3337 3335 *downsize = 0;
3338 3336
3339 3337 while (off < eoff) {
3340 3338 newpp = pplist;
3341 3339 ASSERT(newpp != NULL);
3342 3340 ASSERT(PAGE_EXCL(newpp));
3343 3341 ASSERT(!PP_ISFREE(newpp));
3344 3342 /*
3345 3343 * we pass NULL for nrelocp to page_lookup_create()
3346 3344 * so that it doesn't relocate. We relocate here
3347 3345 * later only after we make sure we can lock all
3348 3346 * pages in the range we handle and they are all
3349 3347 * aligned.
3350 3348 */
3351 3349 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0);
3352 3350 ASSERT(pp != NULL);
3353 3351 ASSERT(!PP_ISFREE(pp));
3354 3352 ASSERT(pp->p_vnode == vp);
3355 3353 ASSERT(pp->p_offset == off);
3356 3354 if (pp == newpp) {
3357 3355 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]);
3358 3356 page_sub(&pplist, pp);
3359 3357 ASSERT(PAGE_EXCL(pp));
3360 3358 ASSERT(page_iolock_assert(pp));
3361 3359 page_list_concat(&io_pplist, &pp);
3362 3360 off += PAGESIZE;
3363 3361 continue;
3364 3362 }
3365 3363 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]);
3366 3364 pfn = page_pptonum(pp);
3367 3365 pszc = pp->p_szc;
3368 3366 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL &&
3369 3367 IS_P2ALIGNED(pfn, pages)) {
3370 3368 ASSERT(repl_pplist == NULL);
3371 3369 ASSERT(done_pplist == NULL);
3372 3370 ASSERT(pplist == *ppplist);
3373 3371 page_unlock(pp);
3374 3372 page_free_replacement_page(pplist);
3375 3373 page_create_putback(pages);
3376 3374 *ppplist = NULL;
3377 3375 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]);
3378 3376 return (1);
3379 3377 }
3380 3378 if (pszc >= szc) {
3381 3379 page_unlock(pp);
3382 3380 segvn_faultvnmpss_align_err1++;
3383 3381 goto out;
3384 3382 }
3385 3383 ppages = page_get_pagecnt(pszc);
3386 3384 if (!IS_P2ALIGNED(pfn, ppages)) {
3387 3385 ASSERT(pszc > 0);
3388 3386 /*
3389 3387 * sizing down to pszc won't help.
3390 3388 */
3391 3389 page_unlock(pp);
3392 3390 segvn_faultvnmpss_align_err2++;
3393 3391 goto out;
3394 3392 }
3395 3393 pfn = page_pptonum(newpp);
3396 3394 if (!IS_P2ALIGNED(pfn, ppages)) {
3397 3395 ASSERT(pszc > 0);
3398 3396 /*
3399 3397 * sizing down to pszc won't help.
3400 3398 */
3401 3399 page_unlock(pp);
3402 3400 segvn_faultvnmpss_align_err3++;
3403 3401 goto out;
3404 3402 }
3405 3403 if (!PAGE_EXCL(pp)) {
3406 3404 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
3407 3405 page_unlock(pp);
3408 3406 *downsize = 1;
3409 3407 *ret_pszc = pp->p_szc;
3410 3408 goto out;
3411 3409 }
3412 3410 targpp = pp;
3413 3411 if (io_pplist != NULL) {
3414 3412 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
3415 3413 io_len = off - io_off;
3416 3414 /*
3417 3415 * Some file systems like NFS don't check EOF
3418 3416 * conditions in VOP_PAGEIO(). Check it here
3419 3417 * now that pages are locked SE_EXCL. Any file
3420 3418 * truncation will wait until the pages are
3421 3419 * unlocked so no need to worry that file will
3422 3420 * be truncated after we check its size here.
3423 3421 * XXX fix NFS to remove this check.
3424 3422 */
3425 3423 va.va_mask = AT_SIZE;
3426 3424 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
3427 3425 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
3428 3426 page_unlock(targpp);
3429 3427 goto out;
3430 3428 }
3431 3429 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3432 3430 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
3433 3431 *downsize = 1;
3434 3432 *ret_pszc = 0;
3435 3433 page_unlock(targpp);
3436 3434 goto out;
3437 3435 }
3438 3436 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3439 3437 B_READ, svd->cred, NULL);
3440 3438 if (io_err) {
3441 3439 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
3442 3440 page_unlock(targpp);
3443 3441 if (io_err == EDEADLK) {
3444 3442 segvn_vmpss_pageio_deadlk_err++;
3445 3443 }
3446 3444 goto out;
3447 3445 }
3448 3446 nios++;
3449 3447 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
3450 3448 while (io_pplist != NULL) {
3451 3449 pp = io_pplist;
3452 3450 page_sub(&io_pplist, pp);
3453 3451 ASSERT(page_iolock_assert(pp));
3454 3452 page_io_unlock(pp);
3455 3453 pgidx = (pp->p_offset - start_off) >>
3456 3454 PAGESHIFT;
3457 3455 ASSERT(pgidx < pages);
3458 3456 ppa[pgidx] = pp;
3459 3457 page_list_concat(&done_pplist, &pp);
3460 3458 }
3461 3459 }
3462 3460 pp = targpp;
3463 3461 ASSERT(PAGE_EXCL(pp));
3464 3462 ASSERT(pp->p_szc <= pszc);
3465 3463 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
3466 3464 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
3467 3465 page_unlock(pp);
3468 3466 *downsize = 1;
3469 3467 *ret_pszc = pp->p_szc;
3470 3468 goto out;
3471 3469 }
3472 3470 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
3473 3471 /*
3474 3472 * page szc could have changed before the entire group was
3475 3473 * locked. Reread page szc.
3476 3474 */
3477 3475 pszc = pp->p_szc;
3478 3476 ppages = page_get_pagecnt(pszc);
3479 3477
3480 3478 /* link just the roots */
3481 3479 page_list_concat(&targ_pplist, &pp);
3482 3480 page_sub(&pplist, newpp);
3483 3481 page_list_concat(&repl_pplist, &newpp);
3484 3482 off += PAGESIZE;
3485 3483 while (--ppages != 0) {
3486 3484 newpp = pplist;
3487 3485 page_sub(&pplist, newpp);
3488 3486 off += PAGESIZE;
3489 3487 }
3490 3488 io_off = off;
3491 3489 }
3492 3490 if (io_pplist != NULL) {
3493 3491 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]);
3494 3492 io_len = eoff - io_off;
3495 3493 va.va_mask = AT_SIZE;
3496 3494 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) {
3497 3495 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]);
3498 3496 goto out;
3499 3497 }
3500 3498 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3501 3499 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]);
3502 3500 *downsize = 1;
3503 3501 *ret_pszc = 0;
3504 3502 goto out;
3505 3503 }
3506 3504 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3507 3505 B_READ, svd->cred, NULL);
3508 3506 if (io_err) {
3509 3507 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]);
3510 3508 if (io_err == EDEADLK) {
3511 3509 segvn_vmpss_pageio_deadlk_err++;
3512 3510 }
3513 3511 goto out;
3514 3512 }
3515 3513 nios++;
3516 3514 while (io_pplist != NULL) {
3517 3515 pp = io_pplist;
3518 3516 page_sub(&io_pplist, pp);
3519 3517 ASSERT(page_iolock_assert(pp));
3520 3518 page_io_unlock(pp);
3521 3519 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3522 3520 ASSERT(pgidx < pages);
3523 3521 ppa[pgidx] = pp;
3524 3522 }
3525 3523 }
3526 3524 /*
3527 3525 * We're now bound to succeed or panic.
3528 3526 * Remove pages from done_pplist; it's not needed anymore.
3529 3527 */
3530 3528 while (done_pplist != NULL) {
3531 3529 pp = done_pplist;
3532 3530 page_sub(&done_pplist, pp);
3533 3531 }
3534 3532 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]);
3535 3533 ASSERT(pplist == NULL);
3536 3534 *ppplist = NULL;
3537 3535 while (targ_pplist != NULL) {
3538 3536 int ret;
3539 3537 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]);
3540 3538 ASSERT(repl_pplist);
3541 3539 pp = targ_pplist;
3542 3540 page_sub(&targ_pplist, pp);
3543 3541 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3544 3542 newpp = repl_pplist;
3545 3543 page_sub(&repl_pplist, newpp);
3546 3544 #ifdef DEBUG
3547 3545 pfn = page_pptonum(pp);
3548 3546 pszc = pp->p_szc;
3549 3547 ppages = page_get_pagecnt(pszc);
3550 3548 ASSERT(IS_P2ALIGNED(pfn, ppages));
3551 3549 pfn = page_pptonum(newpp);
3552 3550 ASSERT(IS_P2ALIGNED(pfn, ppages));
3553 3551 ASSERT(P2PHASE(pfn, pages) == pgidx);
3554 3552 #endif
3555 3553 nreloc = 0;
3556 3554 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL);
3557 3555 if (ret != 0 || nreloc == 0) {
3558 3556 panic("segvn_fill_vp_pages: "
3559 3557 "page_relocate failed");
3560 3558 }
3561 3559 pp = newpp;
3562 3560 while (nreloc-- != 0) {
3563 3561 ASSERT(PAGE_EXCL(pp));
3564 3562 ASSERT(pp->p_vnode == vp);
3565 3563 ASSERT(pgidx ==
3566 3564 ((pp->p_offset - start_off) >> PAGESHIFT));
3567 3565 ppa[pgidx++] = pp;
3568 3566 pp++;
3569 3567 }
3570 3568 }
3571 3569
3572 3570 if (svd->type == MAP_PRIVATE) {
3573 3571 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]);
3574 3572 for (i = 0; i < pages; i++) {
3575 3573 ASSERT(ppa[i] != NULL);
3576 3574 ASSERT(PAGE_EXCL(ppa[i]));
3577 3575 ASSERT(ppa[i]->p_vnode == vp);
3578 3576 ASSERT(ppa[i]->p_offset ==
3579 3577 start_off + (i << PAGESHIFT));
3580 3578 page_downgrade(ppa[i]);
3581 3579 }
3582 3580 ppa[pages] = NULL;
3583 3581 } else {
3584 3582 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]);
3585 3583 /*
3586 3584 * The caller will still call VOP_GETPAGE() for shared segments
3587 3585 * to check FS write permissions. For private segments we map
3588 3586 * the file read only anyway, so no VOP_GETPAGE() is needed.
3589 3587 */
3590 3588 for (i = 0; i < pages; i++) {
3591 3589 ASSERT(ppa[i] != NULL);
3592 3590 ASSERT(PAGE_EXCL(ppa[i]));
3593 3591 ASSERT(ppa[i]->p_vnode == vp);
3594 3592 ASSERT(ppa[i]->p_offset ==
3595 3593 start_off + (i << PAGESHIFT));
3596 3594 page_unlock(ppa[i]);
3597 3595 }
3598 3596 ppa[0] = NULL;
3599 3597 }
3600 3598
3601 3599 return (1);
3602 3600 out:
3603 3601 /*
3604 3602 * Do the cleanup. Unlock target pages we didn't relocate. They are
3605 3603 * linked on targ_pplist by root pages. Reassemble unused replacement
3606 3604 * and io pages back to pplist.
3607 3605 */
3608 3606 if (io_pplist != NULL) {
3609 3607 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]);
3610 3608 pp = io_pplist;
3611 3609 do {
3612 3610 ASSERT(pp->p_vnode == vp);
3613 3611 ASSERT(pp->p_offset == io_off);
3614 3612 ASSERT(page_iolock_assert(pp));
3615 3613 page_io_unlock(pp);
3616 3614 page_hashout(pp, NULL);
3617 3615 io_off += PAGESIZE;
3618 3616 } while ((pp = pp->p_next) != io_pplist);
3619 3617 page_list_concat(&io_pplist, &pplist);
3620 3618 pplist = io_pplist;
3621 3619 }
3622 3620 tmp_pplist = NULL;
3623 3621 while (targ_pplist != NULL) {
3624 3622 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]);
3625 3623 pp = targ_pplist;
3626 3624 ASSERT(PAGE_EXCL(pp));
3627 3625 page_sub(&targ_pplist, pp);
3628 3626
3629 3627 pszc = pp->p_szc;
3630 3628 ppages = page_get_pagecnt(pszc);
3631 3629 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3632 3630
3633 3631 if (pszc != 0) {
3634 3632 group_page_unlock(pp);
3635 3633 }
3636 3634 page_unlock(pp);
3637 3635
3638 3636 pp = repl_pplist;
3639 3637 ASSERT(pp != NULL);
3640 3638 ASSERT(PAGE_EXCL(pp));
3641 3639 ASSERT(pp->p_szc == szc);
3642 3640 page_sub(&repl_pplist, pp);
3643 3641
3644 3642 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3645 3643
3646 3644 /* relink replacement page */
3647 3645 page_list_concat(&tmp_pplist, &pp);
3648 3646 while (--ppages != 0) {
3649 3647 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]);
3650 3648 pp++;
3651 3649 ASSERT(PAGE_EXCL(pp));
3652 3650 ASSERT(pp->p_szc == szc);
3653 3651 page_list_concat(&tmp_pplist, &pp);
3654 3652 }
3655 3653 }
3656 3654 if (tmp_pplist != NULL) {
3657 3655 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]);
3658 3656 page_list_concat(&tmp_pplist, &pplist);
3659 3657 pplist = tmp_pplist;
3660 3658 }
3661 3659 /*
3662 3660 * At this point all pages are either on done_pplist or
3663 3661 * pplist. They can't all be on done_pplist; otherwise
3664 3662 * we'd already be done.
3665 3663 */
3666 3664 ASSERT(pplist != NULL);
3667 3665 if (nios != 0) {
3668 3666 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]);
3669 3667 pp = pplist;
3670 3668 do {
3671 3669 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]);
3672 3670 ASSERT(pp->p_szc == szc);
3673 3671 ASSERT(PAGE_EXCL(pp));
3674 3672 ASSERT(pp->p_vnode != vp);
3675 3673 pp->p_szc = 0;
3676 3674 } while ((pp = pp->p_next) != pplist);
3677 3675
3678 3676 pp = done_pplist;
3679 3677 do {
3680 3678 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]);
3681 3679 ASSERT(pp->p_szc == szc);
3682 3680 ASSERT(PAGE_EXCL(pp));
3683 3681 ASSERT(pp->p_vnode == vp);
3684 3682 pp->p_szc = 0;
3685 3683 } while ((pp = pp->p_next) != done_pplist);
3686 3684
3687 3685 while (pplist != NULL) {
3688 3686 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]);
3689 3687 pp = pplist;
3690 3688 page_sub(&pplist, pp);
3691 3689 page_free(pp, 0);
3692 3690 }
3693 3691
3694 3692 while (done_pplist != NULL) {
3695 3693 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]);
3696 3694 pp = done_pplist;
3697 3695 page_sub(&done_pplist, pp);
3698 3696 page_unlock(pp);
3699 3697 }
3700 3698 *ppplist = NULL;
3701 3699 return (0);
3702 3700 }
3703 3701 ASSERT(pplist == *ppplist);
3704 3702 if (io_err) {
3705 3703 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]);
3706 3704 /*
3707 3705 * Don't downsize on I/O error.
3708 3706 * See if VOP_GETPAGE() succeeds.
3709 3707 * pplist may still be used in this case
3710 3708 * for relocations.
3711 3709 */
3712 3710 return (0);
3713 3711 }
3714 3712 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]);
3715 3713 page_free_replacement_page(pplist);
3716 3714 page_create_putback(pages);
3717 3715 *ppplist = NULL;
3718 3716 return (0);
3719 3717 }
3720 3718
3721 3719 int segvn_anypgsz = 0;
3722 3720
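/*
 * SEGVN_RESTORE_SOFTLOCK_VP() undoes the softlockcnt bump taken when an
 * F_SOFTLOCK fault entered the large page region. Note that svd is not
 * a macro parameter; the macro relies on a segvn_data pointer named svd
 * being in scope at the expansion site.
 */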
3723 3721 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \
3724 3722 if ((type) == F_SOFTLOCK) { \
3725 3723 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
3726 3724 -(pages)); \
3727 3725 }
3728 3726
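/*
 * For VMODSORT vnodes SEGVN_UPDATE_MODBITS() keeps the mod bits
 * consistent with the protections about to be loaded: a write fault
 * marks every constituent page modified up front, while a read fault
 * keeps PROT_WRITE only if every constituent page is already modified;
 * otherwise PROT_WRITE is stripped so that the first store refaults and
 * sets the mod bits then.
 */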
3729 3727 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \
3730 3728 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3731 3729 if ((rw) == S_WRITE) { \
3732 3730 for (i = 0; i < (pages); i++) { \
3733 3731 ASSERT((ppa)[i]->p_vnode == \
3734 3732 (ppa)[0]->p_vnode); \
3735 3733 hat_setmod((ppa)[i]); \
3736 3734 } \
3737 3735 } else if ((rw) != S_OTHER && \
3738 3736 ((prot) & (vpprot) & PROT_WRITE)) { \
3739 3737 for (i = 0; i < (pages); i++) { \
3740 3738 ASSERT((ppa)[i]->p_vnode == \
3741 3739 (ppa)[0]->p_vnode); \
3742 3740 if (!hat_ismod((ppa)[i])) { \
3743 3741 prot &= ~PROT_WRITE; \
3744 3742 break; \
3745 3743 } \
3746 3744 } \
3747 3745 } \
3748 3746 }
3749 3747
3750 3748 #ifdef VM_STATS
3751 3749
3752 3750 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \
3753 3751 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
3754 3752
3755 3753 #else /* VM_STATS */
3756 3754
3757 3755 #define SEGVN_VMSTAT_FLTVNPAGES(idx)
3758 3756
3759 3757 #endif
3760 3758
3761 3759 static faultcode_t
3762 3760 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
3763 3761 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
3764 3762 caddr_t eaddr, int brkcow)
3765 3763 {
3766 3764 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
3767 3765 struct anon_map *amp = svd->amp;
3768 3766 uchar_t segtype = svd->type;
3769 3767 uint_t szc = seg->s_szc;
3770 3768 size_t pgsz = page_get_pagesize(szc);
3771 3769 size_t maxpgsz = pgsz;
3772 3770 pgcnt_t pages = btop(pgsz);
3773 3771 pgcnt_t maxpages = pages;
3774 3772 size_t ppasize = (pages + 1) * sizeof (page_t *);
3775 3773 caddr_t a = lpgaddr;
3776 3774 caddr_t maxlpgeaddr = lpgeaddr;
3777 3775 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3778 3776 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3779 3777 struct vpage *vpage = (svd->vpage != NULL) ?
3780 3778 &svd->vpage[seg_page(seg, a)] : NULL;
3781 3779 vnode_t *vp = svd->vp;
3782 3780 page_t **ppa;
3783 3781 uint_t pszc;
3784 3782 size_t ppgsz;
3785 3783 pgcnt_t ppages;
3786 3784 faultcode_t err = 0;
3787 3785 int ierr;
3788 3786 int vop_size_err = 0;
3789 3787 uint_t protchk, prot, vpprot;
3790 3788 ulong_t i;
3791 3789 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
3792 3790 anon_sync_obj_t an_cookie;
3793 3791 enum seg_rw arw;
3794 3792 int alloc_failed = 0;
3795 3793 int adjszc_chk;
3796 3794 struct vattr va;
3797 3795 int xhat = 0;
3798 3796 page_t *pplist;
3799 3797 pfn_t pfn;
3800 3798 int physcontig;
3801 3799 int upgrdfail;
3802 3800 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
3803 3801 int tron = (svd->tr_state == SEGVN_TR_ON);
3804 3802
3805 3803 ASSERT(szc != 0);
3806 3804 ASSERT(vp != NULL);
3807 3805 ASSERT(brkcow == 0 || amp != NULL);
3808 3806 ASSERT(tron == 0 || amp != NULL);
3809 3807 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
3810 3808 ASSERT(!(svd->flags & MAP_NORESERVE));
3811 3809 ASSERT(type != F_SOFTUNLOCK);
3812 3810 ASSERT(IS_P2ALIGNED(a, maxpgsz));
3813 3811 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
3814 3812 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3815 3813 ASSERT(seg->s_szc < NBBY * sizeof (int));
3816 3814 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
3817 3815 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3818 3816
3819 3817 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]);
3820 3818 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]);
3821 3819
3822 3820 if (svd->flags & MAP_TEXT) {
3823 3821 hat_flag |= HAT_LOAD_TEXT;
3824 3822 }
3825 3823
3826 3824 if (svd->pageprot) {
3827 3825 switch (rw) {
3828 3826 case S_READ:
3829 3827 protchk = PROT_READ;
3830 3828 break;
3831 3829 case S_WRITE:
3832 3830 protchk = PROT_WRITE;
3833 3831 break;
3834 3832 case S_EXEC:
3835 3833 protchk = PROT_EXEC;
3836 3834 break;
3837 3835 case S_OTHER:
3838 3836 default:
3839 3837 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
3840 3838 break;
3841 3839 }
3842 3840 } else {
3843 3841 prot = svd->prot;
3844 3842 /* caller has already done segment level protection check. */
3845 3843 }
3846 3844
3847 3845 if (seg->s_as->a_hat != hat) {
3848 3846 xhat = 1;
3849 3847 }
3850 3848
3851 3849 if (rw == S_WRITE && segtype == MAP_PRIVATE) {
3852 3850 SEGVN_VMSTAT_FLTVNPAGES(2);
3853 3851 arw = S_READ;
3854 3852 } else {
3855 3853 arw = rw;
3856 3854 }
3857 3855
3858 3856 ppa = kmem_alloc(ppasize, KM_SLEEP);
3859 3857
3860 3858 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);
3861 3859
3862 3860 for (;;) {
3863 3861 adjszc_chk = 0;
3864 3862 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
3865 3863 if (adjszc_chk) {
3866 3864 while (szc < seg->s_szc) {
3867 3865 uintptr_t e;
3868 3866 uint_t tszc;
3869 3867 tszc = segvn_anypgsz_vnode ? szc + 1 :
3870 3868 seg->s_szc;
3871 3869 ppgsz = page_get_pagesize(tszc);
3872 3870 if (!IS_P2ALIGNED(a, ppgsz) ||
3873 3871 ((alloc_failed >> tszc) & 0x1)) {
3874 3872 break;
3875 3873 }
3876 3874 SEGVN_VMSTAT_FLTVNPAGES(4);
3877 3875 szc = tszc;
3878 3876 pgsz = ppgsz;
3879 3877 pages = btop(pgsz);
3880 3878 e = P2ROUNDUP((uintptr_t)eaddr, pgsz);
3881 3879 lpgeaddr = (caddr_t)e;
3882 3880 }
3883 3881 }
3884 3882
3885 3883 again:
3886 3884 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) {
3887 3885 ASSERT(IS_P2ALIGNED(aindx, maxpages));
3888 3886 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
3889 3887 anon_array_enter(amp, aindx, &an_cookie);
3890 3888 if (anon_get_ptr(amp->ahp, aindx) != NULL) {
3891 3889 SEGVN_VMSTAT_FLTVNPAGES(5);
3892 3890 ASSERT(anon_pages(amp->ahp, aindx,
3893 3891 maxpages) == maxpages);
3894 3892 anon_array_exit(&an_cookie);
3895 3893 ANON_LOCK_EXIT(&->a_rwlock);
3896 3894 err = segvn_fault_anonpages(hat, seg,
3897 3895 a, a + maxpgsz, type, rw,
3898 3896 MAX(a, addr),
3899 3897 MIN(a + maxpgsz, eaddr), brkcow);
3900 3898 if (err != 0) {
3901 3899 SEGVN_VMSTAT_FLTVNPAGES(6);
3902 3900 goto out;
3903 3901 }
3904 3902 if (szc < seg->s_szc) {
3905 3903 szc = seg->s_szc;
3906 3904 pgsz = maxpgsz;
3907 3905 pages = maxpages;
3908 3906 lpgeaddr = maxlpgeaddr;
3909 3907 }
3910 3908 goto next;
3911 3909 } else {
3912 3910 ASSERT(anon_pages(amp->ahp, aindx,
3913 3911 maxpages) == 0);
3914 3912 SEGVN_VMSTAT_FLTVNPAGES(7);
3915 3913 anon_array_exit(&an_cookie);
3916 3914 ANON_LOCK_EXIT(&->a_rwlock);
3917 3915 }
3918 3916 }
3919 3917 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz));
3920 3918 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz));
3921 3919
3922 3920 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
3923 3921 ASSERT(vpage != NULL);
3924 3922 prot = VPP_PROT(vpage);
3925 3923 ASSERT(sameprot(seg, a, maxpgsz));
3926 3924 if ((prot & protchk) == 0) {
3927 3925 SEGVN_VMSTAT_FLTVNPAGES(8);
3928 3926 err = FC_PROT;
3929 3927 goto out;
3930 3928 }
3931 3929 }
3932 3930 if (type == F_SOFTLOCK) {
3933 3931 atomic_add_long((ulong_t *)&svd->softlockcnt,
3934 3932 pages);
3935 3933 }
3936 3934
3937 3935 pplist = NULL;
3938 3936 physcontig = 0;
3939 3937 ppa[0] = NULL;
3940 3938 if (!brkcow && !tron && szc &&
3941 3939 !page_exists_physcontig(vp, off, szc,
3942 3940 segtype == MAP_PRIVATE ? ppa : NULL)) {
3943 3941 SEGVN_VMSTAT_FLTVNPAGES(9);
3944 3942 if (page_alloc_pages(vp, seg, a, &pplist, NULL,
3945 3943 szc, 0, 0) && type != F_SOFTLOCK) {
3946 3944 SEGVN_VMSTAT_FLTVNPAGES(10);
3947 3945 pszc = 0;
3948 3946 ierr = -1;
3949 3947 alloc_failed |= (1 << szc);
3950 3948 break;
3951 3949 }
3952 3950 if (pplist != NULL &&
3953 3951 vp->v_mpssdata == SEGVN_PAGEIO) {
3954 3952 int downsize;
3955 3953 SEGVN_VMSTAT_FLTVNPAGES(11);
3956 3954 physcontig = segvn_fill_vp_pages(svd,
3957 3955 vp, off, szc, ppa, &pplist,
3958 3956 &pszc, &downsize);
3959 3957 ASSERT(!physcontig || pplist == NULL);
3960 3958 if (!physcontig && downsize &&
3961 3959 type != F_SOFTLOCK) {
3962 3960 ASSERT(pplist == NULL);
3963 3961 SEGVN_VMSTAT_FLTVNPAGES(12);
3964 3962 ierr = -1;
3965 3963 break;
3966 3964 }
3967 3965 ASSERT(!physcontig ||
3968 3966 segtype == MAP_PRIVATE ||
3969 3967 ppa[0] == NULL);
3970 3968 if (physcontig && ppa[0] == NULL) {
3971 3969 physcontig = 0;
3972 3970 }
3973 3971 }
3974 3972 } else if (!brkcow && !tron && szc && ppa[0] != NULL) {
3975 3973 SEGVN_VMSTAT_FLTVNPAGES(13);
3976 3974 ASSERT(segtype == MAP_PRIVATE);
3977 3975 physcontig = 1;
3978 3976 }
3979 3977
3980 3978 if (!physcontig) {
3981 3979 SEGVN_VMSTAT_FLTVNPAGES(14);
3982 3980 ppa[0] = NULL;
3983 3981 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
3984 3982 &vpprot, ppa, pgsz, seg, a, arw,
3985 3983 svd->cred, NULL);
3986 3984 #ifdef DEBUG
3987 3985 if (ierr == 0) {
3988 3986 for (i = 0; i < pages; i++) {
3989 3987 ASSERT(PAGE_LOCKED(ppa[i]));
3990 3988 ASSERT(!PP_ISFREE(ppa[i]));
3991 3989 ASSERT(ppa[i]->p_vnode == vp);
3992 3990 ASSERT(ppa[i]->p_offset ==
3993 3991 off + (i << PAGESHIFT));
3994 3992 }
3995 3993 }
3996 3994 #endif /* DEBUG */
3997 3995 if (segtype == MAP_PRIVATE) {
3998 3996 SEGVN_VMSTAT_FLTVNPAGES(15);
3999 3997 vpprot &= ~PROT_WRITE;
4000 3998 }
4001 3999 } else {
4002 4000 ASSERT(segtype == MAP_PRIVATE);
4003 4001 SEGVN_VMSTAT_FLTVNPAGES(16);
4004 4002 vpprot = PROT_ALL & ~PROT_WRITE;
4005 4003 ierr = 0;
4006 4004 }
4007 4005
4008 4006 if (ierr != 0) {
4009 4007 SEGVN_VMSTAT_FLTVNPAGES(17);
4010 4008 if (pplist != NULL) {
4011 4009 SEGVN_VMSTAT_FLTVNPAGES(18);
4012 4010 page_free_replacement_page(pplist);
4013 4011 page_create_putback(pages);
4014 4012 }
4015 4013 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4016 4014 if (a + pgsz <= eaddr) {
4017 4015 SEGVN_VMSTAT_FLTVNPAGES(19);
4018 4016 err = FC_MAKE_ERR(ierr);
4019 4017 goto out;
4020 4018 }
4021 4019 va.va_mask = AT_SIZE;
4022 4020 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
4023 4021 SEGVN_VMSTAT_FLTVNPAGES(20);
4024 4022 err = FC_MAKE_ERR(EIO);
4025 4023 goto out;
4026 4024 }
4027 4025 if (btopr(va.va_size) >= btopr(off + pgsz)) {
4028 4026 SEGVN_VMSTAT_FLTVNPAGES(21);
4029 4027 err = FC_MAKE_ERR(ierr);
4030 4028 goto out;
4031 4029 }
4032 4030 if (btopr(va.va_size) <
4033 4031 btopr(off + (eaddr - a))) {
4034 4032 SEGVN_VMSTAT_FLTVNPAGES(22);
4035 4033 err = FC_MAKE_ERR(ierr);
4036 4034 goto out;
4037 4035 }
4038 4036 if (brkcow || tron || type == F_SOFTLOCK) {
4039 4037 /* can't reduce map area */
4040 4038 SEGVN_VMSTAT_FLTVNPAGES(23);
4041 4039 vop_size_err = 1;
4042 4040 goto out;
4043 4041 }
4044 4042 SEGVN_VMSTAT_FLTVNPAGES(24);
4045 4043 ASSERT(szc != 0);
4046 4044 pszc = 0;
4047 4045 ierr = -1;
4048 4046 break;
4049 4047 }
4050 4048
4051 4049 if (amp != NULL) {
4052 4050 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4053 4051 anon_array_enter(amp, aindx, &an_cookie);
4054 4052 }
4055 4053 if (amp != NULL &&
4056 4054 anon_get_ptr(amp->ahp, aindx) != NULL) {
4057 4055 ulong_t taindx = P2ALIGN(aindx, maxpages);
4058 4056
4059 4057 SEGVN_VMSTAT_FLTVNPAGES(25);
4060 4058 ASSERT(anon_pages(amp->ahp, taindx,
4061 4059 maxpages) == maxpages);
4062 4060 for (i = 0; i < pages; i++) {
4063 4061 page_unlock(ppa[i]);
4064 4062 }
4065 4063 anon_array_exit(&an_cookie);
4066 4064 ANON_LOCK_EXIT(&->a_rwlock);
4067 4065 if (pplist != NULL) {
4068 4066 page_free_replacement_page(pplist);
4069 4067 page_create_putback(pages);
4070 4068 }
4071 4069 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4072 4070 if (szc < seg->s_szc) {
4073 4071 SEGVN_VMSTAT_FLTVNPAGES(26);
4074 4072 /*
4075 4073 * For private segments SOFTLOCK
4076 4074 * either always breaks cow (any rw
4077 4075 * type except S_READ_NOCOW) or
4078 4076 * address space is locked as writer
4079 4077 * (S_READ_NOCOW case) and anon slots
4080 4078 * can't show up on second check.
4081 4079 * Therefore if we are here for
4082 4080 * SOFTLOCK case it must be a cow
4083 4081 * break but cow break never reduces
4084 4082 * szc. text replication (tron) in
4085 4083 * szc. Text replication (tron) in
4086 4084 * this case works as a cow break.
4087 4085 */
4088 4086 ASSERT(!brkcow && !tron &&
4089 4087 type != F_SOFTLOCK);
4090 4088 pszc = seg->s_szc;
4091 4089 ierr = -2;
4092 4090 break;
4093 4091 }
4094 4092 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4095 4093 goto again;
4096 4094 }
4097 4095 #ifdef DEBUG
4098 4096 if (amp != NULL) {
4099 4097 ulong_t taindx = P2ALIGN(aindx, maxpages);
4100 4098 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4101 4099 }
4102 4100 #endif /* DEBUG */
4103 4101
4104 4102 if (brkcow || tron) {
4105 4103 ASSERT(amp != NULL);
4106 4104 ASSERT(pplist == NULL);
4107 4105 ASSERT(szc == seg->s_szc);
4108 4106 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4109 4107 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4110 4108 SEGVN_VMSTAT_FLTVNPAGES(27);
4111 4109 ierr = anon_map_privatepages(amp, aindx, szc,
4112 4110 seg, a, prot, ppa, vpage, segvn_anypgsz,
4113 4111 tron ? PG_LOCAL : 0, svd->cred);
4114 4112 if (ierr != 0) {
4115 4113 SEGVN_VMSTAT_FLTVNPAGES(28);
4116 4114 anon_array_exit(&an_cookie);
4117 4115 ANON_LOCK_EXIT(&->a_rwlock);
4118 4116 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4119 4117 err = FC_MAKE_ERR(ierr);
4120 4118 goto out;
4121 4119 }
4122 4120
4123 4121 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4124 4122 /*
4125 4123 * p_szc can't be changed for locked
4126 4124 * swapfs pages.
4127 4125 */
4128 4126 ASSERT(svd->rcookie ==
4129 4127 HAT_INVALID_REGION_COOKIE);
4130 4128 hat_memload_array(hat, a, pgsz, ppa, prot,
4131 4129 hat_flag);
4132 4130
4133 4131 if (!(hat_flag & HAT_LOAD_LOCK)) {
4134 4132 SEGVN_VMSTAT_FLTVNPAGES(29);
4135 4133 for (i = 0; i < pages; i++) {
4136 4134 page_unlock(ppa[i]);
4137 4135 }
4138 4136 }
4139 4137 anon_array_exit(&an_cookie);
4140 4138 ANON_LOCK_EXIT(&->a_rwlock);
4141 4139 goto next;
4142 4140 }
4143 4141
4144 4142 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4145 4143 (!svd->pageprot && svd->prot == (prot & vpprot)));
4146 4144
4147 4145 pfn = page_pptonum(ppa[0]);
4148 4146 /*
4149 4147 * hat_page_demote() needs an SE_EXCL lock on one of
4150 4148 * the constituent page_t's and it decreases the root's
4151 4149 * p_szc last. This means that if the root's p_szc is
4152 4150 * equal to szc and all its constituent pages are locked,
4153 4151 * any hat_page_demote() that could have changed p_szc
4154 4152 * to szc is already done and no new hat_page_demote()
4155 4153 * can start for this large page.
4156 4154 */
4157 4155
4158 4156 /*
4159 4157 * We need to make sure the same mapping size is used
4160 4158 * for the same address range if there's a possibility
4161 4159 * the address is already mapped, because the hat layer
4162 4160 * panics when a translation is loaded for a range that
4163 4161 * is already mapped with a different page size. We
4164 4162 * achieve this by always using the largest page size
4165 4163 * possible, subject to the constraints of page size,
4166 4164 * segment page size and page alignment. Since mappings
4167 4165 * are invalidated when those constraints change, making
4168 4166 * it impossible to reuse the previously used mapping
4169 4167 * size, no mapping size conflicts should happen.
4170 4168 */
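/*
 * The checks below are, in effect, a small state machine (a sketch):
 * pszc == szc and pfn aligned -> load translations as is; pszc > szc
 * -> consider a size up; otherwise try segvn_full_szcpages() to
 * promote the pages, falling back to small mappings or a size down
 * when that fails.
 */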
4171 4169
4172 4170 chkszc:
4173 4171 if ((pszc = ppa[0]->p_szc) == szc &&
4174 4172 IS_P2ALIGNED(pfn, pages)) {
4175 4173
4176 4174 SEGVN_VMSTAT_FLTVNPAGES(30);
4177 4175 #ifdef DEBUG
4178 4176 for (i = 0; i < pages; i++) {
4179 4177 ASSERT(PAGE_LOCKED(ppa[i]));
4180 4178 ASSERT(!PP_ISFREE(ppa[i]));
4181 4179 ASSERT(page_pptonum(ppa[i]) ==
4182 4180 pfn + i);
4183 4181 ASSERT(ppa[i]->p_szc == szc);
4184 4182 ASSERT(ppa[i]->p_vnode == vp);
4185 4183 ASSERT(ppa[i]->p_offset ==
4186 4184 off + (i << PAGESHIFT));
4187 4185 }
4188 4186 #endif /* DEBUG */
4189 4187 /*
4190 4188 * All pages are of the szc we need and they are
4191 4189 * all locked, so they can't change szc. Load
4192 4190 * translations.
4193 4191 *
4194 4192 * If the page got promoted since the last check
4195 4193 * we don't need pplist.
4196 4194 */
4197 4195 if (pplist != NULL) {
4198 4196 page_free_replacement_page(pplist);
4199 4197 page_create_putback(pages);
4200 4198 }
4201 4199 if (PP_ISMIGRATE(ppa[0])) {
4202 4200 page_migrate(seg, a, ppa, pages);
4203 4201 }
4204 4202 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4205 4203 prot, vpprot);
4206 4204 if (!xhat) {
4207 4205 hat_memload_array_region(hat, a, pgsz,
4208 4206 ppa, prot & vpprot, hat_flag,
4209 4207 svd->rcookie);
4210 4208 } else {
4211 4209 /*
4212 4210 * avoid large xhat mappings to FS
4213 4211 * pages so that hat_page_demote()
4214 4212 * doesn't need to check for xhat
4215 4213 * large mappings.
4216 4214 * Don't use regions with xhats.
4217 4215 */
4218 4216 for (i = 0; i < pages; i++) {
4219 4217 hat_memload(hat,
4220 4218 a + (i << PAGESHIFT),
4221 4219 ppa[i], prot & vpprot,
4222 4220 hat_flag);
4223 4221 }
4224 4222 }
4225 4223
4226 4224 if (!(hat_flag & HAT_LOAD_LOCK)) {
4227 4225 for (i = 0; i < pages; i++) {
4228 4226 page_unlock(ppa[i]);
4229 4227 }
4230 4228 }
4231 4229 if (amp != NULL) {
4232 4230 anon_array_exit(&an_cookie);
4233 4231 ANON_LOCK_EXIT(&->a_rwlock);
4234 4232 }
4235 4233 goto next;
4236 4234 }
4237 4235
4238 4236 /*
4239 4237 * See if upsize is possible.
4240 4238 */
4241 4239 if (pszc > szc && szc < seg->s_szc &&
4242 4240 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4243 4241 pgcnt_t aphase;
4244 4242 uint_t pszc1 = MIN(pszc, seg->s_szc);
4245 4243 ppgsz = page_get_pagesize(pszc1);
4246 4244 ppages = btop(ppgsz);
4247 4245 aphase = btop(P2PHASE((uintptr_t)a, ppgsz));
4248 4246
4249 4247 ASSERT(type != F_SOFTLOCK);
4250 4248
4251 4249 SEGVN_VMSTAT_FLTVNPAGES(31);
4252 4250 if (aphase != P2PHASE(pfn, ppages)) {
4253 4251 segvn_faultvnmpss_align_err4++;
4254 4252 } else {
4255 4253 SEGVN_VMSTAT_FLTVNPAGES(32);
4256 4254 if (pplist != NULL) {
4257 4255 page_t *pl = pplist;
4258 4256 page_free_replacement_page(pl);
4259 4257 page_create_putback(pages);
4260 4258 }
4261 4259 for (i = 0; i < pages; i++) {
4262 4260 page_unlock(ppa[i]);
4263 4261 }
4264 4262 if (amp != NULL) {
4265 4263 anon_array_exit(&an_cookie);
4266 4264 ANON_LOCK_EXIT(&->a_rwlock);
4267 4265 }
4268 4266 pszc = pszc1;
4269 4267 ierr = -2;
4270 4268 break;
4271 4269 }
4272 4270 }
4273 4271
4274 4272 /*
4275 4273 * Check if we should use the smallest mapping size.
4276 4274 */
4277 4275 upgrdfail = 0;
4278 4276 if (szc == 0 || xhat ||
4279 4277 (pszc >= szc &&
4280 4278 !IS_P2ALIGNED(pfn, pages)) ||
4281 4279 (pszc < szc &&
4282 4280 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4283 4281 &pszc))) {
4284 4282
4285 4283 if (upgrdfail && type != F_SOFTLOCK) {
4286 4284 /*
4287 4285 * segvn_full_szcpages failed to lock
4288 4286 * all pages EXCL. Size down.
4289 4287 */
4290 4288 ASSERT(pszc < szc);
4291 4289
4292 4290 SEGVN_VMSTAT_FLTVNPAGES(33);
4293 4291
4294 4292 if (pplist != NULL) {
4295 4293 page_t *pl = pplist;
4296 4294 page_free_replacement_page(pl);
4297 4295 page_create_putback(pages);
4298 4296 }
4299 4297
4300 4298 for (i = 0; i < pages; i++) {
4301 4299 page_unlock(ppa[i]);
4302 4300 }
4303 4301 if (amp != NULL) {
4304 4302 anon_array_exit(&an_cookie);
4305 4303 ANON_LOCK_EXIT(&->a_rwlock);
4306 4304 }
4307 4305 ierr = -1;
4308 4306 break;
4309 4307 }
4310 4308 if (szc != 0 && !xhat && !upgrdfail) {
4311 4309 segvn_faultvnmpss_align_err5++;
4312 4310 }
4313 4311 SEGVN_VMSTAT_FLTVNPAGES(34);
4314 4312 if (pplist != NULL) {
4315 4313 page_free_replacement_page(pplist);
4316 4314 page_create_putback(pages);
4317 4315 }
4318 4316 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4319 4317 prot, vpprot);
4320 4318 if (upgrdfail && segvn_anypgsz_vnode) {
4321 4319 /* SOFTLOCK case */
4322 4320 hat_memload_array_region(hat, a, pgsz,
4323 4321 ppa, prot & vpprot, hat_flag,
4324 4322 svd->rcookie);
4325 4323 } else {
4326 4324 for (i = 0; i < pages; i++) {
4327 4325 hat_memload_region(hat,
4328 4326 a + (i << PAGESHIFT),
4329 4327 ppa[i], prot & vpprot,
4330 4328 hat_flag, svd->rcookie);
4331 4329 }
4332 4330 }
4333 4331 if (!(hat_flag & HAT_LOAD_LOCK)) {
4334 4332 for (i = 0; i < pages; i++) {
4335 4333 page_unlock(ppa[i]);
4336 4334 }
4337 4335 }
4338 4336 if (amp != NULL) {
4339 4337 anon_array_exit(&an_cookie);
4340 4338 ANON_LOCK_EXIT(&->a_rwlock);
4341 4339 }
4342 4340 goto next;
4343 4341 }
4344 4342
4345 4343 if (pszc == szc) {
4346 4344 /*
4347 4345 * segvn_full_szcpages() upgraded pages szc.
4348 4346 */
4349 4347 ASSERT(pszc == ppa[0]->p_szc);
4350 4348 ASSERT(IS_P2ALIGNED(pfn, pages));
4351 4349 goto chkszc;
4352 4350 }
4353 4351
4354 4352 if (pszc > szc) {
4355 4353 kmutex_t *szcmtx;
4356 4354 SEGVN_VMSTAT_FLTVNPAGES(35);
4357 4355 /*
4358 4356 * p_szc of ppa[0] can change since we haven't
4359 4357 * locked all constituent pages. Call
4360 4358 * page_szc_lock() to prevent szc changes.
4361 4359 * This should be a rare case that happens when
4362 4360 * multiple segments use a different page size
4363 4361 * to map the same file offsets.
4364 4362 */
4365 4363 szcmtx = page_szc_lock(ppa[0]);
4366 4364 pszc = ppa[0]->p_szc;
4367 4365 ASSERT(szcmtx != NULL || pszc == 0);
4368 4366 ASSERT(ppa[0]->p_szc <= pszc);
4369 4367 if (pszc <= szc) {
4370 4368 SEGVN_VMSTAT_FLTVNPAGES(36);
4371 4369 if (szcmtx != NULL) {
4372 4370 mutex_exit(szcmtx);
4373 4371 }
4374 4372 goto chkszc;
4375 4373 }
4376 4374 if (pplist != NULL) {
4377 4375 /*
4378 4376 * Page got promoted since the last check.
4379 4377 * We don't need the preallocated large
4380 4378 * page.
4381 4379 */
4382 4380 SEGVN_VMSTAT_FLTVNPAGES(37);
4383 4381 page_free_replacement_page(pplist);
4384 4382 page_create_putback(pages);
4385 4383 }
4386 4384 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4387 4385 prot, vpprot);
4388 4386 hat_memload_array_region(hat, a, pgsz, ppa,
4389 4387 prot & vpprot, hat_flag, svd->rcookie);
4390 4388 mutex_exit(szcmtx);
4391 4389 if (!(hat_flag & HAT_LOAD_LOCK)) {
4392 4390 for (i = 0; i < pages; i++) {
4393 4391 page_unlock(ppa[i]);
4394 4392 }
4395 4393 }
4396 4394 if (amp != NULL) {
4397 4395 anon_array_exit(&an_cookie);
4398 4396 ANON_LOCK_EXIT(&->a_rwlock);
4399 4397 }
4400 4398 goto next;
4401 4399 }
4402 4400
4403 4401 /*
4404 4402 * If the page got demoted since the last check
4405 4403 * we may not have allocated a large enough page.
4406 4404 * Allocate now.
4407 4405 */
4408 4406 if (pplist == NULL &&
4409 4407 page_alloc_pages(vp, seg, a, &pplist, NULL,
4410 4408 szc, 0, 0) && type != F_SOFTLOCK) {
4411 4409 SEGVN_VMSTAT_FLTVNPAGES(38);
4412 4410 for (i = 0; i < pages; i++) {
4413 4411 page_unlock(ppa[i]);
4414 4412 }
4415 4413 if (amp != NULL) {
4416 4414 anon_array_exit(&an_cookie);
4417 4415 ANON_LOCK_EXIT(&->a_rwlock);
4418 4416 }
4419 4417 ierr = -1;
4420 4418 alloc_failed |= (1 << szc);
4421 4419 break;
4422 4420 }
4423 4421
4424 4422 SEGVN_VMSTAT_FLTVNPAGES(39);
4425 4423
4426 4424 if (pplist != NULL) {
4427 4425 segvn_relocate_pages(ppa, pplist);
4428 4426 #ifdef DEBUG
4429 4427 } else {
4430 4428 ASSERT(type == F_SOFTLOCK);
4431 4429 SEGVN_VMSTAT_FLTVNPAGES(40);
4432 4430 #endif /* DEBUG */
4433 4431 }
4434 4432
4435 4433 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4436 4434
4437 4435 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4438 4436 ASSERT(type == F_SOFTLOCK);
4439 4437 for (i = 0; i < pages; i++) {
4440 4438 ASSERT(ppa[i]->p_szc < szc);
4441 4439 hat_memload_region(hat,
4442 4440 a + (i << PAGESHIFT),
4443 4441 ppa[i], prot & vpprot, hat_flag,
4444 4442 svd->rcookie);
4445 4443 }
4446 4444 } else {
4447 4445 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4448 4446 hat_memload_array_region(hat, a, pgsz, ppa,
4449 4447 prot & vpprot, hat_flag, svd->rcookie);
4450 4448 }
4451 4449 if (!(hat_flag & HAT_LOAD_LOCK)) {
4452 4450 for (i = 0; i < pages; i++) {
4453 4451 ASSERT(PAGE_SHARED(ppa[i]));
4454 4452 page_unlock(ppa[i]);
4455 4453 }
4456 4454 }
4457 4455 if (amp != NULL) {
4458 4456 anon_array_exit(&an_cookie);
4459 4457 ANON_LOCK_EXIT(&->a_rwlock);
4460 4458 }
4461 4459
4462 4460 next:
4463 4461 if (vpage != NULL) {
4464 4462 vpage += pages;
4465 4463 }
4466 4464 adjszc_chk = 1;
4467 4465 }
4468 4466 if (a == lpgeaddr)
4469 4467 break;
4470 4468 ASSERT(a < lpgeaddr);
4471 4469
4472 4470 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4473 4471
4474 4472 /*
4475 4473 * ierr == -1 means we failed to map with a large page
4476 4474 * (either due to allocation/relocation failures or
4477 4475 * misalignment with other mappings to this file).
4478 4476 *
4479 4477 * ierr == -2 means some other thread allocated a large page
4480 4478 * after we gave up trying to map with a large page. Retry
4481 4479 * with a larger mapping.
4482 4480 */
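/*
 * For example (an illustrative walk, not from the source): with
 * seg->s_szc == 2 and segvn_anypgsz_vnode == 0, ierr == -1 drops szc
 * straight to 0, while ierr == -2 restarts the loop at szc = pszc.
 */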
4483 4481 ASSERT(ierr == -1 || ierr == -2);
4484 4482 ASSERT(ierr == -2 || szc != 0);
4485 4483 ASSERT(ierr == -1 || szc < seg->s_szc);
4486 4484 if (ierr == -2) {
4487 4485 SEGVN_VMSTAT_FLTVNPAGES(41);
4488 4486 ASSERT(pszc > szc && pszc <= seg->s_szc);
4489 4487 szc = pszc;
4490 4488 } else if (segvn_anypgsz_vnode) {
4491 4489 SEGVN_VMSTAT_FLTVNPAGES(42);
4492 4490 szc--;
4493 4491 } else {
4494 4492 SEGVN_VMSTAT_FLTVNPAGES(43);
4495 4493 ASSERT(pszc < szc);
4496 4494 /*
4497 4495 * Another process created a pszc large page,
4498 4496 * but we still have to drop to szc 0.
4499 4497 */
4500 4498 szc = 0;
4501 4499 }
4502 4500
4503 4501 pgsz = page_get_pagesize(szc);
4504 4502 pages = btop(pgsz);
4505 4503 if (ierr == -2) {
4506 4504 /*
4507 4505 * Size up case. Note lpgaddr may only be needed for
4508 4506 * softlock case so we don't adjust it here.
4509 4507 */
4510 4508 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4511 4509 ASSERT(a >= lpgaddr);
4512 4510 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4513 4511 off = svd->offset + (uintptr_t)(a - seg->s_base);
4514 4512 aindx = svd->anon_index + seg_page(seg, a);
4515 4513 vpage = (svd->vpage != NULL) ?
4516 4514 &svd->vpage[seg_page(seg, a)] : NULL;
4517 4515 } else {
4518 4516 /*
4519 4517 * Size down case. Note lpgaddr may only be needed for
4520 4518 * softlock case so we don't adjust it here.
4521 4519 */
4522 4520 ASSERT(IS_P2ALIGNED(a, pgsz));
4523 4521 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4524 4522 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4525 4523 ASSERT(a < lpgeaddr);
4526 4524 if (a < addr) {
4527 4525 SEGVN_VMSTAT_FLTVNPAGES(44);
4528 4526 /*
4529 4527 * The beginning of the large page region can
4530 4528 * be pulled to the right to make a smaller
4531 4529 * region. We haven't yet faulted a single
4532 4530 * page.
4533 4531 */
4534 4532 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4535 4533 ASSERT(a >= lpgaddr);
4536 4534 off = svd->offset +
4537 4535 (uintptr_t)(a - seg->s_base);
4538 4536 aindx = svd->anon_index + seg_page(seg, a);
4539 4537 vpage = (svd->vpage != NULL) ?
4540 4538 &svd->vpage[seg_page(seg, a)] : NULL;
4541 4539 }
4542 4540 }
4543 4541 }
4544 4542 out:
4545 4543 kmem_free(ppa, ppasize);
4546 4544 if (!err && !vop_size_err) {
4547 4545 SEGVN_VMSTAT_FLTVNPAGES(45);
4548 4546 return (0);
4549 4547 }
4550 4548 if (type == F_SOFTLOCK && a > lpgaddr) {
4551 4549 SEGVN_VMSTAT_FLTVNPAGES(46);
4552 4550 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4553 4551 }
4554 4552 if (!vop_size_err) {
4555 4553 SEGVN_VMSTAT_FLTVNPAGES(47);
4556 4554 return (err);
4557 4555 }
4558 4556 ASSERT(brkcow || tron || type == F_SOFTLOCK);
4559 4557 /*
4560 4558 * Large page end is mapped beyond the end of file and it's a cow
4561 4559 * fault (can be a text replication induced cow) or a softlock, so we can't
4562 4560 * reduce the map area. For now just demote the segment. This should
4563 4561 * really only happen if the end of the file changed after the mapping
4564 4562 * was established since when large page segments are created we make
4565 4563 * sure they don't extend beyond the end of the file.
4566 4564 */
4567 4565 SEGVN_VMSTAT_FLTVNPAGES(48);
4568 4566
4569 4567 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4570 4568 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4571 4569 err = 0;
4572 4570 if (seg->s_szc != 0) {
4573 4571 segvn_fltvnpages_clrszc_cnt++;
4574 4572 ASSERT(svd->softlockcnt == 0);
4575 4573 err = segvn_clrszc(seg);
4576 4574 if (err != 0) {
4577 4575 segvn_fltvnpages_clrszc_err++;
4578 4576 }
4579 4577 }
4580 4578 ASSERT(err || seg->s_szc == 0);
4581 4579 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
4582 4580 /* segvn_fault will do its job as if szc had been zero to begin with */
4583 4581 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err));
4584 4582 }
4585 4583
4586 4584 /*
4587 4585 * This routine will attempt to fault in one large page.
4588 4586 * It will use smaller pages if that fails.
4589 4587 * It should only be called for pure anonymous segments.
4590 4588 */
4591 4589 static faultcode_t
4592 4590 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
4593 4591 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
4594 4592 caddr_t eaddr, int brkcow)
4595 4593 {
4596 4594 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4597 4595 struct anon_map *amp = svd->amp;
4598 4596 uchar_t segtype = svd->type;
4599 4597 uint_t szc = seg->s_szc;
4600 4598 size_t pgsz = page_get_pagesize(szc);
4601 4599 size_t maxpgsz = pgsz;
4602 4600 pgcnt_t pages = btop(pgsz);
4603 4601 uint_t ppaszc = szc;
4604 4602 caddr_t a = lpgaddr;
4605 4603 ulong_t aindx = svd->anon_index + seg_page(seg, a);
4606 4604 struct vpage *vpage = (svd->vpage != NULL) ?
4607 4605 &svd->vpage[seg_page(seg, a)] : NULL;
4608 4606 page_t **ppa;
4609 4607 uint_t ppa_szc;
4610 4608 faultcode_t err;
4611 4609 int ierr;
4612 4610 uint_t protchk, prot, vpprot;
4613 4611 ulong_t i;
4614 4612 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
4615 4613 anon_sync_obj_t cookie;
4616 4614 int adjszc_chk;
4617 4615 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;
4618 4616
4619 4617 ASSERT(szc != 0);
4620 4618 ASSERT(amp != NULL);
4621 4619 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
4622 4620 ASSERT(!(svd->flags & MAP_NORESERVE));
4623 4621 ASSERT(type != F_SOFTUNLOCK);
4624 4622 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4625 4623 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
4626 4624 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4627 4625
4628 4626 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
4629 4627
4630 4628 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]);
4631 4629 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]);
4632 4630
4633 4631 if (svd->flags & MAP_TEXT) {
4634 4632 hat_flag |= HAT_LOAD_TEXT;
4635 4633 }
4636 4634
4637 4635 if (svd->pageprot) {
4638 4636 switch (rw) {
4639 4637 case S_READ:
4640 4638 protchk = PROT_READ;
4641 4639 break;
4642 4640 case S_WRITE:
4643 4641 protchk = PROT_WRITE;
4644 4642 break;
4645 4643 case S_EXEC:
4646 4644 protchk = PROT_EXEC;
4647 4645 break;
4648 4646 case S_OTHER:
4649 4647 default:
4650 4648 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4651 4649 break;
4652 4650 }
4653 4651 VM_STAT_ADD(segvnvmstats.fltanpages[2]);
4654 4652 } else {
4655 4653 prot = svd->prot;
4656 4654 /* caller has already done segment level protection check. */
4657 4655 }
4658 4656
4659 4657 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
4660 4658 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4661 4659 for (;;) {
4662 4660 adjszc_chk = 0;
4663 4661 for (; a < lpgeaddr; a += pgsz, aindx += pages) {
4664 4662 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
4665 4663 VM_STAT_ADD(segvnvmstats.fltanpages[3]);
4666 4664 ASSERT(vpage != NULL);
4667 4665 prot = VPP_PROT(vpage);
4668 4666 ASSERT(sameprot(seg, a, maxpgsz));
4669 4667 if ((prot & protchk) == 0) {
4670 4668 err = FC_PROT;
4671 4669 goto error;
4672 4670 }
4673 4671 }
4674 4672 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) &&
4675 4673 pgsz < maxpgsz) {
4676 4674 ASSERT(a > lpgaddr);
4677 4675 szc = seg->s_szc;
4678 4676 pgsz = maxpgsz;
4679 4677 pages = btop(pgsz);
4680 4678 ASSERT(IS_P2ALIGNED(aindx, pages));
4681 4679 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr,
4682 4680 pgsz);
4683 4681 }
4684 4682 if (type == F_SOFTLOCK) {
4685 4683 atomic_add_long((ulong_t *)&svd->softlockcnt,
4686 4684 pages);
4687 4685 }
4688 4686 anon_array_enter(amp, aindx, &cookie);
4689 4687 ppa_szc = (uint_t)-1;
4690 4688 ierr = anon_map_getpages(amp, aindx, szc, seg, a,
4691 4689 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow,
4692 4690 segvn_anypgsz, pgflags, svd->cred);
4693 4691 if (ierr != 0) {
4694 4692 anon_array_exit(&cookie);
4695 4693 VM_STAT_ADD(segvnvmstats.fltanpages[4]);
4696 4694 if (type == F_SOFTLOCK) {
4697 4695 atomic_add_long(
4698 4696 (ulong_t *)&svd->softlockcnt,
4699 4697 -pages);
4700 4698 }
4701 4699 if (ierr > 0) {
4702 4700 VM_STAT_ADD(segvnvmstats.fltanpages[6]);
4703 4701 err = FC_MAKE_ERR(ierr);
4704 4702 goto error;
4705 4703 }
4706 4704 break;
4707 4705 }
4708 4706
4709 4707 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4710 4708
4711 4709 ASSERT(segtype == MAP_SHARED ||
4712 4710 ppa[0]->p_szc <= szc);
4713 4711 ASSERT(segtype == MAP_PRIVATE ||
4714 4712 ppa[0]->p_szc >= szc);
4715 4713
4716 4714 /*
4717 4715 * Handle pages that have been marked for migration
4718 4716 */
4719 4717 if (lgrp_optimizations())
4720 4718 page_migrate(seg, a, ppa, pages);
4721 4719
4722 4720 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
4723 4721
4724 4722 if (segtype == MAP_SHARED) {
4725 4723 vpprot |= PROT_WRITE;
4726 4724 }
4727 4725
4728 4726 hat_memload_array(hat, a, pgsz, ppa,
4729 4727 prot & vpprot, hat_flag);
4730 4728
4731 4729 if (hat_flag & HAT_LOAD_LOCK) {
4732 4730 VM_STAT_ADD(segvnvmstats.fltanpages[7]);
4733 4731 } else {
4734 4732 VM_STAT_ADD(segvnvmstats.fltanpages[8]);
4735 4733 for (i = 0; i < pages; i++)
4736 4734 page_unlock(ppa[i]);
4737 4735 }
4738 4736 if (vpage != NULL)
4739 4737 vpage += pages;
4740 4738
4741 4739 anon_array_exit(&cookie);
4742 4740 adjszc_chk = 1;
4743 4741 }
4744 4742 if (a == lpgeaddr)
4745 4743 break;
4746 4744 ASSERT(a < lpgeaddr);
4747 4745 /*
4748 4746 * ierr == -1 means we failed to allocate a large page.
4749 4747 * so do a size down operation.
4750 4748 *
4751 4749 * ierr == -2 means some other process that privately shares
4752 4750 * pages with this process has allocated a larger page and we
4753 4751 * need to retry with larger pages. So do a size up
4754 4752 * operation. This relies on the fact that large pages are
4755 4753 * never partially shared i.e. if we share any constituent
4756 4754 * page of a large page with another process we must share the
4757 4755 * entire large page. Note this cannot happen for SOFTLOCK
4758 4756 * case, unless current address (a) is at the beginning of the
4759 4757 * next page size boundary because the other process couldn't
4760 4758 * have relocated locked pages.
4761 4759 */
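/*
 * Illustratively (a sketch of the resize rules below): with
 * segvn_anypgsz != 0 the walk is simply szc - 1 on a size down
 * (ierr == -1) and szc + 1 on a size up (ierr == -2); otherwise we
 * jump directly to the szc that anon_map_getpages() reported back in
 * ppa_szc, or to 0/seg->s_szc when none was reported.
 */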
4762 4760 ASSERT(ierr == -1 || ierr == -2);
4763 4761
4764 4762 if (segvn_anypgsz) {
4765 4763 ASSERT(ierr == -2 || szc != 0);
4766 4764 ASSERT(ierr == -1 || szc < seg->s_szc);
4767 4765 szc = (ierr == -1) ? szc - 1 : szc + 1;
4768 4766 } else {
4769 4767 /*
4770 4768 * For non-COW faults and segvn_anypgsz == 0
4771 4769 * we need to be careful not to loop forever
4772 4770 * if existing page is found with szc other
4773 4771 * than 0 or seg->s_szc. This could be due
4774 4772 * to page relocations on behalf of DR or
4775 4773 * more likely large page creation. For this
4776 4774 * case simply re-size to existing page's szc
4777 4775 * if returned by anon_map_getpages().
4778 4776 */
4779 4777 if (ppa_szc == (uint_t)-1) {
4780 4778 szc = (ierr == -1) ? 0 : seg->s_szc;
4781 4779 } else {
4782 4780 ASSERT(ppa_szc <= seg->s_szc);
4783 4781 ASSERT(ierr == -2 || ppa_szc < szc);
4784 4782 ASSERT(ierr == -1 || ppa_szc > szc);
4785 4783 szc = ppa_szc;
4786 4784 }
4787 4785 }
4788 4786
4789 4787 pgsz = page_get_pagesize(szc);
4790 4788 pages = btop(pgsz);
4791 4789 ASSERT(type != F_SOFTLOCK || ierr == -1 ||
4792 4790 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz)));
4793 4791 if (type == F_SOFTLOCK) {
4794 4792 /*
4795 4793 * For softlocks we cannot reduce the fault area
4796 4794 * (calculated based on the largest page size for this
4797 4795 * segment) for a size down, and a is already next
4798 4796 * page size aligned as asserted above for size
4799 4797 * ups. Therefore just continue in case of softlock.
4800 4798 */
4801 4799 VM_STAT_ADD(segvnvmstats.fltanpages[9]);
4802 4800 continue; /* keep lint happy */
4803 4801 } else if (ierr == -2) {
4804 4802
4805 4803 /*
4806 4804 * Size up case. Note lpgaddr may only be needed for
4807 4805 * softlock case so we don't adjust it here.
4808 4806 */
4809 4807 VM_STAT_ADD(segvnvmstats.fltanpages[10]);
4810 4808 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4811 4809 ASSERT(a >= lpgaddr);
4812 4810 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4813 4811 aindx = svd->anon_index + seg_page(seg, a);
4814 4812 vpage = (svd->vpage != NULL) ?
4815 4813 &svd->vpage[seg_page(seg, a)] : NULL;
4816 4814 } else {
4817 4815 /*
4818 4816 * Size down case. Note lpgaddr may only be needed for
4819 4817 * softlock case so we don't adjust it here.
4820 4818 */
4821 4819 VM_STAT_ADD(segvnvmstats.fltanpages[11]);
4822 4820 ASSERT(IS_P2ALIGNED(a, pgsz));
4823 4821 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4824 4822 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4825 4823 ASSERT(a < lpgeaddr);
4826 4824 if (a < addr) {
4827 4825 /*
4828 4826 * The beginning of the large page region can
4829 4827 * be pulled to the right to make a smaller
4830 4828 * region. We haven't yet faulted a single
4831 4829 * page.
4832 4830 */
4833 4831 VM_STAT_ADD(segvnvmstats.fltanpages[12]);
4834 4832 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4835 4833 ASSERT(a >= lpgaddr);
4836 4834 aindx = svd->anon_index + seg_page(seg, a);
4837 4835 vpage = (svd->vpage != NULL) ?
4838 4836 &svd->vpage[seg_page(seg, a)] : NULL;
4839 4837 }
4840 4838 }
4841 4839 }
4842 4840 VM_STAT_ADD(segvnvmstats.fltanpages[13]);
4843 4841 ANON_LOCK_EXIT(&->a_rwlock);
4844 4842 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4845 4843 return (0);
4846 4844 error:
4847 4845 VM_STAT_ADD(segvnvmstats.fltanpages[14]);
4848 4846 ANON_LOCK_EXIT(&->a_rwlock);
4849 4847 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4850 4848 if (type == F_SOFTLOCK && a > lpgaddr) {
4851 4849 VM_STAT_ADD(segvnvmstats.fltanpages[15]);
4852 4850 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4853 4851 }
4854 4852 return (err);
4855 4853 }
4856 4854
4857 4855 int fltadvice = 1; /* set to free behind pages for sequential access */
4858 4856
4859 4857 /*
4860 4858 * This routine is called via a machine specific fault handling routine.
4861 4859 * It is also called by software routines wishing to lock or unlock
4862 4860 * a range of addresses.
4863 4861 *
4864 4862 * Here is the basic algorithm:
4865 4863 * If unlocking
4866 4864 * Call segvn_softunlock
4867 4865 * Return
4868 4866 * endif
4869 4867 * Checking and setup work
4870 4868 * If we will need some non-anonymous pages
4871 4869 * Call VOP_GETPAGE over the range of non-anonymous pages
4872 4870 * endif
4873 4871 * Loop over all addresses requested
4874 4872 * Call segvn_faultpage passing in page list
4875 4873 * to load up translations and handle anonymous pages
4876 4874 * endloop
4877 4875 * Load up translation to any additional pages in page list not
4878 4876 * already handled that fit into this segment
4879 4877 * already handled that fit into this segment */
4880 4878 static faultcode_t
4881 4879 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4882 4880 enum fault_type type, enum seg_rw rw)
4883 4881 {
4884 4882 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4885 4883 page_t **plp, **ppp, *pp;
4886 4884 u_offset_t off;
4887 4885 caddr_t a;
4888 4886 struct vpage *vpage;
4889 4887 uint_t vpprot, prot;
4890 4888 int err;
4891 4889 page_t *pl[PVN_GETPAGE_NUM + 1];
4892 4890 size_t plsz, pl_alloc_sz;
4893 4891 size_t page;
4894 4892 ulong_t anon_index;
4895 4893 struct anon_map *amp;
4896 4894 int dogetpage = 0;
4897 4895 caddr_t lpgaddr, lpgeaddr;
4898 4896 size_t pgsz;
4899 4897 anon_sync_obj_t cookie;
4900 4898 int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4901 4899
4902 4900 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
4903 4901 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4904 4902
4905 4903 /*
4906 4904 * First handle the easy stuff
4907 4905 */
4908 4906 if (type == F_SOFTUNLOCK) {
4909 4907 if (rw == S_READ_NOCOW) {
4910 4908 rw = S_READ;
4911 4909 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
4912 4910 }
4913 4911 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4914 4912 pgsz = (seg->s_szc == 0) ? PAGESIZE :
4915 4913 page_get_pagesize(seg->s_szc);
4916 4914 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]);
4917 4915 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
4918 4916 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw);
4919 4917 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4920 4918 return (0);
4921 4919 }
4922 4920
4923 4921 ASSERT(svd->tr_state == SEGVN_TR_OFF ||
4924 4922 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
4925 4923 if (brkcow == 0) {
4926 4924 if (svd->tr_state == SEGVN_TR_INIT) {
4927 4925 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4928 4926 if (svd->tr_state == SEGVN_TR_INIT) {
4929 4927 ASSERT(svd->vp != NULL && svd->amp == NULL);
4930 4928 ASSERT(svd->flags & MAP_TEXT);
4931 4929 ASSERT(svd->type == MAP_PRIVATE);
4932 4930 segvn_textrepl(seg);
4933 4931 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4934 4932 ASSERT(svd->tr_state != SEGVN_TR_ON ||
4935 4933 svd->amp != NULL);
4936 4934 }
4937 4935 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4938 4936 }
4939 4937 } else if (svd->tr_state != SEGVN_TR_OFF) {
4940 4938 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4941 4939
4942 4940 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) {
4943 4941 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
4944 4942 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4945 4943 return (FC_PROT);
4946 4944 }
4947 4945
4948 4946 if (svd->tr_state == SEGVN_TR_ON) {
4949 4947 ASSERT(svd->vp != NULL && svd->amp != NULL);
4950 4948 segvn_textunrepl(seg, 0);
4951 4949 ASSERT(svd->amp == NULL &&
4952 4950 svd->tr_state == SEGVN_TR_OFF);
4953 4951 } else if (svd->tr_state != SEGVN_TR_OFF) {
4954 4952 svd->tr_state = SEGVN_TR_OFF;
4955 4953 }
4956 4954 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
4957 4955 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4958 4956 }
4959 4957
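/*
 * Retry point: paths below that must upgrade to the writer lock or
 * change segment state (allocate an amp, demote the page size, exit
 * a shared hat region) drop the lock and jump back here.
 */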
4960 4958 top:
4961 4959 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4962 4960
4963 4961 /*
4964 4962 * If we have the same protections for the entire segment,
4965 4963 * ensure that the access being attempted is legitimate.
4966 4964 */
4967 4965
4968 4966 if (svd->pageprot == 0) {
4969 4967 uint_t protchk;
4970 4968
4971 4969 switch (rw) {
4972 4970 case S_READ:
4973 4971 case S_READ_NOCOW:
4974 4972 protchk = PROT_READ;
4975 4973 break;
4976 4974 case S_WRITE:
4977 4975 protchk = PROT_WRITE;
4978 4976 break;
4979 4977 case S_EXEC:
4980 4978 protchk = PROT_EXEC;
4981 4979 break;
4982 4980 case S_OTHER:
4983 4981 default:
4984 4982 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4985 4983 break;
4986 4984 }
4987 4985
4988 4986 if ((svd->prot & protchk) == 0) {
4989 4987 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4990 4988 return (FC_PROT); /* illegal access type */
4991 4989 }
4992 4990 }
4993 4991
4994 4992 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
4995 4993 /* this must be SOFTLOCK S_READ fault */
4996 4994 ASSERT(svd->amp == NULL);
4997 4995 ASSERT(svd->tr_state == SEGVN_TR_OFF);
4998 4996 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4999 4997 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5000 4998 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5001 4999 /*
5002 5000 * this must be the first ever non S_READ_NOCOW
5003 5001 * softlock for this segment.
5004 5002 */
5005 5003 ASSERT(svd->softlockcnt == 0);
5006 5004 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5007 5005 HAT_REGION_TEXT);
5008 5006 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5009 5007 }
5010 5008 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5011 5009 goto top;
5012 5010 }
5013 5011
5014 5012 /*
5015 5013 * We can't allow the long term use of softlocks for vmpss segments,
5016 5014 * because in some file truncation cases we should be able to demote
5017 5015 * the segment, which requires that there are no softlocks. The
5018 5016 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5019 5017 * segment is S_READ_NOCOW, where the caller holds the address space
5020 5018 * locked as writer and calls softunlock before dropping the as lock.
5021 5019 * S_READ_NOCOW is used by /proc to read memory from another user.
5022 5020 *
5023 5021 * Another deadlock between SOFTLOCK and file truncation can happen
5024 5022 * because segvn_fault_vnodepages() calls the FS one pagesize at
5025 5023 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5026 5024 * can cause a deadlock because the first set of page_t's remain
5027 5025 * locked SE_SHARED. To avoid this, we demote segments on a first
5028 5026 * SOFTLOCK if they have a length greater than the segment's
5029 5027 * page size.
5030 5028 *
5031 5029 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5032 5030 * the access type is S_READ_NOCOW and the fault length is less than
5033 5031 * or equal to the segment's page size. While this is quite restrictive,
5034 5032 * it should be the most common case of SOFTLOCK against a vmpss
5035 5033 * segment.
5036 5034 *
5037 5035 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5038 5036 * caller makes sure no COW will be caused by another thread for a
5039 5037 * softlocked page.
5040 5038 */
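/*
 * Example (illustrative): a 16K S_READ_NOCOW SOFTLOCK that straddles a
 * 4M boundary of a 4M-page segment makes CALC_LPG_REGION() below span
 * two large pages (lpgeaddr - lpgaddr == 8M > pgsz), so even this
 * access type demotes the segment.
 */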
5041 5039 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) {
5042 5040 int demote = 0;
5043 5041
5044 5042 if (rw != S_READ_NOCOW) {
5045 5043 demote = 1;
5046 5044 }
5047 5045 if (!demote && len > PAGESIZE) {
5048 5046 pgsz = page_get_pagesize(seg->s_szc);
5049 5047 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5050 5048 lpgeaddr);
5051 5049 if (lpgeaddr - lpgaddr > pgsz) {
5052 5050 demote = 1;
5053 5051 }
5054 5052 }
5055 5053
5056 5054 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5057 5055
5058 5056 if (demote) {
5059 5057 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5060 5058 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5061 5059 if (seg->s_szc != 0) {
5062 5060 segvn_vmpss_clrszc_cnt++;
5063 5061 ASSERT(svd->softlockcnt == 0);
5064 5062 err = segvn_clrszc(seg);
5065 5063 if (err) {
5066 5064 segvn_vmpss_clrszc_err++;
5067 5065 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5068 5066 return (FC_MAKE_ERR(err));
5069 5067 }
5070 5068 }
5071 5069 ASSERT(seg->s_szc == 0);
5072 5070 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5073 5071 goto top;
5074 5072 }
5075 5073 }
5076 5074
5077 5075 /*
5078 5076 * Check to see if we need to allocate an anon_map structure.
5079 5077 */
5080 5078 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) {
5081 5079 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5082 5080 /*
5083 5081 * Drop the "read" lock on the segment and acquire
5084 5082 * the "write" version since we have to allocate the
5085 5083 * anon_map.
5086 5084 */
5087 5085 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5088 5086 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5089 5087
5090 5088 if (svd->amp == NULL) {
5091 5089 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
5092 5090 svd->amp->a_szc = seg->s_szc;
5093 5091 }
5094 5092 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5095 5093
5096 5094 /*
5097 5095 * Start all over again since segment protections
5098 5096 * may have changed after we dropped the "read" lock.
5099 5097 */
5100 5098 goto top;
5101 5099 }
5102 5100
5103 5101 /*
5104 5102 * S_READ_NOCOW vs S_READ distinction was
5105 5103 * only needed for the code above. After
5106 5104 * that we treat it as S_READ.
5107 5105 */
5108 5106 if (rw == S_READ_NOCOW) {
5109 5107 ASSERT(type == F_SOFTLOCK);
5110 5108 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5111 5109 rw = S_READ;
5112 5110 }
5113 5111
5114 5112 amp = svd->amp;
5115 5113
5116 5114 /*
5117 5115 * MADV_SEQUENTIAL work is ignored for large page segments.
5118 5116 */
5119 5117 if (seg->s_szc != 0) {
5120 5118 pgsz = page_get_pagesize(seg->s_szc);
5121 5119 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
5122 5120 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5123 5121 if (svd->vp == NULL) {
5124 5122 err = segvn_fault_anonpages(hat, seg, lpgaddr,
5125 5123 lpgeaddr, type, rw, addr, addr + len, brkcow);
5126 5124 } else {
5127 5125 err = segvn_fault_vnodepages(hat, seg, lpgaddr,
5128 5126 lpgeaddr, type, rw, addr, addr + len, brkcow);
5129 5127 if (err == IE_RETRY) {
5130 5128 ASSERT(seg->s_szc == 0);
5131 5129 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
5132 5130 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5133 5131 goto top;
5134 5132 }
5135 5133 }
5136 5134 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5137 5135 return (err);
5138 5136 }
5139 5137
5140 5138 page = seg_page(seg, addr);
5141 5139 if (amp != NULL) {
5142 5140 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5143 5141 anon_index = svd->anon_index + page;
5144 5142
5145 5143 if (type == F_PROT && rw == S_READ &&
5146 5144 svd->tr_state == SEGVN_TR_OFF &&
5147 5145 svd->type == MAP_PRIVATE && svd->pageprot == 0) {
5148 5146 size_t index = anon_index;
5149 5147 struct anon *ap;
5150 5148
5151 5149 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5152 5150 /*
5153 5151 * The fast path could apply to S_WRITE also, except
5154 5152 * that the protection fault could be caused by lazy
5155 5153 * tlb flush when ro->rw. In this case, the pte is
5156 5154 * RW already. But RO in the other cpu's tlb causes
5157 5155 * the fault. Since hat_chgprot won't do anything if
5158 5156 * pte doesn't change, we may end up faulting
5159 5157 * indefinitely until the RO tlb entry gets replaced.
5160 5158 */
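/*
 * Fast path requirement: every anon slot in the faulting range must
 * exist and be unshared (an_refcnt == 1); otherwise fall through to
 * the slow path below.
 */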
5161 5159 for (a = addr; a < addr + len; a += PAGESIZE, index++) {
5162 5160 anon_array_enter(amp, index, &cookie);
5163 5161 ap = anon_get_ptr(amp->ahp, index);
5164 5162 anon_array_exit(&cookie);
5165 5163 if ((ap == NULL) || (ap->an_refcnt != 1)) {
5166 5164 ANON_LOCK_EXIT(&amp->a_rwlock);
5167 5165 goto slow;
5168 5166 }
5169 5167 }
5170 5168 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5171 5169 ANON_LOCK_EXIT(&amp->a_rwlock);
5172 5170 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5173 5171 return (0);
5174 5172 }
5175 5173 }
5176 5174 slow:
5177 5175
5178 5176 if (svd->vpage == NULL)
5179 5177 vpage = NULL;
5180 5178 else
5181 5179 vpage = &svd->vpage[page];
5182 5180
5183 5181 off = svd->offset + (uintptr_t)(addr - seg->s_base);
5184 5182
5185 5183 /*
5186 5184 * If MADV_SEQUENTIAL has been set for the particular page we
5187 5185 * are faulting on, free behind all pages in the segment and put
5188 5186 * them on the free list.
5189 5187 */
5190 5188
5191 5189 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) {
5192 5190 struct vpage *vpp;
5193 5191 ulong_t fanon_index;
5194 5192 size_t fpage;
5195 5193 u_offset_t pgoff, fpgoff;
5196 5194 struct vnode *fvp;
5197 5195 struct anon *fap = NULL;
5198 5196
5199 5197 if (svd->advice == MADV_SEQUENTIAL ||
5200 5198 (svd->pageadvice &&
5201 5199 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) {
5202 5200 pgoff = off - PAGESIZE;
5203 5201 fpage = page - 1;
5204 5202 if (vpage != NULL)
5205 5203 vpp = &svd->vpage[fpage];
5206 5204 if (amp != NULL)
5207 5205 fanon_index = svd->anon_index + fpage;
5208 5206
5209 5207 while (pgoff > svd->offset) {
5210 5208 if (svd->advice != MADV_SEQUENTIAL &&
5211 5209 (!svd->pageadvice || (vpage &&
5212 5210 VPP_ADVICE(vpp) != MADV_SEQUENTIAL)))
5213 5211 break;
5214 5212
5215 5213 /*
5216 5214 * If this is an anon page, we must find the
5217 5215 * correct <vp, offset> for it
5218 5216 */
5219 5217 fap = NULL;
5220 5218 if (amp != NULL) {
5221 5219 ANON_LOCK_ENTER(&amp->a_rwlock,
5222 5220 RW_READER);
5223 5221 anon_array_enter(amp, fanon_index,
5224 5222 &cookie);
5225 5223 fap = anon_get_ptr(amp->ahp,
5226 5224 fanon_index);
5227 5225 if (fap != NULL) {
5228 5226 swap_xlate(fap, &fvp, &fpgoff);
5229 5227 } else {
5230 5228 fpgoff = pgoff;
5231 5229 fvp = svd->vp;
5232 5230 }
5233 5231 anon_array_exit(&cookie);
5234 5232 ANON_LOCK_EXIT(&amp->a_rwlock);
5235 5233 } else {
5236 5234 fpgoff = pgoff;
5237 5235 fvp = svd->vp;
5238 5236 }
5239 5237 if (fvp == NULL)
5240 5238 break; /* XXX */
5241 5239 /*
5242 5240 * Skip pages that are free or have an
5243 5241 * "exclusive" lock.
5244 5242 */
5245 5243 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED);
5246 5244 if (pp == NULL)
5247 5245 break;
5248 5246 /*
5249 5247 * We don't need the page_struct_lock to test
5250 5248 * as this is only advisory; even if we
5251 5249 * acquire it someone might race in and lock
5252 5250 * the page after we unlock and before the
5253 5251 * PUTPAGE, then VOP_PUTPAGE will do nothing.
5254 5252 */
5255 5253 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
5256 5254 /*
5257 5255 * Hold the vnode before releasing
5258 5256 * the page lock to prevent it from
5259 5257 * being freed and re-used by some
5260 5258 * other thread.
5261 5259 */
5262 5260 VN_HOLD(fvp);
5263 5261 page_unlock(pp);
5264 5262 /*
5265 5263 * We should build a page list
5266 5264 * to kluster putpages XXX
5267 5265 */
5268 5266 (void) VOP_PUTPAGE(fvp,
5269 5267 (offset_t)fpgoff, PAGESIZE,
5270 5268 (B_DONTNEED|B_FREE|B_ASYNC),
5271 5269 svd->cred, NULL);
5272 5270 VN_RELE(fvp);
5273 5271 } else {
5274 5272 /*
5275 5273 * XXX - Should the loop terminate if
5276 5274 * the page is `locked'?
5277 5275 */
5278 5276 page_unlock(pp);
5279 5277 }
5280 5278 --vpp;
5281 5279 --fanon_index;
5282 5280 pgoff -= PAGESIZE;
5283 5281 }
5284 5282 }
5285 5283 }
5286 5284
5287 5285 plp = pl;
5288 5286 *plp = NULL;
5289 5287 pl_alloc_sz = 0;
5290 5288
5291 5289 /*
5292 5290 * See if we need to call VOP_GETPAGE for
5293 5291 * *any* of the range being faulted on.
5294 5292 * We can skip all of this work if there
5295 5293 * was no original vnode.
5296 5294 */
5297 5295 if (svd->vp != NULL) {
5298 5296 u_offset_t vp_off;
5299 5297 size_t vp_len;
5300 5298 struct anon *ap;
5301 5299 vnode_t *vp;
5302 5300
5303 5301 vp_off = off;
5304 5302 vp_len = len;
5305 5303
5306 5304 if (amp == NULL)
5307 5305 dogetpage = 1;
5308 5306 else {
5309 5307 /*
5310 5308 * Only acquire reader lock to prevent amp->ahp
5311 5309 * from being changed. It's ok to miss pages,
5312 5310 * hence we don't do anon_array_enter
5313 5311 */
5314 5312 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5315 5313 ap = anon_get_ptr(amp->ahp, anon_index);
5316 5314
5317 5315 if (len <= PAGESIZE)
5318 5316 /* inline non_anon() */
5319 5317 dogetpage = (ap == NULL);
5320 5318 else
5321 5319 dogetpage = non_anon(amp->ahp, anon_index,
5322 5320 &vp_off, &vp_len);
5323 5321 ANON_LOCK_EXIT(&amp->a_rwlock);
5324 5322 }
5325 5323
5326 5324 if (dogetpage) {
5327 5325 enum seg_rw arw;
5328 5326 struct as *as = seg->s_as;
5329 5327
5330 5328 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) {
5331 5329 /*
5332 5330 * Page list won't fit in local array,
5333 5331 * allocate one of the needed size.
5334 5332 */
5335 5333 pl_alloc_sz =
5336 5334 (btop(len) + 1) * sizeof (page_t *);
5337 5335 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP);
5338 5336 plp[0] = NULL;
5339 5337 plsz = len;
5340 5338 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE ||
5341 5339 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER ||
5342 5340 (((size_t)(addr + PAGESIZE) <
5343 5341 (size_t)(seg->s_base + seg->s_size)) &&
5344 5342 hat_probe(as->a_hat, addr + PAGESIZE))) {
5345 5343 /*
5346 5344 * Ask VOP_GETPAGE to return the exact number
5347 5345 * of pages if
5348 5346 * (a) this is a COW fault, or
5349 5347 * (b) this is a software fault, or
5350 5348 * (c) next page is already mapped.
5351 5349 */
5352 5350 plsz = len;
5353 5351 } else {
5354 5352 /*
5355 5353 * Ask VOP_GETPAGE to return adjacent pages
5356 5354 * within the segment.
5357 5355 */
5358 5356 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t)
5359 5357 ((seg->s_base + seg->s_size) - addr));
5360 5358 ASSERT((addr + plsz) <=
5361 5359 (seg->s_base + seg->s_size));
5362 5360 }
5363 5361
5364 5362 /*
5365 5363 * Need to get some non-anonymous pages.
5366 5364 * We need to make only one call to GETPAGE to do
5367 5365 * this to prevent certain deadlocking conditions
5368 5366 * when we are doing locking. In this case
5369 5367 * non_anon() should have picked up the smallest
5370 5368 * range which includes all the non-anonymous
5371 5369 * pages in the requested range. We have to
5372 5370 * be careful regarding which rw flag to pass in
5373 5371 * because on a private mapping, the underlying
5374 5372 * object is never allowed to be written.
5375 5373 */
5376 5374 if (rw == S_WRITE && svd->type == MAP_PRIVATE) {
5377 5375 arw = S_READ;
5378 5376 } else {
5379 5377 arw = rw;
5380 5378 }
5381 5379 vp = svd->vp;
5382 5380 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5383 5381 "segvn_getpage:seg %p addr %p vp %p",
5384 5382 seg, addr, vp);
5385 5383 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len,
5386 5384 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw,
5387 5385 svd->cred, NULL);
5388 5386 if (err) {
5389 5387 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5390 5388 segvn_pagelist_rele(plp);
5391 5389 if (pl_alloc_sz)
5392 5390 kmem_free(plp, pl_alloc_sz);
5393 5391 return (FC_MAKE_ERR(err));
5394 5392 }
5395 5393 if (svd->type == MAP_PRIVATE)
5396 5394 vpprot &= ~PROT_WRITE;
5397 5395 }
5398 5396 }
5399 5397
5400 5398 /*
5401 5399 * N.B. at this time the plp array has all the needed non-anon
5402 5400 * pages in addition to (possibly) having some adjacent pages.
5403 5401 */
5404 5402
5405 5403 /*
5406 5404 * Always acquire the anon_array_lock to prevent
5407 5405 * 2 threads from allocating separate anon slots for
5408 5406 * the same "addr".
5409 5407 *
5410 5408 * If this is a copy-on-write fault and we don't already
5411 5409 * have the anon_array_lock, acquire it to prevent the
5412 5410 * fault routine from handling multiple copy-on-write faults
5413 5411 * on the same "addr" in the same address space.
5414 5412 *
5415 5413 * Only one thread should deal with the fault since after
5416 5414 * it is handled, the other threads can acquire a translation
5417 5415 * to the newly created private page. This prevents two or
5418 5416 * more threads from creating different private pages for the
5419 5417 * same fault.
5420 5418 *
5421 5419 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5422 5420 * to prevent deadlock between this thread and another thread
5423 5421 * which has soft-locked this page and wants to acquire serial_lock.
5424 5422 * ( bug 4026339 )
5425 5423 *
5426 5424 * The fix for bug 4026339 becomes unnecessary when using the
5427 5425 * locking scheme with a per-amp rwlock and a global set of hash
5428 5426 * locks, anon_array_lock. If we steal a vnode page when low
5429 5427 * on memory and upgrade the page lock through page_rename,
5430 5428 * then the page is PAGE_HANDLED, and nothing needs to be done
5431 5429 * for this page after returning from segvn_faultpage.
5432 5430 *
5433 5431 * But really, the page lock should be downgraded after
5434 5432 * the stolen page is page_rename'd.
5435 5433 */
5436 5434
5437 5435 if (amp != NULL)
5438 5436 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5439 5437
5440 5438 /*
5441 5439 * Ok, now loop over the address range and handle faults
5442 5440 */
5443 5441 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5444 5442 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot,
5445 5443 type, rw, brkcow);
5446 5444 if (err) {
5447 5445 if (amp != NULL)
5448 5446 ANON_LOCK_EXIT(&amp->a_rwlock);
5449 5447 if (type == F_SOFTLOCK && a > addr) {
5450 5448 segvn_softunlock(seg, addr, (a - addr),
5451 5449 S_OTHER);
5452 5450 }
5453 5451 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5454 5452 segvn_pagelist_rele(plp);
5455 5453 if (pl_alloc_sz)
5456 5454 kmem_free(plp, pl_alloc_sz);
5457 5455 return (err);
5458 5456 }
5459 5457 if (vpage) {
5460 5458 vpage++;
5461 5459 } else if (svd->vpage) {
5462 5460 page = seg_page(seg, addr);
5463 5461 vpage = &svd->vpage[++page];
5464 5462 }
5465 5463 }
5466 5464
5467 5465 /* Didn't get pages from the underlying fs so we're done */
5468 5466 if (!dogetpage)
5469 5467 goto done;
5470 5468
5471 5469 /*
5472 5470 * Now handle any other pages in the list returned.
5473 5471 * If the page can be used, load up the translations now.
5474 5472 * Note that the for loop will only be entered if "plp"
5475 5473 * is pointing to a non-NULL page pointer which means that
5476 5474 * VOP_GETPAGE() was called and vpprot has been initialized.
5477 5475 */
5478 5476 if (svd->pageprot == 0)
5479 5477 prot = svd->prot & vpprot;
5480 5478
5481 5479
5482 5480 /*
5483 5481 * Large Files: diff should be an unsigned value because we have
5484 5482 * supported > 2GB segment sizes since 2.5.1, and when a large
5485 5483 * file of size > 2GB is mapped into an address space the diff
5486 5484 * value can be > 2GB.
5487 5485 */
5488 5486
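/*
 * Load translations for the extra pages VOP_GETPAGE returned beyond
 * the faulting range, skipping PAGE_HANDLED entries and pages that
 * have since gained private anon copies.
 */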
5489 5487 for (ppp = plp; (pp = *ppp) != NULL; ppp++) {
5490 5488 size_t diff;
5491 5489 struct anon *ap;
5492 5490 int anon_index;
5493 5491 anon_sync_obj_t cookie;
5494 5492 int hat_flag = HAT_LOAD_ADV;
5495 5493
5496 5494 if (svd->flags & MAP_TEXT) {
5497 5495 hat_flag |= HAT_LOAD_TEXT;
5498 5496 }
5499 5497
5500 5498 if (pp == PAGE_HANDLED)
5501 5499 continue;
5502 5500
5503 5501 if (svd->tr_state != SEGVN_TR_ON &&
5504 5502 pp->p_offset >= svd->offset &&
5505 5503 pp->p_offset < svd->offset + seg->s_size) {
5506 5504
5507 5505 diff = pp->p_offset - svd->offset;
5508 5506
5509 5507 /*
5510 5508 * Large Files: Following is the assertion
5511 5509 * validating the above cast.
5512 5510 */
5513 5511 ASSERT(svd->vp == pp->p_vnode);
5514 5512
5515 5513 page = btop(diff);
5516 5514 if (svd->pageprot)
5517 5515 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5518 5516
5519 5517 /*
5520 5518 * Prevent other threads in the address space from
5521 5519 * creating private pages (i.e., allocating anon slots)
5522 5520 * while we are in the process of loading translations
5523 5521 * to additional pages returned by the underlying
5524 5522 * object.
5525 5523 */
5526 5524 if (amp != NULL) {
5527 5525 anon_index = svd->anon_index + page;
5528 5526 anon_array_enter(amp, anon_index, &cookie);
5529 5527 ap = anon_get_ptr(amp->ahp, anon_index);
5530 5528 }
5531 5529 if ((amp == NULL) || (ap == NULL)) {
5532 5530 if (IS_VMODSORT(pp->p_vnode) ||
5533 5531 enable_mbit_wa) {
5534 5532 if (rw == S_WRITE)
5535 5533 hat_setmod(pp);
5536 5534 else if (rw != S_OTHER &&
5537 5535 !hat_ismod(pp))
5538 5536 prot &= ~PROT_WRITE;
5539 5537 }
5540 5538 /*
5541 5539 * Skip mapping read ahead pages marked
5542 5540 * for migration, so they will get migrated
5543 5541 * properly on fault
5544 5542 */
5545 5543 ASSERT(amp == NULL ||
5546 5544 svd->rcookie == HAT_INVALID_REGION_COOKIE);
5547 5545 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) {
5548 5546 hat_memload_region(hat,
5549 5547 seg->s_base + diff,
5550 5548 pp, prot, hat_flag,
5551 5549 svd->rcookie);
5552 5550 }
5553 5551 }
5554 5552 if (amp != NULL)
5555 5553 anon_array_exit(&cookie);
5556 5554 }
5557 5555 page_unlock(pp);
5558 5556 }
5559 5557 done:
5560 5558 if (amp != NULL)
5561 5559 ANON_LOCK_EXIT(&amp->a_rwlock);
5562 5560 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5563 5561 if (pl_alloc_sz)
5564 5562 kmem_free(plp, pl_alloc_sz);
5565 5563 return (0);
5566 5564 }
5567 5565
5568 5566 /*
5569 5567 * This routine is used to start I/O on pages asynchronously. XXX it will
5570 5568 * only create PAGESIZE pages. At fault time they will be relocated into
5571 5569 * larger pages.
5572 5570 */
5573 5571 static faultcode_t
5574 5572 segvn_faulta(struct seg *seg, caddr_t addr)
5575 5573 {
5576 5574 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5577 5575 int err;
5578 5576 struct anon_map *amp;
5579 5577 vnode_t *vp;
5580 5578
5581 5579 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5582 5580
5583 5581 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5584 5582 if ((amp = svd->amp) != NULL) {
5585 5583 struct anon *ap;
5586 5584
5587 5585 /*
5588 5586 * Reader lock to prevent amp->ahp from being changed.
5589 5587 * This is advisory; it's ok to miss a page, so
5590 5588 * we don't take the anon_array_enter() lock.
5591 5589 */
5592 5590 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5593 5591 if ((ap = anon_get_ptr(amp->ahp,
5594 5592 svd->anon_index + seg_page(seg, addr))) != NULL) {
5595 5593
5596 5594 err = anon_getpage(&ap, NULL, NULL,
5597 5595 0, seg, addr, S_READ, svd->cred);
5598 5596
5599 5597 ANON_LOCK_EXIT(&amp->a_rwlock);
5600 5598 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5601 5599 if (err)
5602 5600 return (FC_MAKE_ERR(err));
5603 5601 return (0);
5604 5602 }
5605 5603 ANON_LOCK_EXIT(&amp->a_rwlock);
5606 5604 }
5607 5605
5608 5606 if (svd->vp == NULL) {
5609 5607 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5610 5608 return (0); /* zfod page - do nothing now */
5611 5609 }
5612 5610
5613 5611 vp = svd->vp;
5614 5612 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5615 5613 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp);
5616 5614 err = VOP_GETPAGE(vp,
5617 5615 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5618 5616 PAGESIZE, NULL, NULL, 0, seg, addr,
5619 5617 S_OTHER, svd->cred, NULL);
5620 5618
5621 5619 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5622 5620 if (err)
5623 5621 return (FC_MAKE_ERR(err));
5624 5622 return (0);
5625 5623 }
5626 5624
5627 5625 static int
5628 5626 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5629 5627 {
5630 5628 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5631 5629 struct vpage *cvp, *svp, *evp;
5632 5630 struct vnode *vp;
5633 5631 size_t pgsz;
5634 5632 pgcnt_t pgcnt;
5635 5633 anon_sync_obj_t cookie;
5636 5634 int unload_done = 0;
5637 5635
5638 5636 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5639 5637
5640 5638 if ((svd->maxprot & prot) != prot)
5641 5639 return (EACCES); /* violated maxprot */
5642 5640
5643 5641 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5644 5642
5645 5643 /* return if prot is the same */
5646 5644 if (!svd->pageprot && svd->prot == prot) {
5647 5645 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5648 5646 return (0);
5649 5647 }
5650 5648
5651 5649 /*
5652 5650 * Since we change protections we first have to flush the cache.
5653 5651 * This makes sure all the pagelock calls have to recheck
5654 5652 * protections.
5655 5653 */
5656 5654 if (svd->softlockcnt > 0) {
5657 5655 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5658 5656
5659 5657 /*
5660 5658 * If this is a shared segment, a non-zero softlockcnt
5661 5659 * means locked pages are still in use.
5662 5660 */
5663 5661 if (svd->type == MAP_SHARED) {
5664 5662 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5665 5663 return (EAGAIN);
5666 5664 }
5667 5665
5668 5666 /*
5669 5667 * Since we do have the segvn writer's lock, nobody can fill
5670 5668 * the cache with entries belonging to this seg during
5671 5669 * the purge. The flush either succeeds or we still have
5672 5670 * pending I/Os.
5673 5671 */
5674 5672 segvn_purge(seg);
5675 5673 if (svd->softlockcnt > 0) {
5676 5674 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5677 5675 return (EAGAIN);
5678 5676 }
5679 5677 }
5680 5678
5681 5679 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5682 5680 ASSERT(svd->amp == NULL);
5683 5681 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5684 5682 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5685 5683 HAT_REGION_TEXT);
5686 5684 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5687 5685 unload_done = 1;
5688 5686 } else if (svd->tr_state == SEGVN_TR_INIT) {
5689 5687 svd->tr_state = SEGVN_TR_OFF;
5690 5688 } else if (svd->tr_state == SEGVN_TR_ON) {
5691 5689 ASSERT(svd->amp != NULL);
5692 5690 segvn_textunrepl(seg, 0);
5693 5691 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5694 5692 unload_done = 1;
5695 5693 }
5696 5694
5697 5695 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED &&
5698 5696 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) {
5699 5697 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
5700 5698 segvn_inval_trcache(svd->vp);
5701 5699 }
5702 5700 if (seg->s_szc != 0) {
5703 5701 int err;
5704 5702 pgsz = page_get_pagesize(seg->s_szc);
5705 5703 pgcnt = pgsz >> PAGESHIFT;
5706 5704 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
5707 5705 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5708 5706 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5709 5707 ASSERT(seg->s_base != addr || seg->s_size != len);
5710 5708 /*
5711 5709 * If we are holding the as lock as a reader then
5712 5710 * we need to return IE_RETRY and let the as
5713 5711 * layer drop and re-acquire the lock as a writer.
5714 5712 */
5715 5713 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock))
5716 5714 return (IE_RETRY);
5717 5715 VM_STAT_ADD(segvnvmstats.demoterange[1]);
5718 5716 if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5719 5717 err = segvn_demote_range(seg, addr, len,
5720 5718 SDR_END, 0);
5721 5719 } else {
5722 5720 uint_t szcvec = map_pgszcvec(seg->s_base,
5723 5721 pgsz, (uintptr_t)seg->s_base,
5724 5722 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5725 5723 err = segvn_demote_range(seg, addr, len,
5726 5724 SDR_END, szcvec);
5727 5725 }
5728 5726 if (err == 0)
5729 5727 return (IE_RETRY);
5730 5728 if (err == ENOMEM)
5731 5729 return (IE_NOMEM);
5732 5730 return (err);
5733 5731 }
5734 5732 }
5735 5733
5736 5734
5737 5735 /*
5738 5736 * If it's a private mapping and we're making it writable then we
5739 5737 * may have to reserve the additional swap space now. If we are
5740 5738 * making writable only a part of the segment then we use its vpage
5741 5739 * array to keep a record of the pages for which we have reserved
5742 5740 * swap. In this case we set the pageswap field in the segment's
5743 5741 * segvn structure to record this.
5744 5742 *
5745 5743 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5746 5744 * removing write permission on the entire segment and we haven't
5747 5745 * modified any pages, we can release the swap space.
5748 5746 */
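/*
 * Worked example (illustrative, assuming 4K pages): mprotect() adding
 * PROT_WRITE over a 64K sub-range of a private mapping with no prior
 * per-page reservations takes the sz = len branch below, reserves 64K
 * of swap via anon_resv_zone(), sets pageswap, and marks each of the
 * 16 vpage entries with VPP_SETSWAPRES.
 */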
5749 5747 if (svd->type == MAP_PRIVATE) {
5750 5748 if (prot & PROT_WRITE) {
5751 5749 if (!(svd->flags & MAP_NORESERVE) &&
5752 5750 !(svd->swresv && svd->pageswap == 0)) {
5753 5751 size_t sz = 0;
5754 5752
5755 5753 /*
5756 5754 * Start by determining how much swap
5757 5755 * space is required.
5758 5756 */
5759 5757 if (addr == seg->s_base &&
5760 5758 len == seg->s_size &&
5761 5759 svd->pageswap == 0) {
5762 5760 /* The whole segment */
5763 5761 sz = seg->s_size;
5764 5762 } else {
5765 5763 /*
5766 5764 * Make sure that the vpage array
5767 5765 * exists, and make a note of the
5768 5766 * range of elements corresponding
5769 5767 * to len.
5770 5768 */
5771 5769 segvn_vpage(seg);
5772 5770 svp = &svd->vpage[seg_page(seg, addr)];
5773 5771 evp = &svd->vpage[seg_page(seg,
5774 5772 addr + len)];
5775 5773
5776 5774 if (svd->pageswap == 0) {
5777 5775 /*
5778 5776 * This is the first time we've
5779 5777 * asked for a part of this
5780 5778 * segment, so we need to
5781 5779 * reserve everything we've
5782 5780 * been asked for.
5783 5781 */
5784 5782 sz = len;
5785 5783 } else {
5786 5784 /*
5787 5785 * We have to count the number
5788 5786 * of pages required.
5789 5787 */
5790 5788 for (cvp = svp; cvp < evp;
5791 5789 cvp++) {
5792 5790 if (!VPP_ISSWAPRES(cvp))
5793 5791 sz++;
5794 5792 }
5795 5793 sz <<= PAGESHIFT;
5796 5794 }
5797 5795 }
5798 5796
5799 5797 /* Try to reserve the necessary swap. */
5800 5798 if (anon_resv_zone(sz,
5801 5799 seg->s_as->a_proc->p_zone) == 0) {
5802 5800 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5803 5801 return (IE_NOMEM);
5804 5802 }
5805 5803
5806 5804 /*
5807 5805 * Make a note of how much swap space
5808 5806 * we've reserved.
5809 5807 */
5810 5808 if (svd->pageswap == 0 && sz == seg->s_size) {
5811 5809 svd->swresv = sz;
5812 5810 } else {
5813 5811 ASSERT(svd->vpage != NULL);
5814 5812 svd->swresv += sz;
5815 5813 svd->pageswap = 1;
5816 5814 for (cvp = svp; cvp < evp; cvp++) {
5817 5815 if (!VPP_ISSWAPRES(cvp))
5818 5816 VPP_SETSWAPRES(cvp);
5819 5817 }
5820 5818 }
5821 5819 }
5822 5820 } else {
5823 5821 /*
5824 5822 * Swap space is released only if this segment
5825 5823 * does not map anonymous memory, since read faults
5826 5824 * on such segments still need an anon slot to read
5827 5825 * in the data.
5828 5826 */
5829 5827 if (svd->swresv != 0 && svd->vp != NULL &&
5830 5828 svd->amp == NULL && addr == seg->s_base &&
5831 5829 len == seg->s_size && svd->pageprot == 0) {
5832 5830 ASSERT(svd->pageswap == 0);
5833 5831 anon_unresv_zone(svd->swresv,
5834 5832 seg->s_as->a_proc->p_zone);
5835 5833 svd->swresv = 0;
5836 5834 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
5837 5835 "anon proc:%p %lu %u", seg, 0, 0);
5838 5836 }
5839 5837 }
5840 5838 }
5841 5839
5842 5840 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5843 5841 if (svd->prot == prot) {
5844 5842 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5845 5843 return (0); /* all done */
5846 5844 }
5847 5845 svd->prot = (uchar_t)prot;
5848 5846 } else if (svd->type == MAP_PRIVATE) {
5849 5847 struct anon *ap = NULL;
5850 5848 page_t *pp;
5851 5849 u_offset_t offset, off;
5852 5850 struct anon_map *amp;
5853 5851 ulong_t anon_idx = 0;
5854 5852
5855 5853 /*
5856 5854 * A vpage structure exists or else the change does not
5857 5855 * involve the entire segment. Establish a vpage structure
5858 5856 * if none is there. Then, for each page in the range,
5859 5857 * adjust its individual permissions. Note that write-
5860 5858 * enabling a MAP_PRIVATE page can affect the claims for
5861 5859 * locked down memory. Overcommitting memory terminates
5862 5860 * the operation.
5863 5861 */
5864 5862 segvn_vpage(seg);
5865 5863 svd->pageprot = 1;
5866 5864 if ((amp = svd->amp) != NULL) {
5867 5865 anon_idx = svd->anon_index + seg_page(seg, addr);
5868 5866 ASSERT(seg->s_szc == 0 ||
5869 5867 IS_P2ALIGNED(anon_idx, pgcnt));
5870 5868 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5871 5869 }
5872 5870
5873 5871 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5874 5872 evp = &svd->vpage[seg_page(seg, addr + len)];
5875 5873
5876 5874 /*
5877 5875 * See Statement at the beginning of segvn_lockop regarding
5878 5876 * the way cowcnts and lckcnts are handled.
5879 5877 */
5880 5878 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5881 5879
5882 5880 if (seg->s_szc != 0) {
5883 5881 if (amp != NULL) {
5884 5882 anon_array_enter(amp, anon_idx,
5885 5883 &cookie);
5886 5884 }
5887 5885 if (IS_P2ALIGNED(anon_idx, pgcnt) &&
5888 5886 !segvn_claim_pages(seg, svp, offset,
5889 5887 anon_idx, prot)) {
5890 5888 if (amp != NULL) {
5891 5889 anon_array_exit(&cookie);
5892 5890 }
5893 5891 break;
5894 5892 }
5895 5893 if (amp != NULL) {
5896 5894 anon_array_exit(&cookie);
5897 5895 }
5898 5896 anon_idx++;
5899 5897 } else {
5900 5898 if (amp != NULL) {
5901 5899 anon_array_enter(amp, anon_idx,
5902 5900 &cookie);
5903 5901 ap = anon_get_ptr(amp->ahp, anon_idx++);
5904 5902 }
5905 5903
5906 5904 if (VPP_ISPPLOCK(svp) &&
5907 5905 VPP_PROT(svp) != prot) {
5908 5906
5909 5907 if (amp == NULL || ap == NULL) {
5910 5908 vp = svd->vp;
5911 5909 off = offset;
5912 5910 } else
5913 5911 swap_xlate(ap, &vp, &off);
5914 5912 if (amp != NULL)
5915 5913 anon_array_exit(&cookie);
5916 5914
5917 5915 if ((pp = page_lookup(vp, off,
5918 5916 SE_SHARED)) == NULL) {
5919 5917 panic("segvn_setprot: no page");
5920 5918 /*NOTREACHED*/
5921 5919 }
5922 5920 ASSERT(seg->s_szc == 0);
5923 5921 if ((VPP_PROT(svp) ^ prot) &
5924 5922 PROT_WRITE) {
5925 5923 if (prot & PROT_WRITE) {
5926 5924 if (!page_addclaim(
5927 5925 pp)) {
5928 5926 page_unlock(pp);
5929 5927 break;
5930 5928 }
5931 5929 } else {
5932 5930 if (!page_subclaim(
5933 5931 pp)) {
5934 5932 page_unlock(pp);
5935 5933 break;
5936 5934 }
5937 5935 }
5938 5936 }
5939 5937 page_unlock(pp);
5940 5938 } else if (amp != NULL)
5941 5939 anon_array_exit(&cookie);
5942 5940 }
5943 5941 VPP_SETPROT(svp, prot);
5944 5942 offset += PAGESIZE;
5945 5943 }
5946 5944 if (amp != NULL)
5947 5945 ANON_LOCK_EXIT(&amp->a_rwlock);
5948 5946
5949 5947 /*
5950 5948 * Did we terminate prematurely? If so, simply unload
5951 5949 * the translations to the things we've updated so far.
5952 5950 */
5953 5951 if (svp != evp) {
5954 5952 if (unload_done) {
5955 5953 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5956 5954 return (IE_NOMEM);
5957 5955 }
5958 5956 len = (svp - &svd->vpage[seg_page(seg, addr)]) *
5959 5957 PAGESIZE;
5960 5958 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz));
5961 5959 if (len != 0)
5962 5960 hat_unload(seg->s_as->a_hat, addr,
5963 5961 len, HAT_UNLOAD);
5964 5962 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5965 5963 return (IE_NOMEM);
5966 5964 }
5967 5965 } else {
5968 5966 segvn_vpage(seg);
5969 5967 svd->pageprot = 1;
5970 5968 evp = &svd->vpage[seg_page(seg, addr + len)];
5971 5969 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5972 5970 VPP_SETPROT(svp, prot);
5973 5971 }
5974 5972 }
5975 5973
5976 5974 if (unload_done) {
5977 5975 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5978 5976 return (0);
5979 5977 }
5980 5978
5981 5979 if (((prot & PROT_WRITE) != 0 &&
5982 5980 (svd->vp != NULL || svd->type == MAP_PRIVATE)) ||
5983 5981 (prot & ~PROT_USER) == PROT_NONE) {
5984 5982 /*
5985 5983 * Either private or shared data with write access (in
5986 5984 * which case we need to throw out all former translations
5987 5985 * so that we get the right translations set up on fault
5988 5986 * and we don't allow write access to any copy-on-write pages
5989 5987 * that might be around or to prevent write access to pages
5990 5988 * representing holes in a file), or we don't have permission
5991 5989 * to access the memory at all (in which case we have to
5992 5990 * unload any current translations that might exist).
5993 5991 */
5994 5992 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
5995 5993 } else {
5996 5994 /*
5997 5995 * A shared mapping or a private mapping in which write
5998 5996 * protection is going to be denied - just change all the
5999 5997 * protections over the range of addresses in question.
6000 5998 * segvn does not support any other attributes other
6001 5999 * than prot so we can use hat_chgattr.
6002 6000 */
6003 6001 hat_chgattr(seg->s_as->a_hat, addr, len, prot);
6004 6002 }
6005 6003
6006 6004 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6007 6005
6008 6006 return (0);
6009 6007 }
6010 6008
6011 6009 /*
6012 6010 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize,
6013 6011 * to determine if the seg is capable of mapping the requested szc.
6014 6012 */
6015 6013 static int
6016 6014 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
6017 6015 {
6018 6016 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6019 6017 struct segvn_data *nsvd;
6020 6018 struct anon_map *amp = svd->amp;
6021 6019 struct seg *nseg;
6022 6020 caddr_t eaddr = addr + len, a;
6023 6021 size_t pgsz = page_get_pagesize(szc);
6024 6022 pgcnt_t pgcnt = page_get_pagecnt(szc);
6025 6023 int err;
6026 6024 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6027 6025
6028 6026 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6029 6027 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6030 6028
6031 6029 if (seg->s_szc == szc || segvn_lpg_disable != 0) {
6032 6030 return (0);
6033 6031 }
6034 6032
6035 6033 /*
6036 6034 * addr should always be pgsz aligned but eaddr may be misaligned if
6037 6035 * it's at the end of the segment.
6038 6036 *
6039 6037 * XXX we should assert this condition since as_setpagesize() logic
6040 6038 * guarantees it.
6041 6039 */
6042 6040 if (!IS_P2ALIGNED(addr, pgsz) ||
6043 6041 (!IS_P2ALIGNED(eaddr, pgsz) &&
6044 6042 eaddr != seg->s_base + seg->s_size)) {
6045 6043
6046 6044 segvn_setpgsz_align_err++;
6047 6045 return (EINVAL);
6048 6046 }
6049 6047
6050 6048 if (amp != NULL && svd->type == MAP_SHARED) {
6051 6049 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6052 6050 if (!IS_P2ALIGNED(an_idx, pgcnt)) {
6053 6051
6054 6052 segvn_setpgsz_anon_align_err++;
6055 6053 return (EINVAL);
6056 6054 }
6057 6055 }
6058 6056
6059 6057 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas ||
6060 6058 szc > segvn_maxpgszc) {
6061 6059 return (EINVAL);
6062 6060 }
6063 6061
6064 6062 /* paranoid check */
6065 6063 if (svd->vp != NULL &&
6066 6064 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) {
6067 6065 return (EINVAL);
6068 6066 }
6069 6067
6070 6068 if (seg->s_szc == 0 && svd->vp != NULL &&
6071 6069 map_addr_vacalign_check(addr, off)) {
6072 6070 return (EINVAL);
6073 6071 }
6074 6072
6075 6073 /*
6076 6074 * Check that protections are the same within new page
6077 6075 * size boundaries.
6078 6076 */
6079 6077 if (svd->pageprot) {
6080 6078 for (a = addr; a < eaddr; a += pgsz) {
6081 6079 if ((a + pgsz) > eaddr) {
6082 6080 if (!sameprot(seg, a, eaddr - a)) {
6083 6081 return (EINVAL);
6084 6082 }
6085 6083 } else {
6086 6084 if (!sameprot(seg, a, pgsz)) {
6087 6085 return (EINVAL);
6088 6086 }
6089 6087 }
6090 6088 }
6091 6089 }
6092 6090
6093 6091 /*
6094 6092 * Since we are changing page size we first have to flush
6095 6093 * the cache. This makes sure all the pagelock calls have
6096 6094 * to recheck protections.
6097 6095 */
6098 6096 if (svd->softlockcnt > 0) {
6099 6097 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6100 6098
6101 6099 /*
6102 6100 * If this is a shared segment, a non-zero softlockcnt
6103 6101 * means locked pages are still in use.
6104 6102 */
6105 6103 if (svd->type == MAP_SHARED) {
6106 6104 return (EAGAIN);
6107 6105 }
6108 6106
6109 6107 /*
6110 6108 * Since we do have the segvn writer's lock, nobody can fill
6111 6109 * the cache with entries belonging to this seg during
6112 6110 * the purge. The flush either succeeds or we still have
6113 6111 * pending I/Os.
6114 6112 */
6115 6113 segvn_purge(seg);
6116 6114 if (svd->softlockcnt > 0) {
6117 6115 return (EAGAIN);
6118 6116 }
6119 6117 }
6120 6118
6121 6119 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6122 6120 ASSERT(svd->amp == NULL);
6123 6121 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6124 6122 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6125 6123 HAT_REGION_TEXT);
6126 6124 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6127 6125 } else if (svd->tr_state == SEGVN_TR_INIT) {
6128 6126 svd->tr_state = SEGVN_TR_OFF;
6129 6127 } else if (svd->tr_state == SEGVN_TR_ON) {
6130 6128 ASSERT(svd->amp != NULL);
6131 6129 segvn_textunrepl(seg, 1);
6132 6130 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6133 6131 amp = NULL;
6134 6132 }
6135 6133
6136 6134 /*
6137 6135 * Operation for sub range of existing segment.
6138 6136 */
6139 6137 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) {
6140 6138 if (szc < seg->s_szc) {
6141 6139 VM_STAT_ADD(segvnvmstats.demoterange[2]);
6142 6140 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0);
6143 6141 if (err == 0) {
6144 6142 return (IE_RETRY);
6145 6143 }
6146 6144 if (err == ENOMEM) {
6147 6145 return (IE_NOMEM);
6148 6146 }
6149 6147 return (err);
6150 6148 }
6151 6149 if (addr != seg->s_base) {
6152 6150 nseg = segvn_split_seg(seg, addr);
6153 6151 if (eaddr != (nseg->s_base + nseg->s_size)) {
6154 6152 /* eaddr is szc aligned */
6155 6153 (void) segvn_split_seg(nseg, eaddr);
6156 6154 }
6157 6155 return (IE_RETRY);
6158 6156 }
6159 6157 if (eaddr != (seg->s_base + seg->s_size)) {
6160 6158 /* eaddr is szc aligned */
6161 6159 (void) segvn_split_seg(seg, eaddr);
6162 6160 }
6163 6161 return (IE_RETRY);
6164 6162 }
6165 6163
6166 6164 /*
6167 6165 * Break any low level sharing and reset seg->s_szc to 0.
6168 6166 */
6169 6167 if ((err = segvn_clrszc(seg)) != 0) {
6170 6168 if (err == ENOMEM) {
6171 6169 err = IE_NOMEM;
6172 6170 }
6173 6171 return (err);
6174 6172 }
6175 6173 ASSERT(seg->s_szc == 0);
6176 6174
6177 6175 /*
6178 6176 * If the end of the current segment is not pgsz aligned
6179 6177 * then attempt to concatenate with the next segment.
6180 6178 */
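/*
 * Illustrative: concatenation succeeds only if a segvn neighbor starts
 * exactly at eaddr and can itself be demoted to szc 0; on failure an
 * error is returned with seg already left at szc 0.
 */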
6181 6179 if (!IS_P2ALIGNED(eaddr, pgsz)) {
6182 6180 nseg = AS_SEGNEXT(seg->s_as, seg);
6183 6181 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) {
6184 6182 return (ENOMEM);
6185 6183 }
6186 6184 if (nseg->s_ops != &segvn_ops) {
6187 6185 return (EINVAL);
6188 6186 }
6189 6187 nsvd = (struct segvn_data *)nseg->s_data;
6190 6188 if (nsvd->softlockcnt > 0) {
6191 6189 /*
6192 6190 * If this is a shared segment, a non-zero softlockcnt
6193 6191 * means locked pages are still in use.
6194 6192 */
6195 6193 if (nsvd->type == MAP_SHARED) {
6196 6194 return (EAGAIN);
6197 6195 }
6198 6196 segvn_purge(nseg);
6199 6197 if (nsvd->softlockcnt > 0) {
6200 6198 return (EAGAIN);
6201 6199 }
6202 6200 }
6203 6201 err = segvn_clrszc(nseg);
6204 6202 if (err == ENOMEM) {
6205 6203 err = IE_NOMEM;
6206 6204 }
6207 6205 if (err != 0) {
6208 6206 return (err);
6209 6207 }
6210 6208 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6211 6209 err = segvn_concat(seg, nseg, 1);
6212 6210 if (err == -1) {
6213 6211 return (EINVAL);
6214 6212 }
6215 6213 if (err == -2) {
6216 6214 return (IE_NOMEM);
6217 6215 }
6218 6216 return (IE_RETRY);
6219 6217 }
6220 6218
6221 6219 /*
6222 6220 * May need to re-align anon array to
6223 6221 * new szc.
6224 6222 */
6225 6223 if (amp != NULL) {
6226 6224 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6227 6225 struct anon_hdr *nahp;
6228 6226
6229 6227 ASSERT(svd->type == MAP_PRIVATE);
6230 6228
6231 6229 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6232 6230 ASSERT(amp->refcnt == 1);
6233 6231 nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6234 6232 if (nahp == NULL) {
6235 6233 ANON_LOCK_EXIT(&amp->a_rwlock);
6236 6234 return (IE_NOMEM);
6237 6235 }
6238 6236 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6239 6237 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6240 6238 anon_release(nahp, btop(amp->size));
6241 6239 ANON_LOCK_EXIT(&amp->a_rwlock);
6242 6240 return (IE_NOMEM);
6243 6241 }
6244 6242 anon_release(amp->ahp, btop(amp->size));
6245 6243 amp->ahp = nahp;
6246 6244 svd->anon_index = 0;
6247 6245 ANON_LOCK_EXIT(&amp->a_rwlock);
6248 6246 }
6249 6247 }
6250 6248 if (svd->vp != NULL && szc != 0) {
6251 6249 struct vattr va;
6252 6250 u_offset_t eoffpage = svd->offset;
6253 6251 va.va_mask = AT_SIZE;
6254 6252 eoffpage += seg->s_size;
6255 6253 eoffpage = btopr(eoffpage);
6256 6254 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6257 6255 segvn_setpgsz_getattr_err++;
6258 6256 return (EINVAL);
6259 6257 }
6260 6258 if (btopr(va.va_size) < eoffpage) {
6261 6259 segvn_setpgsz_eof_err++;
6262 6260 return (EINVAL);
6263 6261 }
6264 6262 if (amp != NULL) {
6265 6263 /*
6266 6264 * anon_fill_cow_holes() may call VOP_GETPAGE().
6267 6265 * Don't take the anon map lock here to avoid holding it
6268 6266 * across VOP_GETPAGE() calls that may call back into
6269 6267 * segvn for klustering checks. We don't really need the
6270 6268 * anon map lock here since it's a private segment and
6271 6269 * we hold the as-level lock as writers.
6272 6270 */
6273 6271 if ((err = anon_fill_cow_holes(seg, seg->s_base,
6274 6272 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6275 6273 seg->s_size, szc, svd->prot, svd->vpage,
6276 6274 svd->cred)) != 0) {
6277 6275 return (EINVAL);
6278 6276 }
6279 6277 }
6280 6278 segvn_setvnode_mpss(svd->vp);
6281 6279 }
6282 6280
6283 6281 if (amp != NULL) {
6284 6282 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6285 6283 if (svd->type == MAP_PRIVATE) {
6286 6284 amp->a_szc = szc;
6287 6285 } else if (szc > amp->a_szc) {
6288 6286 amp->a_szc = szc;
6289 6287 }
6290 6288 ANON_LOCK_EXIT(&amp->a_rwlock);
6291 6289 }
6292 6290
6293 6291 seg->s_szc = szc;
6294 6292
6295 6293 return (0);
6296 6294 }
6297 6295
6298 6296 static int
6299 6297 segvn_clrszc(struct seg *seg)
6300 6298 {
6301 6299 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6302 6300 struct anon_map *amp = svd->amp;
6303 6301 size_t pgsz;
6304 6302 pgcnt_t pages;
6305 6303 int err = 0;
6306 6304 caddr_t a = seg->s_base;
6307 6305 caddr_t ea = a + seg->s_size;
6308 6306 ulong_t an_idx = svd->anon_index;
6309 6307 vnode_t *vp = svd->vp;
6310 6308 struct vpage *vpage = svd->vpage;
6311 6309 page_t *anon_pl[1 + 1], *pp;
6312 6310 struct anon *ap, *oldap;
6313 6311 uint_t prot = svd->prot, vpprot;
6314 6312 int pageflag = 0;
6315 6313
6316 6314 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6317 6315 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6318 6316 ASSERT(svd->softlockcnt == 0);
6319 6317
6320 6318 if (vp == NULL && amp == NULL) {
6321 6319 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6322 6320 seg->s_szc = 0;
6323 6321 return (0);
6324 6322 }
6325 6323
6326 6324 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6327 6325 ASSERT(svd->amp == NULL);
6328 6326 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6329 6327 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6330 6328 HAT_REGION_TEXT);
6331 6329 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6332 6330 } else if (svd->tr_state == SEGVN_TR_ON) {
6333 6331 ASSERT(svd->amp != NULL);
6334 6332 segvn_textunrepl(seg, 1);
6335 6333 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6336 6334 amp = NULL;
6337 6335 } else {
6338 6336 if (svd->tr_state != SEGVN_TR_OFF) {
6339 6337 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6340 6338 svd->tr_state = SEGVN_TR_OFF;
6341 6339 }
6342 6340
6343 6341 /*
6344 6342 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6345 6343 * unload argument is 0 when we are freeing the segment
6346 6344 * and unload was already done.
6347 6345 */
6348 6346 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6349 6347 HAT_UNLOAD_UNMAP);
6350 6348 }
6351 6349
6352 6350 if (amp == NULL || svd->type == MAP_SHARED) {
6353 6351 seg->s_szc = 0;
6354 6352 return (0);
6355 6353 }
6356 6354
6357 6355 pgsz = page_get_pagesize(seg->s_szc);
6358 6356 pages = btop(pgsz);
6359 6357
6360 6358 /*
6361 6359 * XXX anon rwlock is not really needed because this is a
6362 6360 * private segment and we are writers.
6363 6361 */
6364 6362 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6365 6363
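/*
 * For each former large page: demote the anon pages in place while
 * the segment still has a large page size; otherwise replace any
 * shared anon slots (an_refcnt > 1) with private PAGESIZE copies.
 */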
6366 6364 for (; a < ea; a += pgsz, an_idx += pages) {
6367 6365 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6368 6366 ASSERT(vpage != NULL || svd->pageprot == 0);
6369 6367 if (vpage != NULL) {
6370 6368 ASSERT(sameprot(seg, a, pgsz));
6371 6369 prot = VPP_PROT(vpage);
6372 6370 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6373 6371 }
6374 6372 if (seg->s_szc != 0) {
6375 6373 ASSERT(vp == NULL || anon_pages(amp->ahp,
6376 6374 an_idx, pages) == pages);
6377 6375 if ((err = anon_map_demotepages(amp, an_idx,
6378 6376 seg, a, prot, vpage, svd->cred)) != 0) {
6379 6377 goto out;
6380 6378 }
6381 6379 } else {
6382 6380 if (oldap->an_refcnt == 1) {
6383 6381 continue;
6384 6382 }
6385 6383 if ((err = anon_getpage(&oldap, &vpprot,
6386 6384 anon_pl, PAGESIZE, seg, a, S_READ,
6387 6385 svd->cred))) {
6388 6386 goto out;
6389 6387 }
6390 6388 if ((pp = anon_private(&ap, seg, a, prot,
6391 6389 anon_pl[0], pageflag, svd->cred)) == NULL) {
6392 6390 err = ENOMEM;
6393 6391 goto out;
6394 6392 }
6395 6393 anon_decref(oldap);
6396 6394 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6397 6395 ANON_SLEEP);
6398 6396 page_unlock(pp);
6399 6397 }
6400 6398 }
6401 6399 vpage = (vpage == NULL) ? NULL : vpage + pages;
6402 6400 }
6403 6401
6404 6402 amp->a_szc = 0;
6405 6403 seg->s_szc = 0;
6406 6404 out:
6407 6405 ANON_LOCK_EXIT(&amp->a_rwlock);
6408 6406 return (err);
6409 6407 }
6410 6408
6411 6409 static int
6412 6410 segvn_claim_pages(
6413 6411 struct seg *seg,
6414 6412 struct vpage *svp,
6415 6413 u_offset_t off,
6416 6414 ulong_t anon_idx,
6417 6415 uint_t prot)
6418 6416 {
6419 6417 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6420 6418 size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6421 6419 page_t **ppa;
6422 6420 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6423 6421 struct anon_map *amp = svd->amp;
6424 6422 struct vpage *evp = svp + pgcnt;
6425 6423 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6426 6424 + seg->s_base;
6427 6425 struct anon *ap;
6428 6426 struct vnode *vp = svd->vp;
6429 6427 page_t *pp;
6430 6428 pgcnt_t pg_idx, i;
6431 6429 int err = 0;
6432 6430 anoff_t aoff;
6433 6431 int anon = (amp != NULL) ? 1 : 0;
6434 6432
6435 6433 ASSERT(svd->type == MAP_PRIVATE);
6436 6434 ASSERT(svd->vpage != NULL);
6437 6435 ASSERT(seg->s_szc != 0);
6438 6436 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
6439 6437 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt));
6440 6438 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));
6441 6439
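/* No lock claim adjustment is needed unless PROT_WRITE is changing. */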
6442 6440 if (VPP_PROT(svp) == prot)
6443 6441 return (1);
6444 6442 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE))
6445 6443 return (1);
6446 6444
6447 6445 ppa = kmem_alloc(ppasize, KM_SLEEP);
6448 6446 if (anon && vp != NULL) {
6449 6447 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) {
6450 6448 anon = 0;
6451 6449 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt));
6452 6450 }
6453 6451 ASSERT(!anon ||
6454 6452 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt);
6455 6453 }
6456 6454
6457 6455 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
6458 6456 if (!VPP_ISPPLOCK(svp))
6459 6457 continue;
6460 6458 if (anon) {
6461 6459 ap = anon_get_ptr(amp->ahp, anon_idx);
6462 6460 if (ap == NULL) {
6463 6461 panic("segvn_claim_pages: no anon slot");
6464 6462 }
6465 6463 swap_xlate(ap, &vp, &aoff);
6466 6464 off = (u_offset_t)aoff;
6467 6465 }
6468 6466 ASSERT(vp != NULL);
6469 6467 if ((pp = page_lookup(vp,
6470 6468 (u_offset_t)off, SE_SHARED)) == NULL) {
6471 6469 panic("segvn_claim_pages: no page");
6472 6470 }
6473 6471 ppa[pg_idx++] = pp;
6474 6472 off += PAGESIZE;
6475 6473 }
6476 6474
6477 6475 if (ppa[0] == NULL) {
6478 6476 kmem_free(ppa, ppasize);
6479 6477 return (1);
6480 6478 }
6481 6479
6482 6480 ASSERT(pg_idx <= pgcnt);
6483 6481 ppa[pg_idx] = NULL;
6484 6482
6485 6483
6486 6484 /* Find each large page within ppa, and adjust its claim */
6487 6485
6488 6486 /* Does ppa cover a single large page? */
6489 6487 if (ppa[0]->p_szc == seg->s_szc) {
6490 6488 if (prot & PROT_WRITE)
6491 6489 err = page_addclaim_pages(ppa);
6492 6490 else
6493 6491 err = page_subclaim_pages(ppa);
6494 6492 } else {
6495 6493 for (i = 0; ppa[i]; i += pgcnt) {
6496 6494 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
6497 6495 if (prot & PROT_WRITE)
6498 6496 err = page_addclaim_pages(&ppa[i]);
6499 6497 else
6500 6498 err = page_subclaim_pages(&ppa[i]);
6501 6499 if (err == 0)
6502 6500 break;
6503 6501 }
6504 6502 }
6505 6503
6506 6504 for (i = 0; i < pg_idx; i++) {
6507 6505 ASSERT(ppa[i] != NULL);
6508 6506 page_unlock(ppa[i]);
6509 6507 }
6510 6508
6511 6509 kmem_free(ppa, ppasize);
6512 6510 return (err);
6513 6511 }
6514 6512
6515 6513 /*
6516 6514 * Returns right (upper address) segment if split occurred.
6517 6515 * If the address is equal to the beginning or end of its segment it returns
6518 6516 * the current segment.
6519 6517 */
6520 6518 static struct seg *
6521 6519 segvn_split_seg(struct seg *seg, caddr_t addr)
6522 6520 {
6523 6521 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6524 6522 struct seg *nseg;
6525 6523 size_t nsize;
6526 6524 struct segvn_data *nsvd;
6527 6525
6528 6526 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6529 6527 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6530 6528
6531 6529 ASSERT(addr >= seg->s_base);
6532 6530 ASSERT(addr <= seg->s_base + seg->s_size);
6533 6531 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6534 6532
6535 6533 if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6536 6534 return (seg);
6537 6535
6538 6536 nsize = seg->s_base + seg->s_size - addr;
6539 6537 seg->s_size = addr - seg->s_base;
6540 6538 nseg = seg_alloc(seg->s_as, addr, nsize);
6541 6539 ASSERT(nseg != NULL);
6542 6540 nseg->s_ops = seg->s_ops;
6543 6541 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
6544 6542 nseg->s_data = (void *)nsvd;
6545 6543 nseg->s_szc = seg->s_szc;
6546 6544 *nsvd = *svd;
6547 6545 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6548 6546 nsvd->seg = nseg;
6549 6547 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);
6550 6548
6551 6549 if (nsvd->vp != NULL) {
6552 6550 VN_HOLD(nsvd->vp);
6553 6551 nsvd->offset = svd->offset +
6554 6552 (uintptr_t)(nseg->s_base - seg->s_base);
6555 6553 if (nsvd->type == MAP_SHARED)
6556 6554 lgrp_shm_policy_init(NULL, nsvd->vp);
6557 6555 } else {
6558 6556 /*
6559 6557 * The offset for an anonymous segment has no significance in
6560 6558 * terms of an offset into a file. If we were to use the above
6561 6559 * calculation instead, the structures read out of
6562 6560 * /proc/<pid>/xmap would be more difficult to decipher since
6563 6561 * it would be unclear whether two seemingly contiguous
6564 6562 * prxmap_t structures represented different segments or a
6565 6563 * single segment that had been split up into multiple prxmap_t
6566 6564 * structures (e.g. if some part of the segment had not yet
6567 6565 * been faulted in).
6568 6566 */
6569 6567 nsvd->offset = 0;
6570 6568 }
6571 6569
6572 6570 ASSERT(svd->softlockcnt == 0);
6573 6571 ASSERT(svd->softlockcnt_sbase == 0);
6574 6572 ASSERT(svd->softlockcnt_send == 0);
6575 6573 crhold(svd->cred);
6576 6574
6577 6575 if (svd->vpage != NULL) {
6578 6576 size_t bytes = vpgtob(seg_pages(seg));
6579 6577 size_t nbytes = vpgtob(seg_pages(nseg));
6580 6578 struct vpage *ovpage = svd->vpage;
6581 6579
6582 6580 svd->vpage = kmem_alloc(bytes, KM_SLEEP);
6583 6581 bcopy(ovpage, svd->vpage, bytes);
6584 6582 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
6585 6583 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes);
6586 6584 kmem_free(ovpage, bytes + nbytes);
6587 6585 }
6588 6586 if (svd->amp != NULL && svd->type == MAP_PRIVATE) {
6589 6587 struct anon_map *oamp = svd->amp, *namp;
6590 6588 struct anon_hdr *nahp;
6591 6589
6592 6590 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER);
6593 6591 ASSERT(oamp->refcnt == 1);
6594 6592 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
6595 6593 (void) anon_copy_ptr(oamp->ahp, svd->anon_index,
6596 6594 nahp, 0, btop(seg->s_size), ANON_SLEEP);
6597 6595
6598 6596 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
6599 6597 namp->a_szc = nseg->s_szc;
6600 6598 (void) anon_copy_ptr(oamp->ahp,
6601 6599 svd->anon_index + btop(seg->s_size),
6602 6600 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
6603 6601 anon_release(oamp->ahp, btop(oamp->size));
6604 6602 oamp->ahp = nahp;
6605 6603 oamp->size = seg->s_size;
6606 6604 svd->anon_index = 0;
6607 6605 nsvd->amp = namp;
6608 6606 nsvd->anon_index = 0;
6609 6607 ANON_LOCK_EXIT(&oamp->a_rwlock);
6610 6608 } else if (svd->amp != NULL) {
6611 6609 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6612 6610 ASSERT(svd->amp == nsvd->amp);
6613 6611 ASSERT(seg->s_szc <= svd->amp->a_szc);
6614 6612 nsvd->anon_index = svd->anon_index + seg_pages(seg);
6615 6613 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt));
6616 6614 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER);
6617 6615 svd->amp->refcnt++;
6618 6616 ANON_LOCK_EXIT(&svd->amp->a_rwlock);
6619 6617 }
6620 6618
6621 6619 /*
6622 6620 * Split the amount of swap reserved.
6623 6621 */
6624 6622 if (svd->swresv) {
6625 6623 /*
6626 6624 * For MAP_NORESERVE, only allocate swap reserve for pages
6627 6625		 * being used. Other segments get enough to cover the
6628 6626		 * whole segment.
6629 6627 */
6630 6628 if (svd->flags & MAP_NORESERVE) {
6631 6629 size_t oswresv;
6632 6630
6633 6631 ASSERT(svd->amp);
6634 6632 oswresv = svd->swresv;
6635 6633 svd->swresv = ptob(anon_pages(svd->amp->ahp,
6636 6634 svd->anon_index, btop(seg->s_size)));
6637 6635 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
6638 6636 nsvd->anon_index, btop(nseg->s_size)));
6639 6637 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
6640 6638 } else {
6641 6639 if (svd->pageswap) {
6642 6640 svd->swresv = segvn_count_swap_by_vpages(seg);
6643 6641 ASSERT(nsvd->swresv >= svd->swresv);
6644 6642 nsvd->swresv -= svd->swresv;
6645 6643 } else {
6646 6644 ASSERT(svd->swresv == seg->s_size +
6647 6645 nseg->s_size);
6648 6646 svd->swresv = seg->s_size;
6649 6647 nsvd->swresv = nseg->s_size;
6650 6648 }
6651 6649 }
6652 6650 }
6653 6651
6654 6652 return (nseg);
6655 6653 }
6656 6654
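/*
 * A minimal user-space sketch of the split arithmetic in segvn_split_seg()
 * above, assuming hypothetical plain-struct stand-ins for seg/segvn_data;
 * it is not the kernel implementation. It shows how the upper segment's
 * base, size and vnode offset are derived, and why anonymous segments keep
 * offset 0.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct mini_seg {
	uintptr_t s_base;	/* segment base address */
	size_t s_size;		/* segment length in bytes */
	uint64_t offset;	/* vnode offset (0 for anon) */
	int is_anon;		/* nonzero: anonymous mapping */
};

/* Split 'seg' at 'addr'; fill in 'upper' and shrink 'seg' in place. */
static void
mini_split(struct mini_seg *seg, uintptr_t addr, struct mini_seg *upper)
{
	assert(addr > seg->s_base && addr < seg->s_base + seg->s_size);

	upper->s_base = addr;
	upper->s_size = seg->s_base + seg->s_size - addr;
	upper->is_anon = seg->is_anon;
	/* Anonymous segments keep offset 0 so /proc xmap stays readable. */
	upper->offset = seg->is_anon ? 0 :
	    seg->offset + (addr - seg->s_base);
	seg->s_size = addr - seg->s_base;
}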
6657 6655 /*
6658 6656 * called on memory operations (unmap, setprot, setpagesize) for a subset
6659 6657 * of a large page segment to either demote the memory range (SDR_RANGE)
6660 6658 * or the ends (SDR_END) by addr/len.
6661 6659 *
6662 6660 * returns 0 on success. returns errno, including ENOMEM, on failure.
6663 6661 */
6664 6662 static int
6665 6663 segvn_demote_range(
6666 6664 struct seg *seg,
6667 6665 caddr_t addr,
6668 6666 size_t len,
6669 6667 int flag,
6670 6668 uint_t szcvec)
6671 6669 {
6672 6670 caddr_t eaddr = addr + len;
6673 6671 caddr_t lpgaddr, lpgeaddr;
6674 6672 struct seg *nseg;
6675 6673 struct seg *badseg1 = NULL;
6676 6674 struct seg *badseg2 = NULL;
6677 6675 size_t pgsz;
6678 6676 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6679 6677 int err;
6680 6678 uint_t szc = seg->s_szc;
6681 6679 uint_t tszcvec;
6682 6680
6683 6681 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6684 6682 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6685 6683 ASSERT(szc != 0);
6686 6684 pgsz = page_get_pagesize(szc);
6687 6685 ASSERT(seg->s_base != addr || seg->s_size != len);
6688 6686 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6689 6687 ASSERT(svd->softlockcnt == 0);
6690 6688 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6691 6689 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6692 6690
6693 6691 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
6694 6692 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
6695 6693 if (flag == SDR_RANGE) {
6696 6694 /* demote entire range */
6697 6695 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6698 6696 (void) segvn_split_seg(nseg, lpgeaddr);
6699 6697 ASSERT(badseg1->s_base == lpgaddr);
6700 6698 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
6701 6699 } else if (addr != lpgaddr) {
6702 6700 ASSERT(flag == SDR_END);
6703 6701 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6704 6702 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
6705 6703 eaddr < lpgaddr + 2 * pgsz) {
6706 6704 (void) segvn_split_seg(nseg, lpgeaddr);
6707 6705 ASSERT(badseg1->s_base == lpgaddr);
6708 6706 ASSERT(badseg1->s_size == 2 * pgsz);
6709 6707 } else {
6710 6708 nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
6711 6709 ASSERT(badseg1->s_base == lpgaddr);
6712 6710 ASSERT(badseg1->s_size == pgsz);
6713 6711 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
6714 6712 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
6715 6713 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
6716 6714 badseg2 = nseg;
6717 6715 (void) segvn_split_seg(nseg, lpgeaddr);
6718 6716 ASSERT(badseg2->s_base == lpgeaddr - pgsz);
6719 6717 ASSERT(badseg2->s_size == pgsz);
6720 6718 }
6721 6719 }
6722 6720 } else {
6723 6721 ASSERT(flag == SDR_END);
6724 6722 ASSERT(eaddr < lpgeaddr);
6725 6723 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
6726 6724 (void) segvn_split_seg(nseg, lpgeaddr);
6727 6725 ASSERT(badseg1->s_base == lpgeaddr - pgsz);
6728 6726 ASSERT(badseg1->s_size == pgsz);
6729 6727 }
6730 6728
6731 6729 ASSERT(badseg1 != NULL);
6732 6730 ASSERT(badseg1->s_szc == szc);
6733 6731 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
6734 6732 badseg1->s_size == 2 * pgsz);
6735 6733 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
6736 6734 ASSERT(badseg1->s_size == pgsz ||
6737 6735 sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
6738 6736 if (err = segvn_clrszc(badseg1)) {
6739 6737 return (err);
6740 6738 }
6741 6739 ASSERT(badseg1->s_szc == 0);
6742 6740
6743 6741 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6744 6742 uint_t tszc = highbit(tszcvec) - 1;
6745 6743 caddr_t ta = MAX(addr, badseg1->s_base);
6746 6744 caddr_t te;
6747 6745 size_t tpgsz = page_get_pagesize(tszc);
6748 6746
6749 6747 ASSERT(svd->type == MAP_SHARED);
6750 6748 ASSERT(flag == SDR_END);
6751 6749 ASSERT(tszc < szc && tszc > 0);
6752 6750
6753 6751 if (eaddr > badseg1->s_base + badseg1->s_size) {
6754 6752 te = badseg1->s_base + badseg1->s_size;
6755 6753 } else {
6756 6754 te = eaddr;
6757 6755 }
6758 6756
6759 6757 ASSERT(ta <= te);
6760 6758 badseg1->s_szc = tszc;
6761 6759 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
6762 6760 if (badseg2 != NULL) {
6763 6761 err = segvn_demote_range(badseg1, ta, te - ta,
6764 6762 SDR_END, tszcvec);
6765 6763 if (err != 0) {
6766 6764 return (err);
6767 6765 }
6768 6766 } else {
6769 6767 return (segvn_demote_range(badseg1, ta,
6770 6768 te - ta, SDR_END, tszcvec));
6771 6769 }
6772 6770 }
6773 6771 }
6774 6772
6775 6773 if (badseg2 == NULL)
6776 6774 return (0);
6777 6775 ASSERT(badseg2->s_szc == szc);
6778 6776 ASSERT(badseg2->s_size == pgsz);
6779 6777 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
6780 6778 if (err = segvn_clrszc(badseg2)) {
6781 6779 return (err);
6782 6780 }
6783 6781 ASSERT(badseg2->s_szc == 0);
6784 6782
6785 6783 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6786 6784 uint_t tszc = highbit(tszcvec) - 1;
6787 6785 size_t tpgsz = page_get_pagesize(tszc);
6788 6786
6789 6787 ASSERT(svd->type == MAP_SHARED);
6790 6788 ASSERT(flag == SDR_END);
6791 6789 ASSERT(tszc < szc && tszc > 0);
6792 6790 ASSERT(badseg2->s_base > addr);
6793 6791 ASSERT(eaddr > badseg2->s_base);
6794 6792 ASSERT(eaddr < badseg2->s_base + badseg2->s_size);
6795 6793
6796 6794 badseg2->s_szc = tszc;
6797 6795 if (!IS_P2ALIGNED(eaddr, tpgsz)) {
6798 6796 return (segvn_demote_range(badseg2, badseg2->s_base,
6799 6797 eaddr - badseg2->s_base, SDR_END, tszcvec));
6800 6798 }
6801 6799 }
6802 6800
6803 6801 return (0);
6804 6802 }
6805 6803
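/*
 * A sketch of the large-page end arithmetic that SDR_END relies on above:
 * align the request outward to the large-page size and demote only the
 * boundary pages. This assumes a power-of-two page size; the P2-style
 * macros are written out locally rather than taken from kernel headers.
 */
#include <stddef.h>
#include <stdint.h>

#define	LPG_DOWN(x, pgsz)	((x) & ~((uintptr_t)(pgsz) - 1))
#define	LPG_UP(x, pgsz)		LPG_DOWN((x) + (pgsz) - 1, (pgsz))

/* Return how many boundary large pages (0, 1 or 2) must be demoted. */
static int
end_pages_to_demote(uintptr_t addr, size_t len, size_t pgsz)
{
	uintptr_t eaddr = addr + len;
	int n = 0;

	if (addr != LPG_DOWN(addr, pgsz))
		n++;			/* start falls inside a large page */
	if (eaddr != LPG_UP(eaddr, pgsz) &&
	    (LPG_DOWN(addr, pgsz) != LPG_DOWN(eaddr, pgsz) ||
	    addr == LPG_DOWN(addr, pgsz)))
		n++;			/* end page, if not already counted */
	return (n);
}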
6806 6804 static int
6807 6805 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6808 6806 {
6809 6807 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6810 6808 struct vpage *vp, *evp;
6811 6809
6812 6810 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6813 6811
6814 6812 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6815 6813 /*
6816 6814	 * If segment protections can be used, simply check against them.
6817 6815 */
6818 6816 if (svd->pageprot == 0) {
6819 6817 int err;
6820 6818
6821 6819 err = ((svd->prot & prot) != prot) ? EACCES : 0;
6822 6820 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6823 6821 return (err);
6824 6822 }
6825 6823
6826 6824 /*
6827 6825 * Have to check down to the vpage level.
6828 6826 */
6829 6827 evp = &svd->vpage[seg_page(seg, addr + len)];
6830 6828 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6831 6829 if ((VPP_PROT(vp) & prot) != prot) {
6832 6830 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6833 6831 return (EACCES);
6834 6832 }
6835 6833 }
6836 6834 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6837 6835 return (0);
6838 6836 }
6839 6837
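/*
 * A sketch of the two-level protection check performed above: one
 * segment-wide test when no per-page protections exist, else a walk over a
 * per-page array. The uint8_t bitmask array here is a stand-in for the
 * kernel's struct vpage / VPP_PROT(), not the real data structure.
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

static int
mini_checkprot(uint8_t seg_prot, const uint8_t *pageprot, size_t npages,
    uint8_t want)
{
	size_t i;

	if (pageprot == NULL)		/* segment-level protections only */
		return ((seg_prot & want) == want ? 0 : EACCES);

	for (i = 0; i < npages; i++)	/* per-page protections */
		if ((pageprot[i] & want) != want)
			return (EACCES);
	return (0);
}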
6840 6838 static int
6841 6839 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6842 6840 {
6843 6841 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6844 6842 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6845 6843
6846 6844 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6847 6845
6848 6846 if (pgno != 0) {
6849 6847 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6850 6848 if (svd->pageprot == 0) {
6851 6849 do {
6852 6850 protv[--pgno] = svd->prot;
6853 6851 } while (pgno != 0);
6854 6852 } else {
6855 6853 size_t pgoff = seg_page(seg, addr);
6856 6854
6857 6855 do {
6858 6856 pgno--;
6859 6857 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
6860 6858 } while (pgno != 0);
6861 6859 }
6862 6860 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6863 6861 }
6864 6862 return (0);
6865 6863 }
6866 6864
6867 6865 static u_offset_t
6868 6866 segvn_getoffset(struct seg *seg, caddr_t addr)
6869 6867 {
6870 6868 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6871 6869
6872 6870 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6873 6871
6874 6872 return (svd->offset + (uintptr_t)(addr - seg->s_base));
6875 6873 }
6876 6874
6877 6875 /*ARGSUSED*/
6878 6876 static int
6879 6877 segvn_gettype(struct seg *seg, caddr_t addr)
6880 6878 {
6881 6879 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6882 6880
6883 6881 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6884 6882
6885 6883 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6886 6884 MAP_INITDATA)));
6887 6885 }
6888 6886
6889 6887 /*ARGSUSED*/
6890 6888 static int
6891 6889 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6892 6890 {
6893 6891 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6894 6892
6895 6893 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6896 6894
6897 6895 *vpp = svd->vp;
6898 6896 return (0);
6899 6897 }
6900 6898
6901 6899 /*
6902 6900 * Check to see if it makes sense to do kluster/read ahead to
6903 6901 * addr + delta relative to the mapping at addr. We assume here
6904 6902 * that delta is a signed PAGESIZE'd multiple (which can be negative).
6905 6903 *
6906 6904 * For segvn, we currently "approve" of the action if we are
6907 6905 * still in the segment and it maps from the same vp/off,
6908 6906 * or if the advice stored in segvn_data or vpages allows it.
6909 6907	 * Klustering is disallowed if MADV_RANDOM is set, or if MADV_SEQUENTIAL is set and delta is negative.
6910 6908 */
6911 6909 static int
6912 6910 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6913 6911 {
6914 6912 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6915 6913 struct anon *oap, *ap;
6916 6914 ssize_t pd;
6917 6915 size_t page;
6918 6916 struct vnode *vp1, *vp2;
6919 6917 u_offset_t off1, off2;
6920 6918 struct anon_map *amp;
6921 6919
6922 6920 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6923 6921 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6924 6922 SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
6925 6923
6926 6924 if (addr + delta < seg->s_base ||
6927 6925 addr + delta >= (seg->s_base + seg->s_size))
6928 6926 return (-1); /* exceeded segment bounds */
6929 6927
6930 6928 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */
6931 6929 page = seg_page(seg, addr);
6932 6930
6933 6931 /*
6934 6932 * Check to see if either of the pages addr or addr + delta
6935 6933 * have advice set that prevents klustering (if MADV_RANDOM advice
6936 6934 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
6937 6935 * is negative).
6938 6936 */
6939 6937 if (svd->advice == MADV_RANDOM ||
6940 6938 svd->advice == MADV_SEQUENTIAL && delta < 0)
6941 6939 return (-1);
6942 6940 else if (svd->pageadvice && svd->vpage) {
6943 6941 struct vpage *bvpp, *evpp;
6944 6942
6945 6943 bvpp = &svd->vpage[page];
6946 6944 evpp = &svd->vpage[page + pd];
6947 6945 if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
6948 6946 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
6949 6947 return (-1);
6950 6948 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
6951 6949 VPP_ADVICE(evpp) == MADV_RANDOM)
6952 6950 return (-1);
6953 6951 }
6954 6952
6955 6953 if (svd->type == MAP_SHARED)
6956 6954 return (0); /* shared mapping - all ok */
6957 6955
6958 6956 if ((amp = svd->amp) == NULL)
6959 6957 return (0); /* off original vnode */
6960 6958
6961 6959 page += svd->anon_index;
6962 6960
6963 6961	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
6964 6962
6965 6963 oap = anon_get_ptr(amp->ahp, page);
6966 6964 ap = anon_get_ptr(amp->ahp, page + pd);
6967 6965
6968 6966	ANON_LOCK_EXIT(&amp->a_rwlock);
6969 6967
6970 6968 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
6971 6969 return (-1); /* one with and one without an anon */
6972 6970 }
6973 6971
6974 6972 if (oap == NULL) { /* implies that ap == NULL */
6975 6973 return (0); /* off original vnode */
6976 6974 }
6977 6975
6978 6976 /*
6979 6977 * Now we know we have two anon pointers - check to
6980 6978 * see if they happen to be properly allocated.
6981 6979 */
6982 6980
6983 6981 /*
6984 6982 * XXX We cheat here and don't lock the anon slots. We can't because
6985 6983 * we may have been called from the anon layer which might already
6986 6984 * have locked them. We are holding a refcnt on the slots so they
6987 6985 * can't disappear. The worst that will happen is we'll get the wrong
6988 6986 * names (vp, off) for the slots and make a poor klustering decision.
6989 6987 */
6990 6988 swap_xlate(ap, &vp1, &off1);
6991 6989 swap_xlate(oap, &vp2, &off2);
6992 6990
6993 6991
6994 6992 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
6995 6993 return (-1);
6996 6994 return (0);
6997 6995 }
6998 6996
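/*
 * A sketch of the klustering policy implemented above, with advice values
 * reduced to a small local enum. Returns 0 if read-ahead to addr + delta
 * looks worthwhile, -1 otherwise; the vpage-level advice and anon-slot
 * comparisons of the real routine are elided.
 */
enum mini_advice { ADV_NORMAL, ADV_RANDOM, ADV_SEQUENTIAL };

static int
mini_kluster_ok(long base, long size, long addr, long delta,
    enum mini_advice adv)
{
	if (addr + delta < base || addr + delta >= base + size)
		return (-1);		/* exceeds segment bounds */
	if (adv == ADV_RANDOM)
		return (-1);		/* random access: never kluster */
	if (adv == ADV_SEQUENTIAL && delta < 0)
		return (-1);		/* sequential: no backward klustering */
	return (0);
}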
6999 6997 /*
7000 - * Swap the pages of seg out to secondary storage, returning the
7001 - * number of bytes of storage freed.
7002 - *
7003 - * The basic idea is first to unload all translations and then to call
7004 - * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the
7005 - * swap device. Pages to which other segments have mappings will remain
7006 - * mapped and won't be swapped. Our caller (as_swapout) has already
7007 - * performed the unloading step.
7008 - *
7009 - * The value returned is intended to correlate well with the process's
7010 - * memory requirements. However, there are some caveats:
7011 - * 1) When given a shared segment as argument, this routine will
7012 - * only succeed in swapping out pages for the last sharer of the
7013 - * segment. (Previous callers will only have decremented mapping
7014 - * reference counts.)
7015 - * 2) We assume that the hat layer maintains a large enough translation
7016 - * cache to capture process reference patterns.
7017 - */
7018 -static size_t
7019 -segvn_swapout(struct seg *seg)
7020 -{
7021 - struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7022 - struct anon_map *amp;
7023 - pgcnt_t pgcnt = 0;
7024 - pgcnt_t npages;
7025 - pgcnt_t page;
7026 - ulong_t anon_index;
7027 -
7028 - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7029 -
7030 - SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7031 - /*
7032 - * Find pages unmapped by our caller and force them
7033 - * out to the virtual swap device.
7034 - */
7035 - if ((amp = svd->amp) != NULL)
7036 - anon_index = svd->anon_index;
7037 - npages = seg->s_size >> PAGESHIFT;
7038 - for (page = 0; page < npages; page++) {
7039 - page_t *pp;
7040 - struct anon *ap;
7041 - struct vnode *vp;
7042 - u_offset_t off;
7043 - anon_sync_obj_t cookie;
7044 -
7045 - /*
7046 - * Obtain <vp, off> pair for the page, then look it up.
7047 - *
7048 - * Note that this code is willing to consider regular
7049 - * pages as well as anon pages. Is this appropriate here?
7050 - */
7051 - ap = NULL;
7052 - if (amp != NULL) {
7053 -			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7054 - if (anon_array_try_enter(amp, anon_index + page,
7055 - &cookie)) {
7056 -				ANON_LOCK_EXIT(&amp->a_rwlock);
7057 - continue;
7058 - }
7059 - ap = anon_get_ptr(amp->ahp, anon_index + page);
7060 - if (ap != NULL) {
7061 - swap_xlate(ap, &vp, &off);
7062 - } else {
7063 - vp = svd->vp;
7064 - off = svd->offset + ptob(page);
7065 - }
7066 - anon_array_exit(&cookie);
7067 -			ANON_LOCK_EXIT(&amp->a_rwlock);
7068 - } else {
7069 - vp = svd->vp;
7070 - off = svd->offset + ptob(page);
7071 - }
7072 - if (vp == NULL) { /* untouched zfod page */
7073 - ASSERT(ap == NULL);
7074 - continue;
7075 - }
7076 -
7077 - pp = page_lookup_nowait(vp, off, SE_SHARED);
7078 - if (pp == NULL)
7079 - continue;
7080 -
7081 -
7082 - /*
7083 - * Examine the page to see whether it can be tossed out,
7084 - * keeping track of how many we've found.
7085 - */
7086 - if (!page_tryupgrade(pp)) {
7087 - /*
7088 - * If the page has an i/o lock and no mappings,
7089 - * it's very likely that the page is being
7090 - * written out as a result of klustering.
7091 - * Assume this is so and take credit for it here.
7092 - */
7093 - if (!page_io_trylock(pp)) {
7094 - if (!hat_page_is_mapped(pp))
7095 - pgcnt++;
7096 - } else {
7097 - page_io_unlock(pp);
7098 - }
7099 - page_unlock(pp);
7100 - continue;
7101 - }
7102 - ASSERT(!page_iolock_assert(pp));
7103 -
7104 -
7105 - /*
7106 - * Skip if page is locked or has mappings.
7107 - * We don't need the page_struct_lock to look at lckcnt
7108 - * and cowcnt because the page is exclusive locked.
7109 - */
7110 - if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
7111 - hat_page_is_mapped(pp)) {
7112 - page_unlock(pp);
7113 - continue;
7114 - }
7115 -
7116 - /*
7117 - * dispose skips large pages so try to demote first.
7118 - */
7119 - if (pp->p_szc != 0 && !page_try_demote_pages(pp)) {
7120 - page_unlock(pp);
7121 - /*
7122 - * XXX should skip the remaining page_t's of this
7123 - * large page.
7124 - */
7125 - continue;
7126 - }
7127 -
7128 - ASSERT(pp->p_szc == 0);
7129 -
7130 - /*
7131 - * No longer mapped -- we can toss it out. How
7132 - * we do so depends on whether or not it's dirty.
7133 - */
7134 - if (hat_ismod(pp) && pp->p_vnode) {
7135 - /*
7136 - * We must clean the page before it can be
7137 - * freed. Setting B_FREE will cause pvn_done
7138 - * to free the page when the i/o completes.
7139 - * XXX: This also causes it to be accounted
7140 - * as a pageout instead of a swap: need
7141 - * B_SWAPOUT bit to use instead of B_FREE.
7142 - *
7143 - * Hold the vnode before releasing the page lock
7144 - * to prevent it from being freed and re-used by
7145 - * some other thread.
7146 - */
7147 - VN_HOLD(vp);
7148 - page_unlock(pp);
7149 -
7150 - /*
7151 - * Queue all i/o requests for the pageout thread
7152 - * to avoid saturating the pageout devices.
7153 - */
7154 - if (!queue_io_request(vp, off))
7155 - VN_RELE(vp);
7156 - } else {
7157 - /*
7158 - * The page was clean, free it.
7159 - *
7160 - * XXX: Can we ever encounter modified pages
7161 - * with no associated vnode here?
7162 - */
7163 - ASSERT(pp->p_vnode != NULL);
7164 - /*LINTED: constant in conditional context*/
7165 - VN_DISPOSE(pp, B_FREE, 0, kcred);
7166 - }
7167 -
7168 - /*
7169 - * Credit now even if i/o is in progress.
7170 - */
7171 - pgcnt++;
7172 - }
7173 - SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7174 -
7175 - /*
7176 - * Wakeup pageout to initiate i/o on all queued requests.
7177 - */
7178 - cv_signal_pageout();
7179 - return (ptob(pgcnt));
7180 -}
7181 -
7182 -/*
7183 6998 * Synchronize primary storage cache with real object in virtual memory.
7184 6999 *
7185 7000 * XXX - Anonymous pages should not be sync'ed out at all.
7186 7001 */
7187 7002 static int
7188 7003 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
7189 7004 {
7190 7005 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7191 7006 struct vpage *vpp;
7192 7007 page_t *pp;
7193 7008 u_offset_t offset;
7194 7009 struct vnode *vp;
7195 7010 u_offset_t off;
7196 7011 caddr_t eaddr;
7197 7012 int bflags;
7198 7013 int err = 0;
7199 7014 int segtype;
7200 7015 int pageprot;
7201 7016 int prot;
7202 7017 ulong_t anon_index;
7203 7018 struct anon_map *amp;
7204 7019 struct anon *ap;
7205 7020 anon_sync_obj_t cookie;
7206 7021
7207 7022 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7208 7023
7209 7024 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7210 7025
7211 7026 if (svd->softlockcnt > 0) {
7212 7027 /*
7213 7028		 * If this is a shared segment, a non-zero softlockcnt
7214 7029		 * means locked pages are still in use.
7215 7030 */
7216 7031 if (svd->type == MAP_SHARED) {
7217 7032 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7218 7033 return (EAGAIN);
7219 7034 }
7220 7035
7221 7036 /*
7222 7037 * flush all pages from seg cache
7223 7038 * otherwise we may deadlock in swap_putpage
7224 7039 * for B_INVAL page (4175402).
7225 7040 *
7226 7041 * Even if we grab segvn WRITER's lock
7227 7042 * here, there might be another thread which could've
7228 7043 * successfully performed lookup/insert just before
7229 7044 * we acquired the lock here. So, grabbing either
7230 7045		 * lock here is not of much use. Until we devise
7231 7046 * a strategy at upper layers to solve the
7232 7047 * synchronization issues completely, we expect
7233 7048 * applications to handle this appropriately.
7234 7049 */
7235 7050 segvn_purge(seg);
7236 7051 if (svd->softlockcnt > 0) {
7237 7052 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7238 7053 return (EAGAIN);
7239 7054 }
7240 7055 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7241 7056 svd->amp->a_softlockcnt > 0) {
7242 7057 /*
7243 7058 * Try to purge this amp's entries from pcache. It will
7244 7059 * succeed only if other segments that share the amp have no
7245 7060 * outstanding softlock's.
7246 7061 */
7247 7062 segvn_purge(seg);
7248 7063 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
7249 7064 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7250 7065 return (EAGAIN);
7251 7066 }
7252 7067 }
7253 7068
7254 7069 vpp = svd->vpage;
7255 7070 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7256 7071 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
7257 7072 ((flags & MS_INVALIDATE) ? B_INVAL : 0);
7258 7073
7259 7074 if (attr) {
7260 7075 pageprot = attr & ~(SHARED|PRIVATE);
7261 7076 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;
7262 7077
7263 7078 /*
7264 7079 * We are done if the segment types don't match
7265 7080 * or if we have segment level protections and
7266 7081 * they don't match.
7267 7082 */
7268 7083 if (svd->type != segtype) {
7269 7084 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7270 7085 return (0);
7271 7086 }
7272 7087 if (vpp == NULL) {
7273 7088 if (svd->prot != pageprot) {
7274 7089 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7275 7090 return (0);
7276 7091 }
7277 7092 prot = svd->prot;
7278 7093 } else
7279 7094 vpp = &svd->vpage[seg_page(seg, addr)];
7280 7095
7281 7096 } else if (svd->vp && svd->amp == NULL &&
7282 7097 (flags & MS_INVALIDATE) == 0) {
7283 7098
7284 7099 /*
7285 7100 * No attributes, no anonymous pages and MS_INVALIDATE flag
7286 7101 * is not on, just use one big request.
7287 7102 */
7288 7103 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
7289 7104 bflags, svd->cred, NULL);
7290 7105 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7291 7106 return (err);
7292 7107 }
7293 7108
7294 7109 if ((amp = svd->amp) != NULL)
7295 7110 anon_index = svd->anon_index + seg_page(seg, addr);
7296 7111
7297 7112 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
7298 7113 ap = NULL;
7299 7114 if (amp != NULL) {
7300 7115			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7301 7116 anon_array_enter(amp, anon_index, &cookie);
7302 7117 ap = anon_get_ptr(amp->ahp, anon_index++);
7303 7118 if (ap != NULL) {
7304 7119 swap_xlate(ap, &vp, &off);
7305 7120 } else {
7306 7121 vp = svd->vp;
7307 7122 off = offset;
7308 7123 }
7309 7124 anon_array_exit(&cookie);
7310 7125			ANON_LOCK_EXIT(&amp->a_rwlock);
7311 7126 } else {
7312 7127 vp = svd->vp;
7313 7128 off = offset;
7314 7129 }
7315 7130 offset += PAGESIZE;
7316 7131
7317 7132 if (vp == NULL) /* untouched zfod page */
7318 7133 continue;
7319 7134
7320 7135 if (attr) {
7321 7136 if (vpp) {
7322 7137 prot = VPP_PROT(vpp);
7323 7138 vpp++;
7324 7139 }
7325 7140 if (prot != pageprot) {
7326 7141 continue;
7327 7142 }
7328 7143 }
7329 7144
7330 7145 /*
7331 7146 * See if any of these pages are locked -- if so, then we
7332 7147 * will have to truncate an invalidate request at the first
7333 7148 * locked one. We don't need the page_struct_lock to test
7334 7149 * as this is only advisory; even if we acquire it someone
7335 7150 * might race in and lock the page after we unlock and before
7336 7151 * we do the PUTPAGE, then PUTPAGE simply does nothing.
7337 7152 */
7338 7153 if (flags & MS_INVALIDATE) {
7339 7154 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
7340 7155 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
7341 7156 page_unlock(pp);
7342 7157 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7343 7158 return (EBUSY);
7344 7159 }
7345 7160 if (ap != NULL && pp->p_szc != 0 &&
7346 7161 page_tryupgrade(pp)) {
7347 7162 if (pp->p_lckcnt == 0 &&
7348 7163 pp->p_cowcnt == 0) {
7349 7164 /*
7350 7165 * swapfs VN_DISPOSE() won't
7351 7166 * invalidate large pages.
7352 7167 * Attempt to demote.
7353 7168 * XXX can't help it if it
7354 7169 * fails. But for swapfs
7355 7170 * pages it is no big deal.
7356 7171 */
7357 7172 (void) page_try_demote_pages(
7358 7173 pp);
7359 7174 }
7360 7175 }
7361 7176 page_unlock(pp);
7362 7177 }
7363 7178 } else if (svd->type == MAP_SHARED && amp != NULL) {
7364 7179 /*
7365 7180 * Avoid writing out to disk ISM's large pages
7366 7181 * because segspt_free_pages() relies on NULL an_pvp
7367 7182 * of anon slots of such pages.
7368 7183 */
7369 7184
7370 7185 ASSERT(svd->vp == NULL);
7371 7186 /*
7372 7187 * swapfs uses page_lookup_nowait if not freeing or
7373 7188 * invalidating and skips a page if
7374 7189 * page_lookup_nowait returns NULL.
7375 7190 */
7376 7191 pp = page_lookup_nowait(vp, off, SE_SHARED);
7377 7192 if (pp == NULL) {
7378 7193 continue;
7379 7194 }
7380 7195 if (pp->p_szc != 0) {
7381 7196 page_unlock(pp);
7382 7197 continue;
7383 7198 }
7384 7199
7385 7200 /*
7386 7201 * Note ISM pages are created large so (vp, off)'s
7387 7202 * page cannot suddenly become large after we unlock
7388 7203 * pp.
7389 7204 */
7390 7205 page_unlock(pp);
7391 7206 }
7392 7207 /*
7393 7208 * XXX - Should ultimately try to kluster
7394 7209 * calls to VOP_PUTPAGE() for performance.
7395 7210 */
7396 7211 VN_HOLD(vp);
7397 7212 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
7398 7213 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
7399 7214 svd->cred, NULL);
7400 7215
7401 7216 VN_RELE(vp);
7402 7217 if (err)
7403 7218 break;
7404 7219 }
7405 7220 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7406 7221 return (err);
7407 7222 }
7408 7223
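/*
 * A sketch of the msync(3C)-style flag translation done near the top of
 * segvn_sync(): MS_ASYNC and MS_INVALIDATE map onto B_ASYNC/B_INVAL
 * putpage flags. The constants below are local stand-ins, not the
 * kernel's definitions.
 */
#define	MINI_MS_ASYNC		0x1
#define	MINI_MS_INVALIDATE	0x2
#define	MINI_B_ASYNC		0x010
#define	MINI_B_INVAL		0x020

static int
mini_sync_bflags(int flags)
{
	return (((flags & MINI_MS_ASYNC) ? MINI_B_ASYNC : 0) |
	    ((flags & MINI_MS_INVALIDATE) ? MINI_B_INVAL : 0));
}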
7409 7224 /*
7410 7225 * Determine if we have data corresponding to pages in the
7411 7226 * primary storage virtual memory cache (i.e., "in core").
7412 7227 */
7413 7228 static size_t
7414 7229 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
7415 7230 {
7416 7231 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7417 7232 struct vnode *vp, *avp;
7418 7233 u_offset_t offset, aoffset;
7419 7234 size_t p, ep;
7420 7235 int ret;
7421 7236 struct vpage *vpp;
7422 7237 page_t *pp;
7423 7238 uint_t start;
7424 7239 struct anon_map *amp; /* XXX - for locknest */
7425 7240 struct anon *ap;
7426 7241 uint_t attr;
7427 7242 anon_sync_obj_t cookie;
7428 7243
7429 7244 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7430 7245
7431 7246 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7432 7247 if (svd->amp == NULL && svd->vp == NULL) {
7433 7248 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7434 7249 bzero(vec, btopr(len));
7435 7250 return (len); /* no anonymous pages created yet */
7436 7251 }
7437 7252
7438 7253 p = seg_page(seg, addr);
7439 7254 ep = seg_page(seg, addr + len);
7440 7255 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;
7441 7256
7442 7257 amp = svd->amp;
7443 7258 for (; p < ep; p++, addr += PAGESIZE) {
7444 7259 vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
7445 7260 ret = start;
7446 7261 ap = NULL;
7447 7262 avp = NULL;
7448 7263 /* Grab the vnode/offset for the anon slot */
7449 7264 if (amp != NULL) {
7450 7265			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7451 7266 anon_array_enter(amp, svd->anon_index + p, &cookie);
7452 7267 ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
7453 7268 if (ap != NULL) {
7454 7269 swap_xlate(ap, &avp, &aoffset);
7455 7270 }
7456 7271 anon_array_exit(&cookie);
7457 7272			ANON_LOCK_EXIT(&amp->a_rwlock);
7458 7273 }
7459 7274 if ((avp != NULL) && page_exists(avp, aoffset)) {
7460 7275 /* A page exists for the anon slot */
7461 7276 ret |= SEG_PAGE_INCORE;
7462 7277
7463 7278 /*
7464 7279 * If page is mapped and writable
7465 7280 */
7466 7281 attr = (uint_t)0;
7467 7282 if ((hat_getattr(seg->s_as->a_hat, addr,
7468 7283 &attr) != -1) && (attr & PROT_WRITE)) {
7469 7284 ret |= SEG_PAGE_ANON;
7470 7285 }
7471 7286 /*
7472 7287 * Don't get page_struct lock for lckcnt and cowcnt,
7473 7288 * since this is purely advisory.
7474 7289 */
7475 7290 if ((pp = page_lookup_nowait(avp, aoffset,
7476 7291 SE_SHARED)) != NULL) {
7477 7292 if (pp->p_lckcnt)
7478 7293 ret |= SEG_PAGE_SOFTLOCK;
7479 7294 if (pp->p_cowcnt)
7480 7295 ret |= SEG_PAGE_HASCOW;
7481 7296 page_unlock(pp);
7482 7297 }
7483 7298 }
7484 7299
7485 7300 /* Gather vnode statistics */
7486 7301 vp = svd->vp;
7487 7302 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7488 7303
7489 7304 if (vp != NULL) {
7490 7305 /*
7491 7306 * Try to obtain a "shared" lock on the page
7492 7307 * without blocking. If this fails, determine
7493 7308 * if the page is in memory.
7494 7309 */
7495 7310 pp = page_lookup_nowait(vp, offset, SE_SHARED);
7496 7311 if ((pp == NULL) && (page_exists(vp, offset))) {
7497 7312 /* Page is incore, and is named */
7498 7313 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7499 7314 }
7500 7315 /*
7501 7316 * Don't get page_struct lock for lckcnt and cowcnt,
7502 7317 * since this is purely advisory.
7503 7318 */
7504 7319 if (pp != NULL) {
7505 7320 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7506 7321 if (pp->p_lckcnt)
7507 7322 ret |= SEG_PAGE_SOFTLOCK;
7508 7323 if (pp->p_cowcnt)
7509 7324 ret |= SEG_PAGE_HASCOW;
7510 7325 page_unlock(pp);
7511 7326 }
7512 7327 }
7513 7328
7514 7329 /* Gather virtual page information */
7515 7330 if (vpp) {
7516 7331 if (VPP_ISPPLOCK(vpp))
7517 7332 ret |= SEG_PAGE_LOCKED;
7518 7333 vpp++;
7519 7334 }
7520 7335
7521 7336 *vec++ = (char)ret;
7522 7337 }
7523 7338 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7524 7339 return (len);
7525 7340 }
7526 7341
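/*
 * A sketch of how segvn_incore() builds its result vector: one status byte
 * per page, OR-ing flag bits as evidence accumulates. The bit values and
 * the residency-probe callback are illustrative stand-ins for the kernel's
 * SEG_PAGE_* flags and page_exists()/page_lookup_nowait() probes.
 */
#include <stddef.h>

#define	MINI_PAGE_INCORE	0x01
#define	MINI_PAGE_VNODE		0x02
#define	MINI_PAGE_LOCKED	0x04

typedef int (*present_fn)(size_t pgno);	/* nonzero if page is resident */

static size_t
mini_incore(size_t npages, int vnode_backed, const char *locked,
    present_fn present, char *vec)
{
	size_t i;

	for (i = 0; i < npages; i++) {
		char ret = 0;

		if (present(i))
			ret |= MINI_PAGE_INCORE |
			    (vnode_backed ? MINI_PAGE_VNODE : 0);
		if (locked != NULL && locked[i])
			ret |= MINI_PAGE_LOCKED;
		vec[i] = ret;
	}
	return (npages);	/* bytes examined, as segvn_incore returns len */
}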
7527 7342 /*
7528 7343 * Statement for p_cowcnts/p_lckcnts.
7529 7344 *
7530 7345 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
7531 7346 * irrespective of the following factors or anything else:
7532 7347 *
7533 7348 * (1) anon slots are populated or not
7534 7349 * (2) cow is broken or not
7535 7350 * (3) refcnt on ap is 1 or greater than 1
7536 7351 *
7537 7352 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
7538 7353 * and munlock.
7539 7354 *
7540 7355 *
7541 7356 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
7542 7357 *
7543 7358 * if vpage has PROT_WRITE
7544 7359 * transfer cowcnt on the oldpage -> cowcnt on the newpage
7545 7360 * else
7546 7361 * transfer lckcnt on the oldpage -> lckcnt on the newpage
7547 7362 *
7548 7363 * During copy-on-write, decrement p_cowcnt on the oldpage and increment
7549 7364 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
7550 7365 *
7551 7366 * We may also break COW if softlocking on read access in the physio case.
7552 7367 * In this case, vpage may not have PROT_WRITE. So, we need to decrement
7553 7368 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
7554 7369 * vpage doesn't have PROT_WRITE.
7555 7370 *
7556 7371 *
7557 7372 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
7558 7373 *
7559 7374 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
7560 7375 * increment p_lckcnt by calling page_subclaim() which takes care of
7561 7376 * availrmem accounting and p_lckcnt overflow.
7562 7377 *
7563 7378 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
7564 7379 * increment p_cowcnt by calling page_addclaim() which takes care of
7565 7380 * availrmem availability and p_cowcnt overflow.
7566 7381 */
7567 7382
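/*
 * A sketch of the p_cowcnt/p_lckcnt selection rule documented in the
 * Statement above: a page is charged to p_cowcnt only for writable
 * MAP_PRIVATE mappings; everything else is charged to p_lckcnt. Plain
 * local constants stand in for the kernel's vpage protections and
 * segment type.
 */
#define	MINI_PROT_WRITE		0x2
#define	MINI_MAP_PRIVATE	1
#define	MINI_MAP_SHARED		2

/* Return 1 if mlock/munlock should adjust p_cowcnt, 0 for p_lckcnt. */
static int
mini_claim(int vpage_prot, int seg_type)
{
	return ((vpage_prot & MINI_PROT_WRITE) != 0 &&
	    seg_type == MINI_MAP_PRIVATE);
}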
7568 7383 /*
7569 7384 * Lock down (or unlock) pages mapped by this segment.
7570 7385 *
7571 7386 * XXX only creates PAGESIZE pages if anon slots are not initialized.
7572 7387 * At fault time they will be relocated into larger pages.
7573 7388 */
7574 7389 static int
7575 7390 segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
7576 7391 int attr, int op, ulong_t *lockmap, size_t pos)
7577 7392 {
7578 7393 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7579 7394 struct vpage *vpp;
7580 7395 struct vpage *evp;
7581 7396 page_t *pp;
7582 7397 u_offset_t offset;
7583 7398 u_offset_t off;
7584 7399 int segtype;
7585 7400 int pageprot;
7586 7401 int claim;
7587 7402 struct vnode *vp;
7588 7403 ulong_t anon_index;
7589 7404 struct anon_map *amp;
7590 7405 struct anon *ap;
7591 7406 struct vattr va;
7592 7407 anon_sync_obj_t cookie;
7593 7408 struct kshmid *sp = NULL;
7594 7409 struct proc *p = curproc;
7595 7410 kproject_t *proj = NULL;
7596 7411 int chargeproc = 1;
7597 7412 size_t locked_bytes = 0;
7598 7413 size_t unlocked_bytes = 0;
7599 7414 int err = 0;
7600 7415
7601 7416 /*
7602 7417 * Hold write lock on address space because may split or concatenate
7603 7418 * segments
7604 7419 */
7605 7420 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7606 7421
7607 7422 /*
7608 7423 * If this is a shm, use shm's project and zone, else use
7609 7424 * project and zone of calling process
7610 7425 */
7611 7426
7612 7427 /* Determine if this segment backs a sysV shm */
7613 7428 if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7614 7429 ASSERT(svd->type == MAP_SHARED);
7615 7430 ASSERT(svd->tr_state == SEGVN_TR_OFF);
7616 7431 sp = svd->amp->a_sp;
7617 7432 proj = sp->shm_perm.ipc_proj;
7618 7433 chargeproc = 0;
7619 7434 }
7620 7435
7621 7436 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7622 7437 if (attr) {
7623 7438 pageprot = attr & ~(SHARED|PRIVATE);
7624 7439 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE;
7625 7440
7626 7441 /*
7627 7442 * We are done if the segment types don't match
7628 7443 * or if we have segment level protections and
7629 7444 * they don't match.
7630 7445 */
7631 7446 if (svd->type != segtype) {
7632 7447 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7633 7448 return (0);
7634 7449 }
7635 7450 if (svd->pageprot == 0 && svd->prot != pageprot) {
7636 7451 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7637 7452 return (0);
7638 7453 }
7639 7454 }
7640 7455
7641 7456 if (op == MC_LOCK) {
7642 7457 if (svd->tr_state == SEGVN_TR_INIT) {
7643 7458 svd->tr_state = SEGVN_TR_OFF;
7644 7459 } else if (svd->tr_state == SEGVN_TR_ON) {
7645 7460 ASSERT(svd->amp != NULL);
7646 7461 segvn_textunrepl(seg, 0);
7647 7462 ASSERT(svd->amp == NULL &&
7648 7463 svd->tr_state == SEGVN_TR_OFF);
7649 7464 }
7650 7465 }
7651 7466
7652 7467 /*
7653 7468 * If we're locking, then we must create a vpage structure if
7654 7469 * none exists. If we're unlocking, then check to see if there
7655 7470 * is a vpage -- if not, then we could not have locked anything.
7656 7471 */
7657 7472
7658 7473 if ((vpp = svd->vpage) == NULL) {
7659 7474 if (op == MC_LOCK)
7660 7475 segvn_vpage(seg);
7661 7476 else {
7662 7477 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7663 7478 return (0);
7664 7479 }
7665 7480 }
7666 7481
7667 7482 /*
7668 7483 * The anonymous data vector (i.e., previously
7669 7484 * unreferenced mapping to swap space) can be allocated
7670 7485 * by lazily testing for its existence.
7671 7486 */
7672 7487 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
7673 7488 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
7674 7489 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
7675 7490 svd->amp->a_szc = seg->s_szc;
7676 7491 }
7677 7492
7678 7493 if ((amp = svd->amp) != NULL) {
7679 7494 anon_index = svd->anon_index + seg_page(seg, addr);
7680 7495 }
7681 7496
7682 7497 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7683 7498 evp = &svd->vpage[seg_page(seg, addr + len)];
7684 7499
7685 7500 if (sp != NULL)
7686 7501 mutex_enter(&sp->shm_mlock);
7687 7502
7688 7503 /* determine number of unlocked bytes in range for lock operation */
7689 7504 if (op == MC_LOCK) {
7690 7505
7691 7506 if (sp == NULL) {
7692 7507 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7693 7508 vpp++) {
7694 7509 if (!VPP_ISPPLOCK(vpp))
7695 7510 unlocked_bytes += PAGESIZE;
7696 7511 }
7697 7512 } else {
7698 7513 ulong_t i_idx, i_edx;
7699 7514 anon_sync_obj_t i_cookie;
7700 7515 struct anon *i_ap;
7701 7516 struct vnode *i_vp;
7702 7517 u_offset_t i_off;
7703 7518
7704 7519 /* Only count sysV pages once for locked memory */
7705 7520 i_edx = svd->anon_index + seg_page(seg, addr + len);
7706 7521			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7707 7522 for (i_idx = anon_index; i_idx < i_edx; i_idx++) {
7708 7523 anon_array_enter(amp, i_idx, &i_cookie);
7709 7524 i_ap = anon_get_ptr(amp->ahp, i_idx);
7710 7525 if (i_ap == NULL) {
7711 7526 unlocked_bytes += PAGESIZE;
7712 7527 anon_array_exit(&i_cookie);
7713 7528 continue;
7714 7529 }
7715 7530 swap_xlate(i_ap, &i_vp, &i_off);
7716 7531 anon_array_exit(&i_cookie);
7717 7532 pp = page_lookup(i_vp, i_off, SE_SHARED);
7718 7533 if (pp == NULL) {
7719 7534 unlocked_bytes += PAGESIZE;
7720 7535 continue;
7721 7536 } else if (pp->p_lckcnt == 0)
7722 7537 unlocked_bytes += PAGESIZE;
7723 7538 page_unlock(pp);
7724 7539 }
7725 7540			ANON_LOCK_EXIT(&amp->a_rwlock);
7726 7541 }
7727 7542
7728 7543 mutex_enter(&p->p_lock);
7729 7544 err = rctl_incr_locked_mem(p, proj, unlocked_bytes,
7730 7545 chargeproc);
7731 7546 mutex_exit(&p->p_lock);
7732 7547
7733 7548 if (err) {
7734 7549 if (sp != NULL)
7735 7550 mutex_exit(&sp->shm_mlock);
7736 7551 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7737 7552 return (err);
7738 7553 }
7739 7554 }
7740 7555 /*
7741 7556 * Loop over all pages in the range. Process if we're locking and
7742 7557 * page has not already been locked in this mapping; or if we're
7743 7558 * unlocking and the page has been locked.
7744 7559 */
7745 7560 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7746 7561 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
7747 7562 if ((attr == 0 || VPP_PROT(vpp) == pageprot) &&
7748 7563 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) ||
7749 7564 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) {
7750 7565
7751 7566 if (amp != NULL)
7752 7567				ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7753 7568 /*
7754 7569 * If this isn't a MAP_NORESERVE segment and
7755 7570 * we're locking, allocate anon slots if they
7756 7571 * don't exist. The page is brought in later on.
7757 7572 */
7758 7573 if (op == MC_LOCK && svd->vp == NULL &&
7759 7574 ((svd->flags & MAP_NORESERVE) == 0) &&
7760 7575 amp != NULL &&
7761 7576 ((ap = anon_get_ptr(amp->ahp, anon_index))
7762 7577 == NULL)) {
7763 7578 anon_array_enter(amp, anon_index, &cookie);
7764 7579
7765 7580 if ((ap = anon_get_ptr(amp->ahp,
7766 7581 anon_index)) == NULL) {
7767 7582 pp = anon_zero(seg, addr, &ap,
7768 7583 svd->cred);
7769 7584 if (pp == NULL) {
7770 7585 anon_array_exit(&cookie);
7771 7586						ANON_LOCK_EXIT(&amp->a_rwlock);
7772 7587 err = ENOMEM;
7773 7588 goto out;
7774 7589 }
7775 7590 ASSERT(anon_get_ptr(amp->ahp,
7776 7591 anon_index) == NULL);
7777 7592 (void) anon_set_ptr(amp->ahp,
7778 7593 anon_index, ap, ANON_SLEEP);
7779 7594 page_unlock(pp);
7780 7595 }
7781 7596 anon_array_exit(&cookie);
7782 7597 }
7783 7598
7784 7599 /*
7785 7600 * Get name for page, accounting for
7786 7601 * existence of private copy.
7787 7602 */
7788 7603 ap = NULL;
7789 7604 if (amp != NULL) {
7790 7605 anon_array_enter(amp, anon_index, &cookie);
7791 7606 ap = anon_get_ptr(amp->ahp, anon_index);
7792 7607 if (ap != NULL) {
7793 7608 swap_xlate(ap, &vp, &off);
7794 7609 } else {
7795 7610 if (svd->vp == NULL &&
7796 7611 (svd->flags & MAP_NORESERVE)) {
7797 7612 anon_array_exit(&cookie);
7798 7613					ANON_LOCK_EXIT(&amp->a_rwlock);
7799 7614 continue;
7800 7615 }
7801 7616 vp = svd->vp;
7802 7617 off = offset;
7803 7618 }
7804 7619 if (op != MC_LOCK || ap == NULL) {
7805 7620 anon_array_exit(&cookie);
7806 7621				ANON_LOCK_EXIT(&amp->a_rwlock);
7807 7622 }
7808 7623 } else {
7809 7624 vp = svd->vp;
7810 7625 off = offset;
7811 7626 }
7812 7627
7813 7628 /*
7814 7629 * Get page frame. It's ok if the page is
7815 7630 * not available when we're unlocking, as this
7816 7631 * may simply mean that a page we locked got
7817 7632 * truncated out of existence after we locked it.
7818 7633 *
7819 7634 * Invoke VOP_GETPAGE() to obtain the page struct
7820 7635 * since we may need to read it from disk if its
7821 7636 * been paged out.
7822 7637 */
7823 7638 if (op != MC_LOCK)
7824 7639 pp = page_lookup(vp, off, SE_SHARED);
7825 7640 else {
7826 7641 page_t *pl[1 + 1];
7827 7642 int error;
7828 7643
7829 7644 ASSERT(vp != NULL);
7830 7645
7831 7646 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
7832 7647 (uint_t *)NULL, pl, PAGESIZE, seg, addr,
7833 7648 S_OTHER, svd->cred, NULL);
7834 7649
7835 7650 if (error && ap != NULL) {
7836 7651 anon_array_exit(&cookie);
7837 7652					ANON_LOCK_EXIT(&amp->a_rwlock);
7838 7653 }
7839 7654
7840 7655 /*
7841 7656 * If the error is EDEADLK then we must bounce
7842 7657 * up and drop all vm subsystem locks and then
7843 7658 * retry the operation later
7844 7659 * This behavior is a temporary measure because
7845 7660 * ufs/sds logging is badly designed and will
7846 7661 * deadlock if we don't allow this bounce to
7847 7662 * happen. The real solution is to re-design
7848 7663 * the logging code to work properly. See bug
7849 7664 * 4125102 for details of the problem.
7850 7665 */
7851 7666 if (error == EDEADLK) {
7852 7667 err = error;
7853 7668 goto out;
7854 7669 }
7855 7670 /*
7856 7671 * Quit if we fail to fault in the page. Treat
7857 7672 * the failure as an error, unless the addr
7858 7673 * is mapped beyond the end of a file.
7859 7674 */
7860 7675 if (error && svd->vp) {
7861 7676 va.va_mask = AT_SIZE;
7862 7677 if (VOP_GETATTR(svd->vp, &va, 0,
7863 7678 svd->cred, NULL) != 0) {
7864 7679 err = EIO;
7865 7680 goto out;
7866 7681 }
7867 7682 if (btopr(va.va_size) >=
7868 7683 btopr(off + 1)) {
7869 7684 err = EIO;
7870 7685 goto out;
7871 7686 }
7872 7687 goto out;
7873 7688
7874 7689 } else if (error) {
7875 7690 err = EIO;
7876 7691 goto out;
7877 7692 }
7878 7693 pp = pl[0];
7879 7694 ASSERT(pp != NULL);
7880 7695 }
7881 7696
7882 7697 /*
7883 7698 * See Statement at the beginning of this routine.
7884 7699 *
7885 7700 * claim is always set if MAP_PRIVATE and PROT_WRITE
7886 7701 * irrespective of following factors:
7887 7702 *
7888 7703 * (1) anon slots are populated or not
7889 7704 * (2) cow is broken or not
7890 7705 * (3) refcnt on ap is 1 or greater than 1
7891 7706 *
7892 7707 * See 4140683 for details
7893 7708 */
7894 7709 claim = ((VPP_PROT(vpp) & PROT_WRITE) &&
7895 7710 (svd->type == MAP_PRIVATE));
7896 7711
7897 7712 /*
7898 7713 * Perform page-level operation appropriate to
7899 7714 * operation. If locking, undo the SOFTLOCK
7900 7715 * performed to bring the page into memory
7901 7716 * after setting the lock. If unlocking,
7902 7717 * and no page was found, account for the claim
7903 7718 * separately.
7904 7719 */
7905 7720 if (op == MC_LOCK) {
7906 7721 int ret = 1; /* Assume success */
7907 7722
7908 7723 ASSERT(!VPP_ISPPLOCK(vpp));
7909 7724
7910 7725 ret = page_pp_lock(pp, claim, 0);
7911 7726 if (ap != NULL) {
7912 7727 if (ap->an_pvp != NULL) {
7913 7728 anon_swap_free(ap, pp);
7914 7729 }
7915 7730 anon_array_exit(&cookie);
7916 7731					ANON_LOCK_EXIT(&amp->a_rwlock);
7917 7732 }
7918 7733 if (ret == 0) {
7919 7734 /* locking page failed */
7920 7735 page_unlock(pp);
7921 7736 err = EAGAIN;
7922 7737 goto out;
7923 7738 }
7924 7739 VPP_SETPPLOCK(vpp);
7925 7740 if (sp != NULL) {
7926 7741 if (pp->p_lckcnt == 1)
7927 7742 locked_bytes += PAGESIZE;
7928 7743 } else
7929 7744 locked_bytes += PAGESIZE;
7930 7745
7931 7746 if (lockmap != (ulong_t *)NULL)
7932 7747 BT_SET(lockmap, pos);
7933 7748
7934 7749 page_unlock(pp);
7935 7750 } else {
7936 7751 ASSERT(VPP_ISPPLOCK(vpp));
7937 7752 if (pp != NULL) {
7938 7753 /* sysV pages should be locked */
7939 7754 ASSERT(sp == NULL || pp->p_lckcnt > 0);
7940 7755 page_pp_unlock(pp, claim, 0);
7941 7756 if (sp != NULL) {
7942 7757 if (pp->p_lckcnt == 0)
7943 7758 unlocked_bytes
7944 7759 += PAGESIZE;
7945 7760 } else
7946 7761 unlocked_bytes += PAGESIZE;
7947 7762 page_unlock(pp);
7948 7763 } else {
7949 7764 ASSERT(sp == NULL);
7950 7765 unlocked_bytes += PAGESIZE;
7951 7766 }
7952 7767 VPP_CLRPPLOCK(vpp);
7953 7768 }
7954 7769 }
7955 7770 }
7956 7771 out:
7957 7772 if (op == MC_LOCK) {
7958 7773 /* Credit back bytes that did not get locked */
7959 7774 if ((unlocked_bytes - locked_bytes) > 0) {
7960 7775 if (proj == NULL)
7961 7776 mutex_enter(&p->p_lock);
7962 7777 rctl_decr_locked_mem(p, proj,
7963 7778 (unlocked_bytes - locked_bytes), chargeproc);
7964 7779 if (proj == NULL)
7965 7780 mutex_exit(&p->p_lock);
7966 7781 }
7967 7782
7968 7783 } else {
7969 7784 /* Account bytes that were unlocked */
7970 7785 if (unlocked_bytes > 0) {
7971 7786 if (proj == NULL)
7972 7787 mutex_enter(&p->p_lock);
7973 7788 rctl_decr_locked_mem(p, proj, unlocked_bytes,
7974 7789 chargeproc);
7975 7790 if (proj == NULL)
7976 7791 mutex_exit(&p->p_lock);
7977 7792 }
7978 7793 }
7979 7794 if (sp != NULL)
7980 7795 mutex_exit(&sp->shm_mlock);
7981 7796 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7982 7797
7983 7798 return (err);
7984 7799 }
7985 7800
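/*
 * A simplified sketch of the locked-memory accounting that brackets the
 * MC_LOCK path above: the resource control is charged up front for bytes
 * not yet locked, then credited back for whatever failed to lock.
 * rctl_charge()/rctl_credit() are hypothetical stand-ins for
 * rctl_incr_locked_mem()/rctl_decr_locked_mem().
 */
#include <stddef.h>

static size_t charged;			/* stand-in for the project rctl */

static int
rctl_charge(size_t bytes, size_t limit)
{
	if (charged + bytes > limit)
		return (-1);		/* EAGAIN in the real code */
	charged += bytes;
	return (0);
}

static void
rctl_credit(size_t bytes)
{
	charged -= bytes;
}

static int
mini_mc_lock(size_t unlocked_bytes, size_t actually_locked, size_t limit)
{
	if (rctl_charge(unlocked_bytes, limit) != 0)
		return (-1);
	/* ... lock pages; some may fail ... */
	if (unlocked_bytes > actually_locked)
		rctl_credit(unlocked_bytes - actually_locked);
	return (0);
}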
7986 7801 /*
7987 7802 * Set advice from user for specified pages
7988 7803	 * There are 9 types of advice:
7989 7804 * MADV_NORMAL - Normal (default) behavior (whatever that is)
7990 7805 * MADV_RANDOM - Random page references
7991 7806 * do not allow readahead or 'klustering'
7992 7807 * MADV_SEQUENTIAL - Sequential page references
7993 7808 * Pages previous to the one currently being
7994 7809 * accessed (determined by fault) are 'not needed'
7995 7810 * and are freed immediately
7996 7811 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl)
7997 7812 * MADV_DONTNEED - Pages are not needed (synced out in mctl)
7998 7813 * MADV_FREE - Contents can be discarded
7999 7814 * MADV_ACCESS_DEFAULT- Default access
8000 7815 * MADV_ACCESS_LWP - Next LWP will access heavily
8001 7816 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily
8002 7817 */
8003 7818 static int
8004 7819 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8005 7820 {
8006 7821 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8007 7822 size_t page;
8008 7823 int err = 0;
8009 7824 int already_set;
8010 7825 struct anon_map *amp;
8011 7826 ulong_t anon_index;
8012 7827 struct seg *next;
8013 7828 lgrp_mem_policy_t policy;
8014 7829 struct seg *prev;
8015 7830 struct vnode *vp;
8016 7831
8017 7832 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8018 7833
8019 7834 /*
8020 7835 * In case of MADV_FREE, we won't be modifying any segment private
8021 7836 * data structures; so, we only need to grab READER's lock
8022 7837 */
8023 7838 if (behav != MADV_FREE) {
8024 7839 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8025 7840 if (svd->tr_state != SEGVN_TR_OFF) {
8026 7841 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8027 7842 return (0);
8028 7843 }
8029 7844 } else {
8030 7845 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8031 7846 }
8032 7847
8033 7848 /*
8034 7849 * Large pages are assumed to be only turned on when accesses to the
8035 7850 * segment's address range have spatial and temporal locality. That
8036 7851 * justifies ignoring MADV_SEQUENTIAL for large page segments.
8037 7852 * Also, ignore advice affecting lgroup memory allocation
8038 7853 * if don't need to do lgroup optimizations on this system
8039 7854 */
8040 7855
8041 7856 if ((behav == MADV_SEQUENTIAL &&
8042 7857 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
8043 7858 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
8044 7859 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
8045 7860 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8046 7861 return (0);
8047 7862 }
8048 7863
8049 7864 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
8050 7865 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
8051 7866 /*
8052 7867 * Since we are going to unload hat mappings
8053 7868 * we first have to flush the cache. Otherwise
8054 7869 * this might lead to system panic if another
8055 7870 * thread is doing physio on the range whose
8056 7871 * mappings are unloaded by madvise(3C).
8057 7872 */
8058 7873 if (svd->softlockcnt > 0) {
8059 7874 /*
8060 7875			 * If this is a shared segment, a non-zero softlockcnt
8061 7876			 * means locked pages are still in use.
8062 7877 */
8063 7878 if (svd->type == MAP_SHARED) {
8064 7879 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8065 7880 return (EAGAIN);
8066 7881 }
8067 7882 /*
8068 7883 * Since we do have the segvn writers lock
8069 7884 * nobody can fill the cache with entries
8070 7885 * belonging to this seg during the purge.
8071 7886 * The flush either succeeds or we still
8072 7887			 * have pending I/Os. In the latter case,
8073 7888 * madvise(3C) fails.
8074 7889 */
8075 7890 segvn_purge(seg);
8076 7891 if (svd->softlockcnt > 0) {
8077 7892 /*
8078 7893 * Since madvise(3C) is advisory and
8079 7894 * it's not part of UNIX98, madvise(3C)
8080 7895 * failure here doesn't cause any hardship.
8081 7896 * Note that we don't block in "as" layer.
8082 7897 */
8083 7898 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8084 7899 return (EAGAIN);
8085 7900 }
8086 7901 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
8087 7902 svd->amp->a_softlockcnt > 0) {
8088 7903 /*
8089 7904 * Try to purge this amp's entries from pcache. It
8090 7905 * will succeed only if other segments that share the
8091 7906 * amp have no outstanding softlock's.
8092 7907 */
8093 7908 segvn_purge(seg);
8094 7909 }
8095 7910 }
8096 7911
8097 7912 amp = svd->amp;
8098 7913 vp = svd->vp;
8099 7914 if (behav == MADV_FREE) {
8100 7915 /*
8101 7916 * MADV_FREE is not supported for segments with
8102 7917 * underlying object; if anonmap is NULL, anon slots
8103 7918 * are not yet populated and there is nothing for
8104 7919 * us to do. As MADV_FREE is advisory, we don't
8105 7920 * return error in either case.
8106 7921 */
8107 7922 if (vp != NULL || amp == NULL) {
8108 7923 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8109 7924 return (0);
8110 7925 }
8111 7926
8112 7927 segvn_purge(seg);
8113 7928
8114 7929 page = seg_page(seg, addr);
8115 7930		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8116 7931 anon_disclaim(amp, svd->anon_index + page, len);
8117 7932		ANON_LOCK_EXIT(&amp->a_rwlock);
8118 7933 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8119 7934 return (0);
8120 7935 }
8121 7936
8122 7937 /*
8123 7938 * If advice is to be applied to entire segment,
8124 7939 * use advice field in seg_data structure
8125 7940 * otherwise use appropriate vpage entry.
8126 7941 */
8127 7942 if ((addr == seg->s_base) && (len == seg->s_size)) {
8128 7943 switch (behav) {
8129 7944 case MADV_ACCESS_LWP:
8130 7945 case MADV_ACCESS_MANY:
8131 7946 case MADV_ACCESS_DEFAULT:
8132 7947 /*
8133 7948 * Set memory allocation policy for this segment
8134 7949 */
8135 7950 policy = lgrp_madv_to_policy(behav, len, svd->type);
8136 7951 if (svd->type == MAP_SHARED)
8137 7952 already_set = lgrp_shm_policy_set(policy, amp,
8138 7953 svd->anon_index, vp, svd->offset, len);
8139 7954 else {
8140 7955 /*
8141 7956 * For private memory, need writers lock on
8142 7957 * address space because the segment may be
8143 7958 * split or concatenated when changing policy
8144 7959 */
8145 7960 if (AS_READ_HELD(seg->s_as,
8146 7961 &seg->s_as->a_lock)) {
8147 7962 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8148 7963 return (IE_RETRY);
8149 7964 }
8150 7965
8151 7966 already_set = lgrp_privm_policy_set(policy,
8152 7967 &svd->policy_info, len);
8153 7968 }
8154 7969
8155 7970 /*
8156 7971 * If policy set already and it shouldn't be reapplied,
8157 7972 * don't do anything.
8158 7973 */
8159 7974 if (already_set &&
8160 7975 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8161 7976 break;
8162 7977
8163 7978 /*
8164 7979 * Mark any existing pages in given range for
8165 7980 * migration
8166 7981 */
8167 7982 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8168 7983 vp, svd->offset, 1);
8169 7984
8170 7985 /*
8171 7986 * If same policy set already or this is a shared
8172 7987 * memory segment, don't need to try to concatenate
8173 7988 * segment with adjacent ones.
8174 7989 */
8175 7990 if (already_set || svd->type == MAP_SHARED)
8176 7991 break;
8177 7992
8178 7993 /*
8179 7994 * Try to concatenate this segment with previous
8180 7995 * one and next one, since we changed policy for
8181 7996 * this one and it may be compatible with adjacent
8182 7997 * ones now.
8183 7998 */
8184 7999 prev = AS_SEGPREV(seg->s_as, seg);
8185 8000 next = AS_SEGNEXT(seg->s_as, seg);
8186 8001
8187 8002 if (next && next->s_ops == &segvn_ops &&
8188 8003 addr + len == next->s_base)
8189 8004 (void) segvn_concat(seg, next, 1);
8190 8005
8191 8006 if (prev && prev->s_ops == &segvn_ops &&
8192 8007 addr == prev->s_base + prev->s_size) {
8193 8008 /*
8194 8009 * Drop lock for private data of current
8195 8010 * segment before concatenating (deleting) it
8196 8011 * and return IE_REATTACH to tell as_ctl() that
8197 8012 * current segment has changed
8198 8013 */
8199 8014 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8200 8015 if (!segvn_concat(prev, seg, 1))
8201 8016 err = IE_REATTACH;
8202 8017
8203 8018 return (err);
8204 8019 }
8205 8020 break;
8206 8021
8207 8022 case MADV_SEQUENTIAL:
8208 8023 /*
8209 8024 * unloading mapping guarantees
8210 8025 * detection in segvn_fault
8211 8026 */
8212 8027 ASSERT(seg->s_szc == 0);
8213 8028 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8214 8029 hat_unload(seg->s_as->a_hat, addr, len,
8215 8030 HAT_UNLOAD);
8216 8031 /* FALLTHROUGH */
8217 8032 case MADV_NORMAL:
8218 8033 case MADV_RANDOM:
8219 8034 svd->advice = (uchar_t)behav;
8220 8035 svd->pageadvice = 0;
8221 8036 break;
8222 8037 case MADV_WILLNEED: /* handled in memcntl */
8223 8038 case MADV_DONTNEED: /* handled in memcntl */
8224 8039 case MADV_FREE: /* handled above */
8225 8040 break;
8226 8041 default:
8227 8042 err = EINVAL;
8228 8043 }
8229 8044 } else {
8230 8045 caddr_t eaddr;
8231 8046 struct seg *new_seg;
8232 8047 struct segvn_data *new_svd;
8233 8048 u_offset_t off;
8234 8049 caddr_t oldeaddr;
8235 8050
8236 8051 page = seg_page(seg, addr);
8237 8052
8238 8053 segvn_vpage(seg);
8239 8054
8240 8055 switch (behav) {
8241 8056 struct vpage *bvpp, *evpp;
8242 8057
8243 8058 case MADV_ACCESS_LWP:
8244 8059 case MADV_ACCESS_MANY:
8245 8060 case MADV_ACCESS_DEFAULT:
8246 8061 /*
8247 8062 * Set memory allocation policy for portion of this
8248 8063 * segment
8249 8064 */
8250 8065
8251 8066 /*
8252 8067 * Align address and length of advice to page
8253 8068 * boundaries for large pages
8254 8069 */
8255 8070 if (seg->s_szc != 0) {
8256 8071 size_t pgsz;
8257 8072
8258 8073 pgsz = page_get_pagesize(seg->s_szc);
8259 8074 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
8260 8075 len = P2ROUNDUP(len, pgsz);
8261 8076 }
8262 8077
8263 8078 /*
8264 8079 * Check to see whether policy is set already
8265 8080 */
8266 8081 policy = lgrp_madv_to_policy(behav, len, svd->type);
8267 8082
8268 8083 anon_index = svd->anon_index + page;
8269 8084 off = svd->offset + (uintptr_t)(addr - seg->s_base);
8270 8085
8271 8086 if (svd->type == MAP_SHARED)
8272 8087 already_set = lgrp_shm_policy_set(policy, amp,
8273 8088 anon_index, vp, off, len);
8274 8089 else
8275 8090 already_set =
8276 8091 (policy == svd->policy_info.mem_policy);
8277 8092
8278 8093 /*
8279 8094 * If policy set already and it shouldn't be reapplied,
8280 8095 * don't do anything.
8281 8096 */
8282 8097 if (already_set &&
8283 8098 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8284 8099 break;
8285 8100
8286 8101 /*
8287 8102 * For private memory, need writers lock on
8288 8103 * address space because the segment may be
8289 8104 * split or concatenated when changing policy
8290 8105 */
8291 8106 if (svd->type == MAP_PRIVATE &&
8292 8107 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) {
8293 8108 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8294 8109 return (IE_RETRY);
8295 8110 }
8296 8111
8297 8112 /*
8298 8113 * Mark any existing pages in given range for
8299 8114 * migration
8300 8115 */
8301 8116 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8302 8117 vp, svd->offset, 1);
8303 8118
8304 8119 /*
8305 8120 * Don't need to try to split or concatenate
8306 8121 * segments, since policy is same or this is a shared
8307 8122 * memory segment
8308 8123 */
8309 8124 if (already_set || svd->type == MAP_SHARED)
8310 8125 break;
8311 8126
8312 8127 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
8313 8128 ASSERT(svd->amp == NULL);
8314 8129 ASSERT(svd->tr_state == SEGVN_TR_OFF);
8315 8130 ASSERT(svd->softlockcnt == 0);
8316 8131 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
8317 8132 HAT_REGION_TEXT);
8318 8133 svd->rcookie = HAT_INVALID_REGION_COOKIE;
8319 8134 }
8320 8135
8321 8136 /*
8322 8137 * Split off new segment if advice only applies to a
8323 8138 * portion of existing segment starting in middle
8324 8139 */
8325 8140 new_seg = NULL;
8326 8141 eaddr = addr + len;
8327 8142 oldeaddr = seg->s_base + seg->s_size;
8328 8143 if (addr > seg->s_base) {
8329 8144 /*
8330 8145 * Must flush I/O page cache
8331 8146 * before splitting segment
8332 8147 */
8333 8148 if (svd->softlockcnt > 0)
8334 8149 segvn_purge(seg);
8335 8150
8336 8151 /*
8337 8152 * Split segment and return IE_REATTACH to tell
8338 8153 * as_ctl() that current segment changed
8339 8154 */
8340 8155 new_seg = segvn_split_seg(seg, addr);
8341 8156 new_svd = (struct segvn_data *)new_seg->s_data;
8342 8157 err = IE_REATTACH;
8343 8158
8344 8159 /*
8345 8160 * If new segment ends where old one
8346 8161 * did, try to concatenate the new
8347 8162 * segment with next one.
8348 8163 */
8349 8164 if (eaddr == oldeaddr) {
8350 8165 /*
8351 8166 * Set policy for new segment
8352 8167 */
8353 8168 (void) lgrp_privm_policy_set(policy,
8354 8169 &new_svd->policy_info,
8355 8170 new_seg->s_size);
8356 8171
8357 8172 next = AS_SEGNEXT(new_seg->s_as,
8358 8173 new_seg);
8359 8174
8360 8175 if (next &&
8361 8176 next->s_ops == &segvn_ops &&
8362 8177 eaddr == next->s_base)
8363 8178 (void) segvn_concat(new_seg,
8364 8179 next, 1);
8365 8180 }
8366 8181 }
8367 8182
8368 8183 /*
8369 8184 * Split off end of existing segment if advice only
8370 8185 * applies to a portion of segment ending before
8371 8186 * end of the existing segment
8372 8187 */
8373 8188 if (eaddr < oldeaddr) {
8374 8189 /*
8375 8190 * Must flush I/O page cache
8376 8191 * before splitting segment
8377 8192 */
8378 8193 if (svd->softlockcnt > 0)
8379 8194 segvn_purge(seg);
8380 8195
8381 8196 /*
8382 8197 * If beginning of old segment was already
8383 8198 * split off, use new segment to split end off
8384 8199 * from.
8385 8200 */
8386 8201 if (new_seg != NULL && new_seg != seg) {
8387 8202 /*
8388 8203 * Split segment
8389 8204 */
8390 8205 (void) segvn_split_seg(new_seg, eaddr);
8391 8206
8392 8207 /*
8393 8208 * Set policy for new segment
8394 8209 */
8395 8210 (void) lgrp_privm_policy_set(policy,
8396 8211 &new_svd->policy_info,
8397 8212 new_seg->s_size);
8398 8213 } else {
8399 8214 /*
8400 8215 * Split segment and return IE_REATTACH
8401 8216 * to tell as_ctl() that current
8402 8217 * segment changed
8403 8218 */
8404 8219 (void) segvn_split_seg(seg, eaddr);
8405 8220 err = IE_REATTACH;
8406 8221
8407 8222 (void) lgrp_privm_policy_set(policy,
8408 8223 &svd->policy_info, seg->s_size);
8409 8224
8410 8225 /*
8411 8226 * If new segment starts where old one
8412 8227 * did, try to concatenate it with
8413 8228 * previous segment.
8414 8229 */
8415 8230 if (addr == seg->s_base) {
8416 8231 prev = AS_SEGPREV(seg->s_as,
8417 8232 seg);
8418 8233
8419 8234 /*
8420 8235 * Drop lock for private data
8421 8236 * of current segment before
8422 8237 * concatenating (deleting) it
8423 8238 */
8424 8239 if (prev &&
8425 8240 prev->s_ops ==
8426 8241 &segvn_ops &&
8427 8242 addr == prev->s_base +
8428 8243 prev->s_size) {
8429 8244 SEGVN_LOCK_EXIT(
8430 8245 seg->s_as,
8431 8246 &svd->lock);
8432 8247 (void) segvn_concat(
8433 8248 prev, seg, 1);
8434 8249 return (err);
8435 8250 }
8436 8251 }
8437 8252 }
8438 8253 }
8439 8254 break;
8440 8255 case MADV_SEQUENTIAL:
8441 8256 ASSERT(seg->s_szc == 0);
8442 8257 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8443 8258 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8444 8259 /* FALLTHROUGH */
8445 8260 case MADV_NORMAL:
8446 8261 case MADV_RANDOM:
8447 8262 bvpp = &svd->vpage[page];
8448 8263 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8449 8264 for (; bvpp < evpp; bvpp++)
8450 8265 VPP_SETADVICE(bvpp, behav);
8451 8266 svd->advice = MADV_NORMAL;
8452 8267 break;
8453 8268 case MADV_WILLNEED: /* handled in memcntl */
8454 8269 case MADV_DONTNEED: /* handled in memcntl */
8455 8270 case MADV_FREE: /* handled above */
8456 8271 break;
8457 8272 default:
8458 8273 err = EINVAL;
8459 8274 }
8460 8275 }
8461 8276 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8462 8277 return (err);
8463 8278 }
8464 8279
8465 8280 /*
8466 8281 * Create a vpage structure for this seg.
8467 8282 */
8468 8283 static void
8469 8284 segvn_vpage(struct seg *seg)
8470 8285 {
8471 8286 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8472 8287 struct vpage *vp, *evp;
8473 8288
8474 8289 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8475 8290
8476 8291 /*
8477 8292 * If no vpage structure exists, allocate one. Copy the protections
8478 8293 * and the advice from the segment itself to the individual pages.
8479 8294 */
8480 8295 if (svd->vpage == NULL) {
8481 8296 svd->pageadvice = 1;
8482 8297 svd->vpage = kmem_zalloc(seg_pages(seg) * sizeof (struct vpage),
8483 8298 KM_SLEEP);
8484 8299 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8485 8300 for (vp = svd->vpage; vp < evp; vp++) {
8486 8301 VPP_SETPROT(vp, svd->prot);
8487 8302 VPP_SETADVICE(vp, svd->advice);
8488 8303 }
8489 8304 }
8490 8305 }
8491 8306
8492 8307 /*
8493 8308 * Dump the pages belonging to this segvn segment.
8494 8309 */
8495 8310 static void
8496 8311 segvn_dump(struct seg *seg)
8497 8312 {
8498 8313 struct segvn_data *svd;
8499 8314 page_t *pp;
8500 8315 struct anon_map *amp;
8501 8316 ulong_t anon_index;
8502 8317 struct vnode *vp;
8503 8318 u_offset_t off, offset;
8504 8319 pfn_t pfn;
8505 8320 pgcnt_t page, npages;
8506 8321 caddr_t addr;
8507 8322
8508 8323 npages = seg_pages(seg);
8509 8324 svd = (struct segvn_data *)seg->s_data;
8510 8325 vp = svd->vp;
8511 8326 off = offset = svd->offset;
8512 8327 addr = seg->s_base;
8513 8328
8514 8329 if ((amp = svd->amp) != NULL) {
8515 8330 anon_index = svd->anon_index;
8516 8331 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8517 8332 }
8518 8333
8519 8334 for (page = 0; page < npages; page++, offset += PAGESIZE) {
8520 8335 struct anon *ap;
8521 8336 int we_own_it = 0;
8522 8337
8523 8338 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8524 8339 swap_xlate_nopanic(ap, &vp, &off);
8525 8340 } else {
8526 8341 vp = svd->vp;
8527 8342 off = offset;
8528 8343 }
8529 8344
8530 8345 /*
8531 8346 * If pp == NULL, the page either does not exist
8532 8347 * or is exclusively locked. So determine if it
8533 8348 * exists before searching for it.
8534 8349 */
8535 8350
8536 8351 if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
8537 8352 we_own_it = 1;
8538 8353 else
8539 8354 pp = page_exists(vp, off);
8540 8355
8541 8356 if (pp) {
8542 8357 pfn = page_pptonum(pp);
8543 8358 dump_addpage(seg->s_as, addr, pfn);
8544 8359 if (we_own_it)
8545 8360 page_unlock(pp);
8546 8361 }
8547 8362 addr += PAGESIZE;
8548 8363 dump_timeleft = dump_timeout;
8549 8364 }
8550 8365
8551 8366 if (amp != NULL)
8552 8367 ANON_LOCK_EXIT(&amp->a_rwlock);
8553 8368 }
8554 8369
8555 8370 #ifdef DEBUG
8556 8371 static uint32_t segvn_pglock_mtbf = 0;
8557 8372 #endif
8558 8373
8559 8374 #define PCACHE_SHWLIST ((page_t *)-2)
8560 8375 #define NOPCACHE_SHWLIST ((page_t *)-1)
8561 8376
8562 8377 /*
8563 8378 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
8564 8379 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
8565 8380 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
8566 8381 * the same parts of the segment. Currently shadow list creation is only
8567 8382 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
8568 8383 * tagged with segment pointer, starting virtual address and length. This
8569 8384 * approach for MAP_SHARED segments may add many pcache entries for the same
8570 8385 * set of pages and lead to long hash chains that decrease pcache lookup
8571 8386 * performance. To avoid this issue for shared segments shared anon map and
8572 8387 * starting anon index are used for pcache entry tagging. This allows all
8573 8388 * segments to share pcache entries for the same anon range and reduces pcache
8574 8389 * chain's length as well as memory overhead from duplicate shadow lists and
8575 8390 * pcache entries.
8576 8391 *
8577 8392 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
8578 8393 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
8579 8394 * part of softlockcnt accounting is done differently for private and shared
8580 8395 * segments. In private segment case softlock is only incremented when a new
8581 8396 * shadow list is created but not when an existing one is found via
8582 8397 * seg_plookup(). pcache entries have reference count incremented/decremented
8583 8398 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
8584 8399 * reference count can be purged (and purging is needed before segment can be
8585 8400 * freed). When a private segment pcache entry is purged segvn_reclaim() will
8586 8401 * decrement softlockcnt. Since in private segment case each of its pcache
8587 8402 * entries only belongs to this segment we can expect that when
8588 8403 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8589 8404 * segment purge will succeed and softlockcnt will drop to 0. In shared
8590 8405 * segment case reference count in pcache entry counts active locks from many
8591 8406 * different segments so we can't expect segment purging to succeed even when
8592 8407 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8593 8408 * segment. To be able to determine when there're no pending pagelocks in
8594 8409 * shared segment case we don't rely on purging to make softlockcnt drop to 0
8595 8410 * but instead softlockcnt is incremented and decremented for every
8596 8411 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless of whether a new shadow
8597 8412 * list was created or an existing one was found. When softlockcnt drops to 0
8598 8413 * this segment no longer has any claims for pcached shadow lists and the
8599 8414 * segment can be freed even if there're still active pcache entries
8600 8415 * shared by this segment anon map. Shared segment pcache entries belong to
8601 8416 * anon map and are typically removed when anon map is freed after all
8602 8417 * processes destroy the segments that use this anon map.
8603 8418 */
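/*
 * Illustrative sketch (assumes a physio-style caller; do_io() is a
 * hypothetical stand-in, not code from this file): consumers reach this
 * routine through as_pagelock()/as_pageunlock(), bracketing the IO so
 * repeated transfers to the same buffer can reuse the pcache shadow list
 * built below:
 *
 *	struct page **pplist;
 *
 *	if (as_pagelock(as, &pplist, uaddr, ulen, S_WRITE) == 0) {
 *		error = do_io(pplist, uaddr, ulen);
 *		as_pageunlock(as, pplist, uaddr, ulen, S_WRITE);
 *	}
 *
 * as_pageunlock() must be given the same uaddr/ulen as as_pagelock() so
 * that the large page adjusted range recomputed here matches the range
 * that was actually locked.
 */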
8604 8419 static int
8605 8420 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
8606 8421 enum lock_type type, enum seg_rw rw)
8607 8422 {
8608 8423 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8609 8424 size_t np;
8610 8425 pgcnt_t adjustpages;
8611 8426 pgcnt_t npages;
8612 8427 ulong_t anon_index;
8613 8428 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE;
8614 8429 uint_t error;
8615 8430 struct anon_map *amp;
8616 8431 pgcnt_t anpgcnt;
8617 8432 struct page **pplist, **pl, *pp;
8618 8433 caddr_t a;
8619 8434 size_t page;
8620 8435 caddr_t lpgaddr, lpgeaddr;
8621 8436 anon_sync_obj_t cookie;
8622 8437 int anlock;
8623 8438 struct anon_map *pamp;
8624 8439 caddr_t paddr;
8625 8440 seg_preclaim_cbfunc_t preclaim_callback;
8626 8441 size_t pgsz;
8627 8442 int use_pcache;
8628 8443 size_t wlen;
8629 8444 uint_t pflags = 0;
8630 8445 int sftlck_sbase = 0;
8631 8446 int sftlck_send = 0;
8632 8447
8633 8448 #ifdef DEBUG
8634 8449 if (type == L_PAGELOCK && segvn_pglock_mtbf) {
8635 8450 hrtime_t ts = gethrtime();
8636 8451 if ((ts % segvn_pglock_mtbf) == 0) {
8637 8452 return (ENOTSUP);
8638 8453 }
8639 8454 if ((ts % segvn_pglock_mtbf) == 1) {
8640 8455 return (EFAULT);
8641 8456 }
8642 8457 }
8643 8458 #endif
8644 8459
8645 8460 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START,
8646 8461 "segvn_pagelock: start seg %p addr %p", seg, addr);
8647 8462
8648 8463 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8649 8464 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
8650 8465
8651 8466 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8652 8467
8653 8468 /*
8654 8469 * for now we only support pagelock to anon memory. We would have to
8655 8470 * check protections for vnode objects and call into the vnode driver.
8656 8471 * That's too much for a fast path. Let the fault entry point handle
8657 8472 * it.
8658 8473 */
8659 8474 if (svd->vp != NULL) {
8660 8475 if (type == L_PAGELOCK) {
8661 8476 error = ENOTSUP;
8662 8477 goto out;
8663 8478 }
8664 8479 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8665 8480 }
8666 8481 if ((amp = svd->amp) == NULL) {
8667 8482 if (type == L_PAGELOCK) {
8668 8483 error = EFAULT;
8669 8484 goto out;
8670 8485 }
8671 8486 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8672 8487 }
8673 8488 if (rw != S_READ && rw != S_WRITE) {
8674 8489 if (type == L_PAGELOCK) {
8675 8490 error = ENOTSUP;
8676 8491 goto out;
8677 8492 }
8678 8493 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8679 8494 }
8680 8495
8681 8496 if (seg->s_szc != 0) {
8682 8497 /*
8683 8498 * We are adjusting the pagelock region to the large page size
8684 8499 * boundary because the unlocked part of a large page cannot
8685 8500 * be freed anyway unless all constituent pages of a large
8686 8501 * page are locked. Bigger regions reduce pcache chain length
8687 8502 * and improve lookup performance. The tradeoff is that the
8688 8503 * very first segvn_pagelock() call for a given page is more
8689 8504 * expensive if only 1 page_t is needed for IO. This is only
8690 8505 * an issue if pcache entry doesn't get reused by several
8691 8506 * subsequent calls. We optimize here for the case when pcache
8692 8507 * is heavily used by repeated IOs to the same address range.
8693 8508 *
8694 8509 * Note segment's page size cannot change while we are holding
8695 8510 * as lock. And then it cannot change while softlockcnt is
8696 8511 * not 0. This will allow us to correctly recalculate large
8697 8512 * page size region for the matching pageunlock/reclaim call
8698 8513 * since as_pageunlock() caller must always match
8699 8514 * as_pagelock() call's addr and len.
8700 8515 *
8701 8516 * For pageunlock *ppp points to the pointer of page_t that
8702 8517 * corresponds to the real unadjusted start address. Similar
8703 8518 * for pagelock *ppp must point to the pointer of page_t that
8704 8519 * corresponds to the real unadjusted start address.
8705 8520 */
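/*
 * Worked example (hypothetical numbers): with an s_szc giving
 * pgsz = 4M, addr = s_base + 5M and len = 2M, CALC_LPG_REGION below
 * yields lpgaddr = s_base + 4M and lpgeaddr = s_base + 8M, so
 * adjustpages = btop(1M) and *ppp is later set to point that many
 * page_t pointers past the start of the shadow list.
 */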
8706 8521 pgsz = page_get_pagesize(seg->s_szc);
8707 8522 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
8708 8523 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8709 8524 } else if (len < segvn_pglock_comb_thrshld) {
8710 8525 lpgaddr = addr;
8711 8526 lpgeaddr = addr + len;
8712 8527 adjustpages = 0;
8713 8528 pgsz = PAGESIZE;
8714 8529 } else {
8715 8530 /*
8716 8531 * Align the address range of large enough requests to allow
8717 8532 * combining of different shadow lists into 1 to reduce memory
8718 8533 * overhead from potentially overlapping large shadow lists
8719 8534 * (worst case is we have a 1MB IO into buffers with start
8720 8535 * addresses separated by 4K). Alignment is only possible if
8721 8536 * padded chunks have sufficient access permissions. Note
8722 8537 * permissions won't change between L_PAGELOCK and
8723 8538 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8724 8539 * segvn_setprot() to wait until softlockcnt drops to 0. This
8725 8540 * allows us to determine in L_PAGEUNLOCK the same range we
8726 8541 * computed in L_PAGELOCK.
8727 8542 *
8728 8543 * If alignment is limited by segment ends set
8729 8544 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8730 8545 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8731 8546 * per segment counters. In L_PAGEUNLOCK case decrease
8732 8547 * softlockcnt_sbase/softlockcnt_send counters if
8733 8548 * sftlck_sbase/sftlck_send flags are set. When
8734 8549 * softlockcnt_sbase/softlockcnt_send are non 0
8735 8550 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8736 8551 * won't merge the segments. This restriction combined with
8737 8552 * restriction on segment unmapping and splitting for segments
8738 8553 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8739 8554 * correctly determine the same range that was previously
8740 8555 * locked by matching L_PAGELOCK.
8741 8556 */
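/*
 * Illustrative effect of the alignment (hypothetical numbers): two 1M
 * IOs into buffers whose start addresses differ by only 4K would
 * otherwise insert two almost fully overlapping ~1M shadow lists;
 * aligning both requests to segvn_pglock_comb_balign lets them share a
 * single pcache entry covering the padded range.
 */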
8742 8557 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16);
8743 8558 pgsz = PAGESIZE;
8744 8559 if (svd->type == MAP_PRIVATE) {
8745 8560 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
8746 8561 segvn_pglock_comb_balign);
8747 8562 if (lpgaddr < seg->s_base) {
8748 8563 lpgaddr = seg->s_base;
8749 8564 sftlck_sbase = 1;
8750 8565 }
8751 8566 } else {
8752 8567 ulong_t aix = svd->anon_index + seg_page(seg, addr);
8753 8568 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign);
8754 8569 if (aaix < svd->anon_index) {
8755 8570 lpgaddr = seg->s_base;
8756 8571 sftlck_sbase = 1;
8757 8572 } else {
8758 8573 lpgaddr = addr - ptob(aix - aaix);
8759 8574 ASSERT(lpgaddr >= seg->s_base);
8760 8575 }
8761 8576 }
8762 8577 if (svd->pageprot && lpgaddr != addr) {
8763 8578 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
8764 8579 struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8765 8580 while (vp < evp) {
8766 8581 if ((VPP_PROT(vp) & protchk) == 0) {
8767 8582 break;
8768 8583 }
8769 8584 vp++;
8770 8585 }
8771 8586 if (vp < evp) {
8772 8587 lpgaddr = addr;
8773 8588 pflags = 0;
8774 8589 }
8775 8590 }
8776 8591 lpgeaddr = addr + len;
8777 8592 if (pflags) {
8778 8593 if (svd->type == MAP_PRIVATE) {
8779 8594 lpgeaddr = (caddr_t)P2ROUNDUP(
8780 8595 (uintptr_t)lpgeaddr,
8781 8596 segvn_pglock_comb_balign);
8782 8597 } else {
8783 8598 ulong_t aix = svd->anon_index +
8784 8599 seg_page(seg, lpgeaddr);
8785 8600 ulong_t aaix = P2ROUNDUP(aix,
8786 8601 segvn_pglock_comb_palign);
8787 8602 if (aaix < aix) {
8788 8603 lpgeaddr = 0;
8789 8604 } else {
8790 8605 lpgeaddr += ptob(aaix - aix);
8791 8606 }
8792 8607 }
8793 8608 if (lpgeaddr == 0 ||
8794 8609 lpgeaddr > seg->s_base + seg->s_size) {
8795 8610 lpgeaddr = seg->s_base + seg->s_size;
8796 8611 sftlck_send = 1;
8797 8612 }
8798 8613 }
8799 8614 if (svd->pageprot && lpgeaddr != addr + len) {
8800 8615 struct vpage *vp;
8801 8616 struct vpage *evp;
8802 8617
8803 8618 vp = &svd->vpage[seg_page(seg, addr + len)];
8804 8619 evp = &svd->vpage[seg_page(seg, lpgeaddr)];
8805 8620
8806 8621 while (vp < evp) {
8807 8622 if ((VPP_PROT(vp) & protchk) == 0) {
8808 8623 break;
8809 8624 }
8810 8625 vp++;
8811 8626 }
8812 8627 if (vp < evp) {
8813 8628 lpgeaddr = addr + len;
8814 8629 }
8815 8630 }
8816 8631 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8817 8632 }
8818 8633
8819 8634 /*
8820 8635 * For MAP_SHARED segments we create pcache entries tagged by amp and
8821 8636 * anon index so that we can share pcache entries with other segments
8822 8637 * that map this amp. For private segments pcache entries are tagged
8823 8638 * with segment and virtual address.
8824 8639 */
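/*
 * Illustrative consequence of the shared tagging (hypothetical
 * addresses, same anon_index assumed): if two processes map the same
 * amp at 0x10000 and 0x50000 respectively, an IO at offset 0x2000 into
 * either mapping produces the same (pamp, paddr) pair, so seg_plookup()
 * in one process can hit the shadow list inserted by the other.
 */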
8825 8640 if (svd->type == MAP_SHARED) {
8826 8641 pamp = amp;
8827 8642 paddr = (caddr_t)((lpgaddr - seg->s_base) +
8828 8643 ptob(svd->anon_index));
8829 8644 preclaim_callback = shamp_reclaim;
8830 8645 } else {
8831 8646 pamp = NULL;
8832 8647 paddr = lpgaddr;
8833 8648 preclaim_callback = segvn_reclaim;
8834 8649 }
8835 8650
8836 8651 if (type == L_PAGEUNLOCK) {
8837 8652 VM_STAT_ADD(segvnvmstats.pagelock[0]);
8838 8653
8839 8654 /*
8840 8655 * update hat ref bits for /proc. We need to make sure
8841 8656 * that threads tracing the ref and mod bits of the
8842 8657 * address space get the right data.
8843 8658 * Note: page ref and mod bits are updated at reclaim time
8844 8659 */
8845 8660 if (seg->s_as->a_vbits) {
8846 8661 for (a = addr; a < addr + len; a += PAGESIZE) {
8847 8662 if (rw == S_WRITE) {
8848 8663 hat_setstat(seg->s_as, a,
8849 8664 PAGESIZE, P_REF | P_MOD);
8850 8665 } else {
8851 8666 hat_setstat(seg->s_as, a,
8852 8667 PAGESIZE, P_REF);
8853 8668 }
8854 8669 }
8855 8670 }
8856 8671
8857 8672 /*
8858 8673 * Check the shadow list entry after the last page used in
8859 8674 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
8860 8675 * was not inserted into pcache and is not large page
8861 8676 * adjusted. In this case call reclaim callback directly and
8862 8677 * don't adjust the shadow list start and size for large
8863 8678 * pages.
8864 8679 */
8865 8680 npages = btop(len);
8866 8681 if ((*ppp)[npages] == NOPCACHE_SHWLIST) {
8867 8682 void *ptag;
8868 8683 if (pamp != NULL) {
8869 8684 ASSERT(svd->type == MAP_SHARED);
8870 8685 ptag = (void *)pamp;
8871 8686 paddr = (caddr_t)((addr - seg->s_base) +
8872 8687 ptob(svd->anon_index));
8873 8688 } else {
8874 8689 ptag = (void *)seg;
8875 8690 paddr = addr;
8876 8691 }
8877 8692 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
8878 8693 } else {
8879 8694 ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
8880 8695 IS_SWAPFSVP((*ppp)[npages]->p_vnode));
8881 8696 len = lpgeaddr - lpgaddr;
8882 8697 npages = btop(len);
8883 8698 seg_pinactive(seg, pamp, paddr, len,
8884 8699 *ppp - adjustpages, rw, pflags, preclaim_callback);
8885 8700 }
8886 8701
8887 8702 if (pamp != NULL) {
8888 8703 ASSERT(svd->type == MAP_SHARED);
8889 8704 ASSERT(svd->softlockcnt >= npages);
8890 8705 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
8891 8706 }
8892 8707
8893 8708 if (sftlck_sbase) {
8894 8709 ASSERT(svd->softlockcnt_sbase > 0);
8895 8710 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, -1);
8896 8711 }
8897 8712 if (sftlck_send) {
8898 8713 ASSERT(svd->softlockcnt_send > 0);
8899 8714 atomic_add_long((ulong_t *)&svd->softlockcnt_send, -1);
8900 8715 }
8901 8716
8902 8717 /*
8903 8718 * If someone is blocked while unmapping, we purge
8904 8719 * segment page cache and thus reclaim pplist synchronously
8905 8720 * without waiting for seg_pasync_thread. This speeds up
8906 8721 * unmapping in cases where munmap(2) is called, while
8907 8722 * raw async i/o is still in progress or where a thread
8908 8723 * exits on data fault in a multithreaded application.
8909 8724 */
8910 8725 if (AS_ISUNMAPWAIT(seg->s_as)) {
8911 8726 if (svd->softlockcnt == 0) {
8912 8727 mutex_enter(&seg->s_as->a_contents);
8913 8728 if (AS_ISUNMAPWAIT(seg->s_as)) {
8914 8729 AS_CLRUNMAPWAIT(seg->s_as);
8915 8730 cv_broadcast(&seg->s_as->a_cv);
8916 8731 }
8917 8732 mutex_exit(&seg->s_as->a_contents);
8918 8733 } else if (pamp == NULL) {
8919 8734 /*
8920 8735 * softlockcnt is not 0 and this is a
8921 8736 * MAP_PRIVATE segment. Try to purge its
8922 8737 * pcache entries to reduce softlockcnt.
8923 8738 * If it drops to 0 segvn_reclaim()
8924 8739 * will wake up a thread waiting on
8925 8740 * unmapwait flag.
8926 8741 *
8927 8742 * We don't purge MAP_SHARED segments with non
8928 8743 * 0 softlockcnt since IO is still in progress
8929 8744 * for such segments.
8930 8745 */
8931 8746 ASSERT(svd->type == MAP_PRIVATE);
8932 8747 segvn_purge(seg);
8933 8748 }
8934 8749 }
8935 8750 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8936 8751 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END,
8937 8752 "segvn_pagelock: unlock seg %p addr %p", seg, addr);
8938 8753 return (0);
8939 8754 }
8940 8755
8941 8756 /* The L_PAGELOCK case ... */
8942 8757
8943 8758 VM_STAT_ADD(segvnvmstats.pagelock[1]);
8944 8759
8945 8760 /*
8946 8761 * For MAP_SHARED segments we have to check protections before
8947 8762 * seg_plookup() since pcache entries may be shared by many segments
8948 8763 * with potentially different page protections.
8949 8764 */
8950 8765 if (pamp != NULL) {
8951 8766 ASSERT(svd->type == MAP_SHARED);
8952 8767 if (svd->pageprot == 0) {
8953 8768 if ((svd->prot & protchk) == 0) {
8954 8769 error = EACCES;
8955 8770 goto out;
8956 8771 }
8957 8772 } else {
8958 8773 /*
8959 8774 * check page protections
8960 8775 */
8961 8776 caddr_t ea;
8962 8777
8963 8778 if (seg->s_szc) {
8964 8779 a = lpgaddr;
8965 8780 ea = lpgeaddr;
8966 8781 } else {
8967 8782 a = addr;
8968 8783 ea = addr + len;
8969 8784 }
8970 8785 for (; a < ea; a += pgsz) {
8971 8786 struct vpage *vp;
8972 8787
8973 8788 ASSERT(seg->s_szc == 0 ||
8974 8789 sameprot(seg, a, pgsz));
8975 8790 vp = &svd->vpage[seg_page(seg, a)];
8976 8791 if ((VPP_PROT(vp) & protchk) == 0) {
8977 8792 error = EACCES;
8978 8793 goto out;
8979 8794 }
8980 8795 }
8981 8796 }
8982 8797 }
8983 8798
8984 8799 /*
8985 8800 * try to find pages in segment page cache
8986 8801 */
8987 8802 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
8988 8803 if (pplist != NULL) {
8989 8804 if (pamp != NULL) {
8990 8805 npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
8991 8806 ASSERT(svd->type == MAP_SHARED);
8992 8807 atomic_add_long((ulong_t *)&svd->softlockcnt,
8993 8808 npages);
8994 8809 }
8995 8810 if (sftlck_sbase) {
8996 8811 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1);
8997 8812 }
8998 8813 if (sftlck_send) {
8999 8814 atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1);
9000 8815 }
9001 8816 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9002 8817 *ppp = pplist + adjustpages;
9003 8818 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
9004 8819 "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
9005 8820 return (0);
9006 8821 }
9007 8822
9008 8823 /*
9009 8824 * For MAP_SHARED segments we already verified above that segment
9010 8825 * protections allow this pagelock operation.
9011 8826 */
9012 8827 if (pamp == NULL) {
9013 8828 ASSERT(svd->type == MAP_PRIVATE);
9014 8829 if (svd->pageprot == 0) {
9015 8830 if ((svd->prot & protchk) == 0) {
9016 8831 error = EACCES;
9017 8832 goto out;
9018 8833 }
9019 8834 if (svd->prot & PROT_WRITE) {
9020 8835 wlen = lpgeaddr - lpgaddr;
9021 8836 } else {
9022 8837 wlen = 0;
9023 8838 ASSERT(rw == S_READ);
9024 8839 }
9025 8840 } else {
9026 8841 int wcont = 1;
9027 8842 /*
9028 8843 * check page protections
9029 8844 */
9030 8845 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
9031 8846 struct vpage *vp;
9032 8847
9033 8848 ASSERT(seg->s_szc == 0 ||
9034 8849 sameprot(seg, a, pgsz));
9035 8850 vp = &svd->vpage[seg_page(seg, a)];
9036 8851 if ((VPP_PROT(vp) & protchk) == 0) {
9037 8852 error = EACCES;
9038 8853 goto out;
9039 8854 }
9040 8855 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
9041 8856 wlen += pgsz;
9042 8857 } else {
9043 8858 wcont = 0;
9044 8859 ASSERT(rw == S_READ);
9045 8860 }
9046 8861 }
9047 8862 }
9048 8863 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
9049 8864 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
9050 8865 }
9051 8866
9052 8867 /*
9053 8868 * Only build large page adjusted shadow list if we expect to insert
9054 8869 * it into pcache. For large enough pages it's a big overhead to
9055 8870 * create a shadow list of the entire large page. But this overhead
9056 8871 * should be amortized over repeated pcache hits on subsequent reuse
9057 8872 * of this shadow list (IO into any range within this shadow list will
9058 8873 * find it in pcache since we large page align the request for pcache
9059 8874 * lookups). pcache performance is improved with bigger shadow lists
9060 8875 * as it reduces the time to pcache the entire big segment and reduces
9061 8876 * pcache chain length.
9062 8877 */
9063 8878 if (seg_pinsert_check(seg, pamp, paddr,
9064 8879 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
9065 8880 addr = lpgaddr;
9066 8881 len = lpgeaddr - lpgaddr;
9067 8882 use_pcache = 1;
9068 8883 } else {
9069 8884 use_pcache = 0;
9070 8885 /*
9071 8886 * Since this entry will not be inserted into the pcache, we
9072 8887 * will not do any adjustments to the starting address or
9073 8888 * size of the memory to be locked.
9074 8889 */
9075 8890 adjustpages = 0;
9076 8891 }
9077 8892 npages = btop(len);
9078 8893
9079 8894 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
9080 8895 pl = pplist;
9081 8896 *ppp = pplist + adjustpages;
9082 8897 /*
9083 8898 * If use_pcache is 0 this shadow list is not large page adjusted.
9084 8899 * Record this info in the last entry of shadow array so that
9085 8900 * L_PAGEUNLOCK can determine if it should large page adjust the
9086 8901 * address range to find the real range that was locked.
9087 8902 */
9088 8903 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;
9089 8904
9090 8905 page = seg_page(seg, addr);
9091 8906 anon_index = svd->anon_index + page;
9092 8907
9093 8908 anlock = 0;
9094 8909 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9095 8910 ASSERT(amp->a_szc >= seg->s_szc);
9096 8911 anpgcnt = page_get_pagecnt(amp->a_szc);
9097 8912 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
9098 8913 struct anon *ap;
9099 8914 struct vnode *vp;
9100 8915 u_offset_t off;
9101 8916
9102 8917 /*
9103 8918 * Lock and unlock anon array only once per large page.
9104 8919 * anon_array_enter() locks the root anon slot according to
9105 8920 * a_szc which can't change while anon map is locked. We lock
9106 8921 * anon the first time through this loop and each time we
9107 8922 * reach anon index that corresponds to a root of a large
9108 8923 * page.
9109 8924 */
9110 8925 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
9111 8926 ASSERT(anlock == 0);
9112 8927 anon_array_enter(amp, anon_index, &cookie);
9113 8928 anlock = 1;
9114 8929 }
9115 8930 ap = anon_get_ptr(amp->ahp, anon_index);
9116 8931
9117 8932 /*
9118 8933 * We must never use seg_pcache for COW pages
9119 8934 * because we might end up with original page still
9120 8935 * lying in seg_pcache even after private page is
9121 8936 * created. This leads to data corruption as
9122 8937 * aio_write refers to the page still in cache
9123 8938 * while all other accesses refer to the private
9124 8939 * page.
9125 8940 */
9126 8941 if (ap == NULL || ap->an_refcnt != 1) {
9127 8942 struct vpage *vpage;
9128 8943
9129 8944 if (seg->s_szc) {
9130 8945 error = EFAULT;
9131 8946 break;
9132 8947 }
9133 8948 if (svd->vpage != NULL) {
9134 8949 vpage = &svd->vpage[seg_page(seg, a)];
9135 8950 } else {
9136 8951 vpage = NULL;
9137 8952 }
9138 8953 ASSERT(anlock);
9139 8954 anon_array_exit(&cookie);
9140 8955 anlock = 0;
9141 8956 pp = NULL;
9142 8957 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
9143 8958 vpage, &pp, 0, F_INVAL, rw, 1);
9144 8959 if (error) {
9145 8960 error = fc_decode(error);
9146 8961 break;
9147 8962 }
9148 8963 anon_array_enter(amp, anon_index, &cookie);
9149 8964 anlock = 1;
9150 8965 ap = anon_get_ptr(amp->ahp, anon_index);
9151 8966 if (ap == NULL || ap->an_refcnt != 1) {
9152 8967 error = EFAULT;
9153 8968 break;
9154 8969 }
9155 8970 }
9156 8971 swap_xlate(ap, &vp, &off);
9157 8972 pp = page_lookup_nowait(vp, off, SE_SHARED);
9158 8973 if (pp == NULL) {
9159 8974 error = EFAULT;
9160 8975 break;
9161 8976 }
9162 8977 if (ap->an_pvp != NULL) {
9163 8978 anon_swap_free(ap, pp);
9164 8979 }
9165 8980 /*
9166 8981 * Unlock anon if this is the last slot in a large page.
9167 8982 */
9168 8983 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
9169 8984 ASSERT(anlock);
9170 8985 anon_array_exit(&cookie);
9171 8986 anlock = 0;
9172 8987 }
9173 8988 *pplist++ = pp;
9174 8989 }
9175 8990 if (anlock) { /* Ensure the lock is dropped */
9176 8991 anon_array_exit(&cookie);
9177 8992 }
9178 8993 ANON_LOCK_EXIT(&amp->a_rwlock);
9179 8994
9180 8995 if (a >= addr + len) {
9181 8996 atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9182 8997 if (pamp != NULL) {
9183 8998 ASSERT(svd->type == MAP_SHARED);
9184 8999 atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
9185 9000 npages);
9186 9001 wlen = len;
9187 9002 }
9188 9003 if (sftlck_sbase) {
9189 9004 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1);
9190 9005 }
9191 9006 if (sftlck_send) {
9192 9007 atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1);
9193 9008 }
9194 9009 if (use_pcache) {
9195 9010 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
9196 9011 rw, pflags, preclaim_callback);
9197 9012 }
9198 9013 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9199 9014 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
9200 9015 "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9201 9016 return (0);
9202 9017 }
9203 9018
9204 9019 pplist = pl;
9205 9020 np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
9206 9021 while (np > (uint_t)0) {
9207 9022 ASSERT(PAGE_LOCKED(*pplist));
9208 9023 page_unlock(*pplist);
9209 9024 np--;
9210 9025 pplist++;
9211 9026 }
9212 9027 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9213 9028 out:
9214 9029 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9215 9030 *ppp = NULL;
9216 9031 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
9217 9032 "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
9218 9033 return (error);
9219 9034 }
9220 9035
9221 9036 /*
9222 9037 * purge any cached pages in the I/O page cache
9223 9038 */
9224 9039 static void
9225 9040 segvn_purge(struct seg *seg)
9226 9041 {
9227 9042 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9228 9043
9229 9044 /*
9230 9045 * pcache is only used by pure anon segments.
9231 9046 */
9232 9047 if (svd->amp == NULL || svd->vp != NULL) {
9233 9048 return;
9234 9049 }
9235 9050
9236 9051 /*
9237 9052 * For MAP_SHARED segments non 0 segment's softlockcnt means
9238 9053 * active IO is still in progress via this segment. So we only
9239 9054 * purge MAP_SHARED segments when their softlockcnt is 0.
9240 9055 */
9241 9056 if (svd->type == MAP_PRIVATE) {
9242 9057 if (svd->softlockcnt) {
9243 9058 seg_ppurge(seg, NULL, 0);
9244 9059 }
9245 9060 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
9246 9061 seg_ppurge(seg, svd->amp, 0);
9247 9062 }
9248 9063 }
9249 9064
9250 9065 /*
9251 9066 * If async argument is not 0 we are called from pcache async thread and don't
9252 9067 * hold AS lock.
9253 9068 */
9254 9069
9255 9070 /*ARGSUSED*/
9256 9071 static int
9257 9072 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9258 9073 enum seg_rw rw, int async)
9259 9074 {
9260 9075 struct seg *seg = (struct seg *)ptag;
9261 9076 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9262 9077 pgcnt_t np, npages;
9263 9078 struct page **pl;
9264 9079
9265 9080 npages = np = btop(len);
9266 9081 ASSERT(npages);
9267 9082
9268 9083 ASSERT(svd->vp == NULL && svd->amp != NULL);
9269 9084 ASSERT(svd->softlockcnt >= npages);
9270 9085 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9271 9086
9272 9087 pl = pplist;
9273 9088
9274 9089 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9275 9090 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9276 9091
9277 9092 while (np > (uint_t)0) {
9278 9093 if (rw == S_WRITE) {
9279 9094 hat_setrefmod(*pplist);
9280 9095 } else {
9281 9096 hat_setref(*pplist);
9282 9097 }
9283 9098 page_unlock(*pplist);
9284 9099 np--;
9285 9100 pplist++;
9286 9101 }
9287 9102
9288 9103 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9289 9104
9290 9105 /*
9291 9106 * If we are pcache async thread we don't hold AS lock. This means if
9292 9107 * softlockcnt drops to 0 after the decrement below, address space may
9293 9108 * get freed. We can't allow it since after softlock decrement to 0 we
9294 9109 * still need to access as structure for possible wakeup of unmap
9295 9110 * waiters. To prevent the disappearance of as we take this segment
9296 9111 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
9297 9112 * make sure this routine completes before segment is freed.
9298 9113 *
9299 9114 * The second complication we have to deal with in async case is a
9300 9115 * possibility of missed wake up of unmap wait thread. When we don't
9301 9116 * hold as lock here we may take a_contents lock before unmap wait
9302 9117 * thread that was first to see softlockcnt was still not 0. As a
9303 9118 * result we'll fail to wake up an unmap wait thread. To avoid this
9304 9119 * race we set nounmapwait flag in as structure if we drop softlockcnt
9305 9120 * to 0 when we were called by pcache async thread. unmapwait thread
9306 9121 * will not block if this flag is set.
9307 9122 */
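/*
 * Sketch of the race the flag closes (hedged timeline, assuming one
 * unmap thread and one pcache async thread):
 *
 *	unmap thread:	sees softlockcnt != 0, prepares to wait
 *	async thread:	drops softlockcnt to 0, broadcasts a_cv
 *			(no waiter registered yet)
 *	unmap thread:	sets unmapwait and sleeps; the wakeup is missed
 *
 * Setting nounmapwait under a_contents makes the unmap wait thread
 * recheck instead of blocking, so the missed broadcast is harmless.
 */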
9308 9123 if (async) {
9309 9124 mutex_enter(&svd->segfree_syncmtx);
9310 9125 }
9311 9126
9312 9127 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9313 9128 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
9314 9129 mutex_enter(&seg->s_as->a_contents);
9315 9130 if (async) {
9316 9131 AS_SETNOUNMAPWAIT(seg->s_as);
9317 9132 }
9318 9133 if (AS_ISUNMAPWAIT(seg->s_as)) {
9319 9134 AS_CLRUNMAPWAIT(seg->s_as);
9320 9135 cv_broadcast(&seg->s_as->a_cv);
9321 9136 }
9322 9137 mutex_exit(&seg->s_as->a_contents);
9323 9138 }
9324 9139 }
9325 9140
9326 9141 if (async) {
9327 9142 mutex_exit(&svd->segfree_syncmtx);
9328 9143 }
9329 9144 return (0);
9330 9145 }
9331 9146
9332 9147 /*ARGSUSED*/
9333 9148 static int
9334 9149 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9335 9150 enum seg_rw rw, int async)
9336 9151 {
9337 9152 amp_t *amp = (amp_t *)ptag;
9338 9153 pgcnt_t np, npages;
9339 9154 struct page **pl;
9340 9155
9341 9156 npages = np = btop(len);
9342 9157 ASSERT(npages);
9343 9158 ASSERT(amp->a_softlockcnt >= npages);
9344 9159
9345 9160 pl = pplist;
9346 9161
9347 9162 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9348 9163 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9349 9164
9350 9165 while (np > (uint_t)0) {
9351 9166 if (rw == S_WRITE) {
9352 9167 hat_setrefmod(*pplist);
9353 9168 } else {
9354 9169 hat_setref(*pplist);
9355 9170 }
9356 9171 page_unlock(*pplist);
9357 9172 np--;
9358 9173 pplist++;
9359 9174 }
9360 9175
9361 9176 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9362 9177
9363 9178 /*
9364 9179 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
9365 9180 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
9366 9181 * and anonmap_purge() acquires a_purgemtx.
9367 9182 */
9368 9183 mutex_enter(&amp->a_purgemtx);
9369 9184 if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
9370 9185 amp->a_purgewait) {
9371 9186 amp->a_purgewait = 0;
9372 9187 cv_broadcast(&amp->a_purgecv);
9373 9188 }
9374 9189 mutex_exit(&amp->a_purgemtx);
9375 9190 return (0);
9376 9191 }
9377 9192
9378 9193 /*
9379 9194 * get a memory ID for an addr in a given segment
9380 9195 *
9381 9196 * XXX only creates PAGESIZE pages if anon slots are not initialized.
9382 9197 * At fault time they will be relocated into larger pages.
9383 9198 */
9384 9199 static int
9385 9200 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
9386 9201 {
9387 9202 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9388 9203 struct anon *ap = NULL;
9389 9204 ulong_t anon_index;
9390 9205 struct anon_map *amp;
9391 9206 anon_sync_obj_t cookie;
9392 9207
9393 9208 if (svd->type == MAP_PRIVATE) {
9394 9209 memidp->val[0] = (uintptr_t)seg->s_as;
9395 9210 memidp->val[1] = (uintptr_t)addr;
9396 9211 return (0);
9397 9212 }
9398 9213
9399 9214 if (svd->type == MAP_SHARED) {
9400 9215 if (svd->vp) {
9401 9216 memidp->val[0] = (uintptr_t)svd->vp;
9402 9217 memidp->val[1] = (u_longlong_t)svd->offset +
9403 9218 (uintptr_t)(addr - seg->s_base);
9404 9219 return (0);
9405 9220 } else {
9406 9221
9407 9222 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
9408 9223 if ((amp = svd->amp) != NULL) {
9409 9224 anon_index = svd->anon_index +
9410 9225 seg_page(seg, addr);
9411 9226 }
9412 9227 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9413 9228
9414 9229 ASSERT(amp != NULL);
9415 9230
9416 9231 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9417 9232 anon_array_enter(amp, anon_index, &cookie);
9418 9233 ap = anon_get_ptr(amp->ahp, anon_index);
9419 9234 if (ap == NULL) {
9420 9235 page_t *pp;
9421 9236
9422 9237 pp = anon_zero(seg, addr, &ap, svd->cred);
9423 9238 if (pp == NULL) {
9424 9239 anon_array_exit(&cookie);
9425 9240 ANON_LOCK_EXIT(&amp->a_rwlock);
9426 9241 return (ENOMEM);
9427 9242 }
9428 9243 ASSERT(anon_get_ptr(amp->ahp, anon_index)
9429 9244 == NULL);
9430 9245 (void) anon_set_ptr(amp->ahp, anon_index,
9431 9246 ap, ANON_SLEEP);
9432 9247 page_unlock(pp);
9433 9248 }
9434 9249
9435 9250 anon_array_exit(&cookie);
9436 9251 ANON_LOCK_EXIT(&amp->a_rwlock);
9437 9252
9438 9253 memidp->val[0] = (uintptr_t)ap;
9439 9254 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
9440 9255 return (0);
9441 9256 }
9442 9257 }
9443 9258 return (EINVAL);
9444 9259 }
9445 9260
9446 9261 static int
9447 9262 sameprot(struct seg *seg, caddr_t a, size_t len)
9448 9263 {
9449 9264 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9450 9265 struct vpage *vpage;
9451 9266 spgcnt_t pages = btop(len);
9452 9267 uint_t prot;
9453 9268
9454 9269 if (svd->pageprot == 0)
9455 9270 return (1);
9456 9271
9457 9272 ASSERT(svd->vpage != NULL);
9458 9273
9459 9274 vpage = &svd->vpage[seg_page(seg, a)];
9460 9275 prot = VPP_PROT(vpage);
9461 9276 vpage++;
9462 9277 pages--;
9463 9278 while (pages-- > 0) {
9464 9279 if (prot != VPP_PROT(vpage))
9465 9280 return (0);
9466 9281 vpage++;
9467 9282 }
9468 9283 return (1);
9469 9284 }
9470 9285
9471 9286 /*
9472 9287 * Get memory allocation policy info for specified address in given segment
9473 9288 */
9474 9289 static lgrp_mem_policy_info_t *
9475 9290 segvn_getpolicy(struct seg *seg, caddr_t addr)
9476 9291 {
9477 9292 struct anon_map *amp;
9478 9293 ulong_t anon_index;
9479 9294 lgrp_mem_policy_info_t *policy_info;
9480 9295 struct segvn_data *svn_data;
9481 9296 u_offset_t vn_off;
9482 9297 vnode_t *vp;
9483 9298
9484 9299 ASSERT(seg != NULL);
9485 9300
9486 9301 svn_data = (struct segvn_data *)seg->s_data;
9487 9302 if (svn_data == NULL)
9488 9303 return (NULL);
9489 9304
9490 9305 /*
9491 9306 * Get policy info for private or shared memory
9492 9307 */
9493 9308 if (svn_data->type != MAP_SHARED) {
9494 9309 if (svn_data->tr_state != SEGVN_TR_ON) {
9495 9310 policy_info = &svn_data->policy_info;
9496 9311 } else {
9497 9312 policy_info = &svn_data->tr_policy_info;
9498 9313 ASSERT(policy_info->mem_policy ==
9499 9314 LGRP_MEM_POLICY_NEXT_SEG);
9500 9315 }
9501 9316 } else {
9502 9317 amp = svn_data->amp;
9503 9318 anon_index = svn_data->anon_index + seg_page(seg, addr);
9504 9319 vp = svn_data->vp;
9505 9320 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
9506 9321 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
9507 9322 }
9508 9323
9509 9324 return (policy_info);
9510 9325 }
9511 9326
9512 9327 /*ARGSUSED*/
9513 9328 static int
9514 9329 segvn_capable(struct seg *seg, segcapability_t capability)
9515 9330 {
9516 9331 return (0);
9517 9332 }
9518 9333
9519 9334 /*
9520 9335 * Bind text vnode segment to an amp. If we bind successfully mappings will be
9521 9336 * established to per vnode mapping per lgroup amp pages instead of to vnode
9522 9337 * pages. There's one amp per vnode text mapping per lgroup. Many processes
9523 9338 * may share the same text replication amp. If a suitable amp doesn't already
9524 9339 * exist in svntr hash table create a new one. We may fail to bind to amp if
9525 9340 * segment is not eligible for text replication. Code below first checks for
9526 9341 * these conditions. If binding is successful segment tr_state is set to on
9527 9342 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
9528 9343 * svd->amp remains as NULL.
9529 9344 */
9530 9345 static void
9531 9346 segvn_textrepl(struct seg *seg)
9532 9347 {
9533 9348 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9534 9349 vnode_t *vp = svd->vp;
9535 9350 u_offset_t off = svd->offset;
9536 9351 size_t size = seg->s_size;
9537 9352 u_offset_t eoff = off + size;
9538 9353 uint_t szc = seg->s_szc;
9539 9354 ulong_t hash = SVNTR_HASH_FUNC(vp);
9540 9355 svntr_t *svntrp;
9541 9356 struct vattr va;
9542 9357 proc_t *p = seg->s_as->a_proc;
9543 9358 lgrp_id_t lgrp_id;
9544 9359 lgrp_id_t olid;
9545 9360 int first;
9546 9361 struct anon_map *amp;
9547 9362
9548 9363 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9549 9364 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9550 9365 ASSERT(p != NULL);
9551 9366 ASSERT(svd->tr_state == SEGVN_TR_INIT);
9552 9367 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9553 9368 ASSERT(svd->flags & MAP_TEXT);
9554 9369 ASSERT(svd->type == MAP_PRIVATE);
9555 9370 ASSERT(vp != NULL && svd->amp == NULL);
9556 9371 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9557 9372 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9558 9373 ASSERT(seg->s_as != &kas);
9559 9374 ASSERT(off < eoff);
9560 9375 ASSERT(svntr_hashtab != NULL);
9561 9376
9562 9377 /*
9563 9378 * If NUMA optimizations are no longer desired, bail out.
9564 9379 */
9565 9380 if (!lgrp_optimizations()) {
9566 9381 svd->tr_state = SEGVN_TR_OFF;
9567 9382 return;
9568 9383 }
9569 9384
9570 9385 /*
9571 9386 * Avoid creating anon maps with size bigger than the file size.
9572 9387 * If VOP_GETATTR() call fails bail out.
9573 9388 */
9574 9389 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
9575 9390 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
9576 9391 svd->tr_state = SEGVN_TR_OFF;
9577 9392 SEGVN_TR_ADDSTAT(gaerr);
9578 9393 return;
9579 9394 }
9580 9395 if (btopr(va.va_size) < btopr(eoff)) {
9581 9396 svd->tr_state = SEGVN_TR_OFF;
9582 9397 SEGVN_TR_ADDSTAT(overmap);
9583 9398 return;
9584 9399 }
9585 9400
9586 9401 /*
9587 9402 * VVMEXEC may not be set yet if exec() prefaults text segment. Set
9588 9403 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED
9589 9404 * mapping that checks if trcache for this vnode needs to be
9590 9405 * invalidated can't miss us.
9591 9406 */
9592 9407 if (!(vp->v_flag & VVMEXEC)) {
9593 9408 mutex_enter(&vp->v_lock);
9594 9409 vp->v_flag |= VVMEXEC;
9595 9410 mutex_exit(&vp->v_lock);
9596 9411 }
9597 9412 mutex_enter(&svntr_hashtab[hash].tr_lock);
9598 9413 /*
9599 9414 * Bail out if potentially MAP_SHARED writable mappings exist to this
9600 9415 * vnode. We don't want to use old file contents from existing
9601 9416 * replicas if this mapping was established after the original file
9602 9417 * was changed.
9603 9418 */
9604 9419 if (vn_is_mapped(vp, V_WRITE)) {
9605 9420 mutex_exit(&svntr_hashtab[hash].tr_lock);
9606 9421 svd->tr_state = SEGVN_TR_OFF;
9607 9422 SEGVN_TR_ADDSTAT(wrcnt);
9608 9423 return;
9609 9424 }
9610 9425 svntrp = svntr_hashtab[hash].tr_head;
9611 9426 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9612 9427 ASSERT(svntrp->tr_refcnt != 0);
9613 9428 if (svntrp->tr_vp != vp) {
9614 9429 continue;
9615 9430 }
9616 9431
9617 9432 /*
9618 9433 * Bail out if the file or its attributes were changed after
9619 9434 * this replication entry was created since we need to use the
9620 9435 * latest file contents. Note that mtime test alone is not
9621 9436 * sufficient because a user can explicitly change mtime via
9622 9437 * utimes(2) interfaces back to the old value after modifying
9623 9438 * the file contents. To detect this case we also have to test
9624 9439 * ctime which among other things records the time of the last
9625 9440 * mtime change by utimes(2). ctime is not changed when the file
9626 9441 * is only read or executed so we expect that typically existing
9627 9442 * replication amps can be used most of the time.
9628 9443 */
9629 9444 if (!svntrp->tr_valid ||
9630 9445 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
9631 9446 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
9632 9447 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
9633 9448 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
9634 9449 mutex_exit(&svntr_hashtab[hash].tr_lock);
9635 9450 svd->tr_state = SEGVN_TR_OFF;
9636 9451 SEGVN_TR_ADDSTAT(stale);
9637 9452 return;
9638 9453 }
9639 9454 /*
9640 9455 * if off, eoff and szc match current segment we found the
9641 9456 * existing entry we can use.
9642 9457 */
9643 9458 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
9644 9459 svntrp->tr_szc == szc) {
9645 9460 break;
9646 9461 }
9647 9462 /*
9648 9463 * Don't create entries that differ from but overlap this one in
9649 9464 * file offsets, to avoid replicating the same file pages more
9650 9465 * than once per lgroup.
9651 9466 */
9652 9467 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
9653 9468 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
9654 9469 mutex_exit(&svntr_hashtab[hash].tr_lock);
9655 9470 svd->tr_state = SEGVN_TR_OFF;
9656 9471 SEGVN_TR_ADDSTAT(overlap);
9657 9472 return;
9658 9473 }
9659 9474 }
9660 9475 /*
9661 9476 * If we didn't find existing entry create a new one.
9662 9477 */
9663 9478 if (svntrp == NULL) {
9664 9479 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
9665 9480 if (svntrp == NULL) {
9666 9481 mutex_exit(&svntr_hashtab[hash].tr_lock);
9667 9482 svd->tr_state = SEGVN_TR_OFF;
9668 9483 SEGVN_TR_ADDSTAT(nokmem);
9669 9484 return;
9670 9485 }
9671 9486 #ifdef DEBUG
9672 9487 {
9673 9488 lgrp_id_t i;
9674 9489 for (i = 0; i < NLGRPS_MAX; i++) {
9675 9490 ASSERT(svntrp->tr_amp[i] == NULL);
9676 9491 }
9677 9492 }
9678 9493 #endif /* DEBUG */
9679 9494 svntrp->tr_vp = vp;
9680 9495 svntrp->tr_off = off;
9681 9496 svntrp->tr_eoff = eoff;
9682 9497 svntrp->tr_szc = szc;
9683 9498 svntrp->tr_valid = 1;
9684 9499 svntrp->tr_mtime = va.va_mtime;
9685 9500 svntrp->tr_ctime = va.va_ctime;
9686 9501 svntrp->tr_refcnt = 0;
9687 9502 svntrp->tr_next = svntr_hashtab[hash].tr_head;
9688 9503 svntr_hashtab[hash].tr_head = svntrp;
9689 9504 }
9690 9505 first = 1;
9691 9506 again:
9692 9507 /*
9693 9508 * We want to pick a replica with pages on main thread's (t_tid = 1,
9694 9509 * aka T1) lgrp. Currently text replication is only optimized for
9695 9510 * workloads that either have all threads of a process on the same
9696 9511 * lgrp or execute their large text primarily on main thread.
9697 9512 */
9698 9513 lgrp_id = p->p_t1_lgrpid;
9699 9514 if (lgrp_id == LGRP_NONE) {
9700 9515 /*
9701 9516 * In case exec() prefaults text on non main thread use
9702 9517 * current thread lgrpid. It will become main thread anyway
9703 9518 * soon.
9704 9519 */
9705 9520 lgrp_id = lgrp_home_id(curthread);
9706 9521 }
9707 9522 /*
9708 9523 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise
9709 9524 * just set it to NLGRPS_MAX if it's different from current process T1
9710 9525 * home lgrp. p_tr_lgrpid is used to detect if process uses text
9711 9526 * replication and T1 new home is different from lgrp used for text
9712 9527 * replication. When this happens the asynchronous segvn thread rechecks if
9713 9528 * segments should change lgrps used for text replication. If we fail
9714 9529 * to set p_tr_lgrpid with cas32 then set it to NLGRPS_MAX without cas
9715 9530 * if it's not already NLGRPS_MAX and not equal to the lgrp_id we want to
9716 9531 * use. We don't need to use cas in this case because another thread
9717 9532 * that races in between our non atomic check and set may only change
9718 9533 * p_tr_lgrpid to NLGRPS_MAX at this point.
9719 9534 */
9720 9535 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9721 9536 olid = p->p_tr_lgrpid;
9722 9537 if (lgrp_id != olid && olid != NLGRPS_MAX) {
9723 9538 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
9724 9539 if (cas32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != olid) {
9725 9540 olid = p->p_tr_lgrpid;
9726 9541 ASSERT(olid != LGRP_NONE);
9727 9542 if (olid != lgrp_id && olid != NLGRPS_MAX) {
9728 9543 p->p_tr_lgrpid = NLGRPS_MAX;
9729 9544 }
9730 9545 }
9731 9546 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9732 9547 membar_producer();
9733 9548 /*
9734 9549 * lgrp_move_thread() won't schedule async recheck after
9735 9550 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
9736 9551 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
9737 9552 * is not LGRP_NONE.
9738 9553 */
9739 9554 if (first && p->p_t1_lgrpid != LGRP_NONE &&
9740 9555 p->p_t1_lgrpid != lgrp_id) {
9741 9556 first = 0;
9742 9557 goto again;
9743 9558 }
9744 9559 }
9745 9560 /*
9746 9561 * If no amp was created yet for lgrp_id create a new one as long as
9747 9562 * we have enough memory to afford it.
9748 9563 */
9749 9564 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
9750 9565 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
9751 9566 if (trmem > segvn_textrepl_max_bytes) {
9752 9567 SEGVN_TR_ADDSTAT(normem);
9753 9568 goto fail;
9754 9569 }
9755 9570 if (anon_try_resv_zone(size, NULL) == 0) {
9756 9571 SEGVN_TR_ADDSTAT(noanon);
9757 9572 goto fail;
9758 9573 }
9759 9574 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
9760 9575 if (amp == NULL) {
9761 9576 anon_unresv_zone(size, NULL);
9762 9577 SEGVN_TR_ADDSTAT(nokmem);
9763 9578 goto fail;
9764 9579 }
9765 9580 ASSERT(amp->refcnt == 1);
9766 9581 amp->a_szc = szc;
9767 9582 svntrp->tr_amp[lgrp_id] = amp;
9768 9583 SEGVN_TR_ADDSTAT(newamp);
9769 9584 }
9770 9585 svntrp->tr_refcnt++;
9771 9586 ASSERT(svd->svn_trnext == NULL);
9772 9587 ASSERT(svd->svn_trprev == NULL);
9773 9588 svd->svn_trnext = svntrp->tr_svnhead;
9774 9589 svd->svn_trprev = NULL;
9775 9590 if (svntrp->tr_svnhead != NULL) {
9776 9591 svntrp->tr_svnhead->svn_trprev = svd;
9777 9592 }
9778 9593 svntrp->tr_svnhead = svd;
9779 9594 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
9780 9595 ASSERT(amp->refcnt >= 1);
9781 9596 svd->amp = amp;
9782 9597 svd->anon_index = 0;
9783 9598 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
9784 9599 svd->tr_policy_info.mem_lgrpid = lgrp_id;
9785 9600 svd->tr_state = SEGVN_TR_ON;
9786 9601 mutex_exit(&svntr_hashtab[hash].tr_lock);
9787 9602 SEGVN_TR_ADDSTAT(repl);
9788 9603 return;
9789 9604 fail:
9790 9605 ASSERT(segvn_textrepl_bytes >= size);
9791 9606 atomic_add_long(&segvn_textrepl_bytes, -size);
9792 9607 ASSERT(svntrp != NULL);
9793 9608 ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
9794 9609 if (svntrp->tr_refcnt == 0) {
9795 9610 ASSERT(svntrp == svntr_hashtab[hash].tr_head);
9796 9611 svntr_hashtab[hash].tr_head = svntrp->tr_next;
9797 9612 mutex_exit(&svntr_hashtab[hash].tr_lock);
9798 9613 kmem_cache_free(svntr_cache, svntrp);
9799 9614 } else {
9800 9615 mutex_exit(&svntr_hashtab[hash].tr_lock);
9801 9616 }
9802 9617 svd->tr_state = SEGVN_TR_OFF;
9803 9618 }
9804 9619
9805 9620 /*
9806 9621 * Convert seg back to regular vnode mapping seg by unbinding it from its text
9807 9622 * replication amp. This routine is most typically called when segment is
9808 9623 * unmapped but can also be called when segment no longer qualifies for text
9809 9624 * replication (e.g. due to protection changes). If unload_unmap is set use
9810 9625 * HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user of
9811 9626 * svntr free all its anon maps and remove it from the hash table.
9812 9627 */
9813 9628 static void
9814 9629 segvn_textunrepl(struct seg *seg, int unload_unmap)
9815 9630 {
9816 9631 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9817 9632 vnode_t *vp = svd->vp;
9818 9633 u_offset_t off = svd->offset;
9819 9634 size_t size = seg->s_size;
9820 9635 u_offset_t eoff = off + size;
9821 9636 uint_t szc = seg->s_szc;
9822 9637 ulong_t hash = SVNTR_HASH_FUNC(vp);
9823 9638 svntr_t *svntrp;
9824 9639 svntr_t **prv_svntrp;
9825 9640 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
9826 9641 lgrp_id_t i;
9827 9642
9828 9643 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9829 9644 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
9830 9645 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9831 9646 ASSERT(svd->tr_state == SEGVN_TR_ON);
9832 9647 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9833 9648 ASSERT(svd->amp != NULL);
9834 9649 ASSERT(svd->amp->refcnt >= 1);
9835 9650 ASSERT(svd->anon_index == 0);
9836 9651 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9837 9652 ASSERT(svntr_hashtab != NULL);
9838 9653
9839 9654 mutex_enter(&svntr_hashtab[hash].tr_lock);
9840 9655 prv_svntrp = &svntr_hashtab[hash].tr_head;
9841 9656 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
9842 9657 ASSERT(svntrp->tr_refcnt != 0);
9843 9658 if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
9844 9659 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
9845 9660 break;
9846 9661 }
9847 9662 }
9848 9663 if (svntrp == NULL) {
9849 9664 panic("segvn_textunrepl: svntr record not found");
9850 9665 }
9851 9666 if (svntrp->tr_amp[lgrp_id] != svd->amp) {
9852 9667 panic("segvn_textunrepl: amp mismatch");
9853 9668 }
9854 9669 svd->tr_state = SEGVN_TR_OFF;
9855 9670 svd->amp = NULL;
9856 9671 if (svd->svn_trprev == NULL) {
9857 9672 ASSERT(svntrp->tr_svnhead == svd);
9858 9673 svntrp->tr_svnhead = svd->svn_trnext;
9859 9674 if (svntrp->tr_svnhead != NULL) {
9860 9675 svntrp->tr_svnhead->svn_trprev = NULL;
9861 9676 }
9862 9677 svd->svn_trnext = NULL;
9863 9678 } else {
9864 9679 svd->svn_trprev->svn_trnext = svd->svn_trnext;
9865 9680 if (svd->svn_trnext != NULL) {
9866 9681 svd->svn_trnext->svn_trprev = svd->svn_trprev;
9867 9682 svd->svn_trnext = NULL;
9868 9683 }
9869 9684 svd->svn_trprev = NULL;
9870 9685 }
9871 9686 if (--svntrp->tr_refcnt) {
9872 9687 mutex_exit(&svntr_hashtab[hash].tr_lock);
9873 9688 goto done;
9874 9689 }
9875 9690 *prv_svntrp = svntrp->tr_next;
9876 9691 mutex_exit(&svntr_hashtab[hash].tr_lock);
9877 9692 for (i = 0; i < NLGRPS_MAX; i++) {
9878 9693 struct anon_map *amp = svntrp->tr_amp[i];
9879 9694 if (amp == NULL) {
9880 9695 continue;
9881 9696 }
9882 9697 ASSERT(amp->refcnt == 1);
9883 9698 ASSERT(amp->swresv == size);
9884 9699 ASSERT(amp->size == size);
9885 9700 ASSERT(amp->a_szc == szc);
9886 9701 if (amp->a_szc != 0) {
9887 9702 anon_free_pages(amp->ahp, 0, size, szc);
9888 9703 } else {
9889 9704 anon_free(amp->ahp, 0, size);
9890 9705 }
9891 9706 svntrp->tr_amp[i] = NULL;
9892 9707 ASSERT(segvn_textrepl_bytes >= size);
9893 9708 atomic_add_long(&segvn_textrepl_bytes, -size);
9894 9709 anon_unresv_zone(amp->swresv, NULL);
9895 9710 amp->refcnt = 0;
9896 9711 anonmap_free(amp);
9897 9712 }
9898 9713 kmem_cache_free(svntr_cache, svntrp);
9899 9714 done:
9900 9715 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
9901 9716 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
9902 9717 }
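/*
 * Illustrative sketch (editor's addition, not part of the patch; names
 * hypothetical): segvn_textunrepl() above unlinks svd from the intrusive
 * doubly-linked svn_trnext/svn_trprev chain, handling the head case
 * separately because the head is reached through the svntr record rather
 * than through a prev pointer.  The same unlink in isolation:
 */
struct node {
	struct node *next;
	struct node *prev;
};

static void
list_unlink(struct node **headp, struct node *n)
{
	if (n->prev == NULL) {			/* n is the list head */
		*headp = n->next;
		if (n->next != NULL)
			n->next->prev = NULL;
	} else {
		n->prev->next = n->next;
		if (n->next != NULL)
			n->next->prev = n->prev;
	}
	n->next = n->prev = NULL;
}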
9903 9718
9904 9719 /*
9905 9720 * This is called when a MAP_SHARED writable mapping is created to a vnode
9906 9721 * that is currently used for execution (VVMEXEC flag is set). In this case we
9907 9722 * need to prevent further use of existing replicas.
9908 9723 */
9909 9724 static void
9910 9725 segvn_inval_trcache(vnode_t *vp)
9911 9726 {
9912 9727 ulong_t hash = SVNTR_HASH_FUNC(vp);
9913 9728 svntr_t *svntrp;
9914 9729
9915 9730 ASSERT(vp->v_flag & VVMEXEC);
9916 9731
9917 9732 if (svntr_hashtab == NULL) {
9918 9733 return;
9919 9734 }
9920 9735
9921 9736 mutex_enter(&svntr_hashtab[hash].tr_lock);
9922 9737 svntrp = svntr_hashtab[hash].tr_head;
9923 9738 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9924 9739 ASSERT(svntrp->tr_refcnt != 0);
9925 9740 if (svntrp->tr_vp == vp && svntrp->tr_valid) {
9926 9741 svntrp->tr_valid = 0;
9927 9742 }
9928 9743 }
9929 9744 mutex_exit(&svntr_hashtab[hash].tr_lock);
9930 9745 }
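/*
 * Illustrative userspace analogue (editor's addition, not part of the patch;
 * names hypothetical): segvn_inval_trcache() clears tr_valid under the
 * bucket lock instead of freeing entries, so racing users of a replica
 * notice the invalidation lazily.  The same flag-based bucket invalidation
 * with pthreads:
 */
#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

#define	NBUCKETS	64

struct entry {
	struct entry *next;
	const void *key;
	int valid;
};

static struct bucket {
	pthread_mutex_t lock;
	struct entry *head;
} table[NBUCKETS];

static void
table_init(void)
{
	for (int i = 0; i < NBUCKETS; i++)
		pthread_mutex_init(&table[i].lock, NULL);
}

static void
invalidate_key(const void *key)
{
	struct bucket *b = &table[((uintptr_t)key >> 4) % NBUCKETS];
	struct entry *e;

	pthread_mutex_lock(&b->lock);
	for (e = b->head; e != NULL; e = e->next) {
		if (e->key == key)
			e->valid = 0;	/* entry stays linked; users skip it */
	}
	pthread_mutex_unlock(&b->lock);
}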
9931 9746
9932 9747 static void
9933 9748 segvn_trasync_thread(void)
9934 9749 {
9935 9750 callb_cpr_t cpr_info;
9936 9751 kmutex_t cpr_lock; /* just for CPR stuff */
9937 9752
9938 9753 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
9939 9754
9940 9755 CALLB_CPR_INIT(&cpr_info, &cpr_lock,
9941 9756 callb_generic_cpr, "segvn_async");
9942 9757
9943 9758 if (segvn_update_textrepl_interval == 0) {
9944 9759 segvn_update_textrepl_interval = segvn_update_tr_time * hz;
9945 9760 } else {
9946 9761 segvn_update_textrepl_interval *= hz;
9947 9762 }
9948 9763 (void) timeout(segvn_trupdate_wakeup, NULL,
9949 9764 segvn_update_textrepl_interval);
9950 9765
9951 9766 for (;;) {
9952 9767 mutex_enter(&cpr_lock);
9953 9768 CALLB_CPR_SAFE_BEGIN(&cpr_info);
9954 9769 mutex_exit(&cpr_lock);
9955 9770 sema_p(&segvn_trasync_sem);
9956 9771 mutex_enter(&cpr_lock);
9957 9772 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
9958 9773 mutex_exit(&cpr_lock);
9959 9774 segvn_trupdate();
9960 9775 }
9961 9776 }
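/*
 * Illustrative userspace analogue (editor's addition, not part of the patch;
 * names hypothetical): the daemon loop above parks on a semaphore and runs
 * one segvn_trupdate() pass per wakeup; the CPR calls merely bracket the
 * sleep for suspend/resume and are omitted here.  Initialize with
 * sem_init(&work_sem, 0, 0) before starting the thread.
 */
#include <semaphore.h>

static sem_t work_sem;			/* posted by the wakeup callback */

static void
update_pass(void)
{
	/* stands in for segvn_trupdate(); real work goes here */
}

static void *
async_worker(void *arg)
{
	(void) arg;
	for (;;) {
		sem_wait(&work_sem);	/* sleep until work is requested */
		update_pass();
	}
	/* NOTREACHED */
}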
9962 9777
9963 9778 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;
9964 9779
9965 9780 static void
9966 9781 segvn_trupdate_wakeup(void *dummy)
9967 9782 {
9968 9783 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();
9969 9784
9970 9785 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
9971 9786 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
9972 9787 sema_v(&segvn_trasync_sem);
9973 9788 }
9974 9789
9975 9790 if (!segvn_disable_textrepl_update &&
9976 9791 segvn_update_textrepl_interval != 0) {
9977 9792 (void) timeout(segvn_trupdate_wakeup, dummy,
9978 9793 segvn_update_textrepl_interval);
9979 9794 }
9980 9795 }
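/*
 * Illustrative sketch (editor's addition, not part of the patch; names
 * hypothetical): the wakeup callback above only posts the semaphore when
 * the lgrp migration counter moved since the last snapshot, so an idle
 * system never wakes the worker.  The snapshot-compare step in isolation:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static atomic_uint_fast64_t migration_events;	/* bumped by the scheduler */
static uint_fast64_t migration_snapshot;	/* callback-private state */

static bool
migrations_changed(void)
{
	uint_fast64_t cur = atomic_load(&migration_events);

	if (cur != migration_snapshot) {
		migration_snapshot = cur;
		return (true);		/* caller posts the work semaphore */
	}
	return (false);
}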
9981 9796
9982 9797 static void
9983 9798 segvn_trupdate(void)
9984 9799 {
9985 9800 ulong_t hash;
9986 9801 svntr_t *svntrp;
9987 9802 segvn_data_t *svd;
9988 9803
9989 9804 ASSERT(svntr_hashtab != NULL);
9990 9805
9991 9806 for (hash = 0; hash < svntr_hashtab_sz; hash++) {
9992 9807 mutex_enter(&svntr_hashtab[hash].tr_lock);
9993 9808 svntrp = svntr_hashtab[hash].tr_head;
9994 9809 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9995 9810 ASSERT(svntrp->tr_refcnt != 0);
9996 9811 svd = svntrp->tr_svnhead;
9997 9812 for (; svd != NULL; svd = svd->svn_trnext) {
9998 9813 segvn_trupdate_seg(svd->seg, svd, svntrp,
9999 9814 hash);
10000 9815 }
10001 9816 }
10002 9817 mutex_exit(&svntr_hashtab[hash].tr_lock);
10003 9818 }
10004 9819 }
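/*
 * Illustrative sketch (editor's addition, not part of the patch; names and
 * types hypothetical): segvn_trupdate() sweeps the whole hash table while
 * holding at most one bucket lock at a time, which bounds how long any
 * single bucket is blocked.  The shape of that sweep:
 */
#include <pthread.h>

#define	NSWEEP_BUCKETS	64

struct sweep_entry {
	struct sweep_entry *next;
};

static struct sweep_bucket {
	pthread_mutex_t lock;
	struct sweep_entry *head;
} sweep_table[NSWEEP_BUCKETS];

static void
sweep_init(void)
{
	for (int i = 0; i < NSWEEP_BUCKETS; i++)
		pthread_mutex_init(&sweep_table[i].lock, NULL);
}

static void
sweep_all(void (*visit)(struct sweep_entry *))
{
	for (int h = 0; h < NSWEEP_BUCKETS; h++) {
		pthread_mutex_lock(&sweep_table[h].lock);
		for (struct sweep_entry *e = sweep_table[h].head;
		    e != NULL; e = e->next)
			visit(e);
		pthread_mutex_unlock(&sweep_table[h].lock);
	}
}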
10005 9820
10006 9821 static void
10007 9822 segvn_trupdate_seg(struct seg *seg,
10008 9823 segvn_data_t *svd,
10009 9824 svntr_t *svntrp,
10010 9825 ulong_t hash)
10011 9826 {
10012 9827 proc_t *p;
10013 9828 lgrp_id_t lgrp_id;
10014 9829 struct as *as;
10015 9830 size_t size;
10016 9831 struct anon_map *amp;
10017 9832
10018 9833 ASSERT(svd->vp != NULL);
10019 9834 ASSERT(svd->vp == svntrp->tr_vp);
10020 9835 ASSERT(svd->offset == svntrp->tr_off);
10021 9836 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
10022 9837 ASSERT(seg != NULL);
10023 9838 ASSERT(svd->seg == seg);
10024 9839 ASSERT(seg->s_data == (void *)svd);
10025 9840 ASSERT(seg->s_szc == svntrp->tr_szc);
10026 9841 ASSERT(svd->tr_state == SEGVN_TR_ON);
10027 9842 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10028 9843 ASSERT(svd->amp != NULL);
10029 9844 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10030 9845 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
10031 9846 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
10032 9847 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
10033 9848 ASSERT(svntrp->tr_refcnt != 0);
10034 9849 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));
10035 9850
10036 9851 as = seg->s_as;
10037 9852 ASSERT(as != NULL && as != &kas);
10038 9853 p = as->a_proc;
10039 9854 ASSERT(p != NULL);
10040 9855 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
10041 9856 lgrp_id = p->p_t1_lgrpid;
10042 9857 if (lgrp_id == LGRP_NONE) {
10043 9858 return;
10044 9859 }
10045 9860 ASSERT(lgrp_id < NLGRPS_MAX);
10046 9861 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10047 9862 return;
10048 9863 }
10049 9864
10050 9865 /*
10051 9866  * Use tryenter locking since we acquire the as/seg locks and the svntr
10052 9867  * hash lock in the reverse order from the synchronous thread.
10053 9868  */
10054 9869 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
10055 9870 SEGVN_TR_ADDSTAT(nolock);
10056 9871 if (segvn_lgrp_trthr_migrs_snpsht) {
10057 9872 segvn_lgrp_trthr_migrs_snpsht = 0;
10058 9873 }
10059 9874 return;
10060 9875 }
10061 9876 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10062 9877 AS_LOCK_EXIT(as, &as->a_lock);
10063 9878 SEGVN_TR_ADDSTAT(nolock);
10064 9879 if (segvn_lgrp_trthr_migrs_snpsht) {
10065 9880 segvn_lgrp_trthr_migrs_snpsht = 0;
10066 9881 }
10067 9882 return;
10068 9883 }
10069 9884 size = seg->s_size;
10070 9885 if (svntrp->tr_amp[lgrp_id] == NULL) {
10071 9886 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
10072 9887 if (trmem > segvn_textrepl_max_bytes) {
10073 9888 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10074 9889 AS_LOCK_EXIT(as, &as->a_lock);
10075 9890 atomic_add_long(&segvn_textrepl_bytes, -size);
10076 9891 SEGVN_TR_ADDSTAT(normem);
10077 9892 return;
10078 9893 }
10079 9894 if (anon_try_resv_zone(size, NULL) == 0) {
10080 9895 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10081 9896 AS_LOCK_EXIT(as, &as->a_lock);
10082 9897 atomic_add_long(&segvn_textrepl_bytes, -size);
10083 9898 SEGVN_TR_ADDSTAT(noanon);
10084 9899 return;
10085 9900 }
10086 9901 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
10087 9902 if (amp == NULL) {
10088 9903 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10089 9904 AS_LOCK_EXIT(as, &as->a_lock);
10090 9905 atomic_add_long(&segvn_textrepl_bytes, -size);
10091 9906 anon_unresv_zone(size, NULL);
10092 9907 SEGVN_TR_ADDSTAT(nokmem);
10093 9908 return;
10094 9909 }
10095 9910 ASSERT(amp->refcnt == 1);
10096 9911 amp->a_szc = seg->s_szc;
10097 9912 svntrp->tr_amp[lgrp_id] = amp;
10098 9913 }
10099 9914 /*
10100 9915  * We don't strictly need to drop the bucket lock, but doing so here
10101 9916  * gives other threads a chance. svntr and svd can't be unlinked as
10102 9917  * long as the segment lock is held as a writer and the AS lock is
10103 9918  * held as well. After we retake the bucket lock we'll continue from
10104 9919  * where we left off; we'll reach the end of either list since new
10105 9920  * entries are always added at the head of the lists.
10106 9921  */
10107 9922 mutex_exit(&svntr_hashtab[hash].tr_lock);
10108 9923 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
10109 9924 mutex_enter(&svntr_hashtab[hash].tr_lock);
10110 9925
10111 9926 ASSERT(svd->tr_state == SEGVN_TR_ON);
10112 9927 ASSERT(svd->amp != NULL);
10113 9928 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10114 9929 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10115 9930 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10116 9931
10117 9932 svd->tr_policy_info.mem_lgrpid = lgrp_id;
10118 9933 svd->amp = svntrp->tr_amp[lgrp_id];
10119 9934 p->p_tr_lgrpid = NLGRPS_MAX;
10120 9935 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10121 9936 AS_LOCK_EXIT(as, &as->a_lock);
10122 9937
10123 9938 ASSERT(svntrp->tr_refcnt != 0);
10124 9939 ASSERT(svd->vp == svntrp->tr_vp);
10125 9940 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10126 9941 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10127 9942 ASSERT(svd->seg == seg);
10128 9943 ASSERT(svd->tr_state == SEGVN_TR_ON);
10129 9944
10130 9945 SEGVN_TR_ADDSTAT(asyncrepl);
10131 9946 }
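/*
 * Illustrative userspace analogue (editor's addition, not part of the patch;
 * names hypothetical): segvn_trupdate_seg() takes the as/segment locks while
 * already holding the svntr bucket lock -- the reverse of the synchronous
 * order -- so it must use trylock and bail out rather than block, or two
 * threads could deadlock ABBA-style.  Skipping an update on trylock failure
 * is safe because the next pass will retry.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t as_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t seg_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Called with the (hypothetical) bucket lock already held. */
static bool
try_update_seg(void)
{
	if (pthread_rwlock_tryrdlock(&as_lock) != 0)
		return (false);		/* blocking here could deadlock */
	if (pthread_rwlock_trywrlock(&seg_lock) != 0) {
		pthread_rwlock_unlock(&as_lock);
		return (false);		/* skip this pass; retry later */
	}
	/* ... perform the migration/update ... */
	pthread_rwlock_unlock(&seg_lock);
	pthread_rwlock_unlock(&as_lock);
	return (true);
}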
↓ open down ↓ |
2939 lines elided |
↑ open up ↑ |