5508 move segvn #defines into seg_vn.c
Reviewed by: Marcel Telka <marcel@telka.sk>
--- old/usr/src/uts/common/vm/seg_vn.c
+++ new/usr/src/uts/common/vm/seg_vn.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 + * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
23 24 */
24 25
25 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 27 /* All Rights Reserved */
27 28
28 29 /*
29 30 * University Copyright- Copyright (c) 1982, 1986, 1988
30 31 * The Regents of the University of California
31 32 * All Rights Reserved
32 33 *
33 34 * University Acknowledgment- Portions of this document are derived from
34 35 * software developed by the University of California, Berkeley, and its
35 36 * contributors.
36 37 */
37 38
38 39 /*
39 40 * VM - shared or copy-on-write from a vnode/anonymous memory.
40 41 */
41 42
42 43 #include <sys/types.h>
43 44 #include <sys/param.h>
44 45 #include <sys/t_lock.h>
45 46 #include <sys/errno.h>
46 47 #include <sys/systm.h>
47 48 #include <sys/mman.h>
48 49 #include <sys/debug.h>
49 50 #include <sys/cred.h>
50 51 #include <sys/vmsystm.h>
51 52 #include <sys/tuneable.h>
52 53 #include <sys/bitmap.h>
53 54 #include <sys/swap.h>
54 55 #include <sys/kmem.h>
55 56 #include <sys/sysmacros.h>
56 57 #include <sys/vtrace.h>
57 58 #include <sys/cmn_err.h>
58 59 #include <sys/callb.h>
59 60 #include <sys/vm.h>
60 61 #include <sys/dumphdr.h>
61 62 #include <sys/lgrp.h>
62 63
63 64 #include <vm/hat.h>
64 65 #include <vm/as.h>
65 66 #include <vm/seg.h>
66 67 #include <vm/seg_vn.h>
67 68 #include <vm/pvn.h>
68 69 #include <vm/anon.h>
69 70 #include <vm/page.h>
70 71 #include <vm/vpage.h>
71 72 #include <sys/proc.h>
72 73 #include <sys/task.h>
73 74 #include <sys/project.h>
74 75 #include <sys/zone.h>
75 76 #include <sys/shm_impl.h>
77 +
78 +/*
79 + * segvn_fault needs a temporary page list array. To avoid calling kmem all
80 + * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if
81 + * it can. In the rare case when this page list is not large enough, it
82 + * goes and gets a large enough array from kmem.
83 + *
84 + * This small page list array covers either 8 pages or 64kB worth of pages -
85 + * whichever is smaller.
86 + */
87 +#define PVN_MAX_GETPAGE_SZ 0x10000
88 +#define PVN_MAX_GETPAGE_NUM 0x8
89 +
90 +#if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
91 +#define PVN_GETPAGE_SZ ptob(PVN_MAX_GETPAGE_NUM)
92 +#define PVN_GETPAGE_NUM PVN_MAX_GETPAGE_NUM
93 +#else
94 +#define PVN_GETPAGE_SZ PVN_MAX_GETPAGE_SZ
95 +#define PVN_GETPAGE_NUM btop(PVN_MAX_GETPAGE_SZ)
96 +#endif
97 +
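/*
 * A minimal user-level sketch (illustrative only, not part of this change)
 * of how the new PVN_GETPAGE_SZ/PVN_GETPAGE_NUM macros resolve. PAGESIZE,
 * ptob() and btop() are stubbed locally, assuming a 4K base page.
 */
#include <stdio.h>

#define	ILL_PAGESIZE		4096UL
#define	ill_ptob(n)		((n) * ILL_PAGESIZE)
#define	ill_btop(b)		((b) / ILL_PAGESIZE)

#define	ILL_MAX_GETPAGE_SZ	0x10000UL	/* 64K */
#define	ILL_MAX_GETPAGE_NUM	0x8UL		/* 8 pages */

#if ILL_MAX_GETPAGE_SZ > ILL_MAX_GETPAGE_NUM * ILL_PAGESIZE
#define	ILL_GETPAGE_SZ		ill_ptob(ILL_MAX_GETPAGE_NUM)
#define	ILL_GETPAGE_NUM		ILL_MAX_GETPAGE_NUM
#else
#define	ILL_GETPAGE_SZ		ILL_MAX_GETPAGE_SZ
#define	ILL_GETPAGE_NUM		ill_btop(ILL_MAX_GETPAGE_SZ)
#endif

int
main(void)
{
	/* With 4K pages, 8 pages (32K) is smaller than 64K: NUM=8, SZ=32768. */
	printf("GETPAGE_NUM = %lu, GETPAGE_SZ = %lu\n",
	    (unsigned long)ILL_GETPAGE_NUM, (unsigned long)ILL_GETPAGE_SZ);
	return (0);
}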
76 98 /*
77 99 * Private seg op routines.
78 100 */
79 101 static int segvn_dup(struct seg *seg, struct seg *newseg);
80 102 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
81 103 static void segvn_free(struct seg *seg);
82 104 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
83 105 caddr_t addr, size_t len, enum fault_type type,
84 106 enum seg_rw rw);
85 107 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
86 108 static int segvn_setprot(struct seg *seg, caddr_t addr,
87 109 size_t len, uint_t prot);
88 110 static int segvn_checkprot(struct seg *seg, caddr_t addr,
89 111 size_t len, uint_t prot);
90 112 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
91 113 static size_t segvn_swapout(struct seg *seg);
92 114 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len,
93 115 int attr, uint_t flags);
94 116 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len,
95 117 char *vec);
96 118 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
97 119 int attr, int op, ulong_t *lockmap, size_t pos);
98 120 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
99 121 uint_t *protv);
100 122 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr);
101 123 static int segvn_gettype(struct seg *seg, caddr_t addr);
102 124 static int segvn_getvp(struct seg *seg, caddr_t addr,
103 125 struct vnode **vpp);
104 126 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len,
105 127 uint_t behav);
106 128 static void segvn_dump(struct seg *seg);
107 129 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
108 130 struct page ***ppp, enum lock_type type, enum seg_rw rw);
109 131 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
110 132 uint_t szc);
111 133 static int segvn_getmemid(struct seg *seg, caddr_t addr,
112 134 memid_t *memidp);
113 135 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t);
114 136 static int segvn_capable(struct seg *seg, segcapability_t capable);
115 137
116 138 struct seg_ops segvn_ops = {
117 139 segvn_dup,
118 140 segvn_unmap,
119 141 segvn_free,
120 142 segvn_fault,
121 143 segvn_faulta,
122 144 segvn_setprot,
123 145 segvn_checkprot,
124 146 segvn_kluster,
125 147 segvn_swapout,
126 148 segvn_sync,
127 149 segvn_incore,
128 150 segvn_lockop,
129 151 segvn_getprot,
130 152 segvn_getoffset,
131 153 segvn_gettype,
132 154 segvn_getvp,
133 155 segvn_advise,
134 156 segvn_dump,
135 157 segvn_pagelock,
136 158 segvn_setpagesize,
137 159 segvn_getmemid,
138 160 segvn_getpolicy,
139 161 segvn_capable,
140 162 };
141 163
142 164 /*
143 165 * Common zfod structures, provided as a shorthand for others to use.
144 166 */
145 167 static segvn_crargs_t zfod_segvn_crargs =
146 168 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
147 169 static segvn_crargs_t kzfod_segvn_crargs =
148 170 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
149 171 PROT_ALL & ~PROT_USER);
150 172 static segvn_crargs_t stack_noexec_crargs =
151 173 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);
152 174
153 175 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */
154 176 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */
155 177 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */
156 178 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
157 179
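/*
 * As an illustration (not part of this change), callers typically hand one
 * of these argsp pointers straight to segvn_create(), e.g. roughly:
 *
 *	error = as_map(as, addr, len, segvn_create, zfod_argsp);
 *
 * to get an anonymous zero-fill-on-demand mapping with default protections.
 */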
158 180 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
159 181
160 182 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */
161 183
162 184 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */
163 185 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */
164 186 uint_t segvn_pglock_comb_bshift;
165 187 size_t segvn_pglock_comb_palign;
166 188
167 189 static int segvn_concat(struct seg *, struct seg *, int);
168 190 static int segvn_extend_prev(struct seg *, struct seg *,
169 191 struct segvn_crargs *, size_t);
170 192 static int segvn_extend_next(struct seg *, struct seg *,
171 193 struct segvn_crargs *, size_t);
172 194 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
173 195 static void segvn_pagelist_rele(page_t **);
174 196 static void segvn_setvnode_mpss(vnode_t *);
175 197 static void segvn_relocate_pages(page_t **, page_t *);
176 198 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
177 199 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
178 200 uint_t, page_t **, page_t **, uint_t *, int *);
179 201 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
180 202 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
181 203 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
182 204 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
183 205 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
184 206 u_offset_t, struct vpage *, page_t **, uint_t,
185 207 enum fault_type, enum seg_rw, int);
186 208 static void segvn_vpage(struct seg *);
187 209 static size_t segvn_count_swap_by_vpages(struct seg *);
188 210
189 211 static void segvn_purge(struct seg *seg);
190 212 static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
191 213 enum seg_rw, int);
192 214 static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
193 215 enum seg_rw, int);
194 216
195 217 static int sameprot(struct seg *, caddr_t, size_t);
196 218
197 219 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
198 220 static int segvn_clrszc(struct seg *);
199 221 static struct seg *segvn_split_seg(struct seg *, caddr_t);
200 222 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
201 223 ulong_t, uint_t);
202 224
203 225 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
204 226 size_t, void *, u_offset_t);
205 227
206 228 static struct kmem_cache *segvn_cache;
207 229 static struct kmem_cache **segvn_szc_cache;
208 230
209 231 #ifdef VM_STATS
210 232 static struct segvnvmstats_str {
211 233 ulong_t fill_vp_pages[31];
212 234 ulong_t fltvnpages[49];
213 235 ulong_t fullszcpages[10];
214 236 ulong_t relocatepages[3];
215 237 ulong_t fltanpages[17];
216 238 ulong_t pagelock[2];
217 239 ulong_t demoterange[3];
218 240 } segvnvmstats;
219 241 #endif /* VM_STATS */
220 242
221 243 #define SDR_RANGE 1 /* demote entire range */
222 244 #define SDR_END 2 /* demote non aligned ends only */
223 245
224 246 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \
225 247 if ((len) != 0) { \
226 248 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \
227 249 ASSERT(lpgaddr >= (seg)->s_base); \
228 250 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \
229 251 (len)), pgsz); \
230 252 ASSERT(lpgeaddr > lpgaddr); \
231 253 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \
232 254 } else { \
233 255 lpgeaddr = lpgaddr = (addr); \
234 256 } \
235 257 }
236 258
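/*
 * A standalone sketch (illustrative only) of the arithmetic CALC_LPG_REGION
 * performs: align the start of the faulting range down, and its end up, to
 * the large-page size. The P2ALIGN/P2ROUNDUP forms below mirror the
 * <sys/sysmacros.h> definitions for a user-level build.
 */
#include <stdio.h>
#include <stdint.h>

#define	ILL_P2ALIGN(x, align)	((x) & -(align))
#define	ILL_P2ROUNDUP(x, align)	(-(-(x) & -(align)))

int
main(void)
{
	uintptr_t addr = 0x12345;	/* hypothetical fault address */
	size_t len = 0x3000;		/* hypothetical fault length */
	uintptr_t pgsz = 0x10000;	/* 64K large page */

	uintptr_t lpgaddr = ILL_P2ALIGN(addr, pgsz);
	uintptr_t lpgeaddr = ILL_P2ROUNDUP(addr + len, pgsz);

	/* Prints lpgaddr=0x10000 lpgeaddr=0x20000: the covering 64K region. */
	printf("lpgaddr=0x%lx lpgeaddr=0x%lx\n",
	    (unsigned long)lpgaddr, (unsigned long)lpgeaddr);
	return (0);
}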
237 259 /*ARGSUSED*/
238 260 static int
239 261 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
240 262 {
241 263 struct segvn_data *svd = buf;
242 264
243 265 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
244 266 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
245 267 svd->svn_trnext = svd->svn_trprev = NULL;
246 268 return (0);
247 269 }
248 270
249 271 /*ARGSUSED1*/
250 272 static void
251 273 segvn_cache_destructor(void *buf, void *cdrarg)
252 274 {
253 275 struct segvn_data *svd = buf;
254 276
255 277 rw_destroy(&svd->lock);
256 278 mutex_destroy(&svd->segfree_syncmtx);
257 279 }
258 280
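/*
 * The segvn_cache constructor/destructor above follow the usual kmem
 * object-caching pattern: locks are initialized once per cached buffer and
 * stay initialized while the buffer sits in the cache, rather than on every
 * allocation. A rough user-level analogue using libumem (link with -lumem);
 * the names below are illustrative, not taken from this file.
 */
#include <umem.h>
#include <pthread.h>

typedef struct ill_obj {
	pthread_mutex_t	io_lock;
	int		io_state;
} ill_obj_t;

/*ARGSUSED*/
static int
ill_obj_ctor(void *buf, void *cbdata, int kmflags)
{
	ill_obj_t *op = buf;

	/* Done once per cached buffer, not on every allocation. */
	(void) pthread_mutex_init(&op->io_lock, NULL);
	return (0);
}

/*ARGSUSED*/
static void
ill_obj_dtor(void *buf, void *cbdata)
{
	ill_obj_t *op = buf;

	(void) pthread_mutex_destroy(&op->io_lock);
}

int
main(void)
{
	umem_cache_t *cp = umem_cache_create("ill_obj_cache",
	    sizeof (ill_obj_t), 0, ill_obj_ctor, ill_obj_dtor,
	    NULL, NULL, NULL, 0);
	ill_obj_t *op = umem_cache_alloc(cp, UMEM_NOFAIL);

	op->io_state = 1;		/* per-allocation state */
	umem_cache_free(cp, op);
	umem_cache_destroy(cp);
	return (0);
}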
259 281 /*ARGSUSED*/
260 282 static int
261 283 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
262 284 {
263 285 bzero(buf, sizeof (svntr_t));
264 286 return (0);
265 287 }
266 288
267 289 /*
268 290 * Patching this variable to non-zero allows the system to run with
269 291 * stacks marked as "not executable". It's a bit of a kludge, but is
270 292 * provided as a tweakable for platforms that export those ABIs
271 293 * (e.g. sparc V8) that have executable stacks enabled by default.
272 294 * There are also some restrictions for platforms that don't actually
273 295 * implement 'noexec' protections.
274 296 *
275 297 * Once enabled, the system is (therefore) unable to provide a fully
276 298 * ABI-compliant execution environment, though practically speaking,
277 299 * most everything works. The exceptions are generally some interpreters
278 300 * and debuggers that create executable code on the stack and jump
279 301 * into it (without explicitly mprotecting the address range to include
280 302 * PROT_EXEC).
281 303 *
282 304 * One important class of applications that are disabled are those
283 305 * that have been transformed into malicious agents using one of the
284 306 * numerous "buffer overflow" attacks. See 4007890.
285 307 */
286 308 int noexec_user_stack = 0;
287 309 int noexec_user_stack_log = 1;
288 310
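/*
 * The comment above notes that code generated on the stack keeps working
 * under noexec_user_stack only if the application explicitly adds PROT_EXEC
 * to that range. A minimal, illustrative user-level example of doing so:
 */
#include <sys/mman.h>
#include <stdint.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	char buf[8192];			/* stands in for stack-resident code */
	long pgsz = sysconf(_SC_PAGESIZE);
	uintptr_t start = (uintptr_t)buf & ~((uintptr_t)pgsz - 1);
	uintptr_t end = ((uintptr_t)buf + sizeof (buf) + pgsz - 1) &
	    ~((uintptr_t)pgsz - 1);

	if (mprotect((void *)start, end - start,
	    PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
		perror("mprotect");
		return (1);
	}
	(void) printf("stack range marked executable\n");
	return (0);
}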
289 311 int segvn_lpg_disable = 0;
290 312 uint_t segvn_maxpgszc = 0;
291 313
292 314 ulong_t segvn_vmpss_clrszc_cnt;
293 315 ulong_t segvn_vmpss_clrszc_err;
294 316 ulong_t segvn_fltvnpages_clrszc_cnt;
295 317 ulong_t segvn_fltvnpages_clrszc_err;
296 318 ulong_t segvn_setpgsz_align_err;
297 319 ulong_t segvn_setpgsz_anon_align_err;
298 320 ulong_t segvn_setpgsz_getattr_err;
299 321 ulong_t segvn_setpgsz_eof_err;
300 322 ulong_t segvn_faultvnmpss_align_err1;
301 323 ulong_t segvn_faultvnmpss_align_err2;
302 324 ulong_t segvn_faultvnmpss_align_err3;
303 325 ulong_t segvn_faultvnmpss_align_err4;
304 326 ulong_t segvn_faultvnmpss_align_err5;
305 327 ulong_t segvn_vmpss_pageio_deadlk_err;
306 328
307 329 int segvn_use_regions = 1;
308 330
309 331 /*
310 332 * Segvn supports text replication optimization for NUMA platforms. Text
311 333 * replicas are represented by anon maps (amp). There's one amp per text file
312 334 * region per lgroup. A process chooses the amp for each of its text mappings
313 335 * based on the lgroup assignment of its main thread (t_tid = 1). All
314 336 * processes that want a replica on a particular lgroup for the same text file
315 337 * mapping share the same amp. amps are looked up in the svntr_hashtab hash table
316 338 * with vp,off,size,szc used as a key. Text replication segments are read only
317 339 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by
318 340 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode
319 341 * pages. Replication amp is assigned to a segment when it gets its first
320 342 * pagefault. To handle main thread lgroup rehoming, segvn_trasync_thread
321 343 * periodically rechecks whether the process still maps an amp local to the main
322 344 * thread. If not, the async thread forces the process to remap to an amp in the new
323 345 * home lgroup of the main thread. The current text replication implementation
324 346 * only benefits workloads that do most of their work in the
325 347 * main thread of a process, or whose threads all run in the same
326 348 * lgroup. To extend the text replication benefit to other types of
327 349 * multithreaded workloads, further work would be needed in the hat layer to
328 350 * allow the same virtual address in the same hat to simultaneously map
329 351 * different physical addresses (i.e. page table replication would be needed
330 352 * for x86).
331 353 *
332 354 * amp pages are used instead of vnode pages as long as the segment has a very
333 355 * simple life cycle. It's created via segvn_create(), handles S_EXEC
334 356 * (S_READ) pagefaults and is fully unmapped. If anything more complicated
335 357 * happens, such as the protection changing, a real COW fault occurring, the
336 358 * pagesize changing, MC_LOCK being requested or the segment being partially
337 359 * unmapped, we turn off text replication by converting the segment back to a
338 360 * vnode-only segment (unmap its address range and set svd->amp to NULL).
339 361 *
340 362 * The original file can be changed after amp is inserted into
341 363 * svntr_hashtab. Processes that are launched after the file is already
342 364 * changed can't use the replicas created prior to the file change. To
343 365 * implement this functionality, hash entries are timestamped. Replicas can
344 366 * only be used if the current file modification time is the same as the timestamp
345 367 * saved when the hash entry was created. However, timestamps alone are not
346 368 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We
347 369 * deal with file changes via MAP_SHARED mappings differently. When writable
348 370 * MAP_SHARED mappings are created to vnodes marked as executable we mark all
349 371 * existing replicas for this vnode as not usable for future text
350 372 * mappings. We also don't create new replicas for files that currently have
351 373 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
352 374 * true).
353 375 */
354 376
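/*
 * A conceptual sketch (hypothetical types, not the real svntr_t layout) of
 * the lookup rule the block comment above describes: one replica amp per
 * (vp, off, size, szc) text mapping, usable only while the file's
 * modification time still matches the timestamp recorded at creation.
 */
#include <stdint.h>
#include <stddef.h>
#include <time.h>

typedef struct repl_key {
	const void	*rk_vp;		/* which vnode */
	uint64_t	rk_off;		/* mapping offset */
	size_t		rk_size;	/* mapping size */
	unsigned int	rk_szc;		/* page size code */
} repl_key_t;

typedef struct repl_ent {
	repl_key_t	re_key;
	struct timespec	re_mtime;	/* file mtime when the replica was made */
} repl_ent_t;

/* A replica is usable only if the key matches and the file is unchanged. */
int
repl_usable(const repl_ent_t *ep, const repl_key_t *kp,
    const struct timespec *cur_mtime)
{
	return (ep->re_key.rk_vp == kp->rk_vp &&
	    ep->re_key.rk_off == kp->rk_off &&
	    ep->re_key.rk_size == kp->rk_size &&
	    ep->re_key.rk_szc == kp->rk_szc &&
	    ep->re_mtime.tv_sec == cur_mtime->tv_sec &&
	    ep->re_mtime.tv_nsec == cur_mtime->tv_nsec);
}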
355 377 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20)
356 378 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;
357 379
358 380 static ulong_t svntr_hashtab_sz = 512;
359 381 static svntr_bucket_t *svntr_hashtab = NULL;
360 382 static struct kmem_cache *svntr_cache;
361 383 static svntr_stats_t *segvn_textrepl_stats;
362 384 static ksema_t segvn_trasync_sem;
363 385
364 386 int segvn_disable_textrepl = 1;
365 387 size_t textrepl_size_thresh = (size_t)-1;
366 388 size_t segvn_textrepl_bytes = 0;
367 389 size_t segvn_textrepl_max_bytes = 0;
368 390 clock_t segvn_update_textrepl_interval = 0;
369 391 int segvn_update_tr_time = 10;
370 392 int segvn_disable_textrepl_update = 0;
371 393
372 394 static void segvn_textrepl(struct seg *);
373 395 static void segvn_textunrepl(struct seg *, int);
374 396 static void segvn_inval_trcache(vnode_t *);
375 397 static void segvn_trasync_thread(void);
376 398 static void segvn_trupdate_wakeup(void *);
377 399 static void segvn_trupdate(void);
378 400 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
379 401 ulong_t);
380 402
381 403 /*
382 404 * Initialize segvn data structures
383 405 */
384 406 void
385 407 segvn_init(void)
386 408 {
387 409 uint_t maxszc;
388 410 uint_t szc;
389 411 size_t pgsz;
390 412
391 413 segvn_cache = kmem_cache_create("segvn_cache",
392 414 sizeof (struct segvn_data), 0,
393 415 segvn_cache_constructor, segvn_cache_destructor, NULL,
394 416 NULL, NULL, 0);
395 417
396 418 if (segvn_lpg_disable == 0) {
397 419 szc = maxszc = page_num_pagesizes() - 1;
398 420 if (szc == 0) {
399 421 segvn_lpg_disable = 1;
400 422 }
401 423 if (page_get_pagesize(0) != PAGESIZE) {
402 424 panic("segvn_init: bad szc 0");
403 425 /*NOTREACHED*/
404 426 }
405 427 while (szc != 0) {
406 428 pgsz = page_get_pagesize(szc);
407 429 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
408 430 panic("segvn_init: bad szc %d", szc);
409 431 /*NOTREACHED*/
410 432 }
411 433 szc--;
412 434 }
413 435 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
414 436 segvn_maxpgszc = maxszc;
415 437 }
416 438
417 439 if (segvn_maxpgszc) {
418 440 segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
419 441 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
420 442 KM_SLEEP);
421 443 }
422 444
423 445 for (szc = 1; szc <= segvn_maxpgszc; szc++) {
424 446 char str[32];
425 447
426 448 (void) sprintf(str, "segvn_szc_cache%d", szc);
427 449 segvn_szc_cache[szc] = kmem_cache_create(str,
428 450 page_get_pagecnt(szc) * sizeof (page_t *), 0,
429 451 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
430 452 }
431 453
432 454
433 455 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
434 456 segvn_use_regions = 0;
435 457
436 458 /*
437 459 * For now, shared regions and text replication segvn support
438 460 * are mutually exclusive. This is acceptable because a
439 461 * significant benefit from text replication has so far only
440 462 * been observed on AMD64 NUMA platforms (due to their relatively
441 463 * small L2$ size), and we don't currently support shared
442 464 * regions on x86.
443 465 */
444 466 if (segvn_use_regions && !segvn_disable_textrepl) {
445 467 segvn_disable_textrepl = 1;
446 468 }
447 469
448 470 #if defined(_LP64)
449 471 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
450 472 !segvn_disable_textrepl) {
451 473 ulong_t i;
452 474 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);
453 475
454 476 svntr_cache = kmem_cache_create("svntr_cache",
455 477 sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
456 478 NULL, NULL, NULL, 0);
457 479 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
458 480 for (i = 0; i < svntr_hashtab_sz; i++) {
459 481 mutex_init(&svntr_hashtab[i].tr_lock, NULL,
460 482 MUTEX_DEFAULT, NULL);
461 483 }
462 484 segvn_textrepl_max_bytes = ptob(physmem) /
463 485 segvn_textrepl_max_bytes_factor;
464 486 segvn_textrepl_stats = kmem_zalloc(NCPU *
465 487 sizeof (svntr_stats_t), KM_SLEEP);
466 488 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
467 489 (void) thread_create(NULL, 0, segvn_trasync_thread,
468 490 NULL, 0, &p0, TS_RUN, minclsyspri);
469 491 }
470 492 #endif
471 493
472 494 if (!ISP2(segvn_pglock_comb_balign) ||
473 495 segvn_pglock_comb_balign < PAGESIZE) {
474 496 segvn_pglock_comb_balign = 1UL << 16; /* 64K */
475 497 }
476 498 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
477 499 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
478 500 }
479 501
480 502 #define SEGVN_PAGEIO ((void *)0x1)
481 503 #define SEGVN_NOPAGEIO ((void *)0x2)
482 504
483 505 static void
484 506 segvn_setvnode_mpss(vnode_t *vp)
485 507 {
486 508 int err;
487 509
488 510 ASSERT(vp->v_mpssdata == NULL ||
489 511 vp->v_mpssdata == SEGVN_PAGEIO ||
490 512 vp->v_mpssdata == SEGVN_NOPAGEIO);
491 513
492 514 if (vp->v_mpssdata == NULL) {
493 515 if (vn_vmpss_usepageio(vp)) {
494 516 err = VOP_PAGEIO(vp, (page_t *)NULL,
495 517 (u_offset_t)0, 0, 0, CRED(), NULL);
496 518 } else {
497 519 err = ENOSYS;
498 520 }
499 521 /*
500 522 * set v_mpssdata just once per vnode life
501 523 * so that it never changes.
502 524 */
503 525 mutex_enter(&vp->v_lock);
504 526 if (vp->v_mpssdata == NULL) {
505 527 if (err == EINVAL) {
506 528 vp->v_mpssdata = SEGVN_PAGEIO;
507 529 } else {
508 530 vp->v_mpssdata = SEGVN_NOPAGEIO;
509 531 }
510 532 }
511 533 mutex_exit(&vp->v_lock);
512 534 }
513 535 }
514 536
515 537 int
516 538 segvn_create(struct seg *seg, void *argsp)
517 539 {
518 540 struct segvn_crargs *a = (struct segvn_crargs *)argsp;
519 541 struct segvn_data *svd;
520 542 size_t swresv = 0;
521 543 struct cred *cred;
522 544 struct anon_map *amp;
523 545 int error = 0;
524 546 size_t pgsz;
525 547 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
526 548 int use_rgn = 0;
527 549 int trok = 0;
528 550
529 551 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
530 552
531 553 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
532 554 panic("segvn_create type");
533 555 /*NOTREACHED*/
534 556 }
535 557
536 558 /*
537 559 * Check arguments. If a shared anon structure is given then
538 560 * it is illegal to also specify a vp.
539 561 */
540 562 if (a->amp != NULL && a->vp != NULL) {
541 563 panic("segvn_create anon_map");
542 564 /*NOTREACHED*/
543 565 }
544 566
545 567 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
546 568 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
547 569 segvn_use_regions) {
548 570 use_rgn = 1;
549 571 }
550 572
551 573 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
552 574 if (a->type == MAP_SHARED)
553 575 a->flags &= ~MAP_NORESERVE;
554 576
555 577 if (a->szc != 0) {
556 578 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
557 579 (a->amp != NULL && a->type == MAP_PRIVATE) ||
558 580 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
559 581 a->szc = 0;
560 582 } else {
561 583 if (a->szc > segvn_maxpgszc)
562 584 a->szc = segvn_maxpgszc;
563 585 pgsz = page_get_pagesize(a->szc);
564 586 if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
565 587 !IS_P2ALIGNED(seg->s_size, pgsz)) {
566 588 a->szc = 0;
567 589 } else if (a->vp != NULL) {
568 590 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
569 591 /*
570 592 * paranoid check.
571 593 * hat_page_demote() is not supported
572 594 * on swapfs pages.
573 595 */
574 596 a->szc = 0;
575 597 } else if (map_addr_vacalign_check(seg->s_base,
576 598 a->offset & PAGEMASK)) {
577 599 a->szc = 0;
578 600 }
579 601 } else if (a->amp != NULL) {
580 602 pgcnt_t anum = btopr(a->offset);
581 603 pgcnt_t pgcnt = page_get_pagecnt(a->szc);
582 604 if (!IS_P2ALIGNED(anum, pgcnt)) {
583 605 a->szc = 0;
584 606 }
585 607 }
586 608 }
587 609 }
588 610
589 611 /*
590 612 * If segment may need private pages, reserve them now.
591 613 */
592 614 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
593 615 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
594 616 if (anon_resv_zone(seg->s_size,
595 617 seg->s_as->a_proc->p_zone) == 0)
596 618 return (EAGAIN);
597 619 swresv = seg->s_size;
598 620 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
599 621 seg, swresv, 1);
600 622 }
601 623
602 624 /*
603 625 * Reserve any mapping structures that may be required.
604 626 *
605 627 * Don't do it for segments that may use regions. It's currently a
606 628 * noop in the hat implementations anyway.
607 629 */
608 630 if (!use_rgn) {
609 631 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
610 632 }
611 633
612 634 if (a->cred) {
613 635 cred = a->cred;
614 636 crhold(cred);
615 637 } else {
616 638 crhold(cred = CRED());
617 639 }
618 640
619 641 /* Inform the vnode of the new mapping */
620 642 if (a->vp != NULL) {
621 643 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
622 644 seg->s_as, seg->s_base, seg->s_size, a->prot,
623 645 a->maxprot, a->type, cred, NULL);
624 646 if (error) {
625 647 if (swresv != 0) {
626 648 anon_unresv_zone(swresv,
627 649 seg->s_as->a_proc->p_zone);
628 650 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
629 651 "anon proc:%p %lu %u", seg, swresv, 0);
630 652 }
631 653 crfree(cred);
632 654 if (!use_rgn) {
633 655 hat_unload(seg->s_as->a_hat, seg->s_base,
634 656 seg->s_size, HAT_UNLOAD_UNMAP);
635 657 }
636 658 return (error);
637 659 }
638 660 /*
639 661 * svntr_hashtab will be NULL if we support shared regions.
640 662 */
641 663 trok = ((a->flags & MAP_TEXT) &&
642 664 (seg->s_size > textrepl_size_thresh ||
643 665 (a->flags & _MAP_TEXTREPL)) &&
644 666 lgrp_optimizations() && svntr_hashtab != NULL &&
645 667 a->type == MAP_PRIVATE && swresv == 0 &&
646 668 !(a->flags & MAP_NORESERVE) &&
647 669 seg->s_as != &kas && a->vp->v_type == VREG);
648 670
649 671 ASSERT(!trok || !use_rgn);
650 672 }
651 673
652 674 /*
653 675 * MAP_NORESERVE mappings don't count towards the VSZ of a process
654 676 * until we fault the pages in.
655 677 */
656 678 if ((a->vp == NULL || a->vp->v_type != VREG) &&
657 679 a->flags & MAP_NORESERVE) {
658 680 seg->s_as->a_resvsize -= seg->s_size;
659 681 }
660 682
661 683 /*
662 684 * If more than one segment in the address space, and they're adjacent
663 685 * virtually, try to concatenate them. Don't concatenate if an
664 686 * explicit anon_map structure was supplied (e.g., SystemV shared
665 687 * memory) or if we'll use text replication for this segment.
666 688 */
667 689 if (a->amp == NULL && !use_rgn && !trok) {
668 690 struct seg *pseg, *nseg;
669 691 struct segvn_data *psvd, *nsvd;
670 692 lgrp_mem_policy_t ppolicy, npolicy;
671 693 uint_t lgrp_mem_policy_flags = 0;
672 694 extern lgrp_mem_policy_t lgrp_mem_default_policy;
673 695
674 696 /*
675 697 * Memory policy flags (lgrp_mem_policy_flags) is valid when
676 698 * extending stack/heap segments.
677 699 */
678 700 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
679 701 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
680 702 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
681 703 } else {
682 704 /*
683 705 * Get policy when not extending it from another segment
684 706 */
685 707 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
686 708 }
687 709
688 710 /*
689 711 * First, try to concatenate the previous and new segments
690 712 */
691 713 pseg = AS_SEGPREV(seg->s_as, seg);
692 714 if (pseg != NULL &&
693 715 pseg->s_base + pseg->s_size == seg->s_base &&
694 716 pseg->s_ops == &segvn_ops) {
695 717 /*
696 718 * Get memory allocation policy from previous segment.
697 719 * When extension is specified (e.g. for heap) apply
698 720 * this policy to the new segment regardless of the
699 721 * outcome of segment concatenation. Extension occurs
700 722 * for a non-default policy; otherwise the default policy is
701 723 * used, based on the extended segment size.
702 724 */
703 725 psvd = (struct segvn_data *)pseg->s_data;
704 726 ppolicy = psvd->policy_info.mem_policy;
705 727 if (lgrp_mem_policy_flags ==
706 728 LGRP_MP_FLAG_EXTEND_UP) {
707 729 if (ppolicy != lgrp_mem_default_policy) {
708 730 mpolicy = ppolicy;
709 731 } else {
710 732 mpolicy = lgrp_mem_policy_default(
711 733 pseg->s_size + seg->s_size,
712 734 a->type);
713 735 }
714 736 }
715 737
716 738 if (mpolicy == ppolicy &&
717 739 (pseg->s_size + seg->s_size <=
718 740 segvn_comb_thrshld || psvd->amp == NULL) &&
719 741 segvn_extend_prev(pseg, seg, a, swresv) == 0) {
720 742 /*
721 743 * success! now try to concatenate
722 744 * with following seg
723 745 */
724 746 crfree(cred);
725 747 nseg = AS_SEGNEXT(pseg->s_as, pseg);
726 748 if (nseg != NULL &&
727 749 nseg != pseg &&
728 750 nseg->s_ops == &segvn_ops &&
729 751 pseg->s_base + pseg->s_size ==
730 752 nseg->s_base)
731 753 (void) segvn_concat(pseg, nseg, 0);
732 754 ASSERT(pseg->s_szc == 0 ||
733 755 (a->szc == pseg->s_szc &&
734 756 IS_P2ALIGNED(pseg->s_base, pgsz) &&
735 757 IS_P2ALIGNED(pseg->s_size, pgsz)));
736 758 return (0);
737 759 }
738 760 }
739 761
740 762 /*
741 763 * Failed, so try to concatenate with following seg
742 764 */
743 765 nseg = AS_SEGNEXT(seg->s_as, seg);
744 766 if (nseg != NULL &&
745 767 seg->s_base + seg->s_size == nseg->s_base &&
746 768 nseg->s_ops == &segvn_ops) {
747 769 /*
748 770 * Get memory allocation policy from next segment.
749 771 * When extension is specified (e.g. for stack) apply
750 772 * this policy to the new segment regardless of the
751 773 * outcome of segment concatenation. Extension occurs
752 774 * for a non-default policy; otherwise the default policy is
753 775 * used, based on the extended segment size.
754 776 */
755 777 nsvd = (struct segvn_data *)nseg->s_data;
756 778 npolicy = nsvd->policy_info.mem_policy;
757 779 if (lgrp_mem_policy_flags ==
758 780 LGRP_MP_FLAG_EXTEND_DOWN) {
759 781 if (npolicy != lgrp_mem_default_policy) {
760 782 mpolicy = npolicy;
761 783 } else {
762 784 mpolicy = lgrp_mem_policy_default(
763 785 nseg->s_size + seg->s_size,
764 786 a->type);
765 787 }
766 788 }
767 789
768 790 if (mpolicy == npolicy &&
769 791 segvn_extend_next(seg, nseg, a, swresv) == 0) {
770 792 crfree(cred);
771 793 ASSERT(nseg->s_szc == 0 ||
772 794 (a->szc == nseg->s_szc &&
773 795 IS_P2ALIGNED(nseg->s_base, pgsz) &&
774 796 IS_P2ALIGNED(nseg->s_size, pgsz)));
775 797 return (0);
776 798 }
777 799 }
778 800 }
779 801
780 802 if (a->vp != NULL) {
781 803 VN_HOLD(a->vp);
782 804 if (a->type == MAP_SHARED)
783 805 lgrp_shm_policy_init(NULL, a->vp);
784 806 }
785 807 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
786 808
787 809 seg->s_ops = &segvn_ops;
788 810 seg->s_data = (void *)svd;
789 811 seg->s_szc = a->szc;
790 812
791 813 svd->seg = seg;
792 814 svd->vp = a->vp;
793 815 /*
794 816 * Anonymous mappings have no backing file so the offset is meaningless.
795 817 */
796 818 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
797 819 svd->prot = a->prot;
798 820 svd->maxprot = a->maxprot;
799 821 svd->pageprot = 0;
800 822 svd->type = a->type;
801 823 svd->vpage = NULL;
802 824 svd->cred = cred;
803 825 svd->advice = MADV_NORMAL;
804 826 svd->pageadvice = 0;
805 827 svd->flags = (ushort_t)a->flags;
806 828 svd->softlockcnt = 0;
807 829 svd->softlockcnt_sbase = 0;
808 830 svd->softlockcnt_send = 0;
809 831 svd->rcookie = HAT_INVALID_REGION_COOKIE;
810 832 svd->pageswap = 0;
811 833
812 834 if (a->szc != 0 && a->vp != NULL) {
813 835 segvn_setvnode_mpss(a->vp);
814 836 }
815 837 if (svd->type == MAP_SHARED && svd->vp != NULL &&
816 838 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
817 839 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
818 840 segvn_inval_trcache(svd->vp);
819 841 }
820 842
821 843 amp = a->amp;
822 844 if ((svd->amp = amp) == NULL) {
823 845 svd->anon_index = 0;
824 846 if (svd->type == MAP_SHARED) {
825 847 svd->swresv = 0;
826 848 /*
827 849 * Shared mappings to a vp need no other setup.
828 850 * If we have a shared mapping to an anon_map object
829 851 * which hasn't been allocated yet, allocate the
830 852 * struct now so that it will be properly shared
831 853 * by remembering the swap reservation there.
832 854 */
833 855 if (a->vp == NULL) {
834 856 svd->amp = anonmap_alloc(seg->s_size, swresv,
835 857 ANON_SLEEP);
836 858 svd->amp->a_szc = seg->s_szc;
837 859 }
838 860 } else {
839 861 /*
840 862 * Private mapping (with or without a vp).
841 863 * Allocate anon_map when needed.
842 864 */
843 865 svd->swresv = swresv;
844 866 }
845 867 } else {
846 868 pgcnt_t anon_num;
847 869
848 870 /*
849 871 * Mapping to an existing anon_map structure without a vp.
850 872 * For now we will ensure that the segment size isn't larger
851 873 * than the size - offset gives us. Later on we may wish to
852 874 * have the anon array dynamically allocated itself so that
853 875 * we don't always have to allocate all the anon pointer slots.
854 876 * This of course involves adding extra code to check that we
855 877 * aren't trying to use an anon pointer slot beyond the end
856 878 * of the currently allocated anon array.
857 879 */
858 880 if ((amp->size - a->offset) < seg->s_size) {
859 881 panic("segvn_create anon_map size");
860 882 /*NOTREACHED*/
861 883 }
862 884
863 885 anon_num = btopr(a->offset);
864 886
865 887 if (a->type == MAP_SHARED) {
866 888 /*
867 889 * SHARED mapping to a given anon_map.
868 890 */
869 891 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
870 892 amp->refcnt++;
871 893 if (a->szc > amp->a_szc) {
872 894 amp->a_szc = a->szc;
873 895 }
874 896 ANON_LOCK_EXIT(&->a_rwlock);
875 897 svd->anon_index = anon_num;
876 898 svd->swresv = 0;
877 899 } else {
878 900 /*
879 901 * PRIVATE mapping to a given anon_map.
880 902 * Make sure that all the needed anon
881 903 * structures are created (so that we will
882 904 * share the underlying pages if nothing
883 905 * is written by this mapping) and then
884 906 * duplicate the anon array as is done
885 907 * when a privately mapped segment is dup'ed.
886 908 */
887 909 struct anon *ap;
888 910 caddr_t addr;
889 911 caddr_t eaddr;
890 912 ulong_t anon_idx;
891 913 int hat_flag = HAT_LOAD;
892 914
893 915 if (svd->flags & MAP_TEXT) {
894 916 hat_flag |= HAT_LOAD_TEXT;
895 917 }
896 918
897 919 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
898 920 svd->amp->a_szc = seg->s_szc;
899 921 svd->anon_index = 0;
900 922 svd->swresv = swresv;
901 923
902 924 /*
903 925 * Prevent 2 threads from allocating anon
904 926 * slots simultaneously.
905 927 */
906 928 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
907 929 eaddr = seg->s_base + seg->s_size;
908 930
909 931 for (anon_idx = anon_num, addr = seg->s_base;
910 932 addr < eaddr; addr += PAGESIZE, anon_idx++) {
911 933 page_t *pp;
912 934
913 935 if ((ap = anon_get_ptr(amp->ahp,
914 936 anon_idx)) != NULL)
915 937 continue;
916 938
917 939 /*
918 940 * Allocate the anon struct now.
919 941 * Might as well load up translation
920 942 * to the page while we're at it...
921 943 */
922 944 pp = anon_zero(seg, addr, &ap, cred);
923 945 if (ap == NULL || pp == NULL) {
924 946 panic("segvn_create anon_zero");
925 947 /*NOTREACHED*/
926 948 }
927 949
928 950 /*
929 951 * Re-acquire the anon_map lock and
930 952 * initialize the anon array entry.
931 953 */
932 954 ASSERT(anon_get_ptr(amp->ahp,
933 955 anon_idx) == NULL);
934 956 (void) anon_set_ptr(amp->ahp, anon_idx, ap,
935 957 ANON_SLEEP);
936 958
937 959 ASSERT(seg->s_szc == 0);
938 960 ASSERT(!IS_VMODSORT(pp->p_vnode));
939 961
940 962 ASSERT(use_rgn == 0);
941 963 hat_memload(seg->s_as->a_hat, addr, pp,
942 964 svd->prot & ~PROT_WRITE, hat_flag);
943 965
944 966 page_unlock(pp);
945 967 }
946 968 ASSERT(seg->s_szc == 0);
947 969 anon_dup(amp->ahp, anon_num, svd->amp->ahp,
948 970 0, seg->s_size);
949 971 ANON_LOCK_EXIT(&->a_rwlock);
950 972 }
951 973 }
952 974
953 975 /*
954 976 * Set default memory allocation policy for segment
955 977 *
956 978 * Always set policy for private memory at least for initialization
957 979 * even if this is a shared memory segment
958 980 */
959 981 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);
960 982
961 983 if (svd->type == MAP_SHARED)
962 984 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
963 985 svd->vp, svd->offset, seg->s_size);
964 986
965 987 if (use_rgn) {
966 988 ASSERT(!trok);
967 989 ASSERT(svd->amp == NULL);
968 990 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
969 991 seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
970 992 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
971 993 HAT_REGION_TEXT);
972 994 }
973 995
974 996 ASSERT(!trok || !(svd->prot & PROT_WRITE));
975 997 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;
976 998
977 999 return (0);
978 1000 }
979 1001
980 1002 /*
981 1003 * Concatenate two existing segments, if possible.
982 1004 * Return 0 on success, -1 if two segments are not compatible
983 1005 * or -2 on memory allocation failure.
984 1006 * If amp_cat == 1 then try and concat segments with anon maps
985 1007 */
986 1008 static int
987 1009 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
988 1010 {
989 1011 struct segvn_data *svd1 = seg1->s_data;
990 1012 struct segvn_data *svd2 = seg2->s_data;
991 1013 struct anon_map *amp1 = svd1->amp;
992 1014 struct anon_map *amp2 = svd2->amp;
993 1015 struct vpage *vpage1 = svd1->vpage;
994 1016 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
995 1017 size_t size, nvpsize;
996 1018 pgcnt_t npages1, npages2;
997 1019
998 1020 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
999 1021 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1000 1022 ASSERT(seg1->s_ops == seg2->s_ops);
1001 1023
1002 1024 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
1003 1025 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1004 1026 return (-1);
1005 1027 }
1006 1028
1007 1029 /* both segments exist, try to merge them */
1008 1030 #define incompat(x) (svd1->x != svd2->x)
1009 1031 if (incompat(vp) || incompat(maxprot) ||
1010 1032 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
1011 1033 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
1012 1034 incompat(type) || incompat(cred) || incompat(flags) ||
1013 1035 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
1014 1036 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
1015 1037 return (-1);
1016 1038 #undef incompat
1017 1039
1018 1040 /*
1019 1041 * vp == NULL implies zfod, offset doesn't matter
1020 1042 */
1021 1043 if (svd1->vp != NULL &&
1022 1044 svd1->offset + seg1->s_size != svd2->offset) {
1023 1045 return (-1);
1024 1046 }
1025 1047
1026 1048 /*
1027 1049 * Don't concatenate if either segment uses text replication.
1028 1050 */
1029 1051 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
1030 1052 return (-1);
1031 1053 }
1032 1054
1033 1055 /*
1034 1056 * Fail early if we're not supposed to concatenate
1035 1057 * segments with non NULL amp.
1036 1058 */
1037 1059 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
1038 1060 return (-1);
1039 1061 }
1040 1062
1041 1063 if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
1042 1064 if (amp1 != amp2) {
1043 1065 return (-1);
1044 1066 }
1045 1067 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
1046 1068 svd2->anon_index) {
1047 1069 return (-1);
1048 1070 }
1049 1071 ASSERT(amp1 == NULL || amp1->refcnt >= 2);
1050 1072 }
1051 1073
1052 1074 /*
1053 1075 * If either seg has vpages, create a new merged vpage array.
1054 1076 */
1055 1077 if (vpage1 != NULL || vpage2 != NULL) {
1056 1078 struct vpage *vp, *evp;
1057 1079
1058 1080 npages1 = seg_pages(seg1);
1059 1081 npages2 = seg_pages(seg2);
1060 1082 nvpsize = vpgtob(npages1 + npages2);
1061 1083
1062 1084 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
1063 1085 return (-2);
1064 1086 }
1065 1087
1066 1088 if (vpage1 != NULL) {
1067 1089 bcopy(vpage1, nvpage, vpgtob(npages1));
1068 1090 } else {
1069 1091 evp = nvpage + npages1;
1070 1092 for (vp = nvpage; vp < evp; vp++) {
1071 1093 VPP_SETPROT(vp, svd1->prot);
1072 1094 VPP_SETADVICE(vp, svd1->advice);
1073 1095 }
1074 1096 }
1075 1097
1076 1098 if (vpage2 != NULL) {
1077 1099 bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
1078 1100 } else {
1079 1101 evp = nvpage + npages1 + npages2;
1080 1102 for (vp = nvpage + npages1; vp < evp; vp++) {
1081 1103 VPP_SETPROT(vp, svd2->prot);
1082 1104 VPP_SETADVICE(vp, svd2->advice);
1083 1105 }
1084 1106 }
1085 1107
1086 1108 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
1087 1109 ASSERT(svd1->swresv == seg1->s_size);
1088 1110 ASSERT(!(svd1->flags & MAP_NORESERVE));
1089 1111 ASSERT(!(svd2->flags & MAP_NORESERVE));
1090 1112 evp = nvpage + npages1;
1091 1113 for (vp = nvpage; vp < evp; vp++) {
1092 1114 VPP_SETSWAPRES(vp);
1093 1115 }
1094 1116 }
1095 1117
1096 1118 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
1097 1119 ASSERT(svd2->swresv == seg2->s_size);
1098 1120 ASSERT(!(svd1->flags & MAP_NORESERVE));
1099 1121 ASSERT(!(svd2->flags & MAP_NORESERVE));
1100 1122 vp = nvpage + npages1;
1101 1123 evp = vp + npages2;
1102 1124 for (; vp < evp; vp++) {
1103 1125 VPP_SETSWAPRES(vp);
1104 1126 }
1105 1127 }
1106 1128 }
1107 1129 ASSERT((vpage1 != NULL || vpage2 != NULL) ||
1108 1130 (svd1->pageswap == 0 && svd2->pageswap == 0));
1109 1131
1110 1132 /*
1111 1133 * If either segment has private pages, create a new merged anon
1112 1134 * array. If merging shared anon segments, just decrement the anon map's
1113 1135 * refcnt.
1114 1136 */
1115 1137 if (amp1 != NULL && svd1->type == MAP_SHARED) {
1116 1138 ASSERT(amp1 == amp2 && svd1->vp == NULL);
1117 1139 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER);
1118 1140 ASSERT(amp1->refcnt >= 2);
1119 1141 amp1->refcnt--;
1120 1142 ANON_LOCK_EXIT(&1->a_rwlock);
1121 1143 svd2->amp = NULL;
1122 1144 } else if (amp1 != NULL || amp2 != NULL) {
1123 1145 struct anon_hdr *nahp;
1124 1146 struct anon_map *namp = NULL;
1125 1147 size_t asize;
1126 1148
1127 1149 ASSERT(svd1->type == MAP_PRIVATE);
1128 1150
1129 1151 asize = seg1->s_size + seg2->s_size;
1130 1152 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
1131 1153 if (nvpage != NULL) {
1132 1154 kmem_free(nvpage, nvpsize);
1133 1155 }
1134 1156 return (-2);
1135 1157 }
1136 1158 if (amp1 != NULL) {
1137 1159 /*
1138 1160 * XXX anon rwlock is not really needed because
1139 1161 * this is a private segment and we are writers.
1140 1162 */
1141 1163 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER);
1142 1164 ASSERT(amp1->refcnt == 1);
1143 1165 if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
1144 1166 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
1145 1167 anon_release(nahp, btop(asize));
1146 1168 ANON_LOCK_EXIT(&1->a_rwlock);
1147 1169 if (nvpage != NULL) {
1148 1170 kmem_free(nvpage, nvpsize);
1149 1171 }
1150 1172 return (-2);
1151 1173 }
1152 1174 }
1153 1175 if (amp2 != NULL) {
1154 1176 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER);
1155 1177 ASSERT(amp2->refcnt == 1);
1156 1178 if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
1157 1179 nahp, btop(seg1->s_size), btop(seg2->s_size),
1158 1180 ANON_NOSLEEP)) {
1159 1181 anon_release(nahp, btop(asize));
1160 1182 ANON_LOCK_EXIT(&2->a_rwlock);
1161 1183 if (amp1 != NULL) {
1162 1184 ANON_LOCK_EXIT(&1->a_rwlock);
1163 1185 }
1164 1186 if (nvpage != NULL) {
1165 1187 kmem_free(nvpage, nvpsize);
1166 1188 }
1167 1189 return (-2);
1168 1190 }
1169 1191 }
1170 1192 if (amp1 != NULL) {
1171 1193 namp = amp1;
1172 1194 anon_release(amp1->ahp, btop(amp1->size));
1173 1195 }
1174 1196 if (amp2 != NULL) {
1175 1197 if (namp == NULL) {
1176 1198 ASSERT(amp1 == NULL);
1177 1199 namp = amp2;
1178 1200 anon_release(amp2->ahp, btop(amp2->size));
1179 1201 } else {
1180 1202 amp2->refcnt--;
1181 1203 ANON_LOCK_EXIT(&2->a_rwlock);
1182 1204 anonmap_free(amp2);
1183 1205 }
1184 1206 svd2->amp = NULL; /* needed for seg_free */
1185 1207 }
1186 1208 namp->ahp = nahp;
1187 1209 namp->size = asize;
1188 1210 svd1->amp = namp;
1189 1211 svd1->anon_index = 0;
1190 1212 ANON_LOCK_EXIT(&namp->a_rwlock);
1191 1213 }
1192 1214 /*
1193 1215 * Now free the old vpage structures.
1194 1216 */
1195 1217 if (nvpage != NULL) {
1196 1218 if (vpage1 != NULL) {
1197 1219 kmem_free(vpage1, vpgtob(npages1));
1198 1220 }
1199 1221 if (vpage2 != NULL) {
1200 1222 svd2->vpage = NULL;
1201 1223 kmem_free(vpage2, vpgtob(npages2));
1202 1224 }
1203 1225 if (svd2->pageprot) {
1204 1226 svd1->pageprot = 1;
1205 1227 }
1206 1228 if (svd2->pageadvice) {
1207 1229 svd1->pageadvice = 1;
1208 1230 }
1209 1231 if (svd2->pageswap) {
1210 1232 svd1->pageswap = 1;
1211 1233 }
1212 1234 svd1->vpage = nvpage;
1213 1235 }
1214 1236
1215 1237 /* all looks ok, merge segments */
1216 1238 svd1->swresv += svd2->swresv;
1217 1239 svd2->swresv = 0; /* so seg_free doesn't release swap space */
1218 1240 size = seg2->s_size;
1219 1241 seg_free(seg2);
1220 1242 seg1->s_size += size;
1221 1243 return (0);
1222 1244 }
1223 1245
1224 1246 /*
1225 1247 * Extend the previous segment (seg1) to include the
1226 1248 * new segment (seg2 + a), if possible.
1227 1249 * Return 0 on success.
1228 1250 */
1229 1251 static int
1230 1252 segvn_extend_prev(seg1, seg2, a, swresv)
1231 1253 struct seg *seg1, *seg2;
1232 1254 struct segvn_crargs *a;
1233 1255 size_t swresv;
1234 1256 {
1235 1257 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
1236 1258 size_t size;
1237 1259 struct anon_map *amp1;
1238 1260 struct vpage *new_vpage;
1239 1261
1240 1262 /*
1241 1263 * We don't need any segment level locks for "segvn" data
1242 1264 * since the address space is "write" locked.
1243 1265 */
1244 1266 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1245 1267
1246 1268 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
1247 1269 return (-1);
1248 1270 }
1249 1271
1250 1272 /* second segment is new, try to extend first */
1251 1273 /* XXX - should also check cred */
1252 1274 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
1253 1275 (!svd1->pageprot && (svd1->prot != a->prot)) ||
1254 1276 svd1->type != a->type || svd1->flags != a->flags ||
1255 1277 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
1256 1278 return (-1);
1257 1279
1258 1280 /* vp == NULL implies zfod, offset doesn't matter */
1259 1281 if (svd1->vp != NULL &&
1260 1282 svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
1261 1283 return (-1);
1262 1284
1263 1285 if (svd1->tr_state != SEGVN_TR_OFF) {
1264 1286 return (-1);
1265 1287 }
1266 1288
1267 1289 amp1 = svd1->amp;
1268 1290 if (amp1) {
1269 1291 pgcnt_t newpgs;
1270 1292
1271 1293 /*
1272 1294 * Segment has private pages, can data structures
1273 1295 * be expanded?
1274 1296 *
1275 1297 * Acquire the anon_map lock to prevent it from changing,
1276 1298 * if it is shared. This ensures that the anon_map
1277 1299 * will not change while a thread which has a read/write
1278 1300 * lock on an address space references it.
1279 1301 * XXX - Don't need the anon_map lock at all if "refcnt"
1280 1302 * is 1.
1281 1303 *
1282 1304 * Can't grow a MAP_SHARED segment with an anonmap because
1283 1305 * there may be existing anon slots where we want to extend
1284 1306 * the segment and we wouldn't know what to do with them
1285 1307 * (e.g., for tmpfs right thing is to just leave them there,
1286 1308 * for /dev/zero they should be cleared out).
1287 1309 */
1288 1310 if (svd1->type == MAP_SHARED)
1289 1311 return (-1);
1290 1312
1291 1313 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER);
1292 1314 if (amp1->refcnt > 1) {
1293 1315 ANON_LOCK_EXIT(&1->a_rwlock);
1294 1316 return (-1);
1295 1317 }
1296 1318 newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
1297 1319 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);
1298 1320
1299 1321 if (newpgs == 0) {
1300 1322 ANON_LOCK_EXIT(&1->a_rwlock);
1301 1323 return (-1);
1302 1324 }
1303 1325 amp1->size = ptob(newpgs);
1304 1326 ANON_LOCK_EXIT(&1->a_rwlock);
1305 1327 }
1306 1328 if (svd1->vpage != NULL) {
1307 1329 struct vpage *vp, *evp;
1308 1330 new_vpage =
1309 1331 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1310 1332 KM_NOSLEEP);
1311 1333 if (new_vpage == NULL)
1312 1334 return (-1);
1313 1335 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
1314 1336 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
1315 1337 svd1->vpage = new_vpage;
1316 1338
1317 1339 vp = new_vpage + seg_pages(seg1);
1318 1340 evp = vp + seg_pages(seg2);
1319 1341 for (; vp < evp; vp++)
1320 1342 VPP_SETPROT(vp, a->prot);
1321 1343 if (svd1->pageswap && swresv) {
1322 1344 ASSERT(!(svd1->flags & MAP_NORESERVE));
1323 1345 ASSERT(swresv == seg2->s_size);
1324 1346 vp = new_vpage + seg_pages(seg1);
1325 1347 for (; vp < evp; vp++) {
1326 1348 VPP_SETSWAPRES(vp);
1327 1349 }
1328 1350 }
1329 1351 }
1330 1352 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
1331 1353 size = seg2->s_size;
1332 1354 seg_free(seg2);
1333 1355 seg1->s_size += size;
1334 1356 svd1->swresv += swresv;
1335 1357 if (svd1->pageprot && (a->prot & PROT_WRITE) &&
1336 1358 svd1->type == MAP_SHARED && svd1->vp != NULL &&
1337 1359 (svd1->vp->v_flag & VVMEXEC)) {
1338 1360 ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
1339 1361 segvn_inval_trcache(svd1->vp);
1340 1362 }
1341 1363 return (0);
1342 1364 }
1343 1365
1344 1366 /*
1345 1367 * Extend the next segment (seg2) to include the
1346 1368 * new segment (seg1 + a), if possible.
1347 1369 * Return 0 on success.
1348 1370 */
1349 1371 static int
1350 1372 segvn_extend_next(
1351 1373 struct seg *seg1,
1352 1374 struct seg *seg2,
1353 1375 struct segvn_crargs *a,
1354 1376 size_t swresv)
1355 1377 {
1356 1378 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
1357 1379 size_t size;
1358 1380 struct anon_map *amp2;
1359 1381 struct vpage *new_vpage;
1360 1382
1361 1383 /*
1362 1384 * We don't need any segment level locks for "segvn" data
1363 1385 * since the address space is "write" locked.
1364 1386 */
1365 1387 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));
1366 1388
1367 1389 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1368 1390 return (-1);
1369 1391 }
1370 1392
1371 1393 /* first segment is new, try to extend second */
1372 1394 /* XXX - should also check cred */
1373 1395 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
1374 1396 (!svd2->pageprot && (svd2->prot != a->prot)) ||
1375 1397 svd2->type != a->type || svd2->flags != a->flags ||
1376 1398 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
1377 1399 return (-1);
1378 1400 /* vp == NULL implies zfod, offset doesn't matter */
1379 1401 if (svd2->vp != NULL &&
1380 1402 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
1381 1403 return (-1);
1382 1404
1383 1405 if (svd2->tr_state != SEGVN_TR_OFF) {
1384 1406 return (-1);
1385 1407 }
1386 1408
1387 1409 amp2 = svd2->amp;
1388 1410 if (amp2) {
1389 1411 pgcnt_t newpgs;
1390 1412
1391 1413 /*
1392 1414 * Segment has private pages, can data structures
1393 1415 * be expanded?
1394 1416 *
1395 1417 * Acquire the anon_map lock to prevent it from changing,
1396 1418 * if it is shared. This ensures that the anon_map
1397 1419 * will not change while a thread which has a read/write
1398 1420 * lock on an address space references it.
1399 1421 *
1400 1422 * XXX - Don't need the anon_map lock at all if "refcnt"
1401 1423 * is 1.
1402 1424 */
1403 1425 if (svd2->type == MAP_SHARED)
1404 1426 return (-1);
1405 1427
1406 1428 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER);
1407 1429 if (amp2->refcnt > 1) {
1408 1430 ANON_LOCK_EXIT(&2->a_rwlock);
1409 1431 return (-1);
1410 1432 }
1411 1433 newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
1412 1434 btop(seg2->s_size), btop(seg1->s_size),
1413 1435 ANON_NOSLEEP | ANON_GROWDOWN);
1414 1436
1415 1437 if (newpgs == 0) {
1416 1438 ANON_LOCK_EXIT(&2->a_rwlock);
1417 1439 return (-1);
1418 1440 }
1419 1441 amp2->size = ptob(newpgs);
1420 1442 ANON_LOCK_EXIT(&2->a_rwlock);
1421 1443 }
1422 1444 if (svd2->vpage != NULL) {
1423 1445 struct vpage *vp, *evp;
1424 1446 new_vpage =
1425 1447 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1426 1448 KM_NOSLEEP);
1427 1449 if (new_vpage == NULL) {
1428 1450 /* Not merging segments so adjust anon_index back */
1429 1451 if (amp2)
1430 1452 svd2->anon_index += seg_pages(seg1);
1431 1453 return (-1);
1432 1454 }
1433 1455 bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
1434 1456 vpgtob(seg_pages(seg2)));
1435 1457 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
1436 1458 svd2->vpage = new_vpage;
1437 1459
1438 1460 vp = new_vpage;
1439 1461 evp = vp + seg_pages(seg1);
1440 1462 for (; vp < evp; vp++)
1441 1463 VPP_SETPROT(vp, a->prot);
1442 1464 if (svd2->pageswap && swresv) {
1443 1465 ASSERT(!(svd2->flags & MAP_NORESERVE));
1444 1466 ASSERT(swresv == seg1->s_size);
1445 1467 vp = new_vpage;
1446 1468 for (; vp < evp; vp++) {
1447 1469 VPP_SETSWAPRES(vp);
1448 1470 }
1449 1471 }
1450 1472 }
1451 1473 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
1452 1474 size = seg1->s_size;
1453 1475 seg_free(seg1);
1454 1476 seg2->s_size += size;
1455 1477 seg2->s_base -= size;
1456 1478 svd2->offset -= size;
1457 1479 svd2->swresv += swresv;
1458 1480 if (svd2->pageprot && (a->prot & PROT_WRITE) &&
1459 1481 svd2->type == MAP_SHARED && svd2->vp != NULL &&
1460 1482 (svd2->vp->v_flag & VVMEXEC)) {
1461 1483 ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
1462 1484 segvn_inval_trcache(svd2->vp);
1463 1485 }
1464 1486 return (0);
1465 1487 }
1466 1488
1467 1489 static int
1468 1490 segvn_dup(struct seg *seg, struct seg *newseg)
1469 1491 {
1470 1492 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1471 1493 struct segvn_data *newsvd;
1472 1494 pgcnt_t npages = seg_pages(seg);
1473 1495 int error = 0;
1474 1496 uint_t prot;
1475 1497 size_t len;
1476 1498 struct anon_map *amp;
1477 1499
1478 1500 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1479 1501 ASSERT(newseg->s_as->a_proc->p_parent == curproc);
1480 1502
1481 1503 /*
1482 1504 * If segment has anon reserved, reserve more for the new seg.
1483 1505 * For a MAP_NORESERVE segment swresv will be a count of all the
1484 1506 * allocated anon slots; thus we reserve for the child as many slots
1485 1507 * as the parent has allocated. This semantic prevents the child or
1486 1508 * parent from dying during a copy-on-write fault caused by trying
1487 1509 * to write a shared pre-existing anon page.
1488 1510 */
1489 1511 if ((len = svd->swresv) != 0) {
1490 1512 if (anon_resv(svd->swresv) == 0)
1491 1513 return (ENOMEM);
1492 1514
1493 1515 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
1494 1516 seg, len, 0);
1495 1517 }
1496 1518
1497 1519 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
1498 1520
1499 1521 newseg->s_ops = &segvn_ops;
1500 1522 newseg->s_data = (void *)newsvd;
1501 1523 newseg->s_szc = seg->s_szc;
1502 1524
1503 1525 newsvd->seg = newseg;
1504 1526 if ((newsvd->vp = svd->vp) != NULL) {
1505 1527 VN_HOLD(svd->vp);
1506 1528 if (svd->type == MAP_SHARED)
1507 1529 lgrp_shm_policy_init(NULL, svd->vp);
1508 1530 }
1509 1531 newsvd->offset = svd->offset;
1510 1532 newsvd->prot = svd->prot;
1511 1533 newsvd->maxprot = svd->maxprot;
1512 1534 newsvd->pageprot = svd->pageprot;
1513 1535 newsvd->type = svd->type;
1514 1536 newsvd->cred = svd->cred;
1515 1537 crhold(newsvd->cred);
1516 1538 newsvd->advice = svd->advice;
1517 1539 newsvd->pageadvice = svd->pageadvice;
1518 1540 newsvd->swresv = svd->swresv;
1519 1541 newsvd->pageswap = svd->pageswap;
1520 1542 newsvd->flags = svd->flags;
1521 1543 newsvd->softlockcnt = 0;
1522 1544 newsvd->softlockcnt_sbase = 0;
1523 1545 newsvd->softlockcnt_send = 0;
1524 1546 newsvd->policy_info = svd->policy_info;
1525 1547 newsvd->rcookie = HAT_INVALID_REGION_COOKIE;
1526 1548
1527 1549 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
1528 1550 /*
1529 1551 * Not attaching to a shared anon object.
1530 1552 */
1531 1553 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
1532 1554 svd->tr_state == SEGVN_TR_OFF);
1533 1555 if (svd->tr_state == SEGVN_TR_ON) {
1534 1556 ASSERT(newsvd->vp != NULL && amp != NULL);
1535 1557 newsvd->tr_state = SEGVN_TR_INIT;
1536 1558 } else {
1537 1559 newsvd->tr_state = svd->tr_state;
1538 1560 }
1539 1561 newsvd->amp = NULL;
1540 1562 newsvd->anon_index = 0;
1541 1563 } else {
1542 1564 /* regions for now are only used on pure vnode segments */
1543 1565 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
1544 1566 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1545 1567 newsvd->tr_state = SEGVN_TR_OFF;
1546 1568 if (svd->type == MAP_SHARED) {
1547 1569 newsvd->amp = amp;
1548 1570 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
1549 1571 amp->refcnt++;
1550 1572 ANON_LOCK_EXIT(&->a_rwlock);
1551 1573 newsvd->anon_index = svd->anon_index;
1552 1574 } else {
1553 1575 int reclaim = 1;
1554 1576
1555 1577 /*
1556 1578 * Allocate and initialize new anon_map structure.
1557 1579 */
1558 1580 newsvd->amp = anonmap_alloc(newseg->s_size, 0,
1559 1581 ANON_SLEEP);
1560 1582 newsvd->amp->a_szc = newseg->s_szc;
1561 1583 newsvd->anon_index = 0;
1562 1584
1563 1585 /*
1564 1586 * We don't have to acquire the anon_map lock
1565 1587 * for the new segment (since it belongs to an
1566 1588 * address space that is still not associated
1567 1589 * with any process), or the segment in the old
1568 1590 * address space (since all threads in it
1569 1591 * are stopped while duplicating the address space).
1570 1592 */
1571 1593
1572 1594 /*
1573 1595 * The goal of the following code is to make sure that
1574 1596 * softlocked pages do not end up as copy on write
1575 1597 * pages. This would cause problems where one
1576 1598 * thread writes to a page that is COW and a different
1577 1599 * thread in the same process has softlocked it. The
1578 1600 * softlock lock would move away from this process
1579 1601 * because the write would cause this process to get
1580 1602 * a copy (without the softlock).
1581 1603 *
1582 1604 * The strategy here is to just break the
1583 1605 * sharing on pages that could possibly be
1584 1606 * softlocked.
1585 1607 */
1586 1608 retry:
1587 1609 if (svd->softlockcnt) {
1588 1610 struct anon *ap, *newap;
1589 1611 size_t i;
1590 1612 uint_t vpprot;
1591 1613 page_t *anon_pl[1+1], *pp;
1592 1614 caddr_t addr;
1593 1615 ulong_t old_idx = svd->anon_index;
1594 1616 ulong_t new_idx = 0;
1595 1617
1596 1618 /*
1597 1619 				 * The softlock count might be non-zero
1598 1620 * because some pages are still stuck in the
1599 1621 * cache for lazy reclaim. Flush the cache
1600 1622 * now. This should drop the count to zero.
1601 1623 				 * (or there really is I/O going on to these
1602 1624 				 * pages). Note that we hold the writer's lock so
1603 1625 * nothing gets inserted during the flush.
1604 1626 */
1605 1627 if (reclaim == 1) {
1606 1628 segvn_purge(seg);
1607 1629 reclaim = 0;
1608 1630 goto retry;
1609 1631 }
1610 1632 i = btopr(seg->s_size);
1611 1633 addr = seg->s_base;
1612 1634 /*
1613 1635 * XXX break cow sharing using PAGESIZE
1614 1636 * pages. They will be relocated into larger
1615 1637 * pages at fault time.
1616 1638 */
1617 1639 while (i-- > 0) {
1618 1640 if (ap = anon_get_ptr(amp->ahp,
1619 1641 old_idx)) {
1620 1642 error = anon_getpage(&ap,
1621 1643 &vpprot, anon_pl, PAGESIZE,
1622 1644 seg, addr, S_READ,
1623 1645 svd->cred);
1624 1646 if (error) {
1625 1647 newsvd->vpage = NULL;
1626 1648 goto out;
1627 1649 }
1628 1650 /*
1629 1651 * prot need not be computed
1630 1652 * below 'cause anon_private is
1631 1653 * going to ignore it anyway
1632 1654 * as child doesn't inherit
1633 1655 * pagelock from parent.
1634 1656 */
1635 1657 prot = svd->pageprot ?
1636 1658 VPP_PROT(
1637 1659 &svd->vpage[
1638 1660 seg_page(seg, addr)])
1639 1661 : svd->prot;
1640 1662 pp = anon_private(&newap,
1641 1663 newseg, addr, prot,
1642 1664 anon_pl[0], 0,
1643 1665 newsvd->cred);
1644 1666 if (pp == NULL) {
1645 1667 /* no mem abort */
1646 1668 newsvd->vpage = NULL;
1647 1669 error = ENOMEM;
1648 1670 goto out;
1649 1671 }
1650 1672 (void) anon_set_ptr(
1651 1673 newsvd->amp->ahp, new_idx,
1652 1674 newap, ANON_SLEEP);
1653 1675 page_unlock(pp);
1654 1676 }
1655 1677 addr += PAGESIZE;
1656 1678 old_idx++;
1657 1679 new_idx++;
1658 1680 }
1659 1681 } else { /* common case */
1660 1682 if (seg->s_szc != 0) {
1661 1683 /*
1662 1684 * If at least one of anon slots of a
1663 1685 * large page exists then make sure
1664 1686 * all anon slots of a large page
1665 1687 * exist to avoid partial cow sharing
1666 1688 * of a large page in the future.
1667 1689 */
1668 1690 anon_dup_fill_holes(amp->ahp,
1669 1691 svd->anon_index, newsvd->amp->ahp,
1670 1692 0, seg->s_size, seg->s_szc,
1671 1693 svd->vp != NULL);
1672 1694 } else {
1673 1695 anon_dup(amp->ahp, svd->anon_index,
1674 1696 newsvd->amp->ahp, 0, seg->s_size);
1675 1697 }
1676 1698
1677 1699 hat_clrattr(seg->s_as->a_hat, seg->s_base,
1678 1700 seg->s_size, PROT_WRITE);
1679 1701 }
1680 1702 }
1681 1703 }
1682 1704 /*
1683 1705 * If necessary, create a vpage structure for the new segment.
1684 1706 * Do not copy any page lock indications.
1685 1707 */
1686 1708 if (svd->vpage != NULL) {
1687 1709 uint_t i;
1688 1710 struct vpage *ovp = svd->vpage;
1689 1711 struct vpage *nvp;
1690 1712
1691 1713 nvp = newsvd->vpage =
1692 1714 kmem_alloc(vpgtob(npages), KM_SLEEP);
1693 1715 for (i = 0; i < npages; i++) {
1694 1716 *nvp = *ovp++;
1695 1717 VPP_CLRPPLOCK(nvp++);
1696 1718 }
1697 1719 } else
1698 1720 newsvd->vpage = NULL;
1699 1721
1700 1722 /* Inform the vnode of the new mapping */
1701 1723 if (newsvd->vp != NULL) {
1702 1724 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
1703 1725 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
1704 1726 newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
1705 1727 }
1706 1728 out:
1707 1729 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1708 1730 ASSERT(newsvd->amp == NULL);
1709 1731 ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
1710 1732 newsvd->rcookie = svd->rcookie;
1711 1733 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
1712 1734 }
1713 1735 return (error);
1714 1736 }
1715 1737
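A minimal user-space sketch of the COW-breaking strategy segvn_dup() applies above when softlockcnt is non-zero (all names below are hypothetical model code, not kernel code): pages that might be softlocked get a private copy for the child up front, so a later write by the parent cannot move the parent away from the page an outstanding I/O is still targeting.

#include <stdlib.h>
#include <string.h>

#define	MODEL_PAGESIZE	4096

struct model_page {
	char	data[MODEL_PAGESIZE];
	int	softlocked;		/* analogue of a non-zero softlockcnt */
};

/*
 * Duplicate one parent page for the child: copy it eagerly if it may be
 * softlocked, otherwise let the child share it copy-on-write.
 */
static struct model_page *
model_dup_page(struct model_page *ppg)
{
	struct model_page *npg;

	if (!ppg->softlocked)
		return (ppg);			/* share now, COW later */

	if ((npg = malloc(sizeof (*npg))) == NULL)
		return (NULL);			/* analogue of the ENOMEM abort */
	memcpy(npg->data, ppg->data, MODEL_PAGESIZE);
	npg->softlocked = 0;			/* the softlock stays with the parent */
	return (npg);
}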
1716 1738
1717 1739 /*
1718 1740 * callback function to invoke free_vp_pages() for only those pages actually
1719 1741 * processed by the HAT when a shared region is destroyed.
1720 1742 */
1721 1743 extern int free_pages;
1722 1744
1723 1745 static void
1724 1746 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
1725 1747 size_t r_size, void *r_obj, u_offset_t r_objoff)
1726 1748 {
1727 1749 u_offset_t off;
1728 1750 size_t len;
1729 1751 vnode_t *vp = (vnode_t *)r_obj;
1730 1752
1731 1753 ASSERT(eaddr > saddr);
1732 1754 ASSERT(saddr >= r_saddr);
1733 1755 ASSERT(saddr < r_saddr + r_size);
1734 1756 ASSERT(eaddr > r_saddr);
1735 1757 ASSERT(eaddr <= r_saddr + r_size);
1736 1758 ASSERT(vp != NULL);
1737 1759
1738 1760 if (!free_pages) {
1739 1761 return;
1740 1762 }
1741 1763
1742 1764 len = eaddr - saddr;
1743 1765 off = (saddr - r_saddr) + r_objoff;
1744 1766 free_vp_pages(vp, off, len);
1745 1767 }
1746 1768
1747 1769 /*
1748 1770 * callback function used by segvn_unmap to invoke free_vp_pages() for only
1749 1771 * those pages actually processed by the HAT
1750 1772 */
1751 1773 static void
1752 1774 segvn_hat_unload_callback(hat_callback_t *cb)
1753 1775 {
1754 1776 struct seg *seg = cb->hcb_data;
1755 1777 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1756 1778 size_t len;
1757 1779 u_offset_t off;
1758 1780
1759 1781 ASSERT(svd->vp != NULL);
1760 1782 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
1761 1783 ASSERT(cb->hcb_start_addr >= seg->s_base);
1762 1784
1763 1785 len = cb->hcb_end_addr - cb->hcb_start_addr;
1764 1786 off = cb->hcb_start_addr - seg->s_base;
1765 1787 free_vp_pages(svd->vp, svd->offset + off, len);
1766 1788 }
1767 1789
1768 1790 /*
1769 1791 * This function determines the number of bytes of swap reserved by
1770 1792 * a segment for which per-page accounting is present. It is used to
1771 1793 * calculate the correct value of a segvn_data's swresv.
1772 1794 */
1773 1795 static size_t
1774 1796 segvn_count_swap_by_vpages(struct seg *seg)
1775 1797 {
1776 1798 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1777 1799 struct vpage *vp, *evp;
1778 1800 size_t nswappages = 0;
1779 1801
1780 1802 ASSERT(svd->pageswap);
1781 1803 ASSERT(svd->vpage != NULL);
1782 1804
1783 1805 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
1784 1806
1785 1807 for (vp = svd->vpage; vp < evp; vp++) {
1786 1808 if (VPP_ISSWAPRES(vp))
1787 1809 nswappages++;
1788 1810 }
1789 1811
1790 1812 return (nswappages << PAGESHIFT);
1791 1813 }
1792 1814
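segvn_unmap() below relies on this recount in its per-page swap accounting path. A minimal sketch of that arithmetic, assuming a simplified vpage model (the model_* names are hypothetical, not kernel code): the amount to unreserve after trimming a segment is the old reservation minus a fresh recount, not the unmapped length.

#include <assert.h>
#include <stddef.h>

#define	MODEL_PAGESHIFT	12

/* model of a vpage array: non-zero entries have swap reserved */
static size_t
model_count_swap(const unsigned char *swapres, size_t npages)
{
	size_t nswappages = 0;
	size_t i;

	for (i = 0; i < npages; i++) {
		if (swapres[i] != 0)
			nswappages++;
	}
	return (nswappages << MODEL_PAGESHIFT);
}

/* bytes to hand back to the swap reservation after an unmap */
static size_t
model_unreserve_bytes(size_t oswresv, const unsigned char *swapres,
    size_t npages)
{
	size_t newresv = model_count_swap(swapres, npages);

	assert(oswresv >= newresv);
	return (oswresv - newresv);
}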
1793 1815 static int
1794 1816 segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
1795 1817 {
1796 1818 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1797 1819 struct segvn_data *nsvd;
1798 1820 struct seg *nseg;
1799 1821 struct anon_map *amp;
1800 1822 pgcnt_t opages; /* old segment size in pages */
1801 1823 pgcnt_t npages; /* new segment size in pages */
1802 1824 pgcnt_t dpages; /* pages being deleted (unmapped) */
1803 1825 hat_callback_t callback; /* used for free_vp_pages() */
1804 1826 hat_callback_t *cbp = NULL;
1805 1827 caddr_t nbase;
1806 1828 size_t nsize;
1807 1829 size_t oswresv;
1808 1830 int reclaim = 1;
1809 1831
1810 1832 /*
1811 1833 * We don't need any segment level locks for "segvn" data
1812 1834 * since the address space is "write" locked.
1813 1835 */
1814 1836 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1815 1837
1816 1838 /*
1817 1839 * Fail the unmap if pages are SOFTLOCKed through this mapping.
1818 1840 * softlockcnt is protected from change by the as write lock.
1819 1841 */
1820 1842 retry:
1821 1843 if (svd->softlockcnt > 0) {
1822 1844 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1823 1845
1824 1846 /*
1825 1847 		 * If this is a shared segment, a non-zero softlockcnt
1826 1848 * means locked pages are still in use.
1827 1849 */
1828 1850 if (svd->type == MAP_SHARED) {
1829 1851 return (EAGAIN);
1830 1852 }
1831 1853
1832 1854 /*
1833 1855 		 * Since we hold the writer's lock, nobody can fill
1834 1856 * the cache during the purge. The flush either succeeds
1835 1857 * or we still have pending I/Os.
1836 1858 */
1837 1859 if (reclaim == 1) {
1838 1860 segvn_purge(seg);
1839 1861 reclaim = 0;
1840 1862 goto retry;
1841 1863 }
1842 1864 return (EAGAIN);
1843 1865 }
1844 1866
1845 1867 /*
1846 1868 * Check for bad sizes
1847 1869 */
1848 1870 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
1849 1871 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
1850 1872 panic("segvn_unmap");
1851 1873 /*NOTREACHED*/
1852 1874 }
1853 1875
1854 1876 if (seg->s_szc != 0) {
1855 1877 size_t pgsz = page_get_pagesize(seg->s_szc);
1856 1878 int err;
1857 1879 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
1858 1880 ASSERT(seg->s_base != addr || seg->s_size != len);
1859 1881 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1860 1882 ASSERT(svd->amp == NULL);
1861 1883 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1862 1884 hat_leave_region(seg->s_as->a_hat,
1863 1885 svd->rcookie, HAT_REGION_TEXT);
1864 1886 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1865 1887 /*
1866 1888 * could pass a flag to segvn_demote_range()
1867 1889 * below to tell it not to do any unloads but
1868 1890 * this case is rare enough to not bother for
1869 1891 * now.
1870 1892 */
1871 1893 } else if (svd->tr_state == SEGVN_TR_INIT) {
1872 1894 svd->tr_state = SEGVN_TR_OFF;
1873 1895 } else if (svd->tr_state == SEGVN_TR_ON) {
1874 1896 ASSERT(svd->amp != NULL);
1875 1897 segvn_textunrepl(seg, 1);
1876 1898 ASSERT(svd->amp == NULL);
1877 1899 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1878 1900 }
1879 1901 VM_STAT_ADD(segvnvmstats.demoterange[0]);
1880 1902 err = segvn_demote_range(seg, addr, len, SDR_END, 0);
1881 1903 if (err == 0) {
1882 1904 return (IE_RETRY);
1883 1905 }
1884 1906 return (err);
1885 1907 }
1886 1908 }
1887 1909
1888 1910 /* Inform the vnode of the unmapping. */
1889 1911 if (svd->vp) {
1890 1912 int error;
1891 1913
1892 1914 error = VOP_DELMAP(svd->vp,
1893 1915 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
1894 1916 seg->s_as, addr, len, svd->prot, svd->maxprot,
1895 1917 svd->type, svd->cred, NULL);
1896 1918
1897 1919 if (error == EAGAIN)
1898 1920 return (error);
1899 1921 }
1900 1922
1901 1923 /*
1902 1924 * Remove any page locks set through this mapping.
1903 1925 * If text replication is not off no page locks could have been
1904 1926 * established via this mapping.
1905 1927 */
1906 1928 if (svd->tr_state == SEGVN_TR_OFF) {
1907 1929 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
1908 1930 }
1909 1931
1910 1932 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1911 1933 ASSERT(svd->amp == NULL);
1912 1934 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1913 1935 ASSERT(svd->type == MAP_PRIVATE);
1914 1936 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
1915 1937 HAT_REGION_TEXT);
1916 1938 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1917 1939 } else if (svd->tr_state == SEGVN_TR_ON) {
1918 1940 ASSERT(svd->amp != NULL);
1919 1941 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
1920 1942 segvn_textunrepl(seg, 1);
1921 1943 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
1922 1944 } else {
1923 1945 if (svd->tr_state != SEGVN_TR_OFF) {
1924 1946 ASSERT(svd->tr_state == SEGVN_TR_INIT);
1925 1947 svd->tr_state = SEGVN_TR_OFF;
1926 1948 }
1927 1949 /*
1928 1950 * Unload any hardware translations in the range to be taken
1929 1951 * out. Use a callback to invoke free_vp_pages() effectively.
1930 1952 */
1931 1953 if (svd->vp != NULL && free_pages != 0) {
1932 1954 callback.hcb_data = seg;
1933 1955 callback.hcb_function = segvn_hat_unload_callback;
1934 1956 cbp = &callback;
1935 1957 }
1936 1958 hat_unload_callback(seg->s_as->a_hat, addr, len,
1937 1959 HAT_UNLOAD_UNMAP, cbp);
1938 1960
1939 1961 if (svd->type == MAP_SHARED && svd->vp != NULL &&
1940 1962 (svd->vp->v_flag & VVMEXEC) &&
1941 1963 ((svd->prot & PROT_WRITE) || svd->pageprot)) {
1942 1964 segvn_inval_trcache(svd->vp);
1943 1965 }
1944 1966 }
1945 1967
1946 1968 /*
1947 1969 * Check for entire segment
1948 1970 */
1949 1971 if (addr == seg->s_base && len == seg->s_size) {
1950 1972 seg_free(seg);
1951 1973 return (0);
1952 1974 }
1953 1975
1954 1976 opages = seg_pages(seg);
1955 1977 dpages = btop(len);
1956 1978 npages = opages - dpages;
1957 1979 amp = svd->amp;
1958 1980 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);
1959 1981
1960 1982 /*
1961 1983 * Check for beginning of segment
1962 1984 */
1963 1985 if (addr == seg->s_base) {
1964 1986 if (svd->vpage != NULL) {
1965 1987 size_t nbytes;
1966 1988 struct vpage *ovpage;
1967 1989
1968 1990 ovpage = svd->vpage; /* keep pointer to vpage */
1969 1991
1970 1992 nbytes = vpgtob(npages);
1971 1993 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
1972 1994 bcopy(&ovpage[dpages], svd->vpage, nbytes);
1973 1995
1974 1996 /* free up old vpage */
1975 1997 kmem_free(ovpage, vpgtob(opages));
1976 1998 }
1977 1999 if (amp != NULL) {
1978 2000 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
1979 2001 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
1980 2002 /*
1981 2003 * Shared anon map is no longer in use. Before
1982 2004 * freeing its pages purge all entries from
1983 2005 * pcache that belong to this amp.
1984 2006 */
1985 2007 if (svd->type == MAP_SHARED) {
1986 2008 ASSERT(amp->refcnt == 1);
1987 2009 ASSERT(svd->softlockcnt == 0);
1988 2010 anonmap_purge(amp);
1989 2011 }
1990 2012 /*
1991 2013 * Free up now unused parts of anon_map array.
1992 2014 */
1993 2015 if (amp->a_szc == seg->s_szc) {
1994 2016 if (seg->s_szc != 0) {
1995 2017 anon_free_pages(amp->ahp,
1996 2018 svd->anon_index, len,
1997 2019 seg->s_szc);
1998 2020 } else {
1999 2021 anon_free(amp->ahp,
2000 2022 svd->anon_index,
2001 2023 len);
2002 2024 }
2003 2025 } else {
2004 2026 ASSERT(svd->type == MAP_SHARED);
2005 2027 ASSERT(amp->a_szc > seg->s_szc);
2006 2028 anon_shmap_free_pages(amp,
2007 2029 svd->anon_index, len);
2008 2030 }
2009 2031
2010 2032 /*
2011 2033 * Unreserve swap space for the
2012 2034 * unmapped chunk of this segment in
2013 2035 * case it's MAP_SHARED
2014 2036 */
2015 2037 if (svd->type == MAP_SHARED) {
2016 2038 anon_unresv_zone(len,
2017 2039 seg->s_as->a_proc->p_zone);
2018 2040 amp->swresv -= len;
2019 2041 }
2020 2042 }
2021 2043 ANON_LOCK_EXIT(&->a_rwlock);
2022 2044 svd->anon_index += dpages;
2023 2045 }
2024 2046 if (svd->vp != NULL)
2025 2047 svd->offset += len;
2026 2048
2027 2049 seg->s_base += len;
2028 2050 seg->s_size -= len;
2029 2051
2030 2052 if (svd->swresv) {
2031 2053 if (svd->flags & MAP_NORESERVE) {
2032 2054 ASSERT(amp);
2033 2055 oswresv = svd->swresv;
2034 2056
2035 2057 svd->swresv = ptob(anon_pages(amp->ahp,
2036 2058 svd->anon_index, npages));
2037 2059 anon_unresv_zone(oswresv - svd->swresv,
2038 2060 seg->s_as->a_proc->p_zone);
2039 2061 if (SEG_IS_PARTIAL_RESV(seg))
2040 2062 seg->s_as->a_resvsize -= oswresv -
2041 2063 svd->swresv;
2042 2064 } else {
2043 2065 size_t unlen;
2044 2066
2045 2067 if (svd->pageswap) {
2046 2068 oswresv = svd->swresv;
2047 2069 svd->swresv =
2048 2070 segvn_count_swap_by_vpages(seg);
2049 2071 ASSERT(oswresv >= svd->swresv);
2050 2072 unlen = oswresv - svd->swresv;
2051 2073 } else {
2052 2074 svd->swresv -= len;
2053 2075 ASSERT(svd->swresv == seg->s_size);
2054 2076 unlen = len;
2055 2077 }
2056 2078 anon_unresv_zone(unlen,
2057 2079 seg->s_as->a_proc->p_zone);
2058 2080 }
2059 2081 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2060 2082 seg, len, 0);
2061 2083 }
2062 2084
2063 2085 return (0);
2064 2086 }
2065 2087
2066 2088 /*
2067 2089 * Check for end of segment
2068 2090 */
2069 2091 if (addr + len == seg->s_base + seg->s_size) {
2070 2092 if (svd->vpage != NULL) {
2071 2093 size_t nbytes;
2072 2094 struct vpage *ovpage;
2073 2095
2074 2096 ovpage = svd->vpage; /* keep pointer to vpage */
2075 2097
2076 2098 nbytes = vpgtob(npages);
2077 2099 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2078 2100 bcopy(ovpage, svd->vpage, nbytes);
2079 2101
2080 2102 /* free up old vpage */
2081 2103 kmem_free(ovpage, vpgtob(opages));
2082 2104
2083 2105 }
2084 2106 if (amp != NULL) {
2085 2107 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
2086 2108 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2087 2109 /*
2088 2110 * Free up now unused parts of anon_map array.
2089 2111 */
2090 2112 ulong_t an_idx = svd->anon_index + npages;
2091 2113
2092 2114 /*
2093 2115 * Shared anon map is no longer in use. Before
2094 2116 * freeing its pages purge all entries from
2095 2117 * pcache that belong to this amp.
2096 2118 */
2097 2119 if (svd->type == MAP_SHARED) {
2098 2120 ASSERT(amp->refcnt == 1);
2099 2121 ASSERT(svd->softlockcnt == 0);
2100 2122 anonmap_purge(amp);
2101 2123 }
2102 2124
2103 2125 if (amp->a_szc == seg->s_szc) {
2104 2126 if (seg->s_szc != 0) {
2105 2127 anon_free_pages(amp->ahp,
2106 2128 an_idx, len,
2107 2129 seg->s_szc);
2108 2130 } else {
2109 2131 anon_free(amp->ahp, an_idx,
2110 2132 len);
2111 2133 }
2112 2134 } else {
2113 2135 ASSERT(svd->type == MAP_SHARED);
2114 2136 ASSERT(amp->a_szc > seg->s_szc);
2115 2137 anon_shmap_free_pages(amp,
2116 2138 an_idx, len);
2117 2139 }
2118 2140
2119 2141 /*
2120 2142 * Unreserve swap space for the
2121 2143 * unmapped chunk of this segment in
2122 2144 * case it's MAP_SHARED
2123 2145 */
2124 2146 if (svd->type == MAP_SHARED) {
2125 2147 anon_unresv_zone(len,
2126 2148 seg->s_as->a_proc->p_zone);
2127 2149 amp->swresv -= len;
2128 2150 }
2129 2151 }
2130 2152 ANON_LOCK_EXIT(&->a_rwlock);
2131 2153 }
2132 2154
2133 2155 seg->s_size -= len;
2134 2156
2135 2157 if (svd->swresv) {
2136 2158 if (svd->flags & MAP_NORESERVE) {
2137 2159 ASSERT(amp);
2138 2160 oswresv = svd->swresv;
2139 2161 svd->swresv = ptob(anon_pages(amp->ahp,
2140 2162 svd->anon_index, npages));
2141 2163 anon_unresv_zone(oswresv - svd->swresv,
2142 2164 seg->s_as->a_proc->p_zone);
2143 2165 if (SEG_IS_PARTIAL_RESV(seg))
2144 2166 seg->s_as->a_resvsize -= oswresv -
2145 2167 svd->swresv;
2146 2168 } else {
2147 2169 size_t unlen;
2148 2170
2149 2171 if (svd->pageswap) {
2150 2172 oswresv = svd->swresv;
2151 2173 svd->swresv =
2152 2174 segvn_count_swap_by_vpages(seg);
2153 2175 ASSERT(oswresv >= svd->swresv);
2154 2176 unlen = oswresv - svd->swresv;
2155 2177 } else {
2156 2178 svd->swresv -= len;
2157 2179 ASSERT(svd->swresv == seg->s_size);
2158 2180 unlen = len;
2159 2181 }
2160 2182 anon_unresv_zone(unlen,
2161 2183 seg->s_as->a_proc->p_zone);
2162 2184 }
2163 2185 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2164 2186 "anon proc:%p %lu %u", seg, len, 0);
2165 2187 }
2166 2188
2167 2189 return (0);
2168 2190 }
2169 2191
2170 2192 /*
2171 2193 * The section to go is in the middle of the segment,
2172 2194 * have to make it into two segments. nseg is made for
2173 2195 * the high end while seg is cut down at the low end.
2174 2196 */
2175 2197 nbase = addr + len; /* new seg base */
2176 2198 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
2177 2199 seg->s_size = addr - seg->s_base; /* shrink old seg */
2178 2200 nseg = seg_alloc(seg->s_as, nbase, nsize);
2179 2201 if (nseg == NULL) {
2180 2202 panic("segvn_unmap seg_alloc");
2181 2203 /*NOTREACHED*/
2182 2204 }
2183 2205 nseg->s_ops = seg->s_ops;
2184 2206 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
2185 2207 nseg->s_data = (void *)nsvd;
2186 2208 nseg->s_szc = seg->s_szc;
2187 2209 *nsvd = *svd;
2188 2210 nsvd->seg = nseg;
2189 2211 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
2190 2212 nsvd->swresv = 0;
2191 2213 nsvd->softlockcnt = 0;
2192 2214 nsvd->softlockcnt_sbase = 0;
2193 2215 nsvd->softlockcnt_send = 0;
2194 2216 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
2195 2217
2196 2218 if (svd->vp != NULL) {
2197 2219 VN_HOLD(nsvd->vp);
2198 2220 if (nsvd->type == MAP_SHARED)
2199 2221 lgrp_shm_policy_init(NULL, nsvd->vp);
2200 2222 }
2201 2223 crhold(svd->cred);
2202 2224
2203 2225 if (svd->vpage == NULL) {
2204 2226 nsvd->vpage = NULL;
2205 2227 } else {
2206 2228 /* need to split vpage into two arrays */
2207 2229 size_t nbytes;
2208 2230 struct vpage *ovpage;
2209 2231
2210 2232 ovpage = svd->vpage; /* keep pointer to vpage */
2211 2233
2212 2234 npages = seg_pages(seg); /* seg has shrunk */
2213 2235 nbytes = vpgtob(npages);
2214 2236 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2215 2237
2216 2238 bcopy(ovpage, svd->vpage, nbytes);
2217 2239
2218 2240 npages = seg_pages(nseg);
2219 2241 nbytes = vpgtob(npages);
2220 2242 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2221 2243
2222 2244 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);
2223 2245
2224 2246 /* free up old vpage */
2225 2247 kmem_free(ovpage, vpgtob(opages));
2226 2248 }
2227 2249
2228 2250 if (amp == NULL) {
2229 2251 nsvd->amp = NULL;
2230 2252 nsvd->anon_index = 0;
2231 2253 } else {
2232 2254 /*
2233 2255 * Need to create a new anon map for the new segment.
2234 2256 * We'll also allocate a new smaller array for the old
2235 2257 * smaller segment to save space.
2236 2258 */
2237 2259 opages = btop((uintptr_t)(addr - seg->s_base));
2238 2260 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
2239 2261 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2240 2262 /*
2241 2263 * Free up now unused parts of anon_map array.
2242 2264 */
2243 2265 ulong_t an_idx = svd->anon_index + opages;
2244 2266
2245 2267 /*
2246 2268 * Shared anon map is no longer in use. Before
2247 2269 * freeing its pages purge all entries from
2248 2270 * pcache that belong to this amp.
2249 2271 */
2250 2272 if (svd->type == MAP_SHARED) {
2251 2273 ASSERT(amp->refcnt == 1);
2252 2274 ASSERT(svd->softlockcnt == 0);
2253 2275 anonmap_purge(amp);
2254 2276 }
2255 2277
2256 2278 if (amp->a_szc == seg->s_szc) {
2257 2279 if (seg->s_szc != 0) {
2258 2280 anon_free_pages(amp->ahp, an_idx, len,
2259 2281 seg->s_szc);
2260 2282 } else {
2261 2283 anon_free(amp->ahp, an_idx,
2262 2284 len);
2263 2285 }
2264 2286 } else {
2265 2287 ASSERT(svd->type == MAP_SHARED);
2266 2288 ASSERT(amp->a_szc > seg->s_szc);
2267 2289 anon_shmap_free_pages(amp, an_idx, len);
2268 2290 }
2269 2291
2270 2292 /*
2271 2293 * Unreserve swap space for the
2272 2294 * unmapped chunk of this segment in
2273 2295 * case it's MAP_SHARED
2274 2296 */
2275 2297 if (svd->type == MAP_SHARED) {
2276 2298 anon_unresv_zone(len,
2277 2299 seg->s_as->a_proc->p_zone);
2278 2300 amp->swresv -= len;
2279 2301 }
2280 2302 }
2281 2303 nsvd->anon_index = svd->anon_index +
2282 2304 btop((uintptr_t)(nseg->s_base - seg->s_base));
2283 2305 if (svd->type == MAP_SHARED) {
2284 2306 amp->refcnt++;
2285 2307 nsvd->amp = amp;
2286 2308 } else {
2287 2309 struct anon_map *namp;
2288 2310 struct anon_hdr *nahp;
2289 2311
2290 2312 ASSERT(svd->type == MAP_PRIVATE);
2291 2313 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
2292 2314 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
2293 2315 namp->a_szc = seg->s_szc;
2294 2316 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
2295 2317 0, btop(seg->s_size), ANON_SLEEP);
2296 2318 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index,
2297 2319 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
2298 2320 anon_release(amp->ahp, btop(amp->size));
2299 2321 svd->anon_index = 0;
2300 2322 nsvd->anon_index = 0;
2301 2323 amp->ahp = nahp;
2302 2324 amp->size = seg->s_size;
2303 2325 nsvd->amp = namp;
2304 2326 }
2305 2327 ANON_LOCK_EXIT(&->a_rwlock);
2306 2328 }
2307 2329 if (svd->swresv) {
2308 2330 if (svd->flags & MAP_NORESERVE) {
2309 2331 ASSERT(amp);
2310 2332 oswresv = svd->swresv;
2311 2333 svd->swresv = ptob(anon_pages(amp->ahp,
2312 2334 svd->anon_index, btop(seg->s_size)));
2313 2335 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
2314 2336 nsvd->anon_index, btop(nseg->s_size)));
2315 2337 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2316 2338 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
2317 2339 seg->s_as->a_proc->p_zone);
2318 2340 if (SEG_IS_PARTIAL_RESV(seg))
2319 2341 seg->s_as->a_resvsize -= oswresv -
2320 2342 (svd->swresv + nsvd->swresv);
2321 2343 } else {
2322 2344 size_t unlen;
2323 2345
2324 2346 if (svd->pageswap) {
2325 2347 oswresv = svd->swresv;
2326 2348 svd->swresv = segvn_count_swap_by_vpages(seg);
2327 2349 nsvd->swresv = segvn_count_swap_by_vpages(nseg);
2328 2350 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2329 2351 unlen = oswresv - (svd->swresv + nsvd->swresv);
2330 2352 } else {
2331 2353 if (seg->s_size + nseg->s_size + len !=
2332 2354 svd->swresv) {
2333 2355 panic("segvn_unmap: cannot split "
2334 2356 "swap reservation");
2335 2357 /*NOTREACHED*/
2336 2358 }
2337 2359 svd->swresv = seg->s_size;
2338 2360 nsvd->swresv = nseg->s_size;
2339 2361 unlen = len;
2340 2362 }
2341 2363 anon_unresv_zone(unlen,
2342 2364 seg->s_as->a_proc->p_zone);
2343 2365 }
2344 2366 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2345 2367 seg, len, 0);
2346 2368 }
2347 2369
2348 2370 return (0); /* I'm glad that's all over with! */
2349 2371 }
2350 2372
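A small sketch of the middle-split arithmetic used above (hypothetical model types, not kernel code): the low segment keeps [base, addr), the new high segment gets [addr + len, base + size), and everything in between has just been unmapped.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct model_range {
	uintptr_t	base;
	size_t		size;
};

static void
model_split(struct model_range *low, struct model_range *high,
    uintptr_t addr, size_t len)
{
	uintptr_t end = low->base + low->size;

	/* the hole must be strictly inside the original range */
	assert(addr > low->base && addr + len < end);

	high->base = addr + len;		/* nbase */
	high->size = end - high->base;		/* nsize */
	low->size = addr - low->base;		/* shrink the original seg */
}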
2351 2373 static void
2352 2374 segvn_free(struct seg *seg)
2353 2375 {
2354 2376 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2355 2377 pgcnt_t npages = seg_pages(seg);
2356 2378 struct anon_map *amp;
2357 2379 size_t len;
2358 2380
2359 2381 /*
2360 2382 * We don't need any segment level locks for "segvn" data
2361 2383 * since the address space is "write" locked.
2362 2384 */
2363 2385 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2364 2386 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2365 2387
2366 2388 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2367 2389
2368 2390 /*
2369 2391 	 * Be sure to unlock pages. XXX Why do things get freed instead
2370 2392 * of unmapped? XXX
2371 2393 */
2372 2394 (void) segvn_lockop(seg, seg->s_base, seg->s_size,
2373 2395 0, MC_UNLOCK, NULL, 0);
2374 2396
2375 2397 /*
2376 2398 * Deallocate the vpage and anon pointers if necessary and possible.
2377 2399 */
2378 2400 if (svd->vpage != NULL) {
2379 2401 kmem_free(svd->vpage, vpgtob(npages));
2380 2402 svd->vpage = NULL;
2381 2403 }
2382 2404 if ((amp = svd->amp) != NULL) {
2383 2405 /*
2384 2406 * If there are no more references to this anon_map
2385 2407 * structure, then deallocate the structure after freeing
2386 2408 * up all the anon slot pointers that we can.
2387 2409 */
2388 2410 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
2389 2411 ASSERT(amp->a_szc >= seg->s_szc);
2390 2412 if (--amp->refcnt == 0) {
2391 2413 if (svd->type == MAP_PRIVATE) {
2392 2414 /*
2393 2415 * Private - we only need to anon_free
2394 2416 * the part that this segment refers to.
2395 2417 */
2396 2418 if (seg->s_szc != 0) {
2397 2419 anon_free_pages(amp->ahp,
2398 2420 svd->anon_index, seg->s_size,
2399 2421 seg->s_szc);
2400 2422 } else {
2401 2423 anon_free(amp->ahp, svd->anon_index,
2402 2424 seg->s_size);
2403 2425 }
2404 2426 } else {
2405 2427
2406 2428 /*
2407 2429 * Shared anon map is no longer in use. Before
2408 2430 * freeing its pages purge all entries from
2409 2431 * pcache that belong to this amp.
2410 2432 */
2411 2433 ASSERT(svd->softlockcnt == 0);
2412 2434 anonmap_purge(amp);
2413 2435
2414 2436 /*
2415 2437 * Shared - anon_free the entire
2416 2438 * anon_map's worth of stuff and
2417 2439 * release any swap reservation.
2418 2440 */
2419 2441 if (amp->a_szc != 0) {
2420 2442 anon_shmap_free_pages(amp, 0,
2421 2443 amp->size);
2422 2444 } else {
2423 2445 anon_free(amp->ahp, 0, amp->size);
2424 2446 }
2425 2447 if ((len = amp->swresv) != 0) {
2426 2448 anon_unresv_zone(len,
2427 2449 seg->s_as->a_proc->p_zone);
2428 2450 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2429 2451 "anon proc:%p %lu %u", seg, len, 0);
2430 2452 }
2431 2453 }
2432 2454 svd->amp = NULL;
2433 2455 ANON_LOCK_EXIT(&->a_rwlock);
2434 2456 anonmap_free(amp);
2435 2457 } else if (svd->type == MAP_PRIVATE) {
2436 2458 /*
2437 2459 * We had a private mapping which still has
2438 2460 * a held anon_map so just free up all the
2439 2461 * anon slot pointers that we were using.
2440 2462 */
2441 2463 if (seg->s_szc != 0) {
2442 2464 anon_free_pages(amp->ahp, svd->anon_index,
2443 2465 seg->s_size, seg->s_szc);
2444 2466 } else {
2445 2467 anon_free(amp->ahp, svd->anon_index,
2446 2468 seg->s_size);
2447 2469 }
2448 2470 ANON_LOCK_EXIT(&->a_rwlock);
2449 2471 } else {
2450 2472 ANON_LOCK_EXIT(&->a_rwlock);
2451 2473 }
2452 2474 }
2453 2475
2454 2476 /*
2455 2477 * Release swap reservation.
2456 2478 */
2457 2479 if ((len = svd->swresv) != 0) {
2458 2480 anon_unresv_zone(svd->swresv,
2459 2481 seg->s_as->a_proc->p_zone);
2460 2482 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2461 2483 seg, len, 0);
2462 2484 if (SEG_IS_PARTIAL_RESV(seg))
2463 2485 seg->s_as->a_resvsize -= svd->swresv;
2464 2486 svd->swresv = 0;
2465 2487 }
2466 2488 /*
2467 2489 * Release claim on vnode, credentials, and finally free the
2468 2490 * private data.
2469 2491 */
2470 2492 if (svd->vp != NULL) {
2471 2493 if (svd->type == MAP_SHARED)
2472 2494 lgrp_shm_policy_fini(NULL, svd->vp);
2473 2495 VN_RELE(svd->vp);
2474 2496 svd->vp = NULL;
2475 2497 }
2476 2498 crfree(svd->cred);
2477 2499 svd->pageprot = 0;
2478 2500 svd->pageadvice = 0;
2479 2501 svd->pageswap = 0;
2480 2502 svd->cred = NULL;
2481 2503
2482 2504 /*
2483 2505 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
2484 2506 * still working with this segment without holding as lock (in case
2485 2507 * it's called by pcache async thread).
2486 2508 */
2487 2509 ASSERT(svd->softlockcnt == 0);
2488 2510 mutex_enter(&svd->segfree_syncmtx);
2489 2511 mutex_exit(&svd->segfree_syncmtx);
2490 2512
2491 2513 seg->s_data = NULL;
2492 2514 kmem_cache_free(segvn_cache, svd);
2493 2515 }
2494 2516
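The segfree_syncmtx enter/exit pair just above is an empty-critical-section barrier. A user-space sketch of the same idiom (hypothetical, using pthreads): acquiring and immediately releasing the mutex guarantees that any thread still inside the lock has finished before the object is torn down, without otherwise serializing with it.

#include <pthread.h>

static pthread_mutex_t sync_mtx = PTHREAD_MUTEX_INITIALIZER;

/*
 * The async user (the segvn_reclaim() analogue) holds sync_mtx for the
 * whole time it touches the shared object.  The destructor just passes
 * through the lock once before freeing.
 */
static void
barrier_before_free(void)
{
	pthread_mutex_lock(&sync_mtx);
	pthread_mutex_unlock(&sync_mtx);
	/* no thread can still be inside the critical section here */
}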
2495 2517 /*
2496 2518  * Do an F_SOFTUNLOCK call over the range requested. The range must have
2497 2519 * already been F_SOFTLOCK'ed.
2498 2520 * Caller must always match addr and len of a softunlock with a previous
2499 2521 * softlock with exactly the same addr and len.
2500 2522 */
2501 2523 static void
2502 2524 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
2503 2525 {
2504 2526 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2505 2527 page_t *pp;
2506 2528 caddr_t adr;
2507 2529 struct vnode *vp;
2508 2530 u_offset_t offset;
2509 2531 ulong_t anon_index;
2510 2532 struct anon_map *amp;
2511 2533 struct anon *ap = NULL;
2512 2534
2513 2535 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2514 2536 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2515 2537
2516 2538 if ((amp = svd->amp) != NULL)
2517 2539 anon_index = svd->anon_index + seg_page(seg, addr);
2518 2540
2519 2541 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2520 2542 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2521 2543 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2522 2544 } else {
2523 2545 hat_unlock(seg->s_as->a_hat, addr, len);
2524 2546 }
2525 2547 for (adr = addr; adr < addr + len; adr += PAGESIZE) {
2526 2548 if (amp != NULL) {
2527 2549 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
2528 2550 if ((ap = anon_get_ptr(amp->ahp, anon_index++))
2529 2551 != NULL) {
2530 2552 swap_xlate(ap, &vp, &offset);
2531 2553 } else {
2532 2554 vp = svd->vp;
2533 2555 offset = svd->offset +
2534 2556 (uintptr_t)(adr - seg->s_base);
2535 2557 }
2536 2558 ANON_LOCK_EXIT(&->a_rwlock);
2537 2559 } else {
2538 2560 vp = svd->vp;
2539 2561 offset = svd->offset +
2540 2562 (uintptr_t)(adr - seg->s_base);
2541 2563 }
2542 2564
2543 2565 /*
2544 2566 * Use page_find() instead of page_lookup() to
2545 2567 * find the page since we know that it is locked.
2546 2568 */
2547 2569 pp = page_find(vp, offset);
2548 2570 if (pp == NULL) {
2549 2571 panic(
2550 2572 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
2551 2573 (void *)adr, (void *)ap, (void *)vp, offset);
2552 2574 /*NOTREACHED*/
2553 2575 }
2554 2576
2555 2577 if (rw == S_WRITE) {
2556 2578 hat_setrefmod(pp);
2557 2579 if (seg->s_as->a_vbits)
2558 2580 hat_setstat(seg->s_as, adr, PAGESIZE,
2559 2581 P_REF | P_MOD);
2560 2582 } else if (rw != S_OTHER) {
2561 2583 hat_setref(pp);
2562 2584 if (seg->s_as->a_vbits)
2563 2585 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
2564 2586 }
2565 2587 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2566 2588 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
2567 2589 page_unlock(pp);
2568 2590 }
2569 2591 ASSERT(svd->softlockcnt >= btop(len));
2570 2592 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
2571 2593 /*
2572 2594 * All SOFTLOCKS are gone. Wakeup any waiting
2573 2595 * unmappers so they can try again to unmap.
2574 2596 * Check for waiters first without the mutex
2575 2597 * held so we don't always grab the mutex on
2576 2598 * softunlocks.
2577 2599 */
2578 2600 if (AS_ISUNMAPWAIT(seg->s_as)) {
2579 2601 mutex_enter(&seg->s_as->a_contents);
2580 2602 if (AS_ISUNMAPWAIT(seg->s_as)) {
2581 2603 AS_CLRUNMAPWAIT(seg->s_as);
2582 2604 cv_broadcast(&seg->s_as->a_cv);
2583 2605 }
2584 2606 mutex_exit(&seg->s_as->a_contents);
2585 2607 }
2586 2608 }
2587 2609 }
2588 2610
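A minimal model of the pairing rule stated in the comment above (hypothetical user-space code, addr omitted for brevity): every F_SOFTLOCK must later be undone by an F_SOFTUNLOCK over exactly the same range, and dropping the count to zero is what wakes any unmapper waiting on the segment.

#include <assert.h>
#include <stddef.h>

#define	MODEL_PAGESIZE	4096UL
#define	MODEL_BTOP(x)	((x) / MODEL_PAGESIZE)

struct model_seg {
	size_t	softlockcnt;		/* pages currently SOFTLOCKed */
};

static void
model_softlock(struct model_seg *seg, size_t len)
{
	seg->softlockcnt += MODEL_BTOP(len);
}

static void
model_softunlock(struct model_seg *seg, size_t len)
{
	/* must mirror the matching softlock exactly: same addr, same len */
	assert(seg->softlockcnt >= MODEL_BTOP(len));
	seg->softlockcnt -= MODEL_BTOP(len);
	if (seg->softlockcnt == 0) {
		/* this is where waiting unmappers would be woken */
	}
}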
2589 2611 #define PAGE_HANDLED ((page_t *)-1)
2590 2612
2591 2613 /*
2592 2614 * Release all the pages in the NULL terminated ppp list
2593 2615 * which haven't already been converted to PAGE_HANDLED.
2594 2616 */
2595 2617 static void
2596 2618 segvn_pagelist_rele(page_t **ppp)
2597 2619 {
2598 2620 for (; *ppp != NULL; ppp++) {
2599 2621 if (*ppp != PAGE_HANDLED)
2600 2622 page_unlock(*ppp);
2601 2623 }
2602 2624 }
2603 2625
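A rough sketch of how a consumer of such a NULL-terminated list is expected to behave (illustrative only; the model_consume_page() helper below is hypothetical): entries the caller actually uses are overwritten with PAGE_HANDLED, so that segvn_pagelist_rele() only unlocks the leftovers.

static void
model_consume_page(page_t **pl, u_offset_t off)
{
	page_t **ppp;

	for (ppp = pl; *ppp != NULL; ppp++) {
		if (*ppp == PAGE_HANDLED)
			continue;
		if ((*ppp)->p_offset == off) {
			/* ... use the page, then mark it consumed ... */
			*ppp = PAGE_HANDLED;
			break;
		}
	}
	segvn_pagelist_rele(pl);	/* releases everything not handled */
}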
2604 2626 static int stealcow = 1;
2605 2627
2606 2628 /*
2607 2629 * Workaround for viking chip bug. See bug id 1220902.
2608 2630 * To fix this down in pagefault() would require importing so
2609 2631  * much of the as and segvn code as to be unmaintainable.
2610 2632 */
2611 2633 int enable_mbit_wa = 0;
2612 2634
2613 2635 /*
2614 2636 * Handles all the dirty work of getting the right
2615 2637 * anonymous pages and loading up the translations.
2616 2638 * This routine is called only from segvn_fault()
2617 2639 * when looping over the range of addresses requested.
2618 2640 *
2619 2641 * The basic algorithm here is:
2620 2642 * If this is an anon_zero case
2621 2643 * Call anon_zero to allocate page
2622 2644 * Load up translation
2623 2645 * Return
2624 2646 * endif
2625 2647 * If this is an anon page
2626 2648 * Use anon_getpage to get the page
2627 2649 * else
2628 2650 * Find page in pl[] list passed in
2629 2651 * endif
2630 2652 * If not a cow
2631 2653 * Load up the translation to the page
2632 2654 * return
2633 2655 * endif
2634 2656 * Call anon_private to handle cow
2635 2657 * Load up (writable) translation to new page
2636 2658 */
2637 2659 static faultcode_t
2638 2660 segvn_faultpage(
2639 2661 struct hat *hat, /* the hat to use for mapping */
2640 2662 struct seg *seg, /* seg_vn of interest */
2641 2663 caddr_t addr, /* address in as */
2642 2664 u_offset_t off, /* offset in vp */
2643 2665 struct vpage *vpage, /* pointer to vpage for vp, off */
2644 2666 page_t *pl[], /* object source page pointer */
2645 2667 uint_t vpprot, /* access allowed to object pages */
2646 2668 enum fault_type type, /* type of fault */
2647 2669 enum seg_rw rw, /* type of access at fault */
2648 2670 int brkcow) /* we may need to break cow */
2649 2671 {
2650 2672 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2651 2673 page_t *pp, **ppp;
2652 2674 uint_t pageflags = 0;
2653 2675 page_t *anon_pl[1 + 1];
2654 2676 page_t *opp = NULL; /* original page */
2655 2677 uint_t prot;
2656 2678 int err;
2657 2679 int cow;
2658 2680 int claim;
2659 2681 int steal = 0;
2660 2682 ulong_t anon_index;
2661 2683 struct anon *ap, *oldap;
2662 2684 struct anon_map *amp;
2663 2685 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
2664 2686 int anon_lock = 0;
2665 2687 anon_sync_obj_t cookie;
2666 2688
2667 2689 if (svd->flags & MAP_TEXT) {
2668 2690 hat_flag |= HAT_LOAD_TEXT;
2669 2691 }
2670 2692
2671 2693 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
2672 2694 ASSERT(seg->s_szc == 0);
2673 2695 ASSERT(svd->tr_state != SEGVN_TR_INIT);
2674 2696
2675 2697 /*
2676 2698 * Initialize protection value for this page.
2677 2699 * If we have per page protection values check it now.
2678 2700 */
2679 2701 if (svd->pageprot) {
2680 2702 uint_t protchk;
2681 2703
2682 2704 switch (rw) {
2683 2705 case S_READ:
2684 2706 protchk = PROT_READ;
2685 2707 break;
2686 2708 case S_WRITE:
2687 2709 protchk = PROT_WRITE;
2688 2710 break;
2689 2711 case S_EXEC:
2690 2712 protchk = PROT_EXEC;
2691 2713 break;
2692 2714 case S_OTHER:
2693 2715 default:
2694 2716 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
2695 2717 break;
2696 2718 }
2697 2719
2698 2720 prot = VPP_PROT(vpage);
2699 2721 if ((prot & protchk) == 0)
2700 2722 return (FC_PROT); /* illegal access type */
2701 2723 } else {
2702 2724 prot = svd->prot;
2703 2725 }
2704 2726
2705 2727 if (type == F_SOFTLOCK) {
2706 2728 atomic_inc_ulong((ulong_t *)&svd->softlockcnt);
2707 2729 }
2708 2730
2709 2731 /*
2710 2732 * Always acquire the anon array lock to prevent 2 threads from
2711 2733 * allocating separate anon slots for the same "addr".
2712 2734 */
2713 2735
2714 2736 if ((amp = svd->amp) != NULL) {
2715 2737 ASSERT(RW_READ_HELD(&->a_rwlock));
2716 2738 anon_index = svd->anon_index + seg_page(seg, addr);
2717 2739 anon_array_enter(amp, anon_index, &cookie);
2718 2740 anon_lock = 1;
2719 2741 }
2720 2742
2721 2743 if (svd->vp == NULL && amp != NULL) {
2722 2744 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
2723 2745 /*
2724 2746 * Allocate a (normally) writable anonymous page of
2725 2747 * zeroes. If no advance reservations, reserve now.
2726 2748 */
2727 2749 if (svd->flags & MAP_NORESERVE) {
2728 2750 if (anon_resv_zone(ptob(1),
2729 2751 seg->s_as->a_proc->p_zone)) {
2730 2752 atomic_add_long(&svd->swresv, ptob(1));
2731 2753 atomic_add_long(&seg->s_as->a_resvsize,
2732 2754 ptob(1));
2733 2755 } else {
2734 2756 err = ENOMEM;
2735 2757 goto out;
2736 2758 }
2737 2759 }
2738 2760 if ((pp = anon_zero(seg, addr, &ap,
2739 2761 svd->cred)) == NULL) {
2740 2762 err = ENOMEM;
2741 2763 goto out; /* out of swap space */
2742 2764 }
2743 2765 /*
2744 2766 * Re-acquire the anon_map lock and
2745 2767 * initialize the anon array entry.
2746 2768 */
2747 2769 (void) anon_set_ptr(amp->ahp, anon_index, ap,
2748 2770 ANON_SLEEP);
2749 2771
2750 2772 ASSERT(pp->p_szc == 0);
2751 2773
2752 2774 /*
2753 2775 * Handle pages that have been marked for migration
2754 2776 */
2755 2777 if (lgrp_optimizations())
2756 2778 page_migrate(seg, addr, &pp, 1);
2757 2779
2758 2780 if (enable_mbit_wa) {
2759 2781 if (rw == S_WRITE)
2760 2782 hat_setmod(pp);
2761 2783 else if (!hat_ismod(pp))
2762 2784 prot &= ~PROT_WRITE;
2763 2785 }
2764 2786 /*
2765 2787 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2766 2788 * with MC_LOCKAS, MCL_FUTURE) and this is a
2767 2789 * MAP_NORESERVE segment, we may need to
2768 2790 * permanently lock the page as it is being faulted
2769 2791 * for the first time. The following text applies
2770 2792 * only to MAP_NORESERVE segments:
2771 2793 *
2772 2794 * As per memcntl(2), if this segment was created
2773 2795 * after MCL_FUTURE was applied (a "future"
2774 2796 * segment), its pages must be locked. If this
2775 2797 * segment existed at MCL_FUTURE application (a
2776 2798 * "past" segment), the interface is unclear.
2777 2799 *
2778 2800 * We decide to lock only if vpage is present:
2779 2801 *
2780 2802 * - "future" segments will have a vpage array (see
2781 2803 * as_map), and so will be locked as required
2782 2804 *
2783 2805 * - "past" segments may not have a vpage array,
2784 2806 * depending on whether events (such as
2785 2807 * mprotect) have occurred. Locking if vpage
2786 2808 * exists will preserve legacy behavior. Not
2787 2809 * locking if vpage is absent, will not break
2788 2810 * the interface or legacy behavior. Note that
2789 2811 * allocating vpage here if it's absent requires
2790 2812 * upgrading the segvn reader lock, the cost of
2791 2813 * which does not seem worthwhile.
2792 2814 *
2793 2815 * Usually testing and setting VPP_ISPPLOCK and
2794 2816 * VPP_SETPPLOCK requires holding the segvn lock as
2795 2817 * writer, but in this case all readers are
2796 2818 * serializing on the anon array lock.
2797 2819 */
2798 2820 if (AS_ISPGLCK(seg->s_as) && vpage != NULL &&
2799 2821 (svd->flags & MAP_NORESERVE) &&
2800 2822 !VPP_ISPPLOCK(vpage)) {
2801 2823 proc_t *p = seg->s_as->a_proc;
2802 2824 ASSERT(svd->type == MAP_PRIVATE);
2803 2825 mutex_enter(&p->p_lock);
2804 2826 if (rctl_incr_locked_mem(p, NULL, PAGESIZE,
2805 2827 1) == 0) {
2806 2828 claim = VPP_PROT(vpage) & PROT_WRITE;
2807 2829 if (page_pp_lock(pp, claim, 0)) {
2808 2830 VPP_SETPPLOCK(vpage);
2809 2831 } else {
2810 2832 rctl_decr_locked_mem(p, NULL,
2811 2833 PAGESIZE, 1);
2812 2834 }
2813 2835 }
2814 2836 mutex_exit(&p->p_lock);
2815 2837 }
2816 2838
2817 2839 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2818 2840 hat_memload(hat, addr, pp, prot, hat_flag);
2819 2841
2820 2842 if (!(hat_flag & HAT_LOAD_LOCK))
2821 2843 page_unlock(pp);
2822 2844
2823 2845 anon_array_exit(&cookie);
2824 2846 return (0);
2825 2847 }
2826 2848 }
2827 2849
2828 2850 /*
2829 2851 * Obtain the page structure via anon_getpage() if it is
2830 2852 * a private copy of an object (the result of a previous
2831 2853 * copy-on-write).
2832 2854 */
2833 2855 if (amp != NULL) {
2834 2856 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) {
2835 2857 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE,
2836 2858 seg, addr, rw, svd->cred);
2837 2859 if (err)
2838 2860 goto out;
2839 2861
2840 2862 if (svd->type == MAP_SHARED) {
2841 2863 /*
2842 2864 * If this is a shared mapping to an
2843 2865 * anon_map, then ignore the write
2844 2866 * permissions returned by anon_getpage().
2845 2867 * They apply to the private mappings
2846 2868 * of this anon_map.
2847 2869 */
2848 2870 vpprot |= PROT_WRITE;
2849 2871 }
2850 2872 opp = anon_pl[0];
2851 2873 }
2852 2874 }
2853 2875
2854 2876 /*
2855 2877 * Search the pl[] list passed in if it is from the
2856 2878 * original object (i.e., not a private copy).
2857 2879 */
2858 2880 if (opp == NULL) {
2859 2881 /*
2860 2882 * Find original page. We must be bringing it in
2861 2883 * from the list in pl[].
2862 2884 */
2863 2885 for (ppp = pl; (opp = *ppp) != NULL; ppp++) {
2864 2886 if (opp == PAGE_HANDLED)
2865 2887 continue;
2866 2888 ASSERT(opp->p_vnode == svd->vp); /* XXX */
2867 2889 if (opp->p_offset == off)
2868 2890 break;
2869 2891 }
2870 2892 if (opp == NULL) {
2871 2893 panic("segvn_faultpage not found");
2872 2894 /*NOTREACHED*/
2873 2895 }
2874 2896 *ppp = PAGE_HANDLED;
2875 2897
2876 2898 }
2877 2899
2878 2900 ASSERT(PAGE_LOCKED(opp));
2879 2901
2880 2902 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2881 2903 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0);
2882 2904
2883 2905 /*
2884 2906 * The fault is treated as a copy-on-write fault if a
2885 2907 * write occurs on a private segment and the object
2886 2908 * page (i.e., mapping) is write protected. We assume
2887 2909 * that fatal protection checks have already been made.
2888 2910 */
2889 2911
2890 2912 if (brkcow) {
2891 2913 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2892 2914 cow = !(vpprot & PROT_WRITE);
2893 2915 } else if (svd->tr_state == SEGVN_TR_ON) {
2894 2916 /*
2895 2917 * If we are doing text replication COW on first touch.
2896 2918 */
2897 2919 ASSERT(amp != NULL);
2898 2920 ASSERT(svd->vp != NULL);
2899 2921 ASSERT(rw != S_WRITE);
2900 2922 cow = (ap == NULL);
2901 2923 } else {
2902 2924 cow = 0;
2903 2925 }
2904 2926
2905 2927 /*
2906 2928 * If not a copy-on-write case load the translation
2907 2929 * and return.
2908 2930 */
2909 2931 if (cow == 0) {
2910 2932
2911 2933 /*
2912 2934 * Handle pages that have been marked for migration
2913 2935 */
2914 2936 if (lgrp_optimizations())
2915 2937 page_migrate(seg, addr, &opp, 1);
2916 2938
2917 2939 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) {
2918 2940 if (rw == S_WRITE)
2919 2941 hat_setmod(opp);
2920 2942 else if (rw != S_OTHER && !hat_ismod(opp))
2921 2943 prot &= ~PROT_WRITE;
2922 2944 }
2923 2945
2924 2946 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
2925 2947 (!svd->pageprot && svd->prot == (prot & vpprot)));
2926 2948 ASSERT(amp == NULL ||
2927 2949 svd->rcookie == HAT_INVALID_REGION_COOKIE);
2928 2950 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag,
2929 2951 svd->rcookie);
2930 2952
2931 2953 if (!(hat_flag & HAT_LOAD_LOCK))
2932 2954 page_unlock(opp);
2933 2955
2934 2956 if (anon_lock) {
2935 2957 anon_array_exit(&cookie);
2936 2958 }
2937 2959 return (0);
2938 2960 }
2939 2961
2940 2962 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2941 2963
2942 2964 hat_setref(opp);
2943 2965
2944 2966 ASSERT(amp != NULL && anon_lock);
2945 2967
2946 2968 /*
2947 2969 * Steal the page only if it isn't a private page
2948 2970 * since stealing a private page is not worth the effort.
2949 2971 */
2950 2972 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL)
2951 2973 steal = 1;
2952 2974
2953 2975 /*
2954 2976 * Steal the original page if the following conditions are true:
2955 2977 *
2956 2978 * We are low on memory, the page is not private, page is not large,
2957 2979 * not shared, not modified, not `locked' or if we have it `locked'
2958 2980 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
2959 2981 * that the page is not shared) and if it doesn't have any
2960 2982 * translations. page_struct_lock isn't needed to look at p_cowcnt
2961 2983 * and p_lckcnt because we first get exclusive lock on page.
2962 2984 */
2963 2985 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
2964 2986
2965 2987 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
2966 2988 page_tryupgrade(opp) && !hat_ismod(opp) &&
2967 2989 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) ||
2968 2990 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 &&
2969 2991 vpage != NULL && VPP_ISPPLOCK(vpage)))) {
2970 2992 /*
2971 2993 * Check if this page has other translations
2972 2994 * after unloading our translation.
2973 2995 */
2974 2996 if (hat_page_is_mapped(opp)) {
2975 2997 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2976 2998 hat_unload(seg->s_as->a_hat, addr, PAGESIZE,
2977 2999 HAT_UNLOAD);
2978 3000 }
2979 3001
2980 3002 /*
2981 3003 * hat_unload() might sync back someone else's recent
2982 3004 * modification, so check again.
2983 3005 */
2984 3006 if (!hat_ismod(opp) && !hat_page_is_mapped(opp))
2985 3007 pageflags |= STEAL_PAGE;
2986 3008 }
2987 3009
2988 3010 /*
2989 3011 * If we have a vpage pointer, see if it indicates that we have
2990 3012 * ``locked'' the page we map -- if so, tell anon_private to
2991 3013 * transfer the locking resource to the new page.
2992 3014 *
2993 3015 * See Statement at the beginning of segvn_lockop regarding
2994 3016 * the way lockcnts/cowcnts are handled during COW.
2995 3017 *
2996 3018 */
2997 3019 if (vpage != NULL && VPP_ISPPLOCK(vpage))
2998 3020 pageflags |= LOCK_PAGE;
2999 3021
3000 3022 /*
3001 3023 * Allocate a private page and perform the copy.
3002 3024 * For MAP_NORESERVE reserve swap space now, unless this
3003 3025 * is a cow fault on an existing anon page in which case
3004 3026 * MAP_NORESERVE will have made advance reservations.
3005 3027 */
3006 3028 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) {
3007 3029 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) {
3008 3030 atomic_add_long(&svd->swresv, ptob(1));
3009 3031 atomic_add_long(&seg->s_as->a_resvsize, ptob(1));
3010 3032 } else {
3011 3033 page_unlock(opp);
3012 3034 err = ENOMEM;
3013 3035 goto out;
3014 3036 }
3015 3037 }
3016 3038 oldap = ap;
3017 3039 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3018 3040 if (pp == NULL) {
3019 3041 err = ENOMEM; /* out of swap space */
3020 3042 goto out;
3021 3043 }
3022 3044
3023 3045 /*
3024 3046 * If we copied away from an anonymous page, then
3025 3047 * we are one step closer to freeing up an anon slot.
3026 3048 *
3027 3049 * NOTE: The original anon slot must be released while
3028 3050 * holding the "anon_map" lock. This is necessary to prevent
3029 3051 * other threads from obtaining a pointer to the anon slot
3030 3052 * which may be freed if its "refcnt" is 1.
3031 3053 */
3032 3054 if (oldap != NULL)
3033 3055 anon_decref(oldap);
3034 3056
3035 3057 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3036 3058
3037 3059 /*
3038 3060 * Handle pages that have been marked for migration
3039 3061 */
3040 3062 if (lgrp_optimizations())
3041 3063 page_migrate(seg, addr, &pp, 1);
3042 3064
3043 3065 ASSERT(pp->p_szc == 0);
3044 3066
3045 3067 ASSERT(!IS_VMODSORT(pp->p_vnode));
3046 3068 if (enable_mbit_wa) {
3047 3069 if (rw == S_WRITE)
3048 3070 hat_setmod(pp);
3049 3071 else if (!hat_ismod(pp))
3050 3072 prot &= ~PROT_WRITE;
3051 3073 }
3052 3074
3053 3075 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3054 3076 hat_memload(hat, addr, pp, prot, hat_flag);
3055 3077
3056 3078 if (!(hat_flag & HAT_LOAD_LOCK))
3057 3079 page_unlock(pp);
3058 3080
3059 3081 ASSERT(anon_lock);
3060 3082 anon_array_exit(&cookie);
3061 3083 return (0);
3062 3084 out:
3063 3085 if (anon_lock)
3064 3086 anon_array_exit(&cookie);
3065 3087
3066 3088 if (type == F_SOFTLOCK) {
3067 3089 atomic_dec_ulong((ulong_t *)&svd->softlockcnt);
3068 3090 }
3069 3091 return (FC_MAKE_ERR(err));
3070 3092 }
3071 3093
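The enable_mbit_wa / IS_VMODSORT handling in segvn_faultpage() repeats one small pattern worth isolating. A hedged sketch (the helper name is hypothetical; hat_setmod()/hat_ismod() are the real interfaces used above): on a write fault mark the page modified immediately; on a read fault of a clean page, drop PROT_WRITE from the translation so the first store faults again and marks the page modified then.

static uint_t
model_adjust_prot(page_t *pp, enum seg_rw rw, uint_t prot)
{
	if (rw == S_WRITE) {
		hat_setmod(pp);			/* dirty it up front */
	} else if (!hat_ismod(pp)) {
		prot &= ~PROT_WRITE;		/* force a fault on first store */
	}
	return (prot);
}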
3072 3094 /*
3073 3095  * Relocate a bunch of smaller targ pages into one large repl page. All targ
3074 3096  * pages must be complete pages smaller than replacement pages.
3075 3097  * It's assumed that no page's szc can change since they are all PAGESIZE or
3076 3098  * complete large pages locked SHARED.
3077 3099 */
3078 3100 static void
3079 3101 segvn_relocate_pages(page_t **targ, page_t *replacement)
3080 3102 {
3081 3103 page_t *pp;
3082 3104 pgcnt_t repl_npgs, curnpgs;
3083 3105 pgcnt_t i;
3084 3106 uint_t repl_szc = replacement->p_szc;
3085 3107 page_t *first_repl = replacement;
3086 3108 page_t *repl;
3087 3109 spgcnt_t npgs;
3088 3110
3089 3111 VM_STAT_ADD(segvnvmstats.relocatepages[0]);
3090 3112
3091 3113 ASSERT(repl_szc != 0);
3092 3114 npgs = repl_npgs = page_get_pagecnt(repl_szc);
3093 3115
3094 3116 i = 0;
3095 3117 while (repl_npgs) {
3096 3118 spgcnt_t nreloc;
3097 3119 int err;
3098 3120 ASSERT(replacement != NULL);
3099 3121 pp = targ[i];
3100 3122 ASSERT(pp->p_szc < repl_szc);
3101 3123 ASSERT(PAGE_EXCL(pp));
3102 3124 ASSERT(!PP_ISFREE(pp));
3103 3125 curnpgs = page_get_pagecnt(pp->p_szc);
3104 3126 if (curnpgs == 1) {
3105 3127 VM_STAT_ADD(segvnvmstats.relocatepages[1]);
3106 3128 repl = replacement;
3107 3129 page_sub(&replacement, repl);
3108 3130 ASSERT(PAGE_EXCL(repl));
3109 3131 ASSERT(!PP_ISFREE(repl));
3110 3132 ASSERT(repl->p_szc == repl_szc);
3111 3133 } else {
3112 3134 page_t *repl_savepp;
3113 3135 int j;
3114 3136 VM_STAT_ADD(segvnvmstats.relocatepages[2]);
3115 3137 repl_savepp = replacement;
3116 3138 for (j = 0; j < curnpgs; j++) {
3117 3139 repl = replacement;
3118 3140 page_sub(&replacement, repl);
3119 3141 ASSERT(PAGE_EXCL(repl));
3120 3142 ASSERT(!PP_ISFREE(repl));
3121 3143 ASSERT(repl->p_szc == repl_szc);
3122 3144 ASSERT(page_pptonum(targ[i + j]) ==
3123 3145 page_pptonum(targ[i]) + j);
3124 3146 }
3125 3147 repl = repl_savepp;
3126 3148 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs));
3127 3149 }
3128 3150 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL);
3129 3151 if (err || nreloc != curnpgs) {
3130 3152 panic("segvn_relocate_pages: "
3131 3153 "page_relocate failed err=%d curnpgs=%ld "
3132 3154 "nreloc=%ld", err, curnpgs, nreloc);
3133 3155 }
3134 3156 ASSERT(curnpgs <= repl_npgs);
3135 3157 repl_npgs -= curnpgs;
3136 3158 i += curnpgs;
3137 3159 }
3138 3160 ASSERT(replacement == NULL);
3139 3161
3140 3162 repl = first_repl;
3141 3163 repl_npgs = npgs;
3142 3164 for (i = 0; i < repl_npgs; i++) {
3143 3165 ASSERT(PAGE_EXCL(repl));
3144 3166 ASSERT(!PP_ISFREE(repl));
3145 3167 targ[i] = repl;
3146 3168 page_downgrade(targ[i]);
3147 3169 repl++;
3148 3170 }
3149 3171 }
3150 3172
3151 3173 /*
3152 3174 * Check if all pages in ppa array are complete smaller than szc pages and
3153 3175 * their roots will still be aligned relative to their current size if the
3154 3176 * entire ppa array is relocated into one szc page. If these conditions are
3155 3177 * not met return 0.
3156 3178 *
3157 3179 * If all pages are properly aligned attempt to upgrade their locks
3158 3180 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
3159 3181 * upgrdfail was set to 0 by caller.
3160 3182 *
3161 3183 * Return 1 if all pages are aligned and locked exclusively.
3162 3184 *
3163 3185 * If all pages in ppa array happen to be physically contiguous to make one
3164 3186 * szc page and all exclusive locks are successfully obtained promote the page
3165 3187 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3166 3188 */
3167 3189 static int
3168 3190 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3169 3191 {
3170 3192 page_t *pp;
3171 3193 pfn_t pfn;
3172 3194 pgcnt_t totnpgs = page_get_pagecnt(szc);
3173 3195 pfn_t first_pfn;
3174 3196 int contig = 1;
3175 3197 pgcnt_t i;
3176 3198 pgcnt_t j;
3177 3199 uint_t curszc;
3178 3200 pgcnt_t curnpgs;
3179 3201 int root = 0;
3180 3202
3181 3203 ASSERT(szc > 0);
3182 3204
3183 3205 VM_STAT_ADD(segvnvmstats.fullszcpages[0]);
3184 3206
3185 3207 for (i = 0; i < totnpgs; i++) {
3186 3208 pp = ppa[i];
3187 3209 ASSERT(PAGE_SHARED(pp));
3188 3210 ASSERT(!PP_ISFREE(pp));
3189 3211 pfn = page_pptonum(pp);
3190 3212 if (i == 0) {
3191 3213 if (!IS_P2ALIGNED(pfn, totnpgs)) {
3192 3214 contig = 0;
3193 3215 } else {
3194 3216 first_pfn = pfn;
3195 3217 }
3196 3218 } else if (contig && pfn != first_pfn + i) {
3197 3219 contig = 0;
3198 3220 }
3199 3221 if (pp->p_szc == 0) {
3200 3222 if (root) {
3201 3223 VM_STAT_ADD(segvnvmstats.fullszcpages[1]);
3202 3224 return (0);
3203 3225 }
3204 3226 } else if (!root) {
3205 3227 if ((curszc = pp->p_szc) >= szc) {
3206 3228 VM_STAT_ADD(segvnvmstats.fullszcpages[2]);
3207 3229 return (0);
3208 3230 }
3209 3231 if (curszc == 0) {
3210 3232 /*
3211 3233 * p_szc changed means we don't have all pages
3212 3234 * locked. return failure.
3213 3235 */
3214 3236 VM_STAT_ADD(segvnvmstats.fullszcpages[3]);
3215 3237 return (0);
3216 3238 }
3217 3239 curnpgs = page_get_pagecnt(curszc);
3218 3240 if (!IS_P2ALIGNED(pfn, curnpgs) ||
3219 3241 !IS_P2ALIGNED(i, curnpgs)) {
3220 3242 VM_STAT_ADD(segvnvmstats.fullszcpages[4]);
3221 3243 return (0);
3222 3244 }
3223 3245 root = 1;
3224 3246 } else {
3225 3247 ASSERT(i > 0);
3226 3248 VM_STAT_ADD(segvnvmstats.fullszcpages[5]);
3227 3249 if (pp->p_szc != curszc) {
3228 3250 VM_STAT_ADD(segvnvmstats.fullszcpages[6]);
3229 3251 return (0);
3230 3252 }
3231 3253 if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3232 3254 panic("segvn_full_szcpages: "
3233 3255 "large page not physically contiguous");
3234 3256 }
3235 3257 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) {
3236 3258 root = 0;
3237 3259 }
3238 3260 }
3239 3261 }
3240 3262
3241 3263 for (i = 0; i < totnpgs; i++) {
3242 3264 ASSERT(ppa[i]->p_szc < szc);
3243 3265 if (!page_tryupgrade(ppa[i])) {
3244 3266 for (j = 0; j < i; j++) {
3245 3267 page_downgrade(ppa[j]);
3246 3268 }
3247 3269 *pszc = ppa[i]->p_szc;
3248 3270 *upgrdfail = 1;
3249 3271 VM_STAT_ADD(segvnvmstats.fullszcpages[7]);
3250 3272 return (0);
3251 3273 }
3252 3274 }
3253 3275
3254 3276 /*
3255 3277 	 * When a page is put on the free cachelist its szc is set to 0. If the
3256 3278 	 * file system reclaimed the pages from the cachelist, the target pages
3257 3279 	 * will be physically contiguous with a p_szc of 0; in this case just
3258 3280 	 * upgrade the szc of the target pages without any relocations.
3259 3281 	 * To avoid any hat issues with previous small mappings,
3260 3282 	 * hat_pageunload() the target pages first.
3261 3283 */
3262 3284 if (contig) {
3263 3285 VM_STAT_ADD(segvnvmstats.fullszcpages[8]);
3264 3286 for (i = 0; i < totnpgs; i++) {
3265 3287 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3266 3288 }
3267 3289 for (i = 0; i < totnpgs; i++) {
3268 3290 ppa[i]->p_szc = szc;
3269 3291 }
3270 3292 for (i = 0; i < totnpgs; i++) {
3271 3293 ASSERT(PAGE_EXCL(ppa[i]));
3272 3294 page_downgrade(ppa[i]);
3273 3295 }
3274 3296 if (pszc != NULL) {
3275 3297 *pszc = szc;
3276 3298 }
3277 3299 }
3278 3300 VM_STAT_ADD(segvnvmstats.fullszcpages[9]);
3279 3301 return (1);
3280 3302 }
3281 3303
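/*
 * Editorial note, not part of the original source: the alignment tests in
 * segvn_full_szcpages() above reduce to power-of-two arithmetic on page
 * frame numbers.  With the usual <sys/sysmacros.h> semantics,
 *
 *	IS_P2ALIGNED(pfn, npgs)	==	((pfn & (npgs - 1)) == 0)
 *	P2PHASE(pfn, npgs)	==	(pfn & (npgs - 1))
 *
 * so, for example, a constituent group with page_get_pagecnt(curszc) == 4
 * is accepted as a root only when both its pfn and its index i in ppa[]
 * are multiples of 4, and the group ends at the page whose
 * P2PHASE(pfn, 4) == 3.
 */
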
3282 3304 /*
3283 3305  * Create physically contiguous pages for the [vp, off] - [vp, off +
3284 3306  * page_size(szc)) range and, for a private segment, return them in the
3285 3307  * ppa array. Pages are created either via IO or relocations.
3286 3308  *
3287 3309  * Return 1 on success and 0 on failure.
3288 3310  *
3289 3311  * If physically contiguous pages already exist for this range, return 1
3290 3312  * without filling the ppa array. The caller initializes ppa[0] to NULL to
3291 3313  * detect an unfilled ppa array and then fills it via VOP_GETPAGE() itself.
3292 3314 */
3293 3315
3294 3316 static int
3295 3317 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
3296 3318 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
3297 3319 int *downsize)
3298 3320
3299 3321 {
3300 3322 page_t *pplist = *ppplist;
3301 3323 size_t pgsz = page_get_pagesize(szc);
3302 3324 pgcnt_t pages = btop(pgsz);
3303 3325 ulong_t start_off = off;
3304 3326 u_offset_t eoff = off + pgsz;
3305 3327 spgcnt_t nreloc;
3306 3328 u_offset_t io_off = off;
3307 3329 size_t io_len;
3308 3330 page_t *io_pplist = NULL;
3309 3331 page_t *done_pplist = NULL;
3310 3332 pgcnt_t pgidx = 0;
3311 3333 page_t *pp;
3312 3334 page_t *newpp;
3313 3335 page_t *targpp;
3314 3336 int io_err = 0;
3315 3337 int i;
3316 3338 pfn_t pfn;
3317 3339 ulong_t ppages;
3318 3340 page_t *targ_pplist = NULL;
3319 3341 page_t *repl_pplist = NULL;
3320 3342 page_t *tmp_pplist;
3321 3343 int nios = 0;
3322 3344 uint_t pszc;
3323 3345 struct vattr va;
3324 3346
3325 3347 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]);
3326 3348
3327 3349 ASSERT(szc != 0);
3328 3350 ASSERT(pplist->p_szc == szc);
3329 3351
3330 3352 /*
3331 3353 * downsize will be set to 1 only if we fail to lock pages. this will
3332 3354 * allow subsequent faults to try to relocate the page again. If we
3333 3355 * fail due to misalignment don't downsize and let the caller map the
3334 3356 * whole region with small mappings to avoid more faults into the area
3335 3357 * where we can't get large pages anyway.
3336 3358 */
3337 3359 *downsize = 0;
3338 3360
3339 3361 while (off < eoff) {
3340 3362 newpp = pplist;
3341 3363 ASSERT(newpp != NULL);
3342 3364 ASSERT(PAGE_EXCL(newpp));
3343 3365 ASSERT(!PP_ISFREE(newpp));
3344 3366 /*
3345 3367 * we pass NULL for nrelocp to page_lookup_create()
3346 3368 * so that it doesn't relocate. We relocate here
3347 3369 * later only after we make sure we can lock all
3348 3370 * pages in the range we handle and they are all
3349 3371 * aligned.
3350 3372 */
3351 3373 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0);
3352 3374 ASSERT(pp != NULL);
3353 3375 ASSERT(!PP_ISFREE(pp));
3354 3376 ASSERT(pp->p_vnode == vp);
3355 3377 ASSERT(pp->p_offset == off);
3356 3378 if (pp == newpp) {
3357 3379 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]);
3358 3380 page_sub(&pplist, pp);
3359 3381 ASSERT(PAGE_EXCL(pp));
3360 3382 ASSERT(page_iolock_assert(pp));
3361 3383 page_list_concat(&io_pplist, &pp);
3362 3384 off += PAGESIZE;
3363 3385 continue;
3364 3386 }
3365 3387 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]);
3366 3388 pfn = page_pptonum(pp);
3367 3389 pszc = pp->p_szc;
3368 3390 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL &&
3369 3391 IS_P2ALIGNED(pfn, pages)) {
3370 3392 ASSERT(repl_pplist == NULL);
3371 3393 ASSERT(done_pplist == NULL);
3372 3394 ASSERT(pplist == *ppplist);
3373 3395 page_unlock(pp);
3374 3396 page_free_replacement_page(pplist);
3375 3397 page_create_putback(pages);
3376 3398 *ppplist = NULL;
3377 3399 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]);
3378 3400 return (1);
3379 3401 }
3380 3402 if (pszc >= szc) {
3381 3403 page_unlock(pp);
3382 3404 segvn_faultvnmpss_align_err1++;
3383 3405 goto out;
3384 3406 }
3385 3407 ppages = page_get_pagecnt(pszc);
3386 3408 if (!IS_P2ALIGNED(pfn, ppages)) {
3387 3409 ASSERT(pszc > 0);
3388 3410 /*
3389 3411 * sizing down to pszc won't help.
3390 3412 */
3391 3413 page_unlock(pp);
3392 3414 segvn_faultvnmpss_align_err2++;
3393 3415 goto out;
3394 3416 }
3395 3417 pfn = page_pptonum(newpp);
3396 3418 if (!IS_P2ALIGNED(pfn, ppages)) {
3397 3419 ASSERT(pszc > 0);
3398 3420 /*
3399 3421 * sizing down to pszc won't help.
3400 3422 */
3401 3423 page_unlock(pp);
3402 3424 segvn_faultvnmpss_align_err3++;
3403 3425 goto out;
3404 3426 }
3405 3427 if (!PAGE_EXCL(pp)) {
3406 3428 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
3407 3429 page_unlock(pp);
3408 3430 *downsize = 1;
3409 3431 *ret_pszc = pp->p_szc;
3410 3432 goto out;
3411 3433 }
3412 3434 targpp = pp;
3413 3435 if (io_pplist != NULL) {
3414 3436 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
3415 3437 io_len = off - io_off;
3416 3438 /*
3417 3439 * Some file systems like NFS don't check EOF
3418 3440 * conditions in VOP_PAGEIO(). Check it here
3419 3441 * now that pages are locked SE_EXCL. Any file
3420 3442 * truncation will wait until the pages are
3421 3443 * unlocked so no need to worry that file will
3422 3444 * be truncated after we check its size here.
3423 3445 * XXX fix NFS to remove this check.
3424 3446 */
3425 3447 va.va_mask = AT_SIZE;
3426 3448 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
3427 3449 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
3428 3450 page_unlock(targpp);
3429 3451 goto out;
3430 3452 }
3431 3453 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3432 3454 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
3433 3455 *downsize = 1;
3434 3456 *ret_pszc = 0;
3435 3457 page_unlock(targpp);
3436 3458 goto out;
3437 3459 }
3438 3460 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3439 3461 B_READ, svd->cred, NULL);
3440 3462 if (io_err) {
3441 3463 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
3442 3464 page_unlock(targpp);
3443 3465 if (io_err == EDEADLK) {
3444 3466 segvn_vmpss_pageio_deadlk_err++;
3445 3467 }
3446 3468 goto out;
3447 3469 }
3448 3470 nios++;
3449 3471 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
3450 3472 while (io_pplist != NULL) {
3451 3473 pp = io_pplist;
3452 3474 page_sub(&io_pplist, pp);
3453 3475 ASSERT(page_iolock_assert(pp));
3454 3476 page_io_unlock(pp);
3455 3477 pgidx = (pp->p_offset - start_off) >>
3456 3478 PAGESHIFT;
3457 3479 ASSERT(pgidx < pages);
3458 3480 ppa[pgidx] = pp;
3459 3481 page_list_concat(&done_pplist, &pp);
3460 3482 }
3461 3483 }
3462 3484 pp = targpp;
3463 3485 ASSERT(PAGE_EXCL(pp));
3464 3486 ASSERT(pp->p_szc <= pszc);
3465 3487 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
3466 3488 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
3467 3489 page_unlock(pp);
3468 3490 *downsize = 1;
3469 3491 *ret_pszc = pp->p_szc;
3470 3492 goto out;
3471 3493 }
3472 3494 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
3473 3495 /*
3474 3496 		 * The page szc could have changed before the entire group was
3475 3497 		 * locked. Reread the page szc.
3476 3498 */
3477 3499 pszc = pp->p_szc;
3478 3500 ppages = page_get_pagecnt(pszc);
3479 3501
3480 3502 /* link just the roots */
3481 3503 page_list_concat(&targ_pplist, &pp);
3482 3504 page_sub(&pplist, newpp);
3483 3505 page_list_concat(&repl_pplist, &newpp);
3484 3506 off += PAGESIZE;
3485 3507 while (--ppages != 0) {
3486 3508 newpp = pplist;
3487 3509 page_sub(&pplist, newpp);
3488 3510 off += PAGESIZE;
3489 3511 }
3490 3512 io_off = off;
3491 3513 }
3492 3514 if (io_pplist != NULL) {
3493 3515 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]);
3494 3516 io_len = eoff - io_off;
3495 3517 va.va_mask = AT_SIZE;
3496 3518 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) {
3497 3519 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]);
3498 3520 goto out;
3499 3521 }
3500 3522 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3501 3523 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]);
3502 3524 *downsize = 1;
3503 3525 *ret_pszc = 0;
3504 3526 goto out;
3505 3527 }
3506 3528 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3507 3529 B_READ, svd->cred, NULL);
3508 3530 if (io_err) {
3509 3531 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]);
3510 3532 if (io_err == EDEADLK) {
3511 3533 segvn_vmpss_pageio_deadlk_err++;
3512 3534 }
3513 3535 goto out;
3514 3536 }
3515 3537 nios++;
3516 3538 while (io_pplist != NULL) {
3517 3539 pp = io_pplist;
3518 3540 page_sub(&io_pplist, pp);
3519 3541 ASSERT(page_iolock_assert(pp));
3520 3542 page_io_unlock(pp);
3521 3543 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3522 3544 ASSERT(pgidx < pages);
3523 3545 ppa[pgidx] = pp;
3524 3546 }
3525 3547 }
3526 3548 /*
3527 3549 	 * We're now bound to succeed or panic.
3528 3550 	 * Remove the pages from done_pplist; the list is no longer needed.
3529 3551 */
3530 3552 while (done_pplist != NULL) {
3531 3553 pp = done_pplist;
3532 3554 page_sub(&done_pplist, pp);
3533 3555 }
3534 3556 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]);
3535 3557 ASSERT(pplist == NULL);
3536 3558 *ppplist = NULL;
3537 3559 while (targ_pplist != NULL) {
3538 3560 int ret;
3539 3561 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]);
3540 3562 ASSERT(repl_pplist);
3541 3563 pp = targ_pplist;
3542 3564 page_sub(&targ_pplist, pp);
3543 3565 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3544 3566 newpp = repl_pplist;
3545 3567 page_sub(&repl_pplist, newpp);
3546 3568 #ifdef DEBUG
3547 3569 pfn = page_pptonum(pp);
3548 3570 pszc = pp->p_szc;
3549 3571 ppages = page_get_pagecnt(pszc);
3550 3572 ASSERT(IS_P2ALIGNED(pfn, ppages));
3551 3573 pfn = page_pptonum(newpp);
3552 3574 ASSERT(IS_P2ALIGNED(pfn, ppages));
3553 3575 ASSERT(P2PHASE(pfn, pages) == pgidx);
3554 3576 #endif
3555 3577 nreloc = 0;
3556 3578 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL);
3557 3579 if (ret != 0 || nreloc == 0) {
3558 3580 panic("segvn_fill_vp_pages: "
3559 3581 "page_relocate failed");
3560 3582 }
3561 3583 pp = newpp;
3562 3584 while (nreloc-- != 0) {
3563 3585 ASSERT(PAGE_EXCL(pp));
3564 3586 ASSERT(pp->p_vnode == vp);
3565 3587 ASSERT(pgidx ==
3566 3588 ((pp->p_offset - start_off) >> PAGESHIFT));
3567 3589 ppa[pgidx++] = pp;
3568 3590 pp++;
3569 3591 }
3570 3592 }
3571 3593
3572 3594 if (svd->type == MAP_PRIVATE) {
3573 3595 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]);
3574 3596 for (i = 0; i < pages; i++) {
3575 3597 ASSERT(ppa[i] != NULL);
3576 3598 ASSERT(PAGE_EXCL(ppa[i]));
3577 3599 ASSERT(ppa[i]->p_vnode == vp);
3578 3600 ASSERT(ppa[i]->p_offset ==
3579 3601 start_off + (i << PAGESHIFT));
3580 3602 page_downgrade(ppa[i]);
3581 3603 }
3582 3604 ppa[pages] = NULL;
3583 3605 } else {
3584 3606 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]);
3585 3607 /*
3586 3608 		 * The caller will still call VOP_GETPAGE() for shared segments
3587 3609 		 * to check FS write permissions. For private segments we map the
3588 3610 		 * file read-only anyway, so no VOP_GETPAGE() is needed.
3589 3611 */
3590 3612 for (i = 0; i < pages; i++) {
3591 3613 ASSERT(ppa[i] != NULL);
3592 3614 ASSERT(PAGE_EXCL(ppa[i]));
3593 3615 ASSERT(ppa[i]->p_vnode == vp);
3594 3616 ASSERT(ppa[i]->p_offset ==
3595 3617 start_off + (i << PAGESHIFT));
3596 3618 page_unlock(ppa[i]);
3597 3619 }
3598 3620 ppa[0] = NULL;
3599 3621 }
3600 3622
3601 3623 return (1);
3602 3624 out:
3603 3625 /*
3604 3626 * Do the cleanup. Unlock target pages we didn't relocate. They are
3605 3627 	 * linked on targ_pplist by root pages. Reassemble the unused replacement
3606 3628 	 * and io pages back onto pplist.
3607 3629 */
3608 3630 if (io_pplist != NULL) {
3609 3631 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]);
3610 3632 pp = io_pplist;
3611 3633 do {
3612 3634 ASSERT(pp->p_vnode == vp);
3613 3635 ASSERT(pp->p_offset == io_off);
3614 3636 ASSERT(page_iolock_assert(pp));
3615 3637 page_io_unlock(pp);
3616 3638 page_hashout(pp, NULL);
3617 3639 io_off += PAGESIZE;
3618 3640 } while ((pp = pp->p_next) != io_pplist);
3619 3641 page_list_concat(&io_pplist, &pplist);
3620 3642 pplist = io_pplist;
3621 3643 }
3622 3644 tmp_pplist = NULL;
3623 3645 while (targ_pplist != NULL) {
3624 3646 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]);
3625 3647 pp = targ_pplist;
3626 3648 ASSERT(PAGE_EXCL(pp));
3627 3649 page_sub(&targ_pplist, pp);
3628 3650
3629 3651 pszc = pp->p_szc;
3630 3652 ppages = page_get_pagecnt(pszc);
3631 3653 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3632 3654
3633 3655 if (pszc != 0) {
3634 3656 group_page_unlock(pp);
3635 3657 }
3636 3658 page_unlock(pp);
3637 3659
3638 3660 pp = repl_pplist;
3639 3661 ASSERT(pp != NULL);
3640 3662 ASSERT(PAGE_EXCL(pp));
3641 3663 ASSERT(pp->p_szc == szc);
3642 3664 page_sub(&repl_pplist, pp);
3643 3665
3644 3666 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3645 3667
3646 3668 /* relink replacement page */
3647 3669 page_list_concat(&tmp_pplist, &pp);
3648 3670 while (--ppages != 0) {
3649 3671 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]);
3650 3672 pp++;
3651 3673 ASSERT(PAGE_EXCL(pp));
3652 3674 ASSERT(pp->p_szc == szc);
3653 3675 page_list_concat(&tmp_pplist, &pp);
3654 3676 }
3655 3677 }
3656 3678 if (tmp_pplist != NULL) {
3657 3679 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]);
3658 3680 page_list_concat(&tmp_pplist, &pplist);
3659 3681 pplist = tmp_pplist;
3660 3682 }
3661 3683 /*
3662 3684 	 * At this point all pages are either on done_pplist or on
3663 3685 	 * pplist. They can't all be on done_pplist, otherwise we
3664 3686 	 * would already have been done.
3665 3687 */
3666 3688 ASSERT(pplist != NULL);
3667 3689 if (nios != 0) {
3668 3690 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]);
3669 3691 pp = pplist;
3670 3692 do {
3671 3693 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]);
3672 3694 ASSERT(pp->p_szc == szc);
3673 3695 ASSERT(PAGE_EXCL(pp));
3674 3696 ASSERT(pp->p_vnode != vp);
3675 3697 pp->p_szc = 0;
3676 3698 } while ((pp = pp->p_next) != pplist);
3677 3699
3678 3700 pp = done_pplist;
3679 3701 do {
3680 3702 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]);
3681 3703 ASSERT(pp->p_szc == szc);
3682 3704 ASSERT(PAGE_EXCL(pp));
3683 3705 ASSERT(pp->p_vnode == vp);
3684 3706 pp->p_szc = 0;
3685 3707 } while ((pp = pp->p_next) != done_pplist);
3686 3708
3687 3709 while (pplist != NULL) {
3688 3710 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]);
3689 3711 pp = pplist;
3690 3712 page_sub(&pplist, pp);
3691 3713 page_free(pp, 0);
3692 3714 }
3693 3715
3694 3716 while (done_pplist != NULL) {
3695 3717 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]);
3696 3718 pp = done_pplist;
3697 3719 page_sub(&done_pplist, pp);
3698 3720 page_unlock(pp);
3699 3721 }
3700 3722 *ppplist = NULL;
3701 3723 return (0);
3702 3724 }
3703 3725 ASSERT(pplist == *ppplist);
3704 3726 if (io_err) {
3705 3727 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]);
3706 3728 /*
3707 3729 * don't downsize on io error.
3708 3730 * see if vop_getpage succeeds.
3709 3731 * pplist may still be used in this case
3710 3732 * for relocations.
3711 3733 */
3712 3734 return (0);
3713 3735 }
3714 3736 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]);
3715 3737 page_free_replacement_page(pplist);
3716 3738 page_create_putback(pages);
3717 3739 *ppplist = NULL;
3718 3740 return (0);
3719 3741 }
3720 3742
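/*
 * Added note (derived from the fault paths below): when nonzero,
 * segvn_anypgsz allows the anon large-page fault code to retry with
 * intermediate page sizes (szc - 1 / szc + 1) instead of jumping straight
 * to the existing page's size or to size 0.
 */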
3721 3743 int segvn_anypgsz = 0;
3722 3744
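/*
 * Added note: undo the softlockcnt bump taken for an F_SOFTLOCK fault when
 * the fault path has to bail out or retry.
 */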
3723 3745 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \
3724 3746 if ((type) == F_SOFTLOCK) { \
3725 3747 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
3726 3748 -(pages)); \
3727 3749 }
3728 3750
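/*
 * Added note: for VMODSORT vnodes either mark every page in ppa modified
 * (write fault), or, for non-write faults other than S_OTHER that would
 * grant write permission, strip PROT_WRITE unless every page is already
 * modified, so that the first real write still faults and sets the mod bit.
 */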
3729 3751 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \
3730 3752 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3731 3753 if ((rw) == S_WRITE) { \
3732 3754 for (i = 0; i < (pages); i++) { \
3733 3755 ASSERT((ppa)[i]->p_vnode == \
3734 3756 (ppa)[0]->p_vnode); \
3735 3757 hat_setmod((ppa)[i]); \
3736 3758 } \
3737 3759 } else if ((rw) != S_OTHER && \
3738 3760 ((prot) & (vpprot) & PROT_WRITE)) { \
3739 3761 for (i = 0; i < (pages); i++) { \
3740 3762 ASSERT((ppa)[i]->p_vnode == \
3741 3763 (ppa)[0]->p_vnode); \
3742 3764 if (!hat_ismod((ppa)[i])) { \
3743 3765 prot &= ~PROT_WRITE; \
3744 3766 break; \
3745 3767 } \
3746 3768 } \
3747 3769 } \
3748 3770 }
3749 3771
3750 3772 #ifdef VM_STATS
3751 3773
3752 3774 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \
3753 3775 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
3754 3776
3755 3777 #else /* VM_STATS */
3756 3778
3757 3779 #define SEGVN_VMSTAT_FLTVNPAGES(idx)
3758 3780
3759 3781 #endif
3760 3782
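/*
 * Added summary, derived from the function body below: handle a large-page
 * fault range on a vnode backed segment.  Each chunk is mapped with the
 * largest page size allowed by alignment and seg->s_szc; physically
 * contiguous pages are created via segvn_fill_vp_pages() or fetched with
 * VOP_GETPAGE(), ranges that already have anon pages are handed to
 * segvn_fault_anonpages(), COW/text-replication faults are privatized via
 * anon_map_privatepages(), and the mapping size is adjusted up or down
 * (ierr == -2 / ierr == -1) when existing page sizes or alignment get in
 * the way.
 */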
3761 3783 static faultcode_t
3762 3784 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
3763 3785 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
3764 3786 caddr_t eaddr, int brkcow)
3765 3787 {
3766 3788 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
3767 3789 struct anon_map *amp = svd->amp;
3768 3790 uchar_t segtype = svd->type;
3769 3791 uint_t szc = seg->s_szc;
3770 3792 size_t pgsz = page_get_pagesize(szc);
3771 3793 size_t maxpgsz = pgsz;
3772 3794 pgcnt_t pages = btop(pgsz);
3773 3795 pgcnt_t maxpages = pages;
3774 3796 size_t ppasize = (pages + 1) * sizeof (page_t *);
3775 3797 caddr_t a = lpgaddr;
3776 3798 caddr_t maxlpgeaddr = lpgeaddr;
3777 3799 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3778 3800 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3779 3801 struct vpage *vpage = (svd->vpage != NULL) ?
3780 3802 &svd->vpage[seg_page(seg, a)] : NULL;
3781 3803 vnode_t *vp = svd->vp;
3782 3804 page_t **ppa;
3783 3805 uint_t pszc;
3784 3806 size_t ppgsz;
3785 3807 pgcnt_t ppages;
3786 3808 faultcode_t err = 0;
3787 3809 int ierr;
3788 3810 int vop_size_err = 0;
3789 3811 uint_t protchk, prot, vpprot;
3790 3812 ulong_t i;
3791 3813 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
3792 3814 anon_sync_obj_t an_cookie;
3793 3815 enum seg_rw arw;
3794 3816 int alloc_failed = 0;
3795 3817 int adjszc_chk;
3796 3818 struct vattr va;
3797 3819 int xhat = 0;
3798 3820 page_t *pplist;
3799 3821 pfn_t pfn;
3800 3822 int physcontig;
3801 3823 int upgrdfail;
3802 3824 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
3803 3825 int tron = (svd->tr_state == SEGVN_TR_ON);
3804 3826
3805 3827 ASSERT(szc != 0);
3806 3828 ASSERT(vp != NULL);
3807 3829 ASSERT(brkcow == 0 || amp != NULL);
3808 3830 ASSERT(tron == 0 || amp != NULL);
3809 3831 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
3810 3832 ASSERT(!(svd->flags & MAP_NORESERVE));
3811 3833 ASSERT(type != F_SOFTUNLOCK);
3812 3834 ASSERT(IS_P2ALIGNED(a, maxpgsz));
3813 3835 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
3814 3836 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3815 3837 ASSERT(seg->s_szc < NBBY * sizeof (int));
3816 3838 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
3817 3839 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3818 3840
3819 3841 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]);
3820 3842 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]);
3821 3843
3822 3844 if (svd->flags & MAP_TEXT) {
3823 3845 hat_flag |= HAT_LOAD_TEXT;
3824 3846 }
3825 3847
3826 3848 if (svd->pageprot) {
3827 3849 switch (rw) {
3828 3850 case S_READ:
3829 3851 protchk = PROT_READ;
3830 3852 break;
3831 3853 case S_WRITE:
3832 3854 protchk = PROT_WRITE;
3833 3855 break;
3834 3856 case S_EXEC:
3835 3857 protchk = PROT_EXEC;
3836 3858 break;
3837 3859 case S_OTHER:
3838 3860 default:
3839 3861 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
3840 3862 break;
3841 3863 }
3842 3864 } else {
3843 3865 prot = svd->prot;
3844 3866 /* caller has already done segment level protection check. */
3845 3867 }
3846 3868
3847 3869 if (seg->s_as->a_hat != hat) {
3848 3870 xhat = 1;
3849 3871 }
3850 3872
3851 3873 if (rw == S_WRITE && segtype == MAP_PRIVATE) {
3852 3874 SEGVN_VMSTAT_FLTVNPAGES(2);
3853 3875 arw = S_READ;
3854 3876 } else {
3855 3877 arw = rw;
3856 3878 }
3857 3879
3858 3880 ppa = kmem_alloc(ppasize, KM_SLEEP);
3859 3881
3860 3882 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);
3861 3883
3862 3884 for (;;) {
3863 3885 adjszc_chk = 0;
3864 3886 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
3865 3887 if (adjszc_chk) {
3866 3888 while (szc < seg->s_szc) {
3867 3889 uintptr_t e;
3868 3890 uint_t tszc;
3869 3891 tszc = segvn_anypgsz_vnode ? szc + 1 :
3870 3892 seg->s_szc;
3871 3893 ppgsz = page_get_pagesize(tszc);
3872 3894 if (!IS_P2ALIGNED(a, ppgsz) ||
3873 3895 ((alloc_failed >> tszc) & 0x1)) {
3874 3896 break;
3875 3897 }
3876 3898 SEGVN_VMSTAT_FLTVNPAGES(4);
3877 3899 szc = tszc;
3878 3900 pgsz = ppgsz;
3879 3901 pages = btop(pgsz);
3880 3902 e = P2ROUNDUP((uintptr_t)eaddr, pgsz);
3881 3903 lpgeaddr = (caddr_t)e;
3882 3904 }
3883 3905 }
3884 3906
3885 3907 again:
3886 3908 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) {
3887 3909 ASSERT(IS_P2ALIGNED(aindx, maxpages));
3888 3910 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
3889 3911 anon_array_enter(amp, aindx, &an_cookie);
3890 3912 if (anon_get_ptr(amp->ahp, aindx) != NULL) {
3891 3913 SEGVN_VMSTAT_FLTVNPAGES(5);
3892 3914 ASSERT(anon_pages(amp->ahp, aindx,
3893 3915 maxpages) == maxpages);
3894 3916 anon_array_exit(&an_cookie);
3895 3917 ANON_LOCK_EXIT(&->a_rwlock);
3896 3918 err = segvn_fault_anonpages(hat, seg,
3897 3919 a, a + maxpgsz, type, rw,
3898 3920 MAX(a, addr),
3899 3921 MIN(a + maxpgsz, eaddr), brkcow);
3900 3922 if (err != 0) {
3901 3923 SEGVN_VMSTAT_FLTVNPAGES(6);
3902 3924 goto out;
3903 3925 }
3904 3926 if (szc < seg->s_szc) {
3905 3927 szc = seg->s_szc;
3906 3928 pgsz = maxpgsz;
3907 3929 pages = maxpages;
3908 3930 lpgeaddr = maxlpgeaddr;
3909 3931 }
3910 3932 goto next;
3911 3933 } else {
3912 3934 ASSERT(anon_pages(amp->ahp, aindx,
3913 3935 maxpages) == 0);
3914 3936 SEGVN_VMSTAT_FLTVNPAGES(7);
3915 3937 anon_array_exit(&an_cookie);
3916 3938 ANON_LOCK_EXIT(&->a_rwlock);
3917 3939 }
3918 3940 }
3919 3941 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz));
3920 3942 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz));
3921 3943
3922 3944 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
3923 3945 ASSERT(vpage != NULL);
3924 3946 prot = VPP_PROT(vpage);
3925 3947 ASSERT(sameprot(seg, a, maxpgsz));
3926 3948 if ((prot & protchk) == 0) {
3927 3949 SEGVN_VMSTAT_FLTVNPAGES(8);
3928 3950 err = FC_PROT;
3929 3951 goto out;
3930 3952 }
3931 3953 }
3932 3954 if (type == F_SOFTLOCK) {
3933 3955 atomic_add_long((ulong_t *)&svd->softlockcnt,
3934 3956 pages);
3935 3957 }
3936 3958
3937 3959 pplist = NULL;
3938 3960 physcontig = 0;
3939 3961 ppa[0] = NULL;
3940 3962 if (!brkcow && !tron && szc &&
3941 3963 !page_exists_physcontig(vp, off, szc,
3942 3964 segtype == MAP_PRIVATE ? ppa : NULL)) {
3943 3965 SEGVN_VMSTAT_FLTVNPAGES(9);
3944 3966 if (page_alloc_pages(vp, seg, a, &pplist, NULL,
3945 3967 szc, 0, 0) && type != F_SOFTLOCK) {
3946 3968 SEGVN_VMSTAT_FLTVNPAGES(10);
3947 3969 pszc = 0;
3948 3970 ierr = -1;
3949 3971 alloc_failed |= (1 << szc);
3950 3972 break;
3951 3973 }
3952 3974 if (pplist != NULL &&
3953 3975 vp->v_mpssdata == SEGVN_PAGEIO) {
3954 3976 int downsize;
3955 3977 SEGVN_VMSTAT_FLTVNPAGES(11);
3956 3978 physcontig = segvn_fill_vp_pages(svd,
3957 3979 vp, off, szc, ppa, &pplist,
3958 3980 &pszc, &downsize);
3959 3981 ASSERT(!physcontig || pplist == NULL);
3960 3982 if (!physcontig && downsize &&
3961 3983 type != F_SOFTLOCK) {
3962 3984 ASSERT(pplist == NULL);
3963 3985 SEGVN_VMSTAT_FLTVNPAGES(12);
3964 3986 ierr = -1;
3965 3987 break;
3966 3988 }
3967 3989 ASSERT(!physcontig ||
3968 3990 segtype == MAP_PRIVATE ||
3969 3991 ppa[0] == NULL);
3970 3992 if (physcontig && ppa[0] == NULL) {
3971 3993 physcontig = 0;
3972 3994 }
3973 3995 }
3974 3996 } else if (!brkcow && !tron && szc && ppa[0] != NULL) {
3975 3997 SEGVN_VMSTAT_FLTVNPAGES(13);
3976 3998 ASSERT(segtype == MAP_PRIVATE);
3977 3999 physcontig = 1;
3978 4000 }
3979 4001
3980 4002 if (!physcontig) {
3981 4003 SEGVN_VMSTAT_FLTVNPAGES(14);
3982 4004 ppa[0] = NULL;
3983 4005 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
3984 4006 &vpprot, ppa, pgsz, seg, a, arw,
3985 4007 svd->cred, NULL);
3986 4008 #ifdef DEBUG
3987 4009 if (ierr == 0) {
3988 4010 for (i = 0; i < pages; i++) {
3989 4011 ASSERT(PAGE_LOCKED(ppa[i]));
3990 4012 ASSERT(!PP_ISFREE(ppa[i]));
3991 4013 ASSERT(ppa[i]->p_vnode == vp);
3992 4014 ASSERT(ppa[i]->p_offset ==
3993 4015 off + (i << PAGESHIFT));
3994 4016 }
3995 4017 }
3996 4018 #endif /* DEBUG */
3997 4019 if (segtype == MAP_PRIVATE) {
3998 4020 SEGVN_VMSTAT_FLTVNPAGES(15);
3999 4021 vpprot &= ~PROT_WRITE;
4000 4022 }
4001 4023 } else {
4002 4024 ASSERT(segtype == MAP_PRIVATE);
4003 4025 SEGVN_VMSTAT_FLTVNPAGES(16);
4004 4026 vpprot = PROT_ALL & ~PROT_WRITE;
4005 4027 ierr = 0;
4006 4028 }
4007 4029
4008 4030 if (ierr != 0) {
4009 4031 SEGVN_VMSTAT_FLTVNPAGES(17);
4010 4032 if (pplist != NULL) {
4011 4033 SEGVN_VMSTAT_FLTVNPAGES(18);
4012 4034 page_free_replacement_page(pplist);
4013 4035 page_create_putback(pages);
4014 4036 }
4015 4037 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4016 4038 if (a + pgsz <= eaddr) {
4017 4039 SEGVN_VMSTAT_FLTVNPAGES(19);
4018 4040 err = FC_MAKE_ERR(ierr);
4019 4041 goto out;
4020 4042 }
4021 4043 va.va_mask = AT_SIZE;
4022 4044 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
4023 4045 SEGVN_VMSTAT_FLTVNPAGES(20);
4024 4046 err = FC_MAKE_ERR(EIO);
4025 4047 goto out;
4026 4048 }
4027 4049 if (btopr(va.va_size) >= btopr(off + pgsz)) {
4028 4050 SEGVN_VMSTAT_FLTVNPAGES(21);
4029 4051 err = FC_MAKE_ERR(ierr);
4030 4052 goto out;
4031 4053 }
4032 4054 if (btopr(va.va_size) <
4033 4055 btopr(off + (eaddr - a))) {
4034 4056 SEGVN_VMSTAT_FLTVNPAGES(22);
4035 4057 err = FC_MAKE_ERR(ierr);
4036 4058 goto out;
4037 4059 }
4038 4060 if (brkcow || tron || type == F_SOFTLOCK) {
4039 4061 /* can't reduce map area */
4040 4062 SEGVN_VMSTAT_FLTVNPAGES(23);
4041 4063 vop_size_err = 1;
4042 4064 goto out;
4043 4065 }
4044 4066 SEGVN_VMSTAT_FLTVNPAGES(24);
4045 4067 ASSERT(szc != 0);
4046 4068 pszc = 0;
4047 4069 ierr = -1;
4048 4070 break;
4049 4071 }
4050 4072
4051 4073 if (amp != NULL) {
4052 4074 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4053 4075 anon_array_enter(amp, aindx, &an_cookie);
4054 4076 }
4055 4077 if (amp != NULL &&
4056 4078 anon_get_ptr(amp->ahp, aindx) != NULL) {
4057 4079 ulong_t taindx = P2ALIGN(aindx, maxpages);
4058 4080
4059 4081 SEGVN_VMSTAT_FLTVNPAGES(25);
4060 4082 ASSERT(anon_pages(amp->ahp, taindx,
4061 4083 maxpages) == maxpages);
4062 4084 for (i = 0; i < pages; i++) {
4063 4085 page_unlock(ppa[i]);
4064 4086 }
4065 4087 anon_array_exit(&an_cookie);
4066 4088 ANON_LOCK_EXIT(&->a_rwlock);
4067 4089 if (pplist != NULL) {
4068 4090 page_free_replacement_page(pplist);
4069 4091 page_create_putback(pages);
4070 4092 }
4071 4093 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4072 4094 if (szc < seg->s_szc) {
4073 4095 SEGVN_VMSTAT_FLTVNPAGES(26);
4074 4096 /*
4075 4097 * For private segments SOFTLOCK
4076 4098 * either always breaks cow (any rw
4077 4099 * type except S_READ_NOCOW) or
4078 4100 * address space is locked as writer
4079 4101 * (S_READ_NOCOW case) and anon slots
4080 4102 * can't show up on second check.
4081 4103 * Therefore if we are here for
4082 4104 * SOFTLOCK case it must be a cow
4083 4105 * break but cow break never reduces
4084 4106 * szc. text replication (tron) in
4085 4107 * this case works as cow break.
4086 4108 * Thus the assert below.
4087 4109 */
4088 4110 ASSERT(!brkcow && !tron &&
4089 4111 type != F_SOFTLOCK);
4090 4112 pszc = seg->s_szc;
4091 4113 ierr = -2;
4092 4114 break;
4093 4115 }
4094 4116 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4095 4117 goto again;
4096 4118 }
4097 4119 #ifdef DEBUG
4098 4120 if (amp != NULL) {
4099 4121 ulong_t taindx = P2ALIGN(aindx, maxpages);
4100 4122 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4101 4123 }
4102 4124 #endif /* DEBUG */
4103 4125
4104 4126 if (brkcow || tron) {
4105 4127 ASSERT(amp != NULL);
4106 4128 ASSERT(pplist == NULL);
4107 4129 ASSERT(szc == seg->s_szc);
4108 4130 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4109 4131 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4110 4132 SEGVN_VMSTAT_FLTVNPAGES(27);
4111 4133 ierr = anon_map_privatepages(amp, aindx, szc,
4112 4134 seg, a, prot, ppa, vpage, segvn_anypgsz,
4113 4135 tron ? PG_LOCAL : 0, svd->cred);
4114 4136 if (ierr != 0) {
4115 4137 SEGVN_VMSTAT_FLTVNPAGES(28);
4116 4138 anon_array_exit(&an_cookie);
4117 4139 ANON_LOCK_EXIT(&->a_rwlock);
4118 4140 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4119 4141 err = FC_MAKE_ERR(ierr);
4120 4142 goto out;
4121 4143 }
4122 4144
4123 4145 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4124 4146 /*
4125 4147 * p_szc can't be changed for locked
4126 4148 * swapfs pages.
4127 4149 */
4128 4150 ASSERT(svd->rcookie ==
4129 4151 HAT_INVALID_REGION_COOKIE);
4130 4152 hat_memload_array(hat, a, pgsz, ppa, prot,
4131 4153 hat_flag);
4132 4154
4133 4155 if (!(hat_flag & HAT_LOAD_LOCK)) {
4134 4156 SEGVN_VMSTAT_FLTVNPAGES(29);
4135 4157 for (i = 0; i < pages; i++) {
4136 4158 page_unlock(ppa[i]);
4137 4159 }
4138 4160 }
4139 4161 anon_array_exit(&an_cookie);
4140 4162 ANON_LOCK_EXIT(&->a_rwlock);
4141 4163 goto next;
4142 4164 }
4143 4165
4144 4166 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4145 4167 (!svd->pageprot && svd->prot == (prot & vpprot)));
4146 4168
4147 4169 pfn = page_pptonum(ppa[0]);
4148 4170 /*
4149 4171 			 * hat_page_demote() needs an SE_EXCL lock on one of the
4150 4172 			 * constituent page_t's and it decreases the root's p_szc
4151 4173 			 * last. This means that if the root's p_szc is equal to
4152 4174 			 * szc and all its constituent pages are locked, any
4153 4175 			 * hat_page_demote() that could have changed p_szc to szc
4154 4176 			 * is already done and no new hat_page_demote() can start
4155 4177 			 * for this large page.
4156 4178 */
4157 4179
4158 4180 /*
4159 4181 			 * We need to make sure the same mapping size is used
4160 4182 			 * for the same address range if there's a possibility
4161 4183 			 * the address is already mapped, because the hat layer
4162 4184 			 * panics when a translation is loaded for a range that
4163 4185 			 * is already mapped with a different page size. We
4164 4186 			 * achieve this by always using the largest page size
4165 4187 			 * possible subject to the constraints of page size,
4166 4188 			 * segment page size and page alignment. Since mappings
4167 4189 			 * are invalidated when those constraints change and
4168 4190 			 * make it impossible to use the previous mapping size,
4169 4191 			 * no mapping size conflicts should happen.
4170 4192 */
4171 4193
4172 4194 chkszc:
4173 4195 if ((pszc = ppa[0]->p_szc) == szc &&
4174 4196 IS_P2ALIGNED(pfn, pages)) {
4175 4197
4176 4198 SEGVN_VMSTAT_FLTVNPAGES(30);
4177 4199 #ifdef DEBUG
4178 4200 for (i = 0; i < pages; i++) {
4179 4201 ASSERT(PAGE_LOCKED(ppa[i]));
4180 4202 ASSERT(!PP_ISFREE(ppa[i]));
4181 4203 ASSERT(page_pptonum(ppa[i]) ==
4182 4204 pfn + i);
4183 4205 ASSERT(ppa[i]->p_szc == szc);
4184 4206 ASSERT(ppa[i]->p_vnode == vp);
4185 4207 ASSERT(ppa[i]->p_offset ==
4186 4208 off + (i << PAGESHIFT));
4187 4209 }
4188 4210 #endif /* DEBUG */
4189 4211 /*
4190 4212 			 * All pages are of the szc we need and they are
4191 4213 			 * all locked, so they can't change szc. Load the
4192 4214 			 * translations.
4193 4215 			 *
4194 4216 			 * If the page got promoted since the last check
4195 4217 			 * we don't need pplist.
4196 4218 */
4197 4219 if (pplist != NULL) {
4198 4220 page_free_replacement_page(pplist);
4199 4221 page_create_putback(pages);
4200 4222 }
4201 4223 if (PP_ISMIGRATE(ppa[0])) {
4202 4224 page_migrate(seg, a, ppa, pages);
4203 4225 }
4204 4226 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4205 4227 prot, vpprot);
4206 4228 if (!xhat) {
4207 4229 hat_memload_array_region(hat, a, pgsz,
4208 4230 ppa, prot & vpprot, hat_flag,
4209 4231 svd->rcookie);
4210 4232 } else {
4211 4233 /*
4212 4234 * avoid large xhat mappings to FS
4213 4235 * pages so that hat_page_demote()
4214 4236 * doesn't need to check for xhat
4215 4237 * large mappings.
4216 4238 * Don't use regions with xhats.
4217 4239 */
4218 4240 for (i = 0; i < pages; i++) {
4219 4241 hat_memload(hat,
4220 4242 a + (i << PAGESHIFT),
4221 4243 ppa[i], prot & vpprot,
4222 4244 hat_flag);
4223 4245 }
4224 4246 }
4225 4247
4226 4248 if (!(hat_flag & HAT_LOAD_LOCK)) {
4227 4249 for (i = 0; i < pages; i++) {
4228 4250 page_unlock(ppa[i]);
4229 4251 }
4230 4252 }
4231 4253 if (amp != NULL) {
4232 4254 anon_array_exit(&an_cookie);
4233 4255 ANON_LOCK_EXIT(&->a_rwlock);
4234 4256 }
4235 4257 goto next;
4236 4258 }
4237 4259
4238 4260 /*
4239 4261 * See if upsize is possible.
4240 4262 */
4241 4263 if (pszc > szc && szc < seg->s_szc &&
4242 4264 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4243 4265 pgcnt_t aphase;
4244 4266 uint_t pszc1 = MIN(pszc, seg->s_szc);
4245 4267 ppgsz = page_get_pagesize(pszc1);
4246 4268 ppages = btop(ppgsz);
4247 4269 aphase = btop(P2PHASE((uintptr_t)a, ppgsz));
4248 4270
4249 4271 ASSERT(type != F_SOFTLOCK);
4250 4272
4251 4273 SEGVN_VMSTAT_FLTVNPAGES(31);
4252 4274 if (aphase != P2PHASE(pfn, ppages)) {
4253 4275 segvn_faultvnmpss_align_err4++;
4254 4276 } else {
4255 4277 SEGVN_VMSTAT_FLTVNPAGES(32);
4256 4278 if (pplist != NULL) {
4257 4279 page_t *pl = pplist;
4258 4280 page_free_replacement_page(pl);
4259 4281 page_create_putback(pages);
4260 4282 }
4261 4283 for (i = 0; i < pages; i++) {
4262 4284 page_unlock(ppa[i]);
4263 4285 }
4264 4286 if (amp != NULL) {
4265 4287 anon_array_exit(&an_cookie);
4266 4288 ANON_LOCK_EXIT(&->a_rwlock);
4267 4289 }
4268 4290 pszc = pszc1;
4269 4291 ierr = -2;
4270 4292 break;
4271 4293 }
4272 4294 }
4273 4295
4274 4296 /*
4275 4297 * check if we should use smallest mapping size.
4276 4298 */
4277 4299 upgrdfail = 0;
4278 4300 if (szc == 0 || xhat ||
4279 4301 (pszc >= szc &&
4280 4302 !IS_P2ALIGNED(pfn, pages)) ||
4281 4303 (pszc < szc &&
4282 4304 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4283 4305 &pszc))) {
4284 4306
4285 4307 if (upgrdfail && type != F_SOFTLOCK) {
4286 4308 /*
4287 4309 * segvn_full_szcpages failed to lock
4288 4310 * all pages EXCL. Size down.
4289 4311 */
4290 4312 ASSERT(pszc < szc);
4291 4313
4292 4314 SEGVN_VMSTAT_FLTVNPAGES(33);
4293 4315
4294 4316 if (pplist != NULL) {
4295 4317 page_t *pl = pplist;
4296 4318 page_free_replacement_page(pl);
4297 4319 page_create_putback(pages);
4298 4320 }
4299 4321
4300 4322 for (i = 0; i < pages; i++) {
4301 4323 page_unlock(ppa[i]);
4302 4324 }
4303 4325 if (amp != NULL) {
4304 4326 anon_array_exit(&an_cookie);
4305 4327 ANON_LOCK_EXIT(&->a_rwlock);
4306 4328 }
4307 4329 ierr = -1;
4308 4330 break;
4309 4331 }
4310 4332 if (szc != 0 && !xhat && !upgrdfail) {
4311 4333 segvn_faultvnmpss_align_err5++;
4312 4334 }
4313 4335 SEGVN_VMSTAT_FLTVNPAGES(34);
4314 4336 if (pplist != NULL) {
4315 4337 page_free_replacement_page(pplist);
4316 4338 page_create_putback(pages);
4317 4339 }
4318 4340 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4319 4341 prot, vpprot);
4320 4342 if (upgrdfail && segvn_anypgsz_vnode) {
4321 4343 /* SOFTLOCK case */
4322 4344 hat_memload_array_region(hat, a, pgsz,
4323 4345 ppa, prot & vpprot, hat_flag,
4324 4346 svd->rcookie);
4325 4347 } else {
4326 4348 for (i = 0; i < pages; i++) {
4327 4349 hat_memload_region(hat,
4328 4350 a + (i << PAGESHIFT),
4329 4351 ppa[i], prot & vpprot,
4330 4352 hat_flag, svd->rcookie);
4331 4353 }
4332 4354 }
4333 4355 if (!(hat_flag & HAT_LOAD_LOCK)) {
4334 4356 for (i = 0; i < pages; i++) {
4335 4357 page_unlock(ppa[i]);
4336 4358 }
4337 4359 }
4338 4360 if (amp != NULL) {
4339 4361 anon_array_exit(&an_cookie);
4340 4362 ANON_LOCK_EXIT(&->a_rwlock);
4341 4363 }
4342 4364 goto next;
4343 4365 }
4344 4366
4345 4367 if (pszc == szc) {
4346 4368 /*
4347 4369 * segvn_full_szcpages() upgraded pages szc.
4348 4370 */
4349 4371 ASSERT(pszc == ppa[0]->p_szc);
4350 4372 ASSERT(IS_P2ALIGNED(pfn, pages));
4351 4373 goto chkszc;
4352 4374 }
4353 4375
4354 4376 if (pszc > szc) {
4355 4377 kmutex_t *szcmtx;
4356 4378 SEGVN_VMSTAT_FLTVNPAGES(35);
4357 4379 /*
4358 4380 			 * p_szc of ppa[0] can change since we haven't
4359 4381 			 * locked all the constituent pages. Call
4360 4382 			 * page_szc_lock() to prevent szc changes.
4361 4383 * This should be a rare case that happens when
4362 4384 * multiple segments use a different page size
4363 4385 * to map the same file offsets.
4364 4386 */
4365 4387 szcmtx = page_szc_lock(ppa[0]);
4366 4388 pszc = ppa[0]->p_szc;
4367 4389 ASSERT(szcmtx != NULL || pszc == 0);
4368 4390 ASSERT(ppa[0]->p_szc <= pszc);
4369 4391 if (pszc <= szc) {
4370 4392 SEGVN_VMSTAT_FLTVNPAGES(36);
4371 4393 if (szcmtx != NULL) {
4372 4394 mutex_exit(szcmtx);
4373 4395 }
4374 4396 goto chkszc;
4375 4397 }
4376 4398 if (pplist != NULL) {
4377 4399 /*
4378 4400 				 * The page got promoted since the last check,
4379 4401 				 * so we don't need the preallocated large
4380 4402 				 * page.
4381 4403 */
4382 4404 SEGVN_VMSTAT_FLTVNPAGES(37);
4383 4405 page_free_replacement_page(pplist);
4384 4406 page_create_putback(pages);
4385 4407 }
4386 4408 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4387 4409 prot, vpprot);
4388 4410 hat_memload_array_region(hat, a, pgsz, ppa,
4389 4411 prot & vpprot, hat_flag, svd->rcookie);
4390 4412 mutex_exit(szcmtx);
4391 4413 if (!(hat_flag & HAT_LOAD_LOCK)) {
4392 4414 for (i = 0; i < pages; i++) {
4393 4415 page_unlock(ppa[i]);
4394 4416 }
4395 4417 }
4396 4418 if (amp != NULL) {
4397 4419 anon_array_exit(&an_cookie);
4398 4420 ANON_LOCK_EXIT(&->a_rwlock);
4399 4421 }
4400 4422 goto next;
4401 4423 }
4402 4424
4403 4425 /*
4404 4426 			 * If the page got demoted since the last check
4405 4427 			 * we may not have allocated a large page yet.
4406 4428 			 * Allocate one now.
4407 4429 */
4408 4430 if (pplist == NULL &&
4409 4431 page_alloc_pages(vp, seg, a, &pplist, NULL,
4410 4432 szc, 0, 0) && type != F_SOFTLOCK) {
4411 4433 SEGVN_VMSTAT_FLTVNPAGES(38);
4412 4434 for (i = 0; i < pages; i++) {
4413 4435 page_unlock(ppa[i]);
4414 4436 }
4415 4437 if (amp != NULL) {
4416 4438 anon_array_exit(&an_cookie);
4417 4439 ANON_LOCK_EXIT(&->a_rwlock);
4418 4440 }
4419 4441 ierr = -1;
4420 4442 alloc_failed |= (1 << szc);
4421 4443 break;
4422 4444 }
4423 4445
4424 4446 SEGVN_VMSTAT_FLTVNPAGES(39);
4425 4447
4426 4448 if (pplist != NULL) {
4427 4449 segvn_relocate_pages(ppa, pplist);
4428 4450 #ifdef DEBUG
4429 4451 } else {
4430 4452 ASSERT(type == F_SOFTLOCK);
4431 4453 SEGVN_VMSTAT_FLTVNPAGES(40);
4432 4454 #endif /* DEBUG */
4433 4455 }
4434 4456
4435 4457 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4436 4458
4437 4459 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4438 4460 ASSERT(type == F_SOFTLOCK);
4439 4461 for (i = 0; i < pages; i++) {
4440 4462 ASSERT(ppa[i]->p_szc < szc);
4441 4463 hat_memload_region(hat,
4442 4464 a + (i << PAGESHIFT),
4443 4465 ppa[i], prot & vpprot, hat_flag,
4444 4466 svd->rcookie);
4445 4467 }
4446 4468 } else {
4447 4469 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4448 4470 hat_memload_array_region(hat, a, pgsz, ppa,
4449 4471 prot & vpprot, hat_flag, svd->rcookie);
4450 4472 }
4451 4473 if (!(hat_flag & HAT_LOAD_LOCK)) {
4452 4474 for (i = 0; i < pages; i++) {
4453 4475 ASSERT(PAGE_SHARED(ppa[i]));
4454 4476 page_unlock(ppa[i]);
4455 4477 }
4456 4478 }
4457 4479 if (amp != NULL) {
4458 4480 anon_array_exit(&an_cookie);
4459 4481 ANON_LOCK_EXIT(&->a_rwlock);
4460 4482 }
4461 4483
4462 4484 next:
4463 4485 if (vpage != NULL) {
4464 4486 vpage += pages;
4465 4487 }
4466 4488 adjszc_chk = 1;
4467 4489 }
4468 4490 if (a == lpgeaddr)
4469 4491 break;
4470 4492 ASSERT(a < lpgeaddr);
4471 4493
4472 4494 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4473 4495
4474 4496 /*
4475 4497 		 * ierr == -1 means we failed to map with a large page
4476 4498 		 * (either due to allocation/relocation failures or
4477 4499 		 * misalignment with other mappings to this file).
4478 4500 		 *
4479 4501 		 * ierr == -2 means some other thread allocated a large page
4480 4502 		 * after we gave up trying to map with a large page. Retry
4481 4503 		 * with a larger mapping.
4482 4504 */
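		/*
		 * Added example, illustrative only: with seg->s_szc == 2,
		 * an ierr == -2 retry resumes at the pszc created by the
		 * other thread (at most seg->s_szc), while an ierr == -1
		 * retry drops to szc - 1 if segvn_anypgsz_vnode is set and
		 * otherwise all the way down to szc 0.
		 */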
4483 4505 ASSERT(ierr == -1 || ierr == -2);
4484 4506 ASSERT(ierr == -2 || szc != 0);
4485 4507 ASSERT(ierr == -1 || szc < seg->s_szc);
4486 4508 if (ierr == -2) {
4487 4509 SEGVN_VMSTAT_FLTVNPAGES(41);
4488 4510 ASSERT(pszc > szc && pszc <= seg->s_szc);
4489 4511 szc = pszc;
4490 4512 } else if (segvn_anypgsz_vnode) {
4491 4513 SEGVN_VMSTAT_FLTVNPAGES(42);
4492 4514 szc--;
4493 4515 } else {
4494 4516 SEGVN_VMSTAT_FLTVNPAGES(43);
4495 4517 ASSERT(pszc < szc);
4496 4518 /*
4497 4519 			 * Another process created a pszc large page,
4498 4520 			 * but we still have to drop to szc 0.
4499 4521 */
4500 4522 szc = 0;
4501 4523 }
4502 4524
4503 4525 pgsz = page_get_pagesize(szc);
4504 4526 pages = btop(pgsz);
4505 4527 if (ierr == -2) {
4506 4528 /*
4507 4529 * Size up case. Note lpgaddr may only be needed for
4508 4530 * softlock case so we don't adjust it here.
4509 4531 */
4510 4532 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4511 4533 ASSERT(a >= lpgaddr);
4512 4534 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4513 4535 off = svd->offset + (uintptr_t)(a - seg->s_base);
4514 4536 aindx = svd->anon_index + seg_page(seg, a);
4515 4537 vpage = (svd->vpage != NULL) ?
4516 4538 &svd->vpage[seg_page(seg, a)] : NULL;
4517 4539 } else {
4518 4540 /*
4519 4541 * Size down case. Note lpgaddr may only be needed for
4520 4542 * softlock case so we don't adjust it here.
4521 4543 */
4522 4544 ASSERT(IS_P2ALIGNED(a, pgsz));
4523 4545 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4524 4546 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4525 4547 ASSERT(a < lpgeaddr);
4526 4548 if (a < addr) {
4527 4549 SEGVN_VMSTAT_FLTVNPAGES(44);
4528 4550 /*
4529 4551 * The beginning of the large page region can
4530 4552 * be pulled to the right to make a smaller
4531 4553 * region. We haven't yet faulted a single
4532 4554 * page.
4533 4555 */
4534 4556 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4535 4557 ASSERT(a >= lpgaddr);
4536 4558 off = svd->offset +
4537 4559 (uintptr_t)(a - seg->s_base);
4538 4560 aindx = svd->anon_index + seg_page(seg, a);
4539 4561 vpage = (svd->vpage != NULL) ?
4540 4562 &svd->vpage[seg_page(seg, a)] : NULL;
4541 4563 }
4542 4564 }
4543 4565 }
4544 4566 out:
4545 4567 kmem_free(ppa, ppasize);
4546 4568 if (!err && !vop_size_err) {
4547 4569 SEGVN_VMSTAT_FLTVNPAGES(45);
4548 4570 return (0);
4549 4571 }
4550 4572 if (type == F_SOFTLOCK && a > lpgaddr) {
4551 4573 SEGVN_VMSTAT_FLTVNPAGES(46);
4552 4574 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4553 4575 }
4554 4576 if (!vop_size_err) {
4555 4577 SEGVN_VMSTAT_FLTVNPAGES(47);
4556 4578 return (err);
4557 4579 }
4558 4580 ASSERT(brkcow || tron || type == F_SOFTLOCK);
4559 4581 /*
4560 4582 	 * The large page end is mapped beyond the end of the file and it's a cow
4561 4583 	 * fault (possibly a text replication induced cow) or a softlock, so we
4562 4584 	 * can't reduce the map area. For now just demote the segment. This should
4563 4585 	 * really only happen if the end of the file changed after the mapping
4564 4586 	 * was established, since when large page segments are created we make
4565 4587 	 * sure they don't extend beyond the end of the file.
4566 4588 */
4567 4589 SEGVN_VMSTAT_FLTVNPAGES(48);
4568 4590
4569 4591 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4570 4592 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4571 4593 err = 0;
4572 4594 if (seg->s_szc != 0) {
4573 4595 segvn_fltvnpages_clrszc_cnt++;
4574 4596 ASSERT(svd->softlockcnt == 0);
4575 4597 err = segvn_clrszc(seg);
4576 4598 if (err != 0) {
4577 4599 segvn_fltvnpages_clrszc_err++;
4578 4600 }
4579 4601 }
4580 4602 ASSERT(err || seg->s_szc == 0);
4581 4603 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
4582 4604 /* segvn_fault will do its job as if szc had been zero to begin with */
4583 4605 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err));
4584 4606 }
4585 4607
4586 4608 /*
4587 4609 * This routine will attempt to fault in one large page.
4588 4610  * It will use smaller pages if that fails.
4589 4611 * It should only be called for pure anonymous segments.
4590 4612 */
4591 4613 static faultcode_t
4592 4614 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
4593 4615 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
4594 4616 caddr_t eaddr, int brkcow)
4595 4617 {
4596 4618 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4597 4619 struct anon_map *amp = svd->amp;
4598 4620 uchar_t segtype = svd->type;
4599 4621 uint_t szc = seg->s_szc;
4600 4622 size_t pgsz = page_get_pagesize(szc);
4601 4623 size_t maxpgsz = pgsz;
4602 4624 pgcnt_t pages = btop(pgsz);
4603 4625 uint_t ppaszc = szc;
4604 4626 caddr_t a = lpgaddr;
4605 4627 ulong_t aindx = svd->anon_index + seg_page(seg, a);
4606 4628 struct vpage *vpage = (svd->vpage != NULL) ?
4607 4629 &svd->vpage[seg_page(seg, a)] : NULL;
4608 4630 page_t **ppa;
4609 4631 uint_t ppa_szc;
4610 4632 faultcode_t err;
4611 4633 int ierr;
4612 4634 uint_t protchk, prot, vpprot;
4613 4635 ulong_t i;
4614 4636 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
4615 4637 anon_sync_obj_t cookie;
4616 4638 int adjszc_chk;
4617 4639 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;
4618 4640
4619 4641 ASSERT(szc != 0);
4620 4642 ASSERT(amp != NULL);
4621 4643 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
4622 4644 ASSERT(!(svd->flags & MAP_NORESERVE));
4623 4645 ASSERT(type != F_SOFTUNLOCK);
4624 4646 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4625 4647 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
4626 4648 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4627 4649
4628 4650 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
4629 4651
4630 4652 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]);
4631 4653 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]);
4632 4654
4633 4655 if (svd->flags & MAP_TEXT) {
4634 4656 hat_flag |= HAT_LOAD_TEXT;
4635 4657 }
4636 4658
4637 4659 if (svd->pageprot) {
4638 4660 switch (rw) {
4639 4661 case S_READ:
4640 4662 protchk = PROT_READ;
4641 4663 break;
4642 4664 case S_WRITE:
4643 4665 protchk = PROT_WRITE;
4644 4666 break;
4645 4667 case S_EXEC:
4646 4668 protchk = PROT_EXEC;
4647 4669 break;
4648 4670 case S_OTHER:
4649 4671 default:
4650 4672 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4651 4673 break;
4652 4674 }
4653 4675 VM_STAT_ADD(segvnvmstats.fltanpages[2]);
4654 4676 } else {
4655 4677 prot = svd->prot;
4656 4678 /* caller has already done segment level protection check. */
4657 4679 }
4658 4680
4659 4681 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
4660 4682 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4661 4683 for (;;) {
4662 4684 adjszc_chk = 0;
4663 4685 for (; a < lpgeaddr; a += pgsz, aindx += pages) {
4664 4686 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
4665 4687 VM_STAT_ADD(segvnvmstats.fltanpages[3]);
4666 4688 ASSERT(vpage != NULL);
4667 4689 prot = VPP_PROT(vpage);
4668 4690 ASSERT(sameprot(seg, a, maxpgsz));
4669 4691 if ((prot & protchk) == 0) {
4670 4692 err = FC_PROT;
4671 4693 goto error;
4672 4694 }
4673 4695 }
4674 4696 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) &&
4675 4697 pgsz < maxpgsz) {
4676 4698 ASSERT(a > lpgaddr);
4677 4699 szc = seg->s_szc;
4678 4700 pgsz = maxpgsz;
4679 4701 pages = btop(pgsz);
4680 4702 ASSERT(IS_P2ALIGNED(aindx, pages));
4681 4703 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr,
4682 4704 pgsz);
4683 4705 }
4684 4706 if (type == F_SOFTLOCK) {
4685 4707 atomic_add_long((ulong_t *)&svd->softlockcnt,
4686 4708 pages);
4687 4709 }
4688 4710 anon_array_enter(amp, aindx, &cookie);
4689 4711 ppa_szc = (uint_t)-1;
4690 4712 ierr = anon_map_getpages(amp, aindx, szc, seg, a,
4691 4713 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow,
4692 4714 segvn_anypgsz, pgflags, svd->cred);
4693 4715 if (ierr != 0) {
4694 4716 anon_array_exit(&cookie);
4695 4717 VM_STAT_ADD(segvnvmstats.fltanpages[4]);
4696 4718 if (type == F_SOFTLOCK) {
4697 4719 atomic_add_long(
4698 4720 (ulong_t *)&svd->softlockcnt,
4699 4721 -pages);
4700 4722 }
4701 4723 if (ierr > 0) {
4702 4724 VM_STAT_ADD(segvnvmstats.fltanpages[6]);
4703 4725 err = FC_MAKE_ERR(ierr);
4704 4726 goto error;
4705 4727 }
4706 4728 break;
4707 4729 }
4708 4730
4709 4731 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4710 4732
4711 4733 ASSERT(segtype == MAP_SHARED ||
4712 4734 ppa[0]->p_szc <= szc);
4713 4735 ASSERT(segtype == MAP_PRIVATE ||
4714 4736 ppa[0]->p_szc >= szc);
4715 4737
4716 4738 /*
4717 4739 * Handle pages that have been marked for migration
4718 4740 */
4719 4741 if (lgrp_optimizations())
4720 4742 page_migrate(seg, a, ppa, pages);
4721 4743
4722 4744 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
4723 4745
4724 4746 if (segtype == MAP_SHARED) {
4725 4747 vpprot |= PROT_WRITE;
4726 4748 }
4727 4749
4728 4750 hat_memload_array(hat, a, pgsz, ppa,
4729 4751 prot & vpprot, hat_flag);
4730 4752
4731 4753 if (hat_flag & HAT_LOAD_LOCK) {
4732 4754 VM_STAT_ADD(segvnvmstats.fltanpages[7]);
4733 4755 } else {
4734 4756 VM_STAT_ADD(segvnvmstats.fltanpages[8]);
4735 4757 for (i = 0; i < pages; i++)
4736 4758 page_unlock(ppa[i]);
4737 4759 }
4738 4760 if (vpage != NULL)
4739 4761 vpage += pages;
4740 4762
4741 4763 anon_array_exit(&cookie);
4742 4764 adjszc_chk = 1;
4743 4765 }
4744 4766 if (a == lpgeaddr)
4745 4767 break;
4746 4768 ASSERT(a < lpgeaddr);
4747 4769 /*
4748 4770 		 * ierr == -1 means we failed to allocate a large page,
4749 4771 		 * so do a size down operation.
4750 4772 *
4751 4773 * ierr == -2 means some other process that privately shares
4752 4774 * pages with this process has allocated a larger page and we
4753 4775 * need to retry with larger pages. So do a size up
4754 4776 * operation. This relies on the fact that large pages are
4755 4777 * never partially shared i.e. if we share any constituent
4756 4778 * page of a large page with another process we must share the
4757 4779 * entire large page. Note this cannot happen for SOFTLOCK
4758 4780 * case, unless current address (a) is at the beginning of the
4759 4781 * next page size boundary because the other process couldn't
4760 4782 * have relocated locked pages.
4761 4783 */
4762 4784 ASSERT(ierr == -1 || ierr == -2);
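		/*
		 * Added example, illustrative only: with segvn_anypgsz set
		 * the retry simply steps szc down (ierr == -1) or up
		 * (ierr == -2) by one; otherwise it re-sizes to the existing
		 * page's szc reported in ppa_szc, or to 0 / seg->s_szc when
		 * no existing page size was reported.
		 */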
4763 4785
4764 4786 if (segvn_anypgsz) {
4765 4787 ASSERT(ierr == -2 || szc != 0);
4766 4788 ASSERT(ierr == -1 || szc < seg->s_szc);
4767 4789 szc = (ierr == -1) ? szc - 1 : szc + 1;
4768 4790 } else {
4769 4791 /*
4770 4792 * For non COW faults and segvn_anypgsz == 0
4771 4793 * we need to be careful not to loop forever
4772 4794 * if existing page is found with szc other
4773 4795 * than 0 or seg->s_szc. This could be due
4774 4796 * to page relocations on behalf of DR or
4775 4797 * more likely large page creation. For this
4776 4798 * case simply re-size to existing page's szc
4777 4799 * if returned by anon_map_getpages().
4778 4800 */
4779 4801 if (ppa_szc == (uint_t)-1) {
4780 4802 szc = (ierr == -1) ? 0 : seg->s_szc;
4781 4803 } else {
4782 4804 ASSERT(ppa_szc <= seg->s_szc);
4783 4805 ASSERT(ierr == -2 || ppa_szc < szc);
4784 4806 ASSERT(ierr == -1 || ppa_szc > szc);
4785 4807 szc = ppa_szc;
4786 4808 }
4787 4809 }
4788 4810
4789 4811 pgsz = page_get_pagesize(szc);
4790 4812 pages = btop(pgsz);
4791 4813 ASSERT(type != F_SOFTLOCK || ierr == -1 ||
4792 4814 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz)));
4793 4815 if (type == F_SOFTLOCK) {
4794 4816 /*
4795 4817 * For softlocks we cannot reduce the fault area
4796 4818 * (calculated based on the largest page size for this
4797 4819 * segment) for size down and a is already next
4798 4820 			 * page size aligned as asserted above for size
4799 4821 * ups. Therefore just continue in case of softlock.
4800 4822 */
4801 4823 VM_STAT_ADD(segvnvmstats.fltanpages[9]);
4802 4824 continue; /* keep lint happy */
4803 4825 } else if (ierr == -2) {
4804 4826
4805 4827 /*
4806 4828 * Size up case. Note lpgaddr may only be needed for
4807 4829 * softlock case so we don't adjust it here.
4808 4830 */
4809 4831 VM_STAT_ADD(segvnvmstats.fltanpages[10]);
4810 4832 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4811 4833 ASSERT(a >= lpgaddr);
4812 4834 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4813 4835 aindx = svd->anon_index + seg_page(seg, a);
4814 4836 vpage = (svd->vpage != NULL) ?
4815 4837 &svd->vpage[seg_page(seg, a)] : NULL;
4816 4838 } else {
4817 4839 /*
4818 4840 * Size down case. Note lpgaddr may only be needed for
4819 4841 * softlock case so we don't adjust it here.
4820 4842 */
4821 4843 VM_STAT_ADD(segvnvmstats.fltanpages[11]);
4822 4844 ASSERT(IS_P2ALIGNED(a, pgsz));
4823 4845 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4824 4846 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4825 4847 ASSERT(a < lpgeaddr);
4826 4848 if (a < addr) {
4827 4849 /*
4828 4850 * The beginning of the large page region can
4829 4851 * be pulled to the right to make a smaller
4830 4852 * region. We haven't yet faulted a single
4831 4853 * page.
4832 4854 */
4833 4855 VM_STAT_ADD(segvnvmstats.fltanpages[12]);
4834 4856 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4835 4857 ASSERT(a >= lpgaddr);
4836 4858 aindx = svd->anon_index + seg_page(seg, a);
4837 4859 vpage = (svd->vpage != NULL) ?
4838 4860 &svd->vpage[seg_page(seg, a)] : NULL;
4839 4861 }
4840 4862 }
4841 4863 }
4842 4864 VM_STAT_ADD(segvnvmstats.fltanpages[13]);
4843 4865 ANON_LOCK_EXIT(&->a_rwlock);
4844 4866 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4845 4867 return (0);
4846 4868 error:
4847 4869 VM_STAT_ADD(segvnvmstats.fltanpages[14]);
4848 4870 ANON_LOCK_EXIT(&->a_rwlock);
4849 4871 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4850 4872 if (type == F_SOFTLOCK && a > lpgaddr) {
4851 4873 VM_STAT_ADD(segvnvmstats.fltanpages[15]);
4852 4874 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4853 4875 }
4854 4876 return (err);
4855 4877 }
4856 4878
4857 4879 int fltadvice = 1; /* set to free behind pages for sequential access */
4858 4880
4859 4881 /*
4860 4882 * This routine is called via a machine specific fault handling routine.
4861 4883 * It is also called by software routines wishing to lock or unlock
4862 4884 * a range of addresses.
4863 4885 *
4864 4886 * Here is the basic algorithm:
4865 4887 * If unlocking
4866 4888 * Call segvn_softunlock
4867 4889 * Return
4868 4890 * endif
4869 4891 * Checking and set up work
4870 4892 * If we will need some non-anonymous pages
4871 4893 * Call VOP_GETPAGE over the range of non-anonymous pages
4872 4894 * endif
4873 4895 * Loop over all addresses requested
4874 4896 * Call segvn_faultpage passing in page list
4875 4897 * to load up translations and handle anonymous pages
4876 4898 * endloop
4877 4899 * Load up translation to any additional pages in page list not
4878 4900 * already handled that fit into this segment
4879 4901 */
4880 4902 static faultcode_t
4881 4903 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4882 4904 enum fault_type type, enum seg_rw rw)
4883 4905 {
4884 4906 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4885 4907 page_t **plp, **ppp, *pp;
4886 4908 u_offset_t off;
4887 4909 caddr_t a;
4888 4910 struct vpage *vpage;
4889 4911 uint_t vpprot, prot;
4890 4912 int err;
4891 4913 page_t *pl[PVN_GETPAGE_NUM + 1];
4892 4914 size_t plsz, pl_alloc_sz;
4893 4915 size_t page;
4894 4916 ulong_t anon_index;
4895 4917 struct anon_map *amp;
4896 4918 int dogetpage = 0;
4897 4919 caddr_t lpgaddr, lpgeaddr;
4898 4920 size_t pgsz;
4899 4921 anon_sync_obj_t cookie;
4900 4922 int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4901 4923
4902 4924 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
4903 4925 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4904 4926
4905 4927 /*
4906 4928 * First handle the easy stuff
4907 4929 */
4908 4930 if (type == F_SOFTUNLOCK) {
4909 4931 if (rw == S_READ_NOCOW) {
4910 4932 rw = S_READ;
4911 4933 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
4912 4934 }
4913 4935 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4914 4936 pgsz = (seg->s_szc == 0) ? PAGESIZE :
4915 4937 page_get_pagesize(seg->s_szc);
4916 4938 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]);
4917 4939 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
4918 4940 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw);
4919 4941 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4920 4942 return (0);
4921 4943 }
4922 4944
4923 4945 ASSERT(svd->tr_state == SEGVN_TR_OFF ||
4924 4946 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
4925 4947 if (brkcow == 0) {
4926 4948 if (svd->tr_state == SEGVN_TR_INIT) {
4927 4949 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4928 4950 if (svd->tr_state == SEGVN_TR_INIT) {
4929 4951 ASSERT(svd->vp != NULL && svd->amp == NULL);
4930 4952 ASSERT(svd->flags & MAP_TEXT);
4931 4953 ASSERT(svd->type == MAP_PRIVATE);
4932 4954 segvn_textrepl(seg);
4933 4955 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4934 4956 ASSERT(svd->tr_state != SEGVN_TR_ON ||
4935 4957 svd->amp != NULL);
4936 4958 }
4937 4959 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4938 4960 }
4939 4961 } else if (svd->tr_state != SEGVN_TR_OFF) {
4940 4962 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4941 4963
4942 4964 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) {
4943 4965 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
4944 4966 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4945 4967 return (FC_PROT);
4946 4968 }
4947 4969
4948 4970 if (svd->tr_state == SEGVN_TR_ON) {
4949 4971 ASSERT(svd->vp != NULL && svd->amp != NULL);
4950 4972 segvn_textunrepl(seg, 0);
4951 4973 ASSERT(svd->amp == NULL &&
4952 4974 svd->tr_state == SEGVN_TR_OFF);
4953 4975 } else if (svd->tr_state != SEGVN_TR_OFF) {
4954 4976 svd->tr_state = SEGVN_TR_OFF;
4955 4977 }
4956 4978 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
4957 4979 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4958 4980 }
4959 4981
4960 4982 top:
4961 4983 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4962 4984
4963 4985 /*
4964 4986 * If we have the same protections for the entire segment,
4965 4987 * ensure that the access being attempted is legitimate.
4966 4988 */
4967 4989
4968 4990 if (svd->pageprot == 0) {
4969 4991 uint_t protchk;
4970 4992
4971 4993 switch (rw) {
4972 4994 case S_READ:
4973 4995 case S_READ_NOCOW:
4974 4996 protchk = PROT_READ;
4975 4997 break;
4976 4998 case S_WRITE:
4977 4999 protchk = PROT_WRITE;
4978 5000 break;
4979 5001 case S_EXEC:
4980 5002 protchk = PROT_EXEC;
4981 5003 break;
4982 5004 case S_OTHER:
4983 5005 default:
4984 5006 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4985 5007 break;
4986 5008 }
4987 5009
4988 5010 if ((svd->prot & protchk) == 0) {
4989 5011 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4990 5012 return (FC_PROT); /* illegal access type */
4991 5013 }
4992 5014 }
4993 5015
4994 5016 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
4995 5017 /* this must be SOFTLOCK S_READ fault */
4996 5018 ASSERT(svd->amp == NULL);
4997 5019 ASSERT(svd->tr_state == SEGVN_TR_OFF);
4998 5020 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4999 5021 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5000 5022 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5001 5023 /*
5002 5024 * this must be the first ever non S_READ_NOCOW
5003 5025 * softlock for this segment.
5004 5026 */
5005 5027 ASSERT(svd->softlockcnt == 0);
5006 5028 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5007 5029 HAT_REGION_TEXT);
5008 5030 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5009 5031 }
5010 5032 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5011 5033 goto top;
5012 5034 }
5013 5035
5014 5036 /*
5015 5037 * We can't allow the long term use of softlocks for vmpss segments,
5016 5038 * because in some file truncation cases we should be able to demote
5017 5039 * the segment, which requires that there are no softlocks. The
5018 5040 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5019 5041 * segment is S_READ_NOCOW, where the caller holds the address space
5020 5042 * locked as writer and calls softunlock before dropping the as lock.
5021 5043 * S_READ_NOCOW is used by /proc to read memory from another user.
5022 5044 *
5023 5045 * Another deadlock between SOFTLOCK and file truncation can happen
5024 5046 * because segvn_fault_vnodepages() calls the FS one pagesize at
5025 5047 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5026 5048 * can cause a deadlock because the first set of page_t's remain
5027 5049 * locked SE_SHARED. To avoid this, we demote segments on a first
5028 5050 * SOFTLOCK if they have a length greater than the segment's
5029 5051 * page size.
5030 5052 *
5031 5053 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5032 5054 * the access type is S_READ_NOCOW and the fault length is less than
5033 5055 * or equal to the segment's page size. While this is quite restrictive,
5034 5056 * it should be the most common case of SOFTLOCK against a vmpss
5035 5057 * segment.
5036 5058 *
5037 5059 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5038 5060 * caller makes sure no COW will be caused by another thread for a
5039 5061 * softlocked page.
5040 5062 */
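/*
 * A worked example of the demote test below (sizes illustrative,
 * ignoring the clamping CALC_LPG_REGION does at the segment ends): a
 * two-page S_READ_NOCOW SOFTLOCK whose first page is the last small
 * page of a large page yields lpgeaddr - lpgaddr == 2 * pgsz, so the
 * segment is demoted; the same two-page fault entirely inside one
 * large page keeps its page size, and any SOFTLOCK with
 * rw != S_READ_NOCOW is demoted unconditionally.
 */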
5041 5063 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) {
5042 5064 int demote = 0;
5043 5065
5044 5066 if (rw != S_READ_NOCOW) {
5045 5067 demote = 1;
5046 5068 }
5047 5069 if (!demote && len > PAGESIZE) {
5048 5070 pgsz = page_get_pagesize(seg->s_szc);
5049 5071 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5050 5072 lpgeaddr);
5051 5073 if (lpgeaddr - lpgaddr > pgsz) {
5052 5074 demote = 1;
5053 5075 }
5054 5076 }
5055 5077
5056 5078 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5057 5079
5058 5080 if (demote) {
5059 5081 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5060 5082 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5061 5083 if (seg->s_szc != 0) {
5062 5084 segvn_vmpss_clrszc_cnt++;
5063 5085 ASSERT(svd->softlockcnt == 0);
5064 5086 err = segvn_clrszc(seg);
5065 5087 if (err) {
5066 5088 segvn_vmpss_clrszc_err++;
5067 5089 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5068 5090 return (FC_MAKE_ERR(err));
5069 5091 }
5070 5092 }
5071 5093 ASSERT(seg->s_szc == 0);
5072 5094 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5073 5095 goto top;
5074 5096 }
5075 5097 }
5076 5098
5077 5099 /*
5078 5100 * Check to see if we need to allocate an anon_map structure.
5079 5101 */
5080 5102 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) {
5081 5103 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5082 5104 /*
5083 5105 * Drop the "read" lock on the segment and acquire
5084 5106 * the "write" version since we have to allocate the
5085 5107 * anon_map.
5086 5108 */
5087 5109 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5088 5110 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5089 5111
5090 5112 if (svd->amp == NULL) {
5091 5113 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
5092 5114 svd->amp->a_szc = seg->s_szc;
5093 5115 }
5094 5116 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5095 5117
5096 5118 /*
5097 5119 * Start all over again since segment protections
5098 5120 * may have changed after we dropped the "read" lock.
5099 5121 */
5100 5122 goto top;
5101 5123 }
5102 5124
5103 5125 /*
5104 5126 * S_READ_NOCOW vs S_READ distinction was
5105 5127 * only needed for the code above. After
5106 5128 * that we treat it as S_READ.
5107 5129 */
5108 5130 if (rw == S_READ_NOCOW) {
5109 5131 ASSERT(type == F_SOFTLOCK);
5110 5132 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5111 5133 rw = S_READ;
5112 5134 }
5113 5135
5114 5136 amp = svd->amp;
5115 5137
5116 5138 /*
5117 5139 * MADV_SEQUENTIAL work is ignored for large page segments.
5118 5140 */
5119 5141 if (seg->s_szc != 0) {
5120 5142 pgsz = page_get_pagesize(seg->s_szc);
5121 5143 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
5122 5144 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5123 5145 if (svd->vp == NULL) {
5124 5146 err = segvn_fault_anonpages(hat, seg, lpgaddr,
5125 5147 lpgeaddr, type, rw, addr, addr + len, brkcow);
5126 5148 } else {
5127 5149 err = segvn_fault_vnodepages(hat, seg, lpgaddr,
5128 5150 lpgeaddr, type, rw, addr, addr + len, brkcow);
5129 5151 if (err == IE_RETRY) {
5130 5152 ASSERT(seg->s_szc == 0);
5131 5153 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
5132 5154 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5133 5155 goto top;
5134 5156 }
5135 5157 }
5136 5158 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5137 5159 return (err);
5138 5160 }
5139 5161
5140 5162 page = seg_page(seg, addr);
5141 5163 if (amp != NULL) {
5142 5164 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5143 5165 anon_index = svd->anon_index + page;
5144 5166
5145 5167 if (type == F_PROT && rw == S_READ &&
5146 5168 svd->tr_state == SEGVN_TR_OFF &&
5147 5169 svd->type == MAP_PRIVATE && svd->pageprot == 0) {
5148 5170 size_t index = anon_index;
5149 5171 struct anon *ap;
5150 5172
5151 5173 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5152 5174 /*
5153 5175 * The fast path could apply to S_WRITE also, except
5154 5176 * that the protection fault could be caused by lazy
5155 5177 * tlb flush when ro->rw. In this case, the pte is
5156 5178 * RW already. But RO in the other cpu's tlb causes
5157 5179 * the fault. Since hat_chgprot won't do anything if
5158 5180 * pte doesn't change, we may end up faulting
5159 5181 * indefinitely until the RO tlb entry gets replaced.
5160 5182 */
5161 5183 for (a = addr; a < addr + len; a += PAGESIZE, index++) {
5162 5184 anon_array_enter(amp, index, &cookie);
5163 5185 ap = anon_get_ptr(amp->ahp, index);
5164 5186 anon_array_exit(&cookie);
5165 5187 if ((ap == NULL) || (ap->an_refcnt != 1)) {
5166 5188 ANON_LOCK_EXIT(&amp->a_rwlock);
5167 5189 goto slow;
5168 5190 }
5169 5191 }
5170 5192 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5171 5193 ANON_LOCK_EXIT(&amp->a_rwlock);
5172 5194 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5173 5195 return (0);
5174 5196 }
5175 5197 }
5176 5198 slow:
5177 5199
5178 5200 if (svd->vpage == NULL)
5179 5201 vpage = NULL;
5180 5202 else
5181 5203 vpage = &svd->vpage[page];
5182 5204
5183 5205 off = svd->offset + (uintptr_t)(addr - seg->s_base);
5184 5206
5185 5207 /*
5186 5208 * If MADV_SEQUENTIAL has been set for the particular page we
5187 5209 * are faulting on, free behind all pages in the segment and put
5188 5210 * them on the free list.
5189 5211 */
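/*
 * The advice tested below normally comes from user level, e.g. (a
 * minimal user-space sketch, not code from this file; error handling
 * omitted):
 *
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, fd, 0);
 *	(void) madvise((caddr_t)p, sz, MADV_SEQUENTIAL);
 *
 * After that, a fault on a page of the mapping asynchronously pushes
 * and frees the pages already read behind it, as done by the loop
 * below.
 */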
5190 5212
5191 5213 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) {
5192 5214 struct vpage *vpp;
5193 5215 ulong_t fanon_index;
5194 5216 size_t fpage;
5195 5217 u_offset_t pgoff, fpgoff;
5196 5218 struct vnode *fvp;
5197 5219 struct anon *fap = NULL;
5198 5220
5199 5221 if (svd->advice == MADV_SEQUENTIAL ||
5200 5222 (svd->pageadvice &&
5201 5223 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) {
5202 5224 pgoff = off - PAGESIZE;
5203 5225 fpage = page - 1;
5204 5226 if (vpage != NULL)
5205 5227 vpp = &svd->vpage[fpage];
5206 5228 if (amp != NULL)
5207 5229 fanon_index = svd->anon_index + fpage;
5208 5230
5209 5231 while (pgoff > svd->offset) {
5210 5232 if (svd->advice != MADV_SEQUENTIAL &&
5211 5233 (!svd->pageadvice || (vpage &&
5212 5234 VPP_ADVICE(vpp) != MADV_SEQUENTIAL)))
5213 5235 break;
5214 5236
5215 5237 /*
5216 5238 * If this is an anon page, we must find the
5217 5239 * correct <vp, offset> for it
5218 5240 */
5219 5241 fap = NULL;
5220 5242 if (amp != NULL) {
5221 5243 ANON_LOCK_ENTER(&amp->a_rwlock,
5222 5244 RW_READER);
5223 5245 anon_array_enter(amp, fanon_index,
5224 5246 &cookie);
5225 5247 fap = anon_get_ptr(amp->ahp,
5226 5248 fanon_index);
5227 5249 if (fap != NULL) {
5228 5250 swap_xlate(fap, &fvp, &fpgoff);
5229 5251 } else {
5230 5252 fpgoff = pgoff;
5231 5253 fvp = svd->vp;
5232 5254 }
5233 5255 anon_array_exit(&cookie);
5234 5256 ANON_LOCK_EXIT(&amp->a_rwlock);
5235 5257 } else {
5236 5258 fpgoff = pgoff;
5237 5259 fvp = svd->vp;
5238 5260 }
5239 5261 if (fvp == NULL)
5240 5262 break; /* XXX */
5241 5263 /*
5242 5264 * Skip pages that are free or have an
5243 5265 * "exclusive" lock.
5244 5266 */
5245 5267 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED);
5246 5268 if (pp == NULL)
5247 5269 break;
5248 5270 /*
5249 5271 * We don't need the page_struct_lock to test
5250 5272 * as this is only advisory; even if we
5251 5273 * acquire it someone might race in and lock
5252 5274 * the page after we unlock and before the
5253 5275 * PUTPAGE, then VOP_PUTPAGE will do nothing.
5254 5276 */
5255 5277 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
5256 5278 /*
5257 5279 * Hold the vnode before releasing
5258 5280 * the page lock to prevent it from
5259 5281 * being freed and re-used by some
5260 5282 * other thread.
5261 5283 */
5262 5284 VN_HOLD(fvp);
5263 5285 page_unlock(pp);
5264 5286 /*
5265 5287 * We should build a page list
5266 5288 * to kluster putpages XXX
5267 5289 */
5268 5290 (void) VOP_PUTPAGE(fvp,
5269 5291 (offset_t)fpgoff, PAGESIZE,
5270 5292 (B_DONTNEED|B_FREE|B_ASYNC),
5271 5293 svd->cred, NULL);
5272 5294 VN_RELE(fvp);
5273 5295 } else {
5274 5296 /*
5275 5297 * XXX - Should the loop terminate if
5276 5298 * the page is `locked'?
5277 5299 */
5278 5300 page_unlock(pp);
5279 5301 }
5280 5302 --vpp;
5281 5303 --fanon_index;
5282 5304 pgoff -= PAGESIZE;
5283 5305 }
5284 5306 }
5285 5307 }
5286 5308
5287 5309 plp = pl;
5288 5310 *plp = NULL;
5289 5311 pl_alloc_sz = 0;
5290 5312
5291 5313 /*
5292 5314 * See if we need to call VOP_GETPAGE for
5293 5315 * *any* of the range being faulted on.
5294 5316 * We can skip all of this work if there
5295 5317 * was no original vnode.
5296 5318 */
5297 5319 if (svd->vp != NULL) {
5298 5320 u_offset_t vp_off;
5299 5321 size_t vp_len;
5300 5322 struct anon *ap;
5301 5323 vnode_t *vp;
5302 5324
5303 5325 vp_off = off;
5304 5326 vp_len = len;
5305 5327
5306 5328 if (amp == NULL)
5307 5329 dogetpage = 1;
5308 5330 else {
5309 5331 /*
5310 5332 * Only acquire reader lock to prevent amp->ahp
5311 5333 * from being changed. It's ok to miss pages,
5312 5334 * hence we don't bother with anon_array_enter
5313 5335 */
5314 5336 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5315 5337 ap = anon_get_ptr(amp->ahp, anon_index);
5316 5338
5317 5339 if (len <= PAGESIZE)
5318 5340 /* inline non_anon() */
5319 5341 dogetpage = (ap == NULL);
5320 5342 else
5321 5343 dogetpage = non_anon(amp->ahp, anon_index,
5322 5344 &vp_off, &vp_len);
5323 5345 ANON_LOCK_EXIT(&amp->a_rwlock);
5324 5346 }
5325 5347
5326 5348 if (dogetpage) {
5327 5349 enum seg_rw arw;
5328 5350 struct as *as = seg->s_as;
5329 5351
5330 5352 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) {
5331 5353 /*
5332 5354 * Page list won't fit in local array,
5333 5355 * allocate one of the needed size.
5334 5356 */
5335 5357 pl_alloc_sz =
5336 5358 (btop(len) + 1) * sizeof (page_t *);
5337 5359 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP);
5338 5360 plp[0] = NULL;
5339 5361 plsz = len;
5340 5362 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE ||
5341 5363 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER ||
5342 5364 (((size_t)(addr + PAGESIZE) <
5343 5365 (size_t)(seg->s_base + seg->s_size)) &&
5344 5366 hat_probe(as->a_hat, addr + PAGESIZE))) {
5345 5367 /*
5346 5368 * Ask VOP_GETPAGE to return the exact number
5347 5369 * of pages if
5348 5370 * (a) this is a COW fault, or
5349 5371 * (b) this is a software fault, or
5350 5372 * (c) next page is already mapped.
5351 5373 */
5352 5374 plsz = len;
5353 5375 } else {
5354 5376 /*
5355 5377 * Ask VOP_GETPAGE to return adjacent pages
5356 5378 * within the segment.
5357 5379 */
5358 5380 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t)
5359 5381 ((seg->s_base + seg->s_size) - addr));
5360 5382 ASSERT((addr + plsz) <=
5361 5383 (seg->s_base + seg->s_size));
5362 5384 }
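/*
 * An illustrative example of the plsz choice above: a COW or software
 * fault (or one whose next page is already mapped in) asks VOP_GETPAGE
 * for exactly len bytes, while an ordinary read fault three pages short
 * of the end of the segment asks for
 * MIN(PVN_GETPAGE_SZ, 3 * PAGESIZE) == 3 * PAGESIZE, so the read-ahead
 * never extends past the end of the segment.
 */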
5363 5385
5364 5386 /*
5365 5387 * Need to get some non-anonymous pages.
5366 5388 * We need to make only one call to GETPAGE to do
5367 5389 * this to prevent certain deadlocking conditions
5368 5390 * when we are doing locking. In this case
5369 5391 * non_anon() should have picked up the smallest
5370 5392 * range which includes all the non-anonymous
5371 5393 * pages in the requested range. We have to
5372 5394 * be careful regarding which rw flag to pass in
5373 5395 * because on a private mapping, the underlying
5374 5396 * object is never allowed to be written.
5375 5397 */
5376 5398 if (rw == S_WRITE && svd->type == MAP_PRIVATE) {
5377 5399 arw = S_READ;
5378 5400 } else {
5379 5401 arw = rw;
5380 5402 }
5381 5403 vp = svd->vp;
5382 5404 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5383 5405 "segvn_getpage:seg %p addr %p vp %p",
5384 5406 seg, addr, vp);
5385 5407 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len,
5386 5408 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw,
5387 5409 svd->cred, NULL);
5388 5410 if (err) {
5389 5411 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5390 5412 segvn_pagelist_rele(plp);
5391 5413 if (pl_alloc_sz)
5392 5414 kmem_free(plp, pl_alloc_sz);
5393 5415 return (FC_MAKE_ERR(err));
5394 5416 }
5395 5417 if (svd->type == MAP_PRIVATE)
5396 5418 vpprot &= ~PROT_WRITE;
5397 5419 }
5398 5420 }
5399 5421
5400 5422 /*
5401 5423 * N.B. at this time the plp array has all the needed non-anon
5402 5424 * pages in addition to (possibly) having some adjacent pages.
5403 5425 */
5404 5426
5405 5427 /*
5406 5428 * Always acquire the anon_array_lock to prevent
5407 5429 * 2 threads from allocating separate anon slots for
5408 5430 * the same "addr".
5409 5431 *
5410 5432 * If this is a copy-on-write fault and we don't already
5411 5433 * have the anon_array_lock, acquire it to prevent the
5412 5434 * fault routine from handling multiple copy-on-write faults
5413 5435 * on the same "addr" in the same address space.
5414 5436 *
5415 5437 * Only one thread should deal with the fault since after
5416 5438 * it is handled, the other threads can acquire a translation
5417 5439 * to the newly created private page. This prevents two or
5418 5440 * more threads from creating different private pages for the
5419 5441 * same fault.
5420 5442 *
5421 5443 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5422 5444 * to prevent deadlock between this thread and another thread
5423 5445 * which has soft-locked this page and wants to acquire serial_lock.
5424 5446 * ( bug 4026339 )
5425 5447 *
5426 5448 * The fix for bug 4026339 becomes unnecessary when using the
5427 5449 * locking scheme with per amp rwlock and a global set of hash
5428 5450 * lock, anon_array_lock. If we steal a vnode page when low
5429 5451 * on memory and upgrade the page lock through page_rename,
5430 5452 * then the page is PAGE_HANDLED, nothing needs to be done
5431 5453 * for this page after returning from segvn_faultpage.
5432 5454 *
5433 5455 * But really, the page lock should be downgraded after
5434 5456 * the stolen page is page_rename'd.
5435 5457 */
5436 5458
5437 5459 if (amp != NULL)
5438 5460 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5439 5461
5440 5462 /*
5441 5463 * Ok, now loop over the address range and handle faults
5442 5464 */
5443 5465 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5444 5466 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot,
5445 5467 type, rw, brkcow);
5446 5468 if (err) {
5447 5469 if (amp != NULL)
5448 5470 ANON_LOCK_EXIT(&amp->a_rwlock);
5449 5471 if (type == F_SOFTLOCK && a > addr) {
5450 5472 segvn_softunlock(seg, addr, (a - addr),
5451 5473 S_OTHER);
5452 5474 }
5453 5475 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5454 5476 segvn_pagelist_rele(plp);
5455 5477 if (pl_alloc_sz)
5456 5478 kmem_free(plp, pl_alloc_sz);
5457 5479 return (err);
5458 5480 }
5459 5481 if (vpage) {
5460 5482 vpage++;
5461 5483 } else if (svd->vpage) {
5462 5484 page = seg_page(seg, addr);
5463 5485 vpage = &svd->vpage[++page];
5464 5486 }
5465 5487 }
5466 5488
5467 5489 /* Didn't get pages from the underlying fs so we're done */
5468 5490 if (!dogetpage)
5469 5491 goto done;
5470 5492
5471 5493 /*
5472 5494 * Now handle any other pages in the list returned.
5473 5495 * If the page can be used, load up the translations now.
5474 5496 * Note that the for loop will only be entered if "plp"
5475 5497 * is pointing to a non-NULL page pointer which means that
5476 5498 * VOP_GETPAGE() was called and vpprot has been initialized.
5477 5499 */
5478 5500 if (svd->pageprot == 0)
5479 5501 prot = svd->prot & vpprot;
5480 5502
5481 5503
5482 5504 /*
5483 5505 * Large Files: diff should be unsigned value because we started
5484 5506 * supporting > 2GB segment sizes from 2.5.1 and when a
5485 5507 * large file of size > 2GB gets mapped to address space
5486 5508 * the diff value can be > 2GB.
5487 5509 */
5488 5510
5489 5511 for (ppp = plp; (pp = *ppp) != NULL; ppp++) {
5490 5512 size_t diff;
5491 5513 struct anon *ap;
5492 5514 int anon_index;
5493 5515 anon_sync_obj_t cookie;
5494 5516 int hat_flag = HAT_LOAD_ADV;
5495 5517
5496 5518 if (svd->flags & MAP_TEXT) {
5497 5519 hat_flag |= HAT_LOAD_TEXT;
5498 5520 }
5499 5521
5500 5522 if (pp == PAGE_HANDLED)
5501 5523 continue;
5502 5524
5503 5525 if (svd->tr_state != SEGVN_TR_ON &&
5504 5526 pp->p_offset >= svd->offset &&
5505 5527 pp->p_offset < svd->offset + seg->s_size) {
5506 5528
5507 5529 diff = pp->p_offset - svd->offset;
5508 5530
5509 5531 /*
5510 5532 * Large Files: Following is the assertion
5511 5533 * validating the above cast.
5512 5534 */
5513 5535 ASSERT(svd->vp == pp->p_vnode);
5514 5536
5515 5537 page = btop(diff);
5516 5538 if (svd->pageprot)
5517 5539 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5518 5540
5519 5541 /*
5520 5542 * Prevent other threads in the address space from
5521 5543 * creating private pages (i.e., allocating anon slots)
5522 5544 * while we are in the process of loading translations
5523 5545 * to additional pages returned by the underlying
5524 5546 * object.
5525 5547 */
5526 5548 if (amp != NULL) {
5527 5549 anon_index = svd->anon_index + page;
5528 5550 anon_array_enter(amp, anon_index, &cookie);
5529 5551 ap = anon_get_ptr(amp->ahp, anon_index);
5530 5552 }
5531 5553 if ((amp == NULL) || (ap == NULL)) {
5532 5554 if (IS_VMODSORT(pp->p_vnode) ||
5533 5555 enable_mbit_wa) {
5534 5556 if (rw == S_WRITE)
5535 5557 hat_setmod(pp);
5536 5558 else if (rw != S_OTHER &&
5537 5559 !hat_ismod(pp))
5538 5560 prot &= ~PROT_WRITE;
5539 5561 }
5540 5562 /*
5541 5563 * Skip mapping read ahead pages marked
5542 5564 * for migration, so they will get migrated
5543 5565 * properly on fault
5544 5566 */
5545 5567 ASSERT(amp == NULL ||
5546 5568 svd->rcookie == HAT_INVALID_REGION_COOKIE);
5547 5569 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) {
5548 5570 hat_memload_region(hat,
5549 5571 seg->s_base + diff,
5550 5572 pp, prot, hat_flag,
5551 5573 svd->rcookie);
5552 5574 }
5553 5575 }
5554 5576 if (amp != NULL)
5555 5577 anon_array_exit(&cookie);
5556 5578 }
5557 5579 page_unlock(pp);
5558 5580 }
5559 5581 done:
5560 5582 if (amp != NULL)
5561 5583 ANON_LOCK_EXIT(&amp->a_rwlock);
5562 5584 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5563 5585 if (pl_alloc_sz)
5564 5586 kmem_free(plp, pl_alloc_sz);
5565 5587 return (0);
5566 5588 }
5567 5589
5568 5590 /*
5569 5591 * This routine is used to start I/O on pages asynchronously. XXX it will
5570 5592 * only create PAGESIZE pages. At fault time they will be relocated into
5571 5593 * larger pages.
5572 5594 */
5573 5595 static faultcode_t
5574 5596 segvn_faulta(struct seg *seg, caddr_t addr)
5575 5597 {
5576 5598 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5577 5599 int err;
5578 5600 struct anon_map *amp;
5579 5601 vnode_t *vp;
5580 5602
5581 5603 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5582 5604
5583 5605 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5584 5606 if ((amp = svd->amp) != NULL) {
5585 5607 struct anon *ap;
5586 5608
5587 5609 /*
5588 5610 * Reader lock to prevent amp->ahp from being changed.
5589 5611 * This is advisory, it's ok to miss a page, so
5590 5612 * we don't bother with anon_array_enter().
5591 5613 */
5592 5614 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5593 5615 if ((ap = anon_get_ptr(amp->ahp,
5594 5616 svd->anon_index + seg_page(seg, addr))) != NULL) {
5595 5617
5596 5618 err = anon_getpage(&ap, NULL, NULL,
5597 5619 0, seg, addr, S_READ, svd->cred);
5598 5620
5599 5621 ANON_LOCK_EXIT(&amp->a_rwlock);
5600 5622 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5601 5623 if (err)
5602 5624 return (FC_MAKE_ERR(err));
5603 5625 return (0);
5604 5626 }
5605 5627 ANON_LOCK_EXIT(&amp->a_rwlock);
5606 5628 }
5607 5629
5608 5630 if (svd->vp == NULL) {
5609 5631 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5610 5632 return (0); /* zfod page - do nothing now */
5611 5633 }
5612 5634
5613 5635 vp = svd->vp;
5614 5636 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5615 5637 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp);
5616 5638 err = VOP_GETPAGE(vp,
5617 5639 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5618 5640 PAGESIZE, NULL, NULL, 0, seg, addr,
5619 5641 S_OTHER, svd->cred, NULL);
5620 5642
5621 5643 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5622 5644 if (err)
5623 5645 return (FC_MAKE_ERR(err));
5624 5646 return (0);
5625 5647 }
5626 5648
5627 5649 static int
5628 5650 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5629 5651 {
5630 5652 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5631 5653 struct vpage *cvp, *svp, *evp;
5632 5654 struct vnode *vp;
5633 5655 size_t pgsz;
5634 5656 pgcnt_t pgcnt;
5635 5657 anon_sync_obj_t cookie;
5636 5658 int unload_done = 0;
5637 5659
5638 5660 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5639 5661
5640 5662 if ((svd->maxprot & prot) != prot)
5641 5663 return (EACCES); /* violated maxprot */
5642 5664
5643 5665 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5644 5666
5645 5667 /* return if prot is the same */
5646 5668 if (!svd->pageprot && svd->prot == prot) {
5647 5669 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5648 5670 return (0);
5649 5671 }
5650 5672
5651 5673 /*
5652 5674 * Since we change protections we first have to flush the cache.
5653 5675 * This makes sure all the pagelock calls have to recheck
5654 5676 * protections.
5655 5677 */
5656 5678 if (svd->softlockcnt > 0) {
5657 5679 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5658 5680
5659 5681 /*
5660 5682 * If this is a shared segment, a non-zero softlockcnt
5661 5683 * means locked pages are still in use.
5662 5684 */
5663 5685 if (svd->type == MAP_SHARED) {
5664 5686 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5665 5687 return (EAGAIN);
5666 5688 }
5667 5689
5668 5690 /*
5669 5691 * Since we do have the segvn writers lock nobody can fill
5670 5692 * the cache with entries belonging to this seg during
5671 5693 * the purge. The flush either succeeds or we still have
5672 5694 * pending I/Os.
5673 5695 */
5674 5696 segvn_purge(seg);
5675 5697 if (svd->softlockcnt > 0) {
5676 5698 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5677 5699 return (EAGAIN);
5678 5700 }
5679 5701 }
5680 5702
5681 5703 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5682 5704 ASSERT(svd->amp == NULL);
5683 5705 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5684 5706 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5685 5707 HAT_REGION_TEXT);
5686 5708 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5687 5709 unload_done = 1;
5688 5710 } else if (svd->tr_state == SEGVN_TR_INIT) {
5689 5711 svd->tr_state = SEGVN_TR_OFF;
5690 5712 } else if (svd->tr_state == SEGVN_TR_ON) {
5691 5713 ASSERT(svd->amp != NULL);
5692 5714 segvn_textunrepl(seg, 0);
5693 5715 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5694 5716 unload_done = 1;
5695 5717 }
5696 5718
5697 5719 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED &&
5698 5720 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) {
5699 5721 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
5700 5722 segvn_inval_trcache(svd->vp);
5701 5723 }
5702 5724 if (seg->s_szc != 0) {
5703 5725 int err;
5704 5726 pgsz = page_get_pagesize(seg->s_szc);
5705 5727 pgcnt = pgsz >> PAGESHIFT;
5706 5728 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
5707 5729 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5708 5730 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5709 5731 ASSERT(seg->s_base != addr || seg->s_size != len);
5710 5732 /*
5711 5733 * If we are holding the as lock as a reader then
5712 5734 * we need to return IE_RETRY and let the as
5713 5735 * layer drop and re-acquire the lock as a writer.
5714 5736 */
5715 5737 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock))
5716 5738 return (IE_RETRY);
5717 5739 VM_STAT_ADD(segvnvmstats.demoterange[1]);
5718 5740 if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5719 5741 err = segvn_demote_range(seg, addr, len,
5720 5742 SDR_END, 0);
5721 5743 } else {
5722 5744 uint_t szcvec = map_pgszcvec(seg->s_base,
5723 5745 pgsz, (uintptr_t)seg->s_base,
5724 5746 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5725 5747 err = segvn_demote_range(seg, addr, len,
5726 5748 SDR_END, szcvec);
5727 5749 }
5728 5750 if (err == 0)
5729 5751 return (IE_RETRY);
5730 5752 if (err == ENOMEM)
5731 5753 return (IE_NOMEM);
5732 5754 return (err);
5733 5755 }
5734 5756 }
5735 5757
5736 5758
5737 5759 /*
5738 5760 * If it's a private mapping and we're making it writable then we
5739 5761 * may have to reserve the additional swap space now. If we are
5740 5762 * making writable only a part of the segment then we use its vpage
5741 5763 * array to keep a record of the pages for which we have reserved
5742 5764 * swap. In this case we set the pageswap field in the segment's
5743 5765 * segvn structure to record this.
5744 5766 *
5745 5767 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5746 5768 * removing write permission on the entire segment and we haven't
5747 5769 * modified any pages, we can release the swap space.
5748 5770 */
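/*
 * A worked example (sizes illustrative): a private file mapping created
 * PROT_READ and later mprotect()ed to PROT_READ|PROT_WRITE over 16K of
 * its 1M length reserves sz == 16K of swap and sets pageswap; a second
 * request over an overlapping 32K range only reserves the pages not yet
 * marked VPP_ISSWAPRES, so its sz can be smaller than 32K.
 */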
5749 5771 if (svd->type == MAP_PRIVATE) {
5750 5772 if (prot & PROT_WRITE) {
5751 5773 if (!(svd->flags & MAP_NORESERVE) &&
5752 5774 !(svd->swresv && svd->pageswap == 0)) {
5753 5775 size_t sz = 0;
5754 5776
5755 5777 /*
5756 5778 * Start by determining how much swap
5757 5779 * space is required.
5758 5780 */
5759 5781 if (addr == seg->s_base &&
5760 5782 len == seg->s_size &&
5761 5783 svd->pageswap == 0) {
5762 5784 /* The whole segment */
5763 5785 sz = seg->s_size;
5764 5786 } else {
5765 5787 /*
5766 5788 * Make sure that the vpage array
5767 5789 * exists, and make a note of the
5768 5790 * range of elements corresponding
5769 5791 * to len.
5770 5792 */
5771 5793 segvn_vpage(seg);
5772 5794 svp = &svd->vpage[seg_page(seg, addr)];
5773 5795 evp = &svd->vpage[seg_page(seg,
5774 5796 addr + len)];
5775 5797
5776 5798 if (svd->pageswap == 0) {
5777 5799 /*
5778 5800 * This is the first time we've
5779 5801 * asked for a part of this
5780 5802 * segment, so we need to
5781 5803 * reserve everything we've
5782 5804 * been asked for.
5783 5805 */
5784 5806 sz = len;
5785 5807 } else {
5786 5808 /*
5787 5809 * We have to count the number
5788 5810 * of pages required.
5789 5811 */
5790 5812 for (cvp = svp; cvp < evp;
5791 5813 cvp++) {
5792 5814 if (!VPP_ISSWAPRES(cvp))
5793 5815 sz++;
5794 5816 }
5795 5817 sz <<= PAGESHIFT;
5796 5818 }
5797 5819 }
5798 5820
5799 5821 /* Try to reserve the necessary swap. */
5800 5822 if (anon_resv_zone(sz,
5801 5823 seg->s_as->a_proc->p_zone) == 0) {
5802 5824 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5803 5825 return (IE_NOMEM);
5804 5826 }
5805 5827
5806 5828 /*
5807 5829 * Make a note of how much swap space
5808 5830 * we've reserved.
5809 5831 */
5810 5832 if (svd->pageswap == 0 && sz == seg->s_size) {
5811 5833 svd->swresv = sz;
5812 5834 } else {
5813 5835 ASSERT(svd->vpage != NULL);
5814 5836 svd->swresv += sz;
5815 5837 svd->pageswap = 1;
5816 5838 for (cvp = svp; cvp < evp; cvp++) {
5817 5839 if (!VPP_ISSWAPRES(cvp))
5818 5840 VPP_SETSWAPRES(cvp);
5819 5841 }
5820 5842 }
5821 5843 }
5822 5844 } else {
5823 5845 /*
5824 5846 * Swap space is released only if this segment
5825 5847 * does not map anonymous memory, since read faults
5826 5848 * on such segments still need an anon slot to read
5827 5849 * in the data.
5828 5850 */
5829 5851 if (svd->swresv != 0 && svd->vp != NULL &&
5830 5852 svd->amp == NULL && addr == seg->s_base &&
5831 5853 len == seg->s_size && svd->pageprot == 0) {
5832 5854 ASSERT(svd->pageswap == 0);
5833 5855 anon_unresv_zone(svd->swresv,
5834 5856 seg->s_as->a_proc->p_zone);
5835 5857 svd->swresv = 0;
5836 5858 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
5837 5859 "anon proc:%p %lu %u", seg, 0, 0);
5838 5860 }
5839 5861 }
5840 5862 }
5841 5863
5842 5864 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5843 5865 if (svd->prot == prot) {
5844 5866 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5845 5867 return (0); /* all done */
5846 5868 }
5847 5869 svd->prot = (uchar_t)prot;
5848 5870 } else if (svd->type == MAP_PRIVATE) {
5849 5871 struct anon *ap = NULL;
5850 5872 page_t *pp;
5851 5873 u_offset_t offset, off;
5852 5874 struct anon_map *amp;
5853 5875 ulong_t anon_idx = 0;
5854 5876
5855 5877 /*
5856 5878 * A vpage structure exists or else the change does not
5857 5879 * involve the entire segment. Establish a vpage structure
5858 5880 * if none is there. Then, for each page in the range,
5859 5881 * adjust its individual permissions. Note that write-
5860 5882 * enabling a MAP_PRIVATE page can affect the claims for
5861 5883 * locked down memory. Overcommitting memory terminates
5862 5884 * the operation.
5863 5885 */
5864 5886 segvn_vpage(seg);
5865 5887 svd->pageprot = 1;
5866 5888 if ((amp = svd->amp) != NULL) {
5867 5889 anon_idx = svd->anon_index + seg_page(seg, addr);
5868 5890 ASSERT(seg->s_szc == 0 ||
5869 5891 IS_P2ALIGNED(anon_idx, pgcnt));
5870 5892 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5871 5893 }
5872 5894
5873 5895 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5874 5896 evp = &svd->vpage[seg_page(seg, addr + len)];
5875 5897
5876 5898 /*
5877 5899 * See Statement at the beginning of segvn_lockop regarding
5878 5900 * the way cowcnts and lckcnts are handled.
5879 5901 */
5880 5902 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5881 5903
5882 5904 if (seg->s_szc != 0) {
5883 5905 if (amp != NULL) {
5884 5906 anon_array_enter(amp, anon_idx,
5885 5907 &cookie);
5886 5908 }
5887 5909 if (IS_P2ALIGNED(anon_idx, pgcnt) &&
5888 5910 !segvn_claim_pages(seg, svp, offset,
5889 5911 anon_idx, prot)) {
5890 5912 if (amp != NULL) {
5891 5913 anon_array_exit(&cookie);
5892 5914 }
5893 5915 break;
5894 5916 }
5895 5917 if (amp != NULL) {
5896 5918 anon_array_exit(&cookie);
5897 5919 }
5898 5920 anon_idx++;
5899 5921 } else {
5900 5922 if (amp != NULL) {
5901 5923 anon_array_enter(amp, anon_idx,
5902 5924 &cookie);
5903 5925 ap = anon_get_ptr(amp->ahp, anon_idx++);
5904 5926 }
5905 5927
5906 5928 if (VPP_ISPPLOCK(svp) &&
5907 5929 VPP_PROT(svp) != prot) {
5908 5930
5909 5931 if (amp == NULL || ap == NULL) {
5910 5932 vp = svd->vp;
5911 5933 off = offset;
5912 5934 } else
5913 5935 swap_xlate(ap, &vp, &off);
5914 5936 if (amp != NULL)
5915 5937 anon_array_exit(&cookie);
5916 5938
5917 5939 if ((pp = page_lookup(vp, off,
5918 5940 SE_SHARED)) == NULL) {
5919 5941 panic("segvn_setprot: no page");
5920 5942 /*NOTREACHED*/
5921 5943 }
5922 5944 ASSERT(seg->s_szc == 0);
5923 5945 if ((VPP_PROT(svp) ^ prot) &
5924 5946 PROT_WRITE) {
5925 5947 if (prot & PROT_WRITE) {
5926 5948 if (!page_addclaim(
5927 5949 pp)) {
5928 5950 page_unlock(pp);
5929 5951 break;
5930 5952 }
5931 5953 } else {
5932 5954 if (!page_subclaim(
5933 5955 pp)) {
5934 5956 page_unlock(pp);
5935 5957 break;
5936 5958 }
5937 5959 }
5938 5960 }
5939 5961 page_unlock(pp);
5940 5962 } else if (amp != NULL)
5941 5963 anon_array_exit(&cookie);
5942 5964 }
5943 5965 VPP_SETPROT(svp, prot);
5944 5966 offset += PAGESIZE;
5945 5967 }
5946 5968 if (amp != NULL)
5947 5969 ANON_LOCK_EXIT(&amp->a_rwlock);
5948 5970
5949 5971 /*
5950 5972 * Did we terminate prematurely? If so, simply unload
5951 5973 * the translations to the things we've updated so far.
5952 5974 */
5953 5975 if (svp != evp) {
5954 5976 if (unload_done) {
5955 5977 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5956 5978 return (IE_NOMEM);
5957 5979 }
5958 5980 len = (svp - &svd->vpage[seg_page(seg, addr)]) *
5959 5981 PAGESIZE;
5960 5982 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz));
5961 5983 if (len != 0)
5962 5984 hat_unload(seg->s_as->a_hat, addr,
5963 5985 len, HAT_UNLOAD);
5964 5986 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5965 5987 return (IE_NOMEM);
5966 5988 }
5967 5989 } else {
5968 5990 segvn_vpage(seg);
5969 5991 svd->pageprot = 1;
5970 5992 evp = &svd->vpage[seg_page(seg, addr + len)];
5971 5993 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5972 5994 VPP_SETPROT(svp, prot);
5973 5995 }
5974 5996 }
5975 5997
5976 5998 if (unload_done) {
5977 5999 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5978 6000 return (0);
5979 6001 }
5980 6002
5981 6003 if (((prot & PROT_WRITE) != 0 &&
5982 6004 (svd->vp != NULL || svd->type == MAP_PRIVATE)) ||
5983 6005 (prot & ~PROT_USER) == PROT_NONE) {
5984 6006 /*
5985 6007 * Either private or shared data with write access (in
5986 6008 * which case we need to throw out all former translations
5987 6009 * so that we get the right translations set up on fault
5988 6010 * and we don't allow write access to any copy-on-write pages
5989 6011 * that might be around or to prevent write access to pages
5990 6012 * representing holes in a file), or we don't have permission
5991 6013 * to access the memory at all (in which case we have to
5992 6014 * unload any current translations that might exist).
5993 6015 */
5994 6016 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
5995 6017 } else {
5996 6018 /*
5997 6019 * A shared mapping or a private mapping in which write
5998 6020 * protection is going to be denied - just change all the
5999 6021 * protections over the range of addresses in question.
6000 6022 * segvn does not support any other attributes other
6001 6023 * than prot so we can use hat_chgattr.
6002 6024 */
6003 6025 hat_chgattr(seg->s_as->a_hat, addr, len, prot);
6004 6026 }
6005 6027
6006 6028 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6007 6029
6008 6030 return (0);
6009 6031 }
6010 6032
6011 6033 /*
6012 6034 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize,
6013 6035 * to determine if the seg is capable of mapping the requested szc.
6014 6036 */
6015 6037 static int
6016 6038 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
6017 6039 {
6018 6040 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6019 6041 struct segvn_data *nsvd;
6020 6042 struct anon_map *amp = svd->amp;
6021 6043 struct seg *nseg;
6022 6044 caddr_t eaddr = addr + len, a;
6023 6045 size_t pgsz = page_get_pagesize(szc);
6024 6046 pgcnt_t pgcnt = page_get_pagecnt(szc);
6025 6047 int err;
6026 6048 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6027 6049
6028 6050 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6029 6051 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6030 6052
6031 6053 if (seg->s_szc == szc || segvn_lpg_disable != 0) {
6032 6054 return (0);
6033 6055 }
6034 6056
6035 6057 /*
6036 6058 * addr should always be pgsz aligned but eaddr may be misaligned if
6037 6059 * it's at the end of the segment.
6038 6060 *
6039 6061 * XXX we should assert this condition since as_setpagesize() logic
6040 6062 * guarantees it.
6041 6063 */
6042 6064 if (!IS_P2ALIGNED(addr, pgsz) ||
6043 6065 (!IS_P2ALIGNED(eaddr, pgsz) &&
6044 6066 eaddr != seg->s_base + seg->s_size)) {
6045 6067
6046 6068 segvn_setpgsz_align_err++;
6047 6069 return (EINVAL);
6048 6070 }
6049 6071
6050 6072 if (amp != NULL && svd->type == MAP_SHARED) {
6051 6073 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6052 6074 if (!IS_P2ALIGNED(an_idx, pgcnt)) {
6053 6075
6054 6076 segvn_setpgsz_anon_align_err++;
6055 6077 return (EINVAL);
6056 6078 }
6057 6079 }
6058 6080
6059 6081 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas ||
6060 6082 szc > segvn_maxpgszc) {
6061 6083 return (EINVAL);
6062 6084 }
6063 6085
6064 6086 /* paranoid check */
6065 6087 if (svd->vp != NULL &&
6066 6088 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) {
6067 6089 return (EINVAL);
6068 6090 }
6069 6091
6070 6092 if (seg->s_szc == 0 && svd->vp != NULL &&
6071 6093 map_addr_vacalign_check(addr, off)) {
6072 6094 return (EINVAL);
6073 6095 }
6074 6096
6075 6097 /*
6076 6098 * Check that protections are the same within new page
6077 6099 * size boundaries.
6078 6100 */
6079 6101 if (svd->pageprot) {
6080 6102 for (a = addr; a < eaddr; a += pgsz) {
6081 6103 if ((a + pgsz) > eaddr) {
6082 6104 if (!sameprot(seg, a, eaddr - a)) {
6083 6105 return (EINVAL);
6084 6106 }
6085 6107 } else {
6086 6108 if (!sameprot(seg, a, pgsz)) {
6087 6109 return (EINVAL);
6088 6110 }
6089 6111 }
6090 6112 }
6091 6113 }
6092 6114
6093 6115 /*
6094 6116 * Since we are changing page size we first have to flush
6095 6117 * the cache. This makes sure all the pagelock calls have
6096 6118 * to recheck protections.
6097 6119 */
6098 6120 if (svd->softlockcnt > 0) {
6099 6121 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6100 6122
6101 6123 /*
6102 6124 * If this is a shared segment, a non-zero softlockcnt
6103 6125 * means locked pages are still in use.
6104 6126 */
6105 6127 if (svd->type == MAP_SHARED) {
6106 6128 return (EAGAIN);
6107 6129 }
6108 6130
6109 6131 /*
6110 6132 * Since we do have the segvn writers lock nobody can fill
6111 6133 * the cache with entries belonging to this seg during
6112 6134 * the purge. The flush either succeeds or we still have
6113 6135 * pending I/Os.
6114 6136 */
6115 6137 segvn_purge(seg);
6116 6138 if (svd->softlockcnt > 0) {
6117 6139 return (EAGAIN);
6118 6140 }
6119 6141 }
6120 6142
6121 6143 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6122 6144 ASSERT(svd->amp == NULL);
6123 6145 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6124 6146 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6125 6147 HAT_REGION_TEXT);
6126 6148 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6127 6149 } else if (svd->tr_state == SEGVN_TR_INIT) {
6128 6150 svd->tr_state = SEGVN_TR_OFF;
6129 6151 } else if (svd->tr_state == SEGVN_TR_ON) {
6130 6152 ASSERT(svd->amp != NULL);
6131 6153 segvn_textunrepl(seg, 1);
6132 6154 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6133 6155 amp = NULL;
6134 6156 }
6135 6157
6136 6158 /*
6137 6159 * Operation for sub range of existing segment.
6138 6160 */
6139 6161 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) {
6140 6162 if (szc < seg->s_szc) {
6141 6163 VM_STAT_ADD(segvnvmstats.demoterange[2]);
6142 6164 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0);
6143 6165 if (err == 0) {
6144 6166 return (IE_RETRY);
6145 6167 }
6146 6168 if (err == ENOMEM) {
6147 6169 return (IE_NOMEM);
6148 6170 }
6149 6171 return (err);
6150 6172 }
6151 6173 if (addr != seg->s_base) {
6152 6174 nseg = segvn_split_seg(seg, addr);
6153 6175 if (eaddr != (nseg->s_base + nseg->s_size)) {
6154 6176 /* eaddr is szc aligned */
6155 6177 (void) segvn_split_seg(nseg, eaddr);
6156 6178 }
6157 6179 return (IE_RETRY);
6158 6180 }
6159 6181 if (eaddr != (seg->s_base + seg->s_size)) {
6160 6182 /* eaddr is szc aligned */
6161 6183 (void) segvn_split_seg(seg, eaddr);
6162 6184 }
6163 6185 return (IE_RETRY);
6164 6186 }
6165 6187
6166 6188 /*
6167 6189 * Break any low level sharing and reset seg->s_szc to 0.
6168 6190 */
6169 6191 if ((err = segvn_clrszc(seg)) != 0) {
6170 6192 if (err == ENOMEM) {
6171 6193 err = IE_NOMEM;
6172 6194 }
6173 6195 return (err);
6174 6196 }
6175 6197 ASSERT(seg->s_szc == 0);
6176 6198
6177 6199 /*
6178 6200 * If the end of the current segment is not pgsz aligned
6179 6201 * then attempt to concatenate with the next segment.
6180 6202 */
6181 6203 if (!IS_P2ALIGNED(eaddr, pgsz)) {
6182 6204 nseg = AS_SEGNEXT(seg->s_as, seg);
6183 6205 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) {
6184 6206 return (ENOMEM);
6185 6207 }
6186 6208 if (nseg->s_ops != &segvn_ops) {
6187 6209 return (EINVAL);
6188 6210 }
6189 6211 nsvd = (struct segvn_data *)nseg->s_data;
6190 6212 if (nsvd->softlockcnt > 0) {
6191 6213 /*
6192 6214 * If this is a shared segment, a non-zero softlockcnt
6193 6215 * means locked pages are still in use.
6194 6216 */
6195 6217 if (nsvd->type == MAP_SHARED) {
6196 6218 return (EAGAIN);
6197 6219 }
6198 6220 segvn_purge(nseg);
6199 6221 if (nsvd->softlockcnt > 0) {
6200 6222 return (EAGAIN);
6201 6223 }
6202 6224 }
6203 6225 err = segvn_clrszc(nseg);
6204 6226 if (err == ENOMEM) {
6205 6227 err = IE_NOMEM;
6206 6228 }
6207 6229 if (err != 0) {
6208 6230 return (err);
6209 6231 }
6210 6232 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6211 6233 err = segvn_concat(seg, nseg, 1);
6212 6234 if (err == -1) {
6213 6235 return (EINVAL);
6214 6236 }
6215 6237 if (err == -2) {
6216 6238 return (IE_NOMEM);
6217 6239 }
6218 6240 return (IE_RETRY);
6219 6241 }
6220 6242
6221 6243 /*
6222 6244 * May need to re-align anon array to
6223 6245 * new szc.
6224 6246 */
6225 6247 if (amp != NULL) {
6226 6248 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6227 6249 struct anon_hdr *nahp;
6228 6250
6229 6251 ASSERT(svd->type == MAP_PRIVATE);
6230 6252
6231 6253 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6232 6254 ASSERT(amp->refcnt == 1);
6233 6255 nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6234 6256 if (nahp == NULL) {
6235 6257 ANON_LOCK_EXIT(&amp->a_rwlock);
6236 6258 return (IE_NOMEM);
6237 6259 }
6238 6260 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6239 6261 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6240 6262 anon_release(nahp, btop(amp->size));
6241 6263 ANON_LOCK_EXIT(&amp->a_rwlock);
6242 6264 return (IE_NOMEM);
6243 6265 }
6244 6266 anon_release(amp->ahp, btop(amp->size));
6245 6267 amp->ahp = nahp;
6246 6268 svd->anon_index = 0;
6247 6269 ANON_LOCK_EXIT(&amp->a_rwlock);
6248 6270 }
6249 6271 }
6250 6272 if (svd->vp != NULL && szc != 0) {
6251 6273 struct vattr va;
6252 6274 u_offset_t eoffpage = svd->offset;
6253 6275 va.va_mask = AT_SIZE;
6254 6276 eoffpage += seg->s_size;
6255 6277 eoffpage = btopr(eoffpage);
6256 6278 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6257 6279 segvn_setpgsz_getattr_err++;
6258 6280 return (EINVAL);
6259 6281 }
6260 6282 if (btopr(va.va_size) < eoffpage) {
6261 6283 segvn_setpgsz_eof_err++;
6262 6284 return (EINVAL);
6263 6285 }
6264 6286 if (amp != NULL) {
6265 6287 /*
6266 6288 * anon_fill_cow_holes() may call VOP_GETPAGE().
6267 6289 * don't take anon map lock here to avoid holding it
6268 6290 * across VOP_GETPAGE() calls that may call back into
6269 6291 * segvn for klustering checks. We don't really need
6270 6292 * anon map lock here since it's a private segment and
6271 6293 * we hold as level lock as writers.
6272 6294 */
6273 6295 if ((err = anon_fill_cow_holes(seg, seg->s_base,
6274 6296 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6275 6297 seg->s_size, szc, svd->prot, svd->vpage,
6276 6298 svd->cred)) != 0) {
6277 6299 return (EINVAL);
6278 6300 }
6279 6301 }
6280 6302 segvn_setvnode_mpss(svd->vp);
6281 6303 }
6282 6304
6283 6305 if (amp != NULL) {
6284 6306 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6285 6307 if (svd->type == MAP_PRIVATE) {
6286 6308 amp->a_szc = szc;
6287 6309 } else if (szc > amp->a_szc) {
6288 6310 amp->a_szc = szc;
6289 6311 }
6290 6312 ANON_LOCK_EXIT(&amp->a_rwlock);
6291 6313 }
6292 6314
6293 6315 seg->s_szc = szc;
6294 6316
6295 6317 return (0);
6296 6318 }
6297 6319
6298 6320 static int
6299 6321 segvn_clrszc(struct seg *seg)
6300 6322 {
6301 6323 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6302 6324 struct anon_map *amp = svd->amp;
6303 6325 size_t pgsz;
6304 6326 pgcnt_t pages;
6305 6327 int err = 0;
6306 6328 caddr_t a = seg->s_base;
6307 6329 caddr_t ea = a + seg->s_size;
6308 6330 ulong_t an_idx = svd->anon_index;
6309 6331 vnode_t *vp = svd->vp;
6310 6332 struct vpage *vpage = svd->vpage;
6311 6333 page_t *anon_pl[1 + 1], *pp;
6312 6334 struct anon *ap, *oldap;
6313 6335 uint_t prot = svd->prot, vpprot;
6314 6336 int pageflag = 0;
6315 6337
6316 6338 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6317 6339 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6318 6340 ASSERT(svd->softlockcnt == 0);
6319 6341
6320 6342 if (vp == NULL && amp == NULL) {
6321 6343 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6322 6344 seg->s_szc = 0;
6323 6345 return (0);
6324 6346 }
6325 6347
6326 6348 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6327 6349 ASSERT(svd->amp == NULL);
6328 6350 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6329 6351 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6330 6352 HAT_REGION_TEXT);
6331 6353 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6332 6354 } else if (svd->tr_state == SEGVN_TR_ON) {
6333 6355 ASSERT(svd->amp != NULL);
6334 6356 segvn_textunrepl(seg, 1);
6335 6357 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6336 6358 amp = NULL;
6337 6359 } else {
6338 6360 if (svd->tr_state != SEGVN_TR_OFF) {
6339 6361 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6340 6362 svd->tr_state = SEGVN_TR_OFF;
6341 6363 }
6342 6364
6343 6365 /*
6344 6366 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6345 6367 * unload argument is 0 when we are freeing the segment
6346 6368 * and unload was already done.
6347 6369 */
6348 6370 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6349 6371 HAT_UNLOAD_UNMAP);
6350 6372 }
6351 6373
6352 6374 if (amp == NULL || svd->type == MAP_SHARED) {
6353 6375 seg->s_szc = 0;
6354 6376 return (0);
6355 6377 }
6356 6378
6357 6379 pgsz = page_get_pagesize(seg->s_szc);
6358 6380 pages = btop(pgsz);
6359 6381
6360 6382 /*
6361 6383 * XXX anon rwlock is not really needed because this is a
6362 6384 * private segment and we are writers.
6363 6385 */
6364 6386 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6365 6387
6366 6388 for (; a < ea; a += pgsz, an_idx += pages) {
6367 6389 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6368 6390 ASSERT(vpage != NULL || svd->pageprot == 0);
6369 6391 if (vpage != NULL) {
6370 6392 ASSERT(sameprot(seg, a, pgsz));
6371 6393 prot = VPP_PROT(vpage);
6372 6394 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6373 6395 }
6374 6396 if (seg->s_szc != 0) {
6375 6397 ASSERT(vp == NULL || anon_pages(amp->ahp,
6376 6398 an_idx, pages) == pages);
6377 6399 if ((err = anon_map_demotepages(amp, an_idx,
6378 6400 seg, a, prot, vpage, svd->cred)) != 0) {
6379 6401 goto out;
6380 6402 }
6381 6403 } else {
6382 6404 if (oldap->an_refcnt == 1) {
6383 6405 continue;
6384 6406 }
6385 6407 if ((err = anon_getpage(&oldap, &vpprot,
6386 6408 anon_pl, PAGESIZE, seg, a, S_READ,
6387 6409 svd->cred))) {
6388 6410 goto out;
6389 6411 }
6390 6412 if ((pp = anon_private(&ap, seg, a, prot,
6391 6413 anon_pl[0], pageflag, svd->cred)) == NULL) {
6392 6414 err = ENOMEM;
6393 6415 goto out;
6394 6416 }
6395 6417 anon_decref(oldap);
6396 6418 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6397 6419 ANON_SLEEP);
6398 6420 page_unlock(pp);
6399 6421 }
6400 6422 }
6401 6423 vpage = (vpage == NULL) ? NULL : vpage + pages;
6402 6424 }
6403 6425
6404 6426 amp->a_szc = 0;
6405 6427 seg->s_szc = 0;
6406 6428 out:
6407 6429 ANON_LOCK_EXIT(&amp->a_rwlock);
6408 6430 return (err);
6409 6431 }
6410 6432
6411 6433 static int
6412 6434 segvn_claim_pages(
6413 6435 struct seg *seg,
6414 6436 struct vpage *svp,
6415 6437 u_offset_t off,
6416 6438 ulong_t anon_idx,
6417 6439 uint_t prot)
6418 6440 {
6419 6441 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6420 6442 size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6421 6443 page_t **ppa;
6422 6444 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6423 6445 struct anon_map *amp = svd->amp;
6424 6446 struct vpage *evp = svp + pgcnt;
6425 6447 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6426 6448 + seg->s_base;
6427 6449 struct anon *ap;
6428 6450 struct vnode *vp = svd->vp;
6429 6451 page_t *pp;
6430 6452 pgcnt_t pg_idx, i;
6431 6453 int err = 0;
6432 6454 anoff_t aoff;
6433 6455 int anon = (amp != NULL) ? 1 : 0;
6434 6456
6435 6457 ASSERT(svd->type == MAP_PRIVATE);
6436 6458 ASSERT(svd->vpage != NULL);
6437 6459 ASSERT(seg->s_szc != 0);
6438 6460 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
6439 6461 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt));
6440 6462 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));
6441 6463
6442 6464 if (VPP_PROT(svp) == prot)
6443 6465 return (1);
6444 6466 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE))
6445 6467 return (1);
6446 6468
6447 6469 ppa = kmem_alloc(ppasize, KM_SLEEP);
6448 6470 if (anon && vp != NULL) {
6449 6471 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) {
6450 6472 anon = 0;
6451 6473 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt));
6452 6474 }
6453 6475 ASSERT(!anon ||
6454 6476 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt);
6455 6477 }
6456 6478
6457 6479 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
6458 6480 if (!VPP_ISPPLOCK(svp))
6459 6481 continue;
6460 6482 if (anon) {
6461 6483 ap = anon_get_ptr(amp->ahp, anon_idx);
6462 6484 if (ap == NULL) {
6463 6485 panic("segvn_claim_pages: no anon slot");
6464 6486 }
6465 6487 swap_xlate(ap, &vp, &aoff);
6466 6488 off = (u_offset_t)aoff;
6467 6489 }
6468 6490 ASSERT(vp != NULL);
6469 6491 if ((pp = page_lookup(vp,
6470 6492 (u_offset_t)off, SE_SHARED)) == NULL) {
6471 6493 panic("segvn_claim_pages: no page");
6472 6494 }
6473 6495 ppa[pg_idx++] = pp;
6474 6496 off += PAGESIZE;
6475 6497 }
6476 6498
6477 6499 if (ppa[0] == NULL) {
6478 6500 kmem_free(ppa, ppasize);
6479 6501 return (1);
6480 6502 }
6481 6503
6482 6504 ASSERT(pg_idx <= pgcnt);
6483 6505 ppa[pg_idx] = NULL;
6484 6506
6485 6507
6486 6508 /* Find each large page within ppa, and adjust its claim */
6487 6509
6488 6510 /* Does ppa cover a single large page? */
6489 6511 if (ppa[0]->p_szc == seg->s_szc) {
6490 6512 if (prot & PROT_WRITE)
6491 6513 err = page_addclaim_pages(ppa);
6492 6514 else
6493 6515 err = page_subclaim_pages(ppa);
6494 6516 } else {
6495 6517 for (i = 0; ppa[i]; i += pgcnt) {
6496 6518 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
6497 6519 if (prot & PROT_WRITE)
6498 6520 err = page_addclaim_pages(&ppa[i]);
6499 6521 else
6500 6522 err = page_subclaim_pages(&ppa[i]);
6501 6523 if (err == 0)
6502 6524 break;
6503 6525 }
6504 6526 }
6505 6527
6506 6528 for (i = 0; i < pg_idx; i++) {
6507 6529 ASSERT(ppa[i] != NULL);
6508 6530 page_unlock(ppa[i]);
6509 6531 }
6510 6532
6511 6533 kmem_free(ppa, ppasize);
6512 6534 return (err);
6513 6535 }
6514 6536
6515 6537 /*
6516 6538 * Returns right (upper address) segment if split occurred.
6517 6539 * If the address is equal to the beginning or end of its segment it returns
6518 6540 * the current segment.
6519 6541 */
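/*
 * A sketch of the result (addresses illustrative):
 *
 *	before:	seg	[s_base ................... s_base + s_size)
 *	after:	seg	[s_base ..... addr)
 *		nseg	[addr ............. old s_base + s_size)
 *
 * i.e. the original segment is trimmed down to the left-hand part and
 * the newly allocated right-hand segment is returned.
 */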
6520 6542 static struct seg *
6521 6543 segvn_split_seg(struct seg *seg, caddr_t addr)
6522 6544 {
6523 6545 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6524 6546 struct seg *nseg;
6525 6547 size_t nsize;
6526 6548 struct segvn_data *nsvd;
6527 6549
6528 6550 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6529 6551 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6530 6552
6531 6553 ASSERT(addr >= seg->s_base);
6532 6554 ASSERT(addr <= seg->s_base + seg->s_size);
6533 6555 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6534 6556
6535 6557 if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6536 6558 return (seg);
6537 6559
6538 6560 nsize = seg->s_base + seg->s_size - addr;
6539 6561 seg->s_size = addr - seg->s_base;
6540 6562 nseg = seg_alloc(seg->s_as, addr, nsize);
6541 6563 ASSERT(nseg != NULL);
6542 6564 nseg->s_ops = seg->s_ops;
6543 6565 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
6544 6566 nseg->s_data = (void *)nsvd;
6545 6567 nseg->s_szc = seg->s_szc;
6546 6568 *nsvd = *svd;
6547 6569 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6548 6570 nsvd->seg = nseg;
6549 6571 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);
6550 6572
6551 6573 if (nsvd->vp != NULL) {
6552 6574 VN_HOLD(nsvd->vp);
6553 6575 nsvd->offset = svd->offset +
6554 6576 (uintptr_t)(nseg->s_base - seg->s_base);
6555 6577 if (nsvd->type == MAP_SHARED)
6556 6578 lgrp_shm_policy_init(NULL, nsvd->vp);
6557 6579 } else {
6558 6580 /*
6559 6581 * The offset for an anonymous segment has no significance in
6560 6582 * terms of an offset into a file. If we were to use the above
6561 6583 * calculation instead, the structures read out of
6562 6584 * /proc/<pid>/xmap would be more difficult to decipher since
6563 6585 * it would be unclear whether two seemingly contiguous
6564 6586 * prxmap_t structures represented different segments or a
6565 6587 * single segment that had been split up into multiple prxmap_t
6566 6588 * structures (e.g. if some part of the segment had not yet
6567 6589 * been faulted in).
6568 6590 */
6569 6591 nsvd->offset = 0;
6570 6592 }
6571 6593
6572 6594 ASSERT(svd->softlockcnt == 0);
6573 6595 ASSERT(svd->softlockcnt_sbase == 0);
6574 6596 ASSERT(svd->softlockcnt_send == 0);
6575 6597 crhold(svd->cred);
6576 6598
6577 6599 if (svd->vpage != NULL) {
6578 6600 size_t bytes = vpgtob(seg_pages(seg));
6579 6601 size_t nbytes = vpgtob(seg_pages(nseg));
6580 6602 struct vpage *ovpage = svd->vpage;
6581 6603
6582 6604 svd->vpage = kmem_alloc(bytes, KM_SLEEP);
6583 6605 bcopy(ovpage, svd->vpage, bytes);
6584 6606 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
6585 6607 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes);
6586 6608 kmem_free(ovpage, bytes + nbytes);
6587 6609 }
6588 6610 if (svd->amp != NULL && svd->type == MAP_PRIVATE) {
6589 6611 struct anon_map *oamp = svd->amp, *namp;
6590 6612 struct anon_hdr *nahp;
6591 6613
6592 6614 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER);
6593 6615 ASSERT(oamp->refcnt == 1);
6594 6616 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
6595 6617 (void) anon_copy_ptr(oamp->ahp, svd->anon_index,
6596 6618 nahp, 0, btop(seg->s_size), ANON_SLEEP);
6597 6619
6598 6620 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
6599 6621 namp->a_szc = nseg->s_szc;
6600 6622 (void) anon_copy_ptr(oamp->ahp,
6601 6623 svd->anon_index + btop(seg->s_size),
6602 6624 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
6603 6625 anon_release(oamp->ahp, btop(oamp->size));
6604 6626 oamp->ahp = nahp;
6605 6627 oamp->size = seg->s_size;
6606 6628 svd->anon_index = 0;
6607 6629 nsvd->amp = namp;
6608 6630 nsvd->anon_index = 0;
6609 6631 ANON_LOCK_EXIT(&oamp->a_rwlock);
6610 6632 } else if (svd->amp != NULL) {
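/*
 * MAP_SHARED: both segments keep sharing the same anon_map;
 * offset the new segment's anon index and take another
 * reference on the amp.
 */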
6611 6633 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6612 6634 ASSERT(svd->amp == nsvd->amp);
6613 6635 ASSERT(seg->s_szc <= svd->amp->a_szc);
6614 6636 nsvd->anon_index = svd->anon_index + seg_pages(seg);
6615 6637 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt));
6616 6638 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER);
6617 6639 svd->amp->refcnt++;
6618 6640 ANON_LOCK_EXIT(&svd->amp->a_rwlock);
6619 6641 }
6620 6642
6621 6643 /*
6622 6644 * Split the amount of swap reserved.
6623 6645 */
6624 6646 if (svd->swresv) {
6625 6647 /*
6626 6648 * For MAP_NORESERVE, only allocate swap reserve for pages
6627 6649 * being used. Other segments get enough to cover whole
6628 6650 * segment.
6629 6651 */
6630 6652 if (svd->flags & MAP_NORESERVE) {
6631 6653 size_t oswresv;
6632 6654
6633 6655 ASSERT(svd->amp);
6634 6656 oswresv = svd->swresv;
6635 6657 svd->swresv = ptob(anon_pages(svd->amp->ahp,
6636 6658 svd->anon_index, btop(seg->s_size)));
6637 6659 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
6638 6660 nsvd->anon_index, btop(nseg->s_size)));
6639 6661 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
6640 6662 } else {
6641 6663 if (svd->pageswap) {
6642 6664 svd->swresv = segvn_count_swap_by_vpages(seg);
6643 6665 ASSERT(nsvd->swresv >= svd->swresv);
6644 6666 nsvd->swresv -= svd->swresv;
6645 6667 } else {
6646 6668 ASSERT(svd->swresv == seg->s_size +
6647 6669 nseg->s_size);
6648 6670 svd->swresv = seg->s_size;
6649 6671 nsvd->swresv = nseg->s_size;
6650 6672 }
6651 6673 }
6652 6674 }
6653 6675
6654 6676 return (nseg);
6655 6677 }
6656 6678
6657 6679 /*
6658 6680 * Called on memory operations (unmap, setprot, setpagesize) for a subset
6659 6681 * of a large page segment, to demote either the entire memory range
6660 6682 * (SDR_RANGE) or only its ends (SDR_END), as given by addr/len.
6661 6683 *
6662 6684 * Returns 0 on success; returns an errno, including ENOMEM, on failure.
6663 6685 */
6664 6686 static int
6665 6687 segvn_demote_range(
6666 6688 struct seg *seg,
6667 6689 caddr_t addr,
6668 6690 size_t len,
6669 6691 int flag,
6670 6692 uint_t szcvec)
6671 6693 {
6672 6694 caddr_t eaddr = addr + len;
6673 6695 caddr_t lpgaddr, lpgeaddr;
6674 6696 struct seg *nseg;
6675 6697 struct seg *badseg1 = NULL;
6676 6698 struct seg *badseg2 = NULL;
6677 6699 size_t pgsz;
6678 6700 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6679 6701 int err;
6680 6702 uint_t szc = seg->s_szc;
6681 6703 uint_t tszcvec;
6682 6704
6683 6705 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6684 6706 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6685 6707 ASSERT(szc != 0);
6686 6708 pgsz = page_get_pagesize(szc);
6687 6709 ASSERT(seg->s_base != addr || seg->s_size != len);
6688 6710 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6689 6711 ASSERT(svd->softlockcnt == 0);
6690 6712 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6691 6713 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6692 6714
6693 6715 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
6694 6716 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
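/*
 * lpgaddr/lpgeaddr are the large-page aligned boundaries enclosing
 * [addr, addr + len).  Split off the piece(s) that must be demoted
 * into badseg1 (and possibly badseg2) so that only those sub-segments
 * lose their large page size code.
 */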
6695 6717 if (flag == SDR_RANGE) {
6696 6718 /* demote entire range */
6697 6719 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6698 6720 (void) segvn_split_seg(nseg, lpgeaddr);
6699 6721 ASSERT(badseg1->s_base == lpgaddr);
6700 6722 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
6701 6723 } else if (addr != lpgaddr) {
6702 6724 ASSERT(flag == SDR_END);
6703 6725 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6704 6726 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
6705 6727 eaddr < lpgaddr + 2 * pgsz) {
6706 6728 (void) segvn_split_seg(nseg, lpgeaddr);
6707 6729 ASSERT(badseg1->s_base == lpgaddr);
6708 6730 ASSERT(badseg1->s_size == 2 * pgsz);
6709 6731 } else {
6710 6732 nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
6711 6733 ASSERT(badseg1->s_base == lpgaddr);
6712 6734 ASSERT(badseg1->s_size == pgsz);
6713 6735 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
6714 6736 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
6715 6737 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
6716 6738 badseg2 = nseg;
6717 6739 (void) segvn_split_seg(nseg, lpgeaddr);
6718 6740 ASSERT(badseg2->s_base == lpgeaddr - pgsz);
6719 6741 ASSERT(badseg2->s_size == pgsz);
6720 6742 }
6721 6743 }
6722 6744 } else {
6723 6745 ASSERT(flag == SDR_END);
6724 6746 ASSERT(eaddr < lpgeaddr);
6725 6747 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
6726 6748 (void) segvn_split_seg(nseg, lpgeaddr);
6727 6749 ASSERT(badseg1->s_base == lpgeaddr - pgsz);
6728 6750 ASSERT(badseg1->s_size == pgsz);
6729 6751 }
6730 6752
6731 6753 ASSERT(badseg1 != NULL);
6732 6754 ASSERT(badseg1->s_szc == szc);
6733 6755 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
6734 6756 badseg1->s_size == 2 * pgsz);
6735 6757 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
6736 6758 ASSERT(badseg1->s_size == pgsz ||
6737 6759 sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
6738 6760 if (err = segvn_clrszc(badseg1)) {
6739 6761 return (err);
6740 6762 }
6741 6763 ASSERT(badseg1->s_szc == 0);
6742 6764
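/*
 * If the szcvec for this shared mapping allows intermediate page
 * sizes, re-promote the demoted end to the largest smaller size and
 * recurse to handle any remaining misalignment.
 */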
6743 6765 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6744 6766 uint_t tszc = highbit(tszcvec) - 1;
6745 6767 caddr_t ta = MAX(addr, badseg1->s_base);
6746 6768 caddr_t te;
6747 6769 size_t tpgsz = page_get_pagesize(tszc);
6748 6770
6749 6771 ASSERT(svd->type == MAP_SHARED);
6750 6772 ASSERT(flag == SDR_END);
6751 6773 ASSERT(tszc < szc && tszc > 0);
6752 6774
6753 6775 if (eaddr > badseg1->s_base + badseg1->s_size) {
6754 6776 te = badseg1->s_base + badseg1->s_size;
6755 6777 } else {
6756 6778 te = eaddr;
6757 6779 }
6758 6780
6759 6781 ASSERT(ta <= te);
6760 6782 badseg1->s_szc = tszc;
6761 6783 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
6762 6784 if (badseg2 != NULL) {
6763 6785 err = segvn_demote_range(badseg1, ta, te - ta,
6764 6786 SDR_END, tszcvec);
6765 6787 if (err != 0) {
6766 6788 return (err);
6767 6789 }
6768 6790 } else {
6769 6791 return (segvn_demote_range(badseg1, ta,
6770 6792 te - ta, SDR_END, tszcvec));
6771 6793 }
6772 6794 }
6773 6795 }
6774 6796
6775 6797 if (badseg2 == NULL)
6776 6798 return (0);
6777 6799 ASSERT(badseg2->s_szc == szc);
6778 6800 ASSERT(badseg2->s_size == pgsz);
6779 6801 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
6780 6802 if (err = segvn_clrszc(badseg2)) {
6781 6803 return (err);
6782 6804 }
6783 6805 ASSERT(badseg2->s_szc == 0);
6784 6806
6785 6807 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6786 6808 uint_t tszc = highbit(tszcvec) - 1;
6787 6809 size_t tpgsz = page_get_pagesize(tszc);
6788 6810
6789 6811 ASSERT(svd->type == MAP_SHARED);
6790 6812 ASSERT(flag == SDR_END);
6791 6813 ASSERT(tszc < szc && tszc > 0);
6792 6814 ASSERT(badseg2->s_base > addr);
6793 6815 ASSERT(eaddr > badseg2->s_base);
6794 6816 ASSERT(eaddr < badseg2->s_base + badseg2->s_size);
6795 6817
6796 6818 badseg2->s_szc = tszc;
6797 6819 if (!IS_P2ALIGNED(eaddr, tpgsz)) {
6798 6820 return (segvn_demote_range(badseg2, badseg2->s_base,
6799 6821 eaddr - badseg2->s_base, SDR_END, tszcvec));
6800 6822 }
6801 6823 }
6802 6824
6803 6825 return (0);
6804 6826 }
6805 6827
6806 6828 static int
6807 6829 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6808 6830 {
6809 6831 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6810 6832 struct vpage *vp, *evp;
6811 6833
6812 6834 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6813 6835
6814 6836 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6815 6837 /*
6816 6838 * If segment-level protections can be used, simply check against them.
6817 6839 */
6818 6840 if (svd->pageprot == 0) {
6819 6841 int err;
6820 6842
6821 6843 err = ((svd->prot & prot) != prot) ? EACCES : 0;
6822 6844 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6823 6845 return (err);
6824 6846 }
6825 6847
6826 6848 /*
6827 6849 * Have to check down to the vpage level.
6828 6850 */
6829 6851 evp = &svd->vpage[seg_page(seg, addr + len)];
6830 6852 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6831 6853 if ((VPP_PROT(vp) & prot) != prot) {
6832 6854 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6833 6855 return (EACCES);
6834 6856 }
6835 6857 }
6836 6858 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6837 6859 return (0);
6838 6860 }
6839 6861
6840 6862 static int
6841 6863 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6842 6864 {
6843 6865 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6844 6866 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6845 6867
6846 6868 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6847 6869
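/*
 * Fill protv[] from back to front, using either the segment-wide
 * protections or the per-page vpage protections.
 */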
6848 6870 if (pgno != 0) {
6849 6871 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6850 6872 if (svd->pageprot == 0) {
6851 6873 do {
6852 6874 protv[--pgno] = svd->prot;
6853 6875 } while (pgno != 0);
6854 6876 } else {
6855 6877 size_t pgoff = seg_page(seg, addr);
6856 6878
6857 6879 do {
6858 6880 pgno--;
6859 6881 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
6860 6882 } while (pgno != 0);
6861 6883 }
6862 6884 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6863 6885 }
6864 6886 return (0);
6865 6887 }
6866 6888
6867 6889 static u_offset_t
6868 6890 segvn_getoffset(struct seg *seg, caddr_t addr)
6869 6891 {
6870 6892 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6871 6893
6872 6894 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6873 6895
6874 6896 return (svd->offset + (uintptr_t)(addr - seg->s_base));
6875 6897 }
6876 6898
6877 6899 /*ARGSUSED*/
6878 6900 static int
6879 6901 segvn_gettype(struct seg *seg, caddr_t addr)
6880 6902 {
6881 6903 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6882 6904
6883 6905 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6884 6906
6885 6907 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6886 6908 MAP_INITDATA)));
6887 6909 }
6888 6910
6889 6911 /*ARGSUSED*/
6890 6912 static int
6891 6913 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6892 6914 {
6893 6915 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6894 6916
6895 6917 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6896 6918
6897 6919 *vpp = svd->vp;
6898 6920 return (0);
6899 6921 }
6900 6922
6901 6923 /*
6902 6924 * Check to see if it makes sense to do kluster/read ahead to
6903 6925 * addr + delta relative to the mapping at addr. We assume here
6904 6926 * that delta is a signed PAGESIZE'd multiple (which can be negative).
6905 6927 *
6906 6928 * For segvn, we currently "approve" of the action if we are
6907 6929 * still in the segment and it maps from the same vp/off,
6908 6930 * or if the advice stored in segvn_data or vpages allows it.
6909 6931 * Currently, klustering is disallowed only if MADV_RANDOM is set.
6910 6932 */
6911 6933 static int
6912 6934 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6913 6935 {
6914 6936 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6915 6937 struct anon *oap, *ap;
6916 6938 ssize_t pd;
6917 6939 size_t page;
6918 6940 struct vnode *vp1, *vp2;
6919 6941 u_offset_t off1, off2;
6920 6942 struct anon_map *amp;
6921 6943
6922 6944 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6923 6945 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6924 6946 SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
6925 6947
6926 6948 if (addr + delta < seg->s_base ||
6927 6949 addr + delta >= (seg->s_base + seg->s_size))
6928 6950 return (-1); /* exceeded segment bounds */
6929 6951
6930 6952 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */
6931 6953 page = seg_page(seg, addr);
6932 6954
6933 6955 /*
6934 6956 * Check to see if either of the pages addr or addr + delta
6935 6957 * have advice set that prevents klustering (if MADV_RANDOM advice
6936 6958 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
6937 6959 * is negative).
6938 6960 */
6939 6961 if (svd->advice == MADV_RANDOM ||
6940 6962 svd->advice == MADV_SEQUENTIAL && delta < 0)
6941 6963 return (-1);
6942 6964 else if (svd->pageadvice && svd->vpage) {
6943 6965 struct vpage *bvpp, *evpp;
6944 6966
6945 6967 bvpp = &svd->vpage[page];
6946 6968 evpp = &svd->vpage[page + pd];
6947 6969 if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
6948 6970 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
6949 6971 return (-1);
6950 6972 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
6951 6973 VPP_ADVICE(evpp) == MADV_RANDOM)
6952 6974 return (-1);
6953 6975 }
6954 6976
6955 6977 if (svd->type == MAP_SHARED)
6956 6978 return (0); /* shared mapping - all ok */
6957 6979
6958 6980 if ((amp = svd->amp) == NULL)
6959 6981 return (0); /* off original vnode */
6960 6982
6961 6983 page += svd->anon_index;
6962 6984
6963 6985 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
6964 6986
6965 6987 oap = anon_get_ptr(amp->ahp, page);
6966 6988 ap = anon_get_ptr(amp->ahp, page + pd);
6967 6989
6968 6990 ANON_LOCK_EXIT(&amp->a_rwlock);
6969 6991
6970 6992 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
6971 6993 return (-1); /* one with and one without an anon */
6972 6994 }
6973 6995
6974 6996 if (oap == NULL) { /* implies that ap == NULL */
6975 6997 return (0); /* off original vnode */
6976 6998 }
6977 6999
6978 7000 /*
6979 7001 * Now we know we have two anon pointers - check to
6980 7002 * see if they happen to be properly allocated.
6981 7003 */
6982 7004
6983 7005 /*
6984 7006 * XXX We cheat here and don't lock the anon slots. We can't because
6985 7007 * we may have been called from the anon layer which might already
6986 7008 * have locked them. We are holding a refcnt on the slots so they
6987 7009 * can't disappear. The worst that will happen is we'll get the wrong
6988 7010 * names (vp, off) for the slots and make a poor klustering decision.
6989 7011 */
6990 7012 swap_xlate(ap, &vp1, &off1);
6991 7013 swap_xlate(oap, &vp2, &off2);
6992 7014
6993 7015
6994 7016 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
6995 7017 return (-1);
6996 7018 return (0);
6997 7019 }
6998 7020
6999 7021 /*
7000 7022 * Swap the pages of seg out to secondary storage, returning the
7001 7023 * number of bytes of storage freed.
7002 7024 *
7003 7025 * The basic idea is first to unload all translations and then to call
7004 7026 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the
7005 7027 * swap device. Pages to which other segments have mappings will remain
7006 7028 * mapped and won't be swapped. Our caller (as_swapout) has already
7007 7029 * performed the unloading step.
7008 7030 *
7009 7031 * The value returned is intended to correlate well with the process's
7010 7032 * memory requirements. However, there are some caveats:
7011 7033 * 1) When given a shared segment as argument, this routine will
7012 7034 * only succeed in swapping out pages for the last sharer of the
7013 7035 * segment. (Previous callers will only have decremented mapping
7014 7036 * reference counts.)
7015 7037 * 2) We assume that the hat layer maintains a large enough translation
7016 7038 * cache to capture process reference patterns.
7017 7039 */
7018 7040 static size_t
7019 7041 segvn_swapout(struct seg *seg)
7020 7042 {
7021 7043 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7022 7044 struct anon_map *amp;
7023 7045 pgcnt_t pgcnt = 0;
7024 7046 pgcnt_t npages;
7025 7047 pgcnt_t page;
7026 7048 ulong_t anon_index;
7027 7049
7028 7050 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7029 7051
7030 7052 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7031 7053 /*
7032 7054 * Find pages unmapped by our caller and force them
7033 7055 * out to the virtual swap device.
7034 7056 */
7035 7057 if ((amp = svd->amp) != NULL)
7036 7058 anon_index = svd->anon_index;
7037 7059 npages = seg->s_size >> PAGESHIFT;
7038 7060 for (page = 0; page < npages; page++) {
7039 7061 page_t *pp;
7040 7062 struct anon *ap;
7041 7063 struct vnode *vp;
7042 7064 u_offset_t off;
7043 7065 anon_sync_obj_t cookie;
7044 7066
7045 7067 /*
7046 7068 * Obtain <vp, off> pair for the page, then look it up.
7047 7069 *
7048 7070 * Note that this code is willing to consider regular
7049 7071 * pages as well as anon pages. Is this appropriate here?
7050 7072 */
7051 7073 ap = NULL;
7052 7074 if (amp != NULL) {
7053 7075 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7054 7076 if (anon_array_try_enter(amp, anon_index + page,
7055 7077 &cookie)) {
7056 7078 ANON_LOCK_EXIT(&amp->a_rwlock);
7057 7079 continue;
7058 7080 }
7059 7081 ap = anon_get_ptr(amp->ahp, anon_index + page);
7060 7082 if (ap != NULL) {
7061 7083 swap_xlate(ap, &vp, &off);
7062 7084 } else {
7063 7085 vp = svd->vp;
7064 7086 off = svd->offset + ptob(page);
7065 7087 }
7066 7088 anon_array_exit(&cookie);
7067 7089 ANON_LOCK_EXIT(&amp->a_rwlock);
7068 7090 } else {
7069 7091 vp = svd->vp;
7070 7092 off = svd->offset + ptob(page);
7071 7093 }
7072 7094 if (vp == NULL) { /* untouched zfod page */
7073 7095 ASSERT(ap == NULL);
7074 7096 continue;
7075 7097 }
7076 7098
7077 7099 pp = page_lookup_nowait(vp, off, SE_SHARED);
7078 7100 if (pp == NULL)
7079 7101 continue;
7080 7102
7081 7103
7082 7104 /*
7083 7105 * Examine the page to see whether it can be tossed out,
7084 7106 * keeping track of how many we've found.
7085 7107 */
7086 7108 if (!page_tryupgrade(pp)) {
7087 7109 /*
7088 7110 * If the page has an i/o lock and no mappings,
7089 7111 * it's very likely that the page is being
7090 7112 * written out as a result of klustering.
7091 7113 * Assume this is so and take credit for it here.
7092 7114 */
7093 7115 if (!page_io_trylock(pp)) {
7094 7116 if (!hat_page_is_mapped(pp))
7095 7117 pgcnt++;
7096 7118 } else {
7097 7119 page_io_unlock(pp);
7098 7120 }
7099 7121 page_unlock(pp);
7100 7122 continue;
7101 7123 }
7102 7124 ASSERT(!page_iolock_assert(pp));
7103 7125
7104 7126
7105 7127 /*
7106 7128 * Skip if page is locked or has mappings.
7107 7129 * We don't need the page_struct_lock to look at lckcnt
7108 7130 * and cowcnt because the page is exclusive locked.
7109 7131 */
7110 7132 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
7111 7133 hat_page_is_mapped(pp)) {
7112 7134 page_unlock(pp);
7113 7135 continue;
7114 7136 }
7115 7137
7116 7138 /*
7117 7139 * dispose skips large pages so try to demote first.
7118 7140 */
7119 7141 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) {
7120 7142 page_unlock(pp);
7121 7143 /*
7122 7144 * XXX should skip the remaining page_t's of this
7123 7145 * large page.
7124 7146 */
7125 7147 continue;
7126 7148 }
7127 7149
7128 7150 ASSERT(pp->p_szc == 0);
7129 7151
7130 7152 /*
7131 7153 * No longer mapped -- we can toss it out. How
7132 7154 * we do so depends on whether or not it's dirty.
7133 7155 */
7134 7156 if (hat_ismod(pp) && pp->p_vnode) {
7135 7157 /*
7136 7158 * We must clean the page before it can be
7137 7159 * freed. Setting B_FREE will cause pvn_done
7138 7160 * to free the page when the i/o completes.
7139 7161 * XXX: This also causes it to be accounted
7140 7162 * as a pageout instead of a swap: need
7141 7163 * B_SWAPOUT bit to use instead of B_FREE.
7142 7164 *
7143 7165 * Hold the vnode before releasing the page lock
7144 7166 * to prevent it from being freed and re-used by
7145 7167 * some other thread.
7146 7168 */
7147 7169 VN_HOLD(vp);
7148 7170 page_unlock(pp);
7149 7171
7150 7172 /*
7151 7173 * Queue all i/o requests for the pageout thread
7152 7174 * to avoid saturating the pageout devices.
7153 7175 */
7154 7176 if (!queue_io_request(vp, off))
7155 7177 VN_RELE(vp);
7156 7178 } else {
7157 7179 /*
7158 7180 * The page was clean, free it.
7159 7181 *
7160 7182 * XXX: Can we ever encounter modified pages
7161 7183 * with no associated vnode here?
7162 7184 */
7163 7185 ASSERT(pp->p_vnode != NULL);
7164 7186 /*LINTED: constant in conditional context*/
7165 7187 VN_DISPOSE(pp, B_FREE, 0, kcred);
7166 7188 }
7167 7189
7168 7190 /*
7169 7191 * Credit now even if i/o is in progress.
7170 7192 */
7171 7193 pgcnt++;
7172 7194 }
7173 7195 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7174 7196
7175 7197 /*
7176 7198 * Wakeup pageout to initiate i/o on all queued requests.
7177 7199 */
7178 7200 cv_signal_pageout();
7179 7201 return (ptob(pgcnt));
7180 7202 }
7181 7203
7182 7204 /*
7183 7205 * Synchronize primary storage cache with real object in virtual memory.
7184 7206 *
7185 7207 * XXX - Anonymous pages should not be sync'ed out at all.
7186 7208 */
7187 7209 static int
7188 7210 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
7189 7211 {
7190 7212 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7191 7213 struct vpage *vpp;
7192 7214 page_t *pp;
7193 7215 u_offset_t offset;
7194 7216 struct vnode *vp;
7195 7217 u_offset_t off;
7196 7218 caddr_t eaddr;
7197 7219 int bflags;
7198 7220 int err = 0;
7199 7221 int segtype;
7200 7222 int pageprot;
7201 7223 int prot;
7202 7224 ulong_t anon_index;
7203 7225 struct anon_map *amp;
7204 7226 struct anon *ap;
7205 7227 anon_sync_obj_t cookie;
7206 7228
7207 7229 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7208 7230
7209 7231 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7210 7232
7211 7233 if (svd->softlockcnt > 0) {
7212 7234 /*
7213 7235 * If this is a shared segment, a non-zero softlockcnt
7214 7236 * means locked pages are still in use.
7215 7237 */
7216 7238 if (svd->type == MAP_SHARED) {
7217 7239 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7218 7240 return (EAGAIN);
7219 7241 }
7220 7242
7221 7243 /*
7222 7244 * flush all pages from seg cache
7223 7245 * otherwise we may deadlock in swap_putpage
7224 7246 * for B_INVAL page (4175402).
7225 7247 *
7226 7248 * Even if we grab segvn WRITER's lock
7227 7249 * here, there might be another thread which could've
7228 7250 * successfully performed lookup/insert just before
7229 7251 * we acquired the lock here. So, grabbing either
7230 7252 * lock here is not of much use. Until we devise
7231 7253 * a strategy at upper layers to solve the
7232 7254 * synchronization issues completely, we expect
7233 7255 * applications to handle this appropriately.
7234 7256 */
7235 7257 segvn_purge(seg);
7236 7258 if (svd->softlockcnt > 0) {
7237 7259 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7238 7260 return (EAGAIN);
7239 7261 }
7240 7262 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7241 7263 svd->amp->a_softlockcnt > 0) {
7242 7264 /*
7243 7265 * Try to purge this amp's entries from pcache. It will
7244 7266 * succeed only if other segments that share the amp have no
7245 7267 * outstanding softlock's.
7246 7268 */
7247 7269 segvn_purge(seg);
7248 7270 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
7249 7271 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7250 7272 return (EAGAIN);
7251 7273 }
7252 7274 }
7253 7275
7254 7276 vpp = svd->vpage;
7255 7277 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7256 7278 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
7257 7279 ((flags & MS_INVALIDATE) ? B_INVAL : 0);
7258 7280
7259 7281 if (attr) {
7260 7282 pageprot = attr & ~(SHARED|PRIVATE);
7261 7283 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;
7262 7284
7263 7285 /*
7264 7286 * We are done if the segment types don't match
7265 7287 * or if we have segment level protections and
7266 7288 * they don't match.
7267 7289 */
7268 7290 if (svd->type != segtype) {
7269 7291 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7270 7292 return (0);
7271 7293 }
7272 7294 if (vpp == NULL) {
7273 7295 if (svd->prot != pageprot) {
7274 7296 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7275 7297 return (0);
7276 7298 }
7277 7299 prot = svd->prot;
7278 7300 } else
7279 7301 vpp = &svd->vpage[seg_page(seg, addr)];
7280 7302
7281 7303 } else if (svd->vp && svd->amp == NULL &&
7282 7304 (flags & MS_INVALIDATE) == 0) {
7283 7305
7284 7306 /*
7285 7307 * No attributes, no anonymous pages and MS_INVALIDATE flag
7286 7308 * is not on, just use one big request.
7287 7309 */
7288 7310 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
7289 7311 bflags, svd->cred, NULL);
7290 7312 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7291 7313 return (err);
7292 7314 }
7293 7315
7294 7316 if ((amp = svd->amp) != NULL)
7295 7317 anon_index = svd->anon_index + seg_page(seg, addr);
7296 7318
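/*
 * Walk the range one page at a time.  Each page is resolved to its
 * backing <vp, off> (the anon swap slot if present, else the
 * segment's vnode) and pushed out with VOP_PUTPAGE() below.
 */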
7297 7319 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
7298 7320 ap = NULL;
7299 7321 if (amp != NULL) {
7300 7322 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7301 7323 anon_array_enter(amp, anon_index, &cookie);
7302 7324 ap = anon_get_ptr(amp->ahp, anon_index++);
7303 7325 if (ap != NULL) {
7304 7326 swap_xlate(ap, &vp, &off);
7305 7327 } else {
7306 7328 vp = svd->vp;
7307 7329 off = offset;
7308 7330 }
7309 7331 anon_array_exit(&cookie);
7310 7332 ANON_LOCK_EXIT(&amp->a_rwlock);
7311 7333 } else {
7312 7334 vp = svd->vp;
7313 7335 off = offset;
7314 7336 }
7315 7337 offset += PAGESIZE;
7316 7338
7317 7339 if (vp == NULL) /* untouched zfod page */
7318 7340 continue;
7319 7341
7320 7342 if (attr) {
7321 7343 if (vpp) {
7322 7344 prot = VPP_PROT(vpp);
7323 7345 vpp++;
7324 7346 }
7325 7347 if (prot != pageprot) {
7326 7348 continue;
7327 7349 }
7328 7350 }
7329 7351
7330 7352 /*
7331 7353 * See if any of these pages are locked -- if so, then we
7332 7354 * will have to truncate an invalidate request at the first
7333 7355 * locked one. We don't need the page_struct_lock to test
7334 7356 * as this is only advisory; even if we acquire it someone
7335 7357 * might race in and lock the page after we unlock and before
7336 7358 * we do the PUTPAGE, then PUTPAGE simply does nothing.
7337 7359 */
7338 7360 if (flags & MS_INVALIDATE) {
7339 7361 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
7340 7362 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
7341 7363 page_unlock(pp);
7342 7364 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7343 7365 return (EBUSY);
7344 7366 }
7345 7367 if (ap != NULL && pp->p_szc != 0 &&
7346 7368 page_tryupgrade(pp)) {
7347 7369 if (pp->p_lckcnt == 0 &&
7348 7370 pp->p_cowcnt == 0) {
7349 7371 /*
7350 7372 * swapfs VN_DISPOSE() won't
7351 7373 * invalidate large pages.
7352 7374 * Attempt to demote.
7353 7375 * XXX can't help it if it
7354 7376 * fails. But for swapfs
7355 7377 * pages it is no big deal.
7356 7378 */
7357 7379 (void) page_try_demote_pages(
7358 7380 pp);
7359 7381 }
7360 7382 }
7361 7383 page_unlock(pp);
7362 7384 }
7363 7385 } else if (svd->type == MAP_SHARED && amp != NULL) {
7364 7386 /*
7365 7387 * Avoid writing out to disk ISM's large pages
7366 7388 * because segspt_free_pages() relies on NULL an_pvp
7367 7389 * of anon slots of such pages.
7368 7390 */
7369 7391
7370 7392 ASSERT(svd->vp == NULL);
7371 7393 /*
7372 7394 * swapfs uses page_lookup_nowait if not freeing or
7373 7395 * invalidating and skips a page if
7374 7396 * page_lookup_nowait returns NULL.
7375 7397 */
7376 7398 pp = page_lookup_nowait(vp, off, SE_SHARED);
7377 7399 if (pp == NULL) {
7378 7400 continue;
7379 7401 }
7380 7402 if (pp->p_szc != 0) {
7381 7403 page_unlock(pp);
7382 7404 continue;
7383 7405 }
7384 7406
7385 7407 /*
7386 7408 * Note ISM pages are created large so (vp, off)'s
7387 7409 * page cannot suddenly become large after we unlock
7388 7410 * pp.
7389 7411 */
7390 7412 page_unlock(pp);
7391 7413 }
7392 7414 /*
7393 7415 * XXX - Should ultimately try to kluster
7394 7416 * calls to VOP_PUTPAGE() for performance.
7395 7417 */
7396 7418 VN_HOLD(vp);
7397 7419 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
7398 7420 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
7399 7421 svd->cred, NULL);
7400 7422
7401 7423 VN_RELE(vp);
7402 7424 if (err)
7403 7425 break;
7404 7426 }
7405 7427 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7406 7428 return (err);
7407 7429 }
7408 7430
7409 7431 /*
7410 7432 * Determine if we have data corresponding to pages in the
7411 7433 * primary storage virtual memory cache (i.e., "in core").
7412 7434 */
7413 7435 static size_t
7414 7436 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
7415 7437 {
7416 7438 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7417 7439 struct vnode *vp, *avp;
7418 7440 u_offset_t offset, aoffset;
7419 7441 size_t p, ep;
7420 7442 int ret;
7421 7443 struct vpage *vpp;
7422 7444 page_t *pp;
7423 7445 uint_t start;
7424 7446 struct anon_map *amp; /* XXX - for locknest */
7425 7447 struct anon *ap;
7426 7448 uint_t attr;
7427 7449 anon_sync_obj_t cookie;
7428 7450
7429 7451 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7430 7452
7431 7453 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7432 7454 if (svd->amp == NULL && svd->vp == NULL) {
7433 7455 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7434 7456 bzero(vec, btopr(len));
7435 7457 return (len); /* no anonymous pages created yet */
7436 7458 }
7437 7459
7438 7460 p = seg_page(seg, addr);
7439 7461 ep = seg_page(seg, addr + len);
7440 7462 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;
7441 7463
7442 7464 amp = svd->amp;
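/*
 * For each page, accumulate SEG_PAGE_* flags describing the anon
 * copy (if any), the vnode page, and the per-page lock state kept
 * in the vpage array, then store the result into *vec.
 */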
7443 7465 for (; p < ep; p++, addr += PAGESIZE) {
7444 7466 vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
7445 7467 ret = start;
7446 7468 ap = NULL;
7447 7469 avp = NULL;
7448 7470 /* Grab the vnode/offset for the anon slot */
7449 7471 if (amp != NULL) {
7450 7472 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7451 7473 anon_array_enter(amp, svd->anon_index + p, &cookie);
7452 7474 ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
7453 7475 if (ap != NULL) {
7454 7476 swap_xlate(ap, &avp, &aoffset);
7455 7477 }
7456 7478 anon_array_exit(&cookie);
7457 7479 ANON_LOCK_EXIT(&amp->a_rwlock);
7458 7480 }
7459 7481 if ((avp != NULL) && page_exists(avp, aoffset)) {
7460 7482 /* A page exists for the anon slot */
7461 7483 ret |= SEG_PAGE_INCORE;
7462 7484
7463 7485 /*
7464 7486 * If page is mapped and writable
7465 7487 */
7466 7488 attr = (uint_t)0;
7467 7489 if ((hat_getattr(seg->s_as->a_hat, addr,
7468 7490 &attr) != -1) && (attr & PROT_WRITE)) {
7469 7491 ret |= SEG_PAGE_ANON;
7470 7492 }
7471 7493 /*
7472 7494 * Don't get page_struct lock for lckcnt and cowcnt,
7473 7495 * since this is purely advisory.
7474 7496 */
7475 7497 if ((pp = page_lookup_nowait(avp, aoffset,
7476 7498 SE_SHARED)) != NULL) {
7477 7499 if (pp->p_lckcnt)
7478 7500 ret |= SEG_PAGE_SOFTLOCK;
7479 7501 if (pp->p_cowcnt)
7480 7502 ret |= SEG_PAGE_HASCOW;
7481 7503 page_unlock(pp);
7482 7504 }
7483 7505 }
7484 7506
7485 7507 /* Gather vnode statistics */
7486 7508 vp = svd->vp;
7487 7509 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7488 7510
7489 7511 if (vp != NULL) {
7490 7512 /*
7491 7513 * Try to obtain a "shared" lock on the page
7492 7514 * without blocking. If this fails, determine
7493 7515 * if the page is in memory.
7494 7516 */
7495 7517 pp = page_lookup_nowait(vp, offset, SE_SHARED);
7496 7518 if ((pp == NULL) && (page_exists(vp, offset))) {
7497 7519 /* Page is incore, and is named */
7498 7520 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7499 7521 }
7500 7522 /*
7501 7523 * Don't get page_struct lock for lckcnt and cowcnt,
7502 7524 * since this is purely advisory.
7503 7525 */
7504 7526 if (pp != NULL) {
7505 7527 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7506 7528 if (pp->p_lckcnt)
7507 7529 ret |= SEG_PAGE_SOFTLOCK;
7508 7530 if (pp->p_cowcnt)
7509 7531 ret |= SEG_PAGE_HASCOW;
7510 7532 page_unlock(pp);
7511 7533 }
7512 7534 }
7513 7535
7514 7536 /* Gather virtual page information */
7515 7537 if (vpp) {
7516 7538 if (VPP_ISPPLOCK(vpp))
7517 7539 ret |= SEG_PAGE_LOCKED;
7518 7540 vpp++;
7519 7541 }
7520 7542
7521 7543 *vec++ = (char)ret;
7522 7544 }
7523 7545 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7524 7546 return (len);
7525 7547 }
7526 7548
7527 7549 /*
7528 7550 * Statement for p_cowcnts/p_lckcnts.
7529 7551 *
7530 7552 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
7531 7553 * irrespective of the following factors or anything else:
7532 7554 *
7533 7555 * (1) anon slots are populated or not
7534 7556 * (2) cow is broken or not
7535 7557 * (3) refcnt on ap is 1 or greater than 1
7536 7558 *
7537 7559 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
7538 7560 * and munlock.
7539 7561 *
7540 7562 *
7541 7563 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
7542 7564 *
7543 7565 * if vpage has PROT_WRITE
7544 7566 * transfer cowcnt on the oldpage -> cowcnt on the newpage
7545 7567 * else
7546 7568 * transfer lckcnt on the oldpage -> lckcnt on the newpage
7547 7569 *
7548 7570 * During copy-on-write, decrement p_cowcnt on the oldpage and increment
7549 7571 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
7550 7572 *
7551 7573 * We may also break COW if softlocking on read access in the physio case.
7552 7574 * In this case, vpage may not have PROT_WRITE. So, we need to decrement
7553 7575 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
7554 7576 * vpage doesn't have PROT_WRITE.
7555 7577 *
7556 7578 *
7557 7579 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
7558 7580 *
7559 7581 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
7560 7582 * increment p_lckcnt by calling page_subclaim() which takes care of
7561 7583 * availrmem accounting and p_lckcnt overflow.
7562 7584 *
7563 7585 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
7564 7586 * increment p_cowcnt by calling page_addclaim() which takes care of
7565 7587 * availrmem availability and p_cowcnt overflow.
7566 7588 */
7567 7589
7568 7590 /*
7569 7591 * Lock down (or unlock) pages mapped by this segment.
7570 7592 *
7571 7593 * XXX only creates PAGESIZE pages if anon slots are not initialized.
7572 7594 * At fault time they will be relocated into larger pages.
7573 7595 */
7574 7596 static int
7575 7597 segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
7576 7598 int attr, int op, ulong_t *lockmap, size_t pos)
7577 7599 {
7578 7600 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7579 7601 struct vpage *vpp;
7580 7602 struct vpage *evp;
7581 7603 page_t *pp;
7582 7604 u_offset_t offset;
7583 7605 u_offset_t off;
7584 7606 int segtype;
7585 7607 int pageprot;
7586 7608 int claim;
7587 7609 struct vnode *vp;
7588 7610 ulong_t anon_index;
7589 7611 struct anon_map *amp;
7590 7612 struct anon *ap;
7591 7613 struct vattr va;
7592 7614 anon_sync_obj_t cookie;
7593 7615 struct kshmid *sp = NULL;
7594 7616 struct proc *p = curproc;
7595 7617 kproject_t *proj = NULL;
7596 7618 int chargeproc = 1;
7597 7619 size_t locked_bytes = 0;
7598 7620 size_t unlocked_bytes = 0;
7599 7621 int err = 0;
7600 7622
7601 7623 /*
7602 7624 * Hold the write lock on the address space because we may split or
7603 7625 * concatenate segments.
7604 7626 */
7605 7627 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7606 7628
7607 7629 /*
7608 7630 * If this is a shm, use shm's project and zone, else use
7609 7631 * project and zone of calling process
7610 7632 */
7611 7633
7612 7634 /* Determine if this segment backs a sysV shm */
7613 7635 if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7614 7636 ASSERT(svd->type == MAP_SHARED);
7615 7637 ASSERT(svd->tr_state == SEGVN_TR_OFF);
7616 7638 sp = svd->amp->a_sp;
7617 7639 proj = sp->shm_perm.ipc_proj;
7618 7640 chargeproc = 0;
7619 7641 }
7620 7642
7621 7643 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7622 7644 if (attr) {
7623 7645 pageprot = attr & ~(SHARED|PRIVATE);
7624 7646 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE;
7625 7647
7626 7648 /*
7627 7649 * We are done if the segment types don't match
7628 7650 * or if we have segment level protections and
7629 7651 * they don't match.
7630 7652 */
7631 7653 if (svd->type != segtype) {
7632 7654 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7633 7655 return (0);
7634 7656 }
7635 7657 if (svd->pageprot == 0 && svd->prot != pageprot) {
7636 7658 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7637 7659 return (0);
7638 7660 }
7639 7661 }
7640 7662
7641 7663 if (op == MC_LOCK) {
7642 7664 if (svd->tr_state == SEGVN_TR_INIT) {
7643 7665 svd->tr_state = SEGVN_TR_OFF;
7644 7666 } else if (svd->tr_state == SEGVN_TR_ON) {
7645 7667 ASSERT(svd->amp != NULL);
7646 7668 segvn_textunrepl(seg, 0);
7647 7669 ASSERT(svd->amp == NULL &&
7648 7670 svd->tr_state == SEGVN_TR_OFF);
7649 7671 }
7650 7672 }
7651 7673
7652 7674 /*
7653 7675 * If we're locking, then we must create a vpage structure if
7654 7676 * none exists. If we're unlocking, then check to see if there
7655 7677 * is a vpage -- if not, then we could not have locked anything.
7656 7678 */
7657 7679
7658 7680 if ((vpp = svd->vpage) == NULL) {
7659 7681 if (op == MC_LOCK)
7660 7682 segvn_vpage(seg);
7661 7683 else {
7662 7684 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7663 7685 return (0);
7664 7686 }
7665 7687 }
7666 7688
7667 7689 /*
7668 7690 * The anonymous data vector (i.e., previously
7669 7691 * unreferenced mapping to swap space) can be allocated
7670 7692 * by lazily testing for its existence.
7671 7693 */
7672 7694 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
7673 7695 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
7674 7696 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
7675 7697 svd->amp->a_szc = seg->s_szc;
7676 7698 }
7677 7699
7678 7700 if ((amp = svd->amp) != NULL) {
7679 7701 anon_index = svd->anon_index + seg_page(seg, addr);
7680 7702 }
7681 7703
7682 7704 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7683 7705 evp = &svd->vpage[seg_page(seg, addr + len)];
7684 7706
7685 7707 if (sp != NULL)
7686 7708 mutex_enter(&sp->shm_mlock);
7687 7709
7688 7710 /* determine number of unlocked bytes in range for lock operation */
7689 7711 if (op == MC_LOCK) {
7690 7712
7691 7713 if (sp == NULL) {
7692 7714 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7693 7715 vpp++) {
7694 7716 if (!VPP_ISPPLOCK(vpp))
7695 7717 unlocked_bytes += PAGESIZE;
7696 7718 }
7697 7719 } else {
7698 7720 ulong_t i_idx, i_edx;
7699 7721 anon_sync_obj_t i_cookie;
7700 7722 struct anon *i_ap;
7701 7723 struct vnode *i_vp;
7702 7724 u_offset_t i_off;
7703 7725
7704 7726 /* Only count sysV pages once for locked memory */
7705 7727 i_edx = svd->anon_index + seg_page(seg, addr + len);
7706 7728 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7707 7729 for (i_idx = anon_index; i_idx < i_edx; i_idx++) {
7708 7730 anon_array_enter(amp, i_idx, &i_cookie);
7709 7731 i_ap = anon_get_ptr(amp->ahp, i_idx);
7710 7732 if (i_ap == NULL) {
7711 7733 unlocked_bytes += PAGESIZE;
7712 7734 anon_array_exit(&i_cookie);
7713 7735 continue;
7714 7736 }
7715 7737 swap_xlate(i_ap, &i_vp, &i_off);
7716 7738 anon_array_exit(&i_cookie);
7717 7739 pp = page_lookup(i_vp, i_off, SE_SHARED);
7718 7740 if (pp == NULL) {
7719 7741 unlocked_bytes += PAGESIZE;
7720 7742 continue;
7721 7743 } else if (pp->p_lckcnt == 0)
7722 7744 unlocked_bytes += PAGESIZE;
7723 7745 page_unlock(pp);
7724 7746 }
7725 7747 ANON_LOCK_EXIT(&amp->a_rwlock);
7726 7748 }
7727 7749
7728 7750 mutex_enter(&p->p_lock);
7729 7751 err = rctl_incr_locked_mem(p, proj, unlocked_bytes,
7730 7752 chargeproc);
7731 7753 mutex_exit(&p->p_lock);
7732 7754
7733 7755 if (err) {
7734 7756 if (sp != NULL)
7735 7757 mutex_exit(&sp->shm_mlock);
7736 7758 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7737 7759 return (err);
7738 7760 }
7739 7761 }
7740 7762 /*
7741 7763 * Loop over all pages in the range. Process if we're locking and
7742 7764 * page has not already been locked in this mapping; or if we're
7743 7765 * unlocking and the page has been locked.
7744 7766 */
7745 7767 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7746 7768 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
7747 7769 if ((attr == 0 || VPP_PROT(vpp) == pageprot) &&
7748 7770 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) ||
7749 7771 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) {
7750 7772
7751 7773 if (amp != NULL)
7752 7774 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7753 7775 /*
7754 7776 * If this isn't a MAP_NORESERVE segment and
7755 7777 * we're locking, allocate anon slots if they
7756 7778 * don't exist. The page is brought in later on.
7757 7779 */
7758 7780 if (op == MC_LOCK && svd->vp == NULL &&
7759 7781 ((svd->flags & MAP_NORESERVE) == 0) &&
7760 7782 amp != NULL &&
7761 7783 ((ap = anon_get_ptr(amp->ahp, anon_index))
7762 7784 == NULL)) {
7763 7785 anon_array_enter(amp, anon_index, &cookie);
7764 7786
7765 7787 if ((ap = anon_get_ptr(amp->ahp,
7766 7788 anon_index)) == NULL) {
7767 7789 pp = anon_zero(seg, addr, &ap,
7768 7790 svd->cred);
7769 7791 if (pp == NULL) {
7770 7792 anon_array_exit(&cookie);
7771 7793 ANON_LOCK_EXIT(&amp->a_rwlock);
7772 7794 err = ENOMEM;
7773 7795 goto out;
7774 7796 }
7775 7797 ASSERT(anon_get_ptr(amp->ahp,
7776 7798 anon_index) == NULL);
7777 7799 (void) anon_set_ptr(amp->ahp,
7778 7800 anon_index, ap, ANON_SLEEP);
7779 7801 page_unlock(pp);
7780 7802 }
7781 7803 anon_array_exit(&cookie);
7782 7804 }
7783 7805
7784 7806 /*
7785 7807 * Get name for page, accounting for
7786 7808 * existence of private copy.
7787 7809 */
7788 7810 ap = NULL;
7789 7811 if (amp != NULL) {
7790 7812 anon_array_enter(amp, anon_index, &cookie);
7791 7813 ap = anon_get_ptr(amp->ahp, anon_index);
7792 7814 if (ap != NULL) {
7793 7815 swap_xlate(ap, &vp, &off);
7794 7816 } else {
7795 7817 if (svd->vp == NULL &&
7796 7818 (svd->flags & MAP_NORESERVE)) {
7797 7819 anon_array_exit(&cookie);
7798 7820 ANON_LOCK_EXIT(&amp->a_rwlock);
7799 7821 continue;
7800 7822 }
7801 7823 vp = svd->vp;
7802 7824 off = offset;
7803 7825 }
7804 7826 if (op != MC_LOCK || ap == NULL) {
7805 7827 anon_array_exit(&cookie);
7806 7828 ANON_LOCK_EXIT(&amp->a_rwlock);
7807 7829 }
7808 7830 } else {
7809 7831 vp = svd->vp;
7810 7832 off = offset;
7811 7833 }
7812 7834
7813 7835 /*
7814 7836 * Get page frame. It's ok if the page is
7815 7837 * not available when we're unlocking, as this
7816 7838 * may simply mean that a page we locked got
7817 7839 * truncated out of existence after we locked it.
7818 7840 *
7819 7841 * Invoke VOP_GETPAGE() to obtain the page struct
7820 7842 * since we may need to read it from disk if its
7821 7843 * been paged out.
7822 7844 */
7823 7845 if (op != MC_LOCK)
7824 7846 pp = page_lookup(vp, off, SE_SHARED);
7825 7847 else {
7826 7848 page_t *pl[1 + 1];
7827 7849 int error;
7828 7850
7829 7851 ASSERT(vp != NULL);
7830 7852
7831 7853 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
7832 7854 (uint_t *)NULL, pl, PAGESIZE, seg, addr,
7833 7855 S_OTHER, svd->cred, NULL);
7834 7856
7835 7857 if (error && ap != NULL) {
7836 7858 anon_array_exit(&cookie);
7837 7859 ANON_LOCK_EXIT(&amp->a_rwlock);
7838 7860 }
7839 7861
7840 7862 /*
7841 7863 * If the error is EDEADLK then we must bounce
7842 7864 * up and drop all vm subsystem locks and then
7843 7865 * retry the operation later.
7844 7866 * This behavior is a temporary measure because
7845 7867 * ufs/sds logging is badly designed and will
7846 7868 * deadlock if we don't allow this bounce to
7847 7869 * happen. The real solution is to re-design
7848 7870 * the logging code to work properly. See bug
7849 7871 * 4125102 for details of the problem.
7850 7872 */
7851 7873 if (error == EDEADLK) {
7852 7874 err = error;
7853 7875 goto out;
7854 7876 }
7855 7877 /*
7856 7878 * Quit if we fail to fault in the page. Treat
7857 7879 * the failure as an error, unless the addr
7858 7880 * is mapped beyond the end of a file.
7859 7881 */
7860 7882 if (error && svd->vp) {
7861 7883 va.va_mask = AT_SIZE;
7862 7884 if (VOP_GETATTR(svd->vp, &va, 0,
7863 7885 svd->cred, NULL) != 0) {
7864 7886 err = EIO;
7865 7887 goto out;
7866 7888 }
7867 7889 if (btopr(va.va_size) >=
7868 7890 btopr(off + 1)) {
7869 7891 err = EIO;
7870 7892 goto out;
7871 7893 }
7872 7894 goto out;
7873 7895
7874 7896 } else if (error) {
7875 7897 err = EIO;
7876 7898 goto out;
7877 7899 }
7878 7900 pp = pl[0];
7879 7901 ASSERT(pp != NULL);
7880 7902 }
7881 7903
7882 7904 /*
7883 7905 * See Statement at the beginning of this routine.
7884 7906 *
7885 7907 * claim is always set if MAP_PRIVATE and PROT_WRITE
7886 7908 * irrespective of following factors:
7887 7909 *
7888 7910 * (1) anon slots are populated or not
7889 7911 * (2) cow is broken or not
7890 7912 * (3) refcnt on ap is 1 or greater than 1
7891 7913 *
7892 7914 * See 4140683 for details
7893 7915 */
7894 7916 claim = ((VPP_PROT(vpp) & PROT_WRITE) &&
7895 7917 (svd->type == MAP_PRIVATE));
7896 7918
7897 7919 /*
7898 7920 * Perform page-level operation appropriate to
7899 7921 * operation. If locking, undo the SOFTLOCK
7900 7922 * performed to bring the page into memory
7901 7923 * after setting the lock. If unlocking,
7902 7924 * and no page was found, account for the claim
7903 7925 * separately.
7904 7926 */
7905 7927 if (op == MC_LOCK) {
7906 7928 int ret = 1; /* Assume success */
7907 7929
7908 7930 ASSERT(!VPP_ISPPLOCK(vpp));
7909 7931
7910 7932 ret = page_pp_lock(pp, claim, 0);
7911 7933 if (ap != NULL) {
7912 7934 if (ap->an_pvp != NULL) {
7913 7935 anon_swap_free(ap, pp);
7914 7936 }
7915 7937 anon_array_exit(&cookie);
7916 7938 ANON_LOCK_EXIT(&amp->a_rwlock);
7917 7939 }
7918 7940 if (ret == 0) {
7919 7941 /* locking page failed */
7920 7942 page_unlock(pp);
7921 7943 err = EAGAIN;
7922 7944 goto out;
7923 7945 }
7924 7946 VPP_SETPPLOCK(vpp);
7925 7947 if (sp != NULL) {
7926 7948 if (pp->p_lckcnt == 1)
7927 7949 locked_bytes += PAGESIZE;
7928 7950 } else
7929 7951 locked_bytes += PAGESIZE;
7930 7952
7931 7953 if (lockmap != (ulong_t *)NULL)
7932 7954 BT_SET(lockmap, pos);
7933 7955
7934 7956 page_unlock(pp);
7935 7957 } else {
7936 7958 ASSERT(VPP_ISPPLOCK(vpp));
7937 7959 if (pp != NULL) {
7938 7960 /* sysV pages should be locked */
7939 7961 ASSERT(sp == NULL || pp->p_lckcnt > 0);
7940 7962 page_pp_unlock(pp, claim, 0);
7941 7963 if (sp != NULL) {
7942 7964 if (pp->p_lckcnt == 0)
7943 7965 unlocked_bytes
7944 7966 += PAGESIZE;
7945 7967 } else
7946 7968 unlocked_bytes += PAGESIZE;
7947 7969 page_unlock(pp);
7948 7970 } else {
7949 7971 ASSERT(sp == NULL);
7950 7972 unlocked_bytes += PAGESIZE;
7951 7973 }
7952 7974 VPP_CLRPPLOCK(vpp);
7953 7975 }
7954 7976 }
7955 7977 }
7956 7978 out:
7957 7979 if (op == MC_LOCK) {
7958 7980 /* Credit back bytes that did not get locked */
7959 7981 if ((unlocked_bytes - locked_bytes) > 0) {
7960 7982 if (proj == NULL)
7961 7983 mutex_enter(&p->p_lock);
7962 7984 rctl_decr_locked_mem(p, proj,
7963 7985 (unlocked_bytes - locked_bytes), chargeproc);
7964 7986 if (proj == NULL)
7965 7987 mutex_exit(&p->p_lock);
7966 7988 }
7967 7989
7968 7990 } else {
7969 7991 /* Account bytes that were unlocked */
7970 7992 if (unlocked_bytes > 0) {
7971 7993 if (proj == NULL)
7972 7994 mutex_enter(&p->p_lock);
7973 7995 rctl_decr_locked_mem(p, proj, unlocked_bytes,
7974 7996 chargeproc);
7975 7997 if (proj == NULL)
7976 7998 mutex_exit(&p->p_lock);
7977 7999 }
7978 8000 }
7979 8001 if (sp != NULL)
7980 8002 mutex_exit(&sp->shm_mlock);
7981 8003 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7982 8004
7983 8005 return (err);
7984 8006 }
7985 8007
7986 8008 /*
7987 8009 * Set advice from user for specified pages
7988 8010 * There are 9 types of advice:
7989 8011 * MADV_NORMAL - Normal (default) behavior (whatever that is)
7990 8012 * MADV_RANDOM - Random page references
7991 8013 * do not allow readahead or 'klustering'
7992 8014 * MADV_SEQUENTIAL - Sequential page references
7993 8015 * Pages previous to the one currently being
7994 8016 * accessed (determined by fault) are 'not needed'
7995 8017 * and are freed immediately
7996 8018 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl)
7997 8019 * MADV_DONTNEED - Pages are not needed (synced out in mctl)
7998 8020 * MADV_FREE - Contents can be discarded
7999 8021 * MADV_ACCESS_DEFAULT- Default access
8000 8022 * MADV_ACCESS_LWP - Next LWP will access heavily
8001 8023 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily
8002 8024 */
8003 8025 static int
8004 8026 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8005 8027 {
8006 8028 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8007 8029 size_t page;
8008 8030 int err = 0;
8009 8031 int already_set;
8010 8032 struct anon_map *amp;
8011 8033 ulong_t anon_index;
8012 8034 struct seg *next;
8013 8035 lgrp_mem_policy_t policy;
8014 8036 struct seg *prev;
8015 8037 struct vnode *vp;
8016 8038
8017 8039 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8018 8040
8019 8041 /*
8020 8042 * In case of MADV_FREE, we won't be modifying any segment private
8021 8043 * data structures; so, we only need to grab READER's lock
8022 8044 */
8023 8045 if (behav != MADV_FREE) {
8024 8046 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8025 8047 if (svd->tr_state != SEGVN_TR_OFF) {
8026 8048 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8027 8049 return (0);
8028 8050 }
8029 8051 } else {
8030 8052 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8031 8053 }
8032 8054
8033 8055 /*
8034 8056 * Large pages are assumed to be only turned on when accesses to the
8035 8057 * segment's address range have spatial and temporal locality. That
8036 8058 * justifies ignoring MADV_SEQUENTIAL for large page segments.
8037 8059 * Also, ignore advice affecting lgroup memory allocation
8038 8060 * if don't need to do lgroup optimizations on this system
8039 8061 * if we don't need to do lgroup optimizations on this system.
8040 8062
8041 8063 if ((behav == MADV_SEQUENTIAL &&
8042 8064 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
8043 8065 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
8044 8066 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
8045 8067 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8046 8068 return (0);
8047 8069 }
8048 8070
8049 8071 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
8050 8072 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
8051 8073 /*
8052 8074 * Since we are going to unload hat mappings
8053 8075 * we first have to flush the cache. Otherwise
8054 8076 * this might lead to system panic if another
8055 8077 * thread is doing physio on the range whose
8056 8078 * mappings are unloaded by madvise(3C).
8057 8079 */
8058 8080 if (svd->softlockcnt > 0) {
8059 8081 /*
8060 8082 * If this is a shared segment, a non-zero softlockcnt
8061 8083 * means locked pages are still in use.
8062 8084 */
8063 8085 if (svd->type == MAP_SHARED) {
8064 8086 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8065 8087 return (EAGAIN);
8066 8088 }
8067 8089 /*
8068 8090 * Since we do have the segvn writers lock
8069 8091 * nobody can fill the cache with entries
8070 8092 * belonging to this seg during the purge.
8071 8093 * The flush either succeeds or we still
8072 8094 * have pending I/Os. In the latter case,
8073 8095 * madvise(3C) fails.
8074 8096 */
8075 8097 segvn_purge(seg);
8076 8098 if (svd->softlockcnt > 0) {
8077 8099 /*
8078 8100 * Since madvise(3C) is advisory and
8079 8101 * it's not part of UNIX98, madvise(3C)
8080 8102 * failure here doesn't cause any hardship.
8081 8103 * Note that we don't block in "as" layer.
8082 8104 */
8083 8105 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8084 8106 return (EAGAIN);
8085 8107 }
8086 8108 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
8087 8109 svd->amp->a_softlockcnt > 0) {
8088 8110 /*
8089 8111 * Try to purge this amp's entries from pcache. It
8090 8112 * will succeed only if other segments that share the
8091 8113 * amp have no outstanding softlock's.
8092 8114 */
8093 8115 segvn_purge(seg);
8094 8116 }
8095 8117 }
8096 8118
8097 8119 amp = svd->amp;
8098 8120 vp = svd->vp;
8099 8121 if (behav == MADV_FREE) {
8100 8122 /*
8101 8123 * MADV_FREE is not supported for segments with an
8102 8124 * underlying object; if the anonmap is NULL, anon slots
8103 8125 * are not yet populated and there is nothing for
8104 8126 * us to do. As MADV_FREE is advisory, we don't
8105 8127 * return an error in either case.
8106 8128 */
8107 8129 if (vp != NULL || amp == NULL) {
8108 8130 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8109 8131 return (0);
8110 8132 }
8111 8133
8112 8134 segvn_purge(seg);
8113 8135
8114 8136 page = seg_page(seg, addr);
8115 8137 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8116 8138 anon_disclaim(amp, svd->anon_index + page, len);
8117 8139 ANON_LOCK_EXIT(&amp->a_rwlock);
8118 8140 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8119 8141 return (0);
8120 8142 }
8121 8143
8122 8144 /*
8123 8145 * If the advice is to be applied to the entire segment,
8124 8146 * use the advice field in the seg_data structure;
8125 8147 * otherwise use the appropriate vpage entry.
8126 8148 */
8127 8149 if ((addr == seg->s_base) && (len == seg->s_size)) {
8128 8150 switch (behav) {
8129 8151 case MADV_ACCESS_LWP:
8130 8152 case MADV_ACCESS_MANY:
8131 8153 case MADV_ACCESS_DEFAULT:
8132 8154 /*
8133 8155 * Set memory allocation policy for this segment
8134 8156 */
8135 8157 policy = lgrp_madv_to_policy(behav, len, svd->type);
8136 8158 if (svd->type == MAP_SHARED)
8137 8159 already_set = lgrp_shm_policy_set(policy, amp,
8138 8160 svd->anon_index, vp, svd->offset, len);
8139 8161 else {
8140 8162 /*
8141 8163 * For private memory, need writers lock on
8142 8164 * address space because the segment may be
8143 8165 * split or concatenated when changing policy
8144 8166 */
8145 8167 if (AS_READ_HELD(seg->s_as,
8146 8168 &seg->s_as->a_lock)) {
8147 8169 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8148 8170 return (IE_RETRY);
8149 8171 }
8150 8172
8151 8173 already_set = lgrp_privm_policy_set(policy,
8152 8174 &svd->policy_info, len);
8153 8175 }
8154 8176
8155 8177 /*
8156 8178 * If policy set already and it shouldn't be reapplied,
8157 8179 * don't do anything.
8158 8180 */
8159 8181 if (already_set &&
8160 8182 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8161 8183 break;
8162 8184
8163 8185 /*
8164 8186 * Mark any existing pages in given range for
8165 8187 * migration
8166 8188 */
8167 8189 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8168 8190 vp, svd->offset, 1);
8169 8191
8170 8192 /*
8171 8193 * If same policy set already or this is a shared
8172 8194 * memory segment, don't need to try to concatenate
8173 8195 * segment with adjacent ones.
8174 8196 */
8175 8197 if (already_set || svd->type == MAP_SHARED)
8176 8198 break;
8177 8199
8178 8200 /*
8179 8201 * Try to concatenate this segment with previous
8180 8202 * one and next one, since we changed policy for
8181 8203 * this one and it may be compatible with adjacent
8182 8204 * ones now.
8183 8205 */
8184 8206 prev = AS_SEGPREV(seg->s_as, seg);
8185 8207 next = AS_SEGNEXT(seg->s_as, seg);
8186 8208
8187 8209 if (next && next->s_ops == &segvn_ops &&
8188 8210 addr + len == next->s_base)
8189 8211 (void) segvn_concat(seg, next, 1);
8190 8212
8191 8213 if (prev && prev->s_ops == &segvn_ops &&
8192 8214 addr == prev->s_base + prev->s_size) {
8193 8215 /*
8194 8216 * Drop lock for private data of current
8195 8217 * segment before concatenating (deleting) it
8196 8218 * and return IE_REATTACH to tell as_ctl() that
8197 8219 * current segment has changed
8198 8220 */
8199 8221 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8200 8222 if (!segvn_concat(prev, seg, 1))
8201 8223 err = IE_REATTACH;
8202 8224
8203 8225 return (err);
8204 8226 }
8205 8227 break;
8206 8228
8207 8229 case MADV_SEQUENTIAL:
8208 8230 /*
8209 8231 * unloading mapping guarantees
8210 8232 * detection in segvn_fault
8211 8233 */
8212 8234 ASSERT(seg->s_szc == 0);
8213 8235 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8214 8236 hat_unload(seg->s_as->a_hat, addr, len,
8215 8237 HAT_UNLOAD);
8216 8238 /* FALLTHROUGH */
8217 8239 case MADV_NORMAL:
8218 8240 case MADV_RANDOM:
8219 8241 svd->advice = (uchar_t)behav;
8220 8242 svd->pageadvice = 0;
8221 8243 break;
8222 8244 case MADV_WILLNEED: /* handled in memcntl */
8223 8245 case MADV_DONTNEED: /* handled in memcntl */
8224 8246 case MADV_FREE: /* handled above */
8225 8247 break;
8226 8248 default:
8227 8249 err = EINVAL;
8228 8250 }
8229 8251 } else {
8230 8252 caddr_t eaddr;
8231 8253 struct seg *new_seg;
8232 8254 struct segvn_data *new_svd;
8233 8255 u_offset_t off;
8234 8256 caddr_t oldeaddr;
8235 8257
8236 8258 page = seg_page(seg, addr);
8237 8259
8238 8260 segvn_vpage(seg);
8239 8261
8240 8262 switch (behav) {
8241 8263 struct vpage *bvpp, *evpp;
8242 8264
8243 8265 case MADV_ACCESS_LWP:
8244 8266 case MADV_ACCESS_MANY:
8245 8267 case MADV_ACCESS_DEFAULT:
8246 8268 /*
8247 8269 * Set memory allocation policy for portion of this
8248 8270 * segment
8249 8271 */
8250 8272
8251 8273 /*
8252 8274 * Align address and length of advice to page
8253 8275 * boundaries for large pages
8254 8276 */
8255 8277 if (seg->s_szc != 0) {
8256 8278 size_t pgsz;
8257 8279
8258 8280 pgsz = page_get_pagesize(seg->s_szc);
8259 8281 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
8260 8282 len = P2ROUNDUP(len, pgsz);
8261 8283 }
8262 8284
8263 8285 /*
8264 8286 * Check to see whether policy is set already
8265 8287 */
8266 8288 policy = lgrp_madv_to_policy(behav, len, svd->type);
8267 8289
8268 8290 anon_index = svd->anon_index + page;
8269 8291 off = svd->offset + (uintptr_t)(addr - seg->s_base);
8270 8292
8271 8293 if (svd->type == MAP_SHARED)
8272 8294 already_set = lgrp_shm_policy_set(policy, amp,
8273 8295 anon_index, vp, off, len);
8274 8296 else
8275 8297 already_set =
8276 8298 (policy == svd->policy_info.mem_policy);
8277 8299
8278 8300 /*
8279 8301 * If policy set already and it shouldn't be reapplied,
8280 8302 * don't do anything.
8281 8303 */
8282 8304 if (already_set &&
8283 8305 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8284 8306 break;
8285 8307
8286 8308 /*
8287 8309 * For private memory, need writers lock on
8288 8310 * address space because the segment may be
8289 8311 * split or concatenated when changing policy
8290 8312 */
8291 8313 if (svd->type == MAP_PRIVATE &&
8292 8314 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) {
8293 8315 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8294 8316 return (IE_RETRY);
8295 8317 }
8296 8318
8297 8319 /*
8298 8320 * Mark any existing pages in given range for
8299 8321 * migration
8300 8322 */
8301 8323 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8302 8324 vp, svd->offset, 1);
8303 8325
8304 8326 /*
8305 8327 * Don't need to try to split or concatenate
8306 8328 * segments, since policy is same or this is a shared
8307 8329 * memory segment
8308 8330 */
8309 8331 if (already_set || svd->type == MAP_SHARED)
8310 8332 break;
8311 8333
8312 8334 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
8313 8335 ASSERT(svd->amp == NULL);
8314 8336 ASSERT(svd->tr_state == SEGVN_TR_OFF);
8315 8337 ASSERT(svd->softlockcnt == 0);
8316 8338 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
8317 8339 HAT_REGION_TEXT);
8318 8340 svd->rcookie = HAT_INVALID_REGION_COOKIE;
8319 8341 }
8320 8342
8321 8343 /*
8322 8344 * Split off new segment if advice only applies to a
8323 8345 * portion of existing segment starting in middle
8324 8346 */
8325 8347 new_seg = NULL;
8326 8348 eaddr = addr + len;
8327 8349 oldeaddr = seg->s_base + seg->s_size;
8328 8350 if (addr > seg->s_base) {
8329 8351 /*
8330 8352 * Must flush I/O page cache
8331 8353 * before splitting segment
8332 8354 */
8333 8355 if (svd->softlockcnt > 0)
8334 8356 segvn_purge(seg);
8335 8357
8336 8358 /*
8337 8359 * Split segment and return IE_REATTACH to tell
8338 8360 * as_ctl() that current segment changed
8339 8361 */
8340 8362 new_seg = segvn_split_seg(seg, addr);
8341 8363 new_svd = (struct segvn_data *)new_seg->s_data;
8342 8364 err = IE_REATTACH;
8343 8365
8344 8366 /*
8345 8367 * If new segment ends where old one
8346 8368 * did, try to concatenate the new
8347 8369 * segment with next one.
8348 8370 */
8349 8371 if (eaddr == oldeaddr) {
8350 8372 /*
8351 8373 * Set policy for new segment
8352 8374 */
8353 8375 (void) lgrp_privm_policy_set(policy,
8354 8376 &new_svd->policy_info,
8355 8377 new_seg->s_size);
8356 8378
8357 8379 next = AS_SEGNEXT(new_seg->s_as,
8358 8380 new_seg);
8359 8381
8360 8382 if (next &&
8361 8383 next->s_ops == &segvn_ops &&
8362 8384 eaddr == next->s_base)
8363 8385 (void) segvn_concat(new_seg,
8364 8386 next, 1);
8365 8387 }
8366 8388 }
8367 8389
8368 8390 /*
8369 8391 * Split off end of existing segment if advice only
8370 8392 * applies to a portion of segment ending before
8371 8393 * end of the existing segment
8372 8394 */
8373 8395 if (eaddr < oldeaddr) {
8374 8396 /*
8375 8397 * Must flush I/O page cache
8376 8398 * before splitting segment
8377 8399 */
8378 8400 if (svd->softlockcnt > 0)
8379 8401 segvn_purge(seg);
8380 8402
8381 8403 /*
8382 8404 * If beginning of old segment was already
8383 8405 * split off, use new segment to split end off
8384 8406 * from.
8385 8407 */
8386 8408 if (new_seg != NULL && new_seg != seg) {
8387 8409 /*
8388 8410 * Split segment
8389 8411 */
8390 8412 (void) segvn_split_seg(new_seg, eaddr);
8391 8413
8392 8414 /*
8393 8415 * Set policy for new segment
8394 8416 */
8395 8417 (void) lgrp_privm_policy_set(policy,
8396 8418 &new_svd->policy_info,
8397 8419 new_seg->s_size);
8398 8420 } else {
8399 8421 /*
8400 8422 * Split segment and return IE_REATTACH
8401 8423 * to tell as_ctl() that current
8402 8424 * segment changed
8403 8425 */
8404 8426 (void) segvn_split_seg(seg, eaddr);
8405 8427 err = IE_REATTACH;
8406 8428
8407 8429 (void) lgrp_privm_policy_set(policy,
8408 8430 &svd->policy_info, seg->s_size);
8409 8431
8410 8432 /*
8411 8433 * If new segment starts where old one
8412 8434 * did, try to concatenate it with
8413 8435 * previous segment.
8414 8436 */
8415 8437 if (addr == seg->s_base) {
8416 8438 prev = AS_SEGPREV(seg->s_as,
8417 8439 seg);
8418 8440
8419 8441 /*
8420 8442 * Drop lock for private data
8421 8443 * of current segment before
8422 8444 * concatenating (deleting) it
8423 8445 */
8424 8446 if (prev &&
8425 8447 prev->s_ops ==
8426 8448 &segvn_ops &&
8427 8449 addr == prev->s_base +
8428 8450 prev->s_size) {
8429 8451 SEGVN_LOCK_EXIT(
8430 8452 seg->s_as,
8431 8453 &svd->lock);
8432 8454 (void) segvn_concat(
8433 8455 prev, seg, 1);
8434 8456 return (err);
8435 8457 }
8436 8458 }
8437 8459 }
8438 8460 }
8439 8461 break;
8440 8462 case MADV_SEQUENTIAL:
8441 8463 ASSERT(seg->s_szc == 0);
8442 8464 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8443 8465 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8444 8466 /* FALLTHROUGH */
8445 8467 case MADV_NORMAL:
8446 8468 case MADV_RANDOM:
8447 8469 bvpp = &svd->vpage[page];
8448 8470 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8449 8471 for (; bvpp < evpp; bvpp++)
8450 8472 VPP_SETADVICE(bvpp, behav);
8451 8473 svd->advice = MADV_NORMAL;
8452 8474 break;
8453 8475 case MADV_WILLNEED: /* handled in memcntl */
8454 8476 case MADV_DONTNEED: /* handled in memcntl */
8455 8477 case MADV_FREE: /* handled above */
8456 8478 break;
8457 8479 default:
8458 8480 err = EINVAL;
8459 8481 }
8460 8482 }
8461 8483 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8462 8484 return (err);
8463 8485 }
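
For context, segvn_advise() is reached from userland through madvise(3C)/memcntl(2). Below is a minimal userland sketch (assuming the illumos-specific MADV_ACCESS_* advices from <sys/mman.h>; error handling kept minimal) that would exercise both the whole-segment path and the sub-range/vpage path handled above:

#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 64 * 1024;
	caddr_t buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return (1);
	}

	/* Whole-segment advice: sets the lgroup allocation policy. */
	if (madvise(buf, len, MADV_ACCESS_LWP) != 0)
		perror("madvise(MADV_ACCESS_LWP)");

	/* Sub-range advice: forces per-page vpage bookkeeping. */
	if (madvise(buf + 8192, 16384, MADV_SEQUENTIAL) != 0)
		perror("madvise(MADV_SEQUENTIAL)");

	(void) munmap(buf, len);
	return (0);
}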
8464 8486
8465 8487 /*
8466 8488 * Create a vpage structure for this seg.
8467 8489 */
8468 8490 static void
8469 8491 segvn_vpage(struct seg *seg)
8470 8492 {
8471 8493 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8472 8494 struct vpage *vp, *evp;
8473 8495
8474 8496 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8475 8497
8476 8498 /*
8477 8499 * If no vpage structure exists, allocate one. Copy the protections
8478 8500 * and the advice from the segment itself to the individual pages.
8479 8501 */
8480 8502 if (svd->vpage == NULL) {
8481 8503 svd->pageadvice = 1;
8482 8504 svd->vpage = kmem_zalloc(seg_pages(seg) * sizeof (struct vpage),
8483 8505 KM_SLEEP);
8484 8506 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8485 8507 for (vp = svd->vpage; vp < evp; vp++) {
8486 8508 VPP_SETPROT(vp, svd->prot);
8487 8509 VPP_SETADVICE(vp, svd->advice);
8488 8510 }
8489 8511 }
8490 8512 }
8491 8513
8492 8514 /*
8493 8515 * Dump the pages belonging to this segvn segment.
8494 8516 */
8495 8517 static void
8496 8518 segvn_dump(struct seg *seg)
8497 8519 {
8498 8520 struct segvn_data *svd;
8499 8521 page_t *pp;
8500 8522 struct anon_map *amp;
8501 8523 ulong_t anon_index;
8502 8524 struct vnode *vp;
8503 8525 u_offset_t off, offset;
8504 8526 pfn_t pfn;
8505 8527 pgcnt_t page, npages;
8506 8528 caddr_t addr;
8507 8529
8508 8530 npages = seg_pages(seg);
8509 8531 svd = (struct segvn_data *)seg->s_data;
8510 8532 vp = svd->vp;
8511 8533 off = offset = svd->offset;
8512 8534 addr = seg->s_base;
8513 8535
8514 8536 if ((amp = svd->amp) != NULL) {
8515 8537 anon_index = svd->anon_index;
8516 8538 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8517 8539 }
8518 8540
8519 8541 for (page = 0; page < npages; page++, offset += PAGESIZE) {
8520 8542 struct anon *ap;
8521 8543 int we_own_it = 0;
8522 8544
8523 8545 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8524 8546 swap_xlate_nopanic(ap, &vp, &off);
8525 8547 } else {
8526 8548 vp = svd->vp;
8527 8549 off = offset;
8528 8550 }
8529 8551
8530 8552 /*
8531 8553 * If pp == NULL, the page either does not exist
8532 8554 * or is exclusively locked. So determine if it
8533 8555 * exists before searching for it.
8534 8556 */
8535 8557
8536 8558 if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
8537 8559 we_own_it = 1;
8538 8560 else
8539 8561 pp = page_exists(vp, off);
8540 8562
8541 8563 if (pp) {
8542 8564 pfn = page_pptonum(pp);
8543 8565 dump_addpage(seg->s_as, addr, pfn);
8544 8566 if (we_own_it)
8545 8567 page_unlock(pp);
8546 8568 }
8547 8569 addr += PAGESIZE;
8548 8570 dump_timeleft = dump_timeout;
8549 8571 }
8550 8572
8551 8573 if (amp != NULL)
8552 8574 		ANON_LOCK_EXIT(&amp->a_rwlock);
8553 8575 }
8554 8576
8555 8577 #ifdef DEBUG
8556 8578 static uint32_t segvn_pglock_mtbf = 0;
8557 8579 #endif
8558 8580
8559 8581 #define PCACHE_SHWLIST ((page_t *)-2)
8560 8582 #define NOPCACHE_SHWLIST ((page_t *)-1)
8561 8583
8562 8584 /*
8563 8585 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
8564 8586 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
8565 8587 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
8566 8588 * the same parts of the segment. Currently shadow list creation is only
8567 8589 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
8568 8590 * tagged with segment pointer, starting virtual address and length. This
8569 8591 * approach for MAP_SHARED segments may add many pcache entries for the same
8570 8592 * set of pages and lead to long hash chains that decrease pcache lookup
8571 8593 * performance. To avoid this issue for shared segments shared anon map and
8572 8594 * starting anon index are used for pcache entry tagging. This allows all
8573 8595 * segments to share pcache entries for the same anon range and reduces pcache
8574 8596 * chain's length as well as memory overhead from duplicate shadow lists and
8575 8597 * pcache entries.
8576 8598 *
8577 8599 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
8578 8600 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
8579 8601 * part of softlockcnt accounting is done differently for private and shared
8580 8602 * segments. In private segment case softlock is only incremented when a new
8581 8603 * shadow list is created but not when an existing one is found via
8582 8604 * seg_plookup(). pcache entries have reference count incremented/decremented
8583 8605 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
8584 8606 * reference count can be purged (and purging is needed before segment can be
8585 8607 * freed). When a private segment pcache entry is purged segvn_reclaim() will
8586 8608 * decrement softlockcnt. Since in private segment case each of its pcache
8587 8609 * entries only belongs to this segment we can expect that when
8588 8610 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8589 8611 * segment purge will succeed and softlockcnt will drop to 0. In shared
8590 8612 * segment case reference count in pcache entry counts active locks from many
8591 8613 * different segments so we can't expect segment purging to succeed even when
8592 8614 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8593 8615 * segment. To be able to determine when there're no pending pagelocks in
8594 8616 * shared segment case we don't rely on purging to make softlockcnt drop to 0
8595 8617 * but instead softlockcnt is incremented and decremented for every
8596 8618 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
8597 8619 * list was created or an existing one was found. When softlockcnt drops to 0
8598 8620 * this segment no longer has any claims for pcached shadow lists and the
8599 8621 * segment can be freed even if there're still active pcache entries
8600 8622 * shared by this segment anon map. Shared segment pcache entries belong to
8601 8623 * anon map and are typically removed when anon map is freed after all
8602 8624 * processes destroy the segments that use this anon map.
8603 8625 */
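
To make the tagging rule above concrete, here is a standalone toy sketch (invented names and simplified types, not the kernel API) of how the tag pair is chosen for shared versus private segments, mirroring the pamp/paddr setup inside segvn_pagelock() below:

#include <stddef.h>
#include <stdint.h>

/* hypothetical illustration of the pcache tagging rule described above */
struct pcache_tag {
	void		*pt_tag;	/* anon map (shared) or segment (private) */
	uintptr_t	pt_addr;	/* amp-relative offset or virtual address */
};

static struct pcache_tag
pick_pcache_tag(int is_shared, void *amp, void *seg, uintptr_t vaddr,
    uintptr_t seg_base, size_t anon_index, size_t pagesize)
{
	struct pcache_tag t;

	if (is_shared) {
		/* shared: tag by anon map + anon-index-derived offset */
		t.pt_tag = amp;
		t.pt_addr = (vaddr - seg_base) + anon_index * pagesize;
	} else {
		/* private: tag by segment + virtual address */
		t.pt_tag = seg;
		t.pt_addr = vaddr;
	}
	return (t);
}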
8604 8626 static int
8605 8627 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
8606 8628 enum lock_type type, enum seg_rw rw)
8607 8629 {
8608 8630 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8609 8631 size_t np;
8610 8632 pgcnt_t adjustpages;
8611 8633 pgcnt_t npages;
8612 8634 ulong_t anon_index;
8613 8635 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE;
8614 8636 uint_t error;
8615 8637 struct anon_map *amp;
8616 8638 pgcnt_t anpgcnt;
8617 8639 struct page **pplist, **pl, *pp;
8618 8640 caddr_t a;
8619 8641 size_t page;
8620 8642 caddr_t lpgaddr, lpgeaddr;
8621 8643 anon_sync_obj_t cookie;
8622 8644 int anlock;
8623 8645 struct anon_map *pamp;
8624 8646 caddr_t paddr;
8625 8647 seg_preclaim_cbfunc_t preclaim_callback;
8626 8648 size_t pgsz;
8627 8649 int use_pcache;
8628 8650 size_t wlen;
8629 8651 uint_t pflags = 0;
8630 8652 int sftlck_sbase = 0;
8631 8653 int sftlck_send = 0;
8632 8654
8633 8655 #ifdef DEBUG
8634 8656 if (type == L_PAGELOCK && segvn_pglock_mtbf) {
8635 8657 hrtime_t ts = gethrtime();
8636 8658 if ((ts % segvn_pglock_mtbf) == 0) {
8637 8659 return (ENOTSUP);
8638 8660 }
8639 8661 if ((ts % segvn_pglock_mtbf) == 1) {
8640 8662 return (EFAULT);
8641 8663 }
8642 8664 }
8643 8665 #endif
8644 8666
8645 8667 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START,
8646 8668 "segvn_pagelock: start seg %p addr %p", seg, addr);
8647 8669
8648 8670 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8649 8671 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
8650 8672
8651 8673 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8652 8674
8653 8675 /*
8654 8676 * for now we only support pagelock to anon memory. We would have to
8655 8677 * check protections for vnode objects and call into the vnode driver.
8656 8678 * That's too much for a fast path. Let the fault entry point handle
8657 8679 * it.
8658 8680 */
8659 8681 if (svd->vp != NULL) {
8660 8682 if (type == L_PAGELOCK) {
8661 8683 error = ENOTSUP;
8662 8684 goto out;
8663 8685 }
8664 8686 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8665 8687 }
8666 8688 if ((amp = svd->amp) == NULL) {
8667 8689 if (type == L_PAGELOCK) {
8668 8690 error = EFAULT;
8669 8691 goto out;
8670 8692 }
8671 8693 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8672 8694 }
8673 8695 if (rw != S_READ && rw != S_WRITE) {
8674 8696 if (type == L_PAGELOCK) {
8675 8697 error = ENOTSUP;
8676 8698 goto out;
8677 8699 }
8678 8700 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8679 8701 }
8680 8702
8681 8703 if (seg->s_szc != 0) {
8682 8704 /*
8683 8705 * We are adjusting the pagelock region to the large page size
8684 8706 * boundary because the unlocked part of a large page cannot
8685 8707 * be freed anyway unless all constituent pages of a large
8686 8708 * page are locked. Bigger regions reduce pcache chain length
8687 8709 * and improve lookup performance. The tradeoff is that the
8688 8710 * very first segvn_pagelock() call for a given page is more
8689 8711 * expensive if only 1 page_t is needed for IO. This is only
8690 8712 * an issue if pcache entry doesn't get reused by several
8691 8713 * subsequent calls. We optimize here for the case when pcache
8692 8714 * is heavily used by repeated IOs to the same address range.
8693 8715 *
8694 8716 * Note segment's page size cannot change while we are holding
8695 8717 * as lock. And then it cannot change while softlockcnt is
8696 8718 * not 0. This will allow us to correctly recalculate large
8697 8719 * page size region for the matching pageunlock/reclaim call
8698 8720 * since as_pageunlock() caller must always match
8699 8721 * as_pagelock() call's addr and len.
8700 8722 *
8701 8723 * For pageunlock *ppp points to the pointer of page_t that
8702 8724 * corresponds to the real unadjusted start address. Similar
8703 8725 * for pagelock *ppp must point to the pointer of page_t that
8704 8726 * corresponds to the real unadjusted start address.
8705 8727 */
8706 8728 pgsz = page_get_pagesize(seg->s_szc);
8707 8729 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
8708 8730 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8709 8731 } else if (len < segvn_pglock_comb_thrshld) {
8710 8732 lpgaddr = addr;
8711 8733 lpgeaddr = addr + len;
8712 8734 adjustpages = 0;
8713 8735 pgsz = PAGESIZE;
8714 8736 } else {
8715 8737 /*
8716 8738 * Align the address range of large enough requests to allow
8717 8739 * combining of different shadow lists into 1 to reduce memory
8718 8740 * overhead from potentially overlapping large shadow lists
8719 8741 * (worst case is we have a 1MB IO into buffers with start
8720 8742 * addresses separated by 4K). Alignment is only possible if
8721 8743 * padded chunks have sufficient access permissions. Note
8722 8744 * permissions won't change between L_PAGELOCK and
8723 8745 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8724 8746 * segvn_setprot() to wait until softlockcnt drops to 0. This
8725 8747 * allows us to determine in L_PAGEUNLOCK the same range we
8726 8748 * computed in L_PAGELOCK.
8727 8749 *
8728 8750 * If alignment is limited by segment ends set
8729 8751 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8730 8752 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8731 8753 * per segment counters. In L_PAGEUNLOCK case decrease
8732 8754 * softlockcnt_sbase/softlockcnt_send counters if
8733 8755 * sftlck_sbase/sftlck_send flags are set. When
8734 8756 * softlockcnt_sbase/softlockcnt_send are non 0
8735 8757 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8736 8758 * won't merge the segments. This restriction combined with
8737 8759 * restriction on segment unmapping and splitting for segments
8738 8760 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8739 8761 * correctly determine the same range that was previously
8740 8762 * locked by matching L_PAGELOCK.
8741 8763 */
8742 8764 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16);
8743 8765 pgsz = PAGESIZE;
8744 8766 if (svd->type == MAP_PRIVATE) {
8745 8767 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
8746 8768 segvn_pglock_comb_balign);
8747 8769 if (lpgaddr < seg->s_base) {
8748 8770 lpgaddr = seg->s_base;
8749 8771 sftlck_sbase = 1;
8750 8772 }
8751 8773 } else {
8752 8774 ulong_t aix = svd->anon_index + seg_page(seg, addr);
8753 8775 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign);
8754 8776 if (aaix < svd->anon_index) {
8755 8777 lpgaddr = seg->s_base;
8756 8778 sftlck_sbase = 1;
8757 8779 } else {
8758 8780 lpgaddr = addr - ptob(aix - aaix);
8759 8781 ASSERT(lpgaddr >= seg->s_base);
8760 8782 }
8761 8783 }
8762 8784 if (svd->pageprot && lpgaddr != addr) {
8763 8785 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
8764 8786 struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8765 8787 while (vp < evp) {
8766 8788 if ((VPP_PROT(vp) & protchk) == 0) {
8767 8789 break;
8768 8790 }
8769 8791 vp++;
8770 8792 }
8771 8793 if (vp < evp) {
8772 8794 lpgaddr = addr;
8773 8795 pflags = 0;
8774 8796 }
8775 8797 }
8776 8798 lpgeaddr = addr + len;
8777 8799 if (pflags) {
8778 8800 if (svd->type == MAP_PRIVATE) {
8779 8801 lpgeaddr = (caddr_t)P2ROUNDUP(
8780 8802 (uintptr_t)lpgeaddr,
8781 8803 segvn_pglock_comb_balign);
8782 8804 } else {
8783 8805 ulong_t aix = svd->anon_index +
8784 8806 seg_page(seg, lpgeaddr);
8785 8807 ulong_t aaix = P2ROUNDUP(aix,
8786 8808 segvn_pglock_comb_palign);
8787 8809 if (aaix < aix) {
8788 8810 lpgeaddr = 0;
8789 8811 } else {
8790 8812 lpgeaddr += ptob(aaix - aix);
8791 8813 }
8792 8814 }
8793 8815 if (lpgeaddr == 0 ||
8794 8816 lpgeaddr > seg->s_base + seg->s_size) {
8795 8817 lpgeaddr = seg->s_base + seg->s_size;
8796 8818 sftlck_send = 1;
8797 8819 }
8798 8820 }
8799 8821 if (svd->pageprot && lpgeaddr != addr + len) {
8800 8822 struct vpage *vp;
8801 8823 struct vpage *evp;
8802 8824
8803 8825 vp = &svd->vpage[seg_page(seg, addr + len)];
8804 8826 evp = &svd->vpage[seg_page(seg, lpgeaddr)];
8805 8827
8806 8828 while (vp < evp) {
8807 8829 if ((VPP_PROT(vp) & protchk) == 0) {
8808 8830 break;
8809 8831 }
8810 8832 vp++;
8811 8833 }
8812 8834 if (vp < evp) {
8813 8835 lpgeaddr = addr + len;
8814 8836 }
8815 8837 }
8816 8838 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8817 8839 }
8818 8840
8819 8841 /*
8820 8842 * For MAP_SHARED segments we create pcache entries tagged by amp and
8821 8843 * anon index so that we can share pcache entries with other segments
8822 8844 * that map this amp. For private segments pcache entries are tagged
8823 8845 * with segment and virtual address.
8824 8846 */
8825 8847 if (svd->type == MAP_SHARED) {
8826 8848 pamp = amp;
8827 8849 paddr = (caddr_t)((lpgaddr - seg->s_base) +
8828 8850 ptob(svd->anon_index));
8829 8851 preclaim_callback = shamp_reclaim;
8830 8852 } else {
8831 8853 pamp = NULL;
8832 8854 paddr = lpgaddr;
8833 8855 preclaim_callback = segvn_reclaim;
8834 8856 }
8835 8857
8836 8858 if (type == L_PAGEUNLOCK) {
8837 8859 VM_STAT_ADD(segvnvmstats.pagelock[0]);
8838 8860
8839 8861 /*
8840 8862 * update hat ref bits for /proc. We need to make sure
8841 8863 * that threads tracing the ref and mod bits of the
8842 8864 * address space get the right data.
8843 8865 * Note: page ref and mod bits are updated at reclaim time
8844 8866 */
8845 8867 if (seg->s_as->a_vbits) {
8846 8868 for (a = addr; a < addr + len; a += PAGESIZE) {
8847 8869 if (rw == S_WRITE) {
8848 8870 hat_setstat(seg->s_as, a,
8849 8871 PAGESIZE, P_REF | P_MOD);
8850 8872 } else {
8851 8873 hat_setstat(seg->s_as, a,
8852 8874 PAGESIZE, P_REF);
8853 8875 }
8854 8876 }
8855 8877 }
8856 8878
8857 8879 /*
8858 8880 * Check the shadow list entry after the last page used in
8859 8881 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
8860 8882 * was not inserted into pcache and is not large page
8861 8883 * adjusted. In this case call reclaim callback directly and
8862 8884 * don't adjust the shadow list start and size for large
8863 8885 * pages.
8864 8886 */
8865 8887 npages = btop(len);
8866 8888 if ((*ppp)[npages] == NOPCACHE_SHWLIST) {
8867 8889 void *ptag;
8868 8890 if (pamp != NULL) {
8869 8891 ASSERT(svd->type == MAP_SHARED);
8870 8892 ptag = (void *)pamp;
8871 8893 paddr = (caddr_t)((addr - seg->s_base) +
8872 8894 ptob(svd->anon_index));
8873 8895 } else {
8874 8896 ptag = (void *)seg;
8875 8897 paddr = addr;
8876 8898 }
8877 8899 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
8878 8900 } else {
8879 8901 ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
8880 8902 IS_SWAPFSVP((*ppp)[npages]->p_vnode));
8881 8903 len = lpgeaddr - lpgaddr;
8882 8904 npages = btop(len);
8883 8905 seg_pinactive(seg, pamp, paddr, len,
8884 8906 *ppp - adjustpages, rw, pflags, preclaim_callback);
8885 8907 }
8886 8908
8887 8909 if (pamp != NULL) {
8888 8910 ASSERT(svd->type == MAP_SHARED);
8889 8911 ASSERT(svd->softlockcnt >= npages);
8890 8912 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
8891 8913 }
8892 8914
8893 8915 if (sftlck_sbase) {
8894 8916 ASSERT(svd->softlockcnt_sbase > 0);
8895 8917 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
8896 8918 }
8897 8919 if (sftlck_send) {
8898 8920 ASSERT(svd->softlockcnt_send > 0);
8899 8921 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);
8900 8922 }
8901 8923
8902 8924 /*
8903 8925 * If someone is blocked while unmapping, we purge
8904 8926 * segment page cache and thus reclaim pplist synchronously
8905 8927 * without waiting for seg_pasync_thread. This speeds up
8906 8928 * unmapping in cases where munmap(2) is called, while
8907 8929 * raw async i/o is still in progress or where a thread
8908 8930 * exits on data fault in a multithreaded application.
8909 8931 */
8910 8932 if (AS_ISUNMAPWAIT(seg->s_as)) {
8911 8933 if (svd->softlockcnt == 0) {
8912 8934 mutex_enter(&seg->s_as->a_contents);
8913 8935 if (AS_ISUNMAPWAIT(seg->s_as)) {
8914 8936 AS_CLRUNMAPWAIT(seg->s_as);
8915 8937 cv_broadcast(&seg->s_as->a_cv);
8916 8938 }
8917 8939 mutex_exit(&seg->s_as->a_contents);
8918 8940 } else if (pamp == NULL) {
8919 8941 /*
8920 8942 * softlockcnt is not 0 and this is a
8921 8943 * MAP_PRIVATE segment. Try to purge its
8922 8944 * pcache entries to reduce softlockcnt.
8923 8945 * If it drops to 0 segvn_reclaim()
8924 8946 * will wake up a thread waiting on
8925 8947 * unmapwait flag.
8926 8948 *
8927 8949 * We don't purge MAP_SHARED segments with non
8928 8950 * 0 softlockcnt since IO is still in progress
8929 8951 * for such segments.
8930 8952 */
8931 8953 ASSERT(svd->type == MAP_PRIVATE);
8932 8954 segvn_purge(seg);
8933 8955 }
8934 8956 }
8935 8957 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8936 8958 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END,
8937 8959 "segvn_pagelock: unlock seg %p addr %p", seg, addr);
8938 8960 return (0);
8939 8961 }
8940 8962
8941 8963 /* The L_PAGELOCK case ... */
8942 8964
8943 8965 VM_STAT_ADD(segvnvmstats.pagelock[1]);
8944 8966
8945 8967 /*
8946 8968 * For MAP_SHARED segments we have to check protections before
8947 8969 * seg_plookup() since pcache entries may be shared by many segments
8948 8970 * with potentially different page protections.
8949 8971 */
8950 8972 if (pamp != NULL) {
8951 8973 ASSERT(svd->type == MAP_SHARED);
8952 8974 if (svd->pageprot == 0) {
8953 8975 if ((svd->prot & protchk) == 0) {
8954 8976 error = EACCES;
8955 8977 goto out;
8956 8978 }
8957 8979 } else {
8958 8980 /*
8959 8981 * check page protections
8960 8982 */
8961 8983 caddr_t ea;
8962 8984
8963 8985 if (seg->s_szc) {
8964 8986 a = lpgaddr;
8965 8987 ea = lpgeaddr;
8966 8988 } else {
8967 8989 a = addr;
8968 8990 ea = addr + len;
8969 8991 }
8970 8992 for (; a < ea; a += pgsz) {
8971 8993 struct vpage *vp;
8972 8994
8973 8995 ASSERT(seg->s_szc == 0 ||
8974 8996 sameprot(seg, a, pgsz));
8975 8997 vp = &svd->vpage[seg_page(seg, a)];
8976 8998 if ((VPP_PROT(vp) & protchk) == 0) {
8977 8999 error = EACCES;
8978 9000 goto out;
8979 9001 }
8980 9002 }
8981 9003 }
8982 9004 }
8983 9005
8984 9006 /*
8985 9007 * try to find pages in segment page cache
8986 9008 */
8987 9009 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
8988 9010 if (pplist != NULL) {
8989 9011 if (pamp != NULL) {
8990 9012 npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
8991 9013 ASSERT(svd->type == MAP_SHARED);
8992 9014 atomic_add_long((ulong_t *)&svd->softlockcnt,
8993 9015 npages);
8994 9016 }
8995 9017 if (sftlck_sbase) {
8996 9018 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
8997 9019 }
8998 9020 if (sftlck_send) {
8999 9021 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9000 9022 }
9001 9023 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9002 9024 *ppp = pplist + adjustpages;
9003 9025 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
9004 9026 "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
9005 9027 return (0);
9006 9028 }
9007 9029
9008 9030 /*
9009 9031 * For MAP_SHARED segments we already verified above that segment
9010 9032 * protections allow this pagelock operation.
9011 9033 */
9012 9034 if (pamp == NULL) {
9013 9035 ASSERT(svd->type == MAP_PRIVATE);
9014 9036 if (svd->pageprot == 0) {
9015 9037 if ((svd->prot & protchk) == 0) {
9016 9038 error = EACCES;
9017 9039 goto out;
9018 9040 }
9019 9041 if (svd->prot & PROT_WRITE) {
9020 9042 wlen = lpgeaddr - lpgaddr;
9021 9043 } else {
9022 9044 wlen = 0;
9023 9045 ASSERT(rw == S_READ);
9024 9046 }
9025 9047 } else {
9026 9048 int wcont = 1;
9027 9049 /*
9028 9050 * check page protections
9029 9051 */
9030 9052 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
9031 9053 struct vpage *vp;
9032 9054
9033 9055 ASSERT(seg->s_szc == 0 ||
9034 9056 sameprot(seg, a, pgsz));
9035 9057 vp = &svd->vpage[seg_page(seg, a)];
9036 9058 if ((VPP_PROT(vp) & protchk) == 0) {
9037 9059 error = EACCES;
9038 9060 goto out;
9039 9061 }
9040 9062 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
9041 9063 wlen += pgsz;
9042 9064 } else {
9043 9065 wcont = 0;
9044 9066 ASSERT(rw == S_READ);
9045 9067 }
9046 9068 }
9047 9069 }
9048 9070 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
9049 9071 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
9050 9072 }
9051 9073
9052 9074 /*
9053 9075 * Only build large page adjusted shadow list if we expect to insert
9054 9076 * it into pcache. For large enough pages it's a big overhead to
9055 9077 * create a shadow list of the entire large page. But this overhead
9056 9078 * should be amortized over repeated pcache hits on subsequent reuse
9057 9079 * of this shadow list (IO into any range within this shadow list will
9058 9080 * find it in pcache since we large page align the request for pcache
9059 9081 * lookups). pcache performance is improved with bigger shadow lists
9060 9082 * as it reduces the time to pcache the entire big segment and reduces
9061 9083 * pcache chain length.
9062 9084 */
9063 9085 if (seg_pinsert_check(seg, pamp, paddr,
9064 9086 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
9065 9087 addr = lpgaddr;
9066 9088 len = lpgeaddr - lpgaddr;
9067 9089 use_pcache = 1;
9068 9090 } else {
9069 9091 use_pcache = 0;
9070 9092 /*
9071 9093 * Since this entry will not be inserted into the pcache, we
9072 9094 * will not do any adjustments to the starting address or
9073 9095 * size of the memory to be locked.
9074 9096 */
9075 9097 adjustpages = 0;
9076 9098 }
9077 9099 npages = btop(len);
9078 9100
9079 9101 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
9080 9102 pl = pplist;
9081 9103 *ppp = pplist + adjustpages;
9082 9104 /*
9083 9105 * If use_pcache is 0 this shadow list is not large page adjusted.
9084 9106 * Record this info in the last entry of shadow array so that
9085 9107 * L_PAGEUNLOCK can determine if it should large page adjust the
9086 9108 * address range to find the real range that was locked.
9087 9109 */
9088 9110 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;
9089 9111
9090 9112 page = seg_page(seg, addr);
9091 9113 anon_index = svd->anon_index + page;
9092 9114
9093 9115 anlock = 0;
9094 9116 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9095 9117 ASSERT(amp->a_szc >= seg->s_szc);
9096 9118 anpgcnt = page_get_pagecnt(amp->a_szc);
9097 9119 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
9098 9120 struct anon *ap;
9099 9121 struct vnode *vp;
9100 9122 u_offset_t off;
9101 9123
9102 9124 /*
9103 9125 * Lock and unlock anon array only once per large page.
9104 9126 * anon_array_enter() locks the root anon slot according to
9105 9127 * a_szc which can't change while anon map is locked. We lock
9106 9128 * anon the first time through this loop and each time we
9107 9129 * reach anon index that corresponds to a root of a large
9108 9130 * page.
9109 9131 */
9110 9132 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
9111 9133 ASSERT(anlock == 0);
9112 9134 anon_array_enter(amp, anon_index, &cookie);
9113 9135 anlock = 1;
9114 9136 }
9115 9137 ap = anon_get_ptr(amp->ahp, anon_index);
9116 9138
9117 9139 /*
9118 9140 * We must never use seg_pcache for COW pages
9119 9141 * because we might end up with original page still
9120 9142 * lying in seg_pcache even after private page is
9121 9143 * created. This leads to data corruption as
9122 9144 * aio_write refers to the page still in cache
9123 9145 * while all other accesses refer to the private
9124 9146 * page.
9125 9147 */
9126 9148 if (ap == NULL || ap->an_refcnt != 1) {
9127 9149 struct vpage *vpage;
9128 9150
9129 9151 if (seg->s_szc) {
9130 9152 error = EFAULT;
9131 9153 break;
9132 9154 }
9133 9155 if (svd->vpage != NULL) {
9134 9156 vpage = &svd->vpage[seg_page(seg, a)];
9135 9157 } else {
9136 9158 vpage = NULL;
9137 9159 }
9138 9160 ASSERT(anlock);
9139 9161 anon_array_exit(&cookie);
9140 9162 anlock = 0;
9141 9163 pp = NULL;
9142 9164 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
9143 9165 vpage, &pp, 0, F_INVAL, rw, 1);
9144 9166 if (error) {
9145 9167 error = fc_decode(error);
9146 9168 break;
9147 9169 }
9148 9170 anon_array_enter(amp, anon_index, &cookie);
9149 9171 anlock = 1;
9150 9172 ap = anon_get_ptr(amp->ahp, anon_index);
9151 9173 if (ap == NULL || ap->an_refcnt != 1) {
9152 9174 error = EFAULT;
9153 9175 break;
9154 9176 }
9155 9177 }
9156 9178 swap_xlate(ap, &vp, &off);
9157 9179 pp = page_lookup_nowait(vp, off, SE_SHARED);
9158 9180 if (pp == NULL) {
9159 9181 error = EFAULT;
9160 9182 break;
9161 9183 }
9162 9184 if (ap->an_pvp != NULL) {
9163 9185 anon_swap_free(ap, pp);
9164 9186 }
9165 9187 /*
9166 9188 * Unlock anon if this is the last slot in a large page.
9167 9189 */
9168 9190 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
9169 9191 ASSERT(anlock);
9170 9192 anon_array_exit(&cookie);
9171 9193 anlock = 0;
9172 9194 }
9173 9195 *pplist++ = pp;
9174 9196 }
9175 9197 if (anlock) { /* Ensure the lock is dropped */
9176 9198 anon_array_exit(&cookie);
9177 9199 }
9178 9200 	ANON_LOCK_EXIT(&amp->a_rwlock);
9179 9201
9180 9202 if (a >= addr + len) {
9181 9203 atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9182 9204 if (pamp != NULL) {
9183 9205 ASSERT(svd->type == MAP_SHARED);
9184 9206 atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
9185 9207 npages);
9186 9208 wlen = len;
9187 9209 }
9188 9210 if (sftlck_sbase) {
9189 9211 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9190 9212 }
9191 9213 if (sftlck_send) {
9192 9214 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9193 9215 }
9194 9216 if (use_pcache) {
9195 9217 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
9196 9218 rw, pflags, preclaim_callback);
9197 9219 }
9198 9220 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9199 9221 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
9200 9222 "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9201 9223 return (0);
9202 9224 }
9203 9225
9204 9226 pplist = pl;
9205 9227 np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
9206 9228 while (np > (uint_t)0) {
9207 9229 ASSERT(PAGE_LOCKED(*pplist));
9208 9230 page_unlock(*pplist);
9209 9231 np--;
9210 9232 pplist++;
9211 9233 }
9212 9234 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9213 9235 out:
9214 9236 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9215 9237 *ppp = NULL;
9216 9238 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
9217 9239 "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
9218 9240 return (error);
9219 9241 }
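
One detail of segvn_pagelock() worth highlighting is the extra shadow-list slot: pl[npages] carries PCACHE_SHWLIST or NOPCACHE_SHWLIST so that the later L_PAGEUNLOCK call can tell whether the list was inserted into pcache and therefore large-page adjusted. A standalone toy sketch of the same in-band marker convention (invented names, not the kernel interfaces):

#include <stdlib.h>

#define	TOY_PCACHE_SHWLIST	((void *)-2)	/* list was pcache-inserted */
#define	TOY_NOPCACHE_SHWLIST	((void *)-1)	/* list bypassed pcache */

/* Allocate a shadow list with one extra slot used as an in-band marker. */
static void **
toy_shadow_alloc(size_t npages, int use_pcache)
{
	void **pl = calloc(npages + 1, sizeof (void *));

	if (pl != NULL)
		pl[npages] = use_pcache ?
		    TOY_PCACHE_SHWLIST : TOY_NOPCACHE_SHWLIST;
	return (pl);
}

/* At unlock time the marker tells us whether to large-page adjust. */
static int
toy_shadow_was_pcached(void **pl, size_t npages)
{
	return (pl[npages] != TOY_NOPCACHE_SHWLIST);
}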
9220 9242
9221 9243 /*
9222 9244 * purge any cached pages in the I/O page cache
9223 9245 */
9224 9246 static void
9225 9247 segvn_purge(struct seg *seg)
9226 9248 {
9227 9249 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9228 9250
9229 9251 /*
9230 9252 * pcache is only used by pure anon segments.
9231 9253 */
9232 9254 if (svd->amp == NULL || svd->vp != NULL) {
9233 9255 return;
9234 9256 }
9235 9257
9236 9258 /*
9237 9259 * For MAP_SHARED segments non 0 segment's softlockcnt means
9238 9260 * active IO is still in progress via this segment. So we only
9239 9261 * purge MAP_SHARED segments when their softlockcnt is 0.
9240 9262 */
9241 9263 if (svd->type == MAP_PRIVATE) {
9242 9264 if (svd->softlockcnt) {
9243 9265 seg_ppurge(seg, NULL, 0);
9244 9266 }
9245 9267 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
9246 9268 seg_ppurge(seg, svd->amp, 0);
9247 9269 }
9248 9270 }
9249 9271
9250 9272 /*
9251 9273 * If async argument is not 0 we are called from pcache async thread and don't
9252 9274 * hold AS lock.
9253 9275 */
9254 9276
9255 9277 /*ARGSUSED*/
9256 9278 static int
9257 9279 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9258 9280 enum seg_rw rw, int async)
9259 9281 {
9260 9282 struct seg *seg = (struct seg *)ptag;
9261 9283 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9262 9284 pgcnt_t np, npages;
9263 9285 struct page **pl;
9264 9286
9265 9287 npages = np = btop(len);
9266 9288 ASSERT(npages);
9267 9289
9268 9290 ASSERT(svd->vp == NULL && svd->amp != NULL);
9269 9291 ASSERT(svd->softlockcnt >= npages);
9270 9292 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9271 9293
9272 9294 pl = pplist;
9273 9295
9274 9296 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9275 9297 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9276 9298
9277 9299 while (np > (uint_t)0) {
9278 9300 if (rw == S_WRITE) {
9279 9301 hat_setrefmod(*pplist);
9280 9302 } else {
9281 9303 hat_setref(*pplist);
9282 9304 }
9283 9305 page_unlock(*pplist);
9284 9306 np--;
9285 9307 pplist++;
9286 9308 }
9287 9309
9288 9310 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9289 9311
9290 9312 /*
9291 9313 * If we are pcache async thread we don't hold AS lock. This means if
9292 9314 * softlockcnt drops to 0 after the decrement below address space may
9293 9315 	 * get freed. We can't allow it since after softlock decrement to 0 we
9294 9316 * still need to access as structure for possible wakeup of unmap
9295 9317 * waiters. To prevent the disappearance of as we take this segment
9296 9318 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
9297 9319 * make sure this routine completes before segment is freed.
9298 9320 *
9299 9321 * The second complication we have to deal with in async case is a
9300 9322 * possibility of missed wake up of unmap wait thread. When we don't
9301 9323 * hold as lock here we may take a_contents lock before unmap wait
9302 9324 * thread that was first to see softlockcnt was still not 0. As a
9303 9325 * result we'll fail to wake up an unmap wait thread. To avoid this
9304 9326 * race we set nounmapwait flag in as structure if we drop softlockcnt
9305 9327 * to 0 when we were called by pcache async thread. unmapwait thread
9306 9328 * will not block if this flag is set.
9307 9329 */
9308 9330 if (async) {
9309 9331 mutex_enter(&svd->segfree_syncmtx);
9310 9332 }
9311 9333
9312 9334 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9313 9335 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
9314 9336 mutex_enter(&seg->s_as->a_contents);
9315 9337 if (async) {
9316 9338 AS_SETNOUNMAPWAIT(seg->s_as);
9317 9339 }
9318 9340 if (AS_ISUNMAPWAIT(seg->s_as)) {
9319 9341 AS_CLRUNMAPWAIT(seg->s_as);
9320 9342 cv_broadcast(&seg->s_as->a_cv);
9321 9343 }
9322 9344 mutex_exit(&seg->s_as->a_contents);
9323 9345 }
9324 9346 }
9325 9347
9326 9348 if (async) {
9327 9349 mutex_exit(&svd->segfree_syncmtx);
9328 9350 }
9329 9351 return (0);
9330 9352 }
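
The missed-wakeup hazard described in the comment inside segvn_reclaim() is a general pattern: an async reclaimer that does not hold the address-space lock may broadcast before the unmap thread goes to sleep. The kernel fix is to publish a "do not wait" flag (AS_SETNOUNMAPWAIT) under a_contents. A simplified, self-contained pthread sketch of the same idea (invented names; the real code keeps the counter atomic outside the mutex, which this sketch does not model):

#include <pthread.h>

typedef struct {
	pthread_mutex_t	m;
	pthread_cond_t	cv;
	long		count;		/* outstanding locked pages */
	int		nowait;		/* set by async path; waiters skip sleep */
} toy_sync_t;

/* Async reclaim path: decrement, and on reaching zero, mark + wake. */
static void
toy_reclaim(toy_sync_t *s, long npages)
{
	pthread_mutex_lock(&s->m);
	s->count -= npages;
	if (s->count == 0) {
		s->nowait = 1;		/* future waiters must not block */
		pthread_cond_broadcast(&s->cv);
	}
	pthread_mutex_unlock(&s->m);
}

/* Unmap path: wait until everything is reclaimed, unless told not to. */
static void
toy_wait_for_drain(toy_sync_t *s)
{
	pthread_mutex_lock(&s->m);
	while (s->count != 0 && !s->nowait)
		pthread_cond_wait(&s->cv, &s->m);
	pthread_mutex_unlock(&s->m);
}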
9331 9353
9332 9354 /*ARGSUSED*/
9333 9355 static int
9334 9356 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9335 9357 enum seg_rw rw, int async)
9336 9358 {
9337 9359 amp_t *amp = (amp_t *)ptag;
9338 9360 pgcnt_t np, npages;
9339 9361 struct page **pl;
9340 9362
9341 9363 npages = np = btop(len);
9342 9364 ASSERT(npages);
9343 9365 ASSERT(amp->a_softlockcnt >= npages);
9344 9366
9345 9367 pl = pplist;
9346 9368
9347 9369 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9348 9370 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9349 9371
9350 9372 while (np > (uint_t)0) {
9351 9373 if (rw == S_WRITE) {
9352 9374 hat_setrefmod(*pplist);
9353 9375 } else {
9354 9376 hat_setref(*pplist);
9355 9377 }
9356 9378 page_unlock(*pplist);
9357 9379 np--;
9358 9380 pplist++;
9359 9381 }
9360 9382
9361 9383 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9362 9384
9363 9385 /*
9364 9386 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
9365 9387 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
9366 9388 * and anonmap_purge() acquires a_purgemtx.
9367 9389 */
9368 9390 	mutex_enter(&amp->a_purgemtx);
9369 9391 	if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
9370 9392 amp->a_purgewait) {
9371 9393 amp->a_purgewait = 0;
9372 9394 		cv_broadcast(&amp->a_purgecv);
9373 9395 }
9374 9396 	mutex_exit(&amp->a_purgemtx);
9375 9397 return (0);
9376 9398 }
9377 9399
9378 9400 /*
9379 9401 * get a memory ID for an addr in a given segment
9380 9402 *
9381 9403 * XXX only creates PAGESIZE pages if anon slots are not initialized.
9382 9404 * At fault time they will be relocated into larger pages.
9383 9405 */
9384 9406 static int
9385 9407 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
9386 9408 {
9387 9409 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9388 9410 struct anon *ap = NULL;
9389 9411 ulong_t anon_index;
9390 9412 struct anon_map *amp;
9391 9413 anon_sync_obj_t cookie;
9392 9414
9393 9415 if (svd->type == MAP_PRIVATE) {
9394 9416 memidp->val[0] = (uintptr_t)seg->s_as;
9395 9417 memidp->val[1] = (uintptr_t)addr;
9396 9418 return (0);
9397 9419 }
9398 9420
9399 9421 if (svd->type == MAP_SHARED) {
9400 9422 if (svd->vp) {
9401 9423 memidp->val[0] = (uintptr_t)svd->vp;
9402 9424 memidp->val[1] = (u_longlong_t)svd->offset +
9403 9425 (uintptr_t)(addr - seg->s_base);
9404 9426 return (0);
9405 9427 } else {
9406 9428
9407 9429 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
9408 9430 if ((amp = svd->amp) != NULL) {
9409 9431 anon_index = svd->anon_index +
9410 9432 seg_page(seg, addr);
9411 9433 }
9412 9434 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9413 9435
9414 9436 ASSERT(amp != NULL);
9415 9437
9416 9438 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9417 9439 anon_array_enter(amp, anon_index, &cookie);
9418 9440 ap = anon_get_ptr(amp->ahp, anon_index);
9419 9441 if (ap == NULL) {
9420 9442 page_t *pp;
9421 9443
9422 9444 pp = anon_zero(seg, addr, &ap, svd->cred);
9423 9445 if (pp == NULL) {
9424 9446 anon_array_exit(&cookie);
9425 9447 					ANON_LOCK_EXIT(&amp->a_rwlock);
9426 9448 return (ENOMEM);
9427 9449 }
9428 9450 ASSERT(anon_get_ptr(amp->ahp, anon_index)
9429 9451 == NULL);
9430 9452 (void) anon_set_ptr(amp->ahp, anon_index,
9431 9453 ap, ANON_SLEEP);
9432 9454 page_unlock(pp);
9433 9455 }
9434 9456
9435 9457 anon_array_exit(&cookie);
9436 9458 			ANON_LOCK_EXIT(&amp->a_rwlock);
9437 9459
9438 9460 memidp->val[0] = (uintptr_t)ap;
9439 9461 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
9440 9462 return (0);
9441 9463 }
9442 9464 }
9443 9465 return (EINVAL);
9444 9466 }
9445 9467
9446 9468 static int
9447 9469 sameprot(struct seg *seg, caddr_t a, size_t len)
9448 9470 {
9449 9471 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9450 9472 struct vpage *vpage;
9451 9473 spgcnt_t pages = btop(len);
9452 9474 uint_t prot;
9453 9475
9454 9476 if (svd->pageprot == 0)
9455 9477 return (1);
9456 9478
9457 9479 ASSERT(svd->vpage != NULL);
9458 9480
9459 9481 vpage = &svd->vpage[seg_page(seg, a)];
9460 9482 prot = VPP_PROT(vpage);
9461 9483 vpage++;
9462 9484 pages--;
9463 9485 while (pages-- > 0) {
9464 9486 if (prot != VPP_PROT(vpage))
9465 9487 return (0);
9466 9488 vpage++;
9467 9489 }
9468 9490 return (1);
9469 9491 }
9470 9492
9471 9493 /*
9472 9494 * Get memory allocation policy info for specified address in given segment
9473 9495 */
9474 9496 static lgrp_mem_policy_info_t *
9475 9497 segvn_getpolicy(struct seg *seg, caddr_t addr)
9476 9498 {
9477 9499 struct anon_map *amp;
9478 9500 ulong_t anon_index;
9479 9501 lgrp_mem_policy_info_t *policy_info;
9480 9502 struct segvn_data *svn_data;
9481 9503 u_offset_t vn_off;
9482 9504 vnode_t *vp;
9483 9505
9484 9506 ASSERT(seg != NULL);
9485 9507
9486 9508 svn_data = (struct segvn_data *)seg->s_data;
9487 9509 if (svn_data == NULL)
9488 9510 return (NULL);
9489 9511
9490 9512 /*
9491 9513 * Get policy info for private or shared memory
9492 9514 */
9493 9515 if (svn_data->type != MAP_SHARED) {
9494 9516 if (svn_data->tr_state != SEGVN_TR_ON) {
9495 9517 policy_info = &svn_data->policy_info;
9496 9518 } else {
9497 9519 policy_info = &svn_data->tr_policy_info;
9498 9520 ASSERT(policy_info->mem_policy ==
9499 9521 LGRP_MEM_POLICY_NEXT_SEG);
9500 9522 }
9501 9523 } else {
9502 9524 amp = svn_data->amp;
9503 9525 anon_index = svn_data->anon_index + seg_page(seg, addr);
9504 9526 vp = svn_data->vp;
9505 9527 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
9506 9528 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
9507 9529 }
9508 9530
9509 9531 return (policy_info);
9510 9532 }
9511 9533
9512 9534 /*ARGSUSED*/
9513 9535 static int
9514 9536 segvn_capable(struct seg *seg, segcapability_t capability)
9515 9537 {
9516 9538 return (0);
9517 9539 }
9518 9540
9519 9541 /*
9520 9542 * Bind text vnode segment to an amp. If we bind successfully mappings will be
9521 9543 * established to per vnode mapping per lgroup amp pages instead of to vnode
9522 9544 * pages. There's one amp per vnode text mapping per lgroup. Many processes
9523 9545 * may share the same text replication amp. If a suitable amp doesn't already
9524 9546 * exist in svntr hash table create a new one. We may fail to bind to amp if
9525 9547 * segment is not eligible for text replication. Code below first checks for
9526 9548 * these conditions. If binding is successful segment tr_state is set to on
9527 9549 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
9528 9550 * svd->amp remains as NULL.
9529 9551 */
9530 9552 static void
9531 9553 segvn_textrepl(struct seg *seg)
9532 9554 {
9533 9555 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9534 9556 vnode_t *vp = svd->vp;
9535 9557 u_offset_t off = svd->offset;
9536 9558 size_t size = seg->s_size;
9537 9559 u_offset_t eoff = off + size;
9538 9560 uint_t szc = seg->s_szc;
9539 9561 ulong_t hash = SVNTR_HASH_FUNC(vp);
9540 9562 svntr_t *svntrp;
9541 9563 struct vattr va;
9542 9564 proc_t *p = seg->s_as->a_proc;
9543 9565 lgrp_id_t lgrp_id;
9544 9566 lgrp_id_t olid;
9545 9567 int first;
9546 9568 struct anon_map *amp;
9547 9569
9548 9570 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9549 9571 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9550 9572 ASSERT(p != NULL);
9551 9573 ASSERT(svd->tr_state == SEGVN_TR_INIT);
9552 9574 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9553 9575 ASSERT(svd->flags & MAP_TEXT);
9554 9576 ASSERT(svd->type == MAP_PRIVATE);
9555 9577 ASSERT(vp != NULL && svd->amp == NULL);
9556 9578 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9557 9579 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9558 9580 ASSERT(seg->s_as != &kas);
9559 9581 ASSERT(off < eoff);
9560 9582 ASSERT(svntr_hashtab != NULL);
9561 9583
9562 9584 /*
9563 9585 * If numa optimizations are no longer desired bail out.
9564 9586 */
9565 9587 if (!lgrp_optimizations()) {
9566 9588 svd->tr_state = SEGVN_TR_OFF;
9567 9589 return;
9568 9590 }
9569 9591
9570 9592 /*
9571 9593 * Avoid creating anon maps with size bigger than the file size.
9572 9594 * If VOP_GETATTR() call fails bail out.
9573 9595 */
9574 9596 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
9575 9597 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
9576 9598 svd->tr_state = SEGVN_TR_OFF;
9577 9599 SEGVN_TR_ADDSTAT(gaerr);
9578 9600 return;
9579 9601 }
9580 9602 if (btopr(va.va_size) < btopr(eoff)) {
9581 9603 svd->tr_state = SEGVN_TR_OFF;
9582 9604 SEGVN_TR_ADDSTAT(overmap);
9583 9605 return;
9584 9606 }
9585 9607
9586 9608 /*
9587 9609 * VVMEXEC may not be set yet if exec() prefaults text segment. Set
9588 9610 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED
9589 9611 * mapping that checks if trcache for this vnode needs to be
9590 9612 * invalidated can't miss us.
9591 9613 */
9592 9614 if (!(vp->v_flag & VVMEXEC)) {
9593 9615 mutex_enter(&vp->v_lock);
9594 9616 vp->v_flag |= VVMEXEC;
9595 9617 mutex_exit(&vp->v_lock);
9596 9618 }
9597 9619 mutex_enter(&svntr_hashtab[hash].tr_lock);
9598 9620 /*
9599 9621 * Bail out if potentially MAP_SHARED writable mappings exist to this
9600 9622 * vnode. We don't want to use old file contents from existing
9601 9623 * replicas if this mapping was established after the original file
9602 9624 * was changed.
9603 9625 */
9604 9626 if (vn_is_mapped(vp, V_WRITE)) {
9605 9627 mutex_exit(&svntr_hashtab[hash].tr_lock);
9606 9628 svd->tr_state = SEGVN_TR_OFF;
9607 9629 SEGVN_TR_ADDSTAT(wrcnt);
9608 9630 return;
9609 9631 }
9610 9632 svntrp = svntr_hashtab[hash].tr_head;
9611 9633 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9612 9634 ASSERT(svntrp->tr_refcnt != 0);
9613 9635 if (svntrp->tr_vp != vp) {
9614 9636 continue;
9615 9637 }
9616 9638
9617 9639 /*
9618 9640 * Bail out if the file or its attributes were changed after
9619 9641 * this replication entry was created since we need to use the
9620 9642 * latest file contents. Note that mtime test alone is not
9621 9643 * sufficient because a user can explicitly change mtime via
9622 9644 		 * utimes(2) interfaces back to the old value after modifying
9623 9645 * the file contents. To detect this case we also have to test
9624 9646 * ctime which among other things records the time of the last
9625 9647 * mtime change by utimes(2). ctime is not changed when the file
9626 9648 * is only read or executed so we expect that typically existing
9627 9649 * replication amp's can be used most of the time.
9628 9650 */
9629 9651 if (!svntrp->tr_valid ||
9630 9652 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
9631 9653 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
9632 9654 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
9633 9655 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
9634 9656 mutex_exit(&svntr_hashtab[hash].tr_lock);
9635 9657 svd->tr_state = SEGVN_TR_OFF;
9636 9658 SEGVN_TR_ADDSTAT(stale);
9637 9659 return;
9638 9660 }
9639 9661 /*
9640 9662 * if off, eoff and szc match current segment we found the
9641 9663 * existing entry we can use.
9642 9664 */
9643 9665 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
9644 9666 svntrp->tr_szc == szc) {
9645 9667 break;
9646 9668 }
9647 9669 /*
9648 9670 * Don't create different but overlapping in file offsets
9649 9671 * entries to avoid replication of the same file pages more
9650 9672 * than once per lgroup.
9651 9673 */
9652 9674 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
9653 9675 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
9654 9676 mutex_exit(&svntr_hashtab[hash].tr_lock);
9655 9677 svd->tr_state = SEGVN_TR_OFF;
9656 9678 SEGVN_TR_ADDSTAT(overlap);
9657 9679 return;
9658 9680 }
9659 9681 }
9660 9682 /*
9661 9683 * If we didn't find existing entry create a new one.
9662 9684 */
9663 9685 if (svntrp == NULL) {
9664 9686 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
9665 9687 if (svntrp == NULL) {
9666 9688 mutex_exit(&svntr_hashtab[hash].tr_lock);
9667 9689 svd->tr_state = SEGVN_TR_OFF;
9668 9690 SEGVN_TR_ADDSTAT(nokmem);
9669 9691 return;
9670 9692 }
9671 9693 #ifdef DEBUG
9672 9694 {
9673 9695 lgrp_id_t i;
9674 9696 for (i = 0; i < NLGRPS_MAX; i++) {
9675 9697 ASSERT(svntrp->tr_amp[i] == NULL);
9676 9698 }
9677 9699 }
9678 9700 #endif /* DEBUG */
9679 9701 svntrp->tr_vp = vp;
9680 9702 svntrp->tr_off = off;
9681 9703 svntrp->tr_eoff = eoff;
9682 9704 svntrp->tr_szc = szc;
9683 9705 svntrp->tr_valid = 1;
9684 9706 svntrp->tr_mtime = va.va_mtime;
9685 9707 svntrp->tr_ctime = va.va_ctime;
9686 9708 svntrp->tr_refcnt = 0;
9687 9709 svntrp->tr_next = svntr_hashtab[hash].tr_head;
9688 9710 svntr_hashtab[hash].tr_head = svntrp;
9689 9711 }
9690 9712 first = 1;
9691 9713 again:
9692 9714 /*
9693 9715 * We want to pick a replica with pages on main thread's (t_tid = 1,
9694 9716 * aka T1) lgrp. Currently text replication is only optimized for
9695 9717 * workloads that either have all threads of a process on the same
9696 9718 * lgrp or execute their large text primarily on main thread.
9697 9719 */
9698 9720 lgrp_id = p->p_t1_lgrpid;
9699 9721 if (lgrp_id == LGRP_NONE) {
9700 9722 /*
9701 9723 * In case exec() prefaults text on non main thread use
9702 9724 * current thread lgrpid. It will become main thread anyway
9703 9725 * soon.
9704 9726 */
9705 9727 lgrp_id = lgrp_home_id(curthread);
9706 9728 }
9707 9729 /*
9708 9730 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise
9709 9731 * just set it to NLGRPS_MAX if it's different from current process T1
9710 9732 * home lgrp. p_tr_lgrpid is used to detect if process uses text
9711 9733 * replication and T1 new home is different from lgrp used for text
9712 9734 	 * replication. When this happens the asynchronous segvn thread rechecks if
9713 9735 * segments should change lgrps used for text replication. If we fail
9714 9736 * to set p_tr_lgrpid with atomic_cas_32 then set it to NLGRPS_MAX
9715 9737 * without cas if it's not already NLGRPS_MAX and not equal lgrp_id
9716 9738 * we want to use. We don't need to use cas in this case because
9717 9739 * another thread that races in between our non atomic check and set
9718 9740 * may only change p_tr_lgrpid to NLGRPS_MAX at this point.
9719 9741 */
9720 9742 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9721 9743 olid = p->p_tr_lgrpid;
9722 9744 if (lgrp_id != olid && olid != NLGRPS_MAX) {
9723 9745 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
9724 9746 if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
9725 9747 olid) {
9726 9748 olid = p->p_tr_lgrpid;
9727 9749 ASSERT(olid != LGRP_NONE);
9728 9750 if (olid != lgrp_id && olid != NLGRPS_MAX) {
9729 9751 p->p_tr_lgrpid = NLGRPS_MAX;
9730 9752 }
9731 9753 }
9732 9754 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9733 9755 membar_producer();
9734 9756 /*
9735 9757 * lgrp_move_thread() won't schedule async recheck after
9736 9758 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
9737 9759 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
9738 9760 * is not LGRP_NONE.
9739 9761 */
9740 9762 if (first && p->p_t1_lgrpid != LGRP_NONE &&
9741 9763 p->p_t1_lgrpid != lgrp_id) {
9742 9764 first = 0;
9743 9765 goto again;
9744 9766 }
9745 9767 }
9746 9768 /*
9747 9769 * If no amp was created yet for lgrp_id create a new one as long as
9748 9770 * we have enough memory to afford it.
9749 9771 */
9750 9772 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
9751 9773 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
9752 9774 if (trmem > segvn_textrepl_max_bytes) {
9753 9775 SEGVN_TR_ADDSTAT(normem);
9754 9776 goto fail;
9755 9777 }
9756 9778 if (anon_try_resv_zone(size, NULL) == 0) {
9757 9779 SEGVN_TR_ADDSTAT(noanon);
9758 9780 goto fail;
9759 9781 }
9760 9782 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
9761 9783 if (amp == NULL) {
9762 9784 anon_unresv_zone(size, NULL);
9763 9785 SEGVN_TR_ADDSTAT(nokmem);
9764 9786 goto fail;
9765 9787 }
9766 9788 ASSERT(amp->refcnt == 1);
9767 9789 amp->a_szc = szc;
9768 9790 svntrp->tr_amp[lgrp_id] = amp;
9769 9791 SEGVN_TR_ADDSTAT(newamp);
9770 9792 }
9771 9793 svntrp->tr_refcnt++;
9772 9794 ASSERT(svd->svn_trnext == NULL);
9773 9795 ASSERT(svd->svn_trprev == NULL);
9774 9796 svd->svn_trnext = svntrp->tr_svnhead;
9775 9797 svd->svn_trprev = NULL;
9776 9798 if (svntrp->tr_svnhead != NULL) {
9777 9799 svntrp->tr_svnhead->svn_trprev = svd;
9778 9800 }
9779 9801 svntrp->tr_svnhead = svd;
9780 9802 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
9781 9803 ASSERT(amp->refcnt >= 1);
9782 9804 svd->amp = amp;
9783 9805 svd->anon_index = 0;
9784 9806 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
9785 9807 svd->tr_policy_info.mem_lgrpid = lgrp_id;
9786 9808 svd->tr_state = SEGVN_TR_ON;
9787 9809 mutex_exit(&svntr_hashtab[hash].tr_lock);
9788 9810 SEGVN_TR_ADDSTAT(repl);
9789 9811 return;
9790 9812 fail:
9791 9813 ASSERT(segvn_textrepl_bytes >= size);
9792 9814 atomic_add_long(&segvn_textrepl_bytes, -size);
9793 9815 ASSERT(svntrp != NULL);
9794 9816 ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
9795 9817 if (svntrp->tr_refcnt == 0) {
9796 9818 ASSERT(svntrp == svntr_hashtab[hash].tr_head);
9797 9819 svntr_hashtab[hash].tr_head = svntrp->tr_next;
9798 9820 mutex_exit(&svntr_hashtab[hash].tr_lock);
9799 9821 kmem_cache_free(svntr_cache, svntrp);
9800 9822 } else {
9801 9823 mutex_exit(&svntr_hashtab[hash].tr_lock);
9802 9824 }
9803 9825 svd->tr_state = SEGVN_TR_OFF;
9804 9826 }
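
The reservation above is optimistic: segvn_textrepl_bytes is charged first with atomic_add_long_nv(), the new total is checked against segvn_textrepl_max_bytes, and every fail path backs the charge out again with atomic_add_long(). A minimal sketch of that reserve/rollback idiom, with tr_bytes and tr_max_bytes as hypothetical stand-ins for the real counters:

    #include <sys/types.h>
    #include <sys/atomic.h>

    static volatile ulong_t tr_bytes;	/* stand-in for segvn_textrepl_bytes */
    static ulong_t tr_max_bytes;	/* stand-in for segvn_textrepl_max_bytes */

    static int
    tr_reserve(size_t size)
    {
    	/* Optimistically charge the global counter first. */
    	if (atomic_add_long_nv(&tr_bytes, size) > tr_max_bytes) {
    		/* Over the limit: back the charge out and fail. */
    		atomic_add_long(&tr_bytes, -size);
    		return (0);
    	}
    	return (1);
    }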
9805 9827
9806 9828 /*
9807 9829 * Convert seg back to regular vnode mapping seg by unbinding it from its text
9808 9830 * replication amp. This routine is most typically called when segment is
9809 9831 * unmapped but can also be called when segment no longer qualifies for text
9810 9832  * replication (e.g. due to protection changes). If unload_unmap is set, use
9811 9833  * the HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user
9812 9834  * of the svntr, free all its anon maps and remove it from the hash table.
9813 9835 */
9814 9836 static void
9815 9837 segvn_textunrepl(struct seg *seg, int unload_unmap)
9816 9838 {
9817 9839 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9818 9840 vnode_t *vp = svd->vp;
9819 9841 u_offset_t off = svd->offset;
9820 9842 size_t size = seg->s_size;
9821 9843 u_offset_t eoff = off + size;
9822 9844 uint_t szc = seg->s_szc;
9823 9845 ulong_t hash = SVNTR_HASH_FUNC(vp);
9824 9846 svntr_t *svntrp;
9825 9847 svntr_t **prv_svntrp;
9826 9848 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
9827 9849 lgrp_id_t i;
9828 9850
9829 9851 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9830 9852 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
9831 9853 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9832 9854 ASSERT(svd->tr_state == SEGVN_TR_ON);
9833 9855 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9834 9856 ASSERT(svd->amp != NULL);
9835 9857 ASSERT(svd->amp->refcnt >= 1);
9836 9858 ASSERT(svd->anon_index == 0);
9837 9859 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9838 9860 ASSERT(svntr_hashtab != NULL);
9839 9861
9840 9862 mutex_enter(&svntr_hashtab[hash].tr_lock);
9841 9863 prv_svntrp = &svntr_hashtab[hash].tr_head;
9842 9864 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
9843 9865 ASSERT(svntrp->tr_refcnt != 0);
9844 9866 if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
9845 9867 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
9846 9868 break;
9847 9869 }
9848 9870 }
9849 9871 if (svntrp == NULL) {
9850 9872 panic("segvn_textunrepl: svntr record not found");
9851 9873 }
9852 9874 if (svntrp->tr_amp[lgrp_id] != svd->amp) {
9853 9875 panic("segvn_textunrepl: amp mismatch");
9854 9876 }
9855 9877 svd->tr_state = SEGVN_TR_OFF;
9856 9878 svd->amp = NULL;
9857 9879 if (svd->svn_trprev == NULL) {
9858 9880 ASSERT(svntrp->tr_svnhead == svd);
9859 9881 svntrp->tr_svnhead = svd->svn_trnext;
9860 9882 if (svntrp->tr_svnhead != NULL) {
9861 9883 svntrp->tr_svnhead->svn_trprev = NULL;
9862 9884 }
9863 9885 svd->svn_trnext = NULL;
9864 9886 } else {
9865 9887 svd->svn_trprev->svn_trnext = svd->svn_trnext;
9866 9888 if (svd->svn_trnext != NULL) {
9867 9889 svd->svn_trnext->svn_trprev = svd->svn_trprev;
9868 9890 svd->svn_trnext = NULL;
9869 9891 }
9870 9892 svd->svn_trprev = NULL;
9871 9893 }
9872 9894 if (--svntrp->tr_refcnt) {
9873 9895 mutex_exit(&svntr_hashtab[hash].tr_lock);
9874 9896 goto done;
9875 9897 }
9876 9898 *prv_svntrp = svntrp->tr_next;
9877 9899 mutex_exit(&svntr_hashtab[hash].tr_lock);
9878 9900 for (i = 0; i < NLGRPS_MAX; i++) {
9879 9901 struct anon_map *amp = svntrp->tr_amp[i];
9880 9902 if (amp == NULL) {
9881 9903 continue;
9882 9904 }
9883 9905 ASSERT(amp->refcnt == 1);
9884 9906 ASSERT(amp->swresv == size);
9885 9907 ASSERT(amp->size == size);
9886 9908 ASSERT(amp->a_szc == szc);
9887 9909 if (amp->a_szc != 0) {
9888 9910 anon_free_pages(amp->ahp, 0, size, szc);
9889 9911 } else {
9890 9912 anon_free(amp->ahp, 0, size);
9891 9913 }
9892 9914 svntrp->tr_amp[i] = NULL;
9893 9915 ASSERT(segvn_textrepl_bytes >= size);
9894 9916 atomic_add_long(&segvn_textrepl_bytes, -size);
9895 9917 anon_unresv_zone(amp->swresv, NULL);
9896 9918 amp->refcnt = 0;
9897 9919 anonmap_free(amp);
9898 9920 }
9899 9921 kmem_cache_free(svntr_cache, svntrp);
9900 9922 done:
9901 9923 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
9902 9924 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
9903 9925 }
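
The bucket scan above walks the chain through a pointer to the previous link (prv_svntrp), so once the matching svntr is found it can be spliced out with the single store *prv_svntrp = svntrp->tr_next and no special case for the head of the list. A self-contained sketch of that pointer-to-pointer idiom, with a hypothetical node type and key:

    #include <sys/types.h>

    typedef struct node {
    	struct node	*n_next;
    	int		n_key;
    } node_t;

    /*
     * Unlink and return the first node whose key matches, or NULL.
     * *prvp always names the link that points at the current node, so
     * removing the head and removing an interior node are the same store.
     */
    static node_t *
    list_remove(node_t **headp, int key)
    {
    	node_t **prvp = headp;
    	node_t *np;

    	for (; (np = *prvp) != NULL; prvp = &np->n_next) {
    		if (np->n_key == key) {
    			*prvp = np->n_next;
    			return (np);
    		}
    	}
    	return (NULL);
    }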
9904 9926
9905 9927 /*
9906 9928 * This is called when a MAP_SHARED writable mapping is created to a vnode
9907 9929 * that is currently used for execution (VVMEXEC flag is set). In this case we
9908 9930 * need to prevent further use of existing replicas.
9909 9931 */
9910 9932 static void
9911 9933 segvn_inval_trcache(vnode_t *vp)
9912 9934 {
9913 9935 ulong_t hash = SVNTR_HASH_FUNC(vp);
9914 9936 svntr_t *svntrp;
9915 9937
9916 9938 ASSERT(vp->v_flag & VVMEXEC);
9917 9939
9918 9940 if (svntr_hashtab == NULL) {
9919 9941 return;
9920 9942 }
9921 9943
9922 9944 mutex_enter(&svntr_hashtab[hash].tr_lock);
9923 9945 svntrp = svntr_hashtab[hash].tr_head;
9924 9946 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9925 9947 ASSERT(svntrp->tr_refcnt != 0);
9926 9948 if (svntrp->tr_vp == vp && svntrp->tr_valid) {
9927 9949 svntrp->tr_valid = 0;
9928 9950 }
9929 9951 }
9930 9952 mutex_exit(&svntr_hashtab[hash].tr_lock);
9931 9953 }
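
As the comment above notes, this invalidation is triggered from the mapping setup path when a writable MAP_SHARED mapping is being created over a vnode whose VVMEXEC flag is set. A hedged sketch of what such a call site can look like; the surrounding condition is illustrative only, not the exact caller code:

    if (type == MAP_SHARED && (prot & PROT_WRITE) &&
        vp != NULL && (vp->v_flag & VVMEXEC)) {
    	/* Writable shared mapping over running text: drop the replicas. */
    	segvn_inval_trcache(vp);
    }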
9932 9954
9933 9955 static void
9934 9956 segvn_trasync_thread(void)
9935 9957 {
9936 9958 callb_cpr_t cpr_info;
9937 9959 kmutex_t cpr_lock; /* just for CPR stuff */
9938 9960
9939 9961 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
9940 9962
9941 9963 CALLB_CPR_INIT(&cpr_info, &cpr_lock,
9942 9964 callb_generic_cpr, "segvn_async");
9943 9965
9944 9966 if (segvn_update_textrepl_interval == 0) {
9945 9967 segvn_update_textrepl_interval = segvn_update_tr_time * hz;
9946 9968 } else {
9947 9969 segvn_update_textrepl_interval *= hz;
9948 9970 }
9949 9971 (void) timeout(segvn_trupdate_wakeup, NULL,
9950 9972 segvn_update_textrepl_interval);
9951 9973
9952 9974 for (;;) {
9953 9975 mutex_enter(&cpr_lock);
9954 9976 CALLB_CPR_SAFE_BEGIN(&cpr_info);
9955 9977 mutex_exit(&cpr_lock);
9956 9978 sema_p(&segvn_trasync_sem);
9957 9979 mutex_enter(&cpr_lock);
9958 9980 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
9959 9981 mutex_exit(&cpr_lock);
9960 9982 segvn_trupdate();
9961 9983 }
9962 9984 }
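
The wait in the loop above is bracketed by CALLB_CPR_SAFE_BEGIN/END so that the thread is counted as CPR-safe while it sleeps on the semaphore and a suspend does not have to wait for it; cpr_lock exists only to satisfy that protocol. The same idiom reduced to its essentials, reusing cpr_info and cpr_lock from above and a hypothetical work_sem in place of segvn_trasync_sem:

    mutex_enter(&cpr_lock);
    CALLB_CPR_SAFE_BEGIN(&cpr_info);	/* mark the thread CPR-safe */
    mutex_exit(&cpr_lock);

    sema_p(&work_sem);			/* sleep; a CPR suspend may proceed */

    mutex_enter(&cpr_lock);
    CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);	/* leave the CPR-safe state */
    mutex_exit(&cpr_lock);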
9963 9985
9964 9986 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;
9965 9987
9966 9988 static void
9967 9989 segvn_trupdate_wakeup(void *dummy)
9968 9990 {
9969 9991 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();
9970 9992
9971 9993 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
9972 9994 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
9973 9995 sema_v(&segvn_trasync_sem);
9974 9996 }
9975 9997
9976 9998 if (!segvn_disable_textrepl_update &&
9977 9999 segvn_update_textrepl_interval != 0) {
9978 10000 (void) timeout(segvn_trupdate_wakeup, dummy,
9979 10001 segvn_update_textrepl_interval);
9980 10002 }
9981 10003 }
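
segvn_trupdate_wakeup() posts the async thread only when lgrp_get_trthr_migrations() has changed since the previous snapshot, so the worker stays asleep while no threads migrate between lgroups, and the timeout re-arms itself unless updates were disabled. A compact sketch of that change-detection gate, with all names hypothetical stand-ins for the real counter, semaphore, and tunables:

    #include <sys/types.h>
    #include <sys/ksynch.h>
    #include <sys/ddi.h>

    extern uint64_t	get_activity_counter(void);	/* hypothetical source */

    static ksema_t	work_sem;		/* hypothetical worker semaphore */
    static uint64_t	last_snapshot;
    static int		update_disabled;
    static clock_t	update_interval;

    static void
    periodic_wakeup(void *arg)
    {
    	uint64_t cur = get_activity_counter();

    	/* Wake the worker only if activity changed since the last tick. */
    	if (cur != last_snapshot) {
    		last_snapshot = cur;
    		sema_v(&work_sem);
    	}

    	/* Re-arm the timeout unless periodic updates were turned off. */
    	if (!update_disabled && update_interval != 0)
    		(void) timeout(periodic_wakeup, arg, update_interval);
    }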
9982 10004
9983 10005 static void
9984 10006 segvn_trupdate(void)
9985 10007 {
9986 10008 ulong_t hash;
9987 10009 svntr_t *svntrp;
9988 10010 segvn_data_t *svd;
9989 10011
9990 10012 ASSERT(svntr_hashtab != NULL);
9991 10013
9992 10014 for (hash = 0; hash < svntr_hashtab_sz; hash++) {
9993 10015 mutex_enter(&svntr_hashtab[hash].tr_lock);
9994 10016 svntrp = svntr_hashtab[hash].tr_head;
9995 10017 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9996 10018 ASSERT(svntrp->tr_refcnt != 0);
9997 10019 svd = svntrp->tr_svnhead;
9998 10020 for (; svd != NULL; svd = svd->svn_trnext) {
9999 10021 segvn_trupdate_seg(svd->seg, svd, svntrp,
10000 10022 hash);
10001 10023 }
10002 10024 }
10003 10025 mutex_exit(&svntr_hashtab[hash].tr_lock);
10004 10026 }
10005 10027 }
10006 10028
10007 10029 static void
10008 10030 segvn_trupdate_seg(struct seg *seg,
10009 10031 segvn_data_t *svd,
10010 10032 svntr_t *svntrp,
10011 10033 ulong_t hash)
10012 10034 {
10013 10035 proc_t *p;
10014 10036 lgrp_id_t lgrp_id;
10015 10037 struct as *as;
10016 10038 size_t size;
10017 10039 struct anon_map *amp;
10018 10040
10019 10041 ASSERT(svd->vp != NULL);
10020 10042 ASSERT(svd->vp == svntrp->tr_vp);
10021 10043 ASSERT(svd->offset == svntrp->tr_off);
10022 10044 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
10023 10045 ASSERT(seg != NULL);
10024 10046 ASSERT(svd->seg == seg);
10025 10047 ASSERT(seg->s_data == (void *)svd);
10026 10048 ASSERT(seg->s_szc == svntrp->tr_szc);
10027 10049 ASSERT(svd->tr_state == SEGVN_TR_ON);
10028 10050 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10029 10051 ASSERT(svd->amp != NULL);
10030 10052 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10031 10053 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
10032 10054 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
10033 10055 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
10034 10056 ASSERT(svntrp->tr_refcnt != 0);
10035 10057 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));
10036 10058
10037 10059 as = seg->s_as;
10038 10060 ASSERT(as != NULL && as != &kas);
10039 10061 p = as->a_proc;
10040 10062 ASSERT(p != NULL);
10041 10063 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
10042 10064 lgrp_id = p->p_t1_lgrpid;
10043 10065 if (lgrp_id == LGRP_NONE) {
10044 10066 return;
10045 10067 }
10046 10068 ASSERT(lgrp_id < NLGRPS_MAX);
10047 10069 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10048 10070 return;
10049 10071 }
10050 10072
10051 10073 /*
10052 10074  * Use tryenter locking since we are taking the as/seg and svntr hash
10053 10075  * locks in the reverse of the synchronous thread order.
10054 10076 */
10055 10077 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
10056 10078 SEGVN_TR_ADDSTAT(nolock);
10057 10079 if (segvn_lgrp_trthr_migrs_snpsht) {
10058 10080 segvn_lgrp_trthr_migrs_snpsht = 0;
10059 10081 }
10060 10082 return;
10061 10083 }
10062 10084 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10063 10085 AS_LOCK_EXIT(as, &as->a_lock);
10064 10086 SEGVN_TR_ADDSTAT(nolock);
10065 10087 if (segvn_lgrp_trthr_migrs_snpsht) {
10066 10088 segvn_lgrp_trthr_migrs_snpsht = 0;
10067 10089 }
10068 10090 return;
10069 10091 }
10070 10092 size = seg->s_size;
10071 10093 if (svntrp->tr_amp[lgrp_id] == NULL) {
10072 10094 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
10073 10095 if (trmem > segvn_textrepl_max_bytes) {
10074 10096 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10075 10097 AS_LOCK_EXIT(as, &as->a_lock);
10076 10098 atomic_add_long(&segvn_textrepl_bytes, -size);
10077 10099 SEGVN_TR_ADDSTAT(normem);
10078 10100 return;
10079 10101 }
10080 10102 if (anon_try_resv_zone(size, NULL) == 0) {
10081 10103 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10082 10104 AS_LOCK_EXIT(as, &as->a_lock);
10083 10105 atomic_add_long(&segvn_textrepl_bytes, -size);
10084 10106 SEGVN_TR_ADDSTAT(noanon);
10085 10107 return;
10086 10108 }
10087 10109 amp = anonmap_alloc(size, size, KM_NOSLEEP);
10088 10110 if (amp == NULL) {
10089 10111 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10090 10112 AS_LOCK_EXIT(as, &as->a_lock);
10091 10113 atomic_add_long(&segvn_textrepl_bytes, -size);
10092 10114 anon_unresv_zone(size, NULL);
10093 10115 SEGVN_TR_ADDSTAT(nokmem);
10094 10116 return;
10095 10117 }
10096 10118 ASSERT(amp->refcnt == 1);
10097 10119 amp->a_szc = seg->s_szc;
10098 10120 svntrp->tr_amp[lgrp_id] = amp;
10099 10121 }
10100 10122 /*
10101 10123  * We don't need to drop the bucket lock, but we do so here to give
10102 10124  * other threads a chance. svntr and svd can't be unlinked as long as
10103 10125  * the segment lock is held as a writer and the AS lock is held as well.
10104 10126  * After we retake the bucket lock we'll continue from where we left
10105 10127  * off. We'll still be able to reach the end of either list since new
10106 10128  * entries are always added to the beginning of the lists.
10107 10129 */
10108 10130 mutex_exit(&svntr_hashtab[hash].tr_lock);
10109 10131 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
10110 10132 mutex_enter(&svntr_hashtab[hash].tr_lock);
10111 10133
10112 10134 ASSERT(svd->tr_state == SEGVN_TR_ON);
10113 10135 ASSERT(svd->amp != NULL);
10114 10136 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10115 10137 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10116 10138 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10117 10139
10118 10140 svd->tr_policy_info.mem_lgrpid = lgrp_id;
10119 10141 svd->amp = svntrp->tr_amp[lgrp_id];
10120 10142 p->p_tr_lgrpid = NLGRPS_MAX;
10121 10143 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10122 10144 AS_LOCK_EXIT(as, &as->a_lock);
10123 10145
10124 10146 ASSERT(svntrp->tr_refcnt != 0);
10125 10147 ASSERT(svd->vp == svntrp->tr_vp);
10126 10148 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10127 10149 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10128 10150 ASSERT(svd->seg == seg);
10129 10151 ASSERT(svd->tr_state == SEGVN_TR_ON);
10130 10152
10131 10153 SEGVN_TR_ADDSTAT(asyncrepl);
10132 10154 }
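
Because this async path grabs the AS and segment locks while already holding a svntr bucket lock, the reverse of the order used by the synchronous fault path, it cannot block on them without risking deadlock; hence the AS_LOCK_TRYENTER/SEGVN_LOCK_TRYENTER calls that simply give up and return on contention. A minimal sketch of that trylock-and-back-off pattern, using plain krwlock_t names chosen for illustration:

    #include <sys/ksynch.h>

    /*
     * Attempt to take two rwlocks in the "wrong" order.  On any failure,
     * release whatever was taken and report failure so the caller can
     * back off and retry later instead of deadlocking.
     */
    static int
    try_lock_reverse(krwlock_t *as_lock, krwlock_t *seg_lock)
    {
    	if (!rw_tryenter(as_lock, RW_READER))
    		return (0);
    	if (!rw_tryenter(seg_lock, RW_WRITER)) {
    		rw_exit(as_lock);
    		return (0);
    	}
    	return (1);	/* both locks held by the caller */
    }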