6345 remove xhat support
--- old/usr/src/uts/common/vm/seg_vn.c
+++ new/usr/src/uts/common/vm/seg_vn.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2015, Joyent, Inc. All rights reserved.
24 24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 32 * The Regents of the University of California
33 33 * All Rights Reserved
34 34 *
35 35 * University Acknowledgment- Portions of this document are derived from
36 36 * software developed by the University of California, Berkeley, and its
37 37 * contributors.
38 38 */
39 39
40 40 /*
41 41 * VM - shared or copy-on-write from a vnode/anonymous memory.
42 42 */
43 43
44 44 #include <sys/types.h>
45 45 #include <sys/param.h>
46 46 #include <sys/t_lock.h>
47 47 #include <sys/errno.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/mman.h>
50 50 #include <sys/debug.h>
51 51 #include <sys/cred.h>
52 52 #include <sys/vmsystm.h>
53 53 #include <sys/tuneable.h>
54 54 #include <sys/bitmap.h>
55 55 #include <sys/swap.h>
56 56 #include <sys/kmem.h>
57 57 #include <sys/sysmacros.h>
58 58 #include <sys/vtrace.h>
59 59 #include <sys/cmn_err.h>
60 60 #include <sys/callb.h>
61 61 #include <sys/vm.h>
62 62 #include <sys/dumphdr.h>
63 63 #include <sys/lgrp.h>
64 64
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_vn.h>
69 69 #include <vm/pvn.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/vpage.h>
73 73 #include <sys/proc.h>
74 74 #include <sys/task.h>
75 75 #include <sys/project.h>
76 76 #include <sys/zone.h>
77 77 #include <sys/shm_impl.h>
78 78
79 79 /*
80 80 * segvn_fault needs a temporary page list array. To avoid calling kmem all
81 81 * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if
82 82 * it can. In the rare case when this page list is not large enough, it
83 83 * goes and gets a large enough array from kmem.
84 84 *
85 85 * This small page list array covers either 8 pages or 64kB worth of pages -
86 86 * whichever is smaller.
87 87 */
88 88 #define PVN_MAX_GETPAGE_SZ 0x10000
89 89 #define PVN_MAX_GETPAGE_NUM 0x8
90 90
91 91 #if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
92 92 #define PVN_GETPAGE_SZ ptob(PVN_MAX_GETPAGE_NUM)
93 93 #define PVN_GETPAGE_NUM PVN_MAX_GETPAGE_NUM
94 94 #else
95 95 #define PVN_GETPAGE_SZ PVN_MAX_GETPAGE_SZ
96 96 #define PVN_GETPAGE_NUM btop(PVN_MAX_GETPAGE_SZ)
97 97 #endif
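
The #if above resolves the "whichever is smaller" rule at compile time. An
illustrative expansion (not part of the source):

    /*
     * 4K pages (x86):   8 * PAGESIZE = 32K, so 64K > 32K is true
     *     -> PVN_GETPAGE_SZ = ptob(8) = 32K, PVN_GETPAGE_NUM = 8
     * 8K pages (sparc): 8 * PAGESIZE = 64K, so 64K > 64K is false
     *     -> PVN_GETPAGE_SZ = 64K, PVN_GETPAGE_NUM = btop(64K) = 8
     *
     * Either way the preallocated page list is at most 8 entries and
     * never covers more than 64K worth of pages.
     */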
98 98
99 99 /*
100 100 * Private seg op routines.
101 101 */
102 102 static int segvn_dup(struct seg *seg, struct seg *newseg);
103 103 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
104 104 static void segvn_free(struct seg *seg);
105 105 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
106 106 caddr_t addr, size_t len, enum fault_type type,
107 107 enum seg_rw rw);
108 108 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
109 109 static int segvn_setprot(struct seg *seg, caddr_t addr,
110 110 size_t len, uint_t prot);
111 111 static int segvn_checkprot(struct seg *seg, caddr_t addr,
112 112 size_t len, uint_t prot);
113 113 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
114 114 static size_t segvn_swapout(struct seg *seg);
115 115 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len,
116 116 int attr, uint_t flags);
117 117 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len,
118 118 char *vec);
119 119 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
120 120 int attr, int op, ulong_t *lockmap, size_t pos);
121 121 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
122 122 uint_t *protv);
123 123 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr);
124 124 static int segvn_gettype(struct seg *seg, caddr_t addr);
125 125 static int segvn_getvp(struct seg *seg, caddr_t addr,
126 126 struct vnode **vpp);
127 127 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len,
128 128 uint_t behav);
129 129 static void segvn_dump(struct seg *seg);
130 130 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
131 131 struct page ***ppp, enum lock_type type, enum seg_rw rw);
132 132 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
133 133 uint_t szc);
134 134 static int segvn_getmemid(struct seg *seg, caddr_t addr,
135 135 memid_t *memidp);
136 136 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t);
137 137 static int segvn_capable(struct seg *seg, segcapability_t capable);
138 138 static int segvn_inherit(struct seg *, caddr_t, size_t, uint_t);
139 139
140 140 struct seg_ops segvn_ops = {
141 141 segvn_dup,
142 142 segvn_unmap,
143 143 segvn_free,
144 144 segvn_fault,
145 145 segvn_faulta,
146 146 segvn_setprot,
147 147 segvn_checkprot,
148 148 segvn_kluster,
149 149 segvn_swapout,
150 150 segvn_sync,
151 151 segvn_incore,
152 152 segvn_lockop,
153 153 segvn_getprot,
154 154 segvn_getoffset,
155 155 segvn_gettype,
156 156 segvn_getvp,
157 157 segvn_advise,
158 158 segvn_dump,
159 159 segvn_pagelock,
160 160 segvn_setpagesize,
161 161 segvn_getmemid,
162 162 segvn_getpolicy,
163 163 segvn_capable,
164 164 segvn_inherit
165 165 };
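
struct seg_ops is the segment-driver vtable: generic VM code dispatches
through seg->s_ops rather than calling the segvn_* routines by name. A sketch
of the dispatch pattern, following the SEGOP_* wrapper convention of
<vm/seg.h> (abbreviated here for illustration):

    #define SEGOP_DUP(s, n)         (*(s)->s_ops->dup)((s), (n))
    #define SEGOP_FAULT(h, s, a, l, t, rw) \
                    (*(s)->s_ops->fault)((h), (s), (a), (l), (t), (rw))

Any segment driver (segvn, segspt, segdev, ...) plugs into the same call
sites by supplying its own ops table.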
166 166
167 167 /*
168 168 * Common zfod structures, provided as a shorthand for others to use.
169 169 */
170 170 static segvn_crargs_t zfod_segvn_crargs =
171 171 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
172 172 static segvn_crargs_t kzfod_segvn_crargs =
173 173 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
174 174 PROT_ALL & ~PROT_USER);
175 175 static segvn_crargs_t stack_noexec_crargs =
176 176 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);
177 177
178 178 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */
179 179 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */
180 180 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */
181 181 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
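
These argsp pointers let other kernel code request a standard
zero-fill-on-demand mapping without building a segvn_crargs_t of its own. A
sketched call site (error handling elided; as_map() is the generic mapping
entry point):

    /*
     * Sketch: map 'len' bytes of user ZFOD anonymous memory at 'addr'
     * in address space 'as'.
     */
    static int
    map_user_zfod(struct as *as, caddr_t addr, size_t len)
    {
            return (as_map(as, addr, len, segvn_create, zfod_argsp));
    }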
182 182
183 183 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
184 184
185 185 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */
186 186
187 187 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */
188 188 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */
189 189 uint_t segvn_pglock_comb_bshift;
190 190 size_t segvn_pglock_comb_palign;
191 191
192 192 static int segvn_concat(struct seg *, struct seg *, int);
193 193 static int segvn_extend_prev(struct seg *, struct seg *,
194 194 struct segvn_crargs *, size_t);
195 195 static int segvn_extend_next(struct seg *, struct seg *,
196 196 struct segvn_crargs *, size_t);
197 197 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
198 198 static void segvn_pagelist_rele(page_t **);
199 199 static void segvn_setvnode_mpss(vnode_t *);
200 200 static void segvn_relocate_pages(page_t **, page_t *);
201 201 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
202 202 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
203 203 uint_t, page_t **, page_t **, uint_t *, int *);
204 204 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
205 205 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
206 206 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
207 207 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
208 208 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
209 209 u_offset_t, struct vpage *, page_t **, uint_t,
210 210 enum fault_type, enum seg_rw, int);
211 211 static void segvn_vpage(struct seg *);
212 212 static size_t segvn_count_swap_by_vpages(struct seg *);
213 213
214 214 static void segvn_purge(struct seg *seg);
215 215 static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
216 216 enum seg_rw, int);
217 217 static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
218 218 enum seg_rw, int);
219 219
220 220 static int sameprot(struct seg *, caddr_t, size_t);
221 221
222 222 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
223 223 static int segvn_clrszc(struct seg *);
224 224 static struct seg *segvn_split_seg(struct seg *, caddr_t);
225 225 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
226 226 ulong_t, uint_t);
227 227
228 228 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
229 229 size_t, void *, u_offset_t);
230 230
231 231 static struct kmem_cache *segvn_cache;
232 232 static struct kmem_cache **segvn_szc_cache;
233 233
234 234 #ifdef VM_STATS
235 235 static struct segvnvmstats_str {
236 236 ulong_t fill_vp_pages[31];
237 237 ulong_t fltvnpages[49];
238 238 ulong_t fullszcpages[10];
239 239 ulong_t relocatepages[3];
240 240 ulong_t fltanpages[17];
241 241 ulong_t pagelock[2];
242 242 ulong_t demoterange[3];
243 243 } segvnvmstats;
244 244 #endif /* VM_STATS */
245 245
246 246 #define SDR_RANGE 1 /* demote entire range */
247 247 #define SDR_END 2 /* demote non aligned ends only */
248 248
249 249 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \
250 250 if ((len) != 0) { \
251 251 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \
252 252 ASSERT(lpgaddr >= (seg)->s_base); \
253 253 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \
254 254 (len)), pgsz); \
255 255 ASSERT(lpgeaddr > lpgaddr); \
256 256 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \
257 257 } else { \
258 258 lpgeaddr = lpgaddr = (addr); \
259 259 } \
260 260 }
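
CALC_LPG_REGION widens [addr, addr + len) outward to the enclosing
large-page boundaries. A worked example with hypothetical values, assuming a
4M large page:

    /*
     *   addr = 0x12345000, len = 0x2000, pgsz = 0x400000 (4M)
     *
     *   lpgaddr  = P2ALIGN(0x12345000, 0x400000)            = 0x12000000
     *   lpgeaddr = P2ROUNDUP(0x12345000 + 0x2000, 0x400000) = 0x12400000
     *
     * The range expands to the single 4M page containing it. For a
     * power-of-two align, P2ALIGN(x, a) is (x & -a) and P2ROUNDUP(x, a)
     * is (-(-x & -a)) per sys/sysmacros.h.
     */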
261 261
262 262 /*ARGSUSED*/
263 263 static int
264 264 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
265 265 {
266 266 struct segvn_data *svd = buf;
267 267
268 268 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
269 269 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
270 270 svd->svn_trnext = svd->svn_trprev = NULL;
271 271 return (0);
272 272 }
273 273
274 274 /*ARGSUSED1*/
275 275 static void
276 276 segvn_cache_destructor(void *buf, void *cdrarg)
277 277 {
278 278 struct segvn_data *svd = buf;
279 279
280 280 rw_destroy(&svd->lock);
281 281 mutex_destroy(&svd->segfree_syncmtx);
282 282 }
283 283
284 284 /*ARGSUSED*/
285 285 static int
286 286 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
287 287 {
288 288 bzero(buf, sizeof (svntr_t));
289 289 return (0);
290 290 }
291 291
292 292 /*
293 293 * Patching this variable to non-zero allows the system to run with
294 294 * stacks marked as "not executable". It's a bit of a kludge, but is
295 295 * provided as a tweakable for platforms that export those ABIs
296 296 * (e.g. sparc V8) that have executable stacks enabled by default.
297 297 * There are also some restrictions for platforms that don't actually
298 298 * implement 'noexec' protections.
299 299 *
300 300 * Once enabled, the system is (therefore) unable to provide a fully
301 301 * ABI-compliant execution environment, though practically speaking,
302 302 * most everything works. The exceptions are generally some interpreters
303 303 * and debuggers that create executable code on the stack and jump
304 304 * into it (without explicitly mprotecting the address range to include
305 305 * PROT_EXEC).
306 306 *
307 307 * One important class of applications that this disables is those
308 308 * that have been transformed into malicious agents using one of the
309 309 * numerous "buffer overflow" attacks. See 4007890.
310 310 */
311 311 int noexec_user_stack = 0;
312 312 int noexec_user_stack_log = 1;
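
Both are plain patchable globals, so the usual way to enable this
system-wide is an /etc/system entry (standard Solaris/illumos tuning
practice):

    * /etc/system: disallow instruction execution from user stacks;
    * noexec_user_stack_log is already 1, so attempts keep being logged.
    set noexec_user_stack = 1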
313 313
314 314 int segvn_lpg_disable = 0;
315 315 uint_t segvn_maxpgszc = 0;
316 316
317 317 ulong_t segvn_vmpss_clrszc_cnt;
318 318 ulong_t segvn_vmpss_clrszc_err;
319 319 ulong_t segvn_fltvnpages_clrszc_cnt;
320 320 ulong_t segvn_fltvnpages_clrszc_err;
321 321 ulong_t segvn_setpgsz_align_err;
322 322 ulong_t segvn_setpgsz_anon_align_err;
323 323 ulong_t segvn_setpgsz_getattr_err;
324 324 ulong_t segvn_setpgsz_eof_err;
325 325 ulong_t segvn_faultvnmpss_align_err1;
326 326 ulong_t segvn_faultvnmpss_align_err2;
327 327 ulong_t segvn_faultvnmpss_align_err3;
328 328 ulong_t segvn_faultvnmpss_align_err4;
329 329 ulong_t segvn_faultvnmpss_align_err5;
330 330 ulong_t segvn_vmpss_pageio_deadlk_err;
331 331
332 332 int segvn_use_regions = 1;
333 333
334 334 /*
335 335 * Segvn supports text replication optimization for NUMA platforms. Text
336 336 * replicas are represented by anon maps (amp). There's one amp per text file
337 337 * region per lgroup. A process chooses the amp for each of its text mappings
338 338 * based on the lgroup assignment of its main thread (t_tid = 1). All
339 339 * processes that want a replica on a particular lgroup for the same text file
340 340 * mapping share the same amp. amps are looked up in the svntr_hashtab hash
341 341 * table with vp,off,size,szc used as a key. Text replication segments are
342 342 * read-only MAP_PRIVATE|MAP_TEXT segments that map a vnode. Replication is
343 343 * achieved by forcing COW faults from the vnode to the amp and mapping amp
344 344 * pages instead of vnode pages. The replication amp is assigned to a
345 345 * segment when it takes its first pagefault. To handle main thread lgroup
346 346 * rehoming, segvn_trasync_thread periodically rechecks whether the process
347 347 * still maps an amp local to the main thread; if not, the async thread
348 348 * forces the process to remap to an amp in the new home lgroup of the main
349 349 * thread. The current text replication implementation only benefits
350 350 * workloads that do most of their work in the main thread of a process, or
351 351 * whose threads all run in the same lgroup. Extending the benefit to other
352 352 * multithreaded workloads would need further work in the hat layer to
353 353 * allow the same virtual address in the same hat to simultaneously map
354 354 * different physical addresses (i.e. page table replication would be
355 355 * needed for x86).
356 356 *
357 357 * amp pages are used instead of vnode pages as long as the segment has a
358 358 * very simple life cycle: it's created via segvn_create(), handles S_EXEC
359 359 * (S_READ) pagefaults, and is fully unmapped. If anything more complicated
360 360 * happens, such as a protection change, a real COW fault, a pagesize
361 361 * change, an MC_LOCK request, or a partial unmap, we turn off text
362 362 * replication by converting the segment back to a vnode-only segment
363 363 * (unmapping the segment's address range and setting svd->amp to NULL).
364 364 *
365 365 * The original file can be changed after an amp is inserted into
366 366 * svntr_hashtab. Processes that are launched after the file has already
367 367 * changed can't use the replicas created prior to the file change. To
368 368 * implement this, hash entries are timestamped: replicas can only be used
369 369 * if the current file modification time matches the timestamp saved when
370 370 * the hash entry was created. However, timestamps alone are not sufficient
371 371 * to detect file modification via mmap(MAP_SHARED) mappings, so we deal
372 372 * with file changes via MAP_SHARED mappings differently. When writable
373 373 * MAP_SHARED mappings are created to vnodes marked as executable, we mark
374 374 * all existing replicas for this vnode as unusable for future text
375 375 * mappings, and we don't create new replicas for files that currently have
376 376 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
377 377 * true).
378 378 */
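
The lookup described above keys each bucket entry on (vp, off, size, szc)
plus the saved modification time. A hedged sketch of such a lookup; the
bucket hash and the tr_* field names are illustrative, not the actual
svntr_t layout:

    svntr_t *
    svntr_lookup_sketch(vnode_t *vp, u_offset_t off, size_t size, uint_t szc,
        timestruc_t mtime)
    {
            ulong_t hash = ((uintptr_t)vp >> 4) % svntr_hashtab_sz;
            svntr_bucket_t *b = &svntr_hashtab[hash];
            svntr_t *trp;

            mutex_enter(&b->tr_lock);
            for (trp = b->tr_head; trp != NULL; trp = trp->tr_next) {
                    if (trp->tr_vp == vp && trp->tr_off == off &&
                        trp->tr_sz == size && trp->tr_szc == szc &&
                        trp->tr_mtime.tv_sec == mtime.tv_sec)
                            break;  /* usable replica entry */
            }
            mutex_exit(&b->tr_lock);
            return (trp);
    }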
379 379
380 380 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20)
381 381 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;
382 382
383 383 static ulong_t svntr_hashtab_sz = 512;
384 384 static svntr_bucket_t *svntr_hashtab = NULL;
385 385 static struct kmem_cache *svntr_cache;
386 386 static svntr_stats_t *segvn_textrepl_stats;
387 387 static ksema_t segvn_trasync_sem;
388 388
389 389 int segvn_disable_textrepl = 1;
390 390 size_t textrepl_size_thresh = (size_t)-1;
391 391 size_t segvn_textrepl_bytes = 0;
392 392 size_t segvn_textrepl_max_bytes = 0;
393 393 clock_t segvn_update_textrepl_interval = 0;
394 394 int segvn_update_tr_time = 10;
395 395 int segvn_disable_textrepl_update = 0;
396 396
397 397 static void segvn_textrepl(struct seg *);
398 398 static void segvn_textunrepl(struct seg *, int);
399 399 static void segvn_inval_trcache(vnode_t *);
400 400 static void segvn_trasync_thread(void);
401 401 static void segvn_trupdate_wakeup(void *);
402 402 static void segvn_trupdate(void);
403 403 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
404 404 ulong_t);
405 405
406 406 /*
407 407 * Initialize segvn data structures
408 408 */
409 409 void
410 410 segvn_init(void)
411 411 {
412 412 uint_t maxszc;
413 413 uint_t szc;
414 414 size_t pgsz;
415 415
416 416 segvn_cache = kmem_cache_create("segvn_cache",
417 417 sizeof (struct segvn_data), 0,
418 418 segvn_cache_constructor, segvn_cache_destructor, NULL,
419 419 NULL, NULL, 0);
420 420
421 421 if (segvn_lpg_disable == 0) {
422 422 szc = maxszc = page_num_pagesizes() - 1;
423 423 if (szc == 0) {
424 424 segvn_lpg_disable = 1;
425 425 }
426 426 if (page_get_pagesize(0) != PAGESIZE) {
427 427 panic("segvn_init: bad szc 0");
428 428 /*NOTREACHED*/
429 429 }
430 430 while (szc != 0) {
431 431 pgsz = page_get_pagesize(szc);
432 432 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
433 433 panic("segvn_init: bad szc %d", szc);
434 434 /*NOTREACHED*/
435 435 }
436 436 szc--;
437 437 }
438 438 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
439 439 segvn_maxpgszc = maxszc;
440 440 }
441 441
442 442 if (segvn_maxpgszc) {
443 443 segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
444 444 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
445 445 KM_SLEEP);
446 446 }
447 447
448 448 for (szc = 1; szc <= segvn_maxpgszc; szc++) {
449 449 char str[32];
450 450
451 451 (void) sprintf(str, "segvn_szc_cache%d", szc);
452 452 segvn_szc_cache[szc] = kmem_cache_create(str,
453 453 page_get_pagecnt(szc) * sizeof (page_t *), 0,
454 454 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
455 455 }
456 456
457 457
458 458 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
459 459 segvn_use_regions = 0;
460 460
461 461 /*
462 462 * For now shared regions and text replication segvn support
463 463 * are mutually exclusive. This is acceptable because a
464 464 * significant benefit from text replication has so far only
465 465 * been observed on AMD64 NUMA platforms (due to relatively
466 466 * small L2$ size), and we currently don't support shared
467 467 * regions on x86.
468 468 */
469 469 if (segvn_use_regions && !segvn_disable_textrepl) {
470 470 segvn_disable_textrepl = 1;
471 471 }
472 472
473 473 #if defined(_LP64)
474 474 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
475 475 !segvn_disable_textrepl) {
476 476 ulong_t i;
477 477 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);
478 478
479 479 svntr_cache = kmem_cache_create("svntr_cache",
480 480 sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
481 481 NULL, NULL, NULL, 0);
482 482 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
483 483 for (i = 0; i < svntr_hashtab_sz; i++) {
484 484 mutex_init(&svntr_hashtab[i].tr_lock, NULL,
485 485 MUTEX_DEFAULT, NULL);
486 486 }
487 487 segvn_textrepl_max_bytes = ptob(physmem) /
488 488 segvn_textrepl_max_bytes_factor;
489 489 segvn_textrepl_stats = kmem_zalloc(NCPU *
490 490 sizeof (svntr_stats_t), KM_SLEEP);
491 491 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
492 492 (void) thread_create(NULL, 0, segvn_trasync_thread,
493 493 NULL, 0, &p0, TS_RUN, minclsyspri);
494 494 }
495 495 #endif
496 496
497 497 if (!ISP2(segvn_pglock_comb_balign) ||
498 498 segvn_pglock_comb_balign < PAGESIZE) {
499 499 segvn_pglock_comb_balign = 1UL << 16; /* 64K */
500 500 }
501 501 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
502 502 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
503 503 }
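
The last three lines turn the byte alignment into a shift and a page count.
Since segvn_pglock_comb_balign has just been forced to a power of two,
highbit(x) - 1 is log2(x); with the 64K default:

    /*
     * highbit(0x10000) = 17 (1-based index of the highest set bit), so
     * segvn_pglock_comb_bshift = 16 and 1UL << 16 == 0x10000 again;
     * segvn_pglock_comb_palign = btop(0x10000) = 16 pages at 4K
     * PAGESIZE, 8 pages at 8K.
     */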
504 504
505 505 #define SEGVN_PAGEIO ((void *)0x1)
506 506 #define SEGVN_NOPAGEIO ((void *)0x2)
507 507
508 508 static void
509 509 segvn_setvnode_mpss(vnode_t *vp)
510 510 {
511 511 int err;
512 512
513 513 ASSERT(vp->v_mpssdata == NULL ||
514 514 vp->v_mpssdata == SEGVN_PAGEIO ||
515 515 vp->v_mpssdata == SEGVN_NOPAGEIO);
516 516
517 517 if (vp->v_mpssdata == NULL) {
518 518 if (vn_vmpss_usepageio(vp)) {
519 519 err = VOP_PAGEIO(vp, (page_t *)NULL,
520 520 (u_offset_t)0, 0, 0, CRED(), NULL);
521 521 } else {
522 522 err = ENOSYS;
523 523 }
524 524 /*
525 525 * set v_mpssdata just once per vnode life
526 526 * so that it never changes.
527 527 */
528 528 mutex_enter(&vp->v_lock);
529 529 if (vp->v_mpssdata == NULL) {
530 530 if (err == EINVAL) {
531 531 vp->v_mpssdata = SEGVN_PAGEIO;
532 532 } else {
533 533 vp->v_mpssdata = SEGVN_NOPAGEIO;
534 534 }
535 535 }
536 536 mutex_exit(&vp->v_lock);
537 537 }
538 538 }
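
The err == EINVAL test above is a deliberate probe: a degenerate
VOP_PAGEIO() call (no page list, zero length) draws EINVAL from filesystems
that implement pageio but ENOSYS from those that stub it out, and the answer
is latched once per vnode lifetime. A hypothetical consumer, relying on the
latch never changing:

    /*
     * Once v_mpssdata is set it never changes, so readers need no lock.
     */
    static int
    vnode_supports_pageio(vnode_t *vp)
    {
            return (vp->v_mpssdata == SEGVN_PAGEIO);
    }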
539 539
540 540 int
541 541 segvn_create(struct seg *seg, void *argsp)
542 542 {
543 543 struct segvn_crargs *a = (struct segvn_crargs *)argsp;
544 544 struct segvn_data *svd;
545 545 size_t swresv = 0;
546 546 struct cred *cred;
547 547 struct anon_map *amp;
548 548 int error = 0;
549 549 size_t pgsz;
550 550 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
551 551 int use_rgn = 0;
552 552 int trok = 0;
553 553
554 554 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
555 555
556 556 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
557 557 panic("segvn_create type");
558 558 /*NOTREACHED*/
559 559 }
560 560
561 561 /*
562 562 * Check arguments. If a shared anon structure is given then
563 563 * it is illegal to also specify a vp.
564 564 */
565 565 if (a->amp != NULL && a->vp != NULL) {
566 566 panic("segvn_create anon_map");
567 567 /*NOTREACHED*/
568 568 }
569 569
570 570 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
571 571 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
572 572 segvn_use_regions) {
573 573 use_rgn = 1;
574 574 }
575 575
576 576 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
577 577 if (a->type == MAP_SHARED)
578 578 a->flags &= ~MAP_NORESERVE;
579 579
580 580 if (a->szc != 0) {
581 581 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
582 582 (a->amp != NULL && a->type == MAP_PRIVATE) ||
583 583 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
584 584 a->szc = 0;
585 585 } else {
586 586 if (a->szc > segvn_maxpgszc)
587 587 a->szc = segvn_maxpgszc;
588 588 pgsz = page_get_pagesize(a->szc);
589 589 if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
590 590 !IS_P2ALIGNED(seg->s_size, pgsz)) {
591 591 a->szc = 0;
592 592 } else if (a->vp != NULL) {
593 593 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
594 594 /*
595 595 * paranoid check.
596 596 * hat_page_demote() is not supported
597 597 * on swapfs pages.
598 598 */
599 599 a->szc = 0;
600 600 } else if (map_addr_vacalign_check(seg->s_base,
601 601 a->offset & PAGEMASK)) {
602 602 a->szc = 0;
603 603 }
604 604 } else if (a->amp != NULL) {
605 605 pgcnt_t anum = btopr(a->offset);
606 606 pgcnt_t pgcnt = page_get_pagecnt(a->szc);
607 607 if (!IS_P2ALIGNED(anum, pgcnt)) {
608 608 a->szc = 0;
609 609 }
610 610 }
611 611 }
612 612 }
613 613
614 614 /*
615 615 * If segment may need private pages, reserve them now.
616 616 */
617 617 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
618 618 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
619 619 if (anon_resv_zone(seg->s_size,
620 620 seg->s_as->a_proc->p_zone) == 0)
621 621 return (EAGAIN);
622 622 swresv = seg->s_size;
623 623 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
624 624 seg, swresv, 1);
625 625 }
626 626
627 627 /*
628 628 * Reserve any mapping structures that may be required.
629 629 *
630 630 * Don't do it for segments that may use regions. It's currently a
631 631 * noop in the hat implementations anyway.
632 632 */
633 633 if (!use_rgn) {
634 634 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
635 635 }
636 636
637 637 if (a->cred) {
638 638 cred = a->cred;
639 639 crhold(cred);
640 640 } else {
641 641 crhold(cred = CRED());
642 642 }
643 643
644 644 /* Inform the vnode of the new mapping */
645 645 if (a->vp != NULL) {
646 646 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
647 647 seg->s_as, seg->s_base, seg->s_size, a->prot,
648 648 a->maxprot, a->type, cred, NULL);
649 649 if (error) {
650 650 if (swresv != 0) {
651 651 anon_unresv_zone(swresv,
652 652 seg->s_as->a_proc->p_zone);
653 653 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
654 654 "anon proc:%p %lu %u", seg, swresv, 0);
655 655 }
656 656 crfree(cred);
657 657 if (!use_rgn) {
658 658 hat_unload(seg->s_as->a_hat, seg->s_base,
659 659 seg->s_size, HAT_UNLOAD_UNMAP);
660 660 }
661 661 return (error);
662 662 }
663 663 /*
664 664 * svntr_hashtab will be NULL if we support shared regions.
665 665 */
666 666 trok = ((a->flags & MAP_TEXT) &&
667 667 (seg->s_size > textrepl_size_thresh ||
668 668 (a->flags & _MAP_TEXTREPL)) &&
669 669 lgrp_optimizations() && svntr_hashtab != NULL &&
670 670 a->type == MAP_PRIVATE && swresv == 0 &&
671 671 !(a->flags & MAP_NORESERVE) &&
672 672 seg->s_as != &kas && a->vp->v_type == VREG);
673 673
674 674 ASSERT(!trok || !use_rgn);
675 675 }
676 676
677 677 /*
678 678 * MAP_NORESERVE mappings don't count towards the VSZ of a process
679 679 * until we fault the pages in.
680 680 */
681 681 if ((a->vp == NULL || a->vp->v_type != VREG) &&
682 682 a->flags & MAP_NORESERVE) {
683 683 seg->s_as->a_resvsize -= seg->s_size;
684 684 }
685 685
686 686 /*
687 687 * If there is more than one segment in the address space, and they're
688 688 * adjacent virtually, try to concatenate them. Don't concatenate if an
689 689 * explicit anon_map structure was supplied (e.g., SystemV shared
690 690 * memory) or if we'll use text replication for this segment.
691 691 */
692 692 if (a->amp == NULL && !use_rgn && !trok) {
693 693 struct seg *pseg, *nseg;
694 694 struct segvn_data *psvd, *nsvd;
695 695 lgrp_mem_policy_t ppolicy, npolicy;
696 696 uint_t lgrp_mem_policy_flags = 0;
697 697 extern lgrp_mem_policy_t lgrp_mem_default_policy;
698 698
699 699 /*
700 700 * Memory policy flags (lgrp_mem_policy_flags) are valid when
701 701 * extending stack/heap segments.
702 702 */
703 703 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
704 704 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
705 705 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
706 706 } else {
707 707 /*
708 708 * Get policy when not extending it from another segment
709 709 */
710 710 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
711 711 }
712 712
713 713 /*
714 714 * First, try to concatenate the previous and new segments
715 715 */
716 716 pseg = AS_SEGPREV(seg->s_as, seg);
717 717 if (pseg != NULL &&
718 718 pseg->s_base + pseg->s_size == seg->s_base &&
719 719 pseg->s_ops == &segvn_ops) {
720 720 /*
721 721 * Get memory allocation policy from previous segment.
722 722 * When extension is specified (e.g. for heap) apply
723 723 * this policy to the new segment regardless of the
724 724 * outcome of segment concatenation. Extension occurs for a
725 725 * non-default policy; otherwise the default policy is used,
726 726 * based on the extended segment size.
727 727 */
728 728 psvd = (struct segvn_data *)pseg->s_data;
729 729 ppolicy = psvd->policy_info.mem_policy;
730 730 if (lgrp_mem_policy_flags ==
731 731 LGRP_MP_FLAG_EXTEND_UP) {
732 732 if (ppolicy != lgrp_mem_default_policy) {
733 733 mpolicy = ppolicy;
734 734 } else {
735 735 mpolicy = lgrp_mem_policy_default(
736 736 pseg->s_size + seg->s_size,
737 737 a->type);
738 738 }
739 739 }
740 740
741 741 if (mpolicy == ppolicy &&
742 742 (pseg->s_size + seg->s_size <=
743 743 segvn_comb_thrshld || psvd->amp == NULL) &&
744 744 segvn_extend_prev(pseg, seg, a, swresv) == 0) {
745 745 /*
746 746 * success! now try to concatenate
747 747 * with following seg
748 748 */
749 749 crfree(cred);
750 750 nseg = AS_SEGNEXT(pseg->s_as, pseg);
751 751 if (nseg != NULL &&
752 752 nseg != pseg &&
753 753 nseg->s_ops == &segvn_ops &&
754 754 pseg->s_base + pseg->s_size ==
755 755 nseg->s_base)
756 756 (void) segvn_concat(pseg, nseg, 0);
757 757 ASSERT(pseg->s_szc == 0 ||
758 758 (a->szc == pseg->s_szc &&
759 759 IS_P2ALIGNED(pseg->s_base, pgsz) &&
760 760 IS_P2ALIGNED(pseg->s_size, pgsz)));
761 761 return (0);
762 762 }
763 763 }
764 764
765 765 /*
766 766 * Failed, so try to concatenate with following seg
767 767 */
768 768 nseg = AS_SEGNEXT(seg->s_as, seg);
769 769 if (nseg != NULL &&
770 770 seg->s_base + seg->s_size == nseg->s_base &&
771 771 nseg->s_ops == &segvn_ops) {
772 772 /*
773 773 * Get memory allocation policy from next segment.
774 774 * When extension is specified (e.g. for stack) apply
775 775 * this policy to the new segment regardless of the
776 776 * outcome of segment concatenation. Extension occurs for a
777 777 * non-default policy; otherwise the default policy is used,
778 778 * based on the extended segment size.
779 779 */
780 780 nsvd = (struct segvn_data *)nseg->s_data;
781 781 npolicy = nsvd->policy_info.mem_policy;
782 782 if (lgrp_mem_policy_flags ==
783 783 LGRP_MP_FLAG_EXTEND_DOWN) {
784 784 if (npolicy != lgrp_mem_default_policy) {
785 785 mpolicy = npolicy;
786 786 } else {
787 787 mpolicy = lgrp_mem_policy_default(
788 788 nseg->s_size + seg->s_size,
789 789 a->type);
790 790 }
791 791 }
792 792
793 793 if (mpolicy == npolicy &&
794 794 segvn_extend_next(seg, nseg, a, swresv) == 0) {
795 795 crfree(cred);
796 796 ASSERT(nseg->s_szc == 0 ||
797 797 (a->szc == nseg->s_szc &&
798 798 IS_P2ALIGNED(nseg->s_base, pgsz) &&
799 799 IS_P2ALIGNED(nseg->s_size, pgsz)));
800 800 return (0);
801 801 }
802 802 }
803 803 }
804 804
805 805 if (a->vp != NULL) {
806 806 VN_HOLD(a->vp);
807 807 if (a->type == MAP_SHARED)
808 808 lgrp_shm_policy_init(NULL, a->vp);
809 809 }
810 810 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
811 811
812 812 seg->s_ops = &segvn_ops;
813 813 seg->s_data = (void *)svd;
814 814 seg->s_szc = a->szc;
815 815
816 816 svd->seg = seg;
817 817 svd->vp = a->vp;
818 818 /*
819 819 * Anonymous mappings have no backing file so the offset is meaningless.
820 820 */
821 821 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
822 822 svd->prot = a->prot;
823 823 svd->maxprot = a->maxprot;
824 824 svd->pageprot = 0;
825 825 svd->type = a->type;
826 826 svd->vpage = NULL;
827 827 svd->cred = cred;
828 828 svd->advice = MADV_NORMAL;
829 829 svd->pageadvice = 0;
830 830 svd->flags = (ushort_t)a->flags;
831 831 svd->softlockcnt = 0;
832 832 svd->softlockcnt_sbase = 0;
833 833 svd->softlockcnt_send = 0;
834 834 svd->svn_inz = 0;
835 835 svd->rcookie = HAT_INVALID_REGION_COOKIE;
836 836 svd->pageswap = 0;
837 837
838 838 if (a->szc != 0 && a->vp != NULL) {
839 839 segvn_setvnode_mpss(a->vp);
840 840 }
841 841 if (svd->type == MAP_SHARED && svd->vp != NULL &&
842 842 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
843 843 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
844 844 segvn_inval_trcache(svd->vp);
845 845 }
846 846
847 847 amp = a->amp;
848 848 if ((svd->amp = amp) == NULL) {
849 849 svd->anon_index = 0;
850 850 if (svd->type == MAP_SHARED) {
851 851 svd->swresv = 0;
852 852 /*
853 853 * Shared mappings to a vp need no other setup.
854 854 * If we have a shared mapping to an anon_map object
855 855 * which hasn't been allocated yet, allocate the
856 856 * struct now so that it will be properly shared
857 857 * by remembering the swap reservation there.
858 858 */
859 859 if (a->vp == NULL) {
860 860 svd->amp = anonmap_alloc(seg->s_size, swresv,
861 861 ANON_SLEEP);
862 862 svd->amp->a_szc = seg->s_szc;
863 863 }
864 864 } else {
865 865 /*
866 866 * Private mapping (with or without a vp).
867 867 * Allocate anon_map when needed.
868 868 */
869 869 svd->swresv = swresv;
870 870 }
871 871 } else {
872 872 pgcnt_t anon_num;
873 873
874 874 /*
875 875 * Mapping to an existing anon_map structure without a vp.
876 876 * For now we will ensure that the segment size isn't larger
877 877 * than the size - offset gives us. Later on we may wish to
878 878 * have the anon array dynamically allocated itself so that
879 879 * we don't always have to allocate all the anon pointer slots.
880 880 * This of course involves adding extra code to check that we
881 881 * aren't trying to use an anon pointer slot beyond the end
882 882 * of the currently allocated anon array.
883 883 */
884 884 if ((amp->size - a->offset) < seg->s_size) {
885 885 panic("segvn_create anon_map size");
886 886 /*NOTREACHED*/
887 887 }
888 888
889 889 anon_num = btopr(a->offset);
890 890
891 891 if (a->type == MAP_SHARED) {
892 892 /*
893 893 * SHARED mapping to a given anon_map.
894 894 */
895 895 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
896 896 amp->refcnt++;
897 897 if (a->szc > amp->a_szc) {
898 898 amp->a_szc = a->szc;
899 899 }
900 900 ANON_LOCK_EXIT(&amp->a_rwlock);
901 901 svd->anon_index = anon_num;
902 902 svd->swresv = 0;
903 903 } else {
904 904 /*
905 905 * PRIVATE mapping to a given anon_map.
906 906 * Make sure that all the needed anon
907 907 * structures are created (so that we will
908 908 * share the underlying pages if nothing
909 909 * is written by this mapping) and then
910 910 * duplicate the anon array as is done
911 911 * when a privately mapped segment is dup'ed.
912 912 */
913 913 struct anon *ap;
914 914 caddr_t addr;
915 915 caddr_t eaddr;
916 916 ulong_t anon_idx;
917 917 int hat_flag = HAT_LOAD;
918 918
919 919 if (svd->flags & MAP_TEXT) {
920 920 hat_flag |= HAT_LOAD_TEXT;
921 921 }
922 922
923 923 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
924 924 svd->amp->a_szc = seg->s_szc;
925 925 svd->anon_index = 0;
926 926 svd->swresv = swresv;
927 927
928 928 /*
929 929 * Prevent 2 threads from allocating anon
930 930 * slots simultaneously.
931 931 */
932 932 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
933 933 eaddr = seg->s_base + seg->s_size;
934 934
935 935 for (anon_idx = anon_num, addr = seg->s_base;
936 936 addr < eaddr; addr += PAGESIZE, anon_idx++) {
937 937 page_t *pp;
938 938
939 939 if ((ap = anon_get_ptr(amp->ahp,
940 940 anon_idx)) != NULL)
941 941 continue;
942 942
943 943 /*
944 944 * Allocate the anon struct now.
945 945 * Might as well load up translation
946 946 * to the page while we're at it...
947 947 */
948 948 pp = anon_zero(seg, addr, &ap, cred);
949 949 if (ap == NULL || pp == NULL) {
950 950 panic("segvn_create anon_zero");
951 951 /*NOTREACHED*/
952 952 }
953 953
954 954 /*
955 955 * Re-acquire the anon_map lock and
956 956 * initialize the anon array entry.
957 957 */
958 958 ASSERT(anon_get_ptr(amp->ahp,
959 959 anon_idx) == NULL);
960 960 (void) anon_set_ptr(amp->ahp, anon_idx, ap,
961 961 ANON_SLEEP);
962 962
963 963 ASSERT(seg->s_szc == 0);
964 964 ASSERT(!IS_VMODSORT(pp->p_vnode));
965 965
966 966 ASSERT(use_rgn == 0);
967 967 hat_memload(seg->s_as->a_hat, addr, pp,
968 968 svd->prot & ~PROT_WRITE, hat_flag);
969 969
970 970 page_unlock(pp);
971 971 }
972 972 ASSERT(seg->s_szc == 0);
973 973 anon_dup(amp->ahp, anon_num, svd->amp->ahp,
974 974 0, seg->s_size);
975 975 ANON_LOCK_EXIT(&amp->a_rwlock);
976 976 }
977 977 }
978 978
979 979 /*
980 980 * Set default memory allocation policy for segment
981 981 *
982 982 * Always set policy for private memory at least for initialization
983 983 * even if this is a shared memory segment
984 984 */
985 985 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);
986 986
987 987 if (svd->type == MAP_SHARED)
988 988 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
989 989 svd->vp, svd->offset, seg->s_size);
990 990
991 991 if (use_rgn) {
992 992 ASSERT(!trok);
993 993 ASSERT(svd->amp == NULL);
994 994 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
995 995 seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
996 996 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
997 997 HAT_REGION_TEXT);
998 998 }
999 999
1000 1000 ASSERT(!trok || !(svd->prot & PROT_WRITE));
1001 1001 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;
1002 1002
1003 1003 return (0);
1004 1004 }
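
The up-front swap accounting near the top of segvn_create() can be restated
on its own: private pages need backing swap reserved unless the caller opted
out with MAP_NORESERVE. A sketch mirroring that test:

    static int
    needs_swap_resv(const struct segvn_crargs *a)
    {
            if (a->flags & MAP_NORESERVE)
                    return (0);             /* caller opted out */
            if (a->vp == NULL && a->amp == NULL)
                    return (1);             /* pure anonymous memory */
            if (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE))
                    return (1);             /* writable private => COW copies */
            return (0);                     /* shared or read-only file map */
    }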
1005 1005
1006 1006 /*
1007 1007 * Concatenate two existing segments, if possible.
1008 1008 * Return 0 on success, -1 if two segments are not compatible
1009 1009 * or -2 on memory allocation failure.
1010 1010 * If amp_cat == 1 then try and concat segments with anon maps
1011 1011 */
1012 1012 static int
1013 1013 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
1014 1014 {
1015 1015 struct segvn_data *svd1 = seg1->s_data;
1016 1016 struct segvn_data *svd2 = seg2->s_data;
1017 1017 struct anon_map *amp1 = svd1->amp;
1018 1018 struct anon_map *amp2 = svd2->amp;
1019 1019 struct vpage *vpage1 = svd1->vpage;
1020 1020 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
1021 1021 size_t size, nvpsize;
1022 1022 pgcnt_t npages1, npages2;
1023 1023
1024 1024 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
1025 1025 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1026 1026 ASSERT(seg1->s_ops == seg2->s_ops);
1027 1027
1028 1028 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
1029 1029 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1030 1030 return (-1);
1031 1031 }
1032 1032
1033 1033 /* both segments exist, try to merge them */
1034 1034 #define incompat(x) (svd1->x != svd2->x)
1035 1035 if (incompat(vp) || incompat(maxprot) ||
1036 1036 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
1037 1037 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
1038 1038 incompat(type) || incompat(cred) || incompat(flags) ||
1039 1039 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
1040 1040 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
1041 1041 return (-1);
1042 1042 #undef incompat
1043 1043
1044 1044 /*
1045 1045 * vp == NULL implies zfod, offset doesn't matter
1046 1046 */
1047 1047 if (svd1->vp != NULL &&
1048 1048 svd1->offset + seg1->s_size != svd2->offset) {
1049 1049 return (-1);
1050 1050 }
1051 1051
1052 1052 /*
1053 1053 * Don't concatenate if either segment uses text replication.
1054 1054 */
1055 1055 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
1056 1056 return (-1);
1057 1057 }
1058 1058
1059 1059 /*
1060 1060 * Fail early if we're not supposed to concatenate
1061 1061 * segments with non NULL amp.
1062 1062 */
1063 1063 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
1064 1064 return (-1);
1065 1065 }
1066 1066
1067 1067 if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
1068 1068 if (amp1 != amp2) {
1069 1069 return (-1);
1070 1070 }
1071 1071 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
1072 1072 svd2->anon_index) {
1073 1073 return (-1);
1074 1074 }
1075 1075 ASSERT(amp1 == NULL || amp1->refcnt >= 2);
1076 1076 }
1077 1077
1078 1078 /*
1079 1079 * If either seg has vpages, create a new merged vpage array.
1080 1080 */
1081 1081 if (vpage1 != NULL || vpage2 != NULL) {
1082 1082 struct vpage *vp, *evp;
1083 1083
1084 1084 npages1 = seg_pages(seg1);
1085 1085 npages2 = seg_pages(seg2);
1086 1086 nvpsize = vpgtob(npages1 + npages2);
1087 1087
1088 1088 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
1089 1089 return (-2);
1090 1090 }
1091 1091
1092 1092 if (vpage1 != NULL) {
1093 1093 bcopy(vpage1, nvpage, vpgtob(npages1));
1094 1094 } else {
1095 1095 evp = nvpage + npages1;
1096 1096 for (vp = nvpage; vp < evp; vp++) {
1097 1097 VPP_SETPROT(vp, svd1->prot);
1098 1098 VPP_SETADVICE(vp, svd1->advice);
1099 1099 }
1100 1100 }
1101 1101
1102 1102 if (vpage2 != NULL) {
1103 1103 bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
1104 1104 } else {
1105 1105 evp = nvpage + npages1 + npages2;
1106 1106 for (vp = nvpage + npages1; vp < evp; vp++) {
1107 1107 VPP_SETPROT(vp, svd2->prot);
1108 1108 VPP_SETADVICE(vp, svd2->advice);
1109 1109 }
1110 1110 }
1111 1111
1112 1112 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
1113 1113 ASSERT(svd1->swresv == seg1->s_size);
1114 1114 ASSERT(!(svd1->flags & MAP_NORESERVE));
1115 1115 ASSERT(!(svd2->flags & MAP_NORESERVE));
1116 1116 evp = nvpage + npages1;
1117 1117 for (vp = nvpage; vp < evp; vp++) {
1118 1118 VPP_SETSWAPRES(vp);
1119 1119 }
1120 1120 }
1121 1121
1122 1122 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
1123 1123 ASSERT(svd2->swresv == seg2->s_size);
1124 1124 ASSERT(!(svd1->flags & MAP_NORESERVE));
1125 1125 ASSERT(!(svd2->flags & MAP_NORESERVE));
1126 1126 vp = nvpage + npages1;
1127 1127 evp = vp + npages2;
1128 1128 for (; vp < evp; vp++) {
1129 1129 VPP_SETSWAPRES(vp);
1130 1130 }
1131 1131 }
1132 1132 }
1133 1133 ASSERT((vpage1 != NULL || vpage2 != NULL) ||
1134 1134 (svd1->pageswap == 0 && svd2->pageswap == 0));
1135 1135
1136 1136 /*
1137 1137 * If either segment has private pages, create a new merged anon
1138 1138 * array. If merging shared anon segments, just decrement the anon map's
1139 1139 * refcnt.
1140 1140 */
1141 1141 if (amp1 != NULL && svd1->type == MAP_SHARED) {
1142 1142 ASSERT(amp1 == amp2 && svd1->vp == NULL);
1143 1143 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1144 1144 ASSERT(amp1->refcnt >= 2);
1145 1145 amp1->refcnt--;
1146 1146 ANON_LOCK_EXIT(&amp1->a_rwlock);
1147 1147 svd2->amp = NULL;
1148 1148 } else if (amp1 != NULL || amp2 != NULL) {
1149 1149 struct anon_hdr *nahp;
1150 1150 struct anon_map *namp = NULL;
1151 1151 size_t asize;
1152 1152
1153 1153 ASSERT(svd1->type == MAP_PRIVATE);
1154 1154
1155 1155 asize = seg1->s_size + seg2->s_size;
1156 1156 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
1157 1157 if (nvpage != NULL) {
1158 1158 kmem_free(nvpage, nvpsize);
1159 1159 }
1160 1160 return (-2);
1161 1161 }
1162 1162 if (amp1 != NULL) {
1163 1163 /*
1164 1164 * XXX anon rwlock is not really needed because
1165 1165 * this is a private segment and we are writers.
1166 1166 */
1167 1167 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1168 1168 ASSERT(amp1->refcnt == 1);
1169 1169 if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
1170 1170 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
1171 1171 anon_release(nahp, btop(asize));
1172 1172 ANON_LOCK_EXIT(&amp1->a_rwlock);
1173 1173 if (nvpage != NULL) {
1174 1174 kmem_free(nvpage, nvpsize);
1175 1175 }
1176 1176 return (-2);
1177 1177 }
1178 1178 }
1179 1179 if (amp2 != NULL) {
1180 1180 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1181 1181 ASSERT(amp2->refcnt == 1);
1182 1182 if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
1183 1183 nahp, btop(seg1->s_size), btop(seg2->s_size),
1184 1184 ANON_NOSLEEP)) {
1185 1185 anon_release(nahp, btop(asize));
1186 1186 ANON_LOCK_EXIT(&amp2->a_rwlock);
1187 1187 if (amp1 != NULL) {
1188 1188 ANON_LOCK_EXIT(&amp1->a_rwlock);
1189 1189 }
1190 1190 if (nvpage != NULL) {
1191 1191 kmem_free(nvpage, nvpsize);
1192 1192 }
1193 1193 return (-2);
1194 1194 }
1195 1195 }
1196 1196 if (amp1 != NULL) {
1197 1197 namp = amp1;
1198 1198 anon_release(amp1->ahp, btop(amp1->size));
1199 1199 }
1200 1200 if (amp2 != NULL) {
1201 1201 if (namp == NULL) {
1202 1202 ASSERT(amp1 == NULL);
1203 1203 namp = amp2;
1204 1204 anon_release(amp2->ahp, btop(amp2->size));
1205 1205 } else {
1206 1206 amp2->refcnt--;
1207 1207 ANON_LOCK_EXIT(&amp2->a_rwlock);
1208 1208 anonmap_free(amp2);
1209 1209 }
1210 1210 svd2->amp = NULL; /* needed for seg_free */
1211 1211 }
1212 1212 namp->ahp = nahp;
1213 1213 namp->size = asize;
1214 1214 svd1->amp = namp;
1215 1215 svd1->anon_index = 0;
1216 1216 ANON_LOCK_EXIT(&namp->a_rwlock);
1217 1217 }
1218 1218 /*
1219 1219 * Now free the old vpage structures.
1220 1220 */
1221 1221 if (nvpage != NULL) {
1222 1222 if (vpage1 != NULL) {
1223 1223 kmem_free(vpage1, vpgtob(npages1));
1224 1224 }
1225 1225 if (vpage2 != NULL) {
1226 1226 svd2->vpage = NULL;
1227 1227 kmem_free(vpage2, vpgtob(npages2));
1228 1228 }
1229 1229 if (svd2->pageprot) {
1230 1230 svd1->pageprot = 1;
1231 1231 }
1232 1232 if (svd2->pageadvice) {
1233 1233 svd1->pageadvice = 1;
1234 1234 }
1235 1235 if (svd2->pageswap) {
1236 1236 svd1->pageswap = 1;
1237 1237 }
1238 1238 svd1->vpage = nvpage;
1239 1239 }
1240 1240
1241 1241 /* all looks ok, merge segments */
1242 1242 svd1->swresv += svd2->swresv;
1243 1243 svd2->swresv = 0; /* so seg_free doesn't release swap space */
1244 1244 size = seg2->s_size;
1245 1245 seg_free(seg2);
1246 1246 seg1->s_size += size;
1247 1247 return (0);
1248 1248 }
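
segvn_concat() separates "incompatible" (-1) from "transient allocation
failure" (-2), but since merging is purely an optimization, callers can
treat any nonzero return as "leave the segments apart". A sketched call site
for adjacent segvn segments seg1 and seg2:

    if (seg2->s_ops == &segvn_ops &&
        seg1->s_base + seg1->s_size == seg2->s_base) {
            if (segvn_concat(seg1, seg2, 0) == 0) {
                    /* seg2 was freed; seg1 now covers both ranges */
            }
            /* -1: incompatible attributes; -2: allocation failed */
    }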
1249 1249
1250 1250 /*
1251 1251 * Extend the previous segment (seg1) to include the
1252 1252 * new segment (seg2 + a), if possible.
1253 1253 * Return 0 on success.
1254 1254 */
1255 1255 static int
1256 1256 segvn_extend_prev(seg1, seg2, a, swresv)
1257 1257 struct seg *seg1, *seg2;
1258 1258 struct segvn_crargs *a;
1259 1259 size_t swresv;
1260 1260 {
1261 1261 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
1262 1262 size_t size;
1263 1263 struct anon_map *amp1;
1264 1264 struct vpage *new_vpage;
1265 1265
1266 1266 /*
1267 1267 * We don't need any segment level locks for "segvn" data
1268 1268 * since the address space is "write" locked.
1269 1269 */
1270 1270 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1271 1271
1272 1272 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
1273 1273 return (-1);
1274 1274 }
1275 1275
1276 1276 /* second segment is new, try to extend first */
1277 1277 /* XXX - should also check cred */
1278 1278 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
1279 1279 (!svd1->pageprot && (svd1->prot != a->prot)) ||
1280 1280 svd1->type != a->type || svd1->flags != a->flags ||
1281 1281 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
1282 1282 return (-1);
1283 1283
1284 1284 /* vp == NULL implies zfod, offset doesn't matter */
1285 1285 if (svd1->vp != NULL &&
1286 1286 svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
1287 1287 return (-1);
1288 1288
1289 1289 if (svd1->tr_state != SEGVN_TR_OFF) {
1290 1290 return (-1);
1291 1291 }
1292 1292
1293 1293 amp1 = svd1->amp;
1294 1294 if (amp1) {
1295 1295 pgcnt_t newpgs;
1296 1296
1297 1297 /*
1298 1298 * Segment has private pages, can data structures
1299 1299 * be expanded?
1300 1300 *
1301 1301 * Acquire the anon_map lock to prevent it from changing,
1302 1302 * if it is shared. This ensures that the anon_map
1303 1303 * will not change while a thread which has a read/write
1304 1304 * lock on an address space references it.
1305 1305 * XXX - Don't need the anon_map lock at all if "refcnt"
1306 1306 * is 1.
1307 1307 *
1308 1308 * Can't grow a MAP_SHARED segment with an anonmap because
1309 1309 * there may be existing anon slots where we want to extend
1310 1310 * the segment and we wouldn't know what to do with them
1311 1311 * (e.g., for tmpfs right thing is to just leave them there,
1312 1312 * for /dev/zero they should be cleared out).
1313 1313 */
1314 1314 if (svd1->type == MAP_SHARED)
1315 1315 return (-1);
1316 1316
1317 1317 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1318 1318 if (amp1->refcnt > 1) {
1319 1319 ANON_LOCK_EXIT(&amp1->a_rwlock);
1320 1320 return (-1);
1321 1321 }
1322 1322 newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
1323 1323 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);
1324 1324
1325 1325 if (newpgs == 0) {
1326 1326 ANON_LOCK_EXIT(&amp1->a_rwlock);
1327 1327 return (-1);
1328 1328 }
1329 1329 amp1->size = ptob(newpgs);
1330 1330 ANON_LOCK_EXIT(&amp1->a_rwlock);
1331 1331 }
1332 1332 if (svd1->vpage != NULL) {
1333 1333 struct vpage *vp, *evp;
1334 1334 new_vpage =
1335 1335 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1336 1336 KM_NOSLEEP);
1337 1337 if (new_vpage == NULL)
1338 1338 return (-1);
1339 1339 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
1340 1340 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
1341 1341 svd1->vpage = new_vpage;
1342 1342
1343 1343 vp = new_vpage + seg_pages(seg1);
1344 1344 evp = vp + seg_pages(seg2);
1345 1345 for (; vp < evp; vp++)
1346 1346 VPP_SETPROT(vp, a->prot);
1347 1347 if (svd1->pageswap && swresv) {
1348 1348 ASSERT(!(svd1->flags & MAP_NORESERVE));
1349 1349 ASSERT(swresv == seg2->s_size);
1350 1350 vp = new_vpage + seg_pages(seg1);
1351 1351 for (; vp < evp; vp++) {
1352 1352 VPP_SETSWAPRES(vp);
1353 1353 }
1354 1354 }
1355 1355 }
1356 1356 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
1357 1357 size = seg2->s_size;
1358 1358 seg_free(seg2);
1359 1359 seg1->s_size += size;
1360 1360 svd1->swresv += swresv;
1361 1361 if (svd1->pageprot && (a->prot & PROT_WRITE) &&
1362 1362 svd1->type == MAP_SHARED && svd1->vp != NULL &&
1363 1363 (svd1->vp->v_flag & VVMEXEC)) {
1364 1364 ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
1365 1365 segvn_inval_trcache(svd1->vp);
1366 1366 }
1367 1367 return (0);
1368 1368 }
1369 1369
1370 1370 /*
1371 1371 * Extend the next segment (seg2) to include the
1372 1372 * new segment (seg1 + a), if possible.
1373 1373 * Return 0 on success.
1374 1374 */
1375 1375 static int
1376 1376 segvn_extend_next(
1377 1377 struct seg *seg1,
1378 1378 struct seg *seg2,
1379 1379 struct segvn_crargs *a,
1380 1380 size_t swresv)
1381 1381 {
1382 1382 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
1383 1383 size_t size;
1384 1384 struct anon_map *amp2;
1385 1385 struct vpage *new_vpage;
1386 1386
1387 1387 /*
1388 1388 * We don't need any segment level locks for "segvn" data
1389 1389 * since the address space is "write" locked.
1390 1390 */
1391 1391 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));
1392 1392
1393 1393 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1394 1394 return (-1);
1395 1395 }
1396 1396
1397 1397 /* first segment is new, try to extend second */
1398 1398 /* XXX - should also check cred */
1399 1399 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
1400 1400 (!svd2->pageprot && (svd2->prot != a->prot)) ||
1401 1401 svd2->type != a->type || svd2->flags != a->flags ||
1402 1402 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
1403 1403 return (-1);
1404 1404 /* vp == NULL implies zfod, offset doesn't matter */
1405 1405 if (svd2->vp != NULL &&
1406 1406 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
1407 1407 return (-1);
1408 1408
1409 1409 if (svd2->tr_state != SEGVN_TR_OFF) {
1410 1410 return (-1);
1411 1411 }
1412 1412
1413 1413 amp2 = svd2->amp;
1414 1414 if (amp2) {
1415 1415 pgcnt_t newpgs;
1416 1416
1417 1417 /*
1418 1418 * Segment has private pages, can data structures
1419 1419 * be expanded?
1420 1420 *
1421 1421 * Acquire the anon_map lock to prevent it from changing,
1422 1422 * if it is shared. This ensures that the anon_map
1423 1423 * will not change while a thread which has a read/write
1424 1424 * lock on an address space references it.
1425 1425 *
1426 1426 * XXX - Don't need the anon_map lock at all if "refcnt"
1427 1427 * is 1.
1428 1428 */
1429 1429 if (svd2->type == MAP_SHARED)
1430 1430 return (-1);
1431 1431
1432 1432 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1433 1433 if (amp2->refcnt > 1) {
1434 1434 ANON_LOCK_EXIT(&amp2->a_rwlock);
1435 1435 return (-1);
1436 1436 }
1437 1437 newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
1438 1438 btop(seg2->s_size), btop(seg1->s_size),
1439 1439 ANON_NOSLEEP | ANON_GROWDOWN);
1440 1440
1441 1441 if (newpgs == 0) {
1442 1442 ANON_LOCK_EXIT(&amp2->a_rwlock);
1443 1443 return (-1);
1444 1444 }
1445 1445 amp2->size = ptob(newpgs);
1446 1446 ANON_LOCK_EXIT(&amp2->a_rwlock);
1447 1447 }
1448 1448 if (svd2->vpage != NULL) {
1449 1449 struct vpage *vp, *evp;
1450 1450 new_vpage =
1451 1451 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1452 1452 KM_NOSLEEP);
1453 1453 if (new_vpage == NULL) {
1454 1454 /* Not merging segments so adjust anon_index back */
1455 1455 if (amp2)
1456 1456 svd2->anon_index += seg_pages(seg1);
1457 1457 return (-1);
1458 1458 }
1459 1459 bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
1460 1460 vpgtob(seg_pages(seg2)));
1461 1461 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
1462 1462 svd2->vpage = new_vpage;
1463 1463
1464 1464 vp = new_vpage;
1465 1465 evp = vp + seg_pages(seg1);
1466 1466 for (; vp < evp; vp++)
1467 1467 VPP_SETPROT(vp, a->prot);
1468 1468 if (svd2->pageswap && swresv) {
1469 1469 ASSERT(!(svd2->flags & MAP_NORESERVE));
1470 1470 ASSERT(swresv == seg1->s_size);
1471 1471 vp = new_vpage;
1472 1472 for (; vp < evp; vp++) {
1473 1473 VPP_SETSWAPRES(vp);
1474 1474 }
1475 1475 }
1476 1476 }
1477 1477 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
1478 1478 size = seg1->s_size;
1479 1479 seg_free(seg1);
1480 1480 seg2->s_size += size;
1481 1481 seg2->s_base -= size;
1482 1482 svd2->offset -= size;
1483 1483 svd2->swresv += swresv;
1484 1484 if (svd2->pageprot && (a->prot & PROT_WRITE) &&
1485 1485 svd2->type == MAP_SHARED && svd2->vp != NULL &&
1486 1486 (svd2->vp->v_flag & VVMEXEC)) {
1487 1487 ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
1488 1488 segvn_inval_trcache(svd2->vp);
1489 1489 }
1490 1490 return (0);
1491 1491 }
1492 1492
1493 1493 /*
1494 1494 * Duplicate all the pages in the segment. This may break COW sharing for a
1495 1495 * given page. If the page is marked with inherit zero set, then instead of
1496 1496 * duplicating the page, we zero the page.
1497 1497 */
1498 1498 static int
1499 1499 segvn_dup_pages(struct seg *seg, struct seg *newseg)
1500 1500 {
1501 1501 int error;
1502 1502 uint_t prot;
1503 1503 page_t *pp;
1504 1504 struct anon *ap, *newap;
1505 1505 size_t i;
1506 1506 caddr_t addr;
1507 1507
1508 1508 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1509 1509 struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data;
1510 1510 ulong_t old_idx = svd->anon_index;
1511 1511 ulong_t new_idx = 0;
1512 1512
1513 1513 i = btopr(seg->s_size);
1514 1514 addr = seg->s_base;
1515 1515
1516 1516 /*
1517 1517 * XXX break cow sharing using PAGESIZE
1518 1518 * pages. They will be relocated into larger
1519 1519 * pages at fault time.
1520 1520 */
1521 1521 while (i-- > 0) {
1522 1522 if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) {
1523 1523 struct vpage *vpp;
1524 1524
1525 1525 vpp = &svd->vpage[seg_page(seg, addr)];
1526 1526
1527 1527 /*
1528 1528 * prot need not be computed below because anon_private
1529 1529 * is going to ignore it anyway, as the child doesn't
1530 1530 * inherit pagelock from the parent.
1531 1531 */
1532 1532 prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot;
1533 1533
1534 1534 /*
1535 1535 * Check whether we should zero this or dup it.
1536 1536 */
1537 1537 if (svd->svn_inz == SEGVN_INZ_ALL ||
1538 1538 (svd->svn_inz == SEGVN_INZ_VPP &&
1539 1539 VPP_ISINHZERO(vpp))) {
1540 1540 pp = anon_zero(newseg, addr, &newap,
1541 1541 newsvd->cred);
1542 1542 } else {
1543 1543 page_t *anon_pl[1+1];
1544 1544 uint_t vpprot;
1545 1545 error = anon_getpage(&ap, &vpprot, anon_pl,
1546 1546 PAGESIZE, seg, addr, S_READ, svd->cred);
1547 1547 if (error != 0)
1548 1548 return (error);
1549 1549
1550 1550 pp = anon_private(&newap, newseg, addr, prot,
1551 1551 anon_pl[0], 0, newsvd->cred);
1552 1552 }
1553 1553 if (pp == NULL) {
1554 1554 return (ENOMEM);
1555 1555 }
1556 1556 (void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap,
1557 1557 ANON_SLEEP);
1558 1558 page_unlock(pp);
1559 1559 }
1560 1560 addr += PAGESIZE;
1561 1561 old_idx++;
1562 1562 new_idx++;
1563 1563 }
1564 1564
1565 1565 return (0);
1566 1566 }
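
The inherit-zero cases here (SEGVN_INZ_ALL, SEGVN_INZ_VPP) back memcntl(2)'s
MC_INHERIT_ZERO: at fork, the child receives freshly zeroed pages instead of
COW copies. A user-level usage sketch (error handling elided):

    #include <sys/types.h>
    #include <sys/mman.h>

    void
    mark_inherit_zero(caddr_t addr, size_t len)
    {
            /* children created by fork() see zeros here, not copies */
            (void) memcntl(addr, len, MC_INHERIT_ZERO, 0, 0, 0);
    }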
1567 1567
1568 1568 static int
1569 1569 segvn_dup(struct seg *seg, struct seg *newseg)
1570 1570 {
1571 1571 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1572 1572 struct segvn_data *newsvd;
1573 1573 pgcnt_t npages = seg_pages(seg);
1574 1574 int error = 0;
1575 1575 size_t len;
1576 1576 struct anon_map *amp;
1577 1577
1578 1578 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1579 1579 ASSERT(newseg->s_as->a_proc->p_parent == curproc);
1580 1580
1581 1581 /*
1582 1582 * If segment has anon reserved, reserve more for the new seg.
1583 1583 * For a MAP_NORESERVE segment swresv will be a count of all the
1584 1584 * allocated anon slots; thus we reserve for the child as many slots
1585 1585 * as the parent has allocated. This semantic prevents the child or
1586 1586 * parent from dying during a copy-on-write fault caused by trying
1587 1587 * to write a shared pre-existing anon page.
1588 1588 */
1589 1589 if ((len = svd->swresv) != 0) {
1590 1590 if (anon_resv(svd->swresv) == 0)
1591 1591 return (ENOMEM);
1592 1592
1593 1593 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
1594 1594 seg, len, 0);
1595 1595 }
1596 1596
1597 1597 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
1598 1598
1599 1599 newseg->s_ops = &segvn_ops;
1600 1600 newseg->s_data = (void *)newsvd;
1601 1601 newseg->s_szc = seg->s_szc;
1602 1602
1603 1603 newsvd->seg = newseg;
1604 1604 if ((newsvd->vp = svd->vp) != NULL) {
1605 1605 VN_HOLD(svd->vp);
1606 1606 if (svd->type == MAP_SHARED)
1607 1607 lgrp_shm_policy_init(NULL, svd->vp);
1608 1608 }
1609 1609 newsvd->offset = svd->offset;
1610 1610 newsvd->prot = svd->prot;
1611 1611 newsvd->maxprot = svd->maxprot;
1612 1612 newsvd->pageprot = svd->pageprot;
1613 1613 newsvd->type = svd->type;
1614 1614 newsvd->cred = svd->cred;
1615 1615 crhold(newsvd->cred);
1616 1616 newsvd->advice = svd->advice;
1617 1617 newsvd->pageadvice = svd->pageadvice;
1618 1618 newsvd->svn_inz = svd->svn_inz;
1619 1619 newsvd->swresv = svd->swresv;
1620 1620 newsvd->pageswap = svd->pageswap;
1621 1621 newsvd->flags = svd->flags;
1622 1622 newsvd->softlockcnt = 0;
1623 1623 newsvd->softlockcnt_sbase = 0;
1624 1624 newsvd->softlockcnt_send = 0;
1625 1625 newsvd->policy_info = svd->policy_info;
1626 1626 newsvd->rcookie = HAT_INVALID_REGION_COOKIE;
1627 1627
1628 1628 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
1629 1629 /*
1630 1630 * Not attaching to a shared anon object.
1631 1631 */
1632 1632 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
1633 1633 svd->tr_state == SEGVN_TR_OFF);
1634 1634 if (svd->tr_state == SEGVN_TR_ON) {
1635 1635 ASSERT(newsvd->vp != NULL && amp != NULL);
1636 1636 newsvd->tr_state = SEGVN_TR_INIT;
1637 1637 } else {
1638 1638 newsvd->tr_state = svd->tr_state;
1639 1639 }
1640 1640 newsvd->amp = NULL;
1641 1641 newsvd->anon_index = 0;
1642 1642 } else {
1643 1643 /* regions for now are only used on pure vnode segments */
1644 1644 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
1645 1645 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1646 1646 newsvd->tr_state = SEGVN_TR_OFF;
1647 1647 if (svd->type == MAP_SHARED) {
1648 1648 ASSERT(svd->svn_inz == SEGVN_INZ_NONE);
1649 1649 newsvd->amp = amp;
1650 1650 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1651 1651 amp->refcnt++;
1652 1652 ANON_LOCK_EXIT(&amp->a_rwlock);
1653 1653 newsvd->anon_index = svd->anon_index;
1654 1654 } else {
1655 1655 int reclaim = 1;
1656 1656
1657 1657 /*
1658 1658 * Allocate and initialize new anon_map structure.
1659 1659 */
1660 1660 newsvd->amp = anonmap_alloc(newseg->s_size, 0,
1661 1661 ANON_SLEEP);
1662 1662 newsvd->amp->a_szc = newseg->s_szc;
1663 1663 newsvd->anon_index = 0;
1664 1664 ASSERT(svd->svn_inz == SEGVN_INZ_NONE ||
1665 1665 svd->svn_inz == SEGVN_INZ_ALL ||
1666 1666 svd->svn_inz == SEGVN_INZ_VPP);
1667 1667
1668 1668 /*
1669 1669 * We don't have to acquire the anon_map lock
1670 1670 * for the new segment (since it belongs to an
1671 1671 * address space that is still not associated
1672 1672 * with any process), or the segment in the old
1673 1673 * address space (since all threads in it
1674 1674 * are stopped while duplicating the address space).
1675 1675 */
1676 1676
1677 1677 /*
1678 1678 * The goal of the following code is to make sure that
1679 1679 * softlocked pages do not end up as copy on write
1680 1680 * pages. This would cause problems where one
1681 1681 * thread writes to a page that is COW and a different
1682 1682 * thread in the same process has softlocked it. The
1683 1683 * softlock lock would move away from this process
1684 1684 * because the write would cause this process to get
1685 1685 * a copy (without the softlock).
1686 1686 *
1687 1687 * The strategy here is to just break the
1688 1688 * sharing on pages that could possibly be
1689 1689 * softlocked.
1690 1690 *
1691 1691 * In addition, if any pages have been marked that they
1692 1692 * should be inherited as zero, then we immediately go
1693 1693 * ahead and break COW and zero them. In the case of a
1694 1694 * softlocked page that should be inherited zero, we
1695 1695 * break COW and just get a zero page.
1696 1696 */
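/*
 * Condensed, the choice made below is (a sketch using this function's
 * own names):
 *
 *	if (svd->softlockcnt != 0 || svd->svn_inz != SEGVN_INZ_NONE)
 *		error = segvn_dup_pages(seg, newseg);	(eager copy/zero)
 *	else
 *		anon_dup(...) or anon_dup_fill_holes(...)  (share, COW later)
 */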
1697 1697 retry:
1698 1698 if (svd->softlockcnt ||
1699 1699 svd->svn_inz != SEGVN_INZ_NONE) {
1700 1700 /*
1701 1701 * The softlock count might be non-zero
1702 1702 * because some pages are still stuck in the
1703 1703 * cache for lazy reclaim, or because there is
1704 1704 * real I/O going on to these pages. Flush the
1705 1705 * cache now; that should drop the count to
1706 1706 * zero. Note that we hold the writers lock, so
1707 1707 * nothing gets inserted during the flush.
1708 1708 */
1709 1709 if (svd->softlockcnt && reclaim == 1) {
1710 1710 segvn_purge(seg);
1711 1711 reclaim = 0;
1712 1712 goto retry;
1713 1713 }
1714 1714
1715 1715 error = segvn_dup_pages(seg, newseg);
1716 1716 if (error != 0) {
1717 1717 newsvd->vpage = NULL;
1718 1718 goto out;
1719 1719 }
1720 1720 } else { /* common case */
1721 1721 if (seg->s_szc != 0) {
1722 1722 /*
1723 1723 * If at least one of anon slots of a
1724 1724 * large page exists then make sure
1725 1725 * all anon slots of a large page
1726 1726 * exist to avoid partial cow sharing
1727 1727 * of a large page in the future.
1728 1728 */
1729 1729 anon_dup_fill_holes(amp->ahp,
1730 1730 svd->anon_index, newsvd->amp->ahp,
1731 1731 0, seg->s_size, seg->s_szc,
1732 1732 svd->vp != NULL);
1733 1733 } else {
1734 1734 anon_dup(amp->ahp, svd->anon_index,
1735 1735 newsvd->amp->ahp, 0, seg->s_size);
1736 1736 }
1737 1737
1738 1738 hat_clrattr(seg->s_as->a_hat, seg->s_base,
1739 1739 seg->s_size, PROT_WRITE);
1740 1740 }
1741 1741 }
1742 1742 }
1743 1743 /*
1744 1744 * If necessary, create a vpage structure for the new segment.
1745 1745 * Do not copy any page lock indications.
1746 1746 */
1747 1747 if (svd->vpage != NULL) {
1748 1748 uint_t i;
1749 1749 struct vpage *ovp = svd->vpage;
1750 1750 struct vpage *nvp;
1751 1751
1752 1752 nvp = newsvd->vpage =
1753 1753 kmem_alloc(vpgtob(npages), KM_SLEEP);
1754 1754 for (i = 0; i < npages; i++) {
1755 1755 *nvp = *ovp++;
1756 1756 VPP_CLRPPLOCK(nvp++);
1757 1757 }
1758 1758 } else
1759 1759 newsvd->vpage = NULL;
1760 1760
1761 1761 /* Inform the vnode of the new mapping */
1762 1762 if (newsvd->vp != NULL) {
1763 1763 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
1764 1764 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
1765 1765 newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
1766 1766 }
1767 1767 out:
1768 1768 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1769 1769 ASSERT(newsvd->amp == NULL);
1770 1770 ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
1771 1771 newsvd->rcookie = svd->rcookie;
1772 1772 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
1773 1773 }
1774 1774 return (error);
1775 1775 }
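/*
 * Caller context, sketched from the generic seg layer (hedged; the
 * exact call chain lives outside this file):
 *
 *	fork() -> cfork() -> as_dup(parent_as, child_proc)
 *	    -> for each seg in the parent as: SEGOP_DUP(seg, newseg),
 *	       which resolves to segvn_dup(seg, newseg) for segvn segments
 */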
1776 1776
1777 1777
1778 1778 /*
1779 1779 * callback function to invoke free_vp_pages() for only those pages actually
1780 1780 * processed by the HAT when a shared region is destroyed.
1781 1781 */
1782 1782 extern int free_pages;
1783 1783
1784 1784 static void
1785 1785 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
1786 1786 size_t r_size, void *r_obj, u_offset_t r_objoff)
1787 1787 {
1788 1788 u_offset_t off;
1789 1789 size_t len;
1790 1790 vnode_t *vp = (vnode_t *)r_obj;
1791 1791
1792 1792 ASSERT(eaddr > saddr);
1793 1793 ASSERT(saddr >= r_saddr);
1794 1794 ASSERT(saddr < r_saddr + r_size);
1795 1795 ASSERT(eaddr > r_saddr);
1796 1796 ASSERT(eaddr <= r_saddr + r_size);
1797 1797 ASSERT(vp != NULL);
1798 1798
1799 1799 if (!free_pages) {
1800 1800 return;
1801 1801 }
1802 1802
1803 1803 len = eaddr - saddr;
1804 1804 off = (saddr - r_saddr) + r_objoff;
1805 1805 free_vp_pages(vp, off, len);
1806 1806 }
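/*
 * Offset arithmetic, by example (values illustrative): if saddr lies two
 * pages past r_saddr and the region object starts at r_objoff 0x4000,
 * free_vp_pages() is invoked with off == 2 * PAGESIZE + 0x4000 and
 * len == eaddr - saddr.
 */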
1807 1807
1808 1808 /*
1809 1809 * callback function used by segvn_unmap to invoke free_vp_pages() for only
1810 1810 * those pages actually processed by the HAT
1811 1811 */
1812 1812 static void
1813 1813 segvn_hat_unload_callback(hat_callback_t *cb)
1814 1814 {
1815 1815 struct seg *seg = cb->hcb_data;
1816 1816 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1817 1817 size_t len;
1818 1818 u_offset_t off;
1819 1819
1820 1820 ASSERT(svd->vp != NULL);
1821 1821 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
1822 1822 ASSERT(cb->hcb_start_addr >= seg->s_base);
1823 1823
1824 1824 len = cb->hcb_end_addr - cb->hcb_start_addr;
1825 1825 off = cb->hcb_start_addr - seg->s_base;
1826 1826 free_vp_pages(svd->vp, svd->offset + off, len);
1827 1827 }
1828 1828
1829 1829 /*
1830 1830 * This function determines the number of bytes of swap reserved by
1831 1831 * a segment for which per-page accounting is present. It is used to
1832 1832 * calculate the correct value of a segvn_data's swresv.
1833 1833 */
1834 1834 static size_t
1835 1835 segvn_count_swap_by_vpages(struct seg *seg)
1836 1836 {
1837 1837 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1838 1838 struct vpage *vp, *evp;
1839 1839 size_t nswappages = 0;
1840 1840
1841 1841 ASSERT(svd->pageswap);
1842 1842 ASSERT(svd->vpage != NULL);
1843 1843
1844 1844 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
1845 1845
1846 1846 for (vp = svd->vpage; vp < evp; vp++) {
1847 1847 if (VPP_ISSWAPRES(vp))
1848 1848 nswappages++;
1849 1849 }
1850 1850
1851 1851 return (nswappages << PAGESHIFT);
1852 1852 }
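/*
 * Worked example (illustrative): with 4K pages (PAGESHIFT == 12), a
 * vpage array holding three entries with VPP_ISSWAPRES set yields
 * 3 << 12 == 12288 bytes of reserved swap, i.e. the same value
 * ptob(3) would produce.
 */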
1853 1853
1854 1854 static int
1855 1855 segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
1856 1856 {
1857 1857 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1858 1858 struct segvn_data *nsvd;
1859 1859 struct seg *nseg;
1860 1860 struct anon_map *amp;
1861 1861 pgcnt_t opages; /* old segment size in pages */
1862 1862 pgcnt_t npages; /* new segment size in pages */
1863 1863 pgcnt_t dpages; /* pages being deleted (unmapped) */
1864 1864 hat_callback_t callback; /* used for free_vp_pages() */
1865 1865 hat_callback_t *cbp = NULL;
1866 1866 caddr_t nbase;
1867 1867 size_t nsize;
1868 1868 size_t oswresv;
1869 1869 int reclaim = 1;
1870 1870
1871 1871 /*
1872 1872 * We don't need any segment level locks for "segvn" data
1873 1873 * since the address space is "write" locked.
1874 1874 */
1875 1875 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1876 1876
1877 1877 /*
1878 1878 * Fail the unmap if pages are SOFTLOCKed through this mapping.
1879 1879 * softlockcnt is protected from change by the as write lock.
1880 1880 */
1881 1881 retry:
1882 1882 if (svd->softlockcnt > 0) {
1883 1883 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1884 1884
1885 1885 /*
1886 1886 * If this is a shared segment, a non-zero softlockcnt
1887 1887 * means locked pages are still in use.
1888 1888 */
1889 1889 if (svd->type == MAP_SHARED) {
1890 1890 return (EAGAIN);
1891 1891 }
1892 1892
1893 1893 /*
1894 1894 * Since we hold the writers lock, nobody can fill
1895 1895 * the cache during the purge. The flush either succeeds
1896 1896 * or we still have pending I/Os.
1897 1897 */
1898 1898 if (reclaim == 1) {
1899 1899 segvn_purge(seg);
1900 1900 reclaim = 0;
1901 1901 goto retry;
1902 1902 }
1903 1903 return (EAGAIN);
1904 1904 }
1905 1905
1906 1906 /*
1907 1907 * Check for bad sizes
1908 1908 */
1909 1909 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
1910 1910 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
1911 1911 panic("segvn_unmap");
1912 1912 /*NOTREACHED*/
1913 1913 }
1914 1914
1915 1915 if (seg->s_szc != 0) {
1916 1916 size_t pgsz = page_get_pagesize(seg->s_szc);
1917 1917 int err;
1918 1918 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
1919 1919 ASSERT(seg->s_base != addr || seg->s_size != len);
1920 1920 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1921 1921 ASSERT(svd->amp == NULL);
1922 1922 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1923 1923 hat_leave_region(seg->s_as->a_hat,
1924 1924 svd->rcookie, HAT_REGION_TEXT);
1925 1925 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1926 1926 /*
1927 1927 * could pass a flag to segvn_demote_range()
1928 1928 * below to tell it not to do any unloads but
1929 1929 * this case is rare enough to not bother for
1930 1930 * now.
1931 1931 */
1932 1932 } else if (svd->tr_state == SEGVN_TR_INIT) {
1933 1933 svd->tr_state = SEGVN_TR_OFF;
1934 1934 } else if (svd->tr_state == SEGVN_TR_ON) {
1935 1935 ASSERT(svd->amp != NULL);
1936 1936 segvn_textunrepl(seg, 1);
1937 1937 ASSERT(svd->amp == NULL);
1938 1938 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1939 1939 }
1940 1940 VM_STAT_ADD(segvnvmstats.demoterange[0]);
1941 1941 err = segvn_demote_range(seg, addr, len, SDR_END, 0);
1942 1942 if (err == 0) {
1943 1943 return (IE_RETRY);
1944 1944 }
1945 1945 return (err);
1946 1946 }
1947 1947 }
1948 1948
1949 1949 /* Inform the vnode of the unmapping. */
1950 1950 if (svd->vp) {
1951 1951 int error;
1952 1952
1953 1953 error = VOP_DELMAP(svd->vp,
1954 1954 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
1955 1955 seg->s_as, addr, len, svd->prot, svd->maxprot,
1956 1956 svd->type, svd->cred, NULL);
1957 1957
1958 1958 if (error == EAGAIN)
1959 1959 return (error);
1960 1960 }
1961 1961
1962 1962 /*
1963 1963 * Remove any page locks set through this mapping.
1964 1964 * If text replication is not off, no page locks could have been
1965 1965 * established via this mapping.
1966 1966 */
1967 1967 if (svd->tr_state == SEGVN_TR_OFF) {
1968 1968 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
1969 1969 }
1970 1970
1971 1971 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1972 1972 ASSERT(svd->amp == NULL);
1973 1973 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1974 1974 ASSERT(svd->type == MAP_PRIVATE);
1975 1975 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
1976 1976 HAT_REGION_TEXT);
1977 1977 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1978 1978 } else if (svd->tr_state == SEGVN_TR_ON) {
1979 1979 ASSERT(svd->amp != NULL);
1980 1980 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
1981 1981 segvn_textunrepl(seg, 1);
1982 1982 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
1983 1983 } else {
1984 1984 if (svd->tr_state != SEGVN_TR_OFF) {
1985 1985 ASSERT(svd->tr_state == SEGVN_TR_INIT);
1986 1986 svd->tr_state = SEGVN_TR_OFF;
1987 1987 }
1988 1988 /*
1989 1989 * Unload any hardware translations in the range to be taken
1990 1990 * out. Use a callback to invoke free_vp_pages() effectively.
1991 1991 */
1992 1992 if (svd->vp != NULL && free_pages != 0) {
1993 1993 callback.hcb_data = seg;
1994 1994 callback.hcb_function = segvn_hat_unload_callback;
1995 1995 cbp = &callback;
1996 1996 }
1997 1997 hat_unload_callback(seg->s_as->a_hat, addr, len,
1998 1998 HAT_UNLOAD_UNMAP, cbp);
1999 1999
2000 2000 if (svd->type == MAP_SHARED && svd->vp != NULL &&
2001 2001 (svd->vp->v_flag & VVMEXEC) &&
2002 2002 ((svd->prot & PROT_WRITE) || svd->pageprot)) {
2003 2003 segvn_inval_trcache(svd->vp);
2004 2004 }
2005 2005 }
2006 2006
2007 2007 /*
2008 2008 * Check for entire segment
2009 2009 */
2010 2010 if (addr == seg->s_base && len == seg->s_size) {
2011 2011 seg_free(seg);
2012 2012 return (0);
2013 2013 }
2014 2014
2015 2015 opages = seg_pages(seg);
2016 2016 dpages = btop(len);
2017 2017 npages = opages - dpages;
2018 2018 amp = svd->amp;
2019 2019 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);
2020 2020
2021 2021 /*
2022 2022 * Check for beginning of segment
2023 2023 */
2024 2024 if (addr == seg->s_base) {
2025 2025 if (svd->vpage != NULL) {
2026 2026 size_t nbytes;
2027 2027 struct vpage *ovpage;
2028 2028
2029 2029 ovpage = svd->vpage; /* keep pointer to vpage */
2030 2030
2031 2031 nbytes = vpgtob(npages);
2032 2032 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2033 2033 bcopy(&ovpage[dpages], svd->vpage, nbytes);
2034 2034
2035 2035 /* free up old vpage */
2036 2036 kmem_free(ovpage, vpgtob(opages));
2037 2037 }
2038 2038 if (amp != NULL) {
2039 2039 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2040 2040 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2041 2041 /*
2042 2042 * Shared anon map is no longer in use. Before
2043 2043 * freeing its pages purge all entries from
2044 2044 * pcache that belong to this amp.
2045 2045 */
2046 2046 if (svd->type == MAP_SHARED) {
2047 2047 ASSERT(amp->refcnt == 1);
2048 2048 ASSERT(svd->softlockcnt == 0);
2049 2049 anonmap_purge(amp);
2050 2050 }
2051 2051 /*
2052 2052 * Free up now unused parts of anon_map array.
2053 2053 */
2054 2054 if (amp->a_szc == seg->s_szc) {
2055 2055 if (seg->s_szc != 0) {
2056 2056 anon_free_pages(amp->ahp,
2057 2057 svd->anon_index, len,
2058 2058 seg->s_szc);
2059 2059 } else {
2060 2060 anon_free(amp->ahp,
2061 2061 svd->anon_index,
2062 2062 len);
2063 2063 }
2064 2064 } else {
2065 2065 ASSERT(svd->type == MAP_SHARED);
2066 2066 ASSERT(amp->a_szc > seg->s_szc);
2067 2067 anon_shmap_free_pages(amp,
2068 2068 svd->anon_index, len);
2069 2069 }
2070 2070
2071 2071 /*
2072 2072 * Unreserve swap space for the
2073 2073 * unmapped chunk of this segment in
2074 2074 * case it's MAP_SHARED
2075 2075 */
2076 2076 if (svd->type == MAP_SHARED) {
2077 2077 anon_unresv_zone(len,
2078 2078 seg->s_as->a_proc->p_zone);
2079 2079 amp->swresv -= len;
2080 2080 }
2081 2081 }
2082 2082 ANON_LOCK_EXIT(&amp->a_rwlock);
2083 2083 svd->anon_index += dpages;
2084 2084 }
2085 2085 if (svd->vp != NULL)
2086 2086 svd->offset += len;
2087 2087
2088 2088 seg->s_base += len;
2089 2089 seg->s_size -= len;
2090 2090
2091 2091 if (svd->swresv) {
2092 2092 if (svd->flags & MAP_NORESERVE) {
2093 2093 ASSERT(amp);
2094 2094 oswresv = svd->swresv;
2095 2095
2096 2096 svd->swresv = ptob(anon_pages(amp->ahp,
2097 2097 svd->anon_index, npages));
2098 2098 anon_unresv_zone(oswresv - svd->swresv,
2099 2099 seg->s_as->a_proc->p_zone);
2100 2100 if (SEG_IS_PARTIAL_RESV(seg))
2101 2101 seg->s_as->a_resvsize -= oswresv -
2102 2102 svd->swresv;
2103 2103 } else {
2104 2104 size_t unlen;
2105 2105
2106 2106 if (svd->pageswap) {
2107 2107 oswresv = svd->swresv;
2108 2108 svd->swresv =
2109 2109 segvn_count_swap_by_vpages(seg);
2110 2110 ASSERT(oswresv >= svd->swresv);
2111 2111 unlen = oswresv - svd->swresv;
2112 2112 } else {
2113 2113 svd->swresv -= len;
2114 2114 ASSERT(svd->swresv == seg->s_size);
2115 2115 unlen = len;
2116 2116 }
2117 2117 anon_unresv_zone(unlen,
2118 2118 seg->s_as->a_proc->p_zone);
2119 2119 }
2120 2120 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2121 2121 seg, len, 0);
2122 2122 }
2123 2123
2124 2124 return (0);
2125 2125 }
2126 2126
2127 2127 /*
2128 2128 * Check for end of segment
2129 2129 */
2130 2130 if (addr + len == seg->s_base + seg->s_size) {
2131 2131 if (svd->vpage != NULL) {
2132 2132 size_t nbytes;
2133 2133 struct vpage *ovpage;
2134 2134
2135 2135 ovpage = svd->vpage; /* keep pointer to vpage */
2136 2136
2137 2137 nbytes = vpgtob(npages);
2138 2138 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2139 2139 bcopy(ovpage, svd->vpage, nbytes);
2140 2140
2141 2141 /* free up old vpage */
2142 2142 kmem_free(ovpage, vpgtob(opages));
2143 2143
2144 2144 }
2145 2145 if (amp != NULL) {
2146 2146 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2147 2147 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2148 2148 /*
2149 2149 * Free up now unused parts of anon_map array.
2150 2150 */
2151 2151 ulong_t an_idx = svd->anon_index + npages;
2152 2152
2153 2153 /*
2154 2154 * Shared anon map is no longer in use. Before
2155 2155 * freeing its pages purge all entries from
2156 2156 * pcache that belong to this amp.
2157 2157 */
2158 2158 if (svd->type == MAP_SHARED) {
2159 2159 ASSERT(amp->refcnt == 1);
2160 2160 ASSERT(svd->softlockcnt == 0);
2161 2161 anonmap_purge(amp);
2162 2162 }
2163 2163
2164 2164 if (amp->a_szc == seg->s_szc) {
2165 2165 if (seg->s_szc != 0) {
2166 2166 anon_free_pages(amp->ahp,
2167 2167 an_idx, len,
2168 2168 seg->s_szc);
2169 2169 } else {
2170 2170 anon_free(amp->ahp, an_idx,
2171 2171 len);
2172 2172 }
2173 2173 } else {
2174 2174 ASSERT(svd->type == MAP_SHARED);
2175 2175 ASSERT(amp->a_szc > seg->s_szc);
2176 2176 anon_shmap_free_pages(amp,
2177 2177 an_idx, len);
2178 2178 }
2179 2179
2180 2180 /*
2181 2181 * Unreserve swap space for the
2182 2182 * unmapped chunk of this segment in
2183 2183 * case it's MAP_SHARED
2184 2184 */
2185 2185 if (svd->type == MAP_SHARED) {
2186 2186 anon_unresv_zone(len,
2187 2187 seg->s_as->a_proc->p_zone);
2188 2188 amp->swresv -= len;
2189 2189 }
2190 2190 }
2191 2191 ANON_LOCK_EXIT(&amp->a_rwlock);
2192 2192 }
2193 2193
2194 2194 seg->s_size -= len;
2195 2195
2196 2196 if (svd->swresv) {
2197 2197 if (svd->flags & MAP_NORESERVE) {
2198 2198 ASSERT(amp);
2199 2199 oswresv = svd->swresv;
2200 2200 svd->swresv = ptob(anon_pages(amp->ahp,
2201 2201 svd->anon_index, npages));
2202 2202 anon_unresv_zone(oswresv - svd->swresv,
2203 2203 seg->s_as->a_proc->p_zone);
2204 2204 if (SEG_IS_PARTIAL_RESV(seg))
2205 2205 seg->s_as->a_resvsize -= oswresv -
2206 2206 svd->swresv;
2207 2207 } else {
2208 2208 size_t unlen;
2209 2209
2210 2210 if (svd->pageswap) {
2211 2211 oswresv = svd->swresv;
2212 2212 svd->swresv =
2213 2213 segvn_count_swap_by_vpages(seg);
2214 2214 ASSERT(oswresv >= svd->swresv);
2215 2215 unlen = oswresv - svd->swresv;
2216 2216 } else {
2217 2217 svd->swresv -= len;
2218 2218 ASSERT(svd->swresv == seg->s_size);
2219 2219 unlen = len;
2220 2220 }
2221 2221 anon_unresv_zone(unlen,
2222 2222 seg->s_as->a_proc->p_zone);
2223 2223 }
2224 2224 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2225 2225 "anon proc:%p %lu %u", seg, len, 0);
2226 2226 }
2227 2227
2228 2228 return (0);
2229 2229 }
2230 2230
2231 2231 /*
2232 2232 * The section to go is in the middle of the segment,
2233 2233 * have to make it into two segments. nseg is made for
2234 2234 * the high end while seg is cut down at the low end.
2235 2235 */
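/*
 * Pictorially (a sketch of the split performed below):
 *
 *	before:  [ seg: s_base ........................ s_base+s_size )
 *	unmap:                 [ addr ..... addr+len )
 *	after:   [ seg )       ( unmapped hole )      [ nseg: nbase.. )
 */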
2236 2236 nbase = addr + len; /* new seg base */
2237 2237 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
2238 2238 seg->s_size = addr - seg->s_base; /* shrink old seg */
2239 2239 nseg = seg_alloc(seg->s_as, nbase, nsize);
2240 2240 if (nseg == NULL) {
2241 2241 panic("segvn_unmap seg_alloc");
2242 2242 /*NOTREACHED*/
2243 2243 }
2244 2244 nseg->s_ops = seg->s_ops;
2245 2245 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
2246 2246 nseg->s_data = (void *)nsvd;
2247 2247 nseg->s_szc = seg->s_szc;
2248 2248 *nsvd = *svd;
2249 2249 nsvd->seg = nseg;
2250 2250 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
2251 2251 nsvd->swresv = 0;
2252 2252 nsvd->softlockcnt = 0;
2253 2253 nsvd->softlockcnt_sbase = 0;
2254 2254 nsvd->softlockcnt_send = 0;
2255 2255 nsvd->svn_inz = svd->svn_inz;
2256 2256 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
2257 2257
2258 2258 if (svd->vp != NULL) {
2259 2259 VN_HOLD(nsvd->vp);
2260 2260 if (nsvd->type == MAP_SHARED)
2261 2261 lgrp_shm_policy_init(NULL, nsvd->vp);
2262 2262 }
2263 2263 crhold(svd->cred);
2264 2264
2265 2265 if (svd->vpage == NULL) {
2266 2266 nsvd->vpage = NULL;
2267 2267 } else {
2268 2268 /* need to split vpage into two arrays */
2269 2269 size_t nbytes;
2270 2270 struct vpage *ovpage;
2271 2271
2272 2272 ovpage = svd->vpage; /* keep pointer to vpage */
2273 2273
2274 2274 npages = seg_pages(seg); /* seg has shrunk */
2275 2275 nbytes = vpgtob(npages);
2276 2276 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2277 2277
2278 2278 bcopy(ovpage, svd->vpage, nbytes);
2279 2279
2280 2280 npages = seg_pages(nseg);
2281 2281 nbytes = vpgtob(npages);
2282 2282 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2283 2283
2284 2284 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);
2285 2285
2286 2286 /* free up old vpage */
2287 2287 kmem_free(ovpage, vpgtob(opages));
2288 2288 }
2289 2289
2290 2290 if (amp == NULL) {
2291 2291 nsvd->amp = NULL;
2292 2292 nsvd->anon_index = 0;
2293 2293 } else {
2294 2294 /*
2295 2295 * Need to create a new anon map for the new segment.
2296 2296 * We'll also allocate a new smaller array for the old
2297 2297 * smaller segment to save space.
2298 2298 */
2299 2299 opages = btop((uintptr_t)(addr - seg->s_base));
2300 2300 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2301 2301 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2302 2302 /*
2303 2303 * Free up now unused parts of anon_map array.
2304 2304 */
2305 2305 ulong_t an_idx = svd->anon_index + opages;
2306 2306
2307 2307 /*
2308 2308 * Shared anon map is no longer in use. Before
2309 2309 * freeing its pages purge all entries from
2310 2310 * pcache that belong to this amp.
2311 2311 */
2312 2312 if (svd->type == MAP_SHARED) {
2313 2313 ASSERT(amp->refcnt == 1);
2314 2314 ASSERT(svd->softlockcnt == 0);
2315 2315 anonmap_purge(amp);
2316 2316 }
2317 2317
2318 2318 if (amp->a_szc == seg->s_szc) {
2319 2319 if (seg->s_szc != 0) {
2320 2320 anon_free_pages(amp->ahp, an_idx, len,
2321 2321 seg->s_szc);
2322 2322 } else {
2323 2323 anon_free(amp->ahp, an_idx,
2324 2324 len);
2325 2325 }
2326 2326 } else {
2327 2327 ASSERT(svd->type == MAP_SHARED);
2328 2328 ASSERT(amp->a_szc > seg->s_szc);
2329 2329 anon_shmap_free_pages(amp, an_idx, len);
2330 2330 }
2331 2331
2332 2332 /*
2333 2333 * Unreserve swap space for the
2334 2334 * unmapped chunk of this segment in
2335 2335 * case it's MAP_SHARED
2336 2336 */
2337 2337 if (svd->type == MAP_SHARED) {
2338 2338 anon_unresv_zone(len,
2339 2339 seg->s_as->a_proc->p_zone);
2340 2340 amp->swresv -= len;
2341 2341 }
2342 2342 }
2343 2343 nsvd->anon_index = svd->anon_index +
2344 2344 btop((uintptr_t)(nseg->s_base - seg->s_base));
2345 2345 if (svd->type == MAP_SHARED) {
2346 2346 amp->refcnt++;
2347 2347 nsvd->amp = amp;
2348 2348 } else {
2349 2349 struct anon_map *namp;
2350 2350 struct anon_hdr *nahp;
2351 2351
2352 2352 ASSERT(svd->type == MAP_PRIVATE);
2353 2353 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
2354 2354 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
2355 2355 namp->a_szc = seg->s_szc;
2356 2356 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
2357 2357 0, btop(seg->s_size), ANON_SLEEP);
2358 2358 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index,
2359 2359 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
2360 2360 anon_release(amp->ahp, btop(amp->size));
2361 2361 svd->anon_index = 0;
2362 2362 nsvd->anon_index = 0;
2363 2363 amp->ahp = nahp;
2364 2364 amp->size = seg->s_size;
2365 2365 nsvd->amp = namp;
2366 2366 }
2367 2367 ANON_LOCK_EXIT(&amp->a_rwlock);
2368 2368 }
2369 2369 if (svd->swresv) {
2370 2370 if (svd->flags & MAP_NORESERVE) {
2371 2371 ASSERT(amp);
2372 2372 oswresv = svd->swresv;
2373 2373 svd->swresv = ptob(anon_pages(amp->ahp,
2374 2374 svd->anon_index, btop(seg->s_size)));
2375 2375 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
2376 2376 nsvd->anon_index, btop(nseg->s_size)));
2377 2377 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2378 2378 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
2379 2379 seg->s_as->a_proc->p_zone);
2380 2380 if (SEG_IS_PARTIAL_RESV(seg))
2381 2381 seg->s_as->a_resvsize -= oswresv -
2382 2382 (svd->swresv + nsvd->swresv);
2383 2383 } else {
2384 2384 size_t unlen;
2385 2385
2386 2386 if (svd->pageswap) {
2387 2387 oswresv = svd->swresv;
2388 2388 svd->swresv = segvn_count_swap_by_vpages(seg);
2389 2389 nsvd->swresv = segvn_count_swap_by_vpages(nseg);
2390 2390 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2391 2391 unlen = oswresv - (svd->swresv + nsvd->swresv);
2392 2392 } else {
2393 2393 if (seg->s_size + nseg->s_size + len !=
2394 2394 svd->swresv) {
2395 2395 panic("segvn_unmap: cannot split "
2396 2396 "swap reservation");
2397 2397 /*NOTREACHED*/
2398 2398 }
2399 2399 svd->swresv = seg->s_size;
2400 2400 nsvd->swresv = nseg->s_size;
2401 2401 unlen = len;
2402 2402 }
2403 2403 anon_unresv_zone(unlen,
2404 2404 seg->s_as->a_proc->p_zone);
2405 2405 }
2406 2406 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2407 2407 seg, len, 0);
2408 2408 }
2409 2409
2410 2410 return (0); /* I'm glad that's all over with! */
2411 2411 }
2412 2412
2413 2413 static void
2414 2414 segvn_free(struct seg *seg)
2415 2415 {
2416 2416 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2417 2417 pgcnt_t npages = seg_pages(seg);
2418 2418 struct anon_map *amp;
2419 2419 size_t len;
2420 2420
2421 2421 /*
2422 2422 * We don't need any segment level locks for "segvn" data
2423 2423 * since the address space is "write" locked.
2424 2424 */
2425 2425 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2426 2426 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2427 2427
2428 2428 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2429 2429
2430 2430 /*
2431 2431 * Be sure to unlock pages. XXX Why do things get free'ed instead
2432 2432 * of unmapped? XXX
2433 2433 */
2434 2434 (void) segvn_lockop(seg, seg->s_base, seg->s_size,
2435 2435 0, MC_UNLOCK, NULL, 0);
2436 2436
2437 2437 /*
2438 2438 * Deallocate the vpage and anon pointers if necessary and possible.
2439 2439 */
2440 2440 if (svd->vpage != NULL) {
2441 2441 kmem_free(svd->vpage, vpgtob(npages));
2442 2442 svd->vpage = NULL;
2443 2443 }
2444 2444 if ((amp = svd->amp) != NULL) {
2445 2445 /*
2446 2446 * If there are no more references to this anon_map
2447 2447 * structure, then deallocate the structure after freeing
2448 2448 * up all the anon slot pointers that we can.
2449 2449 */
2450 2450 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2451 2451 ASSERT(amp->a_szc >= seg->s_szc);
2452 2452 if (--amp->refcnt == 0) {
2453 2453 if (svd->type == MAP_PRIVATE) {
2454 2454 /*
2455 2455 * Private - we only need to anon_free
2456 2456 * the part that this segment refers to.
2457 2457 */
2458 2458 if (seg->s_szc != 0) {
2459 2459 anon_free_pages(amp->ahp,
2460 2460 svd->anon_index, seg->s_size,
2461 2461 seg->s_szc);
2462 2462 } else {
2463 2463 anon_free(amp->ahp, svd->anon_index,
2464 2464 seg->s_size);
2465 2465 }
2466 2466 } else {
2467 2467
2468 2468 /*
2469 2469 * Shared anon map is no longer in use. Before
2470 2470 * freeing its pages purge all entries from
2471 2471 * pcache that belong to this amp.
2472 2472 */
2473 2473 ASSERT(svd->softlockcnt == 0);
2474 2474 anonmap_purge(amp);
2475 2475
2476 2476 /*
2477 2477 * Shared - anon_free the entire
2478 2478 * anon_map's worth of stuff and
2479 2479 * release any swap reservation.
2480 2480 */
2481 2481 if (amp->a_szc != 0) {
2482 2482 anon_shmap_free_pages(amp, 0,
2483 2483 amp->size);
2484 2484 } else {
2485 2485 anon_free(amp->ahp, 0, amp->size);
2486 2486 }
2487 2487 if ((len = amp->swresv) != 0) {
2488 2488 anon_unresv_zone(len,
2489 2489 seg->s_as->a_proc->p_zone);
2490 2490 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2491 2491 "anon proc:%p %lu %u", seg, len, 0);
2492 2492 }
2493 2493 }
2494 2494 svd->amp = NULL;
2495 2495 ANON_LOCK_EXIT(&amp->a_rwlock);
2496 2496 anonmap_free(amp);
2497 2497 } else if (svd->type == MAP_PRIVATE) {
2498 2498 /*
2499 2499 * We had a private mapping which still has
2500 2500 * a held anon_map so just free up all the
2501 2501 * anon slot pointers that we were using.
2502 2502 */
2503 2503 if (seg->s_szc != 0) {
2504 2504 anon_free_pages(amp->ahp, svd->anon_index,
2505 2505 seg->s_size, seg->s_szc);
2506 2506 } else {
2507 2507 anon_free(amp->ahp, svd->anon_index,
2508 2508 seg->s_size);
2509 2509 }
2510 2510 ANON_LOCK_EXIT(&amp->a_rwlock);
2511 2511 } else {
2512 2512 ANON_LOCK_EXIT(&amp->a_rwlock);
2513 2513 }
2514 2514 }
2515 2515
2516 2516 /*
2517 2517 * Release swap reservation.
2518 2518 */
2519 2519 if ((len = svd->swresv) != 0) {
2520 2520 anon_unresv_zone(svd->swresv,
2521 2521 seg->s_as->a_proc->p_zone);
2522 2522 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2523 2523 seg, len, 0);
2524 2524 if (SEG_IS_PARTIAL_RESV(seg))
2525 2525 seg->s_as->a_resvsize -= svd->swresv;
2526 2526 svd->swresv = 0;
2527 2527 }
2528 2528 /*
2529 2529 * Release claim on vnode, credentials, and finally free the
2530 2530 * private data.
2531 2531 */
2532 2532 if (svd->vp != NULL) {
2533 2533 if (svd->type == MAP_SHARED)
2534 2534 lgrp_shm_policy_fini(NULL, svd->vp);
2535 2535 VN_RELE(svd->vp);
2536 2536 svd->vp = NULL;
2537 2537 }
2538 2538 crfree(svd->cred);
2539 2539 svd->pageprot = 0;
2540 2540 svd->pageadvice = 0;
2541 2541 svd->pageswap = 0;
2542 2542 svd->cred = NULL;
2543 2543
2544 2544 /*
2545 2545 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
2546 2546 * still working with this segment without holding as lock (in case
2547 2547 * it's called by pcache async thread).
2548 2548 */
2549 2549 ASSERT(svd->softlockcnt == 0);
2550 2550 mutex_enter(&svd->segfree_syncmtx);
2551 2551 mutex_exit(&svd->segfree_syncmtx);
2552 2552
2553 2553 seg->s_data = NULL;
2554 2554 kmem_cache_free(segvn_cache, svd);
2555 2555 }
2556 2556
2557 2557 /*
2558 2558 * Do an F_SOFTUNLOCK call over the range requested. The range must have
2559 2559 * already been F_SOFTLOCK'ed.
2560 2560 * Caller must always match addr and len of a softunlock with a previous
2561 2561 * softlock with exactly the same addr and len.
2562 2562 */
2563 2563 static void
2564 2564 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
2565 2565 {
2566 2566 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2567 2567 page_t *pp;
2568 2568 caddr_t adr;
2569 2569 struct vnode *vp;
2570 2570 u_offset_t offset;
2571 2571 ulong_t anon_index;
2572 2572 struct anon_map *amp;
2573 2573 struct anon *ap = NULL;
2574 2574
2575 2575 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2576 2576 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2577 2577
2578 2578 if ((amp = svd->amp) != NULL)
2579 2579 anon_index = svd->anon_index + seg_page(seg, addr);
2580 2580
2581 2581 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2582 2582 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2583 2583 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2584 2584 } else {
2585 2585 hat_unlock(seg->s_as->a_hat, addr, len);
2586 2586 }
2587 2587 for (adr = addr; adr < addr + len; adr += PAGESIZE) {
2588 2588 if (amp != NULL) {
2589 2589 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2590 2590 if ((ap = anon_get_ptr(amp->ahp, anon_index++))
2591 2591 != NULL) {
2592 2592 swap_xlate(ap, &vp, &offset);
2593 2593 } else {
2594 2594 vp = svd->vp;
2595 2595 offset = svd->offset +
2596 2596 (uintptr_t)(adr - seg->s_base);
2597 2597 }
2598 2598 ANON_LOCK_EXIT(&amp->a_rwlock);
2599 2599 } else {
2600 2600 vp = svd->vp;
2601 2601 offset = svd->offset +
2602 2602 (uintptr_t)(adr - seg->s_base);
2603 2603 }
2604 2604
2605 2605 /*
2606 2606 * Use page_find() instead of page_lookup() to
2607 2607 * find the page since we know that it is locked.
2608 2608 */
2609 2609 pp = page_find(vp, offset);
2610 2610 if (pp == NULL) {
2611 2611 panic(
2612 2612 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
2613 2613 (void *)adr, (void *)ap, (void *)vp, offset);
2614 2614 /*NOTREACHED*/
2615 2615 }
2616 2616
2617 2617 if (rw == S_WRITE) {
2618 2618 hat_setrefmod(pp);
2619 2619 if (seg->s_as->a_vbits)
2620 2620 hat_setstat(seg->s_as, adr, PAGESIZE,
2621 2621 P_REF | P_MOD);
2622 2622 } else if (rw != S_OTHER) {
2623 2623 hat_setref(pp);
2624 2624 if (seg->s_as->a_vbits)
2625 2625 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
2626 2626 }
2627 2627 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2628 2628 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
2629 2629 page_unlock(pp);
2630 2630 }
2631 2631 ASSERT(svd->softlockcnt >= btop(len));
2632 2632 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
2633 2633 /*
2634 2634 * All SOFTLOCKS are gone. Wakeup any waiting
2635 2635 * unmappers so they can try again to unmap.
2636 2636 * Check for waiters first without the mutex
2637 2637 * held so we don't always grab the mutex on
2638 2638 * softunlocks.
2639 2639 */
2640 2640 if (AS_ISUNMAPWAIT(seg->s_as)) {
2641 2641 mutex_enter(&seg->s_as->a_contents);
2642 2642 if (AS_ISUNMAPWAIT(seg->s_as)) {
2643 2643 AS_CLRUNMAPWAIT(seg->s_as);
2644 2644 cv_broadcast(&seg->s_as->a_cv);
2645 2645 }
2646 2646 mutex_exit(&seg->s_as->a_contents);
2647 2647 }
2648 2648 }
2649 2649 }
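/*
 * Pairing sketch for segvn_softunlock() above (hedged; a typical
 * consumer lives outside this file). The F_SOFTLOCK/F_SOFTUNLOCK pair
 * must cover the identical range, e.g.:
 *
 *	(void) as_fault(as->a_hat, as, addr, len, F_SOFTLOCK, S_WRITE);
 *	... perform I/O against the pinned pages ...
 *	(void) as_fault(as->a_hat, as, addr, len, F_SOFTUNLOCK, S_WRITE);
 */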
2650 2650
2651 2651 #define PAGE_HANDLED ((page_t *)-1)
2652 2652
2653 2653 /*
2654 2654 * Release all the pages in the NULL terminated ppp list
2655 2655 * which haven't already been converted to PAGE_HANDLED.
2656 2656 */
2657 2657 static void
2658 2658 segvn_pagelist_rele(page_t **ppp)
2659 2659 {
2660 2660 for (; *ppp != NULL; ppp++) {
2661 2661 if (*ppp != PAGE_HANDLED)
2662 2662 page_unlock(*ppp);
2663 2663 }
2664 2664 }
2665 2665
2666 2666 static int stealcow = 1;
2667 2667
2668 2668 /*
2669 2669 * Workaround for viking chip bug. See bug id 1220902.
2670 2670 * To fix this down in pagefault() would require importing so
2671 2671 * much as and segvn code as to be unmaintainable.
2672 2672 */
2673 2673 int enable_mbit_wa = 0;
2674 2674
2675 2675 /*
2676 2676 * Handles all the dirty work of getting the right
2677 2677 * anonymous pages and loading up the translations.
2678 2678 * This routine is called only from segvn_fault()
2679 2679 * when looping over the range of addresses requested.
2680 2680 *
2681 2681 * The basic algorithm here is:
2682 2682 * If this is an anon_zero case
2683 2683 * Call anon_zero to allocate page
2684 2684 * Load up translation
2685 2685 * Return
2686 2686 * endif
2687 2687 * If this is an anon page
2688 2688 * Use anon_getpage to get the page
2689 2689 * else
2690 2690 * Find page in pl[] list passed in
2691 2691 * endif
2692 2692 * If not a cow
2693 2693 * Load up the translation to the page
2694 2694 * return
2695 2695 * endif
2696 2696 * Call anon_private to handle cow
2697 2697 * Load up (writable) translation to new page
2698 2698 */
2699 2699 static faultcode_t
2700 2700 segvn_faultpage(
2701 2701 struct hat *hat, /* the hat to use for mapping */
2702 2702 struct seg *seg, /* seg_vn of interest */
2703 2703 caddr_t addr, /* address in as */
2704 2704 u_offset_t off, /* offset in vp */
2705 2705 struct vpage *vpage, /* pointer to vpage for vp, off */
2706 2706 page_t *pl[], /* object source page pointer */
2707 2707 uint_t vpprot, /* access allowed to object pages */
2708 2708 enum fault_type type, /* type of fault */
2709 2709 enum seg_rw rw, /* type of access at fault */
2710 2710 int brkcow) /* we may need to break cow */
2711 2711 {
2712 2712 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2713 2713 page_t *pp, **ppp;
2714 2714 uint_t pageflags = 0;
2715 2715 page_t *anon_pl[1 + 1];
2716 2716 page_t *opp = NULL; /* original page */
2717 2717 uint_t prot;
2718 2718 int err;
2719 2719 int cow;
2720 2720 int claim;
2721 2721 int steal = 0;
2722 2722 ulong_t anon_index;
2723 2723 struct anon *ap, *oldap;
2724 2724 struct anon_map *amp;
2725 2725 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
2726 2726 int anon_lock = 0;
2727 2727 anon_sync_obj_t cookie;
2728 2728
2729 2729 if (svd->flags & MAP_TEXT) {
2730 2730 hat_flag |= HAT_LOAD_TEXT;
2731 2731 }
2732 2732
2733 2733 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
2734 2734 ASSERT(seg->s_szc == 0);
2735 2735 ASSERT(svd->tr_state != SEGVN_TR_INIT);
2736 2736
2737 2737 /*
2738 2738 * Initialize protection value for this page.
2739 2739 * If we have per page protection values check it now.
2740 2740 */
2741 2741 if (svd->pageprot) {
2742 2742 uint_t protchk;
2743 2743
2744 2744 switch (rw) {
2745 2745 case S_READ:
2746 2746 protchk = PROT_READ;
2747 2747 break;
2748 2748 case S_WRITE:
2749 2749 protchk = PROT_WRITE;
2750 2750 break;
2751 2751 case S_EXEC:
2752 2752 protchk = PROT_EXEC;
2753 2753 break;
2754 2754 case S_OTHER:
2755 2755 default:
2756 2756 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
2757 2757 break;
2758 2758 }
2759 2759
2760 2760 prot = VPP_PROT(vpage);
2761 2761 if ((prot & protchk) == 0)
2762 2762 return (FC_PROT); /* illegal access type */
2763 2763 } else {
2764 2764 prot = svd->prot;
2765 2765 }
2766 2766
2767 2767 if (type == F_SOFTLOCK) {
2768 2768 atomic_inc_ulong((ulong_t *)&svd->softlockcnt);
2769 2769 }
2770 2770
2771 2771 /*
2772 2772 * Always acquire the anon array lock to prevent 2 threads from
2773 2773 * allocating separate anon slots for the same "addr".
2774 2774 */
2775 2775
2776 2776 if ((amp = svd->amp) != NULL) {
2777 2777 ASSERT(RW_READ_HELD(&amp->a_rwlock));
2778 2778 anon_index = svd->anon_index + seg_page(seg, addr);
2779 2779 anon_array_enter(amp, anon_index, &cookie);
2780 2780 anon_lock = 1;
2781 2781 }
2782 2782
2783 2783 if (svd->vp == NULL && amp != NULL) {
2784 2784 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
2785 2785 /*
2786 2786 * Allocate a (normally) writable anonymous page of
2787 2787 * zeroes. If no advance reservations, reserve now.
2788 2788 */
2789 2789 if (svd->flags & MAP_NORESERVE) {
2790 2790 if (anon_resv_zone(ptob(1),
2791 2791 seg->s_as->a_proc->p_zone)) {
2792 2792 atomic_add_long(&svd->swresv, ptob(1));
2793 2793 atomic_add_long(&seg->s_as->a_resvsize,
2794 2794 ptob(1));
2795 2795 } else {
2796 2796 err = ENOMEM;
2797 2797 goto out;
2798 2798 }
2799 2799 }
2800 2800 if ((pp = anon_zero(seg, addr, &ap,
2801 2801 svd->cred)) == NULL) {
2802 2802 err = ENOMEM;
2803 2803 goto out; /* out of swap space */
2804 2804 }
2805 2805 /*
2806 2806 * Re-acquire the anon_map lock and
2807 2807 * initialize the anon array entry.
2808 2808 */
2809 2809 (void) anon_set_ptr(amp->ahp, anon_index, ap,
2810 2810 ANON_SLEEP);
2811 2811
2812 2812 ASSERT(pp->p_szc == 0);
2813 2813
2814 2814 /*
2815 2815 * Handle pages that have been marked for migration
2816 2816 */
2817 2817 if (lgrp_optimizations())
2818 2818 page_migrate(seg, addr, &pp, 1);
2819 2819
2820 2820 if (enable_mbit_wa) {
2821 2821 if (rw == S_WRITE)
2822 2822 hat_setmod(pp);
2823 2823 else if (!hat_ismod(pp))
2824 2824 prot &= ~PROT_WRITE;
2825 2825 }
2826 2826 /*
2827 2827 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2828 2828 * with MC_LOCKAS, MCL_FUTURE) and this is a
2829 2829 * MAP_NORESERVE segment, we may need to
2830 2830 * permanently lock the page as it is being faulted
2831 2831 * for the first time. The following text applies
2832 2832 * only to MAP_NORESERVE segments:
2833 2833 *
2834 2834 * As per memcntl(2), if this segment was created
2835 2835 * after MCL_FUTURE was applied (a "future"
2836 2836 * segment), its pages must be locked. If this
2837 2837 * segment existed at MCL_FUTURE application (a
2838 2838 * "past" segment), the interface is unclear.
2839 2839 *
2840 2840 * We decide to lock only if vpage is present:
2841 2841 *
2842 2842 * - "future" segments will have a vpage array (see
2843 2843 * as_map), and so will be locked as required
2844 2844 *
2845 2845 * - "past" segments may not have a vpage array,
2846 2846 * depending on whether events (such as
2847 2847 * mprotect) have occurred. Locking if vpage
2848 2848 * exists will preserve legacy behavior. Not
2849 2849 * locking if vpage is absent, will not break
2850 2850 * the interface or legacy behavior. Note that
2851 2851 * allocating vpage here if it's absent requires
2852 2852 * upgrading the segvn reader lock, the cost of
2853 2853 * which does not seem worthwhile.
2854 2854 *
2855 2855 * Usually testing and setting VPP_ISPPLOCK and
2856 2856 * VPP_SETPPLOCK requires holding the segvn lock as
2857 2857 * writer, but in this case all readers are
2858 2858 * serializing on the anon array lock.
2859 2859 */
2860 2860 if (AS_ISPGLCK(seg->s_as) && vpage != NULL &&
2861 2861 (svd->flags & MAP_NORESERVE) &&
2862 2862 !VPP_ISPPLOCK(vpage)) {
2863 2863 proc_t *p = seg->s_as->a_proc;
2864 2864 ASSERT(svd->type == MAP_PRIVATE);
2865 2865 mutex_enter(&p->p_lock);
2866 2866 if (rctl_incr_locked_mem(p, NULL, PAGESIZE,
2867 2867 1) == 0) {
2868 2868 claim = VPP_PROT(vpage) & PROT_WRITE;
2869 2869 if (page_pp_lock(pp, claim, 0)) {
2870 2870 VPP_SETPPLOCK(vpage);
2871 2871 } else {
2872 2872 rctl_decr_locked_mem(p, NULL,
2873 2873 PAGESIZE, 1);
2874 2874 }
2875 2875 }
2876 2876 mutex_exit(&p->p_lock);
2877 2877 }
2878 2878
2879 2879 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2880 2880 hat_memload(hat, addr, pp, prot, hat_flag);
2881 2881
2882 2882 if (!(hat_flag & HAT_LOAD_LOCK))
2883 2883 page_unlock(pp);
2884 2884
2885 2885 anon_array_exit(&cookie);
2886 2886 return (0);
2887 2887 }
2888 2888 }
2889 2889
2890 2890 /*
2891 2891 * Obtain the page structure via anon_getpage() if it is
2892 2892 * a private copy of an object (the result of a previous
2893 2893 * copy-on-write).
2894 2894 */
2895 2895 if (amp != NULL) {
2896 2896 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) {
2897 2897 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE,
2898 2898 seg, addr, rw, svd->cred);
2899 2899 if (err)
2900 2900 goto out;
2901 2901
2902 2902 if (svd->type == MAP_SHARED) {
2903 2903 /*
2904 2904 * If this is a shared mapping to an
2905 2905 * anon_map, then ignore the write
2906 2906 * permissions returned by anon_getpage().
2907 2907 * They apply to the private mappings
2908 2908 * of this anon_map.
2909 2909 */
2910 2910 vpprot |= PROT_WRITE;
2911 2911 }
2912 2912 opp = anon_pl[0];
2913 2913 }
2914 2914 }
2915 2915
2916 2916 /*
2917 2917 * Search the pl[] list passed in if it is from the
2918 2918 * original object (i.e., not a private copy).
2919 2919 */
2920 2920 if (opp == NULL) {
2921 2921 /*
2922 2922 * Find original page. We must be bringing it in
2923 2923 * from the list in pl[].
2924 2924 */
2925 2925 for (ppp = pl; (opp = *ppp) != NULL; ppp++) {
2926 2926 if (opp == PAGE_HANDLED)
2927 2927 continue;
2928 2928 ASSERT(opp->p_vnode == svd->vp); /* XXX */
2929 2929 if (opp->p_offset == off)
2930 2930 break;
2931 2931 }
2932 2932 if (opp == NULL) {
2933 2933 panic("segvn_faultpage not found");
2934 2934 /*NOTREACHED*/
2935 2935 }
2936 2936 *ppp = PAGE_HANDLED;
2937 2937
2938 2938 }
2939 2939
2940 2940 ASSERT(PAGE_LOCKED(opp));
2941 2941
2942 2942 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2943 2943 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0);
2944 2944
2945 2945 /*
2946 2946 * The fault is treated as a copy-on-write fault if a
2947 2947 * write occurs on a private segment and the object
2948 2948 * page (i.e., mapping) is write protected. We assume
2949 2949 * that fatal protection checks have already been made.
2950 2950 */
2951 2951
2952 2952 if (brkcow) {
2953 2953 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2954 2954 cow = !(vpprot & PROT_WRITE);
2955 2955 } else if (svd->tr_state == SEGVN_TR_ON) {
2956 2956 /*
2957 2957 * If we are doing text replication, COW on first touch.
2958 2958 */
2959 2959 ASSERT(amp != NULL);
2960 2960 ASSERT(svd->vp != NULL);
2961 2961 ASSERT(rw != S_WRITE);
2962 2962 cow = (ap == NULL);
2963 2963 } else {
2964 2964 cow = 0;
2965 2965 }
2966 2966
2967 2967 /*
2968 2968 * If not a copy-on-write case load the translation
2969 2969 * and return.
2970 2970 */
2971 2971 if (cow == 0) {
2972 2972
2973 2973 /*
2974 2974 * Handle pages that have been marked for migration
2975 2975 */
2976 2976 if (lgrp_optimizations())
2977 2977 page_migrate(seg, addr, &opp, 1);
2978 2978
2979 2979 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) {
2980 2980 if (rw == S_WRITE)
2981 2981 hat_setmod(opp);
2982 2982 else if (rw != S_OTHER && !hat_ismod(opp))
2983 2983 prot &= ~PROT_WRITE;
2984 2984 }
2985 2985
2986 2986 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
2987 2987 (!svd->pageprot && svd->prot == (prot & vpprot)));
2988 2988 ASSERT(amp == NULL ||
2989 2989 svd->rcookie == HAT_INVALID_REGION_COOKIE);
2990 2990 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag,
2991 2991 svd->rcookie);
2992 2992
2993 2993 if (!(hat_flag & HAT_LOAD_LOCK))
2994 2994 page_unlock(opp);
2995 2995
2996 2996 if (anon_lock) {
2997 2997 anon_array_exit(&cookie);
2998 2998 }
2999 2999 return (0);
3000 3000 }
3001 3001
3002 3002 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3003 3003
3004 3004 hat_setref(opp);
3005 3005
3006 3006 ASSERT(amp != NULL && anon_lock);
3007 3007
3008 3008 /*
3009 3009 * Steal the page only if it isn't a private page
3010 3010 * since stealing a private page is not worth the effort.
3011 3011 */
3012 3012 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL)
3013 3013 steal = 1;
3014 3014
3015 3015 /*
3016 3016 * Steal the original page if the following conditions are true:
3017 3017 *
3018 3018 * We are low on memory, the page is not private, page is not large,
3019 3019 * not shared, not modified, not `locked' or if we have it `locked'
3020 3020 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
3021 3021 * that the page is not shared) and if it doesn't have any
3022 3022 * translations. page_struct_lock isn't needed to look at p_cowcnt
3023 3023 * and p_lckcnt because we first get exclusive lock on page.
3024 3024 */
3025 3025 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
3026 3026
3027 3027 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
3028 3028 page_tryupgrade(opp) && !hat_ismod(opp) &&
3029 3029 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) ||
3030 3030 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 &&
3031 3031 vpage != NULL && VPP_ISPPLOCK(vpage)))) {
3032 3032 /*
3033 3033 * Check if this page has other translations
3034 3034 * after unloading our translation.
3035 3035 */
3036 3036 if (hat_page_is_mapped(opp)) {
3037 3037 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3038 3038 hat_unload(seg->s_as->a_hat, addr, PAGESIZE,
3039 3039 HAT_UNLOAD);
3040 3040 }
3041 3041
3042 3042 /*
3043 3043 * hat_unload() might sync back someone else's recent
3044 3044 * modification, so check again.
3045 3045 */
3046 3046 if (!hat_ismod(opp) && !hat_page_is_mapped(opp))
3047 3047 pageflags |= STEAL_PAGE;
3048 3048 }
3049 3049
3050 3050 /*
3051 3051 * If we have a vpage pointer, see if it indicates that we have
3052 3052 * ``locked'' the page we map -- if so, tell anon_private to
3053 3053 * transfer the locking resource to the new page.
3054 3054 *
3055 3055 * See Statement at the beginning of segvn_lockop regarding
3056 3056 * the way lockcnts/cowcnts are handled during COW.
3057 3057 *
3058 3058 */
3059 3059 if (vpage != NULL && VPP_ISPPLOCK(vpage))
3060 3060 pageflags |= LOCK_PAGE;
3061 3061
3062 3062 /*
3063 3063 * Allocate a private page and perform the copy.
3064 3064 * For MAP_NORESERVE reserve swap space now, unless this
3065 3065 * is a cow fault on an existing anon page in which case
3066 3066 * MAP_NORESERVE will have made advance reservations.
3067 3067 */
3068 3068 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) {
3069 3069 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) {
3070 3070 atomic_add_long(&svd->swresv, ptob(1));
3071 3071 atomic_add_long(&seg->s_as->a_resvsize, ptob(1));
3072 3072 } else {
3073 3073 page_unlock(opp);
3074 3074 err = ENOMEM;
3075 3075 goto out;
3076 3076 }
3077 3077 }
3078 3078 oldap = ap;
3079 3079 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3080 3080 if (pp == NULL) {
3081 3081 err = ENOMEM; /* out of swap space */
3082 3082 goto out;
3083 3083 }
3084 3084
3085 3085 /*
3086 3086 * If we copied away from an anonymous page, then
3087 3087 * we are one step closer to freeing up an anon slot.
3088 3088 *
3089 3089 * NOTE: The original anon slot must be released while
3090 3090 * holding the "anon_map" lock. This is necessary to prevent
3091 3091 * other threads from obtaining a pointer to the anon slot
3092 3092 * which may be freed if its "refcnt" is 1.
3093 3093 */
3094 3094 if (oldap != NULL)
3095 3095 anon_decref(oldap);
3096 3096
3097 3097 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3098 3098
3099 3099 /*
3100 3100 * Handle pages that have been marked for migration
3101 3101 */
3102 3102 if (lgrp_optimizations())
3103 3103 page_migrate(seg, addr, &pp, 1);
3104 3104
3105 3105 ASSERT(pp->p_szc == 0);
3106 3106
3107 3107 ASSERT(!IS_VMODSORT(pp->p_vnode));
3108 3108 if (enable_mbit_wa) {
3109 3109 if (rw == S_WRITE)
3110 3110 hat_setmod(pp);
3111 3111 else if (!hat_ismod(pp))
3112 3112 prot &= ~PROT_WRITE;
3113 3113 }
3114 3114
3115 3115 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3116 3116 hat_memload(hat, addr, pp, prot, hat_flag);
3117 3117
3118 3118 if (!(hat_flag & HAT_LOAD_LOCK))
3119 3119 page_unlock(pp);
3120 3120
3121 3121 ASSERT(anon_lock);
3122 3122 anon_array_exit(&cookie);
3123 3123 return (0);
3124 3124 out:
3125 3125 if (anon_lock)
3126 3126 anon_array_exit(&cookie);
3127 3127
3128 3128 if (type == F_SOFTLOCK) {
3129 3129 atomic_dec_ulong((ulong_t *)&svd->softlockcnt);
3130 3130 }
3131 3131 return (FC_MAKE_ERR(err));
3132 3132 }
3133 3133
3134 3134 /*
3135 3135 * Relocate a bunch of smaller targ pages into one large repl page. All targ
3136 3136 * pages must be complete pages smaller than the replacement pages.
3137 3137 * It's assumed that no page's szc can change, since they are all PAGESIZE or
3138 3138 * complete large pages locked SHARED.
3139 3139 */
3140 3140 static void
3141 3141 segvn_relocate_pages(page_t **targ, page_t *replacement)
3142 3142 {
3143 3143 page_t *pp;
3144 3144 pgcnt_t repl_npgs, curnpgs;
3145 3145 pgcnt_t i;
3146 3146 uint_t repl_szc = replacement->p_szc;
3147 3147 page_t *first_repl = replacement;
3148 3148 page_t *repl;
3149 3149 spgcnt_t npgs;
3150 3150
3151 3151 VM_STAT_ADD(segvnvmstats.relocatepages[0]);
3152 3152
3153 3153 ASSERT(repl_szc != 0);
3154 3154 npgs = repl_npgs = page_get_pagecnt(repl_szc);
3155 3155
3156 3156 i = 0;
3157 3157 while (repl_npgs) {
3158 3158 spgcnt_t nreloc;
3159 3159 int err;
3160 3160 ASSERT(replacement != NULL);
3161 3161 pp = targ[i];
3162 3162 ASSERT(pp->p_szc < repl_szc);
3163 3163 ASSERT(PAGE_EXCL(pp));
3164 3164 ASSERT(!PP_ISFREE(pp));
3165 3165 curnpgs = page_get_pagecnt(pp->p_szc);
3166 3166 if (curnpgs == 1) {
3167 3167 VM_STAT_ADD(segvnvmstats.relocatepages[1]);
3168 3168 repl = replacement;
3169 3169 page_sub(&replacement, repl);
3170 3170 ASSERT(PAGE_EXCL(repl));
3171 3171 ASSERT(!PP_ISFREE(repl));
3172 3172 ASSERT(repl->p_szc == repl_szc);
3173 3173 } else {
3174 3174 page_t *repl_savepp;
3175 3175 int j;
3176 3176 VM_STAT_ADD(segvnvmstats.relocatepages[2]);
3177 3177 repl_savepp = replacement;
3178 3178 for (j = 0; j < curnpgs; j++) {
3179 3179 repl = replacement;
3180 3180 page_sub(&replacement, repl);
3181 3181 ASSERT(PAGE_EXCL(repl));
3182 3182 ASSERT(!PP_ISFREE(repl));
3183 3183 ASSERT(repl->p_szc == repl_szc);
3184 3184 ASSERT(page_pptonum(targ[i + j]) ==
3185 3185 page_pptonum(targ[i]) + j);
3186 3186 }
3187 3187 repl = repl_savepp;
3188 3188 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs));
3189 3189 }
3190 3190 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL);
3191 3191 if (err || nreloc != curnpgs) {
3192 3192 panic("segvn_relocate_pages: "
3193 3193 "page_relocate failed err=%d curnpgs=%ld "
3194 3194 "nreloc=%ld", err, curnpgs, nreloc);
3195 3195 }
3196 3196 ASSERT(curnpgs <= repl_npgs);
3197 3197 repl_npgs -= curnpgs;
3198 3198 i += curnpgs;
3199 3199 }
3200 3200 ASSERT(replacement == NULL);
3201 3201
3202 3202 repl = first_repl;
3203 3203 repl_npgs = npgs;
3204 3204 for (i = 0; i < repl_npgs; i++) {
3205 3205 ASSERT(PAGE_EXCL(repl));
3206 3206 ASSERT(!PP_ISFREE(repl));
3207 3207 targ[i] = repl;
3208 3208 page_downgrade(targ[i]);
3209 3209 repl++;
3210 3210 }
3211 3211 }
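/*
 * Scale example (illustrative, x86 numbers): with 4K base pages, a
 * repl_szc of 1 (a 2M replacement page) gives repl_npgs == 512, so 512
 * PAGESIZE targ pages are folded into the one large page, curnpgs
 * entries per page_relocate() call.
 */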
3212 3212
3213 3213 /*
3214 3214 * Check if all pages in ppa array are complete smaller than szc pages and
3215 3215 * their roots will still be aligned relative to their current size if the
3216 3216 * entire ppa array is relocated into one szc page. If these conditions are
3217 3217 * not met return 0.
3218 3218 *
3219 3219 * If all pages are properly aligned attempt to upgrade their locks
3220 3220 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
3221 3221 * upgrdfail was set to 0 by caller.
3222 3222 *
3223 3223 * Return 1 if all pages are aligned and locked exclusively.
3224 3224 *
3225 3225 * If all pages in ppa array happen to be physically contiguous to make one
3226 3226 * szc page and all exclusive locks are successfully obtained promote the page
3227 3227 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3228 3228 */
3229 3229 static int
3230 3230 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3231 3231 {
3232 3232 page_t *pp;
3233 3233 pfn_t pfn;
3234 3234 pgcnt_t totnpgs = page_get_pagecnt(szc);
3235 3235 pfn_t first_pfn;
3236 3236 int contig = 1;
3237 3237 pgcnt_t i;
3238 3238 pgcnt_t j;
3239 3239 uint_t curszc;
3240 3240 pgcnt_t curnpgs;
3241 3241 int root = 0;
3242 3242
3243 3243 ASSERT(szc > 0);
3244 3244
3245 3245 VM_STAT_ADD(segvnvmstats.fullszcpages[0]);
3246 3246
3247 3247 for (i = 0; i < totnpgs; i++) {
3248 3248 pp = ppa[i];
3249 3249 ASSERT(PAGE_SHARED(pp));
3250 3250 ASSERT(!PP_ISFREE(pp));
3251 3251 pfn = page_pptonum(pp);
3252 3252 if (i == 0) {
3253 3253 if (!IS_P2ALIGNED(pfn, totnpgs)) {
3254 3254 contig = 0;
3255 3255 } else {
3256 3256 first_pfn = pfn;
3257 3257 }
3258 3258 } else if (contig && pfn != first_pfn + i) {
3259 3259 contig = 0;
3260 3260 }
3261 3261 if (pp->p_szc == 0) {
3262 3262 if (root) {
3263 3263 VM_STAT_ADD(segvnvmstats.fullszcpages[1]);
3264 3264 return (0);
3265 3265 }
3266 3266 } else if (!root) {
3267 3267 if ((curszc = pp->p_szc) >= szc) {
3268 3268 VM_STAT_ADD(segvnvmstats.fullszcpages[2]);
3269 3269 return (0);
3270 3270 }
3271 3271 if (curszc == 0) {
3272 3272 /*
3273 3273 * p_szc changed means we don't have all pages
3274 3274 * locked. return failure.
3275 3275 */
3276 3276 VM_STAT_ADD(segvnvmstats.fullszcpages[3]);
3277 3277 return (0);
3278 3278 }
3279 3279 curnpgs = page_get_pagecnt(curszc);
3280 3280 if (!IS_P2ALIGNED(pfn, curnpgs) ||
3281 3281 !IS_P2ALIGNED(i, curnpgs)) {
3282 3282 VM_STAT_ADD(segvnvmstats.fullszcpages[4]);
3283 3283 return (0);
3284 3284 }
3285 3285 root = 1;
3286 3286 } else {
3287 3287 ASSERT(i > 0);
3288 3288 VM_STAT_ADD(segvnvmstats.fullszcpages[5]);
3289 3289 if (pp->p_szc != curszc) {
3290 3290 VM_STAT_ADD(segvnvmstats.fullszcpages[6]);
3291 3291 return (0);
3292 3292 }
3293 3293 if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3294 3294 panic("segvn_full_szcpages: "
3295 3295 "large page not physically contiguous");
3296 3296 }
3297 3297 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) {
3298 3298 root = 0;
3299 3299 }
3300 3300 }
3301 3301 }
3302 3302
3303 3303 for (i = 0; i < totnpgs; i++) {
3304 3304 ASSERT(ppa[i]->p_szc < szc);
3305 3305 if (!page_tryupgrade(ppa[i])) {
3306 3306 for (j = 0; j < i; j++) {
3307 3307 page_downgrade(ppa[j]);
3308 3308 }
3309 3309 *pszc = ppa[i]->p_szc;
3310 3310 *upgrdfail = 1;
3311 3311 VM_STAT_ADD(segvnvmstats.fullszcpages[7]);
3312 3312 return (0);
3313 3313 }
3314 3314 }
3315 3315
3316 3316 	/*
3317 3317 	 * When a page is put on a free cachelist its szc is set to 0. If the
3318 3318 	 * file system reclaimed pages from the cachelist, the targ pages will
3319 3319 	 * be physically contiguous with 0 p_szc. In this case just upgrade the
3320 3320 	 * szc of the targ pages without any relocations.
3321 3321 	 * To avoid any hat issues with previous small mappings,
3322 3322 	 * hat_pageunload() the target pages first.
3323 3323 	 */
3324 3324 if (contig) {
3325 3325 VM_STAT_ADD(segvnvmstats.fullszcpages[8]);
3326 3326 for (i = 0; i < totnpgs; i++) {
3327 3327 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3328 3328 }
3329 3329 for (i = 0; i < totnpgs; i++) {
3330 3330 ppa[i]->p_szc = szc;
3331 3331 }
3332 3332 for (i = 0; i < totnpgs; i++) {
3333 3333 ASSERT(PAGE_EXCL(ppa[i]));
3334 3334 page_downgrade(ppa[i]);
3335 3335 }
3336 3336 if (pszc != NULL) {
3337 3337 *pszc = szc;
3338 3338 }
3339 3339 }
3340 3340 VM_STAT_ADD(segvnvmstats.fullszcpages[9]);
3341 3341 return (1);
3342 3342 }
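
The root/tail walk in segvn_full_szcpages() is pure power-of-two arithmetic: a constituent group's root pfn satisfies IS_P2ALIGNED(pfn, curnpgs), and its last page satisfies P2PHASE(pfn, curnpgs) == curnpgs - 1, which is where root is cleared again. A standalone sketch of those two tests, with the P2 macros copied locally so the example builds outside the kernel; the group size of 8 is illustrative.

	#include <assert.h>
	#include <stdio.h>

	/* Local copies of the illumos P2 macros (sys/sysmacros.h). */
	#define	P2PHASE(x, align)	((x) & ((align) - 1))
	#define	IS_P2ALIGNED(v, a) \
		((((unsigned long)(v)) & ((unsigned long)(a) - 1)) == 0)

	int
	main(void)
	{
		unsigned long curnpgs = 8;	/* pages in one constituent group */
		unsigned long pfn;

		for (pfn = 16; pfn < 24; pfn++) {
			printf("pfn %lu: root=%d last=%d\n", pfn,
			    IS_P2ALIGNED(pfn, curnpgs) != 0,
			    P2PHASE(pfn, curnpgs) == curnpgs - 1);
		}

		/* The group starting at pfn 16 has its root at 16, last page at 23. */
		assert(IS_P2ALIGNED(16UL, curnpgs));
		assert(P2PHASE(23UL, curnpgs) == curnpgs - 1);
		return (0);
	}
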
3343 3343
3344 3344 /*
3345 3345  * Create physically contiguous pages for the [vp, off] - [vp, off +
3346 3346  * page_size(szc)) range and, for a private segment, return them in the
3347 3347  * ppa array. Pages are created either via IO or relocations.
3348 3348  *
3349 3349  * Return 1 on success and 0 on failure.
3350 3350  *
3351 3351  * If physically contiguous pages already exist for this range, return 1 without
3352 3352  * filling the ppa array. The caller initializes ppa[0] to NULL to detect that
3353 3353  * the array wasn't filled; in that case it fills the array via VOP_GETPAGE().
3354 3354  */
3355 3355
3356 3356 static int
3357 3357 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
3358 3358 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
3359 3359 int *downsize)
3360 3360
3361 3361 {
3362 3362 page_t *pplist = *ppplist;
3363 3363 size_t pgsz = page_get_pagesize(szc);
3364 3364 pgcnt_t pages = btop(pgsz);
3365 3365 ulong_t start_off = off;
3366 3366 u_offset_t eoff = off + pgsz;
3367 3367 spgcnt_t nreloc;
3368 3368 u_offset_t io_off = off;
3369 3369 size_t io_len;
3370 3370 page_t *io_pplist = NULL;
3371 3371 page_t *done_pplist = NULL;
3372 3372 pgcnt_t pgidx = 0;
3373 3373 page_t *pp;
3374 3374 page_t *newpp;
3375 3375 page_t *targpp;
3376 3376 int io_err = 0;
3377 3377 int i;
3378 3378 pfn_t pfn;
3379 3379 ulong_t ppages;
3380 3380 page_t *targ_pplist = NULL;
3381 3381 page_t *repl_pplist = NULL;
3382 3382 page_t *tmp_pplist;
3383 3383 int nios = 0;
3384 3384 uint_t pszc;
3385 3385 struct vattr va;
3386 3386
3387 3387 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]);
3388 3388
3389 3389 ASSERT(szc != 0);
3390 3390 ASSERT(pplist->p_szc == szc);
3391 3391
3392 3392 	/*
3393 3393 	 * downsize will be set to 1 only if we fail to lock pages. This will
3394 3394 	 * allow subsequent faults to try to relocate the page again. If we
3395 3395 	 * fail due to misalignment, don't downsize; let the caller map the
3396 3396 	 * whole region with small mappings to avoid more faults into the area
3397 3397 	 * where we can't get large pages anyway.
3398 3398 	 */
3399 3399 *downsize = 0;
3400 3400
3401 3401 while (off < eoff) {
3402 3402 newpp = pplist;
3403 3403 ASSERT(newpp != NULL);
3404 3404 ASSERT(PAGE_EXCL(newpp));
3405 3405 ASSERT(!PP_ISFREE(newpp));
3406 3406 /*
3407 3407 * we pass NULL for nrelocp to page_lookup_create()
3408 3408 * so that it doesn't relocate. We relocate here
3409 3409 * later only after we make sure we can lock all
3410 3410 * pages in the range we handle and they are all
3411 3411 * aligned.
3412 3412 */
3413 3413 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0);
3414 3414 ASSERT(pp != NULL);
3415 3415 ASSERT(!PP_ISFREE(pp));
3416 3416 ASSERT(pp->p_vnode == vp);
3417 3417 ASSERT(pp->p_offset == off);
3418 3418 if (pp == newpp) {
3419 3419 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]);
3420 3420 page_sub(&pplist, pp);
3421 3421 ASSERT(PAGE_EXCL(pp));
3422 3422 ASSERT(page_iolock_assert(pp));
3423 3423 page_list_concat(&io_pplist, &pp);
3424 3424 off += PAGESIZE;
3425 3425 continue;
3426 3426 }
3427 3427 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]);
3428 3428 pfn = page_pptonum(pp);
3429 3429 pszc = pp->p_szc;
3430 3430 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL &&
3431 3431 IS_P2ALIGNED(pfn, pages)) {
3432 3432 ASSERT(repl_pplist == NULL);
3433 3433 ASSERT(done_pplist == NULL);
3434 3434 ASSERT(pplist == *ppplist);
3435 3435 page_unlock(pp);
3436 3436 page_free_replacement_page(pplist);
3437 3437 page_create_putback(pages);
3438 3438 *ppplist = NULL;
3439 3439 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]);
3440 3440 return (1);
3441 3441 }
3442 3442 if (pszc >= szc) {
3443 3443 page_unlock(pp);
3444 3444 segvn_faultvnmpss_align_err1++;
3445 3445 goto out;
3446 3446 }
3447 3447 ppages = page_get_pagecnt(pszc);
3448 3448 if (!IS_P2ALIGNED(pfn, ppages)) {
3449 3449 ASSERT(pszc > 0);
3450 3450 /*
3451 3451 * sizing down to pszc won't help.
3452 3452 */
3453 3453 page_unlock(pp);
3454 3454 segvn_faultvnmpss_align_err2++;
3455 3455 goto out;
3456 3456 }
3457 3457 pfn = page_pptonum(newpp);
3458 3458 if (!IS_P2ALIGNED(pfn, ppages)) {
3459 3459 ASSERT(pszc > 0);
3460 3460 /*
3461 3461 * sizing down to pszc won't help.
3462 3462 */
3463 3463 page_unlock(pp);
3464 3464 segvn_faultvnmpss_align_err3++;
3465 3465 goto out;
3466 3466 }
3467 3467 if (!PAGE_EXCL(pp)) {
3468 3468 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
3469 3469 page_unlock(pp);
3470 3470 *downsize = 1;
3471 3471 *ret_pszc = pp->p_szc;
3472 3472 goto out;
3473 3473 }
3474 3474 targpp = pp;
3475 3475 if (io_pplist != NULL) {
3476 3476 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
3477 3477 io_len = off - io_off;
3478 3478 /*
3479 3479 * Some file systems like NFS don't check EOF
3480 3480 * conditions in VOP_PAGEIO(). Check it here
3481 3481 * now that pages are locked SE_EXCL. Any file
3482 3482 * truncation will wait until the pages are
3483 3483 			 * unlocked so no need to worry that the file will
3484 3484 * be truncated after we check its size here.
3485 3485 * XXX fix NFS to remove this check.
3486 3486 */
3487 3487 va.va_mask = AT_SIZE;
3488 3488 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
3489 3489 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
3490 3490 page_unlock(targpp);
3491 3491 goto out;
3492 3492 }
3493 3493 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3494 3494 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
3495 3495 *downsize = 1;
3496 3496 *ret_pszc = 0;
3497 3497 page_unlock(targpp);
3498 3498 goto out;
3499 3499 }
3500 3500 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3501 3501 B_READ, svd->cred, NULL);
3502 3502 if (io_err) {
3503 3503 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
3504 3504 page_unlock(targpp);
3505 3505 if (io_err == EDEADLK) {
3506 3506 segvn_vmpss_pageio_deadlk_err++;
3507 3507 }
3508 3508 goto out;
3509 3509 }
3510 3510 nios++;
3511 3511 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
3512 3512 while (io_pplist != NULL) {
3513 3513 pp = io_pplist;
3514 3514 page_sub(&io_pplist, pp);
3515 3515 ASSERT(page_iolock_assert(pp));
3516 3516 page_io_unlock(pp);
3517 3517 pgidx = (pp->p_offset - start_off) >>
3518 3518 PAGESHIFT;
3519 3519 ASSERT(pgidx < pages);
3520 3520 ppa[pgidx] = pp;
3521 3521 page_list_concat(&done_pplist, &pp);
3522 3522 }
3523 3523 }
3524 3524 pp = targpp;
3525 3525 ASSERT(PAGE_EXCL(pp));
3526 3526 ASSERT(pp->p_szc <= pszc);
3527 3527 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
3528 3528 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
3529 3529 page_unlock(pp);
3530 3530 *downsize = 1;
3531 3531 *ret_pszc = pp->p_szc;
3532 3532 goto out;
3533 3533 }
3534 3534 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
3535 3535 		/*
3536 3536 		 * The page szc could have changed before the entire group was
3537 3537 		 * locked. Reread the page szc.
3538 3538 		 */
3539 3539 pszc = pp->p_szc;
3540 3540 ppages = page_get_pagecnt(pszc);
3541 3541
3542 3542 /* link just the roots */
3543 3543 page_list_concat(&targ_pplist, &pp);
3544 3544 page_sub(&pplist, newpp);
3545 3545 page_list_concat(&repl_pplist, &newpp);
3546 3546 off += PAGESIZE;
3547 3547 while (--ppages != 0) {
3548 3548 newpp = pplist;
3549 3549 page_sub(&pplist, newpp);
3550 3550 off += PAGESIZE;
3551 3551 }
3552 3552 io_off = off;
3553 3553 }
3554 3554 if (io_pplist != NULL) {
3555 3555 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]);
3556 3556 io_len = eoff - io_off;
3557 3557 va.va_mask = AT_SIZE;
3558 3558 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) {
3559 3559 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]);
3560 3560 goto out;
3561 3561 }
3562 3562 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3563 3563 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]);
3564 3564 *downsize = 1;
3565 3565 *ret_pszc = 0;
3566 3566 goto out;
3567 3567 }
3568 3568 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3569 3569 B_READ, svd->cred, NULL);
3570 3570 if (io_err) {
3571 3571 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]);
3572 3572 if (io_err == EDEADLK) {
3573 3573 segvn_vmpss_pageio_deadlk_err++;
3574 3574 }
3575 3575 goto out;
3576 3576 }
3577 3577 nios++;
3578 3578 while (io_pplist != NULL) {
3579 3579 pp = io_pplist;
3580 3580 page_sub(&io_pplist, pp);
3581 3581 ASSERT(page_iolock_assert(pp));
3582 3582 page_io_unlock(pp);
3583 3583 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3584 3584 ASSERT(pgidx < pages);
3585 3585 ppa[pgidx] = pp;
3586 3586 }
3587 3587 }
3588 3588 	/*
3589 3589 	 * We're now bound to succeed or panic.
3590 3590 	 * Remove pages from done_pplist; it's not needed anymore.
3591 3591 	 */
3592 3592 while (done_pplist != NULL) {
3593 3593 pp = done_pplist;
3594 3594 page_sub(&done_pplist, pp);
3595 3595 }
3596 3596 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]);
3597 3597 ASSERT(pplist == NULL);
3598 3598 *ppplist = NULL;
3599 3599 while (targ_pplist != NULL) {
3600 3600 int ret;
3601 3601 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]);
3602 3602 ASSERT(repl_pplist);
3603 3603 pp = targ_pplist;
3604 3604 page_sub(&targ_pplist, pp);
3605 3605 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3606 3606 newpp = repl_pplist;
3607 3607 page_sub(&repl_pplist, newpp);
3608 3608 #ifdef DEBUG
3609 3609 pfn = page_pptonum(pp);
3610 3610 pszc = pp->p_szc;
3611 3611 ppages = page_get_pagecnt(pszc);
3612 3612 ASSERT(IS_P2ALIGNED(pfn, ppages));
3613 3613 pfn = page_pptonum(newpp);
3614 3614 ASSERT(IS_P2ALIGNED(pfn, ppages));
3615 3615 ASSERT(P2PHASE(pfn, pages) == pgidx);
3616 3616 #endif
3617 3617 nreloc = 0;
3618 3618 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL);
3619 3619 if (ret != 0 || nreloc == 0) {
3620 3620 panic("segvn_fill_vp_pages: "
3621 3621 "page_relocate failed");
3622 3622 }
3623 3623 pp = newpp;
3624 3624 while (nreloc-- != 0) {
3625 3625 ASSERT(PAGE_EXCL(pp));
3626 3626 ASSERT(pp->p_vnode == vp);
3627 3627 ASSERT(pgidx ==
3628 3628 ((pp->p_offset - start_off) >> PAGESHIFT));
3629 3629 ppa[pgidx++] = pp;
3630 3630 pp++;
3631 3631 }
3632 3632 }
3633 3633
3634 3634 if (svd->type == MAP_PRIVATE) {
3635 3635 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]);
3636 3636 for (i = 0; i < pages; i++) {
3637 3637 ASSERT(ppa[i] != NULL);
3638 3638 ASSERT(PAGE_EXCL(ppa[i]));
3639 3639 ASSERT(ppa[i]->p_vnode == vp);
3640 3640 ASSERT(ppa[i]->p_offset ==
3641 3641 start_off + (i << PAGESHIFT));
3642 3642 page_downgrade(ppa[i]);
3643 3643 }
3644 3644 ppa[pages] = NULL;
3645 3645 } else {
3646 3646 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]);
3647 3647 /*
3648 3648 		 * The caller will still call VOP_GETPAGE() for shared segments
3649 3649 		 * to check FS write permissions. For private segments we map
3650 3650 		 * the file read-only anyway, so no VOP_GETPAGE() is needed.
3651 3651 */
3652 3652 for (i = 0; i < pages; i++) {
3653 3653 ASSERT(ppa[i] != NULL);
3654 3654 ASSERT(PAGE_EXCL(ppa[i]));
3655 3655 ASSERT(ppa[i]->p_vnode == vp);
3656 3656 ASSERT(ppa[i]->p_offset ==
3657 3657 start_off + (i << PAGESHIFT));
3658 3658 page_unlock(ppa[i]);
3659 3659 }
3660 3660 ppa[0] = NULL;
3661 3661 }
3662 3662
3663 3663 return (1);
3664 3664 out:
3665 3665 /*
3666 3666 	 * Do the cleanup. Unlock target pages we didn't relocate. They are
3667 3667 	 * linked on targ_pplist by their root pages. Reassemble the unused
3668 3668 	 * replacement and io pages back onto pplist.
3669 3669 */
3670 3670 if (io_pplist != NULL) {
3671 3671 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]);
3672 3672 pp = io_pplist;
3673 3673 do {
3674 3674 ASSERT(pp->p_vnode == vp);
3675 3675 ASSERT(pp->p_offset == io_off);
3676 3676 ASSERT(page_iolock_assert(pp));
3677 3677 page_io_unlock(pp);
3678 3678 page_hashout(pp, NULL);
3679 3679 io_off += PAGESIZE;
3680 3680 } while ((pp = pp->p_next) != io_pplist);
3681 3681 page_list_concat(&io_pplist, &pplist);
3682 3682 pplist = io_pplist;
3683 3683 }
3684 3684 tmp_pplist = NULL;
3685 3685 while (targ_pplist != NULL) {
3686 3686 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]);
3687 3687 pp = targ_pplist;
3688 3688 ASSERT(PAGE_EXCL(pp));
3689 3689 page_sub(&targ_pplist, pp);
3690 3690
3691 3691 pszc = pp->p_szc;
3692 3692 ppages = page_get_pagecnt(pszc);
3693 3693 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3694 3694
3695 3695 if (pszc != 0) {
3696 3696 group_page_unlock(pp);
3697 3697 }
3698 3698 page_unlock(pp);
3699 3699
3700 3700 pp = repl_pplist;
3701 3701 ASSERT(pp != NULL);
3702 3702 ASSERT(PAGE_EXCL(pp));
3703 3703 ASSERT(pp->p_szc == szc);
3704 3704 page_sub(&repl_pplist, pp);
3705 3705
3706 3706 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3707 3707
3708 3708 /* relink replacement page */
3709 3709 page_list_concat(&tmp_pplist, &pp);
3710 3710 while (--ppages != 0) {
3711 3711 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]);
3712 3712 pp++;
3713 3713 ASSERT(PAGE_EXCL(pp));
3714 3714 ASSERT(pp->p_szc == szc);
3715 3715 page_list_concat(&tmp_pplist, &pp);
3716 3716 }
3717 3717 }
3718 3718 if (tmp_pplist != NULL) {
3719 3719 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]);
3720 3720 page_list_concat(&tmp_pplist, &pplist);
3721 3721 pplist = tmp_pplist;
3722 3722 }
3723 3723 /*
3724 3724 	 * At this point all pages are either on done_pplist or
3725 3725 	 * pplist. They can't all be on done_pplist; otherwise
3726 3726 	 * we'd already be done.
3727 3727 */
3728 3728 ASSERT(pplist != NULL);
3729 3729 if (nios != 0) {
3730 3730 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]);
3731 3731 pp = pplist;
3732 3732 do {
3733 3733 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]);
3734 3734 ASSERT(pp->p_szc == szc);
3735 3735 ASSERT(PAGE_EXCL(pp));
3736 3736 ASSERT(pp->p_vnode != vp);
3737 3737 pp->p_szc = 0;
3738 3738 } while ((pp = pp->p_next) != pplist);
3739 3739
3740 3740 pp = done_pplist;
3741 3741 do {
3742 3742 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]);
3743 3743 ASSERT(pp->p_szc == szc);
3744 3744 ASSERT(PAGE_EXCL(pp));
3745 3745 ASSERT(pp->p_vnode == vp);
3746 3746 pp->p_szc = 0;
3747 3747 } while ((pp = pp->p_next) != done_pplist);
3748 3748
3749 3749 while (pplist != NULL) {
3750 3750 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]);
3751 3751 pp = pplist;
3752 3752 page_sub(&pplist, pp);
3753 3753 page_free(pp, 0);
3754 3754 }
3755 3755
3756 3756 while (done_pplist != NULL) {
3757 3757 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]);
3758 3758 pp = done_pplist;
3759 3759 page_sub(&done_pplist, pp);
3760 3760 page_unlock(pp);
3761 3761 }
3762 3762 *ppplist = NULL;
3763 3763 return (0);
3764 3764 }
3765 3765 ASSERT(pplist == *ppplist);
3766 3766 if (io_err) {
3767 3767 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]);
3768 3768 /*
3769 3769 			 * Don't downsize on an io error;
3770 3770 			 * see if VOP_GETPAGE() succeeds.
3771 3771 * pplist may still be used in this case
3772 3772 * for relocations.
3773 3773 */
3774 3774 return (0);
3775 3775 }
3776 3776 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]);
3777 3777 page_free_replacement_page(pplist);
3778 3778 page_create_putback(pages);
3779 3779 *ppplist = NULL;
3780 3780 return (0);
3781 3781 }
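
Each page segvn_fill_vp_pages() produces lands in its ppa slot positionally, via pgidx = (p_offset - start_off) >> PAGESHIFT, so the array ends up ordered no matter what order the IO and relocation paths deliver pages. A tiny standalone check of that arithmetic; 4K pages are assumed here purely for the example, since PAGESHIFT is platform dependent.

	#include <assert.h>
	#include <stdio.h>

	#define	MY_PAGESHIFT	12			/* assume 4K pages */
	#define	MY_PAGESIZE	(1UL << MY_PAGESHIFT)

	int
	main(void)
	{
		unsigned long start_off = 0x40000;	/* large-page-aligned offset */
		unsigned long pages = 16;		/* slots in one large page */
		unsigned long k;

		/* Visit offsets in reverse to show arrival order doesn't matter. */
		for (k = pages; k-- != 0; ) {
			unsigned long p_offset = start_off + k * MY_PAGESIZE;
			unsigned long pgidx = (p_offset - start_off) >> MY_PAGESHIFT;

			assert(pgidx == k && pgidx < pages);
			printf("offset 0x%lx -> ppa[%lu]\n", p_offset, pgidx);
		}
		return (0);
	}
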
3782 3782
3783 3783 int segvn_anypgsz = 0;
3784 3784
3785 3785 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \
3786 3786 if ((type) == F_SOFTLOCK) { \
3787 3787 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
3788 3788 -(pages)); \
3789 3789 }
3790 3790
3791 3791 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \
3792 3792 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3793 3793 if ((rw) == S_WRITE) { \
3794 3794 for (i = 0; i < (pages); i++) { \
3795 3795 ASSERT((ppa)[i]->p_vnode == \
3796 3796 (ppa)[0]->p_vnode); \
3797 3797 hat_setmod((ppa)[i]); \
3798 3798 } \
3799 3799 } else if ((rw) != S_OTHER && \
3800 3800 ((prot) & (vpprot) & PROT_WRITE)) { \
3801 3801 for (i = 0; i < (pages); i++) { \
3802 3802 ASSERT((ppa)[i]->p_vnode == \
3803 3803 (ppa)[0]->p_vnode); \
3804 3804 if (!hat_ismod((ppa)[i])) { \
3805 3805 prot &= ~PROT_WRITE; \
3806 3806 break; \
3807 3807 } \
3808 3808 } \
3809 3809 } \
3810 3810 }
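
For pages of a VMODSORT vnode the macro above does one of two things: on an actual write it sets the mod bit on every constituent page, and on any other access (except S_OTHER) that would hand out write permission it strips PROT_WRITE unless every page is already modified, so the first real write still faults and dirties the pages. A hedged standalone sketch of that decision follows; the flag array and XPROT_WRITE bit stand in for hat_ismod() and the real protection bits.

	#include <stdio.h>

	#define	XPROT_WRITE	0x2	/* illustrative protection bit */

	/*
	 * Effective protections for one large-page translation; ismod[i]
	 * stands in for hat_ismod(ppa[i]).
	 */
	static unsigned int
	update_modbits(int is_write, unsigned int prot, const int *ismod, int pages)
	{
		int i;

		if (is_write)
			return (prot);	/* real write: caller marks all pages mod */

		for (i = 0; i < pages; i++) {
			if (!ismod[i])	/* one clean page: make the write fault */
				return (prot & ~XPROT_WRITE);
		}
		return (prot);		/* everything already dirty: keep write */
	}

	int
	main(void)
	{
		int clean[4] = { 1, 1, 0, 1 };
		int dirty[4] = { 1, 1, 1, 1 };

		printf("mixed group -> prot 0x%x\n",
		    update_modbits(0, XPROT_WRITE, clean, 4));	/* prints 0x0 */
		printf("dirty group -> prot 0x%x\n",
		    update_modbits(0, XPROT_WRITE, dirty, 4));	/* prints 0x2 */
		return (0);
	}
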
3811 3811
3812 3812 #ifdef VM_STATS
3813 3813
3814 3814 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \
3815 3815 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
3816 3816
3817 3817 #else /* VM_STATS */
3818 3818
3819 3819 #define SEGVN_VMSTAT_FLTVNPAGES(idx)
3820 3820
3821 3821 #endif
3822 3822
3823 3823 static faultcode_t
3824 3824 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
3825 3825 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
3826 3826 caddr_t eaddr, int brkcow)
3827 3827 {
3828 3828 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
3829 3829 struct anon_map *amp = svd->amp;
3830 3830 uchar_t segtype = svd->type;
3831 3831 uint_t szc = seg->s_szc;
3832 3832 size_t pgsz = page_get_pagesize(szc);
3833 3833 size_t maxpgsz = pgsz;
3834 3834 pgcnt_t pages = btop(pgsz);
3835 3835 pgcnt_t maxpages = pages;
3836 3836 size_t ppasize = (pages + 1) * sizeof (page_t *);
3837 3837 caddr_t a = lpgaddr;
3838 3838 caddr_t maxlpgeaddr = lpgeaddr;
3839 3839 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3840 3840 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3841 3841 struct vpage *vpage = (svd->vpage != NULL) ?
3842 3842 &svd->vpage[seg_page(seg, a)] : NULL;
3843 3843 vnode_t *vp = svd->vp;
3844 3844 page_t **ppa;
3845 3845 uint_t pszc;
3846 3846 size_t ppgsz;
3847 3847 pgcnt_t ppages;
3848 3848 faultcode_t err = 0;
3849 3849 int ierr;
3850 3850 int vop_size_err = 0;
3851 3851 uint_t protchk, prot, vpprot;
3852 3852 ulong_t i;
3853 3853 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
3854 3854 anon_sync_obj_t an_cookie;
3855 3855 enum seg_rw arw;
3856 3856 int alloc_failed = 0;
3857 3857 int adjszc_chk;
3858 3858 struct vattr va;
3859 - int xhat = 0;
3860 3859 page_t *pplist;
3861 3860 pfn_t pfn;
3862 3861 int physcontig;
3863 3862 int upgrdfail;
3864 3863 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
3865 3864 int tron = (svd->tr_state == SEGVN_TR_ON);
3866 3865
3867 3866 ASSERT(szc != 0);
3868 3867 ASSERT(vp != NULL);
3869 3868 ASSERT(brkcow == 0 || amp != NULL);
3870 3869 ASSERT(tron == 0 || amp != NULL);
3871 3870 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
3872 3871 ASSERT(!(svd->flags & MAP_NORESERVE));
3873 3872 ASSERT(type != F_SOFTUNLOCK);
3874 3873 ASSERT(IS_P2ALIGNED(a, maxpgsz));
3875 3874 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
3876 3875 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3877 3876 ASSERT(seg->s_szc < NBBY * sizeof (int));
3878 3877 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
3879 3878 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3880 3879
3881 3880 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]);
3882 3881 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]);
3883 3882
3884 3883 if (svd->flags & MAP_TEXT) {
3885 3884 hat_flag |= HAT_LOAD_TEXT;
3886 3885 }
3887 3886
3888 3887 if (svd->pageprot) {
3889 3888 switch (rw) {
3890 3889 case S_READ:
3891 3890 protchk = PROT_READ;
3892 3891 break;
3893 3892 case S_WRITE:
3894 3893 protchk = PROT_WRITE;
3895 3894 break;
3896 3895 case S_EXEC:
3897 3896 protchk = PROT_EXEC;
3898 3897 break;
3899 3898 case S_OTHER:
3900 3899 default:
3901 3900 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
3902 3901 break;
3903 3902 }
3904 3903 } else {
3905 3904 prot = svd->prot;
3906 3905 /* caller has already done segment level protection check. */
3907 3906 }
3908 3907
3909 - if (seg->s_as->a_hat != hat) {
3910 - xhat = 1;
3911 - }
3912 -
3913 3908 if (rw == S_WRITE && segtype == MAP_PRIVATE) {
3914 3909 SEGVN_VMSTAT_FLTVNPAGES(2);
3915 3910 arw = S_READ;
3916 3911 } else {
3917 3912 arw = rw;
3918 3913 }
3919 3914
3920 3915 ppa = kmem_alloc(ppasize, KM_SLEEP);
3921 3916
3922 3917 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);
3923 3918
3924 3919 for (;;) {
3925 3920 adjszc_chk = 0;
3926 3921 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
3927 3922 if (adjszc_chk) {
3928 3923 while (szc < seg->s_szc) {
3929 3924 uintptr_t e;
3930 3925 uint_t tszc;
3931 3926 tszc = segvn_anypgsz_vnode ? szc + 1 :
3932 3927 seg->s_szc;
3933 3928 ppgsz = page_get_pagesize(tszc);
3934 3929 if (!IS_P2ALIGNED(a, ppgsz) ||
3935 3930 ((alloc_failed >> tszc) & 0x1)) {
3936 3931 break;
3937 3932 }
3938 3933 SEGVN_VMSTAT_FLTVNPAGES(4);
3939 3934 szc = tszc;
3940 3935 pgsz = ppgsz;
3941 3936 pages = btop(pgsz);
3942 3937 e = P2ROUNDUP((uintptr_t)eaddr, pgsz);
3943 3938 lpgeaddr = (caddr_t)e;
3944 3939 }
3945 3940 }
3946 3941
3947 3942 again:
3948 3943 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) {
3949 3944 ASSERT(IS_P2ALIGNED(aindx, maxpages));
3950 3945 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
3951 3946 anon_array_enter(amp, aindx, &an_cookie);
3952 3947 if (anon_get_ptr(amp->ahp, aindx) != NULL) {
3953 3948 SEGVN_VMSTAT_FLTVNPAGES(5);
3954 3949 ASSERT(anon_pages(amp->ahp, aindx,
3955 3950 maxpages) == maxpages);
3956 3951 anon_array_exit(&an_cookie);
3957 3952 ANON_LOCK_EXIT(&->a_rwlock);
3958 3953 err = segvn_fault_anonpages(hat, seg,
3959 3954 a, a + maxpgsz, type, rw,
3960 3955 MAX(a, addr),
3961 3956 MIN(a + maxpgsz, eaddr), brkcow);
3962 3957 if (err != 0) {
3963 3958 SEGVN_VMSTAT_FLTVNPAGES(6);
3964 3959 goto out;
3965 3960 }
3966 3961 if (szc < seg->s_szc) {
3967 3962 szc = seg->s_szc;
3968 3963 pgsz = maxpgsz;
3969 3964 pages = maxpages;
3970 3965 lpgeaddr = maxlpgeaddr;
3971 3966 }
3972 3967 goto next;
3973 3968 } else {
3974 3969 ASSERT(anon_pages(amp->ahp, aindx,
3975 3970 maxpages) == 0);
3976 3971 SEGVN_VMSTAT_FLTVNPAGES(7);
3977 3972 anon_array_exit(&an_cookie);
3978 3973 ANON_LOCK_EXIT(&->a_rwlock);
3979 3974 }
3980 3975 }
3981 3976 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz));
3982 3977 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz));
3983 3978
3984 3979 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
3985 3980 ASSERT(vpage != NULL);
3986 3981 prot = VPP_PROT(vpage);
3987 3982 ASSERT(sameprot(seg, a, maxpgsz));
3988 3983 if ((prot & protchk) == 0) {
3989 3984 SEGVN_VMSTAT_FLTVNPAGES(8);
3990 3985 err = FC_PROT;
3991 3986 goto out;
3992 3987 }
3993 3988 }
3994 3989 if (type == F_SOFTLOCK) {
3995 3990 atomic_add_long((ulong_t *)&svd->softlockcnt,
3996 3991 pages);
3997 3992 }
3998 3993
3999 3994 pplist = NULL;
4000 3995 physcontig = 0;
4001 3996 ppa[0] = NULL;
4002 3997 if (!brkcow && !tron && szc &&
4003 3998 !page_exists_physcontig(vp, off, szc,
4004 3999 segtype == MAP_PRIVATE ? ppa : NULL)) {
4005 4000 SEGVN_VMSTAT_FLTVNPAGES(9);
4006 4001 if (page_alloc_pages(vp, seg, a, &pplist, NULL,
4007 4002 szc, 0, 0) && type != F_SOFTLOCK) {
4008 4003 SEGVN_VMSTAT_FLTVNPAGES(10);
4009 4004 pszc = 0;
4010 4005 ierr = -1;
4011 4006 alloc_failed |= (1 << szc);
4012 4007 break;
4013 4008 }
4014 4009 if (pplist != NULL &&
4015 4010 vp->v_mpssdata == SEGVN_PAGEIO) {
4016 4011 int downsize;
4017 4012 SEGVN_VMSTAT_FLTVNPAGES(11);
4018 4013 physcontig = segvn_fill_vp_pages(svd,
4019 4014 vp, off, szc, ppa, &pplist,
4020 4015 &pszc, &downsize);
4021 4016 ASSERT(!physcontig || pplist == NULL);
4022 4017 if (!physcontig && downsize &&
4023 4018 type != F_SOFTLOCK) {
4024 4019 ASSERT(pplist == NULL);
4025 4020 SEGVN_VMSTAT_FLTVNPAGES(12);
4026 4021 ierr = -1;
4027 4022 break;
4028 4023 }
4029 4024 ASSERT(!physcontig ||
4030 4025 segtype == MAP_PRIVATE ||
4031 4026 ppa[0] == NULL);
4032 4027 if (physcontig && ppa[0] == NULL) {
4033 4028 physcontig = 0;
4034 4029 }
4035 4030 }
4036 4031 } else if (!brkcow && !tron && szc && ppa[0] != NULL) {
4037 4032 SEGVN_VMSTAT_FLTVNPAGES(13);
4038 4033 ASSERT(segtype == MAP_PRIVATE);
4039 4034 physcontig = 1;
4040 4035 }
4041 4036
4042 4037 if (!physcontig) {
4043 4038 SEGVN_VMSTAT_FLTVNPAGES(14);
4044 4039 ppa[0] = NULL;
4045 4040 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
4046 4041 &vpprot, ppa, pgsz, seg, a, arw,
4047 4042 svd->cred, NULL);
4048 4043 #ifdef DEBUG
4049 4044 if (ierr == 0) {
4050 4045 for (i = 0; i < pages; i++) {
4051 4046 ASSERT(PAGE_LOCKED(ppa[i]));
4052 4047 ASSERT(!PP_ISFREE(ppa[i]));
4053 4048 ASSERT(ppa[i]->p_vnode == vp);
4054 4049 ASSERT(ppa[i]->p_offset ==
4055 4050 off + (i << PAGESHIFT));
4056 4051 }
4057 4052 }
4058 4053 #endif /* DEBUG */
4059 4054 if (segtype == MAP_PRIVATE) {
4060 4055 SEGVN_VMSTAT_FLTVNPAGES(15);
4061 4056 vpprot &= ~PROT_WRITE;
4062 4057 }
4063 4058 } else {
4064 4059 ASSERT(segtype == MAP_PRIVATE);
4065 4060 SEGVN_VMSTAT_FLTVNPAGES(16);
4066 4061 vpprot = PROT_ALL & ~PROT_WRITE;
4067 4062 ierr = 0;
4068 4063 }
4069 4064
4070 4065 if (ierr != 0) {
4071 4066 SEGVN_VMSTAT_FLTVNPAGES(17);
4072 4067 if (pplist != NULL) {
4073 4068 SEGVN_VMSTAT_FLTVNPAGES(18);
4074 4069 page_free_replacement_page(pplist);
4075 4070 page_create_putback(pages);
4076 4071 }
4077 4072 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4078 4073 if (a + pgsz <= eaddr) {
4079 4074 SEGVN_VMSTAT_FLTVNPAGES(19);
4080 4075 err = FC_MAKE_ERR(ierr);
4081 4076 goto out;
4082 4077 }
4083 4078 va.va_mask = AT_SIZE;
4084 4079 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
4085 4080 SEGVN_VMSTAT_FLTVNPAGES(20);
4086 4081 err = FC_MAKE_ERR(EIO);
4087 4082 goto out;
4088 4083 }
4089 4084 if (btopr(va.va_size) >= btopr(off + pgsz)) {
4090 4085 SEGVN_VMSTAT_FLTVNPAGES(21);
4091 4086 err = FC_MAKE_ERR(ierr);
4092 4087 goto out;
4093 4088 }
4094 4089 if (btopr(va.va_size) <
4095 4090 btopr(off + (eaddr - a))) {
4096 4091 SEGVN_VMSTAT_FLTVNPAGES(22);
4097 4092 err = FC_MAKE_ERR(ierr);
4098 4093 goto out;
4099 4094 }
4100 4095 if (brkcow || tron || type == F_SOFTLOCK) {
4101 4096 /* can't reduce map area */
4102 4097 SEGVN_VMSTAT_FLTVNPAGES(23);
4103 4098 vop_size_err = 1;
4104 4099 goto out;
4105 4100 }
4106 4101 SEGVN_VMSTAT_FLTVNPAGES(24);
4107 4102 ASSERT(szc != 0);
4108 4103 pszc = 0;
4109 4104 ierr = -1;
4110 4105 break;
4111 4106 }
4112 4107
4113 4108 if (amp != NULL) {
4114 4109 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4115 4110 anon_array_enter(amp, aindx, &an_cookie);
4116 4111 }
4117 4112 if (amp != NULL &&
4118 4113 anon_get_ptr(amp->ahp, aindx) != NULL) {
4119 4114 ulong_t taindx = P2ALIGN(aindx, maxpages);
4120 4115
4121 4116 SEGVN_VMSTAT_FLTVNPAGES(25);
4122 4117 ASSERT(anon_pages(amp->ahp, taindx,
4123 4118 maxpages) == maxpages);
4124 4119 for (i = 0; i < pages; i++) {
4125 4120 page_unlock(ppa[i]);
4126 4121 }
4127 4122 anon_array_exit(&an_cookie);
4128 4123 ANON_LOCK_EXIT(&->a_rwlock);
4129 4124 if (pplist != NULL) {
4130 4125 page_free_replacement_page(pplist);
4131 4126 page_create_putback(pages);
4132 4127 }
4133 4128 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4134 4129 if (szc < seg->s_szc) {
4135 4130 SEGVN_VMSTAT_FLTVNPAGES(26);
4136 4131 /*
4137 4132 * For private segments SOFTLOCK
4138 4133 * either always breaks cow (any rw
4139 4134 * type except S_READ_NOCOW) or
4140 4135 * address space is locked as writer
4141 4136 * (S_READ_NOCOW case) and anon slots
4142 4137 * can't show up on second check.
4143 4138 * Therefore if we are here for
4144 4139 * SOFTLOCK case it must be a cow
4145 4140 * break but cow break never reduces
4146 4141 				 * szc. Text replication (tron) in
4147 4142 * this case works as cow break.
4148 4143 * Thus the assert below.
4149 4144 */
4150 4145 ASSERT(!brkcow && !tron &&
4151 4146 type != F_SOFTLOCK);
4152 4147 pszc = seg->s_szc;
4153 4148 ierr = -2;
4154 4149 break;
4155 4150 }
4156 4151 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4157 4152 goto again;
4158 4153 }
4159 4154 #ifdef DEBUG
4160 4155 if (amp != NULL) {
4161 4156 ulong_t taindx = P2ALIGN(aindx, maxpages);
4162 4157 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4163 4158 }
4164 4159 #endif /* DEBUG */
4165 4160
4166 4161 if (brkcow || tron) {
4167 4162 ASSERT(amp != NULL);
4168 4163 ASSERT(pplist == NULL);
4169 4164 ASSERT(szc == seg->s_szc);
4170 4165 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4171 4166 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4172 4167 SEGVN_VMSTAT_FLTVNPAGES(27);
4173 4168 ierr = anon_map_privatepages(amp, aindx, szc,
4174 4169 seg, a, prot, ppa, vpage, segvn_anypgsz,
4175 4170 tron ? PG_LOCAL : 0, svd->cred);
4176 4171 if (ierr != 0) {
4177 4172 SEGVN_VMSTAT_FLTVNPAGES(28);
4178 4173 anon_array_exit(&an_cookie);
4179 4174 ANON_LOCK_EXIT(&->a_rwlock);
4180 4175 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4181 4176 err = FC_MAKE_ERR(ierr);
4182 4177 goto out;
4183 4178 }
4184 4179
4185 4180 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4186 4181 /*
4187 4182 * p_szc can't be changed for locked
4188 4183 * swapfs pages.
4189 4184 */
4190 4185 ASSERT(svd->rcookie ==
4191 4186 HAT_INVALID_REGION_COOKIE);
4192 4187 hat_memload_array(hat, a, pgsz, ppa, prot,
4193 4188 hat_flag);
4194 4189
4195 4190 if (!(hat_flag & HAT_LOAD_LOCK)) {
4196 4191 SEGVN_VMSTAT_FLTVNPAGES(29);
4197 4192 for (i = 0; i < pages; i++) {
4198 4193 page_unlock(ppa[i]);
4199 4194 }
4200 4195 }
4201 4196 anon_array_exit(&an_cookie);
4202 4197 ANON_LOCK_EXIT(&->a_rwlock);
4203 4198 goto next;
4204 4199 }
4205 4200
4206 4201 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4207 4202 (!svd->pageprot && svd->prot == (prot & vpprot)));
4208 4203
4209 4204 pfn = page_pptonum(ppa[0]);
4210 4205 /*
4211 4206 * hat_page_demote() needs an SE_EXCL lock on one of
4212 4207 * constituent page_t's and it decreases root's p_szc
4213 4208 * last. This means if root's p_szc is equal szc and
4214 4209 * all its constituent pages are locked
4215 4210 * hat_page_demote() that could have changed p_szc to
4216 4211 * szc is already done and no new have page_demote()
4217 4212 * can start for this large page.
4218 4213 */
4219 4214
4220 4215 /*
4221 4216 			 * We need to make sure the same mapping size is used
4222 4217 			 * for the same address range if there's a possibility
4223 4218 			 * the address is already mapped, because the hat layer
4224 4219 			 * panics when a translation is loaded for a range
4225 4220 			 * already mapped with a different page size. We achieve
4226 4221 			 * this by always using the largest page size possible,
4227 4222 			 * subject to the constraints of page size, segment page
4228 4223 			 * size and page alignment. Since mappings are
4229 4224 			 * invalidated when those constraints change, making it
4230 4225 			 * impossible to reuse the previous mapping size, no
4231 4226 			 * mapping size conflicts should happen.
4232 4227 */
4233 4228
4234 4229 chkszc:
4235 4230 if ((pszc = ppa[0]->p_szc) == szc &&
4236 4231 IS_P2ALIGNED(pfn, pages)) {
4237 4232
4238 4233 SEGVN_VMSTAT_FLTVNPAGES(30);
4239 4234 #ifdef DEBUG
4240 4235 for (i = 0; i < pages; i++) {
4241 4236 ASSERT(PAGE_LOCKED(ppa[i]));
4242 4237 ASSERT(!PP_ISFREE(ppa[i]));
4243 4238 ASSERT(page_pptonum(ppa[i]) ==
4244 4239 pfn + i);
4245 4240 ASSERT(ppa[i]->p_szc == szc);
4246 4241 ASSERT(ppa[i]->p_vnode == vp);
4247 4242 ASSERT(ppa[i]->p_offset ==
4248 4243 off + (i << PAGESHIFT));
4249 4244 }
4250 4245 #endif /* DEBUG */
4251 4246 /*
4252 4247 			 * All pages are of the szc we need and they are
4253 4248 			 * all locked, so they can't change szc. Load the
4254 4249 			 * translations.
4255 4250 			 *
4256 4251 			 * If the page got promoted since the last check
4257 4252 			 * we don't need pplist.
4258 4253 */
4259 4254 if (pplist != NULL) {
4260 4255 page_free_replacement_page(pplist);
4261 4256 page_create_putback(pages);
4262 4257 }
4263 4258 if (PP_ISMIGRATE(ppa[0])) {
4264 4259 page_migrate(seg, a, ppa, pages);
4265 4260 }
4266 4261 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4267 4262 prot, vpprot);
4268 - if (!xhat) {
4269 - hat_memload_array_region(hat, a, pgsz,
4270 - ppa, prot & vpprot, hat_flag,
4271 - svd->rcookie);
4272 - } else {
4273 - /*
4274 - * avoid large xhat mappings to FS
4275 - * pages so that hat_page_demote()
4276 - * doesn't need to check for xhat
4277 - * large mappings.
4278 - * Don't use regions with xhats.
4279 - */
4280 - for (i = 0; i < pages; i++) {
4281 - hat_memload(hat,
4282 - a + (i << PAGESHIFT),
4283 - ppa[i], prot & vpprot,
4284 - hat_flag);
4285 - }
4286 - }
4263 + hat_memload_array_region(hat, a, pgsz,
4264 + ppa, prot & vpprot, hat_flag,
4265 + svd->rcookie);
4287 4266
4288 4267 if (!(hat_flag & HAT_LOAD_LOCK)) {
4289 4268 for (i = 0; i < pages; i++) {
4290 4269 page_unlock(ppa[i]);
4291 4270 }
4292 4271 }
4293 4272 if (amp != NULL) {
4294 4273 anon_array_exit(&an_cookie);
4295 4274 ANON_LOCK_EXIT(&->a_rwlock);
4296 4275 }
4297 4276 goto next;
4298 4277 }
4299 4278
4300 4279 /*
4301 4280 * See if upsize is possible.
4302 4281 */
4303 4282 if (pszc > szc && szc < seg->s_szc &&
4304 4283 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4305 4284 pgcnt_t aphase;
4306 4285 uint_t pszc1 = MIN(pszc, seg->s_szc);
4307 4286 ppgsz = page_get_pagesize(pszc1);
4308 4287 ppages = btop(ppgsz);
4309 4288 aphase = btop(P2PHASE((uintptr_t)a, ppgsz));
4310 4289
4311 4290 ASSERT(type != F_SOFTLOCK);
4312 4291
4313 4292 SEGVN_VMSTAT_FLTVNPAGES(31);
4314 4293 if (aphase != P2PHASE(pfn, ppages)) {
4315 4294 segvn_faultvnmpss_align_err4++;
4316 4295 } else {
4317 4296 SEGVN_VMSTAT_FLTVNPAGES(32);
4318 4297 if (pplist != NULL) {
4319 4298 page_t *pl = pplist;
4320 4299 page_free_replacement_page(pl);
4321 4300 page_create_putback(pages);
4322 4301 }
4323 4302 for (i = 0; i < pages; i++) {
4324 4303 page_unlock(ppa[i]);
4325 4304 }
4326 4305 if (amp != NULL) {
4327 4306 anon_array_exit(&an_cookie);
4328 4307 ANON_LOCK_EXIT(&->a_rwlock);
4329 4308 }
4330 4309 pszc = pszc1;
4331 4310 ierr = -2;
4332 4311 break;
4333 4312 }
4334 4313 }
4335 4314
4336 4315 /*
4337 4316 * check if we should use smallest mapping size.
4338 4317 */
4339 4318 upgrdfail = 0;
4340 - if (szc == 0 || xhat ||
4319 + if (szc == 0 ||
4341 4320 (pszc >= szc &&
4342 4321 !IS_P2ALIGNED(pfn, pages)) ||
4343 4322 (pszc < szc &&
4344 4323 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4345 4324 &pszc))) {
4346 4325
4347 4326 if (upgrdfail && type != F_SOFTLOCK) {
4348 4327 /*
4349 4328 * segvn_full_szcpages failed to lock
4350 4329 * all pages EXCL. Size down.
4351 4330 */
4352 4331 ASSERT(pszc < szc);
4353 4332
4354 4333 SEGVN_VMSTAT_FLTVNPAGES(33);
4355 4334
4356 4335 if (pplist != NULL) {
4357 4336 page_t *pl = pplist;
4358 4337 page_free_replacement_page(pl);
4359 4338 page_create_putback(pages);
4360 4339 }
4361 4340
4362 4341 for (i = 0; i < pages; i++) {
4363 4342 page_unlock(ppa[i]);
4364 4343 }
4365 4344 if (amp != NULL) {
4366 4345 anon_array_exit(&an_cookie);
4367 4346 ANON_LOCK_EXIT(&->a_rwlock);
4368 4347 }
4369 4348 ierr = -1;
4370 4349 break;
4371 4350 }
4372 - if (szc != 0 && !xhat && !upgrdfail) {
4351 + if (szc != 0 && !upgrdfail) {
4373 4352 segvn_faultvnmpss_align_err5++;
4374 4353 }
4375 4354 SEGVN_VMSTAT_FLTVNPAGES(34);
4376 4355 if (pplist != NULL) {
4377 4356 page_free_replacement_page(pplist);
4378 4357 page_create_putback(pages);
4379 4358 }
4380 4359 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4381 4360 prot, vpprot);
4382 4361 if (upgrdfail && segvn_anypgsz_vnode) {
4383 4362 /* SOFTLOCK case */
4384 4363 hat_memload_array_region(hat, a, pgsz,
4385 4364 ppa, prot & vpprot, hat_flag,
4386 4365 svd->rcookie);
4387 4366 } else {
4388 4367 for (i = 0; i < pages; i++) {
4389 4368 hat_memload_region(hat,
4390 4369 a + (i << PAGESHIFT),
4391 4370 ppa[i], prot & vpprot,
4392 4371 hat_flag, svd->rcookie);
4393 4372 }
4394 4373 }
4395 4374 if (!(hat_flag & HAT_LOAD_LOCK)) {
4396 4375 for (i = 0; i < pages; i++) {
4397 4376 page_unlock(ppa[i]);
4398 4377 }
4399 4378 }
4400 4379 if (amp != NULL) {
4401 4380 anon_array_exit(&an_cookie);
4402 4381 ANON_LOCK_EXIT(&->a_rwlock);
4403 4382 }
4404 4383 goto next;
4405 4384 }
4406 4385
4407 4386 if (pszc == szc) {
4408 4387 /*
4409 4388 * segvn_full_szcpages() upgraded pages szc.
4410 4389 */
4411 4390 ASSERT(pszc == ppa[0]->p_szc);
4412 4391 ASSERT(IS_P2ALIGNED(pfn, pages));
4413 4392 goto chkszc;
4414 4393 }
4415 4394
4416 4395 if (pszc > szc) {
4417 4396 kmutex_t *szcmtx;
4418 4397 SEGVN_VMSTAT_FLTVNPAGES(35);
4419 4398 /*
4420 4399 * p_szc of ppa[0] can change since we haven't
4421 4400 * locked all constituent pages. Call
4422 4401 * page_lock_szc() to prevent szc changes.
4423 4402 * This should be a rare case that happens when
4424 4403 * multiple segments use a different page size
4425 4404 * to map the same file offsets.
4426 4405 */
4427 4406 szcmtx = page_szc_lock(ppa[0]);
4428 4407 pszc = ppa[0]->p_szc;
4429 4408 ASSERT(szcmtx != NULL || pszc == 0);
4430 4409 ASSERT(ppa[0]->p_szc <= pszc);
4431 4410 if (pszc <= szc) {
4432 4411 SEGVN_VMSTAT_FLTVNPAGES(36);
4433 4412 if (szcmtx != NULL) {
4434 4413 mutex_exit(szcmtx);
4435 4414 }
4436 4415 goto chkszc;
4437 4416 }
4438 4417 if (pplist != NULL) {
4439 4418 /*
4440 4419 				 * The page got promoted since the last
4441 4420 				 * check; we don't need the preallocated
4442 4421 				 * large page.
4443 4422 */
4444 4423 SEGVN_VMSTAT_FLTVNPAGES(37);
4445 4424 page_free_replacement_page(pplist);
4446 4425 page_create_putback(pages);
4447 4426 }
4448 4427 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4449 4428 prot, vpprot);
4450 4429 hat_memload_array_region(hat, a, pgsz, ppa,
4451 4430 prot & vpprot, hat_flag, svd->rcookie);
4452 4431 mutex_exit(szcmtx);
4453 4432 if (!(hat_flag & HAT_LOAD_LOCK)) {
4454 4433 for (i = 0; i < pages; i++) {
4455 4434 page_unlock(ppa[i]);
4456 4435 }
4457 4436 }
4458 4437 if (amp != NULL) {
4459 4438 anon_array_exit(&an_cookie);
4460 4439 ANON_LOCK_EXIT(&->a_rwlock);
4461 4440 }
4462 4441 goto next;
4463 4442 }
4464 4443
4465 4444 /*
4466 4445 			 * If the page got demoted since the last check
4467 4446 			 * we may not have allocated a larger page.
4468 4447 			 * Allocate it now.
4469 4448 */
4470 4449 if (pplist == NULL &&
4471 4450 page_alloc_pages(vp, seg, a, &pplist, NULL,
4472 4451 szc, 0, 0) && type != F_SOFTLOCK) {
4473 4452 SEGVN_VMSTAT_FLTVNPAGES(38);
4474 4453 for (i = 0; i < pages; i++) {
4475 4454 page_unlock(ppa[i]);
4476 4455 }
4477 4456 if (amp != NULL) {
4478 4457 anon_array_exit(&an_cookie);
4479 4458 ANON_LOCK_EXIT(&->a_rwlock);
4480 4459 }
4481 4460 ierr = -1;
4482 4461 alloc_failed |= (1 << szc);
4483 4462 break;
4484 4463 }
4485 4464
4486 4465 SEGVN_VMSTAT_FLTVNPAGES(39);
4487 4466
4488 4467 if (pplist != NULL) {
4489 4468 segvn_relocate_pages(ppa, pplist);
4490 4469 #ifdef DEBUG
4491 4470 } else {
4492 4471 ASSERT(type == F_SOFTLOCK);
4493 4472 SEGVN_VMSTAT_FLTVNPAGES(40);
4494 4473 #endif /* DEBUG */
4495 4474 }
4496 4475
4497 4476 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4498 4477
4499 4478 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4500 4479 ASSERT(type == F_SOFTLOCK);
4501 4480 for (i = 0; i < pages; i++) {
4502 4481 ASSERT(ppa[i]->p_szc < szc);
4503 4482 hat_memload_region(hat,
4504 4483 a + (i << PAGESHIFT),
4505 4484 ppa[i], prot & vpprot, hat_flag,
4506 4485 svd->rcookie);
4507 4486 }
4508 4487 } else {
4509 4488 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4510 4489 hat_memload_array_region(hat, a, pgsz, ppa,
4511 4490 prot & vpprot, hat_flag, svd->rcookie);
4512 4491 }
4513 4492 if (!(hat_flag & HAT_LOAD_LOCK)) {
4514 4493 for (i = 0; i < pages; i++) {
4515 4494 ASSERT(PAGE_SHARED(ppa[i]));
4516 4495 page_unlock(ppa[i]);
4517 4496 }
4518 4497 }
4519 4498 if (amp != NULL) {
4520 4499 anon_array_exit(&an_cookie);
4521 4500 ANON_LOCK_EXIT(&->a_rwlock);
4522 4501 }
4523 4502
4524 4503 next:
4525 4504 if (vpage != NULL) {
4526 4505 vpage += pages;
4527 4506 }
4528 4507 adjszc_chk = 1;
4529 4508 }
4530 4509 if (a == lpgeaddr)
4531 4510 break;
4532 4511 ASSERT(a < lpgeaddr);
4533 4512
4534 4513 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4535 4514
4536 4515 /*
4537 4516 		 * ierr == -1 means we failed to map with a large page
4538 4517 		 * (either due to allocation/relocation failures or
4539 4518 		 * misalignment with other mappings to this file).
4540 4519 		 *
4541 4520 		 * ierr == -2 means some other thread allocated a large page
4542 4521 		 * after we gave up trying to map with a large page. Retry
4543 4522 		 * with a larger mapping.
4544 4523 */
4545 4524 ASSERT(ierr == -1 || ierr == -2);
4546 4525 ASSERT(ierr == -2 || szc != 0);
4547 4526 ASSERT(ierr == -1 || szc < seg->s_szc);
4548 4527 if (ierr == -2) {
4549 4528 SEGVN_VMSTAT_FLTVNPAGES(41);
4550 4529 ASSERT(pszc > szc && pszc <= seg->s_szc);
4551 4530 szc = pszc;
4552 4531 } else if (segvn_anypgsz_vnode) {
4553 4532 SEGVN_VMSTAT_FLTVNPAGES(42);
4554 4533 szc--;
4555 4534 } else {
4556 4535 SEGVN_VMSTAT_FLTVNPAGES(43);
4557 4536 ASSERT(pszc < szc);
4558 4537 /*
4559 4538 			 * Another process created a pszc large page,
4560 4539 			 * but we still have to drop to szc 0.
4561 4540 */
4562 4541 szc = 0;
4563 4542 }
4564 4543
4565 4544 pgsz = page_get_pagesize(szc);
4566 4545 pages = btop(pgsz);
4567 4546 if (ierr == -2) {
4568 4547 /*
4569 4548 * Size up case. Note lpgaddr may only be needed for
4570 4549 * softlock case so we don't adjust it here.
4571 4550 */
4572 4551 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4573 4552 ASSERT(a >= lpgaddr);
4574 4553 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4575 4554 off = svd->offset + (uintptr_t)(a - seg->s_base);
4576 4555 aindx = svd->anon_index + seg_page(seg, a);
4577 4556 vpage = (svd->vpage != NULL) ?
4578 4557 &svd->vpage[seg_page(seg, a)] : NULL;
4579 4558 } else {
4580 4559 /*
4581 4560 * Size down case. Note lpgaddr may only be needed for
4582 4561 * softlock case so we don't adjust it here.
4583 4562 */
4584 4563 ASSERT(IS_P2ALIGNED(a, pgsz));
4585 4564 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4586 4565 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4587 4566 ASSERT(a < lpgeaddr);
4588 4567 if (a < addr) {
4589 4568 SEGVN_VMSTAT_FLTVNPAGES(44);
4590 4569 /*
4591 4570 * The beginning of the large page region can
4592 4571 * be pulled to the right to make a smaller
4593 4572 * region. We haven't yet faulted a single
4594 4573 * page.
4595 4574 */
4596 4575 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4597 4576 ASSERT(a >= lpgaddr);
4598 4577 off = svd->offset +
4599 4578 (uintptr_t)(a - seg->s_base);
4600 4579 aindx = svd->anon_index + seg_page(seg, a);
4601 4580 vpage = (svd->vpage != NULL) ?
4602 4581 &svd->vpage[seg_page(seg, a)] : NULL;
4603 4582 }
4604 4583 }
4605 4584 }
4606 4585 out:
4607 4586 kmem_free(ppa, ppasize);
4608 4587 if (!err && !vop_size_err) {
4609 4588 SEGVN_VMSTAT_FLTVNPAGES(45);
4610 4589 return (0);
4611 4590 }
4612 4591 if (type == F_SOFTLOCK && a > lpgaddr) {
4613 4592 SEGVN_VMSTAT_FLTVNPAGES(46);
4614 4593 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4615 4594 }
4616 4595 if (!vop_size_err) {
4617 4596 SEGVN_VMSTAT_FLTVNPAGES(47);
4618 4597 return (err);
4619 4598 }
4620 4599 ASSERT(brkcow || tron || type == F_SOFTLOCK);
4621 4600 /*
4622 4601 * Large page end is mapped beyond the end of file and it's a cow
4623 4602 * fault (can be a text replication induced cow) or softlock so we can't
4624 4603 * reduce the map area. For now just demote the segment. This should
4625 4604 * really only happen if the end of the file changed after the mapping
4626 4605 * was established since when large page segments are created we make
4627 4606 * sure they don't extend beyond the end of the file.
4628 4607 */
4629 4608 SEGVN_VMSTAT_FLTVNPAGES(48);
4630 4609
4631 4610 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4632 4611 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4633 4612 err = 0;
4634 4613 if (seg->s_szc != 0) {
4635 4614 segvn_fltvnpages_clrszc_cnt++;
4636 4615 ASSERT(svd->softlockcnt == 0);
4637 4616 err = segvn_clrszc(seg);
4638 4617 if (err != 0) {
4639 4618 segvn_fltvnpages_clrszc_err++;
4640 4619 }
4641 4620 }
4642 4621 ASSERT(err || seg->s_szc == 0);
4643 4622 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
4644 4623 /* segvn_fault will do its job as if szc had been zero to begin with */
4645 4624 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err));
4646 4625 }
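
Across its outer loop, segvn_fault_vnodepages() treats the internal codes as a retry ladder: ierr == -1 sizes the mapping down (one step at a time with segvn_anypgsz_vnode set, otherwise straight to szc 0) and ierr == -2 sizes it up to the pszc that appeared. A compressed, runnable sketch of that ladder with a scripted sequence of outcomes; all szc values and the script itself are illustrative, not kernel behavior.

	#include <stdio.h>

	int
	main(void)
	{
		int szc = 3;			/* start at the segment's size code */
		int anypgsz_vnode = 0;		/* mirrors the default of 0 */
		int step = 0;

		/* Scripted outcomes: fail, see a promotion to pszc 2, fail, succeed. */
		int ierr_script[] = { -1, -2, -1, 0 };
		int pszc_script[] = { 0, 2, 0, 0 };

		for (;;) {
			int ierr = ierr_script[step];
			int pszc = pszc_script[step];

			printf("try szc %d -> ierr %d\n", szc, ierr);
			if (ierr == 0)
				break;		/* mapped successfully */
			if (ierr == -2)
				szc = pszc;	/* size up: another thread promoted */
			else if (anypgsz_vnode)
				szc--;		/* size down one step */
			else
				szc = 0;	/* only two sizes supported: drop to 0 */
			step++;
		}
		return (0);
	}
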
4647 4626
4648 4627 /*
4649 4628 * This routine will attempt to fault in one large page.
4650 4629  * It will use smaller pages if that fails.
4651 4630 * It should only be called for pure anonymous segments.
4652 4631 */
4653 4632 static faultcode_t
4654 4633 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
4655 4634 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
4656 4635 caddr_t eaddr, int brkcow)
4657 4636 {
4658 4637 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4659 4638 struct anon_map *amp = svd->amp;
4660 4639 uchar_t segtype = svd->type;
4661 4640 uint_t szc = seg->s_szc;
4662 4641 size_t pgsz = page_get_pagesize(szc);
4663 4642 size_t maxpgsz = pgsz;
4664 4643 pgcnt_t pages = btop(pgsz);
4665 4644 uint_t ppaszc = szc;
4666 4645 caddr_t a = lpgaddr;
4667 4646 ulong_t aindx = svd->anon_index + seg_page(seg, a);
4668 4647 struct vpage *vpage = (svd->vpage != NULL) ?
4669 4648 &svd->vpage[seg_page(seg, a)] : NULL;
4670 4649 page_t **ppa;
4671 4650 uint_t ppa_szc;
4672 4651 faultcode_t err;
4673 4652 int ierr;
4674 4653 uint_t protchk, prot, vpprot;
4675 4654 ulong_t i;
4676 4655 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
4677 4656 anon_sync_obj_t cookie;
4678 4657 int adjszc_chk;
4679 4658 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;
4680 4659
4681 4660 ASSERT(szc != 0);
4682 4661 ASSERT(amp != NULL);
4683 4662 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
4684 4663 ASSERT(!(svd->flags & MAP_NORESERVE));
4685 4664 ASSERT(type != F_SOFTUNLOCK);
4686 4665 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4687 4666 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
4688 4667 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4689 4668
4690 4669 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
4691 4670
4692 4671 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]);
4693 4672 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]);
4694 4673
4695 4674 if (svd->flags & MAP_TEXT) {
4696 4675 hat_flag |= HAT_LOAD_TEXT;
4697 4676 }
4698 4677
4699 4678 if (svd->pageprot) {
4700 4679 switch (rw) {
4701 4680 case S_READ:
4702 4681 protchk = PROT_READ;
4703 4682 break;
4704 4683 case S_WRITE:
4705 4684 protchk = PROT_WRITE;
4706 4685 break;
4707 4686 case S_EXEC:
4708 4687 protchk = PROT_EXEC;
4709 4688 break;
4710 4689 case S_OTHER:
4711 4690 default:
4712 4691 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4713 4692 break;
4714 4693 }
4715 4694 VM_STAT_ADD(segvnvmstats.fltanpages[2]);
4716 4695 } else {
4717 4696 prot = svd->prot;
4718 4697 /* caller has already done segment level protection check. */
4719 4698 }
4720 4699
4721 4700 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
4722 4701 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4723 4702 for (;;) {
4724 4703 adjszc_chk = 0;
4725 4704 for (; a < lpgeaddr; a += pgsz, aindx += pages) {
4726 4705 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
4727 4706 VM_STAT_ADD(segvnvmstats.fltanpages[3]);
4728 4707 ASSERT(vpage != NULL);
4729 4708 prot = VPP_PROT(vpage);
4730 4709 ASSERT(sameprot(seg, a, maxpgsz));
4731 4710 if ((prot & protchk) == 0) {
4732 4711 err = FC_PROT;
4733 4712 goto error;
4734 4713 }
4735 4714 }
4736 4715 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) &&
4737 4716 pgsz < maxpgsz) {
4738 4717 ASSERT(a > lpgaddr);
4739 4718 szc = seg->s_szc;
4740 4719 pgsz = maxpgsz;
4741 4720 pages = btop(pgsz);
4742 4721 ASSERT(IS_P2ALIGNED(aindx, pages));
4743 4722 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr,
4744 4723 pgsz);
4745 4724 }
4746 4725 if (type == F_SOFTLOCK) {
4747 4726 atomic_add_long((ulong_t *)&svd->softlockcnt,
4748 4727 pages);
4749 4728 }
4750 4729 anon_array_enter(amp, aindx, &cookie);
4751 4730 ppa_szc = (uint_t)-1;
4752 4731 ierr = anon_map_getpages(amp, aindx, szc, seg, a,
4753 4732 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow,
4754 4733 segvn_anypgsz, pgflags, svd->cred);
4755 4734 if (ierr != 0) {
4756 4735 anon_array_exit(&cookie);
4757 4736 VM_STAT_ADD(segvnvmstats.fltanpages[4]);
4758 4737 if (type == F_SOFTLOCK) {
4759 4738 atomic_add_long(
4760 4739 (ulong_t *)&svd->softlockcnt,
4761 4740 -pages);
4762 4741 }
4763 4742 if (ierr > 0) {
4764 4743 VM_STAT_ADD(segvnvmstats.fltanpages[6]);
4765 4744 err = FC_MAKE_ERR(ierr);
4766 4745 goto error;
4767 4746 }
4768 4747 break;
4769 4748 }
4770 4749
4771 4750 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4772 4751
4773 4752 ASSERT(segtype == MAP_SHARED ||
4774 4753 ppa[0]->p_szc <= szc);
4775 4754 ASSERT(segtype == MAP_PRIVATE ||
4776 4755 ppa[0]->p_szc >= szc);
4777 4756
4778 4757 /*
4779 4758 * Handle pages that have been marked for migration
4780 4759 */
4781 4760 if (lgrp_optimizations())
4782 4761 page_migrate(seg, a, ppa, pages);
4783 4762
4784 4763 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
4785 4764
4786 4765 if (segtype == MAP_SHARED) {
4787 4766 vpprot |= PROT_WRITE;
4788 4767 }
4789 4768
4790 4769 hat_memload_array(hat, a, pgsz, ppa,
4791 4770 prot & vpprot, hat_flag);
4792 4771
4793 4772 if (hat_flag & HAT_LOAD_LOCK) {
4794 4773 VM_STAT_ADD(segvnvmstats.fltanpages[7]);
4795 4774 } else {
4796 4775 VM_STAT_ADD(segvnvmstats.fltanpages[8]);
4797 4776 for (i = 0; i < pages; i++)
4798 4777 page_unlock(ppa[i]);
4799 4778 }
4800 4779 if (vpage != NULL)
4801 4780 vpage += pages;
4802 4781
4803 4782 anon_array_exit(&cookie);
4804 4783 adjszc_chk = 1;
4805 4784 }
4806 4785 if (a == lpgeaddr)
4807 4786 break;
4808 4787 ASSERT(a < lpgeaddr);
4809 4788 /*
4810 4789 * ierr == -1 means we failed to allocate a large page.
4811 4790 * so do a size down operation.
4812 4791 *
4813 4792 * ierr == -2 means some other process that privately shares
4814 4793 * pages with this process has allocated a larger page and we
4815 4794 * need to retry with larger pages. So do a size up
4816 4795 * operation. This relies on the fact that large pages are
4817 4796 * never partially shared i.e. if we share any constituent
4818 4797 * page of a large page with another process we must share the
4819 4798 * entire large page. Note this cannot happen for SOFTLOCK
4820 4799 * case, unless current address (a) is at the beginning of the
4821 4800 * next page size boundary because the other process couldn't
4822 4801 * have relocated locked pages.
4823 4802 */
4824 4803 ASSERT(ierr == -1 || ierr == -2);
4825 4804
4826 4805 if (segvn_anypgsz) {
4827 4806 ASSERT(ierr == -2 || szc != 0);
4828 4807 ASSERT(ierr == -1 || szc < seg->s_szc);
4829 4808 szc = (ierr == -1) ? szc - 1 : szc + 1;
4830 4809 } else {
4831 4810 /*
4832 4811 * For non COW faults and segvn_anypgsz == 0
4833 4812 * we need to be careful not to loop forever
4834 4813 * if existing page is found with szc other
4835 4814 * than 0 or seg->s_szc. This could be due
4836 4815 * to page relocations on behalf of DR or
4837 4816 * more likely large page creation. For this
4838 4817 * case simply re-size to existing page's szc
4839 4818 * if returned by anon_map_getpages().
4840 4819 */
4841 4820 if (ppa_szc == (uint_t)-1) {
4842 4821 szc = (ierr == -1) ? 0 : seg->s_szc;
4843 4822 } else {
4844 4823 ASSERT(ppa_szc <= seg->s_szc);
4845 4824 ASSERT(ierr == -2 || ppa_szc < szc);
4846 4825 ASSERT(ierr == -1 || ppa_szc > szc);
4847 4826 szc = ppa_szc;
4848 4827 }
4849 4828 }
4850 4829
4851 4830 pgsz = page_get_pagesize(szc);
4852 4831 pages = btop(pgsz);
4853 4832 ASSERT(type != F_SOFTLOCK || ierr == -1 ||
4854 4833 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz)));
4855 4834 if (type == F_SOFTLOCK) {
4856 4835 /*
4857 4836 			 * For softlocks we cannot reduce the fault area
4858 4837 			 * (calculated based on the largest page size for this
4859 4838 			 * segment) for a size down, and a is already aligned
4860 4839 			 * to the next page size, as asserted above, for size
4861 4840 			 * ups. Therefore just continue in the softlock case.
4862 4841 */
4863 4842 VM_STAT_ADD(segvnvmstats.fltanpages[9]);
4864 4843 continue; /* keep lint happy */
4865 4844 } else if (ierr == -2) {
4866 4845
4867 4846 /*
4868 4847 * Size up case. Note lpgaddr may only be needed for
4869 4848 * softlock case so we don't adjust it here.
4870 4849 */
4871 4850 VM_STAT_ADD(segvnvmstats.fltanpages[10]);
4872 4851 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4873 4852 ASSERT(a >= lpgaddr);
4874 4853 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4875 4854 aindx = svd->anon_index + seg_page(seg, a);
4876 4855 vpage = (svd->vpage != NULL) ?
4877 4856 &svd->vpage[seg_page(seg, a)] : NULL;
4878 4857 } else {
4879 4858 /*
4880 4859 * Size down case. Note lpgaddr may only be needed for
4881 4860 * softlock case so we don't adjust it here.
4882 4861 */
4883 4862 VM_STAT_ADD(segvnvmstats.fltanpages[11]);
4884 4863 ASSERT(IS_P2ALIGNED(a, pgsz));
4885 4864 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4886 4865 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4887 4866 ASSERT(a < lpgeaddr);
4888 4867 if (a < addr) {
4889 4868 /*
4890 4869 * The beginning of the large page region can
4891 4870 * be pulled to the right to make a smaller
4892 4871 * region. We haven't yet faulted a single
4893 4872 * page.
4894 4873 */
4895 4874 VM_STAT_ADD(segvnvmstats.fltanpages[12]);
4896 4875 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4897 4876 ASSERT(a >= lpgaddr);
4898 4877 aindx = svd->anon_index + seg_page(seg, a);
4899 4878 vpage = (svd->vpage != NULL) ?
4900 4879 &svd->vpage[seg_page(seg, a)] : NULL;
4901 4880 }
4902 4881 }
4903 4882 }
4904 4883 VM_STAT_ADD(segvnvmstats.fltanpages[13]);
4905 4884 ANON_LOCK_EXIT(&->a_rwlock);
4906 4885 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4907 4886 return (0);
4908 4887 error:
4909 4888 VM_STAT_ADD(segvnvmstats.fltanpages[14]);
4910 4889 ANON_LOCK_EXIT(&->a_rwlock);
4911 4890 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4912 4891 if (type == F_SOFTLOCK && a > lpgaddr) {
4913 4892 VM_STAT_ADD(segvnvmstats.fltanpages[15]);
4914 4893 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4915 4894 }
4916 4895 return (err);
4917 4896 }
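
Both large-page fault routines recompute their work window the same way on every size change: the cursor is pulled back to the new alignment with P2ALIGN and the end is pushed out with P2ROUNDUP, so the window always covers the original [addr, eaddr) in whole pgsz chunks. A standalone sketch, again with local copies of the illumos macros and illustrative addresses:

	#include <stdio.h>

	/* Local copies of the illumos P2 macros (sys/sysmacros.h). */
	#define	P2ALIGN(x, align)	((x) & -(align))
	#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

	int
	main(void)
	{
		unsigned long addr = 0x12345000;	/* faulting address */
		unsigned long eaddr = 0x12349000;	/* end of the faulted range */
		unsigned long pgsz = 0x10000;		/* new 64K mapping size */

		unsigned long a = P2ALIGN(addr, pgsz);
		unsigned long lpgeaddr = P2ROUNDUP(eaddr, pgsz);

		/* Prints [0x12340000, 0x12350000): whole 64K pages over the range. */
		printf("window [0x%lx, 0x%lx)\n", a, lpgeaddr);
		return (0);
	}
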
4918 4897
4919 4898 int fltadvice = 1; /* set to free behind pages for sequential access */
4920 4899
4921 4900 /*
4922 4901 * This routine is called via a machine specific fault handling routine.
4923 4902 * It is also called by software routines wishing to lock or unlock
4924 4903 * a range of addresses.
4925 4904 *
4926 4905 * Here is the basic algorithm:
4927 4906 * If unlocking
4928 4907 * Call segvn_softunlock
4929 4908 * Return
4930 4909 * endif
4931 4910 * Checking and set up work
4932 4911 * If we will need some non-anonymous pages
4933 4912 * Call VOP_GETPAGE over the range of non-anonymous pages
4934 4913 * endif
4935 4914 * Loop over all addresses requested
4936 4915 * Call segvn_faultpage passing in page list
4937 4916 * to load up translations and handle anonymous pages
4938 4917 * endloop
4939 4918 * Load up translation to any additional pages in page list not
4940 4919 * already handled that fit into this segment
4941 4920 */
4942 4921 static faultcode_t
4943 4922 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4944 4923 enum fault_type type, enum seg_rw rw)
4945 4924 {
4946 4925 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4947 4926 page_t **plp, **ppp, *pp;
4948 4927 u_offset_t off;
4949 4928 caddr_t a;
4950 4929 struct vpage *vpage;
4951 4930 uint_t vpprot, prot;
4952 4931 int err;
4953 4932 page_t *pl[PVN_GETPAGE_NUM + 1];
4954 4933 size_t plsz, pl_alloc_sz;
4955 4934 size_t page;
4956 4935 ulong_t anon_index;
4957 4936 struct anon_map *amp;
4958 4937 int dogetpage = 0;
4959 4938 caddr_t lpgaddr, lpgeaddr;
4960 4939 size_t pgsz;
4961 4940 anon_sync_obj_t cookie;
4962 4941 int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4963 4942
4964 4943 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
4965 4944 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4966 4945
4967 4946 /*
4968 4947 * First handle the easy stuff
4969 4948 */
4970 4949 if (type == F_SOFTUNLOCK) {
4971 4950 if (rw == S_READ_NOCOW) {
4972 4951 rw = S_READ;
4973 4952 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
4974 4953 }
4975 4954 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4976 4955 pgsz = (seg->s_szc == 0) ? PAGESIZE :
4977 4956 page_get_pagesize(seg->s_szc);
4978 4957 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]);
4979 4958 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
4980 4959 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw);
4981 4960 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4982 4961 return (0);
4983 4962 }
4984 4963
4985 4964 ASSERT(svd->tr_state == SEGVN_TR_OFF ||
4986 4965 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
4987 4966 if (brkcow == 0) {
4988 4967 if (svd->tr_state == SEGVN_TR_INIT) {
4989 4968 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4990 4969 if (svd->tr_state == SEGVN_TR_INIT) {
4991 4970 ASSERT(svd->vp != NULL && svd->amp == NULL);
4992 4971 ASSERT(svd->flags & MAP_TEXT);
4993 4972 ASSERT(svd->type == MAP_PRIVATE);
4994 4973 segvn_textrepl(seg);
4995 4974 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4996 4975 ASSERT(svd->tr_state != SEGVN_TR_ON ||
4997 4976 svd->amp != NULL);
4998 4977 }
4999 4978 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5000 4979 }
5001 4980 } else if (svd->tr_state != SEGVN_TR_OFF) {
5002 4981 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5003 4982
5004 4983 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) {
5005 4984 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
5006 4985 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5007 4986 return (FC_PROT);
5008 4987 }
5009 4988
5010 4989 if (svd->tr_state == SEGVN_TR_ON) {
5011 4990 ASSERT(svd->vp != NULL && svd->amp != NULL);
5012 4991 segvn_textunrepl(seg, 0);
5013 4992 ASSERT(svd->amp == NULL &&
5014 4993 svd->tr_state == SEGVN_TR_OFF);
5015 4994 } else if (svd->tr_state != SEGVN_TR_OFF) {
5016 4995 svd->tr_state = SEGVN_TR_OFF;
5017 4996 }
5018 4997 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5019 4998 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5020 4999 }
5021 5000
5022 5001 top:
5023 5002 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5024 5003
5025 5004 /*
5026 5005 * If we have the same protections for the entire segment,
5027 5006 * ensure that the access being attempted is legitimate.
5028 5007 */
5029 5008
5030 5009 if (svd->pageprot == 0) {
5031 5010 uint_t protchk;
5032 5011
5033 5012 switch (rw) {
5034 5013 case S_READ:
5035 5014 case S_READ_NOCOW:
5036 5015 protchk = PROT_READ;
5037 5016 break;
5038 5017 case S_WRITE:
5039 5018 protchk = PROT_WRITE;
5040 5019 break;
5041 5020 case S_EXEC:
5042 5021 protchk = PROT_EXEC;
5043 5022 break;
5044 5023 case S_OTHER:
5045 5024 default:
5046 5025 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
5047 5026 break;
5048 5027 }
5049 5028
5050 5029 if ((svd->prot & protchk) == 0) {
5051 5030 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5052 5031 return (FC_PROT); /* illegal access type */
5053 5032 }
5054 5033 }
5055 5034
5056 5035 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5057 5036 /* this must be SOFTLOCK S_READ fault */
5058 5037 ASSERT(svd->amp == NULL);
5059 5038 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5060 5039 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5061 5040 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5062 5041 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5063 5042 /*
5064 5043 * this must be the first ever non S_READ_NOCOW
5065 5044 * softlock for this segment.
5066 5045 */
5067 5046 ASSERT(svd->softlockcnt == 0);
5068 5047 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5069 5048 HAT_REGION_TEXT);
5070 5049 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5071 5050 }
5072 5051 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5073 5052 goto top;
5074 5053 }
5075 5054
5076 5055 /*
5077 5056 * We can't allow the long term use of softlocks for vmpss segments,
5078 5057 * because in some file truncation cases we should be able to demote
5079 5058 * the segment, which requires that there are no softlocks. The
5080 5059 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5081 5060 * segment is S_READ_NOCOW, where the caller holds the address space
5082 5061 * locked as writer and calls softunlock before dropping the as lock.
5083 5062 * S_READ_NOCOW is used by /proc to read memory from another user.
5084 5063 *
5085 5064 * Another deadlock between SOFTLOCK and file truncation can happen
5086 5065 * because segvn_fault_vnodepages() calls the FS one pagesize at
5087 5066 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5088 5067 * can cause a deadlock because the first set of page_t's remain
5089 5068 * locked SE_SHARED. To avoid this, we demote segments on a first
5090 5069 * SOFTLOCK if they have a length greater than the segment's
5091 5070 * page size.
5092 5071 *
5093 5072 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5094 5073 * the access type is S_READ_NOCOW and the fault length is less than
5095 5074 * or equal to the segment's page size. While this is quite restrictive,
5096 5075 * it should be the most common case of SOFTLOCK against a vmpss
5097 5076 * segment.
5098 5077 *
5099 5078 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5100 5079 * caller makes sure no COW will be caused by another thread for a
5101 5080 * softlocked page.
5102 5081 */
5103 5082 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) {
5104 5083 int demote = 0;
5105 5084
5106 5085 if (rw != S_READ_NOCOW) {
5107 5086 demote = 1;
5108 5087 }
5109 5088 if (!demote && len > PAGESIZE) {
5110 5089 pgsz = page_get_pagesize(seg->s_szc);
5111 5090 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5112 5091 lpgeaddr);
5113 5092 if (lpgeaddr - lpgaddr > pgsz) {
5114 5093 demote = 1;
5115 5094 }
5116 5095 }
5117 5096
5118 5097 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5119 5098
5120 5099 if (demote) {
5121 5100 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5122 5101 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5123 5102 if (seg->s_szc != 0) {
5124 5103 segvn_vmpss_clrszc_cnt++;
5125 5104 ASSERT(svd->softlockcnt == 0);
5126 5105 err = segvn_clrszc(seg);
5127 5106 if (err) {
5128 5107 segvn_vmpss_clrszc_err++;
5129 5108 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5130 5109 return (FC_MAKE_ERR(err));
5131 5110 }
5132 5111 }
5133 5112 ASSERT(seg->s_szc == 0);
5134 5113 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5135 5114 goto top;
5136 5115 }
5137 5116 }
5138 5117
5139 5118 /*
5140 5119 * Check to see if we need to allocate an anon_map structure.
5141 5120 */
5142 5121 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) {
5143 5122 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5144 5123 /*
5145 5124 * Drop the "read" lock on the segment and acquire
5146 5125 * the "write" version since we have to allocate the
5147 5126 * anon_map.
5148 5127 */
5149 5128 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5150 5129 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5151 5130
5152 5131 if (svd->amp == NULL) {
5153 5132 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
5154 5133 svd->amp->a_szc = seg->s_szc;
5155 5134 }
5156 5135 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5157 5136
5158 5137 /*
5159 5138 * Start all over again since segment protections
5160 5139 * may have changed after we dropped the "read" lock.
5161 5140 */
5162 5141 goto top;
5163 5142 }
5164 5143
5165 5144 /*
5166 5145 * S_READ_NOCOW vs S_READ distinction was
5167 5146 * only needed for the code above. After
5168 5147 * that we treat it as S_READ.
5169 5148 */
5170 5149 if (rw == S_READ_NOCOW) {
5171 5150 ASSERT(type == F_SOFTLOCK);
5172 5151 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5173 5152 rw = S_READ;
5174 5153 }
5175 5154
5176 5155 amp = svd->amp;
5177 5156
5178 5157 /*
5179 5158 * MADV_SEQUENTIAL work is ignored for large page segments.
5180 5159 */
5181 5160 if (seg->s_szc != 0) {
5182 5161 pgsz = page_get_pagesize(seg->s_szc);
5183 5162 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
5184 5163 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5185 5164 if (svd->vp == NULL) {
5186 5165 err = segvn_fault_anonpages(hat, seg, lpgaddr,
5187 5166 lpgeaddr, type, rw, addr, addr + len, brkcow);
5188 5167 } else {
5189 5168 err = segvn_fault_vnodepages(hat, seg, lpgaddr,
5190 5169 lpgeaddr, type, rw, addr, addr + len, brkcow);
5191 5170 if (err == IE_RETRY) {
5192 5171 ASSERT(seg->s_szc == 0);
5193 5172 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
5194 5173 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5195 5174 goto top;
5196 5175 }
5197 5176 }
5198 5177 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5199 5178 return (err);
5200 5179 }
5201 5180
5202 5181 page = seg_page(seg, addr);
5203 5182 if (amp != NULL) {
5204 5183 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5205 5184 anon_index = svd->anon_index + page;
5206 5185
5207 5186 if (type == F_PROT && rw == S_READ &&
5208 5187 svd->tr_state == SEGVN_TR_OFF &&
5209 5188 svd->type == MAP_PRIVATE && svd->pageprot == 0) {
5210 5189 size_t index = anon_index;
5211 5190 struct anon *ap;
5212 5191
5213 5192 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5214 5193 /*
5215 5194 * The fast path could apply to S_WRITE also, except
5216 5195 * that the protection fault could be caused by lazy
5217 5196 * tlb flush when ro->rw. In this case, the pte is
5218 5197 * RW already. But RO in the other cpu's tlb causes
5219 5198 * the fault. Since hat_chgprot won't do anything if
5220 5199 * pte doesn't change, we may end up faulting
5221 5200 * indefinitely until the RO tlb entry gets replaced.
5222 5201 */
5223 5202 for (a = addr; a < addr + len; a += PAGESIZE, index++) {
5224 5203 anon_array_enter(amp, index, &cookie);
5225 5204 ap = anon_get_ptr(amp->ahp, index);
5226 5205 anon_array_exit(&cookie);
5227 5206 if ((ap == NULL) || (ap->an_refcnt != 1)) {
5228 5207 ANON_LOCK_EXIT(&amp->a_rwlock);
5229 5208 goto slow;
5230 5209 }
5231 5210 }
5232 5211 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5233 5212 ANON_LOCK_EXIT(&amp->a_rwlock);
5234 5213 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5235 5214 return (0);
5236 5215 }
5237 5216 }
5238 5217 slow:
5239 5218
5240 5219 if (svd->vpage == NULL)
5241 5220 vpage = NULL;
5242 5221 else
5243 5222 vpage = &svd->vpage[page];
5244 5223
5245 5224 off = svd->offset + (uintptr_t)(addr - seg->s_base);
5246 5225
5247 5226 /*
5248 5227 * If MADV_SEQUENTIAL has been set for the particular page we
5249 5228 * are faulting on, free behind all pages in the segment and put
5250 5229 * them on the free list.
5251 5230 */
5252 5231
5253 5232 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) {
5254 5233 struct vpage *vpp;
5255 5234 ulong_t fanon_index;
5256 5235 size_t fpage;
5257 5236 u_offset_t pgoff, fpgoff;
5258 5237 struct vnode *fvp;
5259 5238 struct anon *fap = NULL;
5260 5239
5261 5240 if (svd->advice == MADV_SEQUENTIAL ||
5262 5241 (svd->pageadvice &&
5263 5242 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) {
5264 5243 pgoff = off - PAGESIZE;
5265 5244 fpage = page - 1;
5266 5245 if (vpage != NULL)
5267 5246 vpp = &svd->vpage[fpage];
5268 5247 if (amp != NULL)
5269 5248 fanon_index = svd->anon_index + fpage;
5270 5249
5271 5250 while (pgoff > svd->offset) {
5272 5251 if (svd->advice != MADV_SEQUENTIAL &&
5273 5252 (!svd->pageadvice || (vpage &&
5274 5253 VPP_ADVICE(vpp) != MADV_SEQUENTIAL)))
5275 5254 break;
5276 5255
5277 5256 /*
5278 5257 * If this is an anon page, we must find the
5279 5258 * correct <vp, offset> for it
5280 5259 */
5281 5260 fap = NULL;
5282 5261 if (amp != NULL) {
5283 5262 ANON_LOCK_ENTER(&amp->a_rwlock,
5284 5263 RW_READER);
5285 5264 anon_array_enter(amp, fanon_index,
5286 5265 &cookie);
5287 5266 fap = anon_get_ptr(amp->ahp,
5288 5267 fanon_index);
5289 5268 if (fap != NULL) {
5290 5269 swap_xlate(fap, &fvp, &fpgoff);
5291 5270 } else {
5292 5271 fpgoff = pgoff;
5293 5272 fvp = svd->vp;
5294 5273 }
5295 5274 anon_array_exit(&cookie);
5296 5275 ANON_LOCK_EXIT(&amp->a_rwlock);
5297 5276 } else {
5298 5277 fpgoff = pgoff;
5299 5278 fvp = svd->vp;
5300 5279 }
5301 5280 if (fvp == NULL)
5302 5281 break; /* XXX */
5303 5282 /*
5304 5283 * Skip pages that are free or have an
5305 5284 * "exclusive" lock.
5306 5285 */
5307 5286 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED);
5308 5287 if (pp == NULL)
5309 5288 break;
5310 5289 /*
5311 5290 * We don't need the page_struct_lock to test
5312 5291 * as this is only advisory; even if we
5313 5292 * acquire it someone might race in and lock
5314 5293 * the page after we unlock and before the
5315 5294 * PUTPAGE, then VOP_PUTPAGE will do nothing.
5316 5295 */
5317 5296 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
5318 5297 /*
5319 5298 * Hold the vnode before releasing
5320 5299 * the page lock to prevent it from
5321 5300 * being freed and re-used by some
5322 5301 * other thread.
5323 5302 */
5324 5303 VN_HOLD(fvp);
5325 5304 page_unlock(pp);
5326 5305 /*
5327 5306 * We should build a page list
5328 5307 * to kluster putpages XXX
5329 5308 */
5330 5309 (void) VOP_PUTPAGE(fvp,
5331 5310 (offset_t)fpgoff, PAGESIZE,
5332 5311 (B_DONTNEED|B_FREE|B_ASYNC),
5333 5312 svd->cred, NULL);
5334 5313 VN_RELE(fvp);
5335 5314 } else {
5336 5315 /*
5337 5316 * XXX - Should the loop terminate if
5338 5317 * the page is `locked'?
5339 5318 */
5340 5319 page_unlock(pp);
5341 5320 }
5342 5321 --vpp;
5343 5322 --fanon_index;
5344 5323 pgoff -= PAGESIZE;
5345 5324 }
5346 5325 }
5347 5326 }
5348 5327
5349 5328 plp = pl;
5350 5329 *plp = NULL;
5351 5330 pl_alloc_sz = 0;
5352 5331
5353 5332 /*
5354 5333 * See if we need to call VOP_GETPAGE for
5355 5334 * *any* of the range being faulted on.
5356 5335 * We can skip all of this work if there
5357 5336 * was no original vnode.
5358 5337 */
5359 5338 if (svd->vp != NULL) {
5360 5339 u_offset_t vp_off;
5361 5340 size_t vp_len;
5362 5341 struct anon *ap;
5363 5342 vnode_t *vp;
5364 5343
5365 5344 vp_off = off;
5366 5345 vp_len = len;
5367 5346
5368 5347 if (amp == NULL)
5369 5348 dogetpage = 1;
5370 5349 else {
5371 5350 /*
5372 5351 * Only acquire reader lock to prevent amp->ahp
5373 5352 * from being changed. It's ok to miss pages,
5374 5353 * hence we don't do anon_array_enter
5375 5354 */
5376 5355 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5377 5356 ap = anon_get_ptr(amp->ahp, anon_index);
5378 5357
5379 5358 if (len <= PAGESIZE)
5380 5359 /* inline non_anon() */
5381 5360 dogetpage = (ap == NULL);
5382 5361 else
5383 5362 dogetpage = non_anon(amp->ahp, anon_index,
5384 5363 &vp_off, &vp_len);
5385 5364 ANON_LOCK_EXIT(&amp->a_rwlock);
5386 5365 }
5387 5366
5388 5367 if (dogetpage) {
5389 5368 enum seg_rw arw;
5390 5369 struct as *as = seg->s_as;
5391 5370
5392 5371 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) {
5393 5372 /*
5394 5373 * Page list won't fit in local array,
5395 5374 * allocate one of the needed size.
5396 5375 */
5397 5376 pl_alloc_sz =
5398 5377 (btop(len) + 1) * sizeof (page_t *);
5399 5378 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP);
5400 5379 plp[0] = NULL;
5401 5380 plsz = len;
5402 5381 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE ||
5403 5382 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER ||
5404 5383 (((size_t)(addr + PAGESIZE) <
5405 5384 (size_t)(seg->s_base + seg->s_size)) &&
5406 5385 hat_probe(as->a_hat, addr + PAGESIZE))) {
5407 5386 /*
5408 5387 * Ask VOP_GETPAGE to return the exact number
5409 5388 * of pages if
5410 5389 * (a) this is a COW fault, or
5411 5390 * (b) this is a software fault, or
5412 5391 * (c) next page is already mapped.
5413 5392 */
5414 5393 plsz = len;
5415 5394 } else {
5416 5395 /*
5417 5396 * Ask VOP_GETPAGE to return adjacent pages
5418 5397 * within the segment.
5419 5398 */
5420 5399 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t)
5421 5400 ((seg->s_base + seg->s_size) - addr));
5422 5401 ASSERT((addr + plsz) <=
5423 5402 (seg->s_base + seg->s_size));
5424 5403 }
5425 5404
5426 5405 /*
5427 5406 * Need to get some non-anonymous pages.
5428 5407 * We need to make only one call to GETPAGE to do
5429 5408 * this to prevent certain deadlocking conditions
5430 5409 * when we are doing locking. In this case
5431 5410 * non_anon() should have picked up the smallest
5432 5411 * range which includes all the non-anonymous
5433 5412 * pages in the requested range. We have to
5434 5413 * be careful regarding which rw flag to pass in
5435 5414 * because on a private mapping, the underlying
5436 5415 * object is never allowed to be written.
5437 5416 */
5438 5417 if (rw == S_WRITE && svd->type == MAP_PRIVATE) {
5439 5418 arw = S_READ;
5440 5419 } else {
5441 5420 arw = rw;
5442 5421 }
5443 5422 vp = svd->vp;
5444 5423 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5445 5424 "segvn_getpage:seg %p addr %p vp %p",
5446 5425 seg, addr, vp);
5447 5426 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len,
5448 5427 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw,
5449 5428 svd->cred, NULL);
5450 5429 if (err) {
5451 5430 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5452 5431 segvn_pagelist_rele(plp);
5453 5432 if (pl_alloc_sz)
5454 5433 kmem_free(plp, pl_alloc_sz);
5455 5434 return (FC_MAKE_ERR(err));
5456 5435 }
5457 5436 if (svd->type == MAP_PRIVATE)
5458 5437 vpprot &= ~PROT_WRITE;
5459 5438 }
5460 5439 }
5461 5440
5462 5441 /*
5463 5442 * N.B. at this time the plp array has all the needed non-anon
5464 5443 * pages in addition to (possibly) having some adjacent pages.
5465 5444 */
5466 5445
5467 5446 /*
5468 5447 * Always acquire the anon_array_lock to prevent
5469 5448 * 2 threads from allocating separate anon slots for
5470 5449 * the same "addr".
5471 5450 *
5472 5451 * If this is a copy-on-write fault and we don't already
5473 5452 * have the anon_array_lock, acquire it to prevent the
5474 5453 * fault routine from handling multiple copy-on-write faults
5475 5454 * on the same "addr" in the same address space.
5476 5455 *
5477 5456 * Only one thread should deal with the fault since after
5478 5457 * it is handled, the other threads can acquire a translation
5479 5458 * to the newly created private page. This prevents two or
5480 5459 * more threads from creating different private pages for the
5481 5460 * same fault.
5482 5461 *
5483 5462 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5484 5463 * to prevent deadlock between this thread and another thread
5485 5464 * which has soft-locked this page and wants to acquire serial_lock.
5486 5465 * ( bug 4026339 )
5487 5466 *
5488 5467 * The fix for bug 4026339 becomes unnecessary when using the
5489 5468 * locking scheme with a per-amp rwlock and a global set of
5490 5469 * hash locks, anon_array_lock. If we steal a vnode page when
5491 5470 * low on memory and upgrade the page lock through page_rename,
5492 5471 * then the page is PAGE_HANDLED, nothing needs to be done
5493 5472 * for this page after returning from segvn_faultpage.
5494 5473 *
5495 5474 * But really, the page lock should be downgraded after
5496 5475 * the stolen page is page_rename'd.
5497 5476 */
5498 5477
5499 5478 if (amp != NULL)
5500 5479 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5501 5480
5502 5481 /*
5503 5482 * Ok, now loop over the address range and handle faults
5504 5483 */
5505 5484 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5506 5485 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot,
5507 5486 type, rw, brkcow);
5508 5487 if (err) {
5509 5488 if (amp != NULL)
5510 5489 ANON_LOCK_EXIT(&amp->a_rwlock);
5511 5490 if (type == F_SOFTLOCK && a > addr) {
5512 5491 segvn_softunlock(seg, addr, (a - addr),
5513 5492 S_OTHER);
5514 5493 }
5515 5494 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5516 5495 segvn_pagelist_rele(plp);
5517 5496 if (pl_alloc_sz)
5518 5497 kmem_free(plp, pl_alloc_sz);
5519 5498 return (err);
5520 5499 }
5521 5500 if (vpage) {
5522 5501 vpage++;
5523 5502 } else if (svd->vpage) {
5524 5503 page = seg_page(seg, addr);
5525 5504 vpage = &svd->vpage[++page];
5526 5505 }
5527 5506 }
5528 5507
5529 5508 /* Didn't get pages from the underlying fs so we're done */
5530 5509 if (!dogetpage)
5531 5510 goto done;
5532 5511
5533 5512 /*
5534 5513 * Now handle any other pages in the list returned.
5535 5514 * If the page can be used, load up the translations now.
5536 5515 * Note that the for loop will only be entered if "plp"
5537 5516 * is pointing to a non-NULL page pointer which means that
5538 5517 * VOP_GETPAGE() was called and vpprot has been initialized.
5539 5518 */
5540 5519 if (svd->pageprot == 0)
5541 5520 prot = svd->prot & vpprot;
5542 5521
5543 5522
5544 5523 /*
5545 5524 * Large Files: diff should be unsigned value because we started
5546 5525 * supporting > 2GB segment sizes from 2.5.1 and when a
5547 5526 * large file of size > 2GB gets mapped to address space
5548 5527 * the diff value can be > 2GB.
5549 5528 */
5550 5529
5551 5530 for (ppp = plp; (pp = *ppp) != NULL; ppp++) {
5552 5531 size_t diff;
5553 5532 struct anon *ap;
5554 5533 int anon_index;
5555 5534 anon_sync_obj_t cookie;
5556 5535 int hat_flag = HAT_LOAD_ADV;
5557 5536
5558 5537 if (svd->flags & MAP_TEXT) {
5559 5538 hat_flag |= HAT_LOAD_TEXT;
5560 5539 }
5561 5540
5562 5541 if (pp == PAGE_HANDLED)
5563 5542 continue;
5564 5543
5565 5544 if (svd->tr_state != SEGVN_TR_ON &&
5566 5545 pp->p_offset >= svd->offset &&
5567 5546 pp->p_offset < svd->offset + seg->s_size) {
5568 5547
5569 5548 diff = pp->p_offset - svd->offset;
5570 5549
5571 5550 /*
5572 5551 * Large Files: Following is the assertion
5573 5552 * validating the above cast.
5574 5553 */
5575 5554 ASSERT(svd->vp == pp->p_vnode);
5576 5555
5577 5556 page = btop(diff);
5578 5557 if (svd->pageprot)
5579 5558 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5580 5559
5581 5560 /*
5582 5561 * Prevent other threads in the address space from
5583 5562 * creating private pages (i.e., allocating anon slots)
5584 5563 * while we are in the process of loading translations
5585 5564 * to additional pages returned by the underlying
5586 5565 * object.
5587 5566 */
5588 5567 if (amp != NULL) {
5589 5568 anon_index = svd->anon_index + page;
5590 5569 anon_array_enter(amp, anon_index, &cookie);
5591 5570 ap = anon_get_ptr(amp->ahp, anon_index);
5592 5571 }
5593 5572 if ((amp == NULL) || (ap == NULL)) {
5594 5573 if (IS_VMODSORT(pp->p_vnode) ||
5595 5574 enable_mbit_wa) {
5596 5575 if (rw == S_WRITE)
5597 5576 hat_setmod(pp);
5598 5577 else if (rw != S_OTHER &&
5599 5578 !hat_ismod(pp))
5600 5579 prot &= ~PROT_WRITE;
5601 5580 }
5602 5581 /*
5603 5582 * Skip mapping read ahead pages marked
5604 5583 * for migration, so they will get migrated
5605 5584 * properly on fault
5606 5585 */
5607 5586 ASSERT(amp == NULL ||
5608 5587 svd->rcookie == HAT_INVALID_REGION_COOKIE);
5609 5588 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) {
5610 5589 hat_memload_region(hat,
5611 5590 seg->s_base + diff,
5612 5591 pp, prot, hat_flag,
5613 5592 svd->rcookie);
5614 5593 }
5615 5594 }
5616 5595 if (amp != NULL)
5617 5596 anon_array_exit(&cookie);
5618 5597 }
5619 5598 page_unlock(pp);
5620 5599 }
5621 5600 done:
5622 5601 if (amp != NULL)
5623 5602 ANON_LOCK_EXIT(&amp->a_rwlock);
5624 5603 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5625 5604 if (pl_alloc_sz)
5626 5605 kmem_free(plp, pl_alloc_sz);
5627 5606 return (0);
5628 5607 }
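segvn_fault widens every large-page fault to pgsz boundaries (via CALC_LPG_REGION) before dispatching to segvn_fault_anonpages() or segvn_fault_vnodepages(). A user-space sketch of the underlying alignment arithmetic follows; the P2ALIGN/P2ROUNDUP definitions match <sys/sysmacros.h>, the addresses are hypothetical, and the real macro also clamps the result to the segment boundaries.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define P2ALIGN(x, align)	((x) & -(align))	/* round down */
#define P2ROUNDUP(x, align)	(-(-(x) & -(align)))	/* round up */

int
main(void)
{
	uintptr_t addr = 0x40321000;	/* hypothetical fault address */
	size_t len = 0x3000;		/* hypothetical fault length */
	uintptr_t pgsz = 0x400000;	/* hypothetical 4M large page */

	uintptr_t lpgaddr = P2ALIGN(addr, pgsz);
	uintptr_t lpgeaddr = P2ROUNDUP(addr + len, pgsz);

	/* fault [40321000, 40324000) -> region [40000000, 40400000) */
	(void) printf("[%lx, %lx) -> [%lx, %lx)\n",
	    (unsigned long)addr, (unsigned long)(addr + len),
	    (unsigned long)lpgaddr, (unsigned long)lpgeaddr);
	return (0);
}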
5629 5608
5630 5609 /*
5631 5610 * This routine is used to start I/O on pages asynchronously. XXX it will
5632 5611 * only create PAGESIZE pages. At fault time they will be relocated into
5633 5612 * larger pages.
5634 5613 */
5635 5614 static faultcode_t
5636 5615 segvn_faulta(struct seg *seg, caddr_t addr)
5637 5616 {
5638 5617 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5639 5618 int err;
5640 5619 struct anon_map *amp;
5641 5620 vnode_t *vp;
5642 5621
5643 5622 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5644 5623
5645 5624 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5646 5625 if ((amp = svd->amp) != NULL) {
5647 5626 struct anon *ap;
5648 5627
5649 5628 /*
5650 5629 * Reader lock to prevent amp->ahp from being changed.
5651 5630 * This is advisory, it's ok to miss a page, so
5652 5631 * we don't do anon_array_enter lock.
5653 5632 */
5654 5633 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5655 5634 if ((ap = anon_get_ptr(amp->ahp,
5656 5635 svd->anon_index + seg_page(seg, addr))) != NULL) {
5657 5636
5658 5637 err = anon_getpage(&ap, NULL, NULL,
5659 5638 0, seg, addr, S_READ, svd->cred);
5660 5639
5661 5640 ANON_LOCK_EXIT(&amp->a_rwlock);
5662 5641 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5663 5642 if (err)
5664 5643 return (FC_MAKE_ERR(err));
5665 5644 return (0);
5666 5645 }
5667 5646 ANON_LOCK_EXIT(&amp->a_rwlock);
5668 5647 }
5669 5648
5670 5649 if (svd->vp == NULL) {
5671 5650 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5672 5651 return (0); /* zfod page - do nothing now */
5673 5652 }
5674 5653
5675 5654 vp = svd->vp;
5676 5655 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5677 5656 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp);
5678 5657 err = VOP_GETPAGE(vp,
5679 5658 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5680 5659 PAGESIZE, NULL, NULL, 0, seg, addr,
5681 5660 S_OTHER, svd->cred, NULL);
5682 5661
5683 5662 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5684 5663 if (err)
5685 5664 return (FC_MAKE_ERR(err));
5686 5665 return (0);
5687 5666 }
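From user space this faultahead path is typically reached through madvise(MADV_WILLNEED) on a mapped file, which ends up calling as_faulta() over the range. A minimal sketch, with error handling abbreviated and any non-empty file as input:

#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
	struct stat st;
	int fd;
	void *p;

	if (argc != 2 || (fd = open(argv[1], O_RDONLY)) == -1 ||
	    fstat(fd, &st) == -1 || st.st_size == 0) {
		(void) fprintf(stderr, "usage: %s <non-empty file>\n",
		    argv[0]);
		return (1);
	}
	p = mmap(NULL, (size_t)st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	/* Hint that we will need the pages soon; the kernel may start I/O. */
	if (madvise(p, (size_t)st.st_size, MADV_WILLNEED) == -1)
		perror("madvise");
	return (0);
}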
5688 5667
5689 5668 static int
5690 5669 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5691 5670 {
5692 5671 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5693 5672 struct vpage *cvp, *svp, *evp;
5694 5673 struct vnode *vp;
5695 5674 size_t pgsz;
5696 5675 pgcnt_t pgcnt;
5697 5676 anon_sync_obj_t cookie;
5698 5677 int unload_done = 0;
5699 5678
5700 5679 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5701 5680
5702 5681 if ((svd->maxprot & prot) != prot)
5703 5682 return (EACCES); /* violated maxprot */
5704 5683
5705 5684 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5706 5685
5707 5686 /* return if prot is the same */
5708 5687 if (!svd->pageprot && svd->prot == prot) {
5709 5688 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5710 5689 return (0);
5711 5690 }
5712 5691
5713 5692 /*
5714 5693 * Since we change protections we first have to flush the cache.
5715 5694 * This makes sure all the pagelock calls have to recheck
5716 5695 * protections.
5717 5696 */
5718 5697 if (svd->softlockcnt > 0) {
5719 5698 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5720 5699
5721 5700 /*
5722 5701 * If this is a shared segment, a non-zero softlockcnt
5723 5702 * means locked pages are still in use.
5724 5703 */
5725 5704 if (svd->type == MAP_SHARED) {
5726 5705 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5727 5706 return (EAGAIN);
5728 5707 }
5729 5708
5730 5709 * Since we do have the segvn writers lock, nobody can fill
5731 5710 * Since we do have the segvn writers lock nobody can fill
5732 5711 * the cache with entries belonging to this seg during
5733 5712 * the purge. The flush either succeeds or we still have
5734 5713 * pending I/Os.
5735 5714 */
5736 5715 segvn_purge(seg);
5737 5716 if (svd->softlockcnt > 0) {
5738 5717 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5739 5718 return (EAGAIN);
5740 5719 }
5741 5720 }
5742 5721
5743 5722 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5744 5723 ASSERT(svd->amp == NULL);
5745 5724 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5746 5725 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5747 5726 HAT_REGION_TEXT);
5748 5727 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5749 5728 unload_done = 1;
5750 5729 } else if (svd->tr_state == SEGVN_TR_INIT) {
5751 5730 svd->tr_state = SEGVN_TR_OFF;
5752 5731 } else if (svd->tr_state == SEGVN_TR_ON) {
5753 5732 ASSERT(svd->amp != NULL);
5754 5733 segvn_textunrepl(seg, 0);
5755 5734 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5756 5735 unload_done = 1;
5757 5736 }
5758 5737
5759 5738 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED &&
5760 5739 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) {
5761 5740 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
5762 5741 segvn_inval_trcache(svd->vp);
5763 5742 }
5764 5743 if (seg->s_szc != 0) {
5765 5744 int err;
5766 5745 pgsz = page_get_pagesize(seg->s_szc);
5767 5746 pgcnt = pgsz >> PAGESHIFT;
5768 5747 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
5769 5748 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5770 5749 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5771 5750 ASSERT(seg->s_base != addr || seg->s_size != len);
5772 5751 /*
5773 5752 * If we are holding the as lock as a reader then
5774 5753 * we need to return IE_RETRY and let the as
5775 5754 * layer drop and re-acquire the lock as a writer.
5776 5755 */
5777 5756 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock))
5778 5757 return (IE_RETRY);
5779 5758 VM_STAT_ADD(segvnvmstats.demoterange[1]);
5780 5759 if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5781 5760 err = segvn_demote_range(seg, addr, len,
5782 5761 SDR_END, 0);
5783 5762 } else {
5784 5763 uint_t szcvec = map_pgszcvec(seg->s_base,
5785 5764 pgsz, (uintptr_t)seg->s_base,
5786 5765 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5787 5766 err = segvn_demote_range(seg, addr, len,
5788 5767 SDR_END, szcvec);
5789 5768 }
5790 5769 if (err == 0)
5791 5770 return (IE_RETRY);
5792 5771 if (err == ENOMEM)
5793 5772 return (IE_NOMEM);
5794 5773 return (err);
5795 5774 }
5796 5775 }
5797 5776
5798 5777
5799 5778 /*
5800 5779 * If it's a private mapping and we're making it writable then we
5801 5780 * may have to reserve the additional swap space now. If we are
5802 5781 * making writable only a part of the segment then we use its vpage
5803 5782 * array to keep a record of the pages for which we have reserved
5804 5783 * swap. In this case we set the pageswap field in the segment's
5805 5784 * segvn structure to record this.
5806 5785 *
5807 5786 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5808 5787 * removing write permission on the entire segment and we haven't
5809 5788 * modified any pages, we can release the swap space.
5810 5789 */
5811 5790 if (svd->type == MAP_PRIVATE) {
5812 5791 if (prot & PROT_WRITE) {
5813 5792 if (!(svd->flags & MAP_NORESERVE) &&
5814 5793 !(svd->swresv && svd->pageswap == 0)) {
5815 5794 size_t sz = 0;
5816 5795
5817 5796 /*
5818 5797 * Start by determining how much swap
5819 5798 * space is required.
5820 5799 */
5821 5800 if (addr == seg->s_base &&
5822 5801 len == seg->s_size &&
5823 5802 svd->pageswap == 0) {
5824 5803 /* The whole segment */
5825 5804 sz = seg->s_size;
5826 5805 } else {
5827 5806 /*
5828 5807 * Make sure that the vpage array
5829 5808 * exists, and make a note of the
5830 5809 * range of elements corresponding
5831 5810 * to len.
5832 5811 */
5833 5812 segvn_vpage(seg);
5834 5813 if (svd->vpage == NULL) {
5835 5814 SEGVN_LOCK_EXIT(seg->s_as,
5836 5815 &svd->lock);
5837 5816 return (ENOMEM);
5838 5817 }
5839 5818 svp = &svd->vpage[seg_page(seg, addr)];
5840 5819 evp = &svd->vpage[seg_page(seg,
5841 5820 addr + len)];
5842 5821
5843 5822 if (svd->pageswap == 0) {
5844 5823 /*
5845 5824 * This is the first time we've
5846 5825 * asked for a part of this
5847 5826 * segment, so we need to
5848 5827 * reserve everything we've
5849 5828 * been asked for.
5850 5829 */
5851 5830 sz = len;
5852 5831 } else {
5853 5832 /*
5854 5833 * We have to count the number
5855 5834 * of pages required.
5856 5835 */
5857 5836 for (cvp = svp; cvp < evp;
5858 5837 cvp++) {
5859 5838 if (!VPP_ISSWAPRES(cvp))
5860 5839 sz++;
5861 5840 }
5862 5841 sz <<= PAGESHIFT;
5863 5842 }
5864 5843 }
5865 5844
5866 5845 /* Try to reserve the necessary swap. */
5867 5846 if (anon_resv_zone(sz,
5868 5847 seg->s_as->a_proc->p_zone) == 0) {
5869 5848 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5870 5849 return (IE_NOMEM);
5871 5850 }
5872 5851
5873 5852 /*
5874 5853 * Make a note of how much swap space
5875 5854 * we've reserved.
5876 5855 */
5877 5856 if (svd->pageswap == 0 && sz == seg->s_size) {
5878 5857 svd->swresv = sz;
5879 5858 } else {
5880 5859 ASSERT(svd->vpage != NULL);
5881 5860 svd->swresv += sz;
5882 5861 svd->pageswap = 1;
5883 5862 for (cvp = svp; cvp < evp; cvp++) {
5884 5863 if (!VPP_ISSWAPRES(cvp))
5885 5864 VPP_SETSWAPRES(cvp);
5886 5865 }
5887 5866 }
5888 5867 }
5889 5868 } else {
5890 5869 /*
5891 5870 * Swap space is released only if this segment
5892 5871 * does not map anonymous memory, since read faults
5893 5872 * on such segments still need an anon slot to read
5894 5873 * in the data.
5895 5874 */
5896 5875 if (svd->swresv != 0 && svd->vp != NULL &&
5897 5876 svd->amp == NULL && addr == seg->s_base &&
5898 5877 len == seg->s_size && svd->pageprot == 0) {
5899 5878 ASSERT(svd->pageswap == 0);
5900 5879 anon_unresv_zone(svd->swresv,
5901 5880 seg->s_as->a_proc->p_zone);
5902 5881 svd->swresv = 0;
5903 5882 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
5904 5883 "anon proc:%p %lu %u", seg, 0, 0);
5905 5884 }
5906 5885 }
5907 5886 }
5908 5887
5909 5888 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5910 5889 if (svd->prot == prot) {
5911 5890 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5912 5891 return (0); /* all done */
5913 5892 }
5914 5893 svd->prot = (uchar_t)prot;
5915 5894 } else if (svd->type == MAP_PRIVATE) {
5916 5895 struct anon *ap = NULL;
5917 5896 page_t *pp;
5918 5897 u_offset_t offset, off;
5919 5898 struct anon_map *amp;
5920 5899 ulong_t anon_idx = 0;
5921 5900
5922 5901 /*
5923 5902 * A vpage structure exists or else the change does not
5924 5903 * involve the entire segment. Establish a vpage structure
5925 5904 * if none is there. Then, for each page in the range,
5926 5905 * adjust its individual permissions. Note that write-
5927 5906 * enabling a MAP_PRIVATE page can affect the claims for
5928 5907 * locked down memory. Overcommitting memory terminates
5929 5908 * the operation.
5930 5909 */
5931 5910 segvn_vpage(seg);
5932 5911 if (svd->vpage == NULL) {
5933 5912 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5934 5913 return (ENOMEM);
5935 5914 }
5936 5915 svd->pageprot = 1;
5937 5916 if ((amp = svd->amp) != NULL) {
5938 5917 anon_idx = svd->anon_index + seg_page(seg, addr);
5939 5918 ASSERT(seg->s_szc == 0 ||
5940 5919 IS_P2ALIGNED(anon_idx, pgcnt));
5941 5920 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5942 5921 }
5943 5922
5944 5923 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5945 5924 evp = &svd->vpage[seg_page(seg, addr + len)];
5946 5925
5947 5926 /*
5948 5927 * See Statement at the beginning of segvn_lockop regarding
5949 5928 * the way cowcnts and lckcnts are handled.
5950 5929 */
5951 5930 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5952 5931
5953 5932 if (seg->s_szc != 0) {
5954 5933 if (amp != NULL) {
5955 5934 anon_array_enter(amp, anon_idx,
5956 5935 &cookie);
5957 5936 }
5958 5937 if (IS_P2ALIGNED(anon_idx, pgcnt) &&
5959 5938 !segvn_claim_pages(seg, svp, offset,
5960 5939 anon_idx, prot)) {
5961 5940 if (amp != NULL) {
5962 5941 anon_array_exit(&cookie);
5963 5942 }
5964 5943 break;
5965 5944 }
5966 5945 if (amp != NULL) {
5967 5946 anon_array_exit(&cookie);
5968 5947 }
5969 5948 anon_idx++;
5970 5949 } else {
5971 5950 if (amp != NULL) {
5972 5951 anon_array_enter(amp, anon_idx,
5973 5952 &cookie);
5974 5953 ap = anon_get_ptr(amp->ahp, anon_idx++);
5975 5954 }
5976 5955
5977 5956 if (VPP_ISPPLOCK(svp) &&
5978 5957 VPP_PROT(svp) != prot) {
5979 5958
5980 5959 if (amp == NULL || ap == NULL) {
5981 5960 vp = svd->vp;
5982 5961 off = offset;
5983 5962 } else
5984 5963 swap_xlate(ap, &vp, &off);
5985 5964 if (amp != NULL)
5986 5965 anon_array_exit(&cookie);
5987 5966
5988 5967 if ((pp = page_lookup(vp, off,
5989 5968 SE_SHARED)) == NULL) {
5990 5969 panic("segvn_setprot: no page");
5991 5970 /*NOTREACHED*/
5992 5971 }
5993 5972 ASSERT(seg->s_szc == 0);
5994 5973 if ((VPP_PROT(svp) ^ prot) &
5995 5974 PROT_WRITE) {
5996 5975 if (prot & PROT_WRITE) {
5997 5976 if (!page_addclaim(
5998 5977 pp)) {
5999 5978 page_unlock(pp);
6000 5979 break;
6001 5980 }
6002 5981 } else {
6003 5982 if (!page_subclaim(
6004 5983 pp)) {
6005 5984 page_unlock(pp);
6006 5985 break;
6007 5986 }
6008 5987 }
6009 5988 }
6010 5989 page_unlock(pp);
6011 5990 } else if (amp != NULL)
6012 5991 anon_array_exit(&cookie);
6013 5992 }
6014 5993 VPP_SETPROT(svp, prot);
6015 5994 offset += PAGESIZE;
6016 5995 }
6017 5996 if (amp != NULL)
6018 5997 ANON_LOCK_EXIT(&amp->a_rwlock);
6019 5998
6020 5999 /*
6021 6000 * Did we terminate prematurely? If so, simply unload
6022 6001 * the translations to the things we've updated so far.
6023 6002 */
6024 6003 if (svp != evp) {
6025 6004 if (unload_done) {
6026 6005 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6027 6006 return (IE_NOMEM);
6028 6007 }
6029 6008 len = (svp - &svd->vpage[seg_page(seg, addr)]) *
6030 6009 PAGESIZE;
6031 6010 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz));
6032 6011 if (len != 0)
6033 6012 hat_unload(seg->s_as->a_hat, addr,
6034 6013 len, HAT_UNLOAD);
6035 6014 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6036 6015 return (IE_NOMEM);
6037 6016 }
6038 6017 } else {
6039 6018 segvn_vpage(seg);
6040 6019 if (svd->vpage == NULL) {
6041 6020 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6042 6021 return (ENOMEM);
6043 6022 }
6044 6023 svd->pageprot = 1;
6045 6024 evp = &svd->vpage[seg_page(seg, addr + len)];
6046 6025 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
6047 6026 VPP_SETPROT(svp, prot);
6048 6027 }
6049 6028 }
6050 6029
6051 6030 if (unload_done) {
6052 6031 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6053 6032 return (0);
6054 6033 }
6055 6034
6056 6035 if (((prot & PROT_WRITE) != 0 &&
6057 6036 (svd->vp != NULL || svd->type == MAP_PRIVATE)) ||
6058 6037 (prot & ~PROT_USER) == PROT_NONE) {
6059 6038 /*
6060 6039 * Either private or shared data with write access (in
6061 6040 * which case we need to throw out all former translations
6062 6041 * so that we get the right translations set up on fault
6063 6042 * and we don't allow write access to any copy-on-write pages
6064 6043 * that might be around or to prevent write access to pages
6065 6044 * representing holes in a file), or we don't have permission
6066 6045 * to access the memory at all (in which case we have to
6067 6046 * unload any current translations that might exist).
6068 6047 */
6069 6048 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
6070 6049 } else {
6071 6050 /*
6072 6051 * A shared mapping or a private mapping in which write
6073 6052 * protection is going to be denied - just change all the
6074 6053 * protections over the range of addresses in question.
6075 6054 * segvn does not support any other attributes other
6076 6055 * than prot so we can use hat_chgattr.
6077 6056 */
6078 6057 hat_chgattr(seg->s_as->a_hat, addr, len, prot);
6079 6058 }
6080 6059
6081 6060 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6082 6061
6083 6062 return (0);
6084 6063 }
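The partial-reservation bookkeeping above reduces to a count-then-shift: walk the vpage slice covering [addr, addr + len), count the pages not yet swap-reserved, and convert pages to bytes with PAGESHIFT. A user-space sketch follows, with a one-bit stand-in for VPP_ISSWAPRES() and an assumed 4K base page; names are illustrative, not the kernel's.

#include <stdio.h>
#include <stddef.h>

#define PAGESHIFT	12	/* assume 4K base pages for the example */

struct vpage_sketch {
	unsigned int swapres : 1;	/* stands in for VPP_ISSWAPRES() */
};

static size_t
swap_needed(struct vpage_sketch *svp, struct vpage_sketch *evp)
{
	size_t sz = 0;

	/* Count pages that still need a reservation... */
	for (struct vpage_sketch *cvp = svp; cvp < evp; cvp++) {
		if (!cvp->swapres)
			sz++;
	}
	return (sz << PAGESHIFT);	/* ...then convert pages to bytes. */
}

int
main(void)
{
	struct vpage_sketch vp[8] = { { 1 }, { 0 }, { 0 }, { 1 },
	    { 0 }, { 1 }, { 0 }, { 0 } };

	/* 5 unreserved pages -> 20480 bytes */
	(void) printf("need %zu bytes of swap\n",
	    swap_needed(&vp[0], &vp[8]));
	return (0);
}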
6085 6064
6086 6065 /*
6087 6066 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize,
6088 6067 * to determine if the seg is capable of mapping the requested szc.
6089 6068 */
6090 6069 static int
6091 6070 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
6092 6071 {
6093 6072 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6094 6073 struct segvn_data *nsvd;
6095 6074 struct anon_map *amp = svd->amp;
6096 6075 struct seg *nseg;
6097 6076 caddr_t eaddr = addr + len, a;
6098 6077 size_t pgsz = page_get_pagesize(szc);
6099 6078 pgcnt_t pgcnt = page_get_pagecnt(szc);
6100 6079 int err;
6101 6080 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6102 6081
6103 6082 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6104 6083 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6105 6084
6106 6085 if (seg->s_szc == szc || segvn_lpg_disable != 0) {
6107 6086 return (0);
6108 6087 }
6109 6088
6110 6089 /*
6111 6090 * addr should always be pgsz aligned but eaddr may be misaligned if
6112 6091 * it's at the end of the segment.
6113 6092 *
6114 6093 * XXX we should assert this condition since as_setpagesize() logic
6115 6094 * guarantees it.
6116 6095 */
6117 6096 if (!IS_P2ALIGNED(addr, pgsz) ||
6118 6097 (!IS_P2ALIGNED(eaddr, pgsz) &&
6119 6098 eaddr != seg->s_base + seg->s_size)) {
6120 6099
6121 6100 segvn_setpgsz_align_err++;
6122 6101 return (EINVAL);
6123 6102 }
6124 6103
6125 6104 if (amp != NULL && svd->type == MAP_SHARED) {
6126 6105 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6127 6106 if (!IS_P2ALIGNED(an_idx, pgcnt)) {
6128 6107
6129 6108 segvn_setpgsz_anon_align_err++;
6130 6109 return (EINVAL);
6131 6110 }
6132 6111 }
6133 6112
6134 6113 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas ||
6135 6114 szc > segvn_maxpgszc) {
6136 6115 return (EINVAL);
6137 6116 }
6138 6117
6139 6118 /* paranoid check */
6140 6119 if (svd->vp != NULL &&
6141 6120 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) {
6142 6121 return (EINVAL);
6143 6122 }
6144 6123
6145 6124 if (seg->s_szc == 0 && svd->vp != NULL &&
6146 6125 map_addr_vacalign_check(addr, off)) {
6147 6126 return (EINVAL);
6148 6127 }
6149 6128
6150 6129 /*
6151 6130 * Check that protections are the same within new page
6152 6131 * size boundaries.
6153 6132 */
6154 6133 if (svd->pageprot) {
6155 6134 for (a = addr; a < eaddr; a += pgsz) {
6156 6135 if ((a + pgsz) > eaddr) {
6157 6136 if (!sameprot(seg, a, eaddr - a)) {
6158 6137 return (EINVAL);
6159 6138 }
6160 6139 } else {
6161 6140 if (!sameprot(seg, a, pgsz)) {
6162 6141 return (EINVAL);
6163 6142 }
6164 6143 }
6165 6144 }
6166 6145 }
6167 6146
6168 6147 /*
6169 6148 * Since we are changing page size we first have to flush
6170 6149 * the cache. This makes sure all the pagelock calls have
6171 6150 * to recheck protections.
6172 6151 */
6173 6152 if (svd->softlockcnt > 0) {
6174 6153 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6175 6154
6176 6155 /*
6177 6156 * If this is a shared segment, a non-zero softlockcnt
6178 6157 * means locked pages are still in use.
6179 6158 */
6180 6159 if (svd->type == MAP_SHARED) {
6181 6160 return (EAGAIN);
6182 6161 }
6183 6162
6184 6163 /*
6185 6164 * Since we do have the segvn writers lock, nobody can fill
6186 6165 * the cache with entries belonging to this seg during
6187 6166 * the purge. The flush either succeeds or we still have
6188 6167 * pending I/Os.
6189 6168 */
6190 6169 segvn_purge(seg);
6191 6170 if (svd->softlockcnt > 0) {
6192 6171 return (EAGAIN);
6193 6172 }
6194 6173 }
6195 6174
6196 6175 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6197 6176 ASSERT(svd->amp == NULL);
6198 6177 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6199 6178 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6200 6179 HAT_REGION_TEXT);
6201 6180 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6202 6181 } else if (svd->tr_state == SEGVN_TR_INIT) {
6203 6182 svd->tr_state = SEGVN_TR_OFF;
6204 6183 } else if (svd->tr_state == SEGVN_TR_ON) {
6205 6184 ASSERT(svd->amp != NULL);
6206 6185 segvn_textunrepl(seg, 1);
6207 6186 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6208 6187 amp = NULL;
6209 6188 }
6210 6189
6211 6190 /*
6212 6191 * Operation for sub range of existing segment.
6213 6192 */
6214 6193 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) {
6215 6194 if (szc < seg->s_szc) {
6216 6195 VM_STAT_ADD(segvnvmstats.demoterange[2]);
6217 6196 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0);
6218 6197 if (err == 0) {
6219 6198 return (IE_RETRY);
6220 6199 }
6221 6200 if (err == ENOMEM) {
6222 6201 return (IE_NOMEM);
6223 6202 }
6224 6203 return (err);
6225 6204 }
6226 6205 if (addr != seg->s_base) {
6227 6206 nseg = segvn_split_seg(seg, addr);
6228 6207 if (eaddr != (nseg->s_base + nseg->s_size)) {
6229 6208 /* eaddr is szc aligned */
6230 6209 (void) segvn_split_seg(nseg, eaddr);
6231 6210 }
6232 6211 return (IE_RETRY);
6233 6212 }
6234 6213 if (eaddr != (seg->s_base + seg->s_size)) {
6235 6214 /* eaddr is szc aligned */
6236 6215 (void) segvn_split_seg(seg, eaddr);
6237 6216 }
6238 6217 return (IE_RETRY);
6239 6218 }
6240 6219
6241 6220 /*
6242 6221 * Break any low level sharing and reset seg->s_szc to 0.
6243 6222 */
6244 6223 if ((err = segvn_clrszc(seg)) != 0) {
6245 6224 if (err == ENOMEM) {
6246 6225 err = IE_NOMEM;
6247 6226 }
6248 6227 return (err);
6249 6228 }
6250 6229 ASSERT(seg->s_szc == 0);
6251 6230
6252 6231 /*
6253 6232 * If the end of the current segment is not pgsz aligned
6254 6233 * then attempt to concatenate with the next segment.
6255 6234 */
6256 6235 if (!IS_P2ALIGNED(eaddr, pgsz)) {
6257 6236 nseg = AS_SEGNEXT(seg->s_as, seg);
6258 6237 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) {
6259 6238 return (ENOMEM);
6260 6239 }
6261 6240 if (nseg->s_ops != &segvn_ops) {
6262 6241 return (EINVAL);
6263 6242 }
6264 6243 nsvd = (struct segvn_data *)nseg->s_data;
6265 6244 if (nsvd->softlockcnt > 0) {
6266 6245 /*
6267 6246 * If this is a shared segment, a non-zero softlockcnt
6268 6247 * means locked pages are still in use.
6269 6248 */
6270 6249 if (nsvd->type == MAP_SHARED) {
6271 6250 return (EAGAIN);
6272 6251 }
6273 6252 segvn_purge(nseg);
6274 6253 if (nsvd->softlockcnt > 0) {
6275 6254 return (EAGAIN);
6276 6255 }
6277 6256 }
6278 6257 err = segvn_clrszc(nseg);
6279 6258 if (err == ENOMEM) {
6280 6259 err = IE_NOMEM;
6281 6260 }
6282 6261 if (err != 0) {
6283 6262 return (err);
6284 6263 }
6285 6264 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6286 6265 err = segvn_concat(seg, nseg, 1);
6287 6266 if (err == -1) {
6288 6267 return (EINVAL);
6289 6268 }
6290 6269 if (err == -2) {
6291 6270 return (IE_NOMEM);
6292 6271 }
6293 6272 return (IE_RETRY);
6294 6273 }
6295 6274
6296 6275 /*
6297 6276 * May need to re-align anon array to
6298 6277 * new szc.
6299 6278 */
6300 6279 if (amp != NULL) {
6301 6280 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6302 6281 struct anon_hdr *nahp;
6303 6282
6304 6283 ASSERT(svd->type == MAP_PRIVATE);
6305 6284
6306 6285 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6307 6286 ASSERT(amp->refcnt == 1);
6308 6287 nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6309 6288 if (nahp == NULL) {
6310 6289 ANON_LOCK_EXIT(&amp->a_rwlock);
6311 6290 return (IE_NOMEM);
6312 6291 }
6313 6292 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6314 6293 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6315 6294 anon_release(nahp, btop(amp->size));
6316 6295 ANON_LOCK_EXIT(&amp->a_rwlock);
6317 6296 return (IE_NOMEM);
6318 6297 }
6319 6298 anon_release(amp->ahp, btop(amp->size));
6320 6299 amp->ahp = nahp;
6321 6300 svd->anon_index = 0;
6322 6301 ANON_LOCK_EXIT(&amp->a_rwlock);
6323 6302 }
6324 6303 }
6325 6304 if (svd->vp != NULL && szc != 0) {
6326 6305 struct vattr va;
6327 6306 u_offset_t eoffpage = svd->offset;
6328 6307 va.va_mask = AT_SIZE;
6329 6308 eoffpage += seg->s_size;
6330 6309 eoffpage = btopr(eoffpage);
6331 6310 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6332 6311 segvn_setpgsz_getattr_err++;
6333 6312 return (EINVAL);
6334 6313 }
6335 6314 if (btopr(va.va_size) < eoffpage) {
6336 6315 segvn_setpgsz_eof_err++;
6337 6316 return (EINVAL);
6338 6317 }
6339 6318 if (amp != NULL) {
6340 6319 /*
6341 6320 * anon_fill_cow_holes() may call VOP_GETPAGE().
6342 6321 * don't take anon map lock here to avoid holding it
6343 6322 * across VOP_GETPAGE() calls that may call back into
6344 6323 * segvn for klustering checks. We don't really need the
6345 6324 * anon map lock here since it's a private segment and
6346 6325 * we hold the as-level lock as writers.
6347 6326 */
6348 6327 if ((err = anon_fill_cow_holes(seg, seg->s_base,
6349 6328 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6350 6329 seg->s_size, szc, svd->prot, svd->vpage,
6351 6330 svd->cred)) != 0) {
6352 6331 return (EINVAL);
6353 6332 }
6354 6333 }
6355 6334 segvn_setvnode_mpss(svd->vp);
6356 6335 }
6357 6336
6358 6337 if (amp != NULL) {
6359 6338 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6360 6339 if (svd->type == MAP_PRIVATE) {
6361 6340 amp->a_szc = szc;
6362 6341 } else if (szc > amp->a_szc) {
6363 6342 amp->a_szc = szc;
6364 6343 }
6365 6344 ANON_LOCK_EXIT(&amp->a_rwlock);
6366 6345 }
6367 6346
6368 6347 seg->s_szc = szc;
6369 6348
6370 6349 return (0);
6371 6350 }
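As the checks above show, a promotion request must be pgsz aligned (except possibly at the segment end) and, when per-page protections exist, those protections must be uniform within every prospective large page. A user-space sketch of the uniformity walk; sameprot_sketch() and the raw protection bytes are illustrative stand-ins for sameprot() and the vpage array.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the per-page protections a vpage array would record. */
static int
sameprot_sketch(const unsigned char *prots, size_t first, size_t n)
{
	for (size_t i = 1; i < n; i++) {
		if (prots[first + i] != prots[first])
			return (0);
	}
	return (1);
}

/*
 * Walk [0, npages) in chunks of pgcnt pages, as the loop above walks
 * [addr, eaddr) in pgsz strides; the final chunk may be short if the
 * range ends mid large page.
 */
static int
can_promote(const unsigned char *prots, size_t npages, size_t pgcnt)
{
	for (size_t i = 0; i < npages; i += pgcnt) {
		size_t n = (i + pgcnt > npages) ? npages - i : pgcnt;
		if (!sameprot_sketch(prots, i, n))
			return (0);
	}
	return (1);
}

int
main(void)
{
	unsigned char prots[8] = { 5, 5, 5, 5, 7, 7, 7, 7 };

	(void) printf("%d\n", can_promote(prots, 8, 4));	/* 1 */
	prots[5] = 5;
	(void) printf("%d\n", can_promote(prots, 8, 4));	/* 0 */
	return (0);
}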
6372 6351
6373 6352 static int
6374 6353 segvn_clrszc(struct seg *seg)
6375 6354 {
6376 6355 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6377 6356 struct anon_map *amp = svd->amp;
6378 6357 size_t pgsz;
6379 6358 pgcnt_t pages;
6380 6359 int err = 0;
6381 6360 caddr_t a = seg->s_base;
6382 6361 caddr_t ea = a + seg->s_size;
6383 6362 ulong_t an_idx = svd->anon_index;
6384 6363 vnode_t *vp = svd->vp;
6385 6364 struct vpage *vpage = svd->vpage;
6386 6365 page_t *anon_pl[1 + 1], *pp;
6387 6366 struct anon *ap, *oldap;
6388 6367 uint_t prot = svd->prot, vpprot;
6389 6368 int pageflag = 0;
6390 6369
6391 6370 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6392 6371 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6393 6372 ASSERT(svd->softlockcnt == 0);
6394 6373
6395 6374 if (vp == NULL && amp == NULL) {
6396 6375 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6397 6376 seg->s_szc = 0;
6398 6377 return (0);
6399 6378 }
6400 6379
6401 6380 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6402 6381 ASSERT(svd->amp == NULL);
6403 6382 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6404 6383 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6405 6384 HAT_REGION_TEXT);
6406 6385 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6407 6386 } else if (svd->tr_state == SEGVN_TR_ON) {
6408 6387 ASSERT(svd->amp != NULL);
6409 6388 segvn_textunrepl(seg, 1);
6410 6389 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6411 6390 amp = NULL;
6412 6391 } else {
6413 6392 if (svd->tr_state != SEGVN_TR_OFF) {
6414 6393 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6415 6394 svd->tr_state = SEGVN_TR_OFF;
6416 6395 }
6417 6396
6418 6397 /*
6419 6398 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6420 6399 * unload argument is 0 when we are freeing the segment
6421 6400 * and unload was already done.
6422 6401 */
6423 6402 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6424 6403 HAT_UNLOAD_UNMAP);
6425 6404 }
6426 6405
6427 6406 if (amp == NULL || svd->type == MAP_SHARED) {
6428 6407 seg->s_szc = 0;
6429 6408 return (0);
6430 6409 }
6431 6410
6432 6411 pgsz = page_get_pagesize(seg->s_szc);
6433 6412 pages = btop(pgsz);
6434 6413
6435 6414 /*
6436 6415 * XXX anon rwlock is not really needed because this is a
6437 6416 * private segment and we are writers.
6438 6417 */
6439 6418 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6440 6419
6441 6420 for (; a < ea; a += pgsz, an_idx += pages) {
6442 6421 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6443 6422 ASSERT(vpage != NULL || svd->pageprot == 0);
6444 6423 if (vpage != NULL) {
6445 6424 ASSERT(sameprot(seg, a, pgsz));
6446 6425 prot = VPP_PROT(vpage);
6447 6426 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6448 6427 }
6449 6428 if (seg->s_szc != 0) {
6450 6429 ASSERT(vp == NULL || anon_pages(amp->ahp,
6451 6430 an_idx, pages) == pages);
6452 6431 if ((err = anon_map_demotepages(amp, an_idx,
6453 6432 seg, a, prot, vpage, svd->cred)) != 0) {
6454 6433 goto out;
6455 6434 }
6456 6435 } else {
6457 6436 if (oldap->an_refcnt == 1) {
6458 6437 continue;
6459 6438 }
6460 6439 if ((err = anon_getpage(&oldap, &vpprot,
6461 6440 anon_pl, PAGESIZE, seg, a, S_READ,
6462 6441 svd->cred))) {
6463 6442 goto out;
6464 6443 }
6465 6444 if ((pp = anon_private(&ap, seg, a, prot,
6466 6445 anon_pl[0], pageflag, svd->cred)) == NULL) {
6467 6446 err = ENOMEM;
6468 6447 goto out;
6469 6448 }
6470 6449 anon_decref(oldap);
6471 6450 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6472 6451 ANON_SLEEP);
6473 6452 page_unlock(pp);
6474 6453 }
6475 6454 }
6476 6455 vpage = (vpage == NULL) ? NULL : vpage + pages;
6477 6456 }
6478 6457
6479 6458 amp->a_szc = 0;
6480 6459 seg->s_szc = 0;
6481 6460 out:
6482 6461 ANON_LOCK_EXIT(&amp->a_rwlock);
6483 6462 return (err);
6484 6463 }
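When a private segment drops back to szc 0, each existing anon slot is kept if this segment holds the only reference and privately copied otherwise. A toy user-space sketch of that refcount decision; the struct and names are illustrative stand-ins for the anon slot and anon_private() path above.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-ins for an anon slot and its backing page (1 byte here). */
struct anon_sketch {
	int an_refcnt;
	char *page;
};

/*
 * Illustrative restatement of the szc == 0 branch above: a slot whose
 * refcnt is 1 is left alone; a shared slot gets a private copy of its
 * data and the old slot drops one reference.
 */
static struct anon_sketch *
demote_slot(struct anon_sketch *oldap)
{
	struct anon_sketch *ap;

	if (oldap->an_refcnt == 1)
		return (oldap);			/* sole owner: keep as-is */
	if ((ap = malloc(sizeof (*ap))) == NULL)
		return (NULL);
	ap->an_refcnt = 1;
	ap->page = strdup(oldap->page);		/* private copy of the data */
	oldap->an_refcnt--;			/* drop our reference */
	return (ap);
}

int
main(void)
{
	struct anon_sketch shared = { 2, "x" };
	struct anon_sketch *mine = demote_slot(&shared);

	(void) printf("old refcnt %d; got %s slot\n", shared.an_refcnt,
	    (mine == &shared) ? "the same" : "a private");
	return (0);
}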
6485 6464
6486 6465 static int
6487 6466 segvn_claim_pages(
6488 6467 struct seg *seg,
6489 6468 struct vpage *svp,
6490 6469 u_offset_t off,
6491 6470 ulong_t anon_idx,
6492 6471 uint_t prot)
6493 6472 {
6494 6473 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6495 6474 size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6496 6475 page_t **ppa;
6497 6476 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6498 6477 struct anon_map *amp = svd->amp;
6499 6478 struct vpage *evp = svp + pgcnt;
6500 6479 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6501 6480 + seg->s_base;
6502 6481 struct anon *ap;
6503 6482 struct vnode *vp = svd->vp;
6504 6483 page_t *pp;
6505 6484 pgcnt_t pg_idx, i;
6506 6485 int err = 0;
6507 6486 anoff_t aoff;
6508 6487 int anon = (amp != NULL) ? 1 : 0;
6509 6488
6510 6489 ASSERT(svd->type == MAP_PRIVATE);
6511 6490 ASSERT(svd->vpage != NULL);
6512 6491 ASSERT(seg->s_szc != 0);
6513 6492 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
6514 6493 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt));
6515 6494 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));
6516 6495
6517 6496 if (VPP_PROT(svp) == prot)
6518 6497 return (1);
6519 6498 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE))
6520 6499 return (1);
6521 6500
6522 6501 ppa = kmem_alloc(ppasize, KM_SLEEP);
6523 6502 if (anon && vp != NULL) {
6524 6503 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) {
6525 6504 anon = 0;
6526 6505 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt));
6527 6506 }
6528 6507 ASSERT(!anon ||
6529 6508 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt);
6530 6509 }
6531 6510
6532 6511 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
6533 6512 if (!VPP_ISPPLOCK(svp))
6534 6513 continue;
6535 6514 if (anon) {
6536 6515 ap = anon_get_ptr(amp->ahp, anon_idx);
6537 6516 if (ap == NULL) {
6538 6517 panic("segvn_claim_pages: no anon slot");
6539 6518 }
6540 6519 swap_xlate(ap, &vp, &aoff);
6541 6520 off = (u_offset_t)aoff;
6542 6521 }
6543 6522 ASSERT(vp != NULL);
6544 6523 if ((pp = page_lookup(vp,
6545 6524 (u_offset_t)off, SE_SHARED)) == NULL) {
6546 6525 panic("segvn_claim_pages: no page");
6547 6526 }
6548 6527 ppa[pg_idx++] = pp;
6549 6528 off += PAGESIZE;
6550 6529 }
6551 6530
6552 6531 if (ppa[0] == NULL) {
6553 6532 kmem_free(ppa, ppasize);
6554 6533 return (1);
6555 6534 }
6556 6535
6557 6536 ASSERT(pg_idx <= pgcnt);
6558 6537 ppa[pg_idx] = NULL;
6559 6538
6560 6539
6561 6540 /* Find each large page within ppa, and adjust its claim */
6562 6541
6563 6542 /* Does ppa cover a single large page? */
6564 6543 if (ppa[0]->p_szc == seg->s_szc) {
6565 6544 if (prot & PROT_WRITE)
6566 6545 err = page_addclaim_pages(ppa);
6567 6546 else
6568 6547 err = page_subclaim_pages(ppa);
6569 6548 } else {
6570 6549 for (i = 0; ppa[i]; i += pgcnt) {
6571 6550 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
6572 6551 if (prot & PROT_WRITE)
6573 6552 err = page_addclaim_pages(&ppa[i]);
6574 6553 else
6575 6554 err = page_subclaim_pages(&ppa[i]);
6576 6555 if (err == 0)
6577 6556 break;
6578 6557 }
6579 6558 }
6580 6559
6581 6560 for (i = 0; i < pg_idx; i++) {
6582 6561 ASSERT(ppa[i] != NULL);
6583 6562 page_unlock(ppa[i]);
6584 6563 }
6585 6564
6586 6565 kmem_free(ppa, ppasize);
6587 6566 return (err);
6588 6567 }
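When ppa spans more than one constituent large page (ppa[0]->p_szc != seg->s_szc), the claim adjustment above is applied one pgcnt-sized sublist at a time and stops on the first failure, since the claim functions return 1 on success and 0 on failure. A generic user-space sketch of that walk over a NULL-terminated array; op() is an illustrative stand-in for page_addclaim_pages()/page_subclaim_pages().

#include <stdio.h>
#include <stddef.h>

/*
 * Walk a NULL-terminated array in strides of pgcnt entries, applying
 * op() to each sublist and stopping on the first failure, as the
 * claim loop above does.
 */
static int
foreach_sublist(void **ppa, size_t pgcnt, int (*op)(void **))
{
	int err = 0;

	for (size_t i = 0; ppa[i] != NULL; i += pgcnt) {
		if ((err = op(&ppa[i])) == 0)
			break;
	}
	return (err);
}

static int
print_first(void **sub)
{
	(void) printf("sublist starts at %s\n", (char *)sub[0]);
	return (1);	/* "success", matching the claim functions */
}

int
main(void)
{
	void *ppa[] = { "p0", "p1", "p2", "p3", NULL };

	return (foreach_sublist(ppa, 2, print_first) ? 0 : 1);
}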
6589 6568
6590 6569 /*
6591 6570 * Returns right (upper address) segment if split occurred.
6592 6571 * If the address is equal to the beginning or end of its segment it returns
6593 6572 * the current segment.
6594 6573 */
6595 6574 static struct seg *
6596 6575 segvn_split_seg(struct seg *seg, caddr_t addr)
6597 6576 {
6598 6577 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6599 6578 struct seg *nseg;
6600 6579 size_t nsize;
6601 6580 struct segvn_data *nsvd;
6602 6581
6603 6582 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6604 6583 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6605 6584
6606 6585 ASSERT(addr >= seg->s_base);
6607 6586 ASSERT(addr <= seg->s_base + seg->s_size);
6608 6587 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6609 6588
6610 6589 if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6611 6590 return (seg);
6612 6591
6613 6592 nsize = seg->s_base + seg->s_size - addr;
6614 6593 seg->s_size = addr - seg->s_base;
6615 6594 nseg = seg_alloc(seg->s_as, addr, nsize);
6616 6595 ASSERT(nseg != NULL);
6617 6596 nseg->s_ops = seg->s_ops;
6618 6597 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
6619 6598 nseg->s_data = (void *)nsvd;
6620 6599 nseg->s_szc = seg->s_szc;
6621 6600 *nsvd = *svd;
6622 6601 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6623 6602 nsvd->seg = nseg;
6624 6603 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);
6625 6604
6626 6605 if (nsvd->vp != NULL) {
6627 6606 VN_HOLD(nsvd->vp);
6628 6607 nsvd->offset = svd->offset +
6629 6608 (uintptr_t)(nseg->s_base - seg->s_base);
6630 6609 if (nsvd->type == MAP_SHARED)
6631 6610 lgrp_shm_policy_init(NULL, nsvd->vp);
6632 6611 } else {
6633 6612 /*
6634 6613 * The offset for an anonymous segment has no significance in
6635 6614 * terms of an offset into a file. If we were to use the above
6636 6615 * calculation instead, the structures read out of
6637 6616 * /proc/<pid>/xmap would be more difficult to decipher since
6638 6617 * it would be unclear whether two seemingly contiguous
6639 6618 * prxmap_t structures represented different segments or a
6640 6619 * single segment that had been split up into multiple prxmap_t
6641 6620 * structures (e.g. if some part of the segment had not yet
6642 6621 * been faulted in).
6643 6622 */
6644 6623 nsvd->offset = 0;
6645 6624 }
6646 6625
6647 6626 ASSERT(svd->softlockcnt == 0);
6648 6627 ASSERT(svd->softlockcnt_sbase == 0);
6649 6628 ASSERT(svd->softlockcnt_send == 0);
6650 6629 crhold(svd->cred);
6651 6630
6652 6631 if (svd->vpage != NULL) {
6653 6632 size_t bytes = vpgtob(seg_pages(seg));
6654 6633 size_t nbytes = vpgtob(seg_pages(nseg));
6655 6634 struct vpage *ovpage = svd->vpage;
6656 6635
6657 6636 svd->vpage = kmem_alloc(bytes, KM_SLEEP);
6658 6637 bcopy(ovpage, svd->vpage, bytes);
6659 6638 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
6660 6639 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes);
6661 6640 kmem_free(ovpage, bytes + nbytes);
6662 6641 }
6663 6642 if (svd->amp != NULL && svd->type == MAP_PRIVATE) {
6664 6643 struct anon_map *oamp = svd->amp, *namp;
6665 6644 struct anon_hdr *nahp;
6666 6645
6667 6646 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER);
6668 6647 ASSERT(oamp->refcnt == 1);
6669 6648 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
6670 6649 (void) anon_copy_ptr(oamp->ahp, svd->anon_index,
6671 6650 nahp, 0, btop(seg->s_size), ANON_SLEEP);
6672 6651
6673 6652 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
6674 6653 namp->a_szc = nseg->s_szc;
6675 6654 (void) anon_copy_ptr(oamp->ahp,
6676 6655 svd->anon_index + btop(seg->s_size),
6677 6656 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
6678 6657 anon_release(oamp->ahp, btop(oamp->size));
6679 6658 oamp->ahp = nahp;
6680 6659 oamp->size = seg->s_size;
6681 6660 svd->anon_index = 0;
6682 6661 nsvd->amp = namp;
6683 6662 nsvd->anon_index = 0;
6684 6663 ANON_LOCK_EXIT(&oamp->a_rwlock);
6685 6664 } else if (svd->amp != NULL) {
6686 6665 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6687 6666 ASSERT(svd->amp == nsvd->amp);
6688 6667 ASSERT(seg->s_szc <= svd->amp->a_szc);
6689 6668 nsvd->anon_index = svd->anon_index + seg_pages(seg);
6690 6669 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt));
6691 6670 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER);
6692 6671 svd->amp->refcnt++;
6693 6672 ANON_LOCK_EXIT(&svd->amp->a_rwlock);
6694 6673 }
6695 6674
6696 6675 /*
6697 6676 * Split the amount of swap reserved.
6698 6677 */
6699 6678 if (svd->swresv) {
6700 6679 /*
6701 6680 * For MAP_NORESERVE, only allocate swap reserve for pages
6702 6681 * being used. Other segments get enough to cover whole
6703 6682 * segment.
6704 6683 */
6705 6684 if (svd->flags & MAP_NORESERVE) {
6706 6685 size_t oswresv;
6707 6686
6708 6687 ASSERT(svd->amp);
6709 6688 oswresv = svd->swresv;
6710 6689 svd->swresv = ptob(anon_pages(svd->amp->ahp,
6711 6690 svd->anon_index, btop(seg->s_size)));
6712 6691 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
6713 6692 nsvd->anon_index, btop(nseg->s_size)));
6714 6693 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
6715 6694 } else {
6716 6695 if (svd->pageswap) {
6717 6696 svd->swresv = segvn_count_swap_by_vpages(seg);
6718 6697 ASSERT(nsvd->swresv >= svd->swresv);
6719 6698 nsvd->swresv -= svd->swresv;
6720 6699 } else {
6721 6700 ASSERT(svd->swresv == seg->s_size +
6722 6701 nseg->s_size);
6723 6702 svd->swresv = seg->s_size;
6724 6703 nsvd->swresv = nseg->s_size;
6725 6704 }
6726 6705 }
6727 6706 }
6728 6707
6729 6708 return (nseg);
6730 6709 }
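
Editorial sketch: the size arithmetic in segvn_split_seg() is easy to gloss over. The original segment is truncated in place to [base, addr) and the remainder becomes the new right-hand segment. A hedged user-level illustration of just that arithmetic, with a hypothetical range type standing in for struct seg:

#include <stddef.h>

/* Hypothetical stand-in for struct seg; illustrative only. */
struct range {
        char    *base;
        size_t  size;
};

/*
 * Split r at addr (base < addr < base + size): r keeps [base, addr)
 * and the returned range covers [addr, base + size), the same nsize
 * computation segvn_split_seg() performs.
 */
static struct range
range_split(struct range *r, char *addr)
{
        struct range right;

        right.base = addr;
        right.size = r->base + r->size - addr;
        r->size = addr - r->base;
        return (right);
}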
6731 6710
6732 6711 /*
6733 6712 * called on memory operations (unmap, setprot, setpagesize) for a subset
6734 6713 * of a large page segment to either demote the memory range (SDR_RANGE)
6735 6714 * or the ends (SDR_END) by addr/len.
6736 6715 *
6737 6716 * returns 0 on success. returns errno, including ENOMEM, on failure.
6738 6717 */
6739 6718 static int
6740 6719 segvn_demote_range(
6741 6720 struct seg *seg,
6742 6721 caddr_t addr,
6743 6722 size_t len,
6744 6723 int flag,
6745 6724 uint_t szcvec)
6746 6725 {
6747 6726 caddr_t eaddr = addr + len;
6748 6727 caddr_t lpgaddr, lpgeaddr;
6749 6728 struct seg *nseg;
6750 6729 struct seg *badseg1 = NULL;
6751 6730 struct seg *badseg2 = NULL;
6752 6731 size_t pgsz;
6753 6732 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6754 6733 int err;
6755 6734 uint_t szc = seg->s_szc;
6756 6735 uint_t tszcvec;
6757 6736
6758 6737 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6759 6738 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6760 6739 ASSERT(szc != 0);
6761 6740 pgsz = page_get_pagesize(szc);
6762 6741 ASSERT(seg->s_base != addr || seg->s_size != len);
6763 6742 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6764 6743 ASSERT(svd->softlockcnt == 0);
6765 6744 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6766 6745 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6767 6746
6768 6747 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
6769 6748 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
6770 6749 if (flag == SDR_RANGE) {
6771 6750 /* demote entire range */
6772 6751 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6773 6752 (void) segvn_split_seg(nseg, lpgeaddr);
6774 6753 ASSERT(badseg1->s_base == lpgaddr);
6775 6754 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
6776 6755 } else if (addr != lpgaddr) {
6777 6756 ASSERT(flag == SDR_END);
6778 6757 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6779 6758 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
6780 6759 eaddr < lpgaddr + 2 * pgsz) {
6781 6760 (void) segvn_split_seg(nseg, lpgeaddr);
6782 6761 ASSERT(badseg1->s_base == lpgaddr);
6783 6762 ASSERT(badseg1->s_size == 2 * pgsz);
6784 6763 } else {
6785 6764 nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
6786 6765 ASSERT(badseg1->s_base == lpgaddr);
6787 6766 ASSERT(badseg1->s_size == pgsz);
6788 6767 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
6789 6768 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
6790 6769 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
6791 6770 badseg2 = nseg;
6792 6771 (void) segvn_split_seg(nseg, lpgeaddr);
6793 6772 ASSERT(badseg2->s_base == lpgeaddr - pgsz);
6794 6773 ASSERT(badseg2->s_size == pgsz);
6795 6774 }
6796 6775 }
6797 6776 } else {
6798 6777 ASSERT(flag == SDR_END);
6799 6778 ASSERT(eaddr < lpgeaddr);
6800 6779 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
6801 6780 (void) segvn_split_seg(nseg, lpgeaddr);
6802 6781 ASSERT(badseg1->s_base == lpgeaddr - pgsz);
6803 6782 ASSERT(badseg1->s_size == pgsz);
6804 6783 }
6805 6784
6806 6785 ASSERT(badseg1 != NULL);
6807 6786 ASSERT(badseg1->s_szc == szc);
6808 6787 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
6809 6788 badseg1->s_size == 2 * pgsz);
6810 6789 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
6811 6790 ASSERT(badseg1->s_size == pgsz ||
6812 6791 sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
6813 6792 if (err = segvn_clrszc(badseg1)) {
6814 6793 return (err);
6815 6794 }
6816 6795 ASSERT(badseg1->s_szc == 0);
6817 6796
6818 6797 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6819 6798 uint_t tszc = highbit(tszcvec) - 1;
6820 6799 caddr_t ta = MAX(addr, badseg1->s_base);
6821 6800 caddr_t te;
6822 6801 size_t tpgsz = page_get_pagesize(tszc);
6823 6802
6824 6803 ASSERT(svd->type == MAP_SHARED);
6825 6804 ASSERT(flag == SDR_END);
6826 6805 ASSERT(tszc < szc && tszc > 0);
6827 6806
6828 6807 if (eaddr > badseg1->s_base + badseg1->s_size) {
6829 6808 te = badseg1->s_base + badseg1->s_size;
6830 6809 } else {
6831 6810 te = eaddr;
6832 6811 }
6833 6812
6834 6813 ASSERT(ta <= te);
6835 6814 badseg1->s_szc = tszc;
6836 6815 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
6837 6816 if (badseg2 != NULL) {
6838 6817 err = segvn_demote_range(badseg1, ta, te - ta,
6839 6818 SDR_END, tszcvec);
6840 6819 if (err != 0) {
6841 6820 return (err);
6842 6821 }
6843 6822 } else {
6844 6823 return (segvn_demote_range(badseg1, ta,
6845 6824 te - ta, SDR_END, tszcvec));
6846 6825 }
6847 6826 }
6848 6827 }
6849 6828
6850 6829 if (badseg2 == NULL)
6851 6830 return (0);
6852 6831 ASSERT(badseg2->s_szc == szc);
6853 6832 ASSERT(badseg2->s_size == pgsz);
6854 6833 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
6855 6834 if (err = segvn_clrszc(badseg2)) {
6856 6835 return (err);
6857 6836 }
6858 6837 ASSERT(badseg2->s_szc == 0);
6859 6838
6860 6839 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6861 6840 uint_t tszc = highbit(tszcvec) - 1;
6862 6841 size_t tpgsz = page_get_pagesize(tszc);
6863 6842
6864 6843 ASSERT(svd->type == MAP_SHARED);
6865 6844 ASSERT(flag == SDR_END);
6866 6845 ASSERT(tszc < szc && tszc > 0);
6867 6846 ASSERT(badseg2->s_base > addr);
6868 6847 ASSERT(eaddr > badseg2->s_base);
6869 6848 ASSERT(eaddr < badseg2->s_base + badseg2->s_size);
6870 6849
6871 6850 badseg2->s_szc = tszc;
6872 6851 if (!IS_P2ALIGNED(eaddr, tpgsz)) {
6873 6852 return (segvn_demote_range(badseg2, badseg2->s_base,
6874 6853 eaddr - badseg2->s_base, SDR_END, tszcvec));
6875 6854 }
6876 6855 }
6877 6856
6878 6857 return (0);
6879 6858 }
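
Editorial sketch: CALC_LPG_REGION, used above, expands [addr, addr + len) outward to the enclosing large-page boundaries before any splitting happens. A simplified illustration of that rounding, assuming pgsz is a power of two and ignoring the clipping to segment bounds that the real macro also performs:

#include <stdint.h>
#include <stddef.h>

/*
 * Illustrative only: round addr down and addr + len up to pgsz
 * boundaries, yielding the large-page region enclosing the request.
 */
static void
lpg_region(uintptr_t addr, size_t len, size_t pgsz,
    uintptr_t *lpgaddr, uintptr_t *lpgeaddr)
{
        *lpgaddr = addr & ~((uintptr_t)pgsz - 1);
        *lpgeaddr = (addr + len + pgsz - 1) & ~((uintptr_t)pgsz - 1);
}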
6880 6859
6881 6860 static int
6882 6861 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6883 6862 {
6884 6863 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6885 6864 struct vpage *vp, *evp;
6886 6865
6887 6866 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6888 6867
6889 6868 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6890 6869 /*
6891 6870 * If segment protections can be used, simply check against them.
6892 6871 */
6893 6872 if (svd->pageprot == 0) {
6894 6873 int err;
6895 6874
6896 6875 err = ((svd->prot & prot) != prot) ? EACCES : 0;
6897 6876 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6898 6877 return (err);
6899 6878 }
6900 6879
6901 6880 /*
6902 6881 * Have to check down to the vpage level.
6903 6882 */
6904 6883 evp = &svd->vpage[seg_page(seg, addr + len)];
6905 6884 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6906 6885 if ((VPP_PROT(vp) & prot) != prot) {
6907 6886 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6908 6887 return (EACCES);
6909 6888 }
6910 6889 }
6911 6890 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6912 6891 return (0);
6913 6892 }
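
Editorial sketch: the protection test segvn_checkprot() applies, at both the segment and the vpage level, is the standard subset check: every requested bit must already be granted. A one-function illustration (not kernel code):

#include <sys/types.h>
#include <sys/mman.h>

/*
 * Illustrative only: nonzero iff every bit in want is present in
 * have. E.g. prot_allows(PROT_READ, PROT_READ | PROT_WRITE) == 0,
 * which is the EACCES case above.
 */
static int
prot_allows(uint_t have, uint_t want)
{
        return ((have & want) == want);
}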
6914 6893
6915 6894 static int
6916 6895 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6917 6896 {
6918 6897 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6919 6898 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6920 6899
6921 6900 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6922 6901
6923 6902 if (pgno != 0) {
6924 6903 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6925 6904 if (svd->pageprot == 0) {
6926 6905 do {
6927 6906 protv[--pgno] = svd->prot;
6928 6907 } while (pgno != 0);
6929 6908 } else {
6930 6909 size_t pgoff = seg_page(seg, addr);
6931 6910
6932 6911 do {
6933 6912 pgno--;
6934 6913 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
6935 6914 } while (pgno != 0);
6936 6915 }
6937 6916 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6938 6917 }
6939 6918 return (0);
6940 6919 }
6941 6920
6942 6921 static u_offset_t
6943 6922 segvn_getoffset(struct seg *seg, caddr_t addr)
6944 6923 {
6945 6924 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6946 6925
6947 6926 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6948 6927
6949 6928 return (svd->offset + (uintptr_t)(addr - seg->s_base));
6950 6929 }
6951 6930
6952 6931 /*ARGSUSED*/
6953 6932 static int
6954 6933 segvn_gettype(struct seg *seg, caddr_t addr)
6955 6934 {
6956 6935 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6957 6936
6958 6937 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6959 6938
6960 6939 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6961 6940 MAP_INITDATA)));
6962 6941 }
6963 6942
6964 6943 /*ARGSUSED*/
6965 6944 static int
6966 6945 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6967 6946 {
6968 6947 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6969 6948
6970 6949 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6971 6950
6972 6951 *vpp = svd->vp;
6973 6952 return (0);
6974 6953 }
6975 6954
6976 6955 /*
6977 6956 * Check to see if it makes sense to do kluster/read ahead to
6978 6957 * addr + delta relative to the mapping at addr. We assume here
6979 6958 * that delta is a signed PAGESIZE'd multiple (which can be negative).
6980 6959 *
6981 6960 * For segvn, we currently "approve" of the action if we are
6982 6961 * still in the segment and it maps from the same vp/off,
6983 6962 * or if the advice stored in segvn_data or vpages allows it.
6984 6963 * Currently, klustering is disallowed only if MADV_RANDOM is set.
6985 6964 */
6986 6965 static int
6987 6966 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6988 6967 {
6989 6968 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6990 6969 struct anon *oap, *ap;
6991 6970 ssize_t pd;
6992 6971 size_t page;
6993 6972 struct vnode *vp1, *vp2;
6994 6973 u_offset_t off1, off2;
6995 6974 struct anon_map *amp;
6996 6975
6997 6976 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6998 6977 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6999 6978 SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
7000 6979
7001 6980 if (addr + delta < seg->s_base ||
7002 6981 addr + delta >= (seg->s_base + seg->s_size))
7003 6982 return (-1); /* exceeded segment bounds */
7004 6983
7005 6984 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */
7006 6985 page = seg_page(seg, addr);
7007 6986
7008 6987 /*
7009 6988 * Check to see if either of the pages addr or addr + delta
7010 6989 * have advice set that prevents klustering (if MADV_RANDOM advice
7011 6990 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
7012 6991 * is negative).
7013 6992 */
7014 6993 if (svd->advice == MADV_RANDOM ||
7015 6994 svd->advice == MADV_SEQUENTIAL && delta < 0)
7016 6995 return (-1);
7017 6996 else if (svd->pageadvice && svd->vpage) {
7018 6997 struct vpage *bvpp, *evpp;
7019 6998
7020 6999 bvpp = &svd->vpage[page];
7021 7000 evpp = &svd->vpage[page + pd];
7022 7001 if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
7023 7002 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
7024 7003 return (-1);
7025 7004 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
7026 7005 VPP_ADVICE(evpp) == MADV_RANDOM)
7027 7006 return (-1);
7028 7007 }
7029 7008
7030 7009 if (svd->type == MAP_SHARED)
7031 7010 return (0); /* shared mapping - all ok */
7032 7011
7033 7012 if ((amp = svd->amp) == NULL)
7034 7013 return (0); /* off original vnode */
7035 7014
7036 7015 page += svd->anon_index;
7037 7016
7038 7017 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7039 7018
7040 7019 oap = anon_get_ptr(amp->ahp, page);
7041 7020 ap = anon_get_ptr(amp->ahp, page + pd);
7042 7021
7043 7022 ANON_LOCK_EXIT(&amp->a_rwlock);
7044 7023
7045 7024 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
7046 7025 return (-1); /* one with and one without an anon */
7047 7026 }
7048 7027
7049 7028 if (oap == NULL) { /* implies that ap == NULL */
7050 7029 return (0); /* off original vnode */
7051 7030 }
7052 7031
7053 7032 /*
7054 7033 * Now we know we have two anon pointers - check to
7055 7034 * see if they happen to be properly allocated.
7056 7035 */
7057 7036
7058 7037 /*
7059 7038 * XXX We cheat here and don't lock the anon slots. We can't because
7060 7039 * we may have been called from the anon layer which might already
7061 7040 * have locked them. We are holding a refcnt on the slots so they
7062 7041 * can't disappear. The worst that will happen is we'll get the wrong
7063 7042 * names (vp, off) for the slots and make a poor klustering decision.
7064 7043 */
7065 7044 swap_xlate(ap, &vp1, &off1);
7066 7045 swap_xlate(oap, &vp2, &off2);
7067 7046
7068 7047
7069 7048 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
7070 7049 return (-1);
7071 7050 return (0);
7072 7051 }
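
Editorial sketch: the first rejection in segvn_kluster() is a pure bounds test. The candidate address addr + delta, where delta is a signed, page-aligned byte offset, must still fall inside the segment. A hedged illustration of that check in isolation:

#include <sys/types.h>

/*
 * Illustrative only: mirrors the bounds test at the top of
 * segvn_kluster(). delta may be negative; the target must lie
 * within [base, base + size).
 */
static int
kluster_in_bounds(caddr_t base, size_t size, caddr_t addr, ssize_t delta)
{
        caddr_t target = addr + delta;

        return (target >= base && target < base + size);
}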
7073 7052
7074 7053 /*
7075 7054 * Swap the pages of seg out to secondary storage, returning the
7076 7055 * number of bytes of storage freed.
7077 7056 *
7078 7057 * The basic idea is first to unload all translations and then to call
7079 7058 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the
7080 7059 * swap device. Pages to which other segments have mappings will remain
7081 7060 * mapped and won't be swapped. Our caller (as_swapout) has already
7082 7061 * performed the unloading step.
7083 7062 *
7084 7063 * The value returned is intended to correlate well with the process's
7085 7064 * memory requirements. However, there are some caveats:
7086 7065 * 1) When given a shared segment as argument, this routine will
7087 7066 * only succeed in swapping out pages for the last sharer of the
7088 7067 * segment. (Previous callers will only have decremented mapping
7089 7068 * reference counts.)
7090 7069 * 2) We assume that the hat layer maintains a large enough translation
7091 7070 * cache to capture process reference patterns.
7092 7071 */
7093 7072 static size_t
7094 7073 segvn_swapout(struct seg *seg)
7095 7074 {
7096 7075 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7097 7076 struct anon_map *amp;
7098 7077 pgcnt_t pgcnt = 0;
7099 7078 pgcnt_t npages;
7100 7079 pgcnt_t page;
7101 7080 ulong_t anon_index;
7102 7081
7103 7082 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7104 7083
7105 7084 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7106 7085 /*
7107 7086 * Find pages unmapped by our caller and force them
7108 7087 * out to the virtual swap device.
7109 7088 */
7110 7089 if ((amp = svd->amp) != NULL)
7111 7090 anon_index = svd->anon_index;
7112 7091 npages = seg->s_size >> PAGESHIFT;
7113 7092 for (page = 0; page < npages; page++) {
7114 7093 page_t *pp;
7115 7094 struct anon *ap;
7116 7095 struct vnode *vp;
7117 7096 u_offset_t off;
7118 7097 anon_sync_obj_t cookie;
7119 7098
7120 7099 /*
7121 7100 * Obtain <vp, off> pair for the page, then look it up.
7122 7101 *
7123 7102 * Note that this code is willing to consider regular
7124 7103 * pages as well as anon pages. Is this appropriate here?
7125 7104 */
7126 7105 ap = NULL;
7127 7106 if (amp != NULL) {
7128 7107 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7129 7108 if (anon_array_try_enter(amp, anon_index + page,
7130 7109 &cookie)) {
7131 7110 ANON_LOCK_EXIT(&amp->a_rwlock);
7132 7111 continue;
7133 7112 }
7134 7113 ap = anon_get_ptr(amp->ahp, anon_index + page);
7135 7114 if (ap != NULL) {
7136 7115 swap_xlate(ap, &vp, &off);
7137 7116 } else {
7138 7117 vp = svd->vp;
7139 7118 off = svd->offset + ptob(page);
7140 7119 }
7141 7120 anon_array_exit(&cookie);
7142 7121 ANON_LOCK_EXIT(&amp->a_rwlock);
7143 7122 } else {
7144 7123 vp = svd->vp;
7145 7124 off = svd->offset + ptob(page);
7146 7125 }
7147 7126 if (vp == NULL) { /* untouched zfod page */
7148 7127 ASSERT(ap == NULL);
7149 7128 continue;
7150 7129 }
7151 7130
7152 7131 pp = page_lookup_nowait(vp, off, SE_SHARED);
7153 7132 if (pp == NULL)
7154 7133 continue;
7155 7134
7156 7135
7157 7136 /*
7158 7137 * Examine the page to see whether it can be tossed out,
7159 7138 * keeping track of how many we've found.
7160 7139 */
7161 7140 if (!page_tryupgrade(pp)) {
7162 7141 /*
7163 7142 * If the page has an i/o lock and no mappings,
7164 7143 * it's very likely that the page is being
7165 7144 * written out as a result of klustering.
7166 7145 * Assume this is so and take credit for it here.
7167 7146 */
7168 7147 if (!page_io_trylock(pp)) {
7169 7148 if (!hat_page_is_mapped(pp))
7170 7149 pgcnt++;
7171 7150 } else {
7172 7151 page_io_unlock(pp);
7173 7152 }
7174 7153 page_unlock(pp);
7175 7154 continue;
7176 7155 }
7177 7156 ASSERT(!page_iolock_assert(pp));
7178 7157
7179 7158
7180 7159 /*
7181 7160 * Skip if page is locked or has mappings.
7182 7161 * We don't need the page_struct_lock to look at lckcnt
7183 7162 * and cowcnt because the page is exclusive locked.
7184 7163 */
7185 7164 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
7186 7165 hat_page_is_mapped(pp)) {
7187 7166 page_unlock(pp);
7188 7167 continue;
7189 7168 }
7190 7169
7191 7170 /*
7192 7171 * dispose skips large pages so try to demote first.
7193 7172 */
7194 7173 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) {
7195 7174 page_unlock(pp);
7196 7175 /*
7197 7176 * XXX should skip the remaining page_t's of this
7198 7177 * large page.
7199 7178 */
7200 7179 continue;
7201 7180 }
7202 7181
7203 7182 ASSERT(pp->p_szc == 0);
7204 7183
7205 7184 /*
7206 7185 * No longer mapped -- we can toss it out. How
7207 7186 * we do so depends on whether or not it's dirty.
7208 7187 */
7209 7188 if (hat_ismod(pp) && pp->p_vnode) {
7210 7189 /*
7211 7190 * We must clean the page before it can be
7212 7191 * freed. Setting B_FREE will cause pvn_done
7213 7192 * to free the page when the i/o completes.
7214 7193 * XXX: This also causes it to be accounted
7215 7194 * as a pageout instead of a swap: need
7216 7195 * B_SWAPOUT bit to use instead of B_FREE.
7217 7196 *
7218 7197 * Hold the vnode before releasing the page lock
7219 7198 * to prevent it from being freed and re-used by
7220 7199 * some other thread.
7221 7200 */
7222 7201 VN_HOLD(vp);
7223 7202 page_unlock(pp);
7224 7203
7225 7204 /*
7226 7205 * Queue all i/o requests for the pageout thread
7227 7206 * to avoid saturating the pageout devices.
7228 7207 */
7229 7208 if (!queue_io_request(vp, off))
7230 7209 VN_RELE(vp);
7231 7210 } else {
7232 7211 /*
7233 7212 * The page was clean, free it.
7234 7213 *
7235 7214 * XXX: Can we ever encounter modified pages
7236 7215 * with no associated vnode here?
7237 7216 */
7238 7217 ASSERT(pp->p_vnode != NULL);
7239 7218 /*LINTED: constant in conditional context*/
7240 7219 VN_DISPOSE(pp, B_FREE, 0, kcred);
7241 7220 }
7242 7221
7243 7222 /*
7244 7223 * Credit now even if i/o is in progress.
7245 7224 */
7246 7225 pgcnt++;
7247 7226 }
7248 7227 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7249 7228
7250 7229 /*
7251 7230 * Wakeup pageout to initiate i/o on all queued requests.
7252 7231 */
7253 7232 cv_signal_pageout();
7254 7233 return (ptob(pgcnt));
7255 7234 }
7256 7235
7257 7236 /*
7258 7237 * Synchronize primary storage cache with real object in virtual memory.
7259 7238 *
7260 7239 * XXX - Anonymous pages should not be sync'ed out at all.
7261 7240 */
7262 7241 static int
7263 7242 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
7264 7243 {
7265 7244 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7266 7245 struct vpage *vpp;
7267 7246 page_t *pp;
7268 7247 u_offset_t offset;
7269 7248 struct vnode *vp;
7270 7249 u_offset_t off;
7271 7250 caddr_t eaddr;
7272 7251 int bflags;
7273 7252 int err = 0;
7274 7253 int segtype;
7275 7254 int pageprot;
7276 7255 int prot;
7277 7256 ulong_t anon_index;
7278 7257 struct anon_map *amp;
7279 7258 struct anon *ap;
7280 7259 anon_sync_obj_t cookie;
7281 7260
7282 7261 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7283 7262
7284 7263 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7285 7264
7286 7265 if (svd->softlockcnt > 0) {
7287 7266 /*
7288 7267 * If this is a shared segment, a non-zero softlockcnt
7289 7268 * means locked pages are still in use.
7290 7269 */
7291 7270 if (svd->type == MAP_SHARED) {
7292 7271 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7293 7272 return (EAGAIN);
7294 7273 }
7295 7274
7296 7275 /*
7297 7276 * flush all pages from seg cache
7298 7277 * otherwise we may deadlock in swap_putpage
7299 7278 * for B_INVAL page (4175402).
7300 7279 *
7301 7280 * Even if we grab segvn WRITER's lock
7302 7281 * here, there might be another thread which could've
7303 7282 * successfully performed lookup/insert just before
7304 7283 * we acquired the lock here. So, grabbing either
7305 7284 * lock here is not of much use. Until we devise
7306 7285 * a strategy at upper layers to solve the
7307 7286 * synchronization issues completely, we expect
7308 7287 * applications to handle this appropriately.
7309 7288 */
7310 7289 segvn_purge(seg);
7311 7290 if (svd->softlockcnt > 0) {
7312 7291 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7313 7292 return (EAGAIN);
7314 7293 }
7315 7294 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7316 7295 svd->amp->a_softlockcnt > 0) {
7317 7296 /*
7318 7297 * Try to purge this amp's entries from pcache. It will
7319 7298 * succeed only if other segments that share the amp have no
7320 7299 * outstanding softlock's.
7321 7300 */
7322 7301 segvn_purge(seg);
7323 7302 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
7324 7303 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7325 7304 return (EAGAIN);
7326 7305 }
7327 7306 }
7328 7307
7329 7308 vpp = svd->vpage;
7330 7309 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7331 7310 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
7332 7311 ((flags & MS_INVALIDATE) ? B_INVAL : 0);
7333 7312
7334 7313 if (attr) {
7335 7314 pageprot = attr & ~(SHARED|PRIVATE);
7336 7315 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;
7337 7316
7338 7317 /*
7339 7318 * We are done if the segment types don't match
7340 7319 * or if we have segment level protections and
7341 7320 * they don't match.
7342 7321 */
7343 7322 if (svd->type != segtype) {
7344 7323 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7345 7324 return (0);
7346 7325 }
7347 7326 if (vpp == NULL) {
7348 7327 if (svd->prot != pageprot) {
7349 7328 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7350 7329 return (0);
7351 7330 }
7352 7331 prot = svd->prot;
7353 7332 } else
7354 7333 vpp = &svd->vpage[seg_page(seg, addr)];
7355 7334
7356 7335 } else if (svd->vp && svd->amp == NULL &&
7357 7336 (flags & MS_INVALIDATE) == 0) {
7358 7337
7359 7338 /*
7360 7339 * No attributes, no anonymous pages and MS_INVALIDATE flag
7361 7340 * is not on, just use one big request.
7362 7341 */
7363 7342 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
7364 7343 bflags, svd->cred, NULL);
7365 7344 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7366 7345 return (err);
7367 7346 }
7368 7347
7369 7348 if ((amp = svd->amp) != NULL)
7370 7349 anon_index = svd->anon_index + seg_page(seg, addr);
7371 7350
7372 7351 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
7373 7352 ap = NULL;
7374 7353 if (amp != NULL) {
7375 7354 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7376 7355 anon_array_enter(amp, anon_index, &cookie);
7377 7356 ap = anon_get_ptr(amp->ahp, anon_index++);
7378 7357 if (ap != NULL) {
7379 7358 swap_xlate(ap, &vp, &off);
7380 7359 } else {
7381 7360 vp = svd->vp;
7382 7361 off = offset;
7383 7362 }
7384 7363 anon_array_exit(&cookie);
7385 7364 ANON_LOCK_EXIT(&amp->a_rwlock);
7386 7365 } else {
7387 7366 vp = svd->vp;
7388 7367 off = offset;
7389 7368 }
7390 7369 offset += PAGESIZE;
7391 7370
7392 7371 if (vp == NULL) /* untouched zfod page */
7393 7372 continue;
7394 7373
7395 7374 if (attr) {
7396 7375 if (vpp) {
7397 7376 prot = VPP_PROT(vpp);
7398 7377 vpp++;
7399 7378 }
7400 7379 if (prot != pageprot) {
7401 7380 continue;
7402 7381 }
7403 7382 }
7404 7383
7405 7384 /*
7406 7385 * See if any of these pages are locked -- if so, then we
7407 7386 * will have to truncate an invalidate request at the first
7408 7387 * locked one. We don't need the page_struct_lock to test
7409 7388 * as this is only advisory; even if we acquire it someone
7410 7389 * might race in and lock the page after we unlock and before
7411 7390 * we do the PUTPAGE, then PUTPAGE simply does nothing.
7412 7391 */
7413 7392 if (flags & MS_INVALIDATE) {
7414 7393 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
7415 7394 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
7416 7395 page_unlock(pp);
7417 7396 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7418 7397 return (EBUSY);
7419 7398 }
7420 7399 if (ap != NULL && pp->p_szc != 0 &&
7421 7400 page_tryupgrade(pp)) {
7422 7401 if (pp->p_lckcnt == 0 &&
7423 7402 pp->p_cowcnt == 0) {
7424 7403 /*
7425 7404 * swapfs VN_DISPOSE() won't
7426 7405 * invalidate large pages.
7427 7406 * Attempt to demote.
7428 7407 * XXX can't help it if it
7429 7408 * fails. But for swapfs
7430 7409 * pages it is no big deal.
7431 7410 */
7432 7411 (void) page_try_demote_pages(
7433 7412 pp);
7434 7413 }
7435 7414 }
7436 7415 page_unlock(pp);
7437 7416 }
7438 7417 } else if (svd->type == MAP_SHARED && amp != NULL) {
7439 7418 /*
7440 7419 * Avoid writing out to disk ISM's large pages
7441 7420 * because segspt_free_pages() relies on NULL an_pvp
7442 7421 * of anon slots of such pages.
7443 7422 */
7444 7423
7445 7424 ASSERT(svd->vp == NULL);
7446 7425 /*
7447 7426 * swapfs uses page_lookup_nowait if not freeing or
7448 7427 * invalidating and skips a page if
7449 7428 * page_lookup_nowait returns NULL.
7450 7429 */
7451 7430 pp = page_lookup_nowait(vp, off, SE_SHARED);
7452 7431 if (pp == NULL) {
7453 7432 continue;
7454 7433 }
7455 7434 if (pp->p_szc != 0) {
7456 7435 page_unlock(pp);
7457 7436 continue;
7458 7437 }
7459 7438
7460 7439 /*
7461 7440 * Note ISM pages are created large so (vp, off)'s
7462 7441 * page cannot suddenly become large after we unlock
7463 7442 * pp.
7464 7443 */
7465 7444 page_unlock(pp);
7466 7445 }
7467 7446 /*
7468 7447 * XXX - Should ultimately try to kluster
7469 7448 * calls to VOP_PUTPAGE() for performance.
7470 7449 */
7471 7450 VN_HOLD(vp);
7472 7451 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
7473 7452 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
7474 7453 svd->cred, NULL);
7475 7454
7476 7455 VN_RELE(vp);
7477 7456 if (err)
7478 7457 break;
7479 7458 }
7480 7459 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7481 7460 return (err);
7482 7461 }
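
Editorial sketch: the bflags translation near the top of segvn_sync() is the whole story of how msync(3C) semantics reach VOP_PUTPAGE(): MS_ASYNC becomes B_ASYNC and MS_INVALIDATE becomes B_INVAL. A standalone illustration with placeholder values for the buffer flags (the kernel's real B_* values come from <sys/buf.h>):

#include <sys/types.h>
#include <sys/mman.h>

/* Placeholder values; the real B_ASYNC/B_INVAL live in <sys/buf.h>. */
#define XB_ASYNC        0x01
#define XB_INVAL        0x02

/*
 * Illustrative only: the same flag translation segvn_sync() performs
 * before calling VOP_PUTPAGE().
 */
static int
sync_bflags(uint_t flags)
{
        return (((flags & MS_ASYNC) ? XB_ASYNC : 0) |
            ((flags & MS_INVALIDATE) ? XB_INVAL : 0));
}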
7483 7462
7484 7463 /*
7485 7464 * Determine if we have data corresponding to pages in the
7486 7465 * primary storage virtual memory cache (i.e., "in core").
7487 7466 */
7488 7467 static size_t
7489 7468 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
7490 7469 {
7491 7470 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7492 7471 struct vnode *vp, *avp;
7493 7472 u_offset_t offset, aoffset;
7494 7473 size_t p, ep;
7495 7474 int ret;
7496 7475 struct vpage *vpp;
7497 7476 page_t *pp;
7498 7477 uint_t start;
7499 7478 struct anon_map *amp; /* XXX - for locknest */
7500 7479 struct anon *ap;
7501 7480 uint_t attr;
7502 7481 anon_sync_obj_t cookie;
7503 7482
7504 7483 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7505 7484
7506 7485 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7507 7486 if (svd->amp == NULL && svd->vp == NULL) {
7508 7487 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7509 7488 bzero(vec, btopr(len));
7510 7489 return (len); /* no anonymous pages created yet */
7511 7490 }
7512 7491
7513 7492 p = seg_page(seg, addr);
7514 7493 ep = seg_page(seg, addr + len);
7515 7494 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;
7516 7495
7517 7496 amp = svd->amp;
7518 7497 for (; p < ep; p++, addr += PAGESIZE) {
7519 7498 vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
7520 7499 ret = start;
7521 7500 ap = NULL;
7522 7501 avp = NULL;
7523 7502 /* Grab the vnode/offset for the anon slot */
7524 7503 if (amp != NULL) {
7525 7504 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7526 7505 anon_array_enter(amp, svd->anon_index + p, &cookie);
7527 7506 ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
7528 7507 if (ap != NULL) {
7529 7508 swap_xlate(ap, &avp, &aoffset);
7530 7509 }
7531 7510 anon_array_exit(&cookie);
7532 7511 ANON_LOCK_EXIT(&amp->a_rwlock);
7533 7512 }
7534 7513 if ((avp != NULL) && page_exists(avp, aoffset)) {
7535 7514 /* A page exists for the anon slot */
7536 7515 ret |= SEG_PAGE_INCORE;
7537 7516
7538 7517 /*
7539 7518 * If page is mapped and writable
7540 7519 */
7541 7520 attr = (uint_t)0;
7542 7521 if ((hat_getattr(seg->s_as->a_hat, addr,
7543 7522 &attr) != -1) && (attr & PROT_WRITE)) {
7544 7523 ret |= SEG_PAGE_ANON;
7545 7524 }
7546 7525 /*
7547 7526 * Don't get page_struct lock for lckcnt and cowcnt,
7548 7527 * since this is purely advisory.
7549 7528 */
7550 7529 if ((pp = page_lookup_nowait(avp, aoffset,
7551 7530 SE_SHARED)) != NULL) {
7552 7531 if (pp->p_lckcnt)
7553 7532 ret |= SEG_PAGE_SOFTLOCK;
7554 7533 if (pp->p_cowcnt)
7555 7534 ret |= SEG_PAGE_HASCOW;
7556 7535 page_unlock(pp);
7557 7536 }
7558 7537 }
7559 7538
7560 7539 /* Gather vnode statistics */
7561 7540 vp = svd->vp;
7562 7541 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7563 7542
7564 7543 if (vp != NULL) {
7565 7544 /*
7566 7545 * Try to obtain a "shared" lock on the page
7567 7546 * without blocking. If this fails, determine
7568 7547 * if the page is in memory.
7569 7548 */
7570 7549 pp = page_lookup_nowait(vp, offset, SE_SHARED);
7571 7550 if ((pp == NULL) && (page_exists(vp, offset))) {
7572 7551 /* Page is incore, and is named */
7573 7552 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7574 7553 }
7575 7554 /*
7576 7555 * Don't get page_struct lock for lckcnt and cowcnt,
7577 7556 * since this is purely advisory.
7578 7557 */
7579 7558 if (pp != NULL) {
7580 7559 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7581 7560 if (pp->p_lckcnt)
7582 7561 ret |= SEG_PAGE_SOFTLOCK;
7583 7562 if (pp->p_cowcnt)
7584 7563 ret |= SEG_PAGE_HASCOW;
7585 7564 page_unlock(pp);
7586 7565 }
7587 7566 }
7588 7567
7589 7568 /* Gather virtual page information */
7590 7569 if (vpp) {
7591 7570 if (VPP_ISPPLOCK(vpp))
7592 7571 ret |= SEG_PAGE_LOCKED;
7593 7572 vpp++;
7594 7573 }
7595 7574
7596 7575 *vec++ = (char)ret;
7597 7576 }
7598 7577 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7599 7578 return (len);
7600 7579 }
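
Editorial sketch: segvn_incore() is the segment driver's half of mincore(2); it fills one status byte per page. A hedged userland example of the consumer side, assuming the Solaris mincore(caddr_t, size_t, char *) prototype:

#include <sys/types.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Illustrative only: count resident pages in [addr, addr + len)
 * using the per-page vector that segvn_incore() ultimately fills.
 * Returns -1 on failure.
 */
static ssize_t
pages_resident(void *addr, size_t len)
{
        long pgsz = sysconf(_SC_PAGESIZE);
        size_t npages, i;
        ssize_t resident = 0;
        char *vec;

        if (pgsz <= 0)
                return (-1);
        npages = (len + pgsz - 1) / pgsz;
        vec = malloc(npages);
        if (vec == NULL || mincore((caddr_t)addr, len, vec) != 0) {
                free(vec);
                return (-1);
        }
        for (i = 0; i < npages; i++) {
                if (vec[i] & 1)         /* low bit: page is in core */
                        resident++;
        }
        free(vec);
        return (resident);
}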
7601 7580
7602 7581 /*
7603 7582 * Statement for p_cowcnts/p_lckcnts.
7604 7583 *
7605 7584 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
7606 7585 * irrespective of the following factors or anything else:
7607 7586 *
7608 7587 * (1) anon slots are populated or not
7609 7588 * (2) cow is broken or not
7610 7589 * (3) refcnt on ap is 1 or greater than 1
7611 7590 *
7612 7591 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
7613 7592 * and munlock.
7614 7593 *
7615 7594 *
7616 7595 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
7617 7596 *
7618 7597 * if vpage has PROT_WRITE
7619 7598 * transfer cowcnt on the oldpage -> cowcnt on the newpage
7620 7599 * else
7621 7600 * transfer lckcnt on the oldpage -> lckcnt on the newpage
7622 7601 *
7623 7602 * During copy-on-write, decrement p_cowcnt on the oldpage and increment
7624 7603 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
7625 7604 *
7626 7605 * We may also break COW if softlocking on read access in the physio case.
7627 7606 * In this case, vpage may not have PROT_WRITE. So, we need to decrement
7628 7607 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
7629 7608 * vpage doesn't have PROT_WRITE.
7630 7609 *
7631 7610 *
7632 7611 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
7633 7612 *
7634 7613 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
7635 7614 * increment p_lckcnt by calling page_subclaim() which takes care of
7636 7615 * availrmem accounting and p_lckcnt overflow.
7637 7616 *
7638 7617 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
7639 7618 * increment p_cowcnt by calling page_addclaim() which takes care of
7640 7619 * availrmem availability and p_cowcnt overflow.
7641 7620 */
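
Editorial sketch: the transfer rule described above fits in a few lines. A hedged illustration with hypothetical counters standing in for p_cowcnt/p_lckcnt (not the kernel's page_t):

#include <sys/types.h>
#include <sys/mman.h>

/* Hypothetical page with just the two claim counters; illustrative. */
struct claim_page {
        uint_t  cowcnt;
        uint_t  lckcnt;
};

/*
 * On a COW break, the claim follows the vpage's write bit: writable
 * vpages move a cow claim, read-only (physio softlock) vpages move a
 * lock claim from the old page to its new private copy.
 */
static void
claim_transfer(struct claim_page *oldpp, struct claim_page *newpp,
    uint_t vpprot)
{
        if (vpprot & PROT_WRITE) {
                oldpp->cowcnt--;
                newpp->cowcnt++;
        } else {
                oldpp->lckcnt--;
                newpp->lckcnt++;
        }
}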
7642 7621
7643 7622 /*
7644 7623 * Lock down (or unlock) pages mapped by this segment.
7645 7624 *
7646 7625 * XXX only creates PAGESIZE pages if anon slots are not initialized.
7647 7626 * At fault time they will be relocated into larger pages.
7648 7627 */
7649 7628 static int
7650 7629 segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
7651 7630 int attr, int op, ulong_t *lockmap, size_t pos)
7652 7631 {
7653 7632 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7654 7633 struct vpage *vpp;
7655 7634 struct vpage *evp;
7656 7635 page_t *pp;
7657 7636 u_offset_t offset;
7658 7637 u_offset_t off;
7659 7638 int segtype;
7660 7639 int pageprot;
7661 7640 int claim;
7662 7641 struct vnode *vp;
7663 7642 ulong_t anon_index;
7664 7643 struct anon_map *amp;
7665 7644 struct anon *ap;
7666 7645 struct vattr va;
7667 7646 anon_sync_obj_t cookie;
7668 7647 struct kshmid *sp = NULL;
7669 7648 struct proc *p = curproc;
7670 7649 kproject_t *proj = NULL;
7671 7650 int chargeproc = 1;
7672 7651 size_t locked_bytes = 0;
7673 7652 size_t unlocked_bytes = 0;
7674 7653 int err = 0;
7675 7654
7676 7655 /*
7677 7656 * Hold write lock on address space because may split or concatenate
7678 7657 * segments
7679 7658 */
7680 7659 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7681 7660
7682 7661 /*
7683 7662 * If this is a shm, use shm's project and zone, else use
7684 7663 * project and zone of calling process
7685 7664 */
7686 7665
7687 7666 /* Determine if this segment backs a sysV shm */
7688 7667 if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7689 7668 ASSERT(svd->type == MAP_SHARED);
7690 7669 ASSERT(svd->tr_state == SEGVN_TR_OFF);
7691 7670 sp = svd->amp->a_sp;
7692 7671 proj = sp->shm_perm.ipc_proj;
7693 7672 chargeproc = 0;
7694 7673 }
7695 7674
7696 7675 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7697 7676 if (attr) {
7698 7677 pageprot = attr & ~(SHARED|PRIVATE);
7699 7678 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE;
7700 7679
7701 7680 /*
7702 7681 * We are done if the segment types don't match
7703 7682 * or if we have segment level protections and
7704 7683 * they don't match.
7705 7684 */
7706 7685 if (svd->type != segtype) {
7707 7686 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7708 7687 return (0);
7709 7688 }
7710 7689 if (svd->pageprot == 0 && svd->prot != pageprot) {
7711 7690 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7712 7691 return (0);
7713 7692 }
7714 7693 }
7715 7694
7716 7695 if (op == MC_LOCK) {
7717 7696 if (svd->tr_state == SEGVN_TR_INIT) {
7718 7697 svd->tr_state = SEGVN_TR_OFF;
7719 7698 } else if (svd->tr_state == SEGVN_TR_ON) {
7720 7699 ASSERT(svd->amp != NULL);
7721 7700 segvn_textunrepl(seg, 0);
7722 7701 ASSERT(svd->amp == NULL &&
7723 7702 svd->tr_state == SEGVN_TR_OFF);
7724 7703 }
7725 7704 }
7726 7705
7727 7706 /*
7728 7707 * If we're locking, then we must create a vpage structure if
7729 7708 * none exists. If we're unlocking, then check to see if there
7730 7709 * is a vpage -- if not, then we could not have locked anything.
7731 7710 */
7732 7711
7733 7712 if ((vpp = svd->vpage) == NULL) {
7734 7713 if (op == MC_LOCK) {
7735 7714 segvn_vpage(seg);
7736 7715 if (svd->vpage == NULL) {
7737 7716 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7738 7717 return (ENOMEM);
7739 7718 }
7740 7719 } else {
7741 7720 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7742 7721 return (0);
7743 7722 }
7744 7723 }
7745 7724
7746 7725 /*
7747 7726 * The anonymous data vector (i.e., previously
7748 7727 * unreferenced mapping to swap space) can be allocated
7749 7728 * by lazily testing for its existence.
7750 7729 */
7751 7730 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
7752 7731 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
7753 7732 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
7754 7733 svd->amp->a_szc = seg->s_szc;
7755 7734 }
7756 7735
7757 7736 if ((amp = svd->amp) != NULL) {
7758 7737 anon_index = svd->anon_index + seg_page(seg, addr);
7759 7738 }
7760 7739
7761 7740 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7762 7741 evp = &svd->vpage[seg_page(seg, addr + len)];
7763 7742
7764 7743 if (sp != NULL)
7765 7744 mutex_enter(&sp->shm_mlock);
7766 7745
7767 7746 /* determine number of unlocked bytes in range for lock operation */
7768 7747 if (op == MC_LOCK) {
7769 7748
7770 7749 if (sp == NULL) {
7771 7750 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7772 7751 vpp++) {
7773 7752 if (!VPP_ISPPLOCK(vpp))
7774 7753 unlocked_bytes += PAGESIZE;
7775 7754 }
7776 7755 } else {
7777 7756 ulong_t i_idx, i_edx;
7778 7757 anon_sync_obj_t i_cookie;
7779 7758 struct anon *i_ap;
7780 7759 struct vnode *i_vp;
7781 7760 u_offset_t i_off;
7782 7761
7783 7762 /* Only count sysV pages once for locked memory */
7784 7763 i_edx = svd->anon_index + seg_page(seg, addr + len);
7785 7764 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7786 7765 for (i_idx = anon_index; i_idx < i_edx; i_idx++) {
7787 7766 anon_array_enter(amp, i_idx, &i_cookie);
7788 7767 i_ap = anon_get_ptr(amp->ahp, i_idx);
7789 7768 if (i_ap == NULL) {
7790 7769 unlocked_bytes += PAGESIZE;
7791 7770 anon_array_exit(&i_cookie);
7792 7771 continue;
7793 7772 }
7794 7773 swap_xlate(i_ap, &i_vp, &i_off);
7795 7774 anon_array_exit(&i_cookie);
7796 7775 pp = page_lookup(i_vp, i_off, SE_SHARED);
7797 7776 if (pp == NULL) {
7798 7777 unlocked_bytes += PAGESIZE;
7799 7778 continue;
7800 7779 } else if (pp->p_lckcnt == 0)
7801 7780 unlocked_bytes += PAGESIZE;
7802 7781 page_unlock(pp);
7803 7782 }
7804 7783 ANON_LOCK_EXIT(&amp->a_rwlock);
7805 7784 }
7806 7785
7807 7786 mutex_enter(&p->p_lock);
7808 7787 err = rctl_incr_locked_mem(p, proj, unlocked_bytes,
7809 7788 chargeproc);
7810 7789 mutex_exit(&p->p_lock);
7811 7790
7812 7791 if (err) {
7813 7792 if (sp != NULL)
7814 7793 mutex_exit(&sp->shm_mlock);
7815 7794 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7816 7795 return (err);
7817 7796 }
7818 7797 }
7819 7798 /*
7820 7799 * Loop over all pages in the range. Process if we're locking and
7821 7800 * page has not already been locked in this mapping; or if we're
7822 7801 * unlocking and the page has been locked.
7823 7802 */
7824 7803 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7825 7804 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
7826 7805 if ((attr == 0 || VPP_PROT(vpp) == pageprot) &&
7827 7806 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) ||
7828 7807 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) {
7829 7808
7830 7809 if (amp != NULL)
7831 7810 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7832 7811 /*
7833 7812 * If this isn't a MAP_NORESERVE segment and
7834 7813 * we're locking, allocate anon slots if they
7835 7814 * don't exist. The page is brought in later on.
7836 7815 */
7837 7816 if (op == MC_LOCK && svd->vp == NULL &&
7838 7817 ((svd->flags & MAP_NORESERVE) == 0) &&
7839 7818 amp != NULL &&
7840 7819 ((ap = anon_get_ptr(amp->ahp, anon_index))
7841 7820 == NULL)) {
7842 7821 anon_array_enter(amp, anon_index, &cookie);
7843 7822
7844 7823 if ((ap = anon_get_ptr(amp->ahp,
7845 7824 anon_index)) == NULL) {
7846 7825 pp = anon_zero(seg, addr, &ap,
7847 7826 svd->cred);
7848 7827 if (pp == NULL) {
7849 7828 anon_array_exit(&cookie);
7850 7829 ANON_LOCK_EXIT(&amp->a_rwlock);
7851 7830 err = ENOMEM;
7852 7831 goto out;
7853 7832 }
7854 7833 ASSERT(anon_get_ptr(amp->ahp,
7855 7834 anon_index) == NULL);
7856 7835 (void) anon_set_ptr(amp->ahp,
7857 7836 anon_index, ap, ANON_SLEEP);
7858 7837 page_unlock(pp);
7859 7838 }
7860 7839 anon_array_exit(&cookie);
7861 7840 }
7862 7841
7863 7842 /*
7864 7843 * Get name for page, accounting for
7865 7844 * existence of private copy.
7866 7845 */
7867 7846 ap = NULL;
7868 7847 if (amp != NULL) {
7869 7848 anon_array_enter(amp, anon_index, &cookie);
7870 7849 ap = anon_get_ptr(amp->ahp, anon_index);
7871 7850 if (ap != NULL) {
7872 7851 swap_xlate(ap, &vp, &off);
7873 7852 } else {
7874 7853 if (svd->vp == NULL &&
7875 7854 (svd->flags & MAP_NORESERVE)) {
7876 7855 anon_array_exit(&cookie);
7877 7856 ANON_LOCK_EXIT(&amp->a_rwlock);
7878 7857 continue;
7879 7858 }
7880 7859 vp = svd->vp;
7881 7860 off = offset;
7882 7861 }
7883 7862 if (op != MC_LOCK || ap == NULL) {
7884 7863 anon_array_exit(&cookie);
7885 7864 ANON_LOCK_EXIT(&amp->a_rwlock);
7886 7865 }
7887 7866 } else {
7888 7867 vp = svd->vp;
7889 7868 off = offset;
7890 7869 }
7891 7870
7892 7871 /*
7893 7872 * Get page frame. It's ok if the page is
7894 7873 * not available when we're unlocking, as this
7895 7874 * may simply mean that a page we locked got
7896 7875 * truncated out of existence after we locked it.
7897 7876 *
7898 7877 * Invoke VOP_GETPAGE() to obtain the page struct
7899 7878 * since we may need to read it from disk if its
7900 7879 * been paged out.
7901 7880 */
7902 7881 if (op != MC_LOCK)
7903 7882 pp = page_lookup(vp, off, SE_SHARED);
7904 7883 else {
7905 7884 page_t *pl[1 + 1];
7906 7885 int error;
7907 7886
7908 7887 ASSERT(vp != NULL);
7909 7888
7910 7889 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
7911 7890 (uint_t *)NULL, pl, PAGESIZE, seg, addr,
7912 7891 S_OTHER, svd->cred, NULL);
7913 7892
7914 7893 if (error && ap != NULL) {
7915 7894 anon_array_exit(&cookie);
7916 7895 ANON_LOCK_EXIT(&amp->a_rwlock);
7917 7896 }
7918 7897
7919 7898 /*
7920 7899 * If the error is EDEADLK then we must bounce
7921 7900 * up and drop all vm subsystem locks and then
7922 7901 * retry the operation later
7923 7902 * This behavior is a temporary measure because
7924 7903 * ufs/sds logging is badly designed and will
7925 7904 * deadlock if we don't allow this bounce to
7926 7905 * happen. The real solution is to re-design
7927 7906 * the logging code to work properly. See bug
7928 7907 * 4125102 for details of the problem.
7929 7908 */
7930 7909 if (error == EDEADLK) {
7931 7910 err = error;
7932 7911 goto out;
7933 7912 }
7934 7913 /*
7935 7914 * Quit if we fail to fault in the page. Treat
7936 7915 * the failure as an error, unless the addr
7937 7916 * is mapped beyond the end of a file.
7938 7917 */
7939 7918 if (error && svd->vp) {
7940 7919 va.va_mask = AT_SIZE;
7941 7920 if (VOP_GETATTR(svd->vp, &va, 0,
7942 7921 svd->cred, NULL) != 0) {
7943 7922 err = EIO;
7944 7923 goto out;
7945 7924 }
7946 7925 if (btopr(va.va_size) >=
7947 7926 btopr(off + 1)) {
7948 7927 err = EIO;
7949 7928 goto out;
7950 7929 }
7951 7930 goto out;
7952 7931
7953 7932 } else if (error) {
7954 7933 err = EIO;
7955 7934 goto out;
7956 7935 }
7957 7936 pp = pl[0];
7958 7937 ASSERT(pp != NULL);
7959 7938 }
7960 7939
7961 7940 /*
7962 7941 * See Statement at the beginning of this routine.
7963 7942 *
7964 7943 * claim is always set if MAP_PRIVATE and PROT_WRITE
7965 7944 * irrespective of following factors:
7966 7945 *
7967 7946 * (1) anon slots are populated or not
7968 7947 * (2) cow is broken or not
7969 7948 * (3) refcnt on ap is 1 or greater than 1
7970 7949 *
7971 7950 * See 4140683 for details
7972 7951 */
7973 7952 claim = ((VPP_PROT(vpp) & PROT_WRITE) &&
7974 7953 (svd->type == MAP_PRIVATE));
7975 7954
7976 7955 /*
7977 7956 * Perform page-level operation appropriate to
7978 7957 * operation. If locking, undo the SOFTLOCK
7979 7958 * performed to bring the page into memory
7980 7959 * after setting the lock. If unlocking,
7981 7960 * and no page was found, account for the claim
7982 7961 * separately.
7983 7962 */
7984 7963 if (op == MC_LOCK) {
7985 7964 int ret = 1; /* Assume success */
7986 7965
7987 7966 ASSERT(!VPP_ISPPLOCK(vpp));
7988 7967
7989 7968 ret = page_pp_lock(pp, claim, 0);
7990 7969 if (ap != NULL) {
7991 7970 if (ap->an_pvp != NULL) {
7992 7971 anon_swap_free(ap, pp);
7993 7972 }
7994 7973 anon_array_exit(&cookie);
7995 7974 ANON_LOCK_EXIT(&amp->a_rwlock);
7996 7975 }
7997 7976 if (ret == 0) {
7998 7977 /* locking page failed */
7999 7978 page_unlock(pp);
8000 7979 err = EAGAIN;
8001 7980 goto out;
8002 7981 }
8003 7982 VPP_SETPPLOCK(vpp);
8004 7983 if (sp != NULL) {
8005 7984 if (pp->p_lckcnt == 1)
8006 7985 locked_bytes += PAGESIZE;
8007 7986 } else
8008 7987 locked_bytes += PAGESIZE;
8009 7988
8010 7989 if (lockmap != (ulong_t *)NULL)
8011 7990 BT_SET(lockmap, pos);
8012 7991
8013 7992 page_unlock(pp);
8014 7993 } else {
8015 7994 ASSERT(VPP_ISPPLOCK(vpp));
8016 7995 if (pp != NULL) {
8017 7996 /* sysV pages should be locked */
8018 7997 ASSERT(sp == NULL || pp->p_lckcnt > 0);
8019 7998 page_pp_unlock(pp, claim, 0);
8020 7999 if (sp != NULL) {
8021 8000 if (pp->p_lckcnt == 0)
8022 8001 unlocked_bytes
8023 8002 += PAGESIZE;
8024 8003 } else
8025 8004 unlocked_bytes += PAGESIZE;
8026 8005 page_unlock(pp);
8027 8006 } else {
8028 8007 ASSERT(sp == NULL);
8029 8008 unlocked_bytes += PAGESIZE;
8030 8009 }
8031 8010 VPP_CLRPPLOCK(vpp);
8032 8011 }
8033 8012 }
8034 8013 }
8035 8014 out:
8036 8015 if (op == MC_LOCK) {
8037 8016 /* Credit back bytes that did not get locked */
8038 8017 if ((unlocked_bytes - locked_bytes) > 0) {
8039 8018 if (proj == NULL)
8040 8019 mutex_enter(&p->p_lock);
8041 8020 rctl_decr_locked_mem(p, proj,
8042 8021 (unlocked_bytes - locked_bytes), chargeproc);
8043 8022 if (proj == NULL)
8044 8023 mutex_exit(&p->p_lock);
8045 8024 }
8046 8025
8047 8026 } else {
8048 8027 /* Account bytes that were unlocked */
8049 8028 if (unlocked_bytes > 0) {
8050 8029 if (proj == NULL)
8051 8030 mutex_enter(&p->p_lock);
8052 8031 rctl_decr_locked_mem(p, proj, unlocked_bytes,
8053 8032 chargeproc);
8054 8033 if (proj == NULL)
8055 8034 mutex_exit(&p->p_lock);
8056 8035 }
8057 8036 }
8058 8037 if (sp != NULL)
8059 8038 mutex_exit(&sp->shm_mlock);
8060 8039 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8061 8040
8062 8041 return (err);
8063 8042 }
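
Editorial sketch: the lockmap handed to segvn_lockop() records exactly which pages this call locked, so that the matching unlock can skip pages locked by other mappings; BT_SET() from <sys/bitmap.h> sets one bit per page. A portable illustration of the same bit operation:

#include <stddef.h>
#include <limits.h>

/*
 * Illustrative only: what BT_SET(lockmap, pos) does above -- mark
 * bit pos in an array of unsigned longs.
 */
static void
bit_set(unsigned long *map, size_t pos)
{
        size_t nbits = CHAR_BIT * sizeof (unsigned long);

        map[pos / nbits] |= 1UL << (pos % nbits);
}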
8064 8043
8065 8044 /*
8066 8045 * Set advice from user for specified pages
8067 8046 * There are 9 types of advice:
8068 8047 * MADV_NORMAL - Normal (default) behavior (whatever that is)
8069 8048 * MADV_RANDOM - Random page references
8070 8049 * do not allow readahead or 'klustering'
8071 8050 * MADV_SEQUENTIAL - Sequential page references
8072 8051 * Pages previous to the one currently being
8073 8052 * accessed (determined by fault) are 'not needed'
8074 8053 * and are freed immediately
8075 8054 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl)
8076 8055 * MADV_DONTNEED - Pages are not needed (synced out in mctl)
8077 8056 * MADV_FREE - Contents can be discarded
8078 8057 * MADV_ACCESS_DEFAULT- Default access
8079 8058 * MADV_ACCESS_LWP - Next LWP will access heavily
8080 8059 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily
8081 8060 */
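
Editorial sketch: it may help to see how these advice values arrive here: madvise(3C) passes the advice straight through as behav. A minimal userland example (illustrative; the Solaris prototype takes a caddr_t):

#include <sys/types.h>
#include <sys/mman.h>

/*
 * Illustrative only: a sequential-scan hint on an existing mapping
 * reaches segvn_advise() with behav == MADV_SEQUENTIAL.
 */
static int
advise_sequential(void *addr, size_t len)
{
        return (madvise((caddr_t)addr, len, MADV_SEQUENTIAL));
}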
8082 8061 static int
8083 8062 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8084 8063 {
8085 8064 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8086 8065 size_t page;
8087 8066 int err = 0;
8088 8067 int already_set;
8089 8068 struct anon_map *amp;
8090 8069 ulong_t anon_index;
8091 8070 struct seg *next;
8092 8071 lgrp_mem_policy_t policy;
8093 8072 struct seg *prev;
8094 8073 struct vnode *vp;
8095 8074
8096 8075 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8097 8076
8098 8077 /*
8099 8078 * In case of MADV_FREE, we won't be modifying any segment private
8100 8079 * data structures; so, we only need to grab READER's lock
8101 8080 */
8102 8081 if (behav != MADV_FREE) {
8103 8082 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8104 8083 if (svd->tr_state != SEGVN_TR_OFF) {
8105 8084 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8106 8085 return (0);
8107 8086 }
8108 8087 } else {
8109 8088 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8110 8089 }
8111 8090
8112 8091 /*
8113 8092 * Large pages are assumed to be only turned on when accesses to the
8114 8093 * segment's address range have spatial and temporal locality. That
8115 8094 * justifies ignoring MADV_SEQUENTIAL for large page segments.
8116 8095 * Also, ignore advice affecting lgroup memory allocation
8117 8096 * if we don't need to do lgroup optimizations on this system
8118 8097 */
8119 8098
8120 8099 if ((behav == MADV_SEQUENTIAL &&
8121 8100 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
8122 8101 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
8123 8102 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
8124 8103 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8125 8104 return (0);
8126 8105 }
8127 8106
8128 8107 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
8129 8108 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
8130 8109 /*
8131 8110 * Since we are going to unload hat mappings
8132 8111 * we first have to flush the cache. Otherwise
8133 8112 * this might lead to system panic if another
8134 8113 * thread is doing physio on the range whose
8135 8114 * mappings are unloaded by madvise(3C).
8136 8115 */
8137 8116 if (svd->softlockcnt > 0) {
8138 8117 /*
8139 8118 * If this is a shared segment, a non-zero softlockcnt
8140 8119 * means locked pages are still in use.
8141 8120 */
8142 8121 if (svd->type == MAP_SHARED) {
8143 8122 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8144 8123 return (EAGAIN);
8145 8124 }
8146 8125 /*
8147 8126 * Since we do have the segvn writers lock
8148 8127 * nobody can fill the cache with entries
8149 8128 * belonging to this seg during the purge.
8150 8129 * The flush either succeeds or we still
8151 8130 * have pending I/Os. In the latter case,
8152 8131 * madvise(3C) fails.
8153 8132 */
8154 8133 segvn_purge(seg);
8155 8134 if (svd->softlockcnt > 0) {
8156 8135 /*
8157 8136 * Since madvise(3C) is advisory and
8158 8137 * it's not part of UNIX98, madvise(3C)
8159 8138 * failure here doesn't cause any hardship.
8160 8139 * Note that we don't block in "as" layer.
8161 8140 */
8162 8141 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8163 8142 return (EAGAIN);
8164 8143 }
8165 8144 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
8166 8145 svd->amp->a_softlockcnt > 0) {
8167 8146 /*
8168 8147 * Try to purge this amp's entries from pcache. It
8169 8148 * will succeed only if other segments that share the
8170 8149 * amp have no outstanding softlock's.
8171 8150 */
8172 8151 segvn_purge(seg);
8173 8152 }
8174 8153 }
8175 8154
8176 8155 amp = svd->amp;
8177 8156 vp = svd->vp;
8178 8157 if (behav == MADV_FREE) {
8179 8158 /*
8180 8159 * MADV_FREE is not supported for segments with
8181 8160 * underlying object; if anonmap is NULL, anon slots
8182 8161 * are not yet populated and there is nothing for
8183 8162 * us to do. As MADV_FREE is advisory, we don't
8184 8163 * return error in either case.
8185 8164 */
8186 8165 if (vp != NULL || amp == NULL) {
8187 8166 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8188 8167 return (0);
8189 8168 }
8190 8169
8191 8170 segvn_purge(seg);
8192 8171
8193 8172 page = seg_page(seg, addr);
8194 8173 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8195 8174 anon_disclaim(amp, svd->anon_index + page, len);
8196 8175 ANON_LOCK_EXIT(&amp->a_rwlock);
8197 8176 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8198 8177 return (0);
8199 8178 }
8200 8179
8201 8180 /*
8202 8181 * If advice is to be applied to the entire segment,
8203 8182 * use the advice field in the seg_data structure;
8204 8183 * otherwise use the appropriate vpage entry.
8205 8184 */
8206 8185 if ((addr == seg->s_base) && (len == seg->s_size)) {
8207 8186 switch (behav) {
8208 8187 case MADV_ACCESS_LWP:
8209 8188 case MADV_ACCESS_MANY:
8210 8189 case MADV_ACCESS_DEFAULT:
8211 8190 /*
8212 8191 * Set memory allocation policy for this segment
8213 8192 */
8214 8193 policy = lgrp_madv_to_policy(behav, len, svd->type);
8215 8194 if (svd->type == MAP_SHARED)
8216 8195 already_set = lgrp_shm_policy_set(policy, amp,
8217 8196 svd->anon_index, vp, svd->offset, len);
8218 8197 else {
8219 8198 /*
8220 8199 * For private memory, need writers lock on
8221 8200 * address space because the segment may be
8222 8201 * split or concatenated when changing policy
8223 8202 */
8224 8203 if (AS_READ_HELD(seg->s_as,
8225 8204 &seg->s_as->a_lock)) {
8226 8205 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8227 8206 return (IE_RETRY);
8228 8207 }
8229 8208
8230 8209 already_set = lgrp_privm_policy_set(policy,
8231 8210 &svd->policy_info, len);
8232 8211 }
8233 8212
8234 8213 /*
8235 8214 * If policy set already and it shouldn't be reapplied,
8236 8215 * don't do anything.
8237 8216 */
8238 8217 if (already_set &&
8239 8218 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8240 8219 break;
8241 8220
8242 8221 /*
8243 8222 * Mark any existing pages in given range for
8244 8223 * migration
8245 8224 */
8246 8225 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8247 8226 vp, svd->offset, 1);
8248 8227
8249 8228 /*
8250 8229 * If same policy set already or this is a shared
8251 8230 * memory segment, don't need to try to concatenate
8252 8231 * segment with adjacent ones.
8253 8232 */
8254 8233 if (already_set || svd->type == MAP_SHARED)
8255 8234 break;
8256 8235
8257 8236 /*
8258 8237 * Try to concatenate this segment with previous
8259 8238 * one and next one, since we changed policy for
8260 8239 * this one and it may be compatible with adjacent
8261 8240 * ones now.
8262 8241 */
8263 8242 prev = AS_SEGPREV(seg->s_as, seg);
8264 8243 next = AS_SEGNEXT(seg->s_as, seg);
8265 8244
8266 8245 if (next && next->s_ops == &segvn_ops &&
8267 8246 addr + len == next->s_base)
8268 8247 (void) segvn_concat(seg, next, 1);
8269 8248
8270 8249 if (prev && prev->s_ops == &segvn_ops &&
8271 8250 addr == prev->s_base + prev->s_size) {
8272 8251 /*
8273 8252 * Drop lock for private data of current
8274 8253 * segment before concatenating (deleting) it
8275 8254 * and return IE_REATTACH to tell as_ctl() that
8276 8255 * current segment has changed
8277 8256 */
8278 8257 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8279 8258 if (!segvn_concat(prev, seg, 1))
8280 8259 err = IE_REATTACH;
8281 8260
8282 8261 return (err);
8283 8262 }
8284 8263 break;
8285 8264
8286 8265 case MADV_SEQUENTIAL:
8287 8266 /*
8288 8267 * unloading mapping guarantees
8289 8268 * detection in segvn_fault
8290 8269 */
8291 8270 ASSERT(seg->s_szc == 0);
8292 8271 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8293 8272 hat_unload(seg->s_as->a_hat, addr, len,
8294 8273 HAT_UNLOAD);
8295 8274 /* FALLTHROUGH */
8296 8275 case MADV_NORMAL:
8297 8276 case MADV_RANDOM:
8298 8277 svd->advice = (uchar_t)behav;
8299 8278 svd->pageadvice = 0;
8300 8279 break;
8301 8280 case MADV_WILLNEED: /* handled in memcntl */
8302 8281 case MADV_DONTNEED: /* handled in memcntl */
8303 8282 case MADV_FREE: /* handled above */
8304 8283 break;
8305 8284 default:
8306 8285 err = EINVAL;
8307 8286 }
8308 8287 } else {
8309 8288 caddr_t eaddr;
8310 8289 struct seg *new_seg;
8311 8290 struct segvn_data *new_svd;
8312 8291 u_offset_t off;
8313 8292 caddr_t oldeaddr;
8314 8293
8315 8294 page = seg_page(seg, addr);
8316 8295
8317 8296 segvn_vpage(seg);
8318 8297 if (svd->vpage == NULL) {
8319 8298 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8320 8299 return (ENOMEM);
8321 8300 }
8322 8301
8323 8302 switch (behav) {
8324 8303 struct vpage *bvpp, *evpp;
8325 8304
8326 8305 case MADV_ACCESS_LWP:
8327 8306 case MADV_ACCESS_MANY:
8328 8307 case MADV_ACCESS_DEFAULT:
8329 8308 /*
8330 8309 * Set memory allocation policy for portion of this
8331 8310 * segment
8332 8311 */
8333 8312
8334 8313 /*
8335 8314 * Align address and length of advice to page
8336 8315 * boundaries for large pages
8337 8316 */
8338 8317 if (seg->s_szc != 0) {
8339 8318 size_t pgsz;
8340 8319
8341 8320 pgsz = page_get_pagesize(seg->s_szc);
8342 8321 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
8343 8322 len = P2ROUNDUP(len, pgsz);
8344 8323 }
8345 8324
8346 8325 /*
8347 8326 * Check to see whether policy is set already
8348 8327 */
8349 8328 policy = lgrp_madv_to_policy(behav, len, svd->type);
8350 8329
8351 8330 anon_index = svd->anon_index + page;
8352 8331 off = svd->offset + (uintptr_t)(addr - seg->s_base);
8353 8332
8354 8333 if (svd->type == MAP_SHARED)
8355 8334 already_set = lgrp_shm_policy_set(policy, amp,
8356 8335 anon_index, vp, off, len);
8357 8336 else
8358 8337 already_set =
8359 8338 (policy == svd->policy_info.mem_policy);
8360 8339
8361 8340 /*
8362 8341 * If policy set already and it shouldn't be reapplied,
8363 8342 * don't do anything.
8364 8343 */
8365 8344 if (already_set &&
8366 8345 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8367 8346 break;
8368 8347
8369 8348 /*
8370 8349 * For private memory, need writers lock on
8371 8350 * address space because the segment may be
8372 8351 * split or concatenated when changing policy
8373 8352 */
8374 8353 if (svd->type == MAP_PRIVATE &&
8375 8354 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) {
8376 8355 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8377 8356 return (IE_RETRY);
8378 8357 }
8379 8358
8380 8359 /*
8381 8360 * Mark any existing pages in given range for
8382 8361 * migration
8383 8362 */
8384 8363 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8385 8364 vp, svd->offset, 1);
8386 8365
8387 8366 /*
8388 8367 * Don't need to try to split or concatenate
8389 8368 * segments, since policy is same or this is a shared
8390 8369 * memory segment
8391 8370 */
8392 8371 if (already_set || svd->type == MAP_SHARED)
8393 8372 break;
8394 8373
8395 8374 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
8396 8375 ASSERT(svd->amp == NULL);
8397 8376 ASSERT(svd->tr_state == SEGVN_TR_OFF);
8398 8377 ASSERT(svd->softlockcnt == 0);
8399 8378 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
8400 8379 HAT_REGION_TEXT);
8401 8380 svd->rcookie = HAT_INVALID_REGION_COOKIE;
8402 8381 }
8403 8382
8404 8383 /*
8405 8384 * Split off new segment if advice only applies to a
8406 8385 * portion of existing segment starting in middle
8407 8386 */
8408 8387 new_seg = NULL;
8409 8388 eaddr = addr + len;
8410 8389 oldeaddr = seg->s_base + seg->s_size;
8411 8390 if (addr > seg->s_base) {
8412 8391 /*
8413 8392 * Must flush I/O page cache
8414 8393 * before splitting segment
8415 8394 */
8416 8395 if (svd->softlockcnt > 0)
8417 8396 segvn_purge(seg);
8418 8397
8419 8398 /*
8420 8399 * Split segment and return IE_REATTACH to tell
8421 8400 * as_ctl() that current segment changed
8422 8401 */
8423 8402 new_seg = segvn_split_seg(seg, addr);
8424 8403 new_svd = (struct segvn_data *)new_seg->s_data;
8425 8404 err = IE_REATTACH;
8426 8405
8427 8406 /*
8428 8407 * If new segment ends where old one
8429 8408 * did, try to concatenate the new
8430 8409 * segment with next one.
8431 8410 */
8432 8411 if (eaddr == oldeaddr) {
8433 8412 /*
8434 8413 * Set policy for new segment
8435 8414 */
8436 8415 (void) lgrp_privm_policy_set(policy,
8437 8416 &new_svd->policy_info,
8438 8417 new_seg->s_size);
8439 8418
8440 8419 next = AS_SEGNEXT(new_seg->s_as,
8441 8420 new_seg);
8442 8421
8443 8422 if (next &&
8444 8423 next->s_ops == &segvn_ops &&
8445 8424 eaddr == next->s_base)
8446 8425 (void) segvn_concat(new_seg,
8447 8426 next, 1);
8448 8427 }
8449 8428 }
8450 8429
8451 8430 /*
8452 8431 * Split off end of existing segment if advice only
8453 8432 * applies to a portion of segment ending before
8454 8433 * end of the existing segment
8455 8434 */
8456 8435 if (eaddr < oldeaddr) {
8457 8436 /*
8458 8437 * Must flush I/O page cache
8459 8438 * before splitting segment
8460 8439 */
8461 8440 if (svd->softlockcnt > 0)
8462 8441 segvn_purge(seg);
8463 8442
8464 8443 /*
8465 8444 * If beginning of old segment was already
8466 8445 * split off, use new segment to split end off
8467 8446 * from.
8468 8447 */
8469 8448 if (new_seg != NULL && new_seg != seg) {
8470 8449 /*
8471 8450 * Split segment
8472 8451 */
8473 8452 (void) segvn_split_seg(new_seg, eaddr);
8474 8453
8475 8454 /*
8476 8455 * Set policy for new segment
8477 8456 */
8478 8457 (void) lgrp_privm_policy_set(policy,
8479 8458 &new_svd->policy_info,
8480 8459 new_seg->s_size);
8481 8460 } else {
8482 8461 /*
8483 8462 * Split segment and return IE_REATTACH
8484 8463 * to tell as_ctl() that current
8485 8464 * segment changed
8486 8465 */
8487 8466 (void) segvn_split_seg(seg, eaddr);
8488 8467 err = IE_REATTACH;
8489 8468
8490 8469 (void) lgrp_privm_policy_set(policy,
8491 8470 &svd->policy_info, seg->s_size);
8492 8471
8493 8472 /*
8494 8473 * If new segment starts where old one
8495 8474 * did, try to concatenate it with
8496 8475 * previous segment.
8497 8476 */
8498 8477 if (addr == seg->s_base) {
8499 8478 prev = AS_SEGPREV(seg->s_as,
8500 8479 seg);
8501 8480
8502 8481 /*
8503 8482 * Drop lock for private data
8504 8483 * of current segment before
8505 8484 * concatenating (deleting) it
8506 8485 */
8507 8486 if (prev &&
8508 8487 prev->s_ops ==
8509 8488 &segvn_ops &&
8510 8489 addr == prev->s_base +
8511 8490 prev->s_size) {
8512 8491 SEGVN_LOCK_EXIT(
8513 8492 seg->s_as,
8514 8493 &svd->lock);
8515 8494 (void) segvn_concat(
8516 8495 prev, seg, 1);
8517 8496 return (err);
8518 8497 }
8519 8498 }
8520 8499 }
8521 8500 }
8522 8501 break;
8523 8502 case MADV_SEQUENTIAL:
8524 8503 ASSERT(seg->s_szc == 0);
8525 8504 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8526 8505 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8527 8506 /* FALLTHROUGH */
8528 8507 case MADV_NORMAL:
8529 8508 case MADV_RANDOM:
8530 8509 bvpp = &svd->vpage[page];
8531 8510 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8532 8511 for (; bvpp < evpp; bvpp++)
8533 8512 VPP_SETADVICE(bvpp, behav);
8534 8513 svd->advice = MADV_NORMAL;
8535 8514 break;
8536 8515 case MADV_WILLNEED: /* handled in memcntl */
8537 8516 case MADV_DONTNEED: /* handled in memcntl */
8538 8517 case MADV_FREE: /* handled above */
8539 8518 break;
8540 8519 default:
8541 8520 err = EINVAL;
8542 8521 }
8543 8522 }
8544 8523 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8545 8524 return (err);
8546 8525 }
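
#if 0
/*
 * Minimal userland sketch (not part of this file): exercising the advice
 * paths handled by segvn_advise() above, assuming an illumos libc that
 * provides madvise(3C) and MAP_ANON. MADV_FREE only applies to pure anon
 * mappings and may fail with EAGAIN while I/O is pending; both calls are
 * advisory.
 */
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	size_t len = 8 * 1024 * 1024;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (p == MAP_FAILED)
		return (1);
	(void) memset(p, 0xa5, len);

	/* Pure anon MAP_PRIVATE mapping: eligible for MADV_FREE. */
	if (madvise(p, len, MADV_FREE) != 0)
		perror("madvise(MADV_FREE)");

	/* Unloads mappings so segvn_fault() can detect sequential access. */
	if (madvise(p, len, MADV_SEQUENTIAL) != 0)
		perror("madvise(MADV_SEQUENTIAL)");

	(void) munmap(p, len);
	return (0);
}
#endif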
8547 8526
8548 8527 /*
8549 8528 * There is one kind of inheritance that can be specified for pages:
8550 8529 *
8551 8530 * SEGP_INH_ZERO - Pages should be zeroed in the child
8552 8531 */
8553 8532 static int
8554 8533 segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8555 8534 {
8556 8535 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8557 8536 struct vpage *bvpp, *evpp;
8558 8537 size_t page;
8559 8538 int ret = 0;
8560 8539
8561 8540 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8562 8541
8563 8542 /* Can't support something we don't know about */
8564 8543 if (behav != SEGP_INH_ZERO)
8565 8544 return (ENOTSUP);
8566 8545
8567 8546 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8568 8547
8569 8548 /*
8570 8549 * This must be a straightforward anonymous segment that is mapped
8571 8550 * privately and is not backed by a vnode.
8572 8551 */
8573 8552 if (svd->tr_state != SEGVN_TR_OFF ||
8574 8553 svd->type != MAP_PRIVATE ||
8575 8554 svd->vp != NULL) {
8576 8555 ret = EINVAL;
8577 8556 goto out;
8578 8557 }
8579 8558
8580 8559 /*
8581 8560 	 * If the entire segment has been marked as inherit zero, then there is
8582 8561 	 * no reason to do anything else.
8583 8562 */
8584 8563 if (svd->svn_inz == SEGVN_INZ_ALL) {
8585 8564 ret = 0;
8586 8565 goto out;
8587 8566 }
8588 8567
8589 8568 /*
8590 8569 * If this applies to the entire segment, simply mark it and we're done.
8591 8570 */
8592 8571 if ((addr == seg->s_base) && (len == seg->s_size)) {
8593 8572 svd->svn_inz = SEGVN_INZ_ALL;
8594 8573 ret = 0;
8595 8574 goto out;
8596 8575 }
8597 8576
8598 8577 /*
8599 8578 * We've been asked to mark a subset of this segment as inherit zero,
8600 8579 	 * therefore we need to manipulate its vpages.
8601 8580 */
8602 8581 if (svd->vpage == NULL) {
8603 8582 segvn_vpage(seg);
8604 8583 if (svd->vpage == NULL) {
8605 8584 ret = ENOMEM;
8606 8585 goto out;
8607 8586 }
8608 8587 }
8609 8588
8610 8589 svd->svn_inz = SEGVN_INZ_VPP;
8611 8590 page = seg_page(seg, addr);
8612 8591 bvpp = &svd->vpage[page];
8613 8592 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8614 8593 for (; bvpp < evpp; bvpp++)
8615 8594 VPP_SETINHZERO(bvpp);
8616 8595 ret = 0;
8617 8596
8618 8597 out:
8619 8598 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8620 8599 return (ret);
8621 8600 }
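
#if 0
/*
 * Userland sketch (not part of this file): the SEGP_INH_ZERO behavior
 * implemented by segvn_inherit() above is reached through memcntl(2) with
 * MC_INHERIT_ZERO, assuming an illumos libc. The range must be a private,
 * vnode-less anon mapping or the call fails with EINVAL.
 */
#include <sys/types.h>
#include <sys/mman.h>

int
mark_inherit_zero(caddr_t addr, size_t len)
{
	/* After fork(), the child sees zeroed pages for this range. */
	return (memcntl(addr, len, MC_INHERIT_ZERO, 0, 0, 0));
}
#endif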
8622 8601
8623 8602 /*
8624 8603 * Create a vpage structure for this seg.
8625 8604 */
8626 8605 static void
8627 8606 segvn_vpage(struct seg *seg)
8628 8607 {
8629 8608 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8630 8609 struct vpage *vp, *evp;
8631 8610 static pgcnt_t page_limit = 0;
8632 8611
8633 8612 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8634 8613
8635 8614 /*
8636 8615 * If no vpage structure exists, allocate one. Copy the protections
8637 8616 * and the advice from the segment itself to the individual pages.
8638 8617 */
8639 8618 if (svd->vpage == NULL) {
8640 8619 /*
8641 8620 * Start by calculating the number of pages we must allocate to
8642 8621 		 * track the per-page vpage structs needed for this entire
8643 8622 		 * segment. If we know now that it will require more than our
8644 8623 		 * heuristic for the maximum amount of kmem we can consume, then
8645 8624 * fail. We do this here, instead of trying to detect this deep
8646 8625 * in page_resv and propagating the error up, since the entire
8647 8626 * memory allocation stack is not amenable to passing this
8648 8627 * back. Instead, it wants to keep trying.
8649 8628 *
8650 8629 * As a heuristic we set a page limit of 5/8s of total_pages
8651 8630 * for this allocation. We use shifts so that no floating
8652 8631 * point conversion takes place and only need to do the
8653 8632 * calculation once.
8654 8633 */
8655 8634 ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage);
8656 8635 pgcnt_t npages = mem_needed >> PAGESHIFT;
8657 8636
8658 8637 if (page_limit == 0)
8659 8638 page_limit = (total_pages >> 1) + (total_pages >> 3);
8660 8639
8661 8640 if (npages > page_limit)
8662 8641 return;
8663 8642
8664 8643 svd->pageadvice = 1;
8665 8644 svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
8666 8645 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8667 8646 for (vp = svd->vpage; vp < evp; vp++) {
8668 8647 VPP_SETPROT(vp, svd->prot);
8669 8648 VPP_SETADVICE(vp, svd->advice);
8670 8649 }
8671 8650 }
8672 8651 }
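
#if 0
/*
 * Standalone sketch (not part of this file) of the page_limit heuristic in
 * segvn_vpage() above: (n >> 1) + (n >> 3) is n/2 + n/8, i.e. 5/8 of n with
 * each term rounded down, computed with shifts only.
 */
#include <assert.h>

int
main(void)
{
	unsigned long n = 1048576;	/* e.g. 4GB worth of 4K pages */

	assert(((n >> 1) + (n >> 3)) == 655360);	/* 5/8 of n */
	assert(((8UL >> 1) + (8UL >> 3)) == 5UL);
	return (0);
}
#endif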
8673 8652
8674 8653 /*
8675 8654 * Dump the pages belonging to this segvn segment.
8676 8655 */
8677 8656 static void
8678 8657 segvn_dump(struct seg *seg)
8679 8658 {
8680 8659 struct segvn_data *svd;
8681 8660 page_t *pp;
8682 8661 struct anon_map *amp;
8683 8662 ulong_t anon_index;
8684 8663 struct vnode *vp;
8685 8664 u_offset_t off, offset;
8686 8665 pfn_t pfn;
8687 8666 pgcnt_t page, npages;
8688 8667 caddr_t addr;
8689 8668
8690 8669 npages = seg_pages(seg);
8691 8670 svd = (struct segvn_data *)seg->s_data;
8692 8671 vp = svd->vp;
8693 8672 off = offset = svd->offset;
8694 8673 addr = seg->s_base;
8695 8674
8696 8675 if ((amp = svd->amp) != NULL) {
8697 8676 anon_index = svd->anon_index;
8698 8677 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8699 8678 }
8700 8679
8701 8680 for (page = 0; page < npages; page++, offset += PAGESIZE) {
8702 8681 struct anon *ap;
8703 8682 int we_own_it = 0;
8704 8683
8705 8684 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8706 8685 swap_xlate_nopanic(ap, &vp, &off);
8707 8686 } else {
8708 8687 vp = svd->vp;
8709 8688 off = offset;
8710 8689 }
8711 8690
8712 8691 /*
8713 8692 * If pp == NULL, the page either does not exist
8714 8693 * or is exclusively locked. So determine if it
8715 8694 * exists before searching for it.
8716 8695 */
8717 8696
8718 8697 if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
8719 8698 we_own_it = 1;
8720 8699 else
8721 8700 pp = page_exists(vp, off);
8722 8701
8723 8702 if (pp) {
8724 8703 pfn = page_pptonum(pp);
8725 8704 dump_addpage(seg->s_as, addr, pfn);
8726 8705 if (we_own_it)
8727 8706 page_unlock(pp);
8728 8707 }
8729 8708 addr += PAGESIZE;
8730 8709 dump_timeleft = dump_timeout;
8731 8710 }
8732 8711
8733 8712 if (amp != NULL)
8734 8713 		ANON_LOCK_EXIT(&amp->a_rwlock);
8735 8714 }
8736 8715
8737 8716 #ifdef DEBUG
8738 8717 static uint32_t segvn_pglock_mtbf = 0;
8739 8718 #endif
8740 8719
8741 8720 #define PCACHE_SHWLIST ((page_t *)-2)
8742 8721 #define NOPCACHE_SHWLIST ((page_t *)-1)
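
/*
 * Illustrative layout note (derived from segvn_pagelock() below): a shadow
 * list pl[] holds npages page_t pointers plus one sentinel entry:
 *
 *	pl[0] .. pl[npages - 1]	page_t pointers for the locked range
 *	pl[npages]		PCACHE_SHWLIST if the list was large page
 *				adjusted and eligible for pcache insertion,
 *				NOPCACHE_SHWLIST otherwise
 *
 * The caller's *ppp is set to pl + adjustpages, i.e. to the entry for the
 * unadjusted start address.
 */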
8743 8722
8744 8723 /*
8745 8724 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
8746 8725 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
8747 8726 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
8748 8727 * the same parts of the segment. Currently shadow list creation is only
8749 8728 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
8750 8729 * tagged with segment pointer, starting virtual address and length. This
8751 8730 * approach for MAP_SHARED segments may add many pcache entries for the same
8752 8731 * set of pages and lead to long hash chains that decrease pcache lookup
8753 8732 * performance. To avoid this issue for shared segments shared anon map and
8754 8733 * starting anon index are used for pcache entry tagging. This allows all
8755 8734 * segments to share pcache entries for the same anon range and reduces pcache
8756 8735 * chain's length as well as memory overhead from duplicate shadow lists and
8757 8736 * pcache entries.
8758 8737 *
8759 8738 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
8760 8739 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
8761 8740 * part of softlockcnt accounting is done differently for private and shared
8762 8741 * segments. In private segment case softlock is only incremented when a new
8763 8742 * shadow list is created but not when an existing one is found via
8764 8743 * seg_plookup(). pcache entries have reference count incremented/decremented
8765 8744 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
8766 8745 * reference count can be purged (and purging is needed before segment can be
8767 8746 * freed). When a private segment pcache entry is purged segvn_reclaim() will
8768 8747 * decrement softlockcnt. Since in private segment case each of its pcache
8769 8748 * entries only belongs to this segment we can expect that when
8770 8749 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8771 8750 * segment purge will succeed and softlockcnt will drop to 0. In shared
8772 8751 * segment case reference count in pcache entry counts active locks from many
8773 8752 * different segments so we can't expect segment purging to succeed even when
8774 8753 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8775 8754  * segment. To be able to determine when there are no pending pagelocks in the
8776 8755  * shared segment case we don't rely on purging to make softlockcnt drop to 0
8777 8756 * but instead softlockcnt is incremented and decremented for every
8778 8757 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
8779 8758 * list was created or an existing one was found. When softlockcnt drops to 0
8780 8759 * this segment no longer has any claims for pcached shadow lists and the
8781 8760  * segment can be freed even if there are still active pcache entries
8782 8761 * shared by this segment anon map. Shared segment pcache entries belong to
8783 8762 * anon map and are typically removed when anon map is freed after all
8784 8763 * processes destroy the segments that use this anon map.
8785 8764 */
8786 8765 static int
8787 8766 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
8788 8767 enum lock_type type, enum seg_rw rw)
8789 8768 {
8790 8769 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8791 8770 size_t np;
8792 8771 pgcnt_t adjustpages;
8793 8772 pgcnt_t npages;
8794 8773 ulong_t anon_index;
8795 8774 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE;
8796 8775 uint_t error;
8797 8776 struct anon_map *amp;
8798 8777 pgcnt_t anpgcnt;
8799 8778 struct page **pplist, **pl, *pp;
8800 8779 caddr_t a;
8801 8780 size_t page;
8802 8781 caddr_t lpgaddr, lpgeaddr;
8803 8782 anon_sync_obj_t cookie;
8804 8783 int anlock;
8805 8784 struct anon_map *pamp;
8806 8785 caddr_t paddr;
8807 8786 seg_preclaim_cbfunc_t preclaim_callback;
8808 8787 size_t pgsz;
8809 8788 int use_pcache;
8810 8789 size_t wlen;
8811 8790 uint_t pflags = 0;
8812 8791 int sftlck_sbase = 0;
8813 8792 int sftlck_send = 0;
8814 8793
8815 8794 #ifdef DEBUG
8816 8795 if (type == L_PAGELOCK && segvn_pglock_mtbf) {
8817 8796 hrtime_t ts = gethrtime();
8818 8797 if ((ts % segvn_pglock_mtbf) == 0) {
8819 8798 return (ENOTSUP);
8820 8799 }
8821 8800 if ((ts % segvn_pglock_mtbf) == 1) {
8822 8801 return (EFAULT);
8823 8802 }
8824 8803 }
8825 8804 #endif
8826 8805
8827 8806 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START,
8828 8807 "segvn_pagelock: start seg %p addr %p", seg, addr);
8829 8808
8830 8809 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8831 8810 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
8832 8811
8833 8812 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8834 8813
8835 8814 /*
8836 8815 * for now we only support pagelock to anon memory. We would have to
8837 8816 * check protections for vnode objects and call into the vnode driver.
8838 8817 * That's too much for a fast path. Let the fault entry point handle
8839 8818 * it.
8840 8819 */
8841 8820 if (svd->vp != NULL) {
8842 8821 if (type == L_PAGELOCK) {
8843 8822 error = ENOTSUP;
8844 8823 goto out;
8845 8824 }
8846 8825 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8847 8826 }
8848 8827 if ((amp = svd->amp) == NULL) {
8849 8828 if (type == L_PAGELOCK) {
8850 8829 error = EFAULT;
8851 8830 goto out;
8852 8831 }
8853 8832 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8854 8833 }
8855 8834 if (rw != S_READ && rw != S_WRITE) {
8856 8835 if (type == L_PAGELOCK) {
8857 8836 error = ENOTSUP;
8858 8837 goto out;
8859 8838 }
8860 8839 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8861 8840 }
8862 8841
8863 8842 if (seg->s_szc != 0) {
8864 8843 /*
8865 8844 * We are adjusting the pagelock region to the large page size
8866 8845 * boundary because the unlocked part of a large page cannot
8867 8846 * be freed anyway unless all constituent pages of a large
8868 8847 * page are locked. Bigger regions reduce pcache chain length
8869 8848 * and improve lookup performance. The tradeoff is that the
8870 8849 * very first segvn_pagelock() call for a given page is more
8871 8850 * expensive if only 1 page_t is needed for IO. This is only
8872 8851 * an issue if pcache entry doesn't get reused by several
8873 8852 * subsequent calls. We optimize here for the case when pcache
8874 8853 * is heavily used by repeated IOs to the same address range.
8875 8854 *
8876 8855 * Note segment's page size cannot change while we are holding
8877 8856 * as lock. And then it cannot change while softlockcnt is
8878 8857 * not 0. This will allow us to correctly recalculate large
8879 8858 * page size region for the matching pageunlock/reclaim call
8880 8859 * since as_pageunlock() caller must always match
8881 8860 * as_pagelock() call's addr and len.
8882 8861 *
8883 8862 * For pageunlock *ppp points to the pointer of page_t that
8884 8863 * corresponds to the real unadjusted start address. Similar
8885 8864 * for pagelock *ppp must point to the pointer of page_t that
8886 8865 * corresponds to the real unadjusted start address.
8887 8866 */
8888 8867 pgsz = page_get_pagesize(seg->s_szc);
8889 8868 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
8890 8869 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8891 8870 } else if (len < segvn_pglock_comb_thrshld) {
8892 8871 lpgaddr = addr;
8893 8872 lpgeaddr = addr + len;
8894 8873 adjustpages = 0;
8895 8874 pgsz = PAGESIZE;
8896 8875 } else {
8897 8876 /*
8898 8877 * Align the address range of large enough requests to allow
8899 8878 * combining of different shadow lists into 1 to reduce memory
8900 8879 * overhead from potentially overlapping large shadow lists
8901 8880 * (worst case is we have a 1MB IO into buffers with start
8902 8881 * addresses separated by 4K). Alignment is only possible if
8903 8882 * padded chunks have sufficient access permissions. Note
8904 8883 * permissions won't change between L_PAGELOCK and
8905 8884 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8906 8885 * segvn_setprot() to wait until softlockcnt drops to 0. This
8907 8886 * allows us to determine in L_PAGEUNLOCK the same range we
8908 8887 * computed in L_PAGELOCK.
8909 8888 *
8910 8889 * If alignment is limited by segment ends set
8911 8890 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8912 8891 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8913 8892 * per segment counters. In L_PAGEUNLOCK case decrease
8914 8893 * softlockcnt_sbase/softlockcnt_send counters if
8915 8894 * sftlck_sbase/sftlck_send flags are set. When
8916 8895 * softlockcnt_sbase/softlockcnt_send are non 0
8917 8896 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8918 8897 * won't merge the segments. This restriction combined with
8919 8898 * restriction on segment unmapping and splitting for segments
8920 8899 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8921 8900 * correctly determine the same range that was previously
8922 8901 * locked by matching L_PAGELOCK.
8923 8902 */
8924 8903 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16);
8925 8904 pgsz = PAGESIZE;
8926 8905 if (svd->type == MAP_PRIVATE) {
8927 8906 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
8928 8907 segvn_pglock_comb_balign);
8929 8908 if (lpgaddr < seg->s_base) {
8930 8909 lpgaddr = seg->s_base;
8931 8910 sftlck_sbase = 1;
8932 8911 }
8933 8912 } else {
8934 8913 ulong_t aix = svd->anon_index + seg_page(seg, addr);
8935 8914 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign);
8936 8915 if (aaix < svd->anon_index) {
8937 8916 lpgaddr = seg->s_base;
8938 8917 sftlck_sbase = 1;
8939 8918 } else {
8940 8919 lpgaddr = addr - ptob(aix - aaix);
8941 8920 ASSERT(lpgaddr >= seg->s_base);
8942 8921 }
8943 8922 }
8944 8923 if (svd->pageprot && lpgaddr != addr) {
8945 8924 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
8946 8925 struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8947 8926 while (vp < evp) {
8948 8927 if ((VPP_PROT(vp) & protchk) == 0) {
8949 8928 break;
8950 8929 }
8951 8930 vp++;
8952 8931 }
8953 8932 if (vp < evp) {
8954 8933 lpgaddr = addr;
8955 8934 pflags = 0;
8956 8935 }
8957 8936 }
8958 8937 lpgeaddr = addr + len;
8959 8938 if (pflags) {
8960 8939 if (svd->type == MAP_PRIVATE) {
8961 8940 lpgeaddr = (caddr_t)P2ROUNDUP(
8962 8941 (uintptr_t)lpgeaddr,
8963 8942 segvn_pglock_comb_balign);
8964 8943 } else {
8965 8944 ulong_t aix = svd->anon_index +
8966 8945 seg_page(seg, lpgeaddr);
8967 8946 ulong_t aaix = P2ROUNDUP(aix,
8968 8947 segvn_pglock_comb_palign);
8969 8948 if (aaix < aix) {
8970 8949 lpgeaddr = 0;
8971 8950 } else {
8972 8951 lpgeaddr += ptob(aaix - aix);
8973 8952 }
8974 8953 }
8975 8954 if (lpgeaddr == 0 ||
8976 8955 lpgeaddr > seg->s_base + seg->s_size) {
8977 8956 lpgeaddr = seg->s_base + seg->s_size;
8978 8957 sftlck_send = 1;
8979 8958 }
8980 8959 }
8981 8960 if (svd->pageprot && lpgeaddr != addr + len) {
8982 8961 struct vpage *vp;
8983 8962 struct vpage *evp;
8984 8963
8985 8964 vp = &svd->vpage[seg_page(seg, addr + len)];
8986 8965 evp = &svd->vpage[seg_page(seg, lpgeaddr)];
8987 8966
8988 8967 while (vp < evp) {
8989 8968 if ((VPP_PROT(vp) & protchk) == 0) {
8990 8969 break;
8991 8970 }
8992 8971 vp++;
8993 8972 }
8994 8973 if (vp < evp) {
8995 8974 lpgeaddr = addr + len;
8996 8975 }
8997 8976 }
8998 8977 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8999 8978 }
9000 8979
9001 8980 /*
9002 8981 * For MAP_SHARED segments we create pcache entries tagged by amp and
9003 8982 * anon index so that we can share pcache entries with other segments
9004 8983 * that map this amp. For private segments pcache entries are tagged
9005 8984 * with segment and virtual address.
9006 8985 */
9007 8986 if (svd->type == MAP_SHARED) {
9008 8987 pamp = amp;
9009 8988 paddr = (caddr_t)((lpgaddr - seg->s_base) +
9010 8989 ptob(svd->anon_index));
9011 8990 preclaim_callback = shamp_reclaim;
9012 8991 } else {
9013 8992 pamp = NULL;
9014 8993 paddr = lpgaddr;
9015 8994 preclaim_callback = segvn_reclaim;
9016 8995 }
9017 8996
9018 8997 if (type == L_PAGEUNLOCK) {
9019 8998 VM_STAT_ADD(segvnvmstats.pagelock[0]);
9020 8999
9021 9000 /*
9022 9001 * update hat ref bits for /proc. We need to make sure
9023 9002 * that threads tracing the ref and mod bits of the
9024 9003 * address space get the right data.
9025 9004 * Note: page ref and mod bits are updated at reclaim time
9026 9005 */
9027 9006 if (seg->s_as->a_vbits) {
9028 9007 for (a = addr; a < addr + len; a += PAGESIZE) {
9029 9008 if (rw == S_WRITE) {
9030 9009 hat_setstat(seg->s_as, a,
9031 9010 PAGESIZE, P_REF | P_MOD);
9032 9011 } else {
9033 9012 hat_setstat(seg->s_as, a,
9034 9013 PAGESIZE, P_REF);
9035 9014 }
9036 9015 }
9037 9016 }
9038 9017
9039 9018 /*
9040 9019 * Check the shadow list entry after the last page used in
9041 9020 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
9042 9021 * was not inserted into pcache and is not large page
9043 9022 * adjusted. In this case call reclaim callback directly and
9044 9023 * don't adjust the shadow list start and size for large
9045 9024 * pages.
9046 9025 */
9047 9026 npages = btop(len);
9048 9027 if ((*ppp)[npages] == NOPCACHE_SHWLIST) {
9049 9028 void *ptag;
9050 9029 if (pamp != NULL) {
9051 9030 ASSERT(svd->type == MAP_SHARED);
9052 9031 ptag = (void *)pamp;
9053 9032 paddr = (caddr_t)((addr - seg->s_base) +
9054 9033 ptob(svd->anon_index));
9055 9034 } else {
9056 9035 ptag = (void *)seg;
9057 9036 paddr = addr;
9058 9037 }
9059 9038 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
9060 9039 } else {
9061 9040 ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
9062 9041 IS_SWAPFSVP((*ppp)[npages]->p_vnode));
9063 9042 len = lpgeaddr - lpgaddr;
9064 9043 npages = btop(len);
9065 9044 seg_pinactive(seg, pamp, paddr, len,
9066 9045 *ppp - adjustpages, rw, pflags, preclaim_callback);
9067 9046 }
9068 9047
9069 9048 if (pamp != NULL) {
9070 9049 ASSERT(svd->type == MAP_SHARED);
9071 9050 ASSERT(svd->softlockcnt >= npages);
9072 9051 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
9073 9052 }
9074 9053
9075 9054 if (sftlck_sbase) {
9076 9055 ASSERT(svd->softlockcnt_sbase > 0);
9077 9056 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
9078 9057 }
9079 9058 if (sftlck_send) {
9080 9059 ASSERT(svd->softlockcnt_send > 0);
9081 9060 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);
9082 9061 }
9083 9062
9084 9063 /*
9085 9064 * If someone is blocked while unmapping, we purge
9086 9065 * segment page cache and thus reclaim pplist synchronously
9087 9066 * without waiting for seg_pasync_thread. This speeds up
9088 9067 * unmapping in cases where munmap(2) is called, while
9089 9068 * raw async i/o is still in progress or where a thread
9090 9069 * exits on data fault in a multithreaded application.
9091 9070 */
9092 9071 if (AS_ISUNMAPWAIT(seg->s_as)) {
9093 9072 if (svd->softlockcnt == 0) {
9094 9073 mutex_enter(&seg->s_as->a_contents);
9095 9074 if (AS_ISUNMAPWAIT(seg->s_as)) {
9096 9075 AS_CLRUNMAPWAIT(seg->s_as);
9097 9076 cv_broadcast(&seg->s_as->a_cv);
9098 9077 }
9099 9078 mutex_exit(&seg->s_as->a_contents);
9100 9079 } else if (pamp == NULL) {
9101 9080 /*
9102 9081 * softlockcnt is not 0 and this is a
9103 9082 * MAP_PRIVATE segment. Try to purge its
9104 9083 * pcache entries to reduce softlockcnt.
9105 9084 * If it drops to 0 segvn_reclaim()
9106 9085 * will wake up a thread waiting on
9107 9086 * unmapwait flag.
9108 9087 *
9109 9088 * We don't purge MAP_SHARED segments with non
9110 9089 * 0 softlockcnt since IO is still in progress
9111 9090 * for such segments.
9112 9091 */
9113 9092 ASSERT(svd->type == MAP_PRIVATE);
9114 9093 segvn_purge(seg);
9115 9094 }
9116 9095 }
9117 9096 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9118 9097 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END,
9119 9098 "segvn_pagelock: unlock seg %p addr %p", seg, addr);
9120 9099 return (0);
9121 9100 }
9122 9101
9123 9102 /* The L_PAGELOCK case ... */
9124 9103
9125 9104 VM_STAT_ADD(segvnvmstats.pagelock[1]);
9126 9105
9127 9106 /*
9128 9107 * For MAP_SHARED segments we have to check protections before
9129 9108 * seg_plookup() since pcache entries may be shared by many segments
9130 9109 * with potentially different page protections.
9131 9110 */
9132 9111 if (pamp != NULL) {
9133 9112 ASSERT(svd->type == MAP_SHARED);
9134 9113 if (svd->pageprot == 0) {
9135 9114 if ((svd->prot & protchk) == 0) {
9136 9115 error = EACCES;
9137 9116 goto out;
9138 9117 }
9139 9118 } else {
9140 9119 /*
9141 9120 * check page protections
9142 9121 */
9143 9122 caddr_t ea;
9144 9123
9145 9124 if (seg->s_szc) {
9146 9125 a = lpgaddr;
9147 9126 ea = lpgeaddr;
9148 9127 } else {
9149 9128 a = addr;
9150 9129 ea = addr + len;
9151 9130 }
9152 9131 for (; a < ea; a += pgsz) {
9153 9132 struct vpage *vp;
9154 9133
9155 9134 ASSERT(seg->s_szc == 0 ||
9156 9135 sameprot(seg, a, pgsz));
9157 9136 vp = &svd->vpage[seg_page(seg, a)];
9158 9137 if ((VPP_PROT(vp) & protchk) == 0) {
9159 9138 error = EACCES;
9160 9139 goto out;
9161 9140 }
9162 9141 }
9163 9142 }
9164 9143 }
9165 9144
9166 9145 /*
9167 9146 * try to find pages in segment page cache
9168 9147 */
9169 9148 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
9170 9149 if (pplist != NULL) {
9171 9150 if (pamp != NULL) {
9172 9151 npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
9173 9152 ASSERT(svd->type == MAP_SHARED);
9174 9153 atomic_add_long((ulong_t *)&svd->softlockcnt,
9175 9154 npages);
9176 9155 }
9177 9156 if (sftlck_sbase) {
9178 9157 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9179 9158 }
9180 9159 if (sftlck_send) {
9181 9160 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9182 9161 }
9183 9162 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9184 9163 *ppp = pplist + adjustpages;
9185 9164 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
9186 9165 "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
9187 9166 return (0);
9188 9167 }
9189 9168
9190 9169 /*
9191 9170 * For MAP_SHARED segments we already verified above that segment
9192 9171 * protections allow this pagelock operation.
9193 9172 */
9194 9173 if (pamp == NULL) {
9195 9174 ASSERT(svd->type == MAP_PRIVATE);
9196 9175 if (svd->pageprot == 0) {
9197 9176 if ((svd->prot & protchk) == 0) {
9198 9177 error = EACCES;
9199 9178 goto out;
9200 9179 }
9201 9180 if (svd->prot & PROT_WRITE) {
9202 9181 wlen = lpgeaddr - lpgaddr;
9203 9182 } else {
9204 9183 wlen = 0;
9205 9184 ASSERT(rw == S_READ);
9206 9185 }
9207 9186 } else {
9208 9187 int wcont = 1;
9209 9188 /*
9210 9189 * check page protections
9211 9190 */
9212 9191 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
9213 9192 struct vpage *vp;
9214 9193
9215 9194 ASSERT(seg->s_szc == 0 ||
9216 9195 sameprot(seg, a, pgsz));
9217 9196 vp = &svd->vpage[seg_page(seg, a)];
9218 9197 if ((VPP_PROT(vp) & protchk) == 0) {
9219 9198 error = EACCES;
9220 9199 goto out;
9221 9200 }
9222 9201 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
9223 9202 wlen += pgsz;
9224 9203 } else {
9225 9204 wcont = 0;
9226 9205 ASSERT(rw == S_READ);
9227 9206 }
9228 9207 }
9229 9208 }
9230 9209 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
9231 9210 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
9232 9211 }
9233 9212
9234 9213 /*
9235 9214 * Only build large page adjusted shadow list if we expect to insert
9236 9215 * it into pcache. For large enough pages it's a big overhead to
9237 9216 * create a shadow list of the entire large page. But this overhead
9238 9217 * should be amortized over repeated pcache hits on subsequent reuse
9239 9218 * of this shadow list (IO into any range within this shadow list will
9240 9219 * find it in pcache since we large page align the request for pcache
9241 9220 * lookups). pcache performance is improved with bigger shadow lists
9242 9221 * as it reduces the time to pcache the entire big segment and reduces
9243 9222 * pcache chain length.
9244 9223 */
9245 9224 if (seg_pinsert_check(seg, pamp, paddr,
9246 9225 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
9247 9226 addr = lpgaddr;
9248 9227 len = lpgeaddr - lpgaddr;
9249 9228 use_pcache = 1;
9250 9229 } else {
9251 9230 use_pcache = 0;
9252 9231 /*
9253 9232 * Since this entry will not be inserted into the pcache, we
9254 9233 * will not do any adjustments to the starting address or
9255 9234 * size of the memory to be locked.
9256 9235 */
9257 9236 adjustpages = 0;
9258 9237 }
9259 9238 npages = btop(len);
9260 9239
9261 9240 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
9262 9241 pl = pplist;
9263 9242 *ppp = pplist + adjustpages;
9264 9243 /*
9265 9244 * If use_pcache is 0 this shadow list is not large page adjusted.
9266 9245 * Record this info in the last entry of shadow array so that
9267 9246 * L_PAGEUNLOCK can determine if it should large page adjust the
9268 9247 * address range to find the real range that was locked.
9269 9248 */
9270 9249 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;
9271 9250
9272 9251 page = seg_page(seg, addr);
9273 9252 anon_index = svd->anon_index + page;
9274 9253
9275 9254 anlock = 0;
9276 9255 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9277 9256 ASSERT(amp->a_szc >= seg->s_szc);
9278 9257 anpgcnt = page_get_pagecnt(amp->a_szc);
9279 9258 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
9280 9259 struct anon *ap;
9281 9260 struct vnode *vp;
9282 9261 u_offset_t off;
9283 9262
9284 9263 /*
9285 9264 * Lock and unlock anon array only once per large page.
9286 9265 * anon_array_enter() locks the root anon slot according to
9287 9266 * a_szc which can't change while anon map is locked. We lock
9288 9267 * anon the first time through this loop and each time we
9289 9268 * reach anon index that corresponds to a root of a large
9290 9269 * page.
9291 9270 */
9292 9271 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
9293 9272 ASSERT(anlock == 0);
9294 9273 anon_array_enter(amp, anon_index, &cookie);
9295 9274 anlock = 1;
9296 9275 }
9297 9276 ap = anon_get_ptr(amp->ahp, anon_index);
9298 9277
9299 9278 /*
9300 9279 * We must never use seg_pcache for COW pages
9301 9280 * because we might end up with original page still
9302 9281 * lying in seg_pcache even after private page is
9303 9282 * created. This leads to data corruption as
9304 9283 * aio_write refers to the page still in cache
9305 9284 * while all other accesses refer to the private
9306 9285 * page.
9307 9286 */
9308 9287 if (ap == NULL || ap->an_refcnt != 1) {
9309 9288 struct vpage *vpage;
9310 9289
9311 9290 if (seg->s_szc) {
9312 9291 error = EFAULT;
9313 9292 break;
9314 9293 }
9315 9294 if (svd->vpage != NULL) {
9316 9295 vpage = &svd->vpage[seg_page(seg, a)];
9317 9296 } else {
9318 9297 vpage = NULL;
9319 9298 }
9320 9299 ASSERT(anlock);
9321 9300 anon_array_exit(&cookie);
9322 9301 anlock = 0;
9323 9302 pp = NULL;
9324 9303 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
9325 9304 vpage, &pp, 0, F_INVAL, rw, 1);
9326 9305 if (error) {
9327 9306 error = fc_decode(error);
9328 9307 break;
9329 9308 }
9330 9309 anon_array_enter(amp, anon_index, &cookie);
9331 9310 anlock = 1;
9332 9311 ap = anon_get_ptr(amp->ahp, anon_index);
9333 9312 if (ap == NULL || ap->an_refcnt != 1) {
9334 9313 error = EFAULT;
9335 9314 break;
9336 9315 }
9337 9316 }
9338 9317 swap_xlate(ap, &vp, &off);
9339 9318 pp = page_lookup_nowait(vp, off, SE_SHARED);
9340 9319 if (pp == NULL) {
9341 9320 error = EFAULT;
9342 9321 break;
9343 9322 }
9344 9323 if (ap->an_pvp != NULL) {
9345 9324 anon_swap_free(ap, pp);
9346 9325 }
9347 9326 /*
9348 9327 * Unlock anon if this is the last slot in a large page.
9349 9328 */
9350 9329 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
9351 9330 ASSERT(anlock);
9352 9331 anon_array_exit(&cookie);
9353 9332 anlock = 0;
9354 9333 }
9355 9334 *pplist++ = pp;
9356 9335 }
9357 9336 if (anlock) { /* Ensure the lock is dropped */
9358 9337 anon_array_exit(&cookie);
9359 9338 }
9360 9339 	ANON_LOCK_EXIT(&amp->a_rwlock);
9361 9340
9362 9341 if (a >= addr + len) {
9363 9342 atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9364 9343 if (pamp != NULL) {
9365 9344 ASSERT(svd->type == MAP_SHARED);
9366 9345 atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
9367 9346 npages);
9368 9347 wlen = len;
9369 9348 }
9370 9349 if (sftlck_sbase) {
9371 9350 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9372 9351 }
9373 9352 if (sftlck_send) {
9374 9353 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9375 9354 }
9376 9355 if (use_pcache) {
9377 9356 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
9378 9357 rw, pflags, preclaim_callback);
9379 9358 }
9380 9359 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9381 9360 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
9382 9361 "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9383 9362 return (0);
9384 9363 }
9385 9364
9386 9365 pplist = pl;
9387 9366 np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
9388 9367 while (np > (uint_t)0) {
9389 9368 ASSERT(PAGE_LOCKED(*pplist));
9390 9369 page_unlock(*pplist);
9391 9370 np--;
9392 9371 pplist++;
9393 9372 }
9394 9373 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9395 9374 out:
9396 9375 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9397 9376 *ppp = NULL;
9398 9377 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
9399 9378 "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
9400 9379 return (error);
9401 9380 }
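
#if 0
/*
 * Kernel-side sketch (not part of this file): the as_pagelock()/
 * as_pageunlock() pattern that drives the L_PAGELOCK and L_PAGEUNLOCK cases
 * above. The unlock must pass the same addr/len as the lock so that segvn
 * can recompute the large page adjusted range it actually locked.
 */
#include <vm/as.h>
#include <vm/page.h>

static int
do_locked_io(struct as *as, caddr_t addr, size_t len, enum seg_rw rw)
{
	struct page **pplist;
	int error;

	error = as_pagelock(as, &pplist, addr, len, rw);
	if (error != 0)
		return (error);

	/* ... perform the I/O against pplist (which may be NULL) ... */

	as_pageunlock(as, pplist, addr, len, rw);
	return (0);
}
#endif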
9402 9381
9403 9382 /*
9404 9383 * purge any cached pages in the I/O page cache
9405 9384 */
9406 9385 static void
9407 9386 segvn_purge(struct seg *seg)
9408 9387 {
9409 9388 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9410 9389
9411 9390 /*
9412 9391 * pcache is only used by pure anon segments.
9413 9392 */
9414 9393 if (svd->amp == NULL || svd->vp != NULL) {
9415 9394 return;
9416 9395 }
9417 9396
9418 9397 /*
9419 9398 	 * For MAP_SHARED segments a non-zero softlockcnt means
9420 9399 * active IO is still in progress via this segment. So we only
9421 9400 * purge MAP_SHARED segments when their softlockcnt is 0.
9422 9401 */
9423 9402 if (svd->type == MAP_PRIVATE) {
9424 9403 if (svd->softlockcnt) {
9425 9404 seg_ppurge(seg, NULL, 0);
9426 9405 }
9427 9406 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
9428 9407 seg_ppurge(seg, svd->amp, 0);
9429 9408 }
9430 9409 }
9431 9410
9432 9411 /*
9433 9412 * If async argument is not 0 we are called from pcache async thread and don't
9434 9413 * hold AS lock.
9435 9414 */
9436 9415
9437 9416 /*ARGSUSED*/
9438 9417 static int
9439 9418 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9440 9419 enum seg_rw rw, int async)
9441 9420 {
9442 9421 struct seg *seg = (struct seg *)ptag;
9443 9422 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9444 9423 pgcnt_t np, npages;
9445 9424 struct page **pl;
9446 9425
9447 9426 npages = np = btop(len);
9448 9427 ASSERT(npages);
9449 9428
9450 9429 ASSERT(svd->vp == NULL && svd->amp != NULL);
9451 9430 ASSERT(svd->softlockcnt >= npages);
9452 9431 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9453 9432
9454 9433 pl = pplist;
9455 9434
9456 9435 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9457 9436 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9458 9437
9459 9438 while (np > (uint_t)0) {
9460 9439 if (rw == S_WRITE) {
9461 9440 hat_setrefmod(*pplist);
9462 9441 } else {
9463 9442 hat_setref(*pplist);
9464 9443 }
9465 9444 page_unlock(*pplist);
9466 9445 np--;
9467 9446 pplist++;
9468 9447 }
9469 9448
9470 9449 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9471 9450
9472 9451 /*
9473 9452 	 * If we are the pcache async thread we don't hold the AS lock. This
9474 9453 	 * means that if softlockcnt drops to 0 after the decrement below, the
9475 9454 	 * address space may get freed. We can't allow that: after the softlock
9476 9455 	 * decrement to 0 we still need the as structure for a possible wakeup
9477 9456 	 * of unmap waiters. To prevent the as from disappearing we take this
9478 9457 	 * segment's segfree_syncmtx. segvn_free() also takes this mutex as a
9479 9458 	 * barrier to make sure this routine completes before the segment is freed.
9480 9459 *
9481 9460 	 * The second complication in the async case is the possibility of a
9482 9461 	 * missed wakeup of the unmap wait thread. When we don't hold the as
9483 9462 	 * lock here we may take the a_contents lock before the unmap wait
9484 9463 	 * thread that was first to see that softlockcnt was still not 0. As a
9485 9464 	 * result we'll fail to wake up the unmap wait thread. To avoid this
9486 9465 	 * race we set the nounmapwait flag in the as structure if we drop
9487 9466 	 * softlockcnt to 0 when called by the pcache async thread. The
9488 9467 	 * unmapwait thread will not block if this flag is set.
9489 9468 */
9490 9469 if (async) {
9491 9470 mutex_enter(&svd->segfree_syncmtx);
9492 9471 }
9493 9472
9494 9473 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9495 9474 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
9496 9475 mutex_enter(&seg->s_as->a_contents);
9497 9476 if (async) {
9498 9477 AS_SETNOUNMAPWAIT(seg->s_as);
9499 9478 }
9500 9479 if (AS_ISUNMAPWAIT(seg->s_as)) {
9501 9480 AS_CLRUNMAPWAIT(seg->s_as);
9502 9481 cv_broadcast(&seg->s_as->a_cv);
9503 9482 }
9504 9483 mutex_exit(&seg->s_as->a_contents);
9505 9484 }
9506 9485 }
9507 9486
9508 9487 if (async) {
9509 9488 mutex_exit(&svd->segfree_syncmtx);
9510 9489 }
9511 9490 return (0);
9512 9491 }
9513 9492
9514 9493 /*ARGSUSED*/
9515 9494 static int
9516 9495 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9517 9496 enum seg_rw rw, int async)
9518 9497 {
9519 9498 amp_t *amp = (amp_t *)ptag;
9520 9499 pgcnt_t np, npages;
9521 9500 struct page **pl;
9522 9501
9523 9502 npages = np = btop(len);
9524 9503 ASSERT(npages);
9525 9504 ASSERT(amp->a_softlockcnt >= npages);
9526 9505
9527 9506 pl = pplist;
9528 9507
9529 9508 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9530 9509 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9531 9510
9532 9511 while (np > (uint_t)0) {
9533 9512 if (rw == S_WRITE) {
9534 9513 hat_setrefmod(*pplist);
9535 9514 } else {
9536 9515 hat_setref(*pplist);
9537 9516 }
9538 9517 page_unlock(*pplist);
9539 9518 np--;
9540 9519 pplist++;
9541 9520 }
9542 9521
9543 9522 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9544 9523
9545 9524 /*
9546 9525 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
9547 9526 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
9548 9527 * and anonmap_purge() acquires a_purgemtx.
9549 9528 */
9550 9529 	mutex_enter(&amp->a_purgemtx);
9551 9530 	if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
9552 9531 	    amp->a_purgewait) {
9553 9532 		amp->a_purgewait = 0;
9554 9533 		cv_broadcast(&amp->a_purgecv);
9555 9534 	}
9556 9535 	mutex_exit(&amp->a_purgemtx);
9557 9536 return (0);
9558 9537 }
9559 9538
9560 9539 /*
9561 9540 * get a memory ID for an addr in a given segment
9562 9541 *
9563 9542 * XXX only creates PAGESIZE pages if anon slots are not initialized.
9564 9543 * At fault time they will be relocated into larger pages.
9565 9544 */
9566 9545 static int
9567 9546 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
9568 9547 {
9569 9548 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9570 9549 struct anon *ap = NULL;
9571 9550 ulong_t anon_index;
9572 9551 struct anon_map *amp;
9573 9552 anon_sync_obj_t cookie;
9574 9553
9575 9554 if (svd->type == MAP_PRIVATE) {
9576 9555 memidp->val[0] = (uintptr_t)seg->s_as;
9577 9556 memidp->val[1] = (uintptr_t)addr;
9578 9557 return (0);
9579 9558 }
9580 9559
9581 9560 if (svd->type == MAP_SHARED) {
9582 9561 if (svd->vp) {
9583 9562 memidp->val[0] = (uintptr_t)svd->vp;
9584 9563 memidp->val[1] = (u_longlong_t)svd->offset +
9585 9564 (uintptr_t)(addr - seg->s_base);
9586 9565 return (0);
9587 9566 } else {
9588 9567
9589 9568 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
9590 9569 if ((amp = svd->amp) != NULL) {
9591 9570 anon_index = svd->anon_index +
9592 9571 seg_page(seg, addr);
9593 9572 }
9594 9573 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9595 9574
9596 9575 ASSERT(amp != NULL);
9597 9576
9598 9577 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9599 9578 anon_array_enter(amp, anon_index, &cookie);
9600 9579 ap = anon_get_ptr(amp->ahp, anon_index);
9601 9580 if (ap == NULL) {
9602 9581 page_t *pp;
9603 9582
9604 9583 pp = anon_zero(seg, addr, &ap, svd->cred);
9605 9584 if (pp == NULL) {
9606 9585 anon_array_exit(&cookie);
9607 9586 				ANON_LOCK_EXIT(&amp->a_rwlock);
9608 9587 return (ENOMEM);
9609 9588 }
9610 9589 ASSERT(anon_get_ptr(amp->ahp, anon_index)
9611 9590 == NULL);
9612 9591 (void) anon_set_ptr(amp->ahp, anon_index,
9613 9592 ap, ANON_SLEEP);
9614 9593 page_unlock(pp);
9615 9594 }
9616 9595
9617 9596 anon_array_exit(&cookie);
9618 9597 			ANON_LOCK_EXIT(&amp->a_rwlock);
9619 9598
9620 9599 memidp->val[0] = (uintptr_t)ap;
9621 9600 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
9622 9601 return (0);
9623 9602 }
9624 9603 }
9625 9604 return (EINVAL);
9626 9605 }
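
/*
 * Illustrative summary (derived from the code above): the three memid forms
 * segvn_getmemid() produces are
 *
 *	MAP_PRIVATE:		{ as pointer,	virtual address	}
 *	MAP_SHARED + vnode:	{ vnode,	file offset	}
 *	MAP_SHARED pure anon:	{ anon pointer,	offset in page	}
 */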
9627 9606
9628 9607 static int
9629 9608 sameprot(struct seg *seg, caddr_t a, size_t len)
9630 9609 {
9631 9610 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9632 9611 struct vpage *vpage;
9633 9612 spgcnt_t pages = btop(len);
9634 9613 uint_t prot;
9635 9614
9636 9615 if (svd->pageprot == 0)
9637 9616 return (1);
9638 9617
9639 9618 ASSERT(svd->vpage != NULL);
9640 9619
9641 9620 vpage = &svd->vpage[seg_page(seg, a)];
9642 9621 prot = VPP_PROT(vpage);
9643 9622 vpage++;
9644 9623 pages--;
9645 9624 while (pages-- > 0) {
9646 9625 if (prot != VPP_PROT(vpage))
9647 9626 return (0);
9648 9627 vpage++;
9649 9628 }
9650 9629 return (1);
9651 9630 }
9652 9631
9653 9632 /*
9654 9633 * Get memory allocation policy info for specified address in given segment
9655 9634 */
9656 9635 static lgrp_mem_policy_info_t *
9657 9636 segvn_getpolicy(struct seg *seg, caddr_t addr)
9658 9637 {
9659 9638 struct anon_map *amp;
9660 9639 ulong_t anon_index;
9661 9640 lgrp_mem_policy_info_t *policy_info;
9662 9641 struct segvn_data *svn_data;
9663 9642 u_offset_t vn_off;
9664 9643 vnode_t *vp;
9665 9644
9666 9645 ASSERT(seg != NULL);
9667 9646
9668 9647 svn_data = (struct segvn_data *)seg->s_data;
9669 9648 if (svn_data == NULL)
9670 9649 return (NULL);
9671 9650
9672 9651 /*
9673 9652 * Get policy info for private or shared memory
9674 9653 */
9675 9654 if (svn_data->type != MAP_SHARED) {
9676 9655 if (svn_data->tr_state != SEGVN_TR_ON) {
9677 9656 policy_info = &svn_data->policy_info;
9678 9657 } else {
9679 9658 policy_info = &svn_data->tr_policy_info;
9680 9659 ASSERT(policy_info->mem_policy ==
9681 9660 LGRP_MEM_POLICY_NEXT_SEG);
9682 9661 }
9683 9662 } else {
9684 9663 amp = svn_data->amp;
9685 9664 anon_index = svn_data->anon_index + seg_page(seg, addr);
9686 9665 vp = svn_data->vp;
9687 9666 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
9688 9667 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
9689 9668 }
9690 9669
9691 9670 return (policy_info);
9692 9671 }
9693 9672
9694 9673 /*ARGSUSED*/
9695 9674 static int
9696 9675 segvn_capable(struct seg *seg, segcapability_t capability)
9697 9676 {
9698 9677 return (0);
9699 9678 }
9700 9679
9701 9680 /*
9702 9681  * Bind a text vnode segment to an amp. If we bind successfully, mappings are
9703 9682  * established to per-vnode-mapping, per-lgroup amp pages instead of to vnode
9704 9683 * pages. There's one amp per vnode text mapping per lgroup. Many processes
9705 9684 * may share the same text replication amp. If a suitable amp doesn't already
9706 9685 * exist in svntr hash table create a new one. We may fail to bind to amp if
9707 9686 * segment is not eligible for text replication. Code below first checks for
9708 9687 * these conditions. If binding is successful segment tr_state is set to on
9709 9688 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
9710 9689 * svd->amp remains as NULL.
9711 9690 */
9712 9691 static void
9713 9692 segvn_textrepl(struct seg *seg)
9714 9693 {
9715 9694 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9716 9695 vnode_t *vp = svd->vp;
9717 9696 u_offset_t off = svd->offset;
9718 9697 size_t size = seg->s_size;
9719 9698 u_offset_t eoff = off + size;
9720 9699 uint_t szc = seg->s_szc;
9721 9700 ulong_t hash = SVNTR_HASH_FUNC(vp);
9722 9701 svntr_t *svntrp;
9723 9702 struct vattr va;
9724 9703 proc_t *p = seg->s_as->a_proc;
9725 9704 lgrp_id_t lgrp_id;
9726 9705 lgrp_id_t olid;
9727 9706 int first;
9728 9707 struct anon_map *amp;
9729 9708
9730 9709 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9731 9710 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9732 9711 ASSERT(p != NULL);
9733 9712 ASSERT(svd->tr_state == SEGVN_TR_INIT);
9734 9713 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9735 9714 ASSERT(svd->flags & MAP_TEXT);
9736 9715 ASSERT(svd->type == MAP_PRIVATE);
9737 9716 ASSERT(vp != NULL && svd->amp == NULL);
9738 9717 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9739 9718 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9740 9719 ASSERT(seg->s_as != &kas);
9741 9720 ASSERT(off < eoff);
9742 9721 ASSERT(svntr_hashtab != NULL);
9743 9722
9744 9723 /*
9745 9724 * If numa optimizations are no longer desired bail out.
9746 9725 	 * If NUMA optimizations are no longer desired, bail out.
9747 9726 if (!lgrp_optimizations()) {
9748 9727 svd->tr_state = SEGVN_TR_OFF;
9749 9728 return;
9750 9729 }
9751 9730
9752 9731 /*
9753 9732 * Avoid creating anon maps with size bigger than the file size.
9754 9733 * If VOP_GETATTR() call fails bail out.
9755 9734 */
9756 9735 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
9757 9736 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
9758 9737 svd->tr_state = SEGVN_TR_OFF;
9759 9738 SEGVN_TR_ADDSTAT(gaerr);
9760 9739 return;
9761 9740 }
9762 9741 if (btopr(va.va_size) < btopr(eoff)) {
9763 9742 svd->tr_state = SEGVN_TR_OFF;
9764 9743 SEGVN_TR_ADDSTAT(overmap);
9765 9744 return;
9766 9745 }
9767 9746
9768 9747 /*
9769 9748 	 * VVMEXEC may not be set yet if exec() prefaults the text segment. Set
9770 9749 	 * this flag now, before vn_is_mapped(V_WRITE), so that a MAP_SHARED
9771 9750 	 * mapping that checks whether the trcache for this vnode needs to be
9772 9751 	 * invalidated can't miss us.
9773 9752 */
9774 9753 if (!(vp->v_flag & VVMEXEC)) {
9775 9754 mutex_enter(&vp->v_lock);
9776 9755 vp->v_flag |= VVMEXEC;
9777 9756 mutex_exit(&vp->v_lock);
9778 9757 }
9779 9758 mutex_enter(&svntr_hashtab[hash].tr_lock);
9780 9759 /*
9781 9760 * Bail out if potentially MAP_SHARED writable mappings exist to this
9782 9761 * vnode. We don't want to use old file contents from existing
9783 9762 * replicas if this mapping was established after the original file
9784 9763 * was changed.
9785 9764 */
9786 9765 if (vn_is_mapped(vp, V_WRITE)) {
9787 9766 mutex_exit(&svntr_hashtab[hash].tr_lock);
9788 9767 svd->tr_state = SEGVN_TR_OFF;
9789 9768 SEGVN_TR_ADDSTAT(wrcnt);
9790 9769 return;
9791 9770 }
9792 9771 svntrp = svntr_hashtab[hash].tr_head;
9793 9772 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9794 9773 ASSERT(svntrp->tr_refcnt != 0);
9795 9774 if (svntrp->tr_vp != vp) {
9796 9775 continue;
9797 9776 }
9798 9777
9799 9778 /*
9800 9779 * Bail out if the file or its attributes were changed after
9801 9780 * this replication entry was created since we need to use the
9802 9781 	 * latest file contents. Note that the mtime test alone is not
9803 9782 	 * sufficient because a user can explicitly change mtime via the
9804 9783 	 * utimes(2) interfaces back to the old value after modifying
9805 9784 * the file contents. To detect this case we also have to test
9806 9785 * ctime which among other things records the time of the last
9807 9786 * mtime change by utimes(2). ctime is not changed when the file
9808 9787 * is only read or executed so we expect that typically existing
9809 9788 * replication amp's can be used most of the time.
9810 9789 */
9811 9790 if (!svntrp->tr_valid ||
9812 9791 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
9813 9792 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
9814 9793 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
9815 9794 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
9816 9795 mutex_exit(&svntr_hashtab[hash].tr_lock);
9817 9796 svd->tr_state = SEGVN_TR_OFF;
9818 9797 SEGVN_TR_ADDSTAT(stale);
9819 9798 return;
9820 9799 }
9821 9800 /*
9822 9801 		 * If off, eoff and szc match the current segment we found an
9823 9802 		 * existing entry we can use.
9824 9803 */
9825 9804 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
9826 9805 svntrp->tr_szc == szc) {
9827 9806 break;
9828 9807 }
9829 9808 /*
9830 9809 	 * Don't create entries that differ but overlap in file offsets,
9831 9810 	 * to avoid replicating the same file pages more than once per
9832 9811 	 * lgroup.
9833 9812 */
9834 9813 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
9835 9814 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
9836 9815 mutex_exit(&svntr_hashtab[hash].tr_lock);
9837 9816 svd->tr_state = SEGVN_TR_OFF;
9838 9817 SEGVN_TR_ADDSTAT(overlap);
9839 9818 return;
9840 9819 }
9841 9820 }
9842 9821 /*
9843 9822 	 * If we didn't find an existing entry, create a new one.
9844 9823 */
9845 9824 if (svntrp == NULL) {
9846 9825 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
9847 9826 if (svntrp == NULL) {
9848 9827 mutex_exit(&svntr_hashtab[hash].tr_lock);
9849 9828 svd->tr_state = SEGVN_TR_OFF;
9850 9829 SEGVN_TR_ADDSTAT(nokmem);
9851 9830 return;
9852 9831 }
9853 9832 #ifdef DEBUG
9854 9833 {
9855 9834 lgrp_id_t i;
9856 9835 for (i = 0; i < NLGRPS_MAX; i++) {
9857 9836 ASSERT(svntrp->tr_amp[i] == NULL);
9858 9837 }
9859 9838 }
9860 9839 #endif /* DEBUG */
9861 9840 svntrp->tr_vp = vp;
9862 9841 svntrp->tr_off = off;
9863 9842 svntrp->tr_eoff = eoff;
9864 9843 svntrp->tr_szc = szc;
9865 9844 svntrp->tr_valid = 1;
9866 9845 svntrp->tr_mtime = va.va_mtime;
9867 9846 svntrp->tr_ctime = va.va_ctime;
9868 9847 svntrp->tr_refcnt = 0;
9869 9848 svntrp->tr_next = svntr_hashtab[hash].tr_head;
9870 9849 svntr_hashtab[hash].tr_head = svntrp;
9871 9850 }
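	/*
	 * A new entry goes onto the hash chain with tr_refcnt of zero; the
	 * count is only bumped once a usable amp exists, which is how the
	 * fail: path below recognizes (and frees) a still-unreferenced entry.
	 */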
9872 9851 first = 1;
9873 9852 again:
9874 9853 /*
9875 9854 	 * We want to pick a replica with pages on the main thread's
9876 9855 	 * (t_tid = 1, aka T1) lgrp. Currently text replication is only
9877 9856 	 * optimized for workloads that either have all threads of a process
9878 9857 	 * on the same lgrp or execute large text primarily on the main thread.
9879 9858 */
9880 9859 lgrp_id = p->p_t1_lgrpid;
9881 9860 if (lgrp_id == LGRP_NONE) {
9882 9861 /*
9883 9862 	 * In case exec() prefaults text on a non-main thread, use the
9884 9863 	 * current thread's lgrpid. It will become the main thread soon
9885 9864 	 * anyway.
9886 9865 */
9887 9866 lgrp_id = lgrp_home_id(curthread);
9888 9867 }
9889 9868 /*
9890 9869 	 * Set p_tr_lgrpid to lgrp_id if it hasn't been set yet. Otherwise
9891 9870 	 * just set it to NLGRPS_MAX if it differs from the current process
9892 9871 	 * T1 home lgrp. p_tr_lgrpid is used to detect when a process uses
9893 9872 	 * text replication but T1's new home differs from the lgrp used
9894 9873 	 * for text replication; when this happens the asynchronous segvn
9895 9874 	 * thread rechecks whether segments should change the lgrps used for
9896 9875 	 * text replication. If we fail to set p_tr_lgrpid with
9897 9876 	 * atomic_cas_32, set it to NLGRPS_MAX without cas, provided it's
9898 9877 	 * not already NLGRPS_MAX and not equal to the lgrp_id we want. We
9899 9878 	 * don't need cas here because another thread racing between our
9900 9879 	 * non-atomic check and store may only change p_tr_lgrpid to NLGRPS_MAX.
9901 9880 */
9902 9881 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9903 9882 olid = p->p_tr_lgrpid;
9904 9883 if (lgrp_id != olid && olid != NLGRPS_MAX) {
9905 9884 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
9906 9885 if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
9907 9886 olid) {
9908 9887 olid = p->p_tr_lgrpid;
9909 9888 ASSERT(olid != LGRP_NONE);
9910 9889 if (olid != lgrp_id && olid != NLGRPS_MAX) {
9911 9890 p->p_tr_lgrpid = NLGRPS_MAX;
9912 9891 }
9913 9892 }
9914 9893 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9915 9894 membar_producer();
9916 9895 /*
9917 9896 	 * lgrp_move_thread() only schedules an async recheck after a
9918 9897 	 * p->p_t1_lgrpid update if p->p_tr_lgrpid is not LGRP_NONE.
9919 9898 	 * Recheck p_t1_lgrpid once now that p->p_tr_lgrpid is no longer
9920 9899 	 * LGRP_NONE.
9921 9900 */
9922 9901 if (first && p->p_t1_lgrpid != LGRP_NONE &&
9923 9902 p->p_t1_lgrpid != lgrp_id) {
9924 9903 first = 0;
9925 9904 goto again;
9926 9905 }
9927 9906 }
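	/*
	 * In effect p_tr_lgrpid only moves forward here: from LGRP_NONE, to
	 * the lgrp used for replication, to NLGRPS_MAX once T1's home no
	 * longer matches. That monotonicity is what makes the unprotected
	 * store above safe.
	 */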
9928 9907 /*
9929 9908 	 * If no amp was created yet for lgrp_id, create a new one as long as
9930 9909 * we have enough memory to afford it.
9931 9910 */
9932 9911 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
9933 9912 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
9934 9913 if (trmem > segvn_textrepl_max_bytes) {
9935 9914 SEGVN_TR_ADDSTAT(normem);
9936 9915 goto fail;
9937 9916 }
9938 9917 if (anon_try_resv_zone(size, NULL) == 0) {
9939 9918 SEGVN_TR_ADDSTAT(noanon);
9940 9919 goto fail;
9941 9920 }
9942 9921 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
9943 9922 if (amp == NULL) {
9944 9923 anon_unresv_zone(size, NULL);
9945 9924 SEGVN_TR_ADDSTAT(nokmem);
9946 9925 goto fail;
9947 9926 }
9948 9927 ASSERT(amp->refcnt == 1);
9949 9928 amp->a_szc = szc;
9950 9929 svntrp->tr_amp[lgrp_id] = amp;
9951 9930 SEGVN_TR_ADDSTAT(newamp);
9952 9931 }
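	/*
	 * The byte accounting above is optimistic: size is added to
	 * segvn_textrepl_bytes before we know the swap reservation and amp
	 * allocation will succeed; the fail: label below backs it out.
	 */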
9953 9932 svntrp->tr_refcnt++;
9954 9933 ASSERT(svd->svn_trnext == NULL);
9955 9934 ASSERT(svd->svn_trprev == NULL);
9956 9935 svd->svn_trnext = svntrp->tr_svnhead;
9957 9936 svd->svn_trprev = NULL;
9958 9937 if (svntrp->tr_svnhead != NULL) {
9959 9938 svntrp->tr_svnhead->svn_trprev = svd;
9960 9939 }
9961 9940 svntrp->tr_svnhead = svd;
9962 9941 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
9963 9942 ASSERT(amp->refcnt >= 1);
9964 9943 svd->amp = amp;
9965 9944 svd->anon_index = 0;
9966 9945 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
9967 9946 svd->tr_policy_info.mem_lgrpid = lgrp_id;
9968 9947 svd->tr_state = SEGVN_TR_ON;
9969 9948 mutex_exit(&svntr_hashtab[hash].tr_lock);
9970 9949 SEGVN_TR_ADDSTAT(repl);
9971 9950 return;
9972 9951 fail:
9973 9952 ASSERT(segvn_textrepl_bytes >= size);
9974 9953 atomic_add_long(&segvn_textrepl_bytes, -size);
9975 9954 ASSERT(svntrp != NULL);
9976 9955 ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
9977 9956 if (svntrp->tr_refcnt == 0) {
9978 9957 ASSERT(svntrp == svntr_hashtab[hash].tr_head);
9979 9958 svntr_hashtab[hash].tr_head = svntrp->tr_next;
9980 9959 mutex_exit(&svntr_hashtab[hash].tr_lock);
9981 9960 kmem_cache_free(svntr_cache, svntrp);
9982 9961 } else {
9983 9962 mutex_exit(&svntr_hashtab[hash].tr_lock);
9984 9963 }
9985 9964 svd->tr_state = SEGVN_TR_OFF;
9986 9965 }
9987 9966
9988 9967 /*
9989 9968  * Convert the seg back to a regular vnode mapping seg by unbinding it
9990 9969  * from its text replication amp. This routine is typically called when
9991 9970  * the segment is unmapped, but also when it no longer qualifies for
9992 9971  * text replication (e.g. due to protection changes). If unload_unmap
9993 9972  * is set, use HAT_UNLOAD_UNMAP in hat_unload_callback(). If we are
9994 9973  * the last svntr user, free all its anon maps and unhash it.
9995 9974 */
9996 9975 static void
9997 9976 segvn_textunrepl(struct seg *seg, int unload_unmap)
9998 9977 {
9999 9978 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
10000 9979 vnode_t *vp = svd->vp;
10001 9980 u_offset_t off = svd->offset;
10002 9981 size_t size = seg->s_size;
10003 9982 u_offset_t eoff = off + size;
10004 9983 uint_t szc = seg->s_szc;
10005 9984 ulong_t hash = SVNTR_HASH_FUNC(vp);
10006 9985 svntr_t *svntrp;
10007 9986 svntr_t **prv_svntrp;
10008 9987 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
10009 9988 lgrp_id_t i;
10010 9989
10011 9990 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
10012 9991 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
10013 9992 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
10014 9993 ASSERT(svd->tr_state == SEGVN_TR_ON);
10015 9994 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10016 9995 ASSERT(svd->amp != NULL);
10017 9996 ASSERT(svd->amp->refcnt >= 1);
10018 9997 ASSERT(svd->anon_index == 0);
10019 9998 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
10020 9999 ASSERT(svntr_hashtab != NULL);
10021 10000
10022 10001 mutex_enter(&svntr_hashtab[hash].tr_lock);
10023 10002 prv_svntrp = &svntr_hashtab[hash].tr_head;
10024 10003 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
10025 10004 ASSERT(svntrp->tr_refcnt != 0);
10026 10005 if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
10027 10006 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
10028 10007 break;
10029 10008 }
10030 10009 }
10031 10010 if (svntrp == NULL) {
10032 10011 panic("segvn_textunrepl: svntr record not found");
10033 10012 }
10034 10013 if (svntrp->tr_amp[lgrp_id] != svd->amp) {
10035 10014 panic("segvn_textunrepl: amp mismatch");
10036 10015 }
10037 10016 svd->tr_state = SEGVN_TR_OFF;
10038 10017 svd->amp = NULL;
10039 10018 if (svd->svn_trprev == NULL) {
10040 10019 ASSERT(svntrp->tr_svnhead == svd);
10041 10020 svntrp->tr_svnhead = svd->svn_trnext;
10042 10021 if (svntrp->tr_svnhead != NULL) {
10043 10022 svntrp->tr_svnhead->svn_trprev = NULL;
10044 10023 }
10045 10024 svd->svn_trnext = NULL;
10046 10025 } else {
10047 10026 svd->svn_trprev->svn_trnext = svd->svn_trnext;
10048 10027 if (svd->svn_trnext != NULL) {
10049 10028 svd->svn_trnext->svn_trprev = svd->svn_trprev;
10050 10029 svd->svn_trnext = NULL;
10051 10030 }
10052 10031 svd->svn_trprev = NULL;
10053 10032 }
10054 10033 if (--svntrp->tr_refcnt) {
10055 10034 mutex_exit(&svntr_hashtab[hash].tr_lock);
10056 10035 goto done;
10057 10036 }
10058 10037 *prv_svntrp = svntrp->tr_next;
10059 10038 mutex_exit(&svntr_hashtab[hash].tr_lock);
10060 10039 for (i = 0; i < NLGRPS_MAX; i++) {
10061 10040 struct anon_map *amp = svntrp->tr_amp[i];
10062 10041 if (amp == NULL) {
10063 10042 continue;
10064 10043 }
10065 10044 ASSERT(amp->refcnt == 1);
10066 10045 ASSERT(amp->swresv == size);
10067 10046 ASSERT(amp->size == size);
10068 10047 ASSERT(amp->a_szc == szc);
10069 10048 if (amp->a_szc != 0) {
10070 10049 anon_free_pages(amp->ahp, 0, size, szc);
10071 10050 } else {
10072 10051 anon_free(amp->ahp, 0, size);
10073 10052 }
10074 10053 svntrp->tr_amp[i] = NULL;
10075 10054 ASSERT(segvn_textrepl_bytes >= size);
10076 10055 atomic_add_long(&segvn_textrepl_bytes, -size);
10077 10056 anon_unresv_zone(amp->swresv, NULL);
10078 10057 amp->refcnt = 0;
10079 10058 anonmap_free(amp);
10080 10059 }
10081 10060 kmem_cache_free(svntr_cache, svntrp);
10082 10061 done:
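	/*
	 * In either case tear down the existing translations so that future
	 * faults are no longer satisfied from the old replica's pages.
	 */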
10083 10062 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
10084 10063 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
10085 10064 }
10086 10065
10087 10066 /*
10088 10067 * This is called when a MAP_SHARED writable mapping is created to a vnode
10089 10068 * that is currently used for execution (VVMEXEC flag is set). In this case we
10090 10069 * need to prevent further use of existing replicas.
10091 10070 */
10092 10071 static void
10093 10072 segvn_inval_trcache(vnode_t *vp)
10094 10073 {
10095 10074 ulong_t hash = SVNTR_HASH_FUNC(vp);
10096 10075 svntr_t *svntrp;
10097 10076
10098 10077 ASSERT(vp->v_flag & VVMEXEC);
10099 10078
10100 10079 if (svntr_hashtab == NULL) {
10101 10080 return;
10102 10081 }
10103 10082
10104 10083 mutex_enter(&svntr_hashtab[hash].tr_lock);
10105 10084 svntrp = svntr_hashtab[hash].tr_head;
10106 10085 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
10107 10086 ASSERT(svntrp->tr_refcnt != 0);
10108 10087 if (svntrp->tr_vp == vp && svntrp->tr_valid) {
10109 10088 svntrp->tr_valid = 0;
10110 10089 }
10111 10090 }
10112 10091 mutex_exit(&svntr_hashtab[hash].tr_lock);
10113 10092 }
10114 10093
10115 10094 static void
10116 10095 segvn_trasync_thread(void)
10117 10096 {
10118 10097 callb_cpr_t cpr_info;
10119 10098 kmutex_t cpr_lock; /* just for CPR stuff */
10120 10099
10121 10100 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
10122 10101
10123 10102 CALLB_CPR_INIT(&cpr_info, &cpr_lock,
10124 10103 callb_generic_cpr, "segvn_async");
10125 10104
10126 10105 if (segvn_update_textrepl_interval == 0) {
10127 10106 segvn_update_textrepl_interval = segvn_update_tr_time * hz;
10128 10107 } else {
10129 10108 segvn_update_textrepl_interval *= hz;
10130 10109 }
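	/*
	 * segvn_update_tr_time is in seconds; multiplying by hz converts the
	 * interval into the clock ticks that timeout() expects.
	 */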
10131 10110 (void) timeout(segvn_trupdate_wakeup, NULL,
10132 10111 segvn_update_textrepl_interval);
10133 10112
10134 10113 for (;;) {
10135 10114 mutex_enter(&cpr_lock);
10136 10115 CALLB_CPR_SAFE_BEGIN(&cpr_info);
10137 10116 mutex_exit(&cpr_lock);
10138 10117 sema_p(&segvn_trasync_sem);
10139 10118 mutex_enter(&cpr_lock);
10140 10119 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
10141 10120 mutex_exit(&cpr_lock);
10142 10121 segvn_trupdate();
10143 10122 }
10144 10123 }
10145 10124
10146 10125 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;
10147 10126
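/*
 * Timeout handler that re-arms itself every segvn_update_textrepl_interval
 * ticks. It wakes the async thread only when threads have migrated between
 * lgroups since the last snapshot, since otherwise there is nothing to
 * recheck.
 */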
10148 10127 static void
10149 10128 segvn_trupdate_wakeup(void *dummy)
10150 10129 {
10151 10130 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();
10152 10131
10153 10132 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
10154 10133 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
10155 10134 sema_v(&segvn_trasync_sem);
10156 10135 }
10157 10136
10158 10137 if (!segvn_disable_textrepl_update &&
10159 10138 segvn_update_textrepl_interval != 0) {
10160 10139 (void) timeout(segvn_trupdate_wakeup, dummy,
10161 10140 segvn_update_textrepl_interval);
10162 10141 }
10163 10142 }
10164 10143
10165 10144 static void
10166 10145 segvn_trupdate(void)
10167 10146 {
10168 10147 ulong_t hash;
10169 10148 svntr_t *svntrp;
10170 10149 segvn_data_t *svd;
10171 10150
10172 10151 ASSERT(svntr_hashtab != NULL);
10173 10152
10174 10153 for (hash = 0; hash < svntr_hashtab_sz; hash++) {
10175 10154 mutex_enter(&svntr_hashtab[hash].tr_lock);
10176 10155 svntrp = svntr_hashtab[hash].tr_head;
10177 10156 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
10178 10157 ASSERT(svntrp->tr_refcnt != 0);
10179 10158 svd = svntrp->tr_svnhead;
10180 10159 for (; svd != NULL; svd = svd->svn_trnext) {
10181 10160 segvn_trupdate_seg(svd->seg, svd, svntrp,
10182 10161 hash);
10183 10162 }
10184 10163 }
10185 10164 mutex_exit(&svntr_hashtab[hash].tr_lock);
10186 10165 }
10187 10166 }
10188 10167
10189 10168 static void
10190 10169 segvn_trupdate_seg(struct seg *seg,
10191 10170 segvn_data_t *svd,
10192 10171 svntr_t *svntrp,
10193 10172 ulong_t hash)
10194 10173 {
10195 10174 proc_t *p;
10196 10175 lgrp_id_t lgrp_id;
10197 10176 struct as *as;
10198 10177 size_t size;
10199 10178 struct anon_map *amp;
10200 10179
10201 10180 ASSERT(svd->vp != NULL);
10202 10181 ASSERT(svd->vp == svntrp->tr_vp);
10203 10182 ASSERT(svd->offset == svntrp->tr_off);
10204 10183 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
10205 10184 ASSERT(seg != NULL);
10206 10185 ASSERT(svd->seg == seg);
10207 10186 ASSERT(seg->s_data == (void *)svd);
10208 10187 ASSERT(seg->s_szc == svntrp->tr_szc);
10209 10188 ASSERT(svd->tr_state == SEGVN_TR_ON);
10210 10189 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10211 10190 ASSERT(svd->amp != NULL);
10212 10191 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10213 10192 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
10214 10193 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
10215 10194 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
10216 10195 ASSERT(svntrp->tr_refcnt != 0);
10217 10196 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));
10218 10197
10219 10198 as = seg->s_as;
10220 10199 ASSERT(as != NULL && as != &kas);
10221 10200 p = as->a_proc;
10222 10201 ASSERT(p != NULL);
10223 10202 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
10224 10203 lgrp_id = p->p_t1_lgrpid;
10225 10204 if (lgrp_id == LGRP_NONE) {
10226 10205 return;
10227 10206 }
10228 10207 ASSERT(lgrp_id < NLGRPS_MAX);
10229 10208 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10230 10209 return;
10231 10210 }
10232 10211
10233 10212 /*
10234 10213 	 * Use tryenter locking since we take the as/seg locks and the svntr
10235 10214 	 * hash lock in the reverse of the synchronous threads' order.
10236 10215 */
10237 10216 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
10238 10217 SEGVN_TR_ADDSTAT(nolock);
10239 10218 if (segvn_lgrp_trthr_migrs_snpsht) {
10240 10219 segvn_lgrp_trthr_migrs_snpsht = 0;
10241 10220 }
10242 10221 return;
10243 10222 }
10244 10223 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10245 10224 AS_LOCK_EXIT(as, &as->a_lock);
10246 10225 SEGVN_TR_ADDSTAT(nolock);
10247 10226 if (segvn_lgrp_trthr_migrs_snpsht) {
10248 10227 segvn_lgrp_trthr_migrs_snpsht = 0;
10249 10228 }
10250 10229 return;
10251 10230 }
10252 10231 size = seg->s_size;
10253 10232 if (svntrp->tr_amp[lgrp_id] == NULL) {
10254 10233 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
10255 10234 if (trmem > segvn_textrepl_max_bytes) {
10256 10235 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10257 10236 AS_LOCK_EXIT(as, &as->a_lock);
10258 10237 atomic_add_long(&segvn_textrepl_bytes, -size);
10259 10238 SEGVN_TR_ADDSTAT(normem);
10260 10239 return;
10261 10240 }
10262 10241 if (anon_try_resv_zone(size, NULL) == 0) {
10263 10242 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10264 10243 AS_LOCK_EXIT(as, &as->a_lock);
10265 10244 atomic_add_long(&segvn_textrepl_bytes, -size);
10266 10245 SEGVN_TR_ADDSTAT(noanon);
10267 10246 return;
10268 10247 }
10269 10248 			amp = anonmap_alloc(size, size, ANON_NOSLEEP);
10270 10249 if (amp == NULL) {
10271 10250 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10272 10251 AS_LOCK_EXIT(as, &as->a_lock);
10273 10252 atomic_add_long(&segvn_textrepl_bytes, -size);
10274 10253 anon_unresv_zone(size, NULL);
10275 10254 SEGVN_TR_ADDSTAT(nokmem);
10276 10255 return;
10277 10256 }
10278 10257 ASSERT(amp->refcnt == 1);
10279 10258 amp->a_szc = seg->s_szc;
10280 10259 svntrp->tr_amp[lgrp_id] = amp;
10281 10260 }
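	/*
	 * Unlike segvn_textrepl() there is no common fail: label here; each
	 * early return above backs out the byte count and any swap
	 * reservation itself before bailing.
	 */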
10282 10261 /*
10283 10262 	 * We don't need to drop the bucket lock, but we do so here to give
10284 10263 	 * other threads a chance. svntr and svd can't be unlinked as long
10285 10264 	 * as the segment lock is held as a writer and the AS lock is held
10286 10265 	 * too. After we retake the bucket lock we'll continue from where
10287 10266 	 * we left off. We'll be able to reach the end of either list since
10288 10267 	 * new entries are always added to the beginning of the lists.
10289 10268 */
10290 10269 mutex_exit(&svntr_hashtab[hash].tr_lock);
10291 10270 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
10292 10271 mutex_enter(&svntr_hashtab[hash].tr_lock);
10293 10272
10294 10273 ASSERT(svd->tr_state == SEGVN_TR_ON);
10295 10274 ASSERT(svd->amp != NULL);
10296 10275 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10297 10276 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10298 10277 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10299 10278
10300 10279 svd->tr_policy_info.mem_lgrpid = lgrp_id;
10301 10280 svd->amp = svntrp->tr_amp[lgrp_id];
10302 10281 p->p_tr_lgrpid = NLGRPS_MAX;
10303 10282 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10304 10283 AS_LOCK_EXIT(as, &as->a_lock);
10305 10284
10306 10285 ASSERT(svntrp->tr_refcnt != 0);
10307 10286 ASSERT(svd->vp == svntrp->tr_vp);
10308 10287 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10309 10288 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10310 10289 ASSERT(svd->seg == seg);
10311 10290 ASSERT(svd->tr_state == SEGVN_TR_ON);
10312 10291
10313 10292 SEGVN_TR_ADDSTAT(asyncrepl);
10314 10293 }
5932 lines elided