6583 remove whole-process swapping
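Context for this hunk: the seg_spt.c portion of this change only removes the per-segment "swapout" hook that whole-process swapping used to dispatch through. Each segment driver publishes a struct seg_ops vector of function pointers that the VM layer calls through, so dropping whole-process swapping means deleting the swapout slot from the positional initializers in this file (segspt_ops and segspt_shmops) together with the segspt_shmswapout() declaration, presumably alongside the matching change to struct seg_ops in <vm/seg.h>, which is not part of this file. A minimal, illustrative sketch of that dispatch pattern (not the verbatim <vm/seg.h> definitions) looks like this:

	/*
	 * Illustration only: a trimmed-down seg_ops with the swapout
	 * member already gone.  The real structure in <vm/seg.h> has
	 * many more hooks; struct seg is left opaque here.
	 */
	#include <sys/types.h>		/* caddr_t, size_t */

	struct seg;

	struct seg_ops {
		int	(*dup)(struct seg *, struct seg *);
		int	(*unmap)(struct seg *, caddr_t, size_t);
		void	(*free)(struct seg *);
		/* ... remaining hooks; no swapout slot any more ... */
	};

	/* Callers dispatch through the vector with macros such as: */
	#define	SEGOP_DUP(s, n)		(*(s)->s_ops->dup)((s), (n))
	#define	SEGOP_UNMAP(s, a, l)	(*(s)->s_ops->unmap)((s), (a), (l))

Because the ops tables are initialized positionally, each table shown here simply loses one entry at the old swapout position; the surrounding code in this file is otherwise unchanged.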
--- old/usr/src/uts/common/vm/seg_spt.c
+++ new/usr/src/uts/common/vm/seg_spt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/param.h>
26 26 #include <sys/user.h>
27 27 #include <sys/mman.h>
28 28 #include <sys/kmem.h>
29 29 #include <sys/sysmacros.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/tuneable.h>
33 33 #include <vm/hat.h>
34 34 #include <vm/seg.h>
35 35 #include <vm/as.h>
36 36 #include <vm/anon.h>
37 37 #include <vm/page.h>
38 38 #include <sys/buf.h>
39 39 #include <sys/swap.h>
40 40 #include <sys/atomic.h>
41 41 #include <vm/seg_spt.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/vtrace.h>
44 44 #include <sys/shm.h>
45 45 #include <sys/shm_impl.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/vmsystm.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/project.h>
50 50 #include <sys/tnf_probe.h>
51 51 #include <sys/zone.h>
52 52
53 53 #define SEGSPTADDR (caddr_t)0x0
54 54
55 55 /*
56 56 * # pages used for spt
57 57 */
58 58 size_t spt_used;
59 59
60 60 /*
61 61 	 * segspt_minfree is the memory left for the system after ISM
62 62 	 * has locked its pages; it is set to 5% of availrmem in
63 63 	 * sptcreate when ISM is created. ISM should not use more
64 64 	 * than ~90% of availrmem; if it does, then the performance
65 65 	 * of the system may decrease. Machines with large memories may
66 66 	 * be able to use more memory for ISM, so we set the default
67 67 	 * segspt_minfree to 5% (which gives ISM at most 95% of availrmem).
68 68 	 * If somebody wants even more memory for ISM (risking hanging
69 69 	 * the system), they can patch segspt_minfree to a smaller number.
70 70 */
71 71 pgcnt_t segspt_minfree = 0;
72 72
73 73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 75 static void segspt_free(struct seg *seg);
76 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78 78
79 79 static void
80 80 segspt_badop()
81 81 {
82 82 panic("segspt_badop called");
83 83 /*NOTREACHED*/
84 84 }
85 85
86 86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
87 87
88 88 struct seg_ops segspt_ops = {
89 89 SEGSPT_BADOP(int), /* dup */
90 90 segspt_unmap,
91 91 segspt_free,
92 92 SEGSPT_BADOP(int), /* fault */
93 93 SEGSPT_BADOP(faultcode_t), /* faulta */
94 94 SEGSPT_BADOP(int), /* setprot */
95 95 SEGSPT_BADOP(int), /* checkprot */
96 96 SEGSPT_BADOP(int), /* kluster */
97 - SEGSPT_BADOP(size_t), /* swapout */
98 97 SEGSPT_BADOP(int), /* sync */
99 98 SEGSPT_BADOP(size_t), /* incore */
100 99 SEGSPT_BADOP(int), /* lockop */
101 100 SEGSPT_BADOP(int), /* getprot */
102 101 SEGSPT_BADOP(u_offset_t), /* getoffset */
103 102 SEGSPT_BADOP(int), /* gettype */
104 103 SEGSPT_BADOP(int), /* getvp */
105 104 SEGSPT_BADOP(int), /* advise */
106 105 SEGSPT_BADOP(void), /* dump */
107 106 SEGSPT_BADOP(int), /* pagelock */
108 107 SEGSPT_BADOP(int), /* setpgsz */
109 108 SEGSPT_BADOP(int), /* getmemid */
110 109 segspt_getpolicy, /* getpolicy */
111 110 SEGSPT_BADOP(int), /* capable */
112 111 seg_inherit_notsup /* inherit */
113 112 };
114 113
115 114 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
116 115 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
117 116 static void segspt_shmfree(struct seg *seg);
118 117 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
119 118 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
120 119 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
121 120 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
122 121 register size_t len, register uint_t prot);
123 122 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
124 123 uint_t prot);
125 124 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
126 -static size_t segspt_shmswapout(struct seg *seg);
127 125 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
128 126 register char *vec);
129 127 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
130 128 int attr, uint_t flags);
131 129 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
132 130 int attr, int op, ulong_t *lockmap, size_t pos);
133 131 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
134 132 uint_t *protv);
135 133 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
136 134 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
137 135 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
138 136 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
139 137 uint_t behav);
140 138 static void segspt_shmdump(struct seg *seg);
141 139 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
142 140 struct page ***, enum lock_type, enum seg_rw);
143 141 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
144 142 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
145 143 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
146 144 static int segspt_shmcapable(struct seg *, segcapability_t);
147 145
148 146 struct seg_ops segspt_shmops = {
149 147 segspt_shmdup,
150 148 segspt_shmunmap,
151 149 segspt_shmfree,
152 150 segspt_shmfault,
153 151 segspt_shmfaulta,
154 152 segspt_shmsetprot,
155 153 segspt_shmcheckprot,
156 154 segspt_shmkluster,
157 - segspt_shmswapout,
158 155 segspt_shmsync,
159 156 segspt_shmincore,
160 157 segspt_shmlockop,
161 158 segspt_shmgetprot,
162 159 segspt_shmgetoffset,
163 160 segspt_shmgettype,
164 161 segspt_shmgetvp,
165 162 segspt_shmadvise, /* advise */
166 163 segspt_shmdump,
167 164 segspt_shmpagelock,
168 165 segspt_shmsetpgsz,
169 166 segspt_shmgetmemid,
170 167 segspt_shmgetpolicy,
171 168 segspt_shmcapable,
172 169 seg_inherit_notsup
173 170 };
174 171
175 172 static void segspt_purge(struct seg *seg);
176 173 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
177 174 enum seg_rw, int);
178 175 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
179 176 page_t **ppa);
180 177
181 178
182 179
183 180 /*ARGSUSED*/
184 181 int
185 182 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
186 183 uint_t prot, uint_t flags, uint_t share_szc)
187 184 {
188 185 int err;
189 186 struct as *newas;
190 187 struct segspt_crargs sptcargs;
191 188
192 189 #ifdef DEBUG
193 190 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
194 191 tnf_ulong, size, size );
195 192 #endif
196 193 	if (segspt_minfree == 0)	/* leave min 5% of availrmem */
197 194 segspt_minfree = availrmem/20; /* for the system */
198 195
199 196 if (!hat_supported(HAT_SHARED_PT, (void *)0))
200 197 return (EINVAL);
201 198
202 199 /*
203 200 * get a new as for this shared memory segment
204 201 */
205 202 newas = as_alloc();
206 203 newas->a_proc = NULL;
207 204 sptcargs.amp = amp;
208 205 sptcargs.prot = prot;
209 206 sptcargs.flags = flags;
210 207 sptcargs.szc = share_szc;
211 208 /*
212 209 * create a shared page table (spt) segment
213 210 */
214 211
215 212 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
216 213 as_free(newas);
217 214 return (err);
218 215 }
219 216 *sptseg = sptcargs.seg_spt;
220 217 return (0);
221 218 }
222 219
223 220 void
224 221 sptdestroy(struct as *as, struct anon_map *amp)
225 222 {
226 223
227 224 #ifdef DEBUG
228 225 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
229 226 #endif
230 227 (void) as_unmap(as, SEGSPTADDR, amp->size);
231 228 as_free(as);
232 229 }
233 230
234 231 /*
235 232 * called from seg_free().
236 233 * free (i.e., unlock, unmap, return to free list)
237 234 * all the pages in the given seg.
238 235 */
239 236 void
240 237 segspt_free(struct seg *seg)
241 238 {
242 239 struct spt_data *sptd = (struct spt_data *)seg->s_data;
243 240
244 241 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
245 242
246 243 if (sptd != NULL) {
247 244 if (sptd->spt_realsize)
248 245 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
249 246
250 247 if (sptd->spt_ppa_lckcnt)
251 248 kmem_free(sptd->spt_ppa_lckcnt,
252 249 sizeof (*sptd->spt_ppa_lckcnt)
253 250 * btopr(sptd->spt_amp->size));
254 251 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
255 252 cv_destroy(&sptd->spt_cv);
256 253 mutex_destroy(&sptd->spt_lock);
257 254 kmem_free(sptd, sizeof (*sptd));
258 255 }
259 256 }
260 257
261 258 /*ARGSUSED*/
262 259 static int
263 260 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
264 261 uint_t flags)
265 262 {
266 263 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
267 264
268 265 return (0);
269 266 }
270 267
271 268 /*ARGSUSED*/
272 269 static size_t
273 270 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
274 271 {
275 272 caddr_t eo_seg;
276 273 pgcnt_t npages;
277 274 struct shm_data *shmd = (struct shm_data *)seg->s_data;
278 275 struct seg *sptseg;
279 276 struct spt_data *sptd;
280 277
281 278 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
282 279 #ifdef lint
283 280 seg = seg;
284 281 #endif
285 282 sptseg = shmd->shm_sptseg;
286 283 sptd = sptseg->s_data;
287 284
288 285 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
289 286 eo_seg = addr + len;
290 287 while (addr < eo_seg) {
291 288 /* page exists, and it's locked. */
292 289 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
293 290 SEG_PAGE_ANON;
294 291 addr += PAGESIZE;
295 292 }
296 293 return (len);
297 294 } else {
298 295 struct anon_map *amp = shmd->shm_amp;
299 296 struct anon *ap;
300 297 page_t *pp;
301 298 pgcnt_t anon_index;
302 299 struct vnode *vp;
303 300 u_offset_t off;
304 301 ulong_t i;
305 302 int ret;
306 303 anon_sync_obj_t cookie;
307 304
308 305 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
309 306 anon_index = seg_page(seg, addr);
310 307 npages = btopr(len);
311 308 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
312 309 return (EINVAL);
313 310 }
314 311 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
315 312 for (i = 0; i < npages; i++, anon_index++) {
316 313 ret = 0;
317 314 anon_array_enter(amp, anon_index, &cookie);
318 315 ap = anon_get_ptr(amp->ahp, anon_index);
319 316 if (ap != NULL) {
320 317 swap_xlate(ap, &vp, &off);
321 318 anon_array_exit(&cookie);
322 319 pp = page_lookup_nowait(vp, off, SE_SHARED);
323 320 if (pp != NULL) {
324 321 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
325 322 page_unlock(pp);
326 323 }
327 324 } else {
328 325 anon_array_exit(&cookie);
329 326 }
330 327 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
331 328 ret |= SEG_PAGE_LOCKED;
332 329 }
333 330 *vec++ = (char)ret;
334 331 }
335 332 		ANON_LOCK_EXIT(&amp->a_rwlock);
336 333 return (len);
337 334 }
338 335 }
339 336
340 337 static int
341 338 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
342 339 {
343 340 size_t share_size;
344 341
345 342 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
346 343
347 344 /*
348 345 * seg.s_size may have been rounded up to the largest page size
349 346 * in shmat().
350 347 	 * XXX This should be cleaned up. sptdestroy should take a length
351 348 * argument which should be the same as sptcreate. Then
352 349 * this rounding would not be needed (or is done in shm.c)
353 350 * Only the check for full segment will be needed.
354 351 *
355 352 * XXX -- shouldn't raddr == 0 always? These tests don't seem
356 353 * to be useful at all.
357 354 */
358 355 share_size = page_get_pagesize(seg->s_szc);
359 356 ssize = P2ROUNDUP(ssize, share_size);
360 357
361 358 if (raddr == seg->s_base && ssize == seg->s_size) {
362 359 seg_free(seg);
363 360 return (0);
364 361 } else
365 362 return (EINVAL);
366 363 }
367 364
368 365 int
369 366 segspt_create(struct seg *seg, caddr_t argsp)
370 367 {
371 368 int err;
372 369 caddr_t addr = seg->s_base;
373 370 struct spt_data *sptd;
374 371 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
375 372 struct anon_map *amp = sptcargs->amp;
376 373 struct kshmid *sp = amp->a_sp;
377 374 struct cred *cred = CRED();
378 375 ulong_t i, j, anon_index = 0;
379 376 pgcnt_t npages = btopr(amp->size);
380 377 struct vnode *vp;
381 378 page_t **ppa;
382 379 uint_t hat_flags;
383 380 size_t pgsz;
384 381 pgcnt_t pgcnt;
385 382 caddr_t a;
386 383 pgcnt_t pidx;
387 384 size_t sz;
388 385 proc_t *procp = curproc;
389 386 rctl_qty_t lockedbytes = 0;
390 387 kproject_t *proj;
391 388
392 389 /*
393 390 * We are holding the a_lock on the underlying dummy as,
394 391 * so we can make calls to the HAT layer.
395 392 */
396 393 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
397 394 ASSERT(sp != NULL);
398 395
399 396 #ifdef DEBUG
400 397 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
401 398 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
402 399 #endif
403 400 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
404 401 if (err = anon_swap_adjust(npages))
405 402 return (err);
406 403 }
407 404 err = ENOMEM;
408 405
409 406 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
410 407 goto out1;
411 408
412 409 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
413 410 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
414 411 KM_NOSLEEP)) == NULL)
415 412 goto out2;
416 413 }
417 414
418 415 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
419 416
420 417 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
421 418 goto out3;
422 419
423 420 seg->s_ops = &segspt_ops;
424 421 sptd->spt_vp = vp;
425 422 sptd->spt_amp = amp;
426 423 sptd->spt_prot = sptcargs->prot;
427 424 sptd->spt_flags = sptcargs->flags;
428 425 seg->s_data = (caddr_t)sptd;
429 426 sptd->spt_ppa = NULL;
430 427 sptd->spt_ppa_lckcnt = NULL;
431 428 seg->s_szc = sptcargs->szc;
432 429 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
433 430 sptd->spt_gen = 0;
434 431
435 432 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
436 433 if (seg->s_szc > amp->a_szc) {
437 434 amp->a_szc = seg->s_szc;
438 435 }
439 436 	ANON_LOCK_EXIT(&amp->a_rwlock);
440 437
441 438 /*
442 439 * Set policy to affect initial allocation of pages in
443 440 * anon_map_createpages()
444 441 */
445 442 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
446 443 NULL, 0, ptob(npages));
447 444
448 445 if (sptcargs->flags & SHM_PAGEABLE) {
449 446 size_t share_sz;
450 447 pgcnt_t new_npgs, more_pgs;
451 448 struct anon_hdr *nahp;
452 449 zone_t *zone;
453 450
454 451 share_sz = page_get_pagesize(seg->s_szc);
455 452 if (!IS_P2ALIGNED(amp->size, share_sz)) {
456 453 /*
457 454 * We are rounding up the size of the anon array
458 455 		 * to a 4 M boundary because we always create 4 M
459 456 		 * of pages when locking and faulting pages, so we
460 457 		 * don't have to check for all corner cases, e.g.
461 458 		 * whether there is enough space to allocate a 4 M
462 459 * page.
463 460 */
464 461 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
465 462 more_pgs = new_npgs - npages;
466 463
467 464 /*
468 465 * The zone will never be NULL, as a fully created
469 466 * shm always has an owning zone.
470 467 */
471 468 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
472 469 ASSERT(zone != NULL);
473 470 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
474 471 err = ENOMEM;
475 472 goto out4;
476 473 }
477 474
478 475 nahp = anon_create(new_npgs, ANON_SLEEP);
479 476 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
480 477 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
481 478 ANON_SLEEP);
482 479 anon_release(amp->ahp, npages);
483 480 amp->ahp = nahp;
484 481 ASSERT(amp->swresv == ptob(npages));
485 482 amp->swresv = amp->size = ptob(new_npgs);
486 483 			ANON_LOCK_EXIT(&amp->a_rwlock);
487 484 npages = new_npgs;
488 485 }
489 486
490 487 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
491 488 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
492 489 sptd->spt_pcachecnt = 0;
493 490 sptd->spt_realsize = ptob(npages);
494 491 sptcargs->seg_spt = seg;
495 492 return (0);
496 493 }
497 494
498 495 /*
499 496 * get array of pages for each anon slot in amp
500 497 */
501 498 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
502 499 seg, addr, S_CREATE, cred)) != 0)
503 500 goto out4;
504 501
505 502 mutex_enter(&sp->shm_mlock);
506 503
507 504 /* May be partially locked, so, count bytes to charge for locking */
508 505 for (i = 0; i < npages; i++)
509 506 if (ppa[i]->p_lckcnt == 0)
510 507 lockedbytes += PAGESIZE;
511 508
512 509 proj = sp->shm_perm.ipc_proj;
513 510
514 511 if (lockedbytes > 0) {
515 512 mutex_enter(&procp->p_lock);
516 513 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
517 514 mutex_exit(&procp->p_lock);
518 515 mutex_exit(&sp->shm_mlock);
519 516 for (i = 0; i < npages; i++)
520 517 page_unlock(ppa[i]);
521 518 err = ENOMEM;
522 519 goto out4;
523 520 }
524 521 mutex_exit(&procp->p_lock);
525 522 }
526 523
527 524 /*
528 525 * addr is initial address corresponding to the first page on ppa list
529 526 */
530 527 for (i = 0; i < npages; i++) {
531 528 /* attempt to lock all pages */
532 529 if (page_pp_lock(ppa[i], 0, 1) == 0) {
533 530 /*
534 531 * if unable to lock any page, unlock all
535 532 * of them and return error
536 533 */
537 534 for (j = 0; j < i; j++)
538 535 page_pp_unlock(ppa[j], 0, 1);
539 536 for (i = 0; i < npages; i++)
540 537 page_unlock(ppa[i]);
541 538 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
542 539 mutex_exit(&sp->shm_mlock);
543 540 err = ENOMEM;
544 541 goto out4;
545 542 }
546 543 }
547 544 mutex_exit(&sp->shm_mlock);
548 545
549 546 /*
550 547 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
551 548 	 * for the entire life of the segment, for example platforms
552 549 * that do not support Dynamic Reconfiguration.
553 550 */
554 551 hat_flags = HAT_LOAD_SHARE;
555 552 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
556 553 hat_flags |= HAT_LOAD_LOCK;
557 554
558 555 /*
559 556 	 * Load translations one large page at a time
560 557 * to make sure we don't create mappings bigger than
561 558 * segment's size code in case underlying pages
562 559 * are shared with segvn's segment that uses bigger
563 560 * size code than we do.
564 561 */
565 562 pgsz = page_get_pagesize(seg->s_szc);
566 563 pgcnt = page_get_pagecnt(seg->s_szc);
567 564 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
568 565 sz = MIN(pgsz, ptob(npages - pidx));
569 566 hat_memload_array(seg->s_as->a_hat, a, sz,
570 567 &ppa[pidx], sptd->spt_prot, hat_flags);
571 568 }
572 569
573 570 /*
574 571 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
575 572 * we will leave the pages locked SE_SHARED for the life
576 573 * of the ISM segment. This will prevent any calls to
577 574 * hat_pageunload() on this ISM segment for those platforms.
578 575 */
579 576 if (!(hat_flags & HAT_LOAD_LOCK)) {
580 577 /*
581 578 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
582 579 * we no longer need to hold the SE_SHARED lock on the pages,
583 580 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
584 581 * SE_SHARED lock on the pages as necessary.
585 582 */
586 583 for (i = 0; i < npages; i++)
587 584 page_unlock(ppa[i]);
588 585 }
589 586 sptd->spt_pcachecnt = 0;
590 587 kmem_free(ppa, ((sizeof (page_t *)) * npages));
591 588 sptd->spt_realsize = ptob(npages);
592 589 atomic_add_long(&spt_used, npages);
593 590 sptcargs->seg_spt = seg;
594 591 return (0);
595 592
596 593 out4:
597 594 seg->s_data = NULL;
598 595 kmem_free(vp, sizeof (*vp));
599 596 cv_destroy(&sptd->spt_cv);
600 597 out3:
601 598 mutex_destroy(&sptd->spt_lock);
602 599 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
603 600 kmem_free(ppa, (sizeof (*ppa) * npages));
604 601 out2:
605 602 kmem_free(sptd, sizeof (*sptd));
606 603 out1:
607 604 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
608 605 anon_swap_restore(npages);
609 606 return (err);
610 607 }
611 608
612 609 /*ARGSUSED*/
613 610 void
614 611 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
615 612 {
616 613 struct page *pp;
617 614 struct spt_data *sptd = (struct spt_data *)seg->s_data;
618 615 pgcnt_t npages;
619 616 ulong_t anon_idx;
620 617 struct anon_map *amp;
621 618 struct anon *ap;
622 619 struct vnode *vp;
623 620 u_offset_t off;
624 621 uint_t hat_flags;
625 622 int root = 0;
626 623 pgcnt_t pgs, curnpgs = 0;
627 624 page_t *rootpp;
628 625 rctl_qty_t unlocked_bytes = 0;
629 626 kproject_t *proj;
630 627 kshmid_t *sp;
631 628
632 629 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
633 630
634 631 len = P2ROUNDUP(len, PAGESIZE);
635 632
636 633 npages = btop(len);
637 634
638 635 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
639 636 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
640 637 (sptd->spt_flags & SHM_PAGEABLE)) {
641 638 hat_flags = HAT_UNLOAD_UNMAP;
642 639 }
643 640
644 641 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
645 642
646 643 amp = sptd->spt_amp;
647 644 if (sptd->spt_flags & SHM_PAGEABLE)
648 645 npages = btop(amp->size);
649 646
650 647 ASSERT(amp != NULL);
651 648
652 649 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
653 650 sp = amp->a_sp;
654 651 proj = sp->shm_perm.ipc_proj;
655 652 mutex_enter(&sp->shm_mlock);
656 653 }
657 654 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
658 655 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
659 656 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
660 657 panic("segspt_free_pages: null app");
661 658 /*NOTREACHED*/
662 659 }
663 660 } else {
664 661 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
665 662 == NULL)
666 663 continue;
667 664 }
668 665 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
669 666 swap_xlate(ap, &vp, &off);
670 667
671 668 /*
672 669 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
673 670 	 * the pages won't have the SE_SHARED lock at this
674 671 * point.
675 672 *
676 673 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
677 674 * the pages are still held SE_SHARED locked from the
678 675 * original segspt_create()
679 676 *
680 677 * Our goal is to get SE_EXCL lock on each page, remove
681 678 * permanent lock on it and invalidate the page.
682 679 */
683 680 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
684 681 if (hat_flags == HAT_UNLOAD_UNMAP)
685 682 pp = page_lookup(vp, off, SE_EXCL);
686 683 else {
687 684 if ((pp = page_find(vp, off)) == NULL) {
688 685 panic("segspt_free_pages: "
689 686 "page not locked");
690 687 /*NOTREACHED*/
691 688 }
692 689 if (!page_tryupgrade(pp)) {
693 690 page_unlock(pp);
694 691 pp = page_lookup(vp, off, SE_EXCL);
695 692 }
696 693 }
697 694 if (pp == NULL) {
698 695 panic("segspt_free_pages: "
699 696 "page not in the system");
700 697 /*NOTREACHED*/
701 698 }
702 699 ASSERT(pp->p_lckcnt > 0);
703 700 page_pp_unlock(pp, 0, 1);
704 701 if (pp->p_lckcnt == 0)
705 702 unlocked_bytes += PAGESIZE;
706 703 } else {
707 704 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
708 705 continue;
709 706 }
710 707 /*
711 708 * It's logical to invalidate the pages here as in most cases
712 709 * these were created by segspt.
713 710 */
714 711 if (pp->p_szc != 0) {
715 712 if (root == 0) {
716 713 ASSERT(curnpgs == 0);
717 714 root = 1;
718 715 rootpp = pp;
719 716 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
720 717 ASSERT(pgs > 1);
721 718 ASSERT(IS_P2ALIGNED(pgs, pgs));
722 719 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
723 720 curnpgs--;
724 721 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
725 722 ASSERT(curnpgs == 1);
726 723 ASSERT(page_pptonum(pp) ==
727 724 page_pptonum(rootpp) + (pgs - 1));
728 725 page_destroy_pages(rootpp);
729 726 root = 0;
730 727 curnpgs = 0;
731 728 } else {
732 729 ASSERT(curnpgs > 1);
733 730 ASSERT(page_pptonum(pp) ==
734 731 page_pptonum(rootpp) + (pgs - curnpgs));
735 732 curnpgs--;
736 733 }
737 734 } else {
738 735 if (root != 0 || curnpgs != 0) {
739 736 panic("segspt_free_pages: bad large page");
740 737 /*NOTREACHED*/
741 738 }
742 739 /*
743 740 * Before destroying the pages, we need to take care
744 741 * of the rctl locked memory accounting. For that
745 742 	 * we need to calculate the unlocked_bytes.
746 743 */
747 744 if (pp->p_lckcnt > 0)
748 745 unlocked_bytes += PAGESIZE;
749 746 /*LINTED: constant in conditional context */
750 747 VN_DISPOSE(pp, B_INVAL, 0, kcred);
751 748 }
752 749 }
753 750 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
754 751 if (unlocked_bytes > 0)
755 752 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
756 753 mutex_exit(&sp->shm_mlock);
757 754 }
758 755 if (root != 0 || curnpgs != 0) {
759 756 panic("segspt_free_pages: bad large page");
760 757 /*NOTREACHED*/
761 758 }
762 759
763 760 /*
764 761 * mark that pages have been released
765 762 */
766 763 sptd->spt_realsize = 0;
767 764
768 765 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
769 766 atomic_add_long(&spt_used, -npages);
770 767 anon_swap_restore(npages);
771 768 }
772 769 }
773 770
774 771 /*
775 772 * Get memory allocation policy info for specified address in given segment
776 773 */
777 774 static lgrp_mem_policy_info_t *
778 775 segspt_getpolicy(struct seg *seg, caddr_t addr)
779 776 {
780 777 struct anon_map *amp;
781 778 ulong_t anon_index;
782 779 lgrp_mem_policy_info_t *policy_info;
783 780 struct spt_data *spt_data;
784 781
785 782 ASSERT(seg != NULL);
786 783
787 784 /*
788 785 * Get anon_map from segspt
789 786 *
790 787 * Assume that no lock needs to be held on anon_map, since
791 788 * it should be protected by its reference count which must be
792 789 * nonzero for an existing segment
793 790 * Need to grab readers lock on policy tree though
794 791 */
795 792 spt_data = (struct spt_data *)seg->s_data;
796 793 if (spt_data == NULL)
797 794 return (NULL);
798 795 amp = spt_data->spt_amp;
799 796 ASSERT(amp->refcnt != 0);
800 797
801 798 /*
802 799 * Get policy info
803 800 *
804 801 * Assume starting anon index of 0
805 802 */
806 803 anon_index = seg_page(seg, addr);
807 804 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
808 805
809 806 return (policy_info);
810 807 }
811 808
812 809 /*
813 810 * DISM only.
814 811 * Return locked pages over a given range.
815 812 *
816 813 * We will cache all DISM locked pages and save the pplist for the
817 814 * entire segment in the ppa field of the underlying DISM segment structure.
818 815 * Later, during a call to segspt_reclaim() we will use this ppa array
819 816 * to page_unlock() all of the pages and then we will free this ppa list.
820 817 */
821 818 /*ARGSUSED*/
822 819 static int
823 820 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
824 821 struct page ***ppp, enum lock_type type, enum seg_rw rw)
825 822 {
826 823 struct shm_data *shmd = (struct shm_data *)seg->s_data;
827 824 struct seg *sptseg = shmd->shm_sptseg;
828 825 struct spt_data *sptd = sptseg->s_data;
829 826 pgcnt_t pg_idx, npages, tot_npages, npgs;
830 827 struct page **pplist, **pl, **ppa, *pp;
831 828 struct anon_map *amp;
832 829 spgcnt_t an_idx;
833 830 int ret = ENOTSUP;
834 831 uint_t pl_built = 0;
835 832 struct anon *ap;
836 833 struct vnode *vp;
837 834 u_offset_t off;
838 835 pgcnt_t claim_availrmem = 0;
839 836 uint_t szc;
840 837
841 838 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
842 839 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
843 840
844 841 /*
845 842 * We want to lock/unlock the entire ISM segment. Therefore,
846 843 	 * we will be using the underlying sptseg and its base address
847 844 * and length for the caching arguments.
848 845 */
849 846 ASSERT(sptseg);
850 847 ASSERT(sptd);
851 848
852 849 pg_idx = seg_page(seg, addr);
853 850 npages = btopr(len);
854 851
855 852 /*
856 853 * check if the request is larger than number of pages covered
857 854 * by amp
858 855 */
859 856 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
860 857 *ppp = NULL;
861 858 return (ENOTSUP);
862 859 }
863 860
864 861 if (type == L_PAGEUNLOCK) {
865 862 ASSERT(sptd->spt_ppa != NULL);
866 863
867 864 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
868 865 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
869 866
870 867 /*
871 868 * If someone is blocked while unmapping, we purge
872 869 * segment page cache and thus reclaim pplist synchronously
873 870 * without waiting for seg_pasync_thread. This speeds up
874 871 * unmapping in cases where munmap(2) is called, while
875 872 * raw async i/o is still in progress or where a thread
876 873 * exits on data fault in a multithreaded application.
877 874 */
878 875 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
879 876 (AS_ISUNMAPWAIT(seg->s_as) &&
880 877 shmd->shm_softlockcnt > 0)) {
881 878 segspt_purge(seg);
882 879 }
883 880 return (0);
884 881 }
885 882
886 883 /* The L_PAGELOCK case ... */
887 884
888 885 if (sptd->spt_flags & DISM_PPA_CHANGED) {
889 886 segspt_purge(seg);
890 887 /*
891 888 		 * for DISM the ppa needs to be rebuilt since
892 889 		 * the number of locked pages could have changed
893 890 */
894 891 *ppp = NULL;
895 892 return (ENOTSUP);
896 893 }
897 894
898 895 /*
899 896 * First try to find pages in segment page cache, without
900 897 * holding the segment lock.
901 898 */
902 899 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
903 900 S_WRITE, SEGP_FORCE_WIRED);
904 901 if (pplist != NULL) {
905 902 ASSERT(sptd->spt_ppa != NULL);
906 903 ASSERT(sptd->spt_ppa == pplist);
907 904 ppa = sptd->spt_ppa;
908 905 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
909 906 if (ppa[an_idx] == NULL) {
910 907 seg_pinactive(seg, NULL, seg->s_base,
911 908 sptd->spt_amp->size, ppa,
912 909 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
913 910 *ppp = NULL;
914 911 return (ENOTSUP);
915 912 }
916 913 if ((szc = ppa[an_idx]->p_szc) != 0) {
917 914 npgs = page_get_pagecnt(szc);
918 915 an_idx = P2ROUNDUP(an_idx + 1, npgs);
919 916 } else {
920 917 an_idx++;
921 918 }
922 919 }
923 920 /*
924 921 * Since we cache the entire DISM segment, we want to
925 922 * set ppp to point to the first slot that corresponds
926 923 * to the requested addr, i.e. pg_idx.
927 924 */
928 925 *ppp = &(sptd->spt_ppa[pg_idx]);
929 926 return (0);
930 927 }
931 928
932 929 mutex_enter(&sptd->spt_lock);
933 930 /*
934 931 * try to find pages in segment page cache with mutex
935 932 */
936 933 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
937 934 S_WRITE, SEGP_FORCE_WIRED);
938 935 if (pplist != NULL) {
939 936 ASSERT(sptd->spt_ppa != NULL);
940 937 ASSERT(sptd->spt_ppa == pplist);
941 938 ppa = sptd->spt_ppa;
942 939 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
943 940 if (ppa[an_idx] == NULL) {
944 941 mutex_exit(&sptd->spt_lock);
945 942 seg_pinactive(seg, NULL, seg->s_base,
946 943 sptd->spt_amp->size, ppa,
947 944 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
948 945 *ppp = NULL;
949 946 return (ENOTSUP);
950 947 }
951 948 if ((szc = ppa[an_idx]->p_szc) != 0) {
952 949 npgs = page_get_pagecnt(szc);
953 950 an_idx = P2ROUNDUP(an_idx + 1, npgs);
954 951 } else {
955 952 an_idx++;
956 953 }
957 954 }
958 955 /*
959 956 * Since we cache the entire DISM segment, we want to
960 957 * set ppp to point to the first slot that corresponds
961 958 * to the requested addr, i.e. pg_idx.
962 959 */
963 960 mutex_exit(&sptd->spt_lock);
964 961 *ppp = &(sptd->spt_ppa[pg_idx]);
965 962 return (0);
966 963 }
967 964 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
968 965 SEGP_FORCE_WIRED) == SEGP_FAIL) {
969 966 mutex_exit(&sptd->spt_lock);
970 967 *ppp = NULL;
971 968 return (ENOTSUP);
972 969 }
973 970
974 971 /*
975 972 * No need to worry about protections because DISM pages are always rw.
976 973 */
977 974 pl = pplist = NULL;
978 975 amp = sptd->spt_amp;
979 976
980 977 /*
981 978 * Do we need to build the ppa array?
982 979 */
983 980 if (sptd->spt_ppa == NULL) {
984 981 pgcnt_t lpg_cnt = 0;
985 982
986 983 pl_built = 1;
987 984 tot_npages = btopr(sptd->spt_amp->size);
988 985
989 986 ASSERT(sptd->spt_pcachecnt == 0);
990 987 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
991 988 pl = pplist;
992 989
993 990 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
994 991 for (an_idx = 0; an_idx < tot_npages; ) {
995 992 ap = anon_get_ptr(amp->ahp, an_idx);
996 993 /*
997 994 * Cache only mlocked pages. For large pages
998 995 * if one (constituent) page is mlocked
999 996 * all pages for that large page
1000 997 * are cached also. This is for quick
1001 998 			 * lookups of the ppa array.
1002 999 */
1003 1000 if ((ap != NULL) && (lpg_cnt != 0 ||
1004 1001 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1005 1002
1006 1003 swap_xlate(ap, &vp, &off);
1007 1004 pp = page_lookup(vp, off, SE_SHARED);
1008 1005 ASSERT(pp != NULL);
1009 1006 if (lpg_cnt == 0) {
1010 1007 lpg_cnt++;
1011 1008 /*
1012 1009 * For a small page, we are done --
1013 1010 				 * lpg_cnt is reset to 0 below.
1014 1011 *
1015 1012 * For a large page, we are guaranteed
1016 1013 * to find the anon structures of all
1017 1014 * constituent pages and a non-zero
1018 1015 * lpg_cnt ensures that we don't test
1019 1016 * for mlock for these. We are done
1020 1017 				 * when lpg_cnt reaches (npgs + 1).
1021 1018 * If we are not the first constituent
1022 1019 * page, restart at the first one.
1023 1020 */
1024 1021 npgs = page_get_pagecnt(pp->p_szc);
1025 1022 if (!IS_P2ALIGNED(an_idx, npgs)) {
1026 1023 an_idx = P2ALIGN(an_idx, npgs);
1027 1024 page_unlock(pp);
1028 1025 continue;
1029 1026 }
1030 1027 }
1031 1028 if (++lpg_cnt > npgs)
1032 1029 lpg_cnt = 0;
1033 1030
1034 1031 /*
1035 1032 * availrmem is decremented only
1036 1033 * for unlocked pages
1037 1034 */
1038 1035 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1039 1036 claim_availrmem++;
1040 1037 pplist[an_idx] = pp;
1041 1038 }
1042 1039 an_idx++;
1043 1040 }
1044 1041 		ANON_LOCK_EXIT(&amp->a_rwlock);
1045 1042
1046 1043 if (claim_availrmem) {
1047 1044 mutex_enter(&freemem_lock);
1048 1045 if (availrmem < tune.t_minarmem + claim_availrmem) {
1049 1046 mutex_exit(&freemem_lock);
1050 1047 ret = ENOTSUP;
1051 1048 claim_availrmem = 0;
1052 1049 goto insert_fail;
1053 1050 } else {
1054 1051 availrmem -= claim_availrmem;
1055 1052 }
1056 1053 mutex_exit(&freemem_lock);
1057 1054 }
1058 1055
1059 1056 sptd->spt_ppa = pl;
1060 1057 } else {
1061 1058 /*
1062 1059 * We already have a valid ppa[].
1063 1060 */
1064 1061 pl = sptd->spt_ppa;
1065 1062 }
1066 1063
1067 1064 ASSERT(pl != NULL);
1068 1065
1069 1066 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1070 1067 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1071 1068 segspt_reclaim);
1072 1069 if (ret == SEGP_FAIL) {
1073 1070 /*
1074 1071 * seg_pinsert failed. We return
1075 1072 * ENOTSUP, so that the as_pagelock() code will
1076 1073 * then try the slower F_SOFTLOCK path.
1077 1074 */
1078 1075 if (pl_built) {
1079 1076 /*
1080 1077 * No one else has referenced the ppa[].
1081 1078 * We created it and we need to destroy it.
1082 1079 */
1083 1080 sptd->spt_ppa = NULL;
1084 1081 }
1085 1082 ret = ENOTSUP;
1086 1083 goto insert_fail;
1087 1084 }
1088 1085
1089 1086 /*
1090 1087 * In either case, we increment softlockcnt on the 'real' segment.
1091 1088 */
1092 1089 sptd->spt_pcachecnt++;
1093 1090 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1094 1091
1095 1092 ppa = sptd->spt_ppa;
1096 1093 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1097 1094 if (ppa[an_idx] == NULL) {
1098 1095 mutex_exit(&sptd->spt_lock);
1099 1096 seg_pinactive(seg, NULL, seg->s_base,
1100 1097 sptd->spt_amp->size,
1101 1098 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1102 1099 *ppp = NULL;
1103 1100 return (ENOTSUP);
1104 1101 }
1105 1102 if ((szc = ppa[an_idx]->p_szc) != 0) {
1106 1103 npgs = page_get_pagecnt(szc);
1107 1104 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1108 1105 } else {
1109 1106 an_idx++;
1110 1107 }
1111 1108 }
1112 1109 /*
1113 1110 * We can now drop the sptd->spt_lock since the ppa[]
1114 1111 	 * exists and we have incremented pcachecnt.
1115 1112 */
1116 1113 mutex_exit(&sptd->spt_lock);
1117 1114
1118 1115 /*
1119 1116 * Since we cache the entire segment, we want to
1120 1117 * set ppp to point to the first slot that corresponds
1121 1118 * to the requested addr, i.e. pg_idx.
1122 1119 */
1123 1120 *ppp = &(sptd->spt_ppa[pg_idx]);
1124 1121 return (0);
1125 1122
1126 1123 insert_fail:
1127 1124 /*
1128 1125 * We will only reach this code if we tried and failed.
1129 1126 *
1130 1127 * And we can drop the lock on the dummy seg, once we've failed
1131 1128 * to set up a new ppa[].
1132 1129 */
1133 1130 mutex_exit(&sptd->spt_lock);
1134 1131
1135 1132 if (pl_built) {
1136 1133 if (claim_availrmem) {
1137 1134 mutex_enter(&freemem_lock);
1138 1135 availrmem += claim_availrmem;
1139 1136 mutex_exit(&freemem_lock);
1140 1137 }
1141 1138
1142 1139 /*
1143 1140 * We created pl and we need to destroy it.
1144 1141 */
1145 1142 pplist = pl;
1146 1143 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1147 1144 if (pplist[an_idx] != NULL)
1148 1145 page_unlock(pplist[an_idx]);
1149 1146 }
1150 1147 kmem_free(pl, sizeof (page_t *) * tot_npages);
1151 1148 }
1152 1149
1153 1150 if (shmd->shm_softlockcnt <= 0) {
1154 1151 if (AS_ISUNMAPWAIT(seg->s_as)) {
1155 1152 mutex_enter(&seg->s_as->a_contents);
1156 1153 if (AS_ISUNMAPWAIT(seg->s_as)) {
1157 1154 AS_CLRUNMAPWAIT(seg->s_as);
1158 1155 cv_broadcast(&seg->s_as->a_cv);
1159 1156 }
1160 1157 mutex_exit(&seg->s_as->a_contents);
1161 1158 }
1162 1159 }
1163 1160 *ppp = NULL;
1164 1161 return (ret);
1165 1162 }
1166 1163
1167 1164
1168 1165
1169 1166 /*
1170 1167 * return locked pages over a given range.
1171 1168 *
1172 1169 * We will cache the entire ISM segment and save the pplist for the
1173 1170 * entire segment in the ppa field of the underlying ISM segment structure.
1174 1171 * Later, during a call to segspt_reclaim() we will use this ppa array
1175 1172 * to page_unlock() all of the pages and then we will free this ppa list.
1176 1173 */
1177 1174 /*ARGSUSED*/
1178 1175 static int
1179 1176 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1180 1177 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1181 1178 {
1182 1179 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1183 1180 struct seg *sptseg = shmd->shm_sptseg;
1184 1181 struct spt_data *sptd = sptseg->s_data;
1185 1182 pgcnt_t np, page_index, npages;
1186 1183 caddr_t a, spt_base;
1187 1184 struct page **pplist, **pl, *pp;
1188 1185 struct anon_map *amp;
1189 1186 ulong_t anon_index;
1190 1187 int ret = ENOTSUP;
1191 1188 uint_t pl_built = 0;
1192 1189 struct anon *ap;
1193 1190 struct vnode *vp;
1194 1191 u_offset_t off;
1195 1192
1196 1193 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1197 1194 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1198 1195
1199 1196
1200 1197 /*
1201 1198 * We want to lock/unlock the entire ISM segment. Therefore,
1202 1199 	 * we will be using the underlying sptseg and its base address
1203 1200 * and length for the caching arguments.
1204 1201 */
1205 1202 ASSERT(sptseg);
1206 1203 ASSERT(sptd);
1207 1204
1208 1205 if (sptd->spt_flags & SHM_PAGEABLE) {
1209 1206 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1210 1207 }
1211 1208
1212 1209 page_index = seg_page(seg, addr);
1213 1210 npages = btopr(len);
1214 1211
1215 1212 /*
1216 1213 * check if the request is larger than number of pages covered
1217 1214 * by amp
1218 1215 */
1219 1216 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1220 1217 *ppp = NULL;
1221 1218 return (ENOTSUP);
1222 1219 }
1223 1220
1224 1221 if (type == L_PAGEUNLOCK) {
1225 1222
1226 1223 ASSERT(sptd->spt_ppa != NULL);
1227 1224
1228 1225 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1229 1226 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1230 1227
1231 1228 /*
1232 1229 * If someone is blocked while unmapping, we purge
1233 1230 * segment page cache and thus reclaim pplist synchronously
1234 1231 * without waiting for seg_pasync_thread. This speeds up
1235 1232 * unmapping in cases where munmap(2) is called, while
1236 1233 * raw async i/o is still in progress or where a thread
1237 1234 * exits on data fault in a multithreaded application.
1238 1235 */
1239 1236 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1240 1237 segspt_purge(seg);
1241 1238 }
1242 1239 return (0);
1243 1240 }
1244 1241
1245 1242 /* The L_PAGELOCK case... */
1246 1243
1247 1244 /*
1248 1245 * First try to find pages in segment page cache, without
1249 1246 * holding the segment lock.
1250 1247 */
1251 1248 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1252 1249 S_WRITE, SEGP_FORCE_WIRED);
1253 1250 if (pplist != NULL) {
1254 1251 ASSERT(sptd->spt_ppa == pplist);
1255 1252 ASSERT(sptd->spt_ppa[page_index]);
1256 1253 /*
1257 1254 * Since we cache the entire ISM segment, we want to
1258 1255 * set ppp to point to the first slot that corresponds
1259 1256 * to the requested addr, i.e. page_index.
1260 1257 */
1261 1258 *ppp = &(sptd->spt_ppa[page_index]);
1262 1259 return (0);
1263 1260 }
1264 1261
1265 1262 mutex_enter(&sptd->spt_lock);
1266 1263
1267 1264 /*
1268 1265 * try to find pages in segment page cache
1269 1266 */
1270 1267 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1271 1268 S_WRITE, SEGP_FORCE_WIRED);
1272 1269 if (pplist != NULL) {
1273 1270 ASSERT(sptd->spt_ppa == pplist);
1274 1271 /*
1275 1272 * Since we cache the entire segment, we want to
1276 1273 * set ppp to point to the first slot that corresponds
1277 1274 * to the requested addr, i.e. page_index.
1278 1275 */
1279 1276 mutex_exit(&sptd->spt_lock);
1280 1277 *ppp = &(sptd->spt_ppa[page_index]);
1281 1278 return (0);
1282 1279 }
1283 1280
1284 1281 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1285 1282 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1286 1283 mutex_exit(&sptd->spt_lock);
1287 1284 *ppp = NULL;
1288 1285 return (ENOTSUP);
1289 1286 }
1290 1287
1291 1288 /*
1292 1289 * No need to worry about protections because ISM pages
1293 1290 * are always rw.
1294 1291 */
1295 1292 pl = pplist = NULL;
1296 1293
1297 1294 /*
1298 1295 * Do we need to build the ppa array?
1299 1296 */
1300 1297 if (sptd->spt_ppa == NULL) {
1301 1298 ASSERT(sptd->spt_ppa == pplist);
1302 1299
1303 1300 spt_base = sptseg->s_base;
1304 1301 pl_built = 1;
1305 1302
1306 1303 /*
1307 1304 * availrmem is decremented once during anon_swap_adjust()
1308 1305 * and is incremented during the anon_unresv(), which is
1309 1306 * called from shm_rm_amp() when the segment is destroyed.
1310 1307 */
1311 1308 amp = sptd->spt_amp;
1312 1309 ASSERT(amp != NULL);
1313 1310
1314 1311 /* pcachecnt is protected by sptd->spt_lock */
1315 1312 ASSERT(sptd->spt_pcachecnt == 0);
1316 1313 pplist = kmem_zalloc(sizeof (page_t *)
1317 1314 * btopr(sptd->spt_amp->size), KM_SLEEP);
1318 1315 pl = pplist;
1319 1316
1320 1317 anon_index = seg_page(sptseg, spt_base);
1321 1318
1322 1319 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1323 1320 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1324 1321 a += PAGESIZE, anon_index++, pplist++) {
1325 1322 ap = anon_get_ptr(amp->ahp, anon_index);
1326 1323 ASSERT(ap != NULL);
1327 1324 swap_xlate(ap, &vp, &off);
1328 1325 pp = page_lookup(vp, off, SE_SHARED);
1329 1326 ASSERT(pp != NULL);
1330 1327 *pplist = pp;
1331 1328 }
1332 1329 		ANON_LOCK_EXIT(&amp->a_rwlock);
1333 1330
1334 1331 if (a < (spt_base + sptd->spt_amp->size)) {
1335 1332 ret = ENOTSUP;
1336 1333 goto insert_fail;
1337 1334 }
1338 1335 sptd->spt_ppa = pl;
1339 1336 } else {
1340 1337 /*
1341 1338 * We already have a valid ppa[].
1342 1339 */
1343 1340 pl = sptd->spt_ppa;
1344 1341 }
1345 1342
1346 1343 ASSERT(pl != NULL);
1347 1344
1348 1345 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1349 1346 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1350 1347 segspt_reclaim);
1351 1348 if (ret == SEGP_FAIL) {
1352 1349 /*
1353 1350 * seg_pinsert failed. We return
1354 1351 * ENOTSUP, so that the as_pagelock() code will
1355 1352 * then try the slower F_SOFTLOCK path.
1356 1353 */
1357 1354 if (pl_built) {
1358 1355 /*
1359 1356 * No one else has referenced the ppa[].
1360 1357 * We created it and we need to destroy it.
1361 1358 */
1362 1359 sptd->spt_ppa = NULL;
1363 1360 }
1364 1361 ret = ENOTSUP;
1365 1362 goto insert_fail;
1366 1363 }
1367 1364
1368 1365 /*
1369 1366 * In either case, we increment softlockcnt on the 'real' segment.
1370 1367 */
1371 1368 sptd->spt_pcachecnt++;
1372 1369 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1373 1370
1374 1371 /*
1375 1372 * We can now drop the sptd->spt_lock since the ppa[]
1376 1373 	 * exists and we have incremented pcachecnt.
1377 1374 */
1378 1375 mutex_exit(&sptd->spt_lock);
1379 1376
1380 1377 /*
1381 1378 * Since we cache the entire segment, we want to
1382 1379 * set ppp to point to the first slot that corresponds
1383 1380 * to the requested addr, i.e. page_index.
1384 1381 */
1385 1382 *ppp = &(sptd->spt_ppa[page_index]);
1386 1383 return (0);
1387 1384
1388 1385 insert_fail:
1389 1386 /*
1390 1387 * We will only reach this code if we tried and failed.
1391 1388 *
1392 1389 * And we can drop the lock on the dummy seg, once we've failed
1393 1390 * to set up a new ppa[].
1394 1391 */
1395 1392 mutex_exit(&sptd->spt_lock);
1396 1393
1397 1394 if (pl_built) {
1398 1395 /*
1399 1396 * We created pl and we need to destroy it.
1400 1397 */
1401 1398 pplist = pl;
1402 1399 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1403 1400 while (np) {
1404 1401 page_unlock(*pplist);
1405 1402 np--;
1406 1403 pplist++;
1407 1404 }
1408 1405 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1409 1406 }
1410 1407 if (shmd->shm_softlockcnt <= 0) {
1411 1408 if (AS_ISUNMAPWAIT(seg->s_as)) {
1412 1409 mutex_enter(&seg->s_as->a_contents);
1413 1410 if (AS_ISUNMAPWAIT(seg->s_as)) {
1414 1411 AS_CLRUNMAPWAIT(seg->s_as);
1415 1412 cv_broadcast(&seg->s_as->a_cv);
1416 1413 }
1417 1414 mutex_exit(&seg->s_as->a_contents);
1418 1415 }
1419 1416 }
1420 1417 *ppp = NULL;
1421 1418 return (ret);
1422 1419 }
1423 1420
1424 1421 /*
1425 1422 * purge any cached pages in the I/O page cache
1426 1423 */
1427 1424 static void
1428 1425 segspt_purge(struct seg *seg)
1429 1426 {
1430 1427 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1431 1428 }
1432 1429
1433 1430 static int
1434 1431 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1435 1432 enum seg_rw rw, int async)
1436 1433 {
1437 1434 struct seg *seg = (struct seg *)ptag;
1438 1435 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1439 1436 struct seg *sptseg;
1440 1437 struct spt_data *sptd;
1441 1438 pgcnt_t npages, i, free_availrmem = 0;
1442 1439 int done = 0;
1443 1440
1444 1441 #ifdef lint
1445 1442 addr = addr;
1446 1443 #endif
1447 1444 sptseg = shmd->shm_sptseg;
1448 1445 sptd = sptseg->s_data;
1449 1446 npages = (len >> PAGESHIFT);
1450 1447 ASSERT(npages);
1451 1448 ASSERT(sptd->spt_pcachecnt != 0);
1452 1449 ASSERT(sptd->spt_ppa == pplist);
1453 1450 ASSERT(npages == btopr(sptd->spt_amp->size));
1454 1451 ASSERT(async || AS_LOCK_HELD(seg->s_as));
1455 1452
1456 1453 /*
1457 1454 * Acquire the lock on the dummy seg and destroy the
1458 1455 * ppa array IF this is the last pcachecnt.
1459 1456 */
1460 1457 mutex_enter(&sptd->spt_lock);
1461 1458 if (--sptd->spt_pcachecnt == 0) {
1462 1459 for (i = 0; i < npages; i++) {
1463 1460 if (pplist[i] == NULL) {
1464 1461 continue;
1465 1462 }
1466 1463 if (rw == S_WRITE) {
1467 1464 hat_setrefmod(pplist[i]);
1468 1465 } else {
1469 1466 hat_setref(pplist[i]);
1470 1467 }
1471 1468 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1472 1469 (sptd->spt_ppa_lckcnt[i] == 0))
1473 1470 free_availrmem++;
1474 1471 page_unlock(pplist[i]);
1475 1472 }
1476 1473 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1477 1474 mutex_enter(&freemem_lock);
1478 1475 availrmem += free_availrmem;
1479 1476 mutex_exit(&freemem_lock);
1480 1477 }
1481 1478 /*
1482 1479 		 * Since we want to cache/uncache the entire ISM segment,
1483 1480 * we will track the pplist in a segspt specific field
1484 1481 * ppa, that is initialized at the time we add an entry to
1485 1482 * the cache.
1486 1483 */
1487 1484 ASSERT(sptd->spt_pcachecnt == 0);
1488 1485 kmem_free(pplist, sizeof (page_t *) * npages);
1489 1486 sptd->spt_ppa = NULL;
1490 1487 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1491 1488 sptd->spt_gen++;
1492 1489 cv_broadcast(&sptd->spt_cv);
1493 1490 done = 1;
1494 1491 }
1495 1492 mutex_exit(&sptd->spt_lock);
1496 1493
1497 1494 /*
1498 1495 	 * If we are the pcache async thread, or were called via
1499 1496 	 * seg_ppurge_wiredpp(), we may not hold the AS lock (in this case the
1500 1497 	 * async argument is not 0). This means that if softlockcnt drops to 0
1501 1498 	 * after the decrement below, the address space may get freed. We can't
1502 1499 	 * allow that, since after the softlock decrement to 0 we still need to
1503 1500 	 * access the as structure for a possible wakeup of unmap waiters. To
1504 1501 	 * prevent the disappearance of the as we take this segment's
1505 1502 	 * shm_segfree_syncmtx. segspt_shmfree() also takes this mutex as a
1506 1503 	 * barrier to make sure this routine completes before the segment is freed.
1507 1504 	 *
1508 1505 	 * The second complication we have to deal with in the async case is the
1509 1506 	 * possibility of a missed wakeup of an unmap wait thread. When we don't
1510 1507 	 * hold the as lock here, we may take the a_contents lock before an unmap
1511 1508 	 * wait thread that was first to see that softlockcnt was still not 0. As
1512 1509 	 * a result we'll fail to wake that thread up. To avoid this race we set
1513 1510 	 * the nounmapwait flag in the as structure if we drop softlockcnt to 0
1514 1511 	 * when async is not 0. The unmapwait thread
1515 1512 	 * will not block if this flag is set.
1516 1513 */
1517 1514 if (async)
1518 1515 mutex_enter(&shmd->shm_segfree_syncmtx);
1519 1516
1520 1517 /*
1521 1518 * Now decrement softlockcnt.
1522 1519 */
1523 1520 ASSERT(shmd->shm_softlockcnt > 0);
1524 1521 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1525 1522
1526 1523 if (shmd->shm_softlockcnt <= 0) {
1527 1524 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1528 1525 mutex_enter(&seg->s_as->a_contents);
1529 1526 if (async)
1530 1527 AS_SETNOUNMAPWAIT(seg->s_as);
1531 1528 if (AS_ISUNMAPWAIT(seg->s_as)) {
1532 1529 AS_CLRUNMAPWAIT(seg->s_as);
1533 1530 cv_broadcast(&seg->s_as->a_cv);
1534 1531 }
1535 1532 mutex_exit(&seg->s_as->a_contents);
1536 1533 }
1537 1534 }
1538 1535
1539 1536 if (async)
1540 1537 mutex_exit(&shmd->shm_segfree_syncmtx);
1541 1538
1542 1539 return (done);
1543 1540 }
1544 1541
1545 1542 /*
1546 1543 * Do a F_SOFTUNLOCK call over the range requested.
1547 1544 * The range must have already been F_SOFTLOCK'ed.
1548 1545 *
1549 1546 * The calls to acquire and release the anon map lock mutex were
1550 1547 * removed in order to avoid a deadly embrace during a DR
1551 1548 	 * memory delete operation. (E.g. DR blocks while waiting for an
1552 1549 * exclusive lock on a page that is being used for kaio; the
1553 1550 * thread that will complete the kaio and call segspt_softunlock
1554 1551 * blocks on the anon map lock; another thread holding the anon
1555 1552 * map lock blocks on another page lock via the segspt_shmfault
1556 1553 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1557 1554 *
1558 1555 * The appropriateness of the removal is based upon the following:
1559 1556 * 1. If we are holding a segment's reader lock and the page is held
1560 1557 * shared, then the corresponding element in anonmap which points to
1561 1558 * anon struct cannot change and there is no need to acquire the
1562 1559 * anonymous map lock.
1563 1560 * 2. Threads in segspt_softunlock have a reader lock on the segment
1564 1561 * and already have the shared page lock, so we are guaranteed that
1565 1562 * the anon map slot cannot change and therefore can call anon_get_ptr()
1566 1563 * without grabbing the anonymous map lock.
1567 1564 * 3. Threads that softlock a shared page break copy-on-write, even if
1568 1565 * its a read. Thus cow faults can be ignored with respect to soft
1569 1566 * unlocking, since the breaking of cow means that the anon slot(s) will
1570 1567 * not be shared.
1571 1568 */
1572 1569 static void
1573 1570 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1574 1571 size_t len, enum seg_rw rw)
1575 1572 {
1576 1573 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1577 1574 struct seg *sptseg;
1578 1575 struct spt_data *sptd;
1579 1576 page_t *pp;
1580 1577 caddr_t adr;
1581 1578 struct vnode *vp;
1582 1579 u_offset_t offset;
1583 1580 ulong_t anon_index;
1584 1581 struct anon_map *amp; /* XXX - for locknest */
1585 1582 struct anon *ap = NULL;
1586 1583 pgcnt_t npages;
1587 1584
1588 1585 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1589 1586
1590 1587 sptseg = shmd->shm_sptseg;
1591 1588 sptd = sptseg->s_data;
1592 1589
1593 1590 /*
1594 1591 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1595 1592 * and therefore their pages are SE_SHARED locked
1596 1593 * for the entire life of the segment.
1597 1594 */
1598 1595 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1599 1596 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1600 1597 goto softlock_decrement;
1601 1598 }
1602 1599
1603 1600 /*
1604 1601 * Any thread is free to do a page_find and
1605 1602 * page_unlock() on the pages within this seg.
1606 1603 *
1607 1604 * We are already holding the as->a_lock on the user's
1608 1605 * real segment, but we need to hold the a_lock on the
1609 1606 * underlying dummy as. This is mostly to satisfy the
1610 1607 * underlying HAT layer.
1611 1608 */
1612 1609 AS_LOCK_ENTER(sptseg->s_as, RW_READER);
1613 1610 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1614 1611 AS_LOCK_EXIT(sptseg->s_as);
1615 1612
1616 1613 amp = sptd->spt_amp;
1617 1614 ASSERT(amp != NULL);
1618 1615 anon_index = seg_page(sptseg, sptseg_addr);
1619 1616
1620 1617 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1621 1618 ap = anon_get_ptr(amp->ahp, anon_index++);
1622 1619 ASSERT(ap != NULL);
1623 1620 swap_xlate(ap, &vp, &offset);
1624 1621
1625 1622 /*
1626 1623 * Use page_find() instead of page_lookup() to
1627 1624 * find the page since we know that it has a
1628 1625 * "shared" lock.
1629 1626 */
1630 1627 pp = page_find(vp, offset);
1631 1628 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1632 1629 if (pp == NULL) {
1633 1630 panic("segspt_softunlock: "
1634 1631 "addr %p, ap %p, vp %p, off %llx",
1635 1632 (void *)adr, (void *)ap, (void *)vp, offset);
1636 1633 /*NOTREACHED*/
1637 1634 }
1638 1635
1639 1636 if (rw == S_WRITE) {
1640 1637 hat_setrefmod(pp);
1641 1638 } else if (rw != S_OTHER) {
1642 1639 hat_setref(pp);
1643 1640 }
1644 1641 page_unlock(pp);
1645 1642 }
1646 1643
1647 1644 softlock_decrement:
1648 1645 npages = btopr(len);
1649 1646 ASSERT(shmd->shm_softlockcnt >= npages);
1650 1647 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1651 1648 if (shmd->shm_softlockcnt == 0) {
1652 1649 /*
1653 1650 		 * All SOFTLOCKS are gone. Wake up any waiting
1654 1651 * unmappers so they can try again to unmap.
1655 1652 * Check for waiters first without the mutex
1656 1653 * held so we don't always grab the mutex on
1657 1654 * softunlocks.
1658 1655 */
1659 1656 if (AS_ISUNMAPWAIT(seg->s_as)) {
1660 1657 mutex_enter(&seg->s_as->a_contents);
1661 1658 if (AS_ISUNMAPWAIT(seg->s_as)) {
1662 1659 AS_CLRUNMAPWAIT(seg->s_as);
1663 1660 cv_broadcast(&seg->s_as->a_cv);
1664 1661 }
1665 1662 mutex_exit(&seg->s_as->a_contents);
1666 1663 }
1667 1664 }
1668 1665 }
1669 1666
1670 1667 int
1671 1668 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1672 1669 {
1673 1670 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1674 1671 struct shm_data *shmd;
1675 1672 struct anon_map *shm_amp = shmd_arg->shm_amp;
1676 1673 struct spt_data *sptd;
1677 1674 int error = 0;
1678 1675
1679 1676 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1680 1677
1681 1678 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1682 1679 if (shmd == NULL)
1683 1680 return (ENOMEM);
1684 1681
1685 1682 shmd->shm_sptas = shmd_arg->shm_sptas;
1686 1683 shmd->shm_amp = shm_amp;
1687 1684 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1688 1685
1689 1686 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1690 1687 NULL, 0, seg->s_size);
1691 1688
1692 1689 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1693 1690
1694 1691 seg->s_data = (void *)shmd;
1695 1692 seg->s_ops = &segspt_shmops;
1696 1693 seg->s_szc = shmd->shm_sptseg->s_szc;
1697 1694 sptd = shmd->shm_sptseg->s_data;
1698 1695
1699 1696 if (sptd->spt_flags & SHM_PAGEABLE) {
1700 1697 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1701 1698 KM_NOSLEEP)) == NULL) {
1702 1699 seg->s_data = (void *)NULL;
1703 1700 kmem_free(shmd, (sizeof (*shmd)));
1704 1701 return (ENOMEM);
1705 1702 }
1706 1703 shmd->shm_lckpgs = 0;
1707 1704 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1708 1705 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1709 1706 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1710 1707 seg->s_size, seg->s_szc)) != 0) {
1711 1708 kmem_free(shmd->shm_vpage,
1712 1709 btopr(shm_amp->size));
1713 1710 }
1714 1711 }
1715 1712 } else {
1716 1713 error = hat_share(seg->s_as->a_hat, seg->s_base,
1717 1714 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1718 1715 seg->s_size, seg->s_szc);
1719 1716 }
1720 1717 if (error) {
1721 1718 seg->s_szc = 0;
1722 1719 seg->s_data = (void *)NULL;
1723 1720 kmem_free(shmd, (sizeof (*shmd)));
1724 1721 } else {
1725 1722 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1726 1723 shm_amp->refcnt++;
1727 1724 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1728 1725 }
1729 1726 return (error);
1730 1727 }
1731 1728
1732 1729 int
1733 1730 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1734 1731 {
1735 1732 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1736 1733 int reclaim = 1;
1737 1734
1738 1735 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1739 1736 retry:
1740 1737 if (shmd->shm_softlockcnt > 0) {
1741 1738 if (reclaim == 1) {
1742 1739 segspt_purge(seg);
1743 1740 reclaim = 0;
1744 1741 goto retry;
1745 1742 }
1746 1743 return (EAGAIN);
1747 1744 }
1748 1745
1749 1746 if (ssize != seg->s_size) {
1750 1747 #ifdef DEBUG
1751 1748 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1752 1749 ssize, seg->s_size);
1753 1750 #endif
1754 1751 return (EINVAL);
1755 1752 }
1756 1753
1757 1754 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1758 1755 NULL, 0);
1759 1756 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1760 1757
1761 1758 seg_free(seg);
1762 1759
1763 1760 return (0);
1764 1761 }
1765 1762
1766 1763 void
1767 1764 segspt_shmfree(struct seg *seg)
1768 1765 {
1769 1766 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1770 1767 struct anon_map *shm_amp = shmd->shm_amp;
1771 1768
1772 1769 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1773 1770
1774 1771 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1775 1772 MC_UNLOCK, NULL, 0);
1776 1773
1777 1774 /*
1778 1775 * Need to increment refcnt when attaching
1779 1776 * and decrement when detaching because of dup().
1780 1777 */
1781 1778 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1782 1779 shm_amp->refcnt--;
1783 1780 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1784 1781
1785 1782 if (shmd->shm_vpage) { /* only for DISM */
1786 1783 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1787 1784 shmd->shm_vpage = NULL;
1788 1785 }
1789 1786
1790 1787 /*
1791 1788 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1792 1789 * still working with this segment without holding as lock.
1793 1790 */
1794 1791 ASSERT(shmd->shm_softlockcnt == 0);
1795 1792 mutex_enter(&shmd->shm_segfree_syncmtx);
1796 1793 mutex_destroy(&shmd->shm_segfree_syncmtx);
1797 1794
1798 1795 kmem_free(shmd, sizeof (*shmd));
1799 1796 }
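
segspt_shmattach() takes a hold on the shared anon_map and segspt_shmfree() drops it, so every attached mapping (including ones duplicated across fork) keeps the map alive until the last detach. A simplified, hypothetical userland sketch of that reference-counting lifecycle (struct shared_map, map_attach and map_detach are invented names, not the kernel's anon_map code):

	#include <pthread.h>
	#include <stdlib.h>

	struct shared_map {
		pthread_mutex_t sm_lock;
		unsigned int sm_refcnt;
	};

	static void
	map_attach(struct shared_map *m)
	{
		pthread_mutex_lock(&m->sm_lock);
		m->sm_refcnt++;			/* one hold per attached mapping */
		pthread_mutex_unlock(&m->sm_lock);
	}

	static void
	map_detach(struct shared_map *m)
	{
		unsigned int left;

		pthread_mutex_lock(&m->sm_lock);
		left = --m->sm_refcnt;
		pthread_mutex_unlock(&m->sm_lock);
		if (left == 0) {		/* last detach tears the map down */
			pthread_mutex_destroy(&m->sm_lock);
			free(m);
		}
	}
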
1800 1797
1801 1798 /*ARGSUSED*/
1802 1799 int
1803 1800 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1804 1801 {
1805 1802 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1806 1803
1807 1804 /*
1808 1805 * Shared page table is more than shared mapping.
1809 1806 * Individual process sharing page tables can't change prot
1810 1807 * because there is only one set of page tables.
1811 1808 * This will be allowed after private page table is
1812 1809 * supported.
1813 1810 */
1814 1811 /* need to return correct status error? */
1815 1812 return (0);
1816 1813 }
1817 1814
1818 1815
1819 1816 faultcode_t
1820 1817 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1821 1818 size_t len, enum fault_type type, enum seg_rw rw)
1822 1819 {
1823 1820 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1824 1821 struct seg *sptseg = shmd->shm_sptseg;
1825 1822 struct as *curspt = shmd->shm_sptas;
1826 1823 struct spt_data *sptd = sptseg->s_data;
1827 1824 pgcnt_t npages;
1828 1825 size_t size;
1829 1826 caddr_t segspt_addr, shm_addr;
1830 1827 page_t **ppa;
1831 1828 int i;
1832 1829 ulong_t an_idx = 0;
1833 1830 int err = 0;
1834 1831 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1835 1832 size_t pgsz;
1836 1833 pgcnt_t pgcnt;
1837 1834 caddr_t a;
1838 1835 pgcnt_t pidx;
1839 1836
1840 1837 #ifdef lint
1841 1838 hat = hat;
1842 1839 #endif
1843 1840 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1844 1841
1845 1842 /*
1846 1843 * Because of the way spt is implemented
1847 1844 * the realsize of the segment does not have to be
1848 1845 * equal to the segment size itself. The segment size is
1849 1846 * often in multiples of a page size larger than PAGESIZE.
1850 1847 * The realsize is rounded up to the nearest PAGESIZE
1851 1848 * based on what the user requested. This is a bit of
1852 1849 	 * ugliness that is historical but not easily fixed
1853 1850 * without re-designing the higher levels of ISM.
1854 1851 */
1855 1852 ASSERT(addr >= seg->s_base);
1856 1853 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1857 1854 return (FC_NOMAP);
1858 1855 /*
1859 1856 * For all of the following cases except F_PROT, we need to
1860 1857 * make any necessary adjustments to addr and len
1861 1858 * and get all of the necessary page_t's into an array called ppa[].
1862 1859 *
1863 1860 * The code in shmat() forces base addr and len of ISM segment
1864 1861 * to be aligned to largest page size supported. Therefore,
1865 1862 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1866 1863 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1867 1864 * in large pagesize chunks, or else we will screw up the HAT
1868 1865 * layer by calling hat_memload_array() with differing page sizes
1869 1866 * over a given virtual range.
1870 1867 */
1871 1868 pgsz = page_get_pagesize(sptseg->s_szc);
1872 1869 pgcnt = page_get_pagecnt(sptseg->s_szc);
1873 1870 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1874 1871 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1875 1872 npages = btopr(size);
1876 1873
1877 1874 /*
1878 1875 * Now we need to convert from addr in segshm to addr in segspt.
1879 1876 */
1880 1877 an_idx = seg_page(seg, shm_addr);
1881 1878 segspt_addr = sptseg->s_base + ptob(an_idx);
1882 1879
1883 1880 ASSERT((segspt_addr + ptob(npages)) <=
1884 1881 (sptseg->s_base + sptd->spt_realsize));
1885 1882 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1886 1883
1887 1884 switch (type) {
1888 1885
1889 1886 case F_SOFTLOCK:
1890 1887
1891 1888 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1892 1889 /*
1893 1890 * Fall through to the F_INVAL case to load up the hat layer
1894 1891 * entries with the HAT_LOAD_LOCK flag.
1895 1892 */
1896 1893 /* FALLTHRU */
1897 1894 case F_INVAL:
1898 1895
1899 1896 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1900 1897 return (FC_NOMAP);
1901 1898
1902 1899 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1903 1900
1904 1901 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1905 1902 if (err != 0) {
1906 1903 if (type == F_SOFTLOCK) {
1907 1904 atomic_add_long((ulong_t *)(
1908 1905 &(shmd->shm_softlockcnt)), -npages);
1909 1906 }
1910 1907 goto dism_err;
1911 1908 }
1912 1909 AS_LOCK_ENTER(sptseg->s_as, RW_READER);
1913 1910 a = segspt_addr;
1914 1911 pidx = 0;
1915 1912 if (type == F_SOFTLOCK) {
1916 1913
1917 1914 /*
1918 1915 * Load up the translation keeping it
1919 1916 * locked and don't unlock the page.
1920 1917 */
1921 1918 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1922 1919 hat_memload_array(sptseg->s_as->a_hat,
1923 1920 a, pgsz, &ppa[pidx], sptd->spt_prot,
1924 1921 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1925 1922 }
1926 1923 } else {
1927 1924 /*
1928 1925 * Migrate pages marked for migration
1929 1926 */
1930 1927 if (lgrp_optimizations())
1931 1928 page_migrate(seg, shm_addr, ppa, npages);
1932 1929
1933 1930 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1934 1931 hat_memload_array(sptseg->s_as->a_hat,
1935 1932 a, pgsz, &ppa[pidx],
1936 1933 sptd->spt_prot,
1937 1934 HAT_LOAD_SHARE);
1938 1935 }
1939 1936
1940 1937 /*
1941 1938 * And now drop the SE_SHARED lock(s).
1942 1939 */
1943 1940 if (dyn_ism_unmap) {
1944 1941 for (i = 0; i < npages; i++) {
1945 1942 page_unlock(ppa[i]);
1946 1943 }
1947 1944 }
1948 1945 }
1949 1946
1950 1947 if (!dyn_ism_unmap) {
1951 1948 if (hat_share(seg->s_as->a_hat, shm_addr,
1952 1949 curspt->a_hat, segspt_addr, ptob(npages),
1953 1950 seg->s_szc) != 0) {
1954 1951 panic("hat_share err in DISM fault");
1955 1952 /* NOTREACHED */
1956 1953 }
1957 1954 if (type == F_INVAL) {
1958 1955 for (i = 0; i < npages; i++) {
1959 1956 page_unlock(ppa[i]);
1960 1957 }
1961 1958 }
1962 1959 }
1963 1960 AS_LOCK_EXIT(sptseg->s_as);
1964 1961 dism_err:
1965 1962 kmem_free(ppa, npages * sizeof (page_t *));
1966 1963 return (err);
1967 1964
1968 1965 case F_SOFTUNLOCK:
1969 1966
1970 1967 /*
1971 1968 * This is a bit ugly, we pass in the real seg pointer,
1972 1969 * but the segspt_addr is the virtual address within the
1973 1970 * dummy seg.
1974 1971 */
1975 1972 segspt_softunlock(seg, segspt_addr, size, rw);
1976 1973 return (0);
1977 1974
1978 1975 case F_PROT:
1979 1976
1980 1977 /*
1981 1978 * This takes care of the unusual case where a user
1982 1979 * allocates a stack in shared memory and a register
1983 1980 * window overflow is written to that stack page before
1984 1981 * it is otherwise modified.
1985 1982 *
1986 1983 * We can get away with this because ISM segments are
1987 1984 * always rw. Other than this unusual case, there
1988 1985 * should be no instances of protection violations.
1989 1986 */
1990 1987 return (0);
1991 1988
1992 1989 default:
1993 1990 #ifdef DEBUG
1994 1991 panic("segspt_dismfault default type?");
1995 1992 #else
1996 1993 return (FC_NOMAP);
1997 1994 #endif
1998 1995 }
1999 1996 }
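
Both fault handlers round the faulting range out to the underlying segment's large page size before loading translations, as described in the comment above. A standalone sketch of that P2ALIGN/P2ROUNDUP arithmetic, with the macros written out in the style of <sys/sysmacros.h> and made-up addr/len/pgsz values:

	#include <stdio.h>
	#include <stdint.h>

	#define	P2ALIGN(x, align)	((x) & -(align))
	#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

	int
	main(void)
	{
		uintptr_t addr = 0x40123000;	/* faulting address (hypothetical) */
		uintptr_t len  = 0x6000;	/* fault length (hypothetical) */
		uintptr_t pgsz = 0x400000;	/* 4M underlying page size */

		uintptr_t shm_addr = P2ALIGN(addr, pgsz);
		uintptr_t size = P2ROUNDUP(addr + len - shm_addr, pgsz);

		/* prints shm_addr 0x40000000 size 0x400000: one full 4M chunk */
		printf("shm_addr 0x%lx size 0x%lx\n",
		    (unsigned long)shm_addr, (unsigned long)size);
		return (0);
	}
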
2000 1997
2001 1998
2002 1999 faultcode_t
2003 2000 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2004 2001 size_t len, enum fault_type type, enum seg_rw rw)
2005 2002 {
2006 2003 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2007 2004 struct seg *sptseg = shmd->shm_sptseg;
2008 2005 struct as *curspt = shmd->shm_sptas;
2009 2006 struct spt_data *sptd = sptseg->s_data;
2010 2007 pgcnt_t npages;
2011 2008 size_t size;
2012 2009 caddr_t sptseg_addr, shm_addr;
2013 2010 page_t *pp, **ppa;
2014 2011 int i;
2015 2012 u_offset_t offset;
2016 2013 ulong_t anon_index = 0;
2017 2014 struct vnode *vp;
2018 2015 struct anon_map *amp; /* XXX - for locknest */
2019 2016 struct anon *ap = NULL;
2020 2017 size_t pgsz;
2021 2018 pgcnt_t pgcnt;
2022 2019 caddr_t a;
2023 2020 pgcnt_t pidx;
2024 2021 size_t sz;
2025 2022
2026 2023 #ifdef lint
2027 2024 hat = hat;
2028 2025 #endif
2029 2026
2030 2027 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2031 2028
2032 2029 if (sptd->spt_flags & SHM_PAGEABLE) {
2033 2030 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2034 2031 }
2035 2032
2036 2033 /*
2037 2034 * Because of the way spt is implemented
2038 2035 * the realsize of the segment does not have to be
2039 2036 * equal to the segment size itself. The segment size is
2040 2037 * often in multiples of a page size larger than PAGESIZE.
2041 2038 * The realsize is rounded up to the nearest PAGESIZE
2042 2039 * based on what the user requested. This is a bit of
2043 2040 	 * ugliness that is historical but not easily fixed
2044 2041 * without re-designing the higher levels of ISM.
2045 2042 */
2046 2043 ASSERT(addr >= seg->s_base);
2047 2044 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2048 2045 return (FC_NOMAP);
2049 2046 /*
2050 2047 * For all of the following cases except F_PROT, we need to
2051 2048 * make any necessary adjustments to addr and len
2052 2049 * and get all of the necessary page_t's into an array called ppa[].
2053 2050 *
2054 2051 * The code in shmat() forces base addr and len of ISM segment
2055 2052 * to be aligned to largest page size supported. Therefore,
2056 2053 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2057 2054 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2058 2055 * in large pagesize chunks, or else we will screw up the HAT
2059 2056 * layer by calling hat_memload_array() with differing page sizes
2060 2057 * over a given virtual range.
2061 2058 */
2062 2059 pgsz = page_get_pagesize(sptseg->s_szc);
2063 2060 pgcnt = page_get_pagecnt(sptseg->s_szc);
2064 2061 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2065 2062 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2066 2063 npages = btopr(size);
2067 2064
2068 2065 /*
2069 2066 * Now we need to convert from addr in segshm to addr in segspt.
2070 2067 */
2071 2068 anon_index = seg_page(seg, shm_addr);
2072 2069 sptseg_addr = sptseg->s_base + ptob(anon_index);
2073 2070
2074 2071 /*
2075 2072 * And now we may have to adjust npages downward if we have
2076 2073 * exceeded the realsize of the segment or initial anon
2077 2074 * allocations.
2078 2075 */
2079 2076 if ((sptseg_addr + ptob(npages)) >
2080 2077 (sptseg->s_base + sptd->spt_realsize))
2081 2078 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2082 2079
2083 2080 npages = btopr(size);
2084 2081
2085 2082 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2086 2083 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2087 2084
2088 2085 switch (type) {
2089 2086
2090 2087 case F_SOFTLOCK:
2091 2088
2092 2089 /*
2093 2090 * availrmem is decremented once during anon_swap_adjust()
2094 2091 * and is incremented during the anon_unresv(), which is
2095 2092 * called from shm_rm_amp() when the segment is destroyed.
2096 2093 */
2097 2094 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2098 2095 /*
2099 2096 * Some platforms assume that ISM pages are SE_SHARED
2100 2097 * locked for the entire life of the segment.
2101 2098 */
2102 2099 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2103 2100 return (0);
2104 2101 /*
2105 2102 * Fall through to the F_INVAL case to load up the hat layer
2106 2103 * entries with the HAT_LOAD_LOCK flag.
2107 2104 */
2108 2105
2109 2106 /* FALLTHRU */
2110 2107 case F_INVAL:
2111 2108
2112 2109 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2113 2110 return (FC_NOMAP);
2114 2111
2115 2112 /*
2116 2113 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2117 2114 * may still rely on this call to hat_share(). That
2118 2115 * would imply that those hat's can fault on a
2119 2116 * HAT_LOAD_LOCK translation, which would seem
2120 2117 * contradictory.
2121 2118 */
2122 2119 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2123 2120 if (hat_share(seg->s_as->a_hat, seg->s_base,
2124 2121 curspt->a_hat, sptseg->s_base,
2125 2122 sptseg->s_size, sptseg->s_szc) != 0) {
2126 2123 panic("hat_share error in ISM fault");
2127 2124 /*NOTREACHED*/
2128 2125 }
2129 2126 return (0);
2130 2127 }
2131 2128 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2132 2129
2133 2130 /*
2134 2131 * I see no need to lock the real seg,
2135 2132 * here, because all of our work will be on the underlying
2136 2133 * dummy seg.
2137 2134 *
2138 2135 * sptseg_addr and npages now account for large pages.
2139 2136 */
2140 2137 amp = sptd->spt_amp;
2141 2138 ASSERT(amp != NULL);
2142 2139 anon_index = seg_page(sptseg, sptseg_addr);
2143 2140
2144 2141 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2145 2142 for (i = 0; i < npages; i++) {
2146 2143 ap = anon_get_ptr(amp->ahp, anon_index++);
2147 2144 ASSERT(ap != NULL);
2148 2145 swap_xlate(ap, &vp, &offset);
2149 2146 pp = page_lookup(vp, offset, SE_SHARED);
2150 2147 ASSERT(pp != NULL);
2151 2148 ppa[i] = pp;
2152 2149 }
2153 2150 		ANON_LOCK_EXIT(&amp->a_rwlock);
2154 2151 ASSERT(i == npages);
2155 2152
2156 2153 /*
2157 2154 * We are already holding the as->a_lock on the user's
2158 2155 * real segment, but we need to hold the a_lock on the
2159 2156 * underlying dummy as. This is mostly to satisfy the
2160 2157 * underlying HAT layer.
2161 2158 */
2162 2159 AS_LOCK_ENTER(sptseg->s_as, RW_READER);
2163 2160 a = sptseg_addr;
2164 2161 pidx = 0;
2165 2162 if (type == F_SOFTLOCK) {
2166 2163 /*
2167 2164 * Load up the translation keeping it
2168 2165 * locked and don't unlock the page.
2169 2166 */
2170 2167 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2171 2168 sz = MIN(pgsz, ptob(npages - pidx));
2172 2169 hat_memload_array(sptseg->s_as->a_hat, a,
2173 2170 sz, &ppa[pidx], sptd->spt_prot,
2174 2171 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2175 2172 }
2176 2173 } else {
2177 2174 /*
2178 2175 * Migrate pages marked for migration.
2179 2176 */
2180 2177 if (lgrp_optimizations())
2181 2178 page_migrate(seg, shm_addr, ppa, npages);
2182 2179
2183 2180 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2184 2181 sz = MIN(pgsz, ptob(npages - pidx));
2185 2182 hat_memload_array(sptseg->s_as->a_hat,
2186 2183 a, sz, &ppa[pidx],
2187 2184 sptd->spt_prot, HAT_LOAD_SHARE);
2188 2185 }
2189 2186
2190 2187 /*
2191 2188 * And now drop the SE_SHARED lock(s).
2192 2189 */
2193 2190 for (i = 0; i < npages; i++)
2194 2191 page_unlock(ppa[i]);
2195 2192 }
2196 2193 AS_LOCK_EXIT(sptseg->s_as);
2197 2194
2198 2195 kmem_free(ppa, sizeof (page_t *) * npages);
2199 2196 return (0);
2200 2197 case F_SOFTUNLOCK:
2201 2198
2202 2199 /*
2203 2200 * This is a bit ugly, we pass in the real seg pointer,
2204 2201 * but the sptseg_addr is the virtual address within the
2205 2202 * dummy seg.
2206 2203 */
2207 2204 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2208 2205 return (0);
2209 2206
2210 2207 case F_PROT:
2211 2208
2212 2209 /*
2213 2210 * This takes care of the unusual case where a user
2214 2211 * allocates a stack in shared memory and a register
2215 2212 * window overflow is written to that stack page before
2216 2213 * it is otherwise modified.
2217 2214 *
2218 2215 * We can get away with this because ISM segments are
2219 2216 * always rw. Other than this unusual case, there
2220 2217 * should be no instances of protection violations.
2221 2218 */
2222 2219 return (0);
2223 2220
2224 2221 default:
2225 2222 #ifdef DEBUG
2226 2223 cmn_err(CE_WARN, "segspt_shmfault default type?");
2227 2224 #endif
2228 2225 return (FC_NOMAP);
2229 2226 }
2230 2227 }
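
Unlike the DISM path, segspt_shmfault() also clamps the rounded-up range so it never extends past spt_realsize of the dummy segment. A small standalone sketch of that clamp (clamp_to_realsize and its parameters are invented for illustration):

	#include <stddef.h>

	/* clamp the rounded-up range so it stops at base + realsize */
	static size_t
	clamp_to_realsize(char *sptseg_addr, size_t size, char *seg_base,
	    size_t realsize)
	{
		if (sptseg_addr + size > seg_base + realsize)
			size = (size_t)((seg_base + realsize) - sptseg_addr);
		return (size);
	}
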
2231 2228
2064 lines elided
2232 2229 /*ARGSUSED*/
2233 2230 static faultcode_t
2234 2231 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2235 2232 {
2236 2233 return (0);
2237 2234 }
2238 2235
2239 2236 /*ARGSUSED*/
2240 2237 static int
2241 2238 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2242 -{
2243 - return (0);
2244 -}
2245 -
2246 -/*ARGSUSED*/
2247 -static size_t
2248 -segspt_shmswapout(struct seg *seg)
2249 2239 {
2250 2240 return (0);
2251 2241 }
2252 2242
2253 2243 /*
2254 2244 * duplicate the shared page tables
2255 2245 */
2256 2246 int
2257 2247 segspt_shmdup(struct seg *seg, struct seg *newseg)
2258 2248 {
2259 2249 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2260 2250 struct anon_map *amp = shmd->shm_amp;
2261 2251 struct shm_data *shmd_new;
2262 2252 struct seg *spt_seg = shmd->shm_sptseg;
2263 2253 struct spt_data *sptd = spt_seg->s_data;
2264 2254 int error = 0;
2265 2255
2266 2256 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
2267 2257
2268 2258 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2269 2259 newseg->s_data = (void *)shmd_new;
2270 2260 shmd_new->shm_sptas = shmd->shm_sptas;
2271 2261 shmd_new->shm_amp = amp;
2272 2262 shmd_new->shm_sptseg = shmd->shm_sptseg;
2273 2263 newseg->s_ops = &segspt_shmops;
2274 2264 newseg->s_szc = seg->s_szc;
2275 2265 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2276 2266
2277 2267 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2278 2268 amp->refcnt++;
2279 2269 	ANON_LOCK_EXIT(&amp->a_rwlock);
2280 2270
2281 2271 if (sptd->spt_flags & SHM_PAGEABLE) {
2282 2272 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2283 2273 shmd_new->shm_lckpgs = 0;
2284 2274 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2285 2275 if ((error = hat_share(newseg->s_as->a_hat,
2286 2276 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2287 2277 seg->s_size, seg->s_szc)) != 0) {
2288 2278 kmem_free(shmd_new->shm_vpage,
2289 2279 btopr(amp->size));
2290 2280 }
2291 2281 }
2292 2282 return (error);
2293 2283 } else {
2294 2284 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2295 2285 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2296 2286 seg->s_szc));
2297 2287
2298 2288 }
2299 2289 }
2300 2290
2301 2291 /*ARGSUSED*/
2302 2292 int
2303 2293 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2304 2294 {
2305 2295 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2306 2296 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2307 2297
2308 2298 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2309 2299
2310 2300 /*
2311 2301 * ISM segment is always rw.
2312 2302 */
2313 2303 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2314 2304 }
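
The return statement above is a bitmask subset test: access is granted only when every requested protection bit is present in spt_prot. A tiny standalone illustration with made-up values:

	#include <stdio.h>
	#include <sys/mman.h>

	int
	main(void)
	{
		unsigned int spt_prot = PROT_READ | PROT_WRITE;	/* segment is rw */
		unsigned int prot = PROT_READ | PROT_EXEC;	/* caller wants rx */

		/* fails: PROT_EXEC is requested but not granted by the segment */
		printf("%s\n", ((spt_prot & prot) != prot) ? "EACCES" : "ok");
		return (0);
	}
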
2315 2305
2316 2306 /*
2317 2307 * Return an array of locked large pages, for empty slots allocate
2318 2308 * private zero-filled anon pages.
2319 2309 */
2320 2310 static int
2321 2311 spt_anon_getpages(
2322 2312 struct seg *sptseg,
2323 2313 caddr_t sptaddr,
2324 2314 size_t len,
2325 2315 page_t *ppa[])
2326 2316 {
2327 2317 struct spt_data *sptd = sptseg->s_data;
2328 2318 struct anon_map *amp = sptd->spt_amp;
2329 2319 enum seg_rw rw = sptd->spt_prot;
2330 2320 uint_t szc = sptseg->s_szc;
2331 2321 size_t pg_sz, share_sz = page_get_pagesize(szc);
2332 2322 pgcnt_t lp_npgs;
2333 2323 caddr_t lp_addr, e_sptaddr;
2334 2324 uint_t vpprot, ppa_szc = 0;
2335 2325 struct vpage *vpage = NULL;
2336 2326 ulong_t j, ppa_idx;
2337 2327 int err, ierr = 0;
2338 2328 pgcnt_t an_idx;
2339 2329 anon_sync_obj_t cookie;
2340 2330 int anon_locked = 0;
2341 2331 pgcnt_t amp_pgs;
2342 2332
2343 2333
2344 2334 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2345 2335 ASSERT(len != 0);
2346 2336
2347 2337 pg_sz = share_sz;
2348 2338 lp_npgs = btop(pg_sz);
2349 2339 lp_addr = sptaddr;
2350 2340 e_sptaddr = sptaddr + len;
2351 2341 an_idx = seg_page(sptseg, sptaddr);
2352 2342 ppa_idx = 0;
2353 2343
2354 2344 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2355 2345
2356 2346 amp_pgs = page_get_pagecnt(amp->a_szc);
2357 2347
2358 2348 /*CONSTCOND*/
2359 2349 while (1) {
2360 2350 for (; lp_addr < e_sptaddr;
2361 2351 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2362 2352
2363 2353 /*
2364 2354 * If we're currently locked, and we get to a new
2365 2355 * page, unlock our current anon chunk.
2366 2356 */
2367 2357 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2368 2358 anon_array_exit(&cookie);
2369 2359 anon_locked = 0;
2370 2360 }
2371 2361 if (!anon_locked) {
2372 2362 anon_array_enter(amp, an_idx, &cookie);
2373 2363 anon_locked = 1;
2374 2364 }
2375 2365 ppa_szc = (uint_t)-1;
2376 2366 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2377 2367 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2378 2368 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2379 2369
2380 2370 if (ierr != 0) {
2381 2371 if (ierr > 0) {
2382 2372 err = FC_MAKE_ERR(ierr);
2383 2373 goto lpgs_err;
2384 2374 }
2385 2375 break;
2386 2376 }
2387 2377 }
2388 2378 if (lp_addr == e_sptaddr) {
2389 2379 break;
2390 2380 }
2391 2381 ASSERT(lp_addr < e_sptaddr);
2392 2382
2393 2383 /*
2394 2384 * ierr == -1 means we failed to allocate a large page.
2395 2385 * so do a size down operation.
2396 2386 *
2397 2387 * ierr == -2 means some other process that privately shares
2398 2388 * pages with this process has allocated a larger page and we
2399 2389 * need to retry with larger pages. So do a size up
2400 2390 * operation. This relies on the fact that large pages are
2401 2391 * never partially shared i.e. if we share any constituent
2402 2392 * page of a large page with another process we must share the
2403 2393 * entire large page. Note this cannot happen for SOFTLOCK
2404 2394 * case, unless current address (lpaddr) is at the beginning
2405 2395 * of the next page size boundary because the other process
2406 2396 * couldn't have relocated locked pages.
2407 2397 */
2408 2398 ASSERT(ierr == -1 || ierr == -2);
2409 2399 if (segvn_anypgsz) {
2410 2400 ASSERT(ierr == -2 || szc != 0);
2411 2401 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2412 2402 szc = (ierr == -1) ? szc - 1 : szc + 1;
2413 2403 } else {
2414 2404 /*
2415 2405 * For faults and segvn_anypgsz == 0
2416 2406 * we need to be careful not to loop forever
2417 2407 * if existing page is found with szc other
2418 2408 * than 0 or seg->s_szc. This could be due
2419 2409 * to page relocations on behalf of DR or
2420 2410 * more likely large page creation. For this
2421 2411 * case simply re-size to existing page's szc
2422 2412 * if returned by anon_map_getpages().
2423 2413 */
2424 2414 if (ppa_szc == (uint_t)-1) {
2425 2415 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2426 2416 } else {
2427 2417 ASSERT(ppa_szc <= sptseg->s_szc);
2428 2418 ASSERT(ierr == -2 || ppa_szc < szc);
2429 2419 ASSERT(ierr == -1 || ppa_szc > szc);
2430 2420 szc = ppa_szc;
2431 2421 }
2432 2422 }
2433 2423 pg_sz = page_get_pagesize(szc);
2434 2424 lp_npgs = btop(pg_sz);
2435 2425 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2436 2426 }
2437 2427 if (anon_locked) {
2438 2428 anon_array_exit(&cookie);
2439 2429 }
2440 2430 	ANON_LOCK_EXIT(&amp->a_rwlock);
2441 2431 return (0);
2442 2432
2443 2433 lpgs_err:
2444 2434 if (anon_locked) {
2445 2435 anon_array_exit(&cookie);
2446 2436 }
2447 2437 	ANON_LOCK_EXIT(&amp->a_rwlock);
2448 2438 for (j = 0; j < ppa_idx; j++)
2449 2439 page_unlock(ppa[j]);
2450 2440 return (err);
2451 2441 }
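
When anon_map_getpages() fails with -1 or -2, the loop above retries at a different page size as described in the comments. A standalone sketch of just that size-adjustment decision (spt_adjust_szc and its unsigned parameters are invented names mirroring the logic above):

	/*
	 * ierr == -1: the large page could not be built, step the size down.
	 * ierr == -2: a larger page already exists for this range, step up.
	 */
	static unsigned int
	spt_adjust_szc(int ierr, unsigned int szc, unsigned int seg_szc,
	    unsigned int ppa_szc, int anypgsz)
	{
		if (anypgsz)
			return ((ierr == -1) ? szc - 1 : szc + 1);

		/* otherwise jump straight to an existing page's size, if known */
		if (ppa_szc == (unsigned int)-1)
			return ((ierr == -1) ? 0 : seg_szc);
		return (ppa_szc);
	}
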
2452 2442
2453 2443 /*
2454 2444 * count the number of bytes in a set of spt pages that are currently not
2455 2445 * locked
2456 2446 */
2457 2447 static rctl_qty_t
2458 2448 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2459 2449 {
2460 2450 ulong_t i;
2461 2451 rctl_qty_t unlocked = 0;
2462 2452
2463 2453 for (i = 0; i < npages; i++) {
2464 2454 if (ppa[i]->p_lckcnt == 0)
2465 2455 unlocked += PAGESIZE;
2466 2456 }
2467 2457 return (unlocked);
2468 2458 }
2469 2459
2470 2460 extern u_longlong_t randtick(void);
2471 2461 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2472 2462 #define NLCK (NCPU_P2)
2473 2463 /* Random number with a range [0, n-1], n must be power of two */
2474 2464 #define RAND_P2(n) \
2475 2465 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
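
RAND_P2() mixes the thread pointer with randtick() and masks the result with (n - 1); because n is a power of two, the mask keeps exactly the low-order bits, so the value always falls in [0, n-1]. A minimal standalone illustration with made-up inputs:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned long mixed = 0x9e3779b97f4a7c15UL;	/* any entropy-ish value */
		unsigned long n = 16;				/* must be a power of two */

		/* n - 1 == 0xf, so only the low four bits survive: result is 0..15 */
		printf("%lu\n", mixed & (n - 1));
		return (0);
	}
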
2476 2466
2477 2467 int
2478 2468 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2479 2469 page_t **ppa, ulong_t *lockmap, size_t pos,
2480 2470 rctl_qty_t *locked)
2481 2471 {
2482 2472 struct shm_data *shmd = seg->s_data;
2483 2473 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2484 2474 ulong_t i;
2485 2475 int kernel;
2486 2476 pgcnt_t nlck = 0;
2487 2477 int rv = 0;
2488 2478 int use_reserved = 1;
2489 2479
2490 2480 /* return the number of bytes actually locked */
2491 2481 *locked = 0;
2492 2482
2493 2483 /*
2494 2484 * To avoid contention on freemem_lock, availrmem and pages_locked
2495 2485 * global counters are updated only every nlck locked pages instead of
2496 2486 * every time. Reserve nlck locks up front and deduct from this
2497 2487 * reservation for each page that requires a lock. When the reservation
2498 2488 * is consumed, reserve again. nlck is randomized, so the competing
2499 2489 * threads do not fall into a cyclic lock contention pattern. When
2500 2490 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2501 2491 * is used to lock pages.
2502 2492 */
2503 2493 for (i = 0; i < npages; anon_index++, pos++, i++) {
2504 2494 if (nlck == 0 && use_reserved == 1) {
2505 2495 nlck = NLCK + RAND_P2(NLCK);
2506 2496 /* if fewer loops left, decrease nlck */
2507 2497 nlck = MIN(nlck, npages - i);
2508 2498 /*
2509 2499 * Reserve nlck locks up front and deduct from this
2510 2500 * reservation for each page that requires a lock. When
2511 2501 * the reservation is consumed, reserve again.
2512 2502 */
2513 2503 mutex_enter(&freemem_lock);
2514 2504 if ((availrmem - nlck) < pages_pp_maximum) {
2515 2505 /* Do not do advance memory reserves */
2516 2506 use_reserved = 0;
2517 2507 } else {
2518 2508 availrmem -= nlck;
2519 2509 pages_locked += nlck;
2520 2510 }
2521 2511 mutex_exit(&freemem_lock);
2522 2512 }
2523 2513 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2524 2514 if (sptd->spt_ppa_lckcnt[anon_index] <
2525 2515 (ushort_t)DISM_LOCK_MAX) {
2526 2516 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2527 2517 (ushort_t)DISM_LOCK_MAX) {
2528 2518 cmn_err(CE_WARN,
2529 2519 "DISM page lock limit "
2530 2520 "reached on DISM offset 0x%lx\n",
2531 2521 anon_index << PAGESHIFT);
2532 2522 }
2533 2523 kernel = (sptd->spt_ppa &&
2534 2524 sptd->spt_ppa[anon_index]);
2535 2525 if (!page_pp_lock(ppa[i], 0, kernel ||
2536 2526 use_reserved)) {
2537 2527 sptd->spt_ppa_lckcnt[anon_index]--;
2538 2528 rv = EAGAIN;
2539 2529 break;
2540 2530 }
2541 2531 /* if this is a newly locked page, count it */
2542 2532 if (ppa[i]->p_lckcnt == 1) {
2543 2533 if (kernel == 0 && use_reserved == 1)
2544 2534 nlck--;
2545 2535 *locked += PAGESIZE;
2546 2536 }
2547 2537 shmd->shm_lckpgs++;
2548 2538 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2549 2539 if (lockmap != NULL)
2550 2540 BT_SET(lockmap, pos);
2551 2541 }
2552 2542 }
2553 2543 }
2554 2544 /* Return unused lock reservation */
2555 2545 if (nlck != 0 && use_reserved == 1) {
2556 2546 mutex_enter(&freemem_lock);
2557 2547 availrmem += nlck;
2558 2548 pages_locked -= nlck;
2559 2549 mutex_exit(&freemem_lock);
2560 2550 }
2561 2551
2562 2552 return (rv);
2563 2553 }
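
spt_lockpages() touches freemem_lock once per randomized batch instead of once per page: it reserves a chunk of locks up front, consumes the reservation as pages are locked, and returns whatever is left. A hypothetical userland sketch of that batching pattern (lock_pages_batched and the constants are invented; this is not the kernel accounting itself):

	#include <pthread.h>

	static pthread_mutex_t freemem_lock = PTHREAD_MUTEX_INITIALIZER;
	static long availrmem = 1 << 20;	/* pretend global page counter */

	static void
	lock_pages_batched(long npages)
	{
		long reserved = 0;

		for (long i = 0; i < npages; i++) {
			if (reserved == 0) {
				/* stands in for NLCK + RAND_P2(NLCK), clipped */
				long want = 64;
				if (want > npages - i)
					want = npages - i;
				pthread_mutex_lock(&freemem_lock);
				availrmem -= want;
				pthread_mutex_unlock(&freemem_lock);
				reserved = want;
			}
			/* ... lock one page here, consuming one reservation ... */
			reserved--;
		}
		if (reserved != 0) {	/* give back the unused part */
			pthread_mutex_lock(&freemem_lock);
			availrmem += reserved;
			pthread_mutex_unlock(&freemem_lock);
		}
	}
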
2564 2554
2565 2555 int
2566 2556 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2567 2557 rctl_qty_t *unlocked)
2568 2558 {
2569 2559 struct shm_data *shmd = seg->s_data;
2570 2560 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2571 2561 struct anon_map *amp = sptd->spt_amp;
2572 2562 struct anon *ap;
2573 2563 struct vnode *vp;
2574 2564 u_offset_t off;
2575 2565 struct page *pp;
2576 2566 int kernel;
2577 2567 anon_sync_obj_t cookie;
2578 2568 ulong_t i;
2579 2569 pgcnt_t nlck = 0;
2580 2570 pgcnt_t nlck_limit = NLCK;
2581 2571
2582 2572 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2583 2573 for (i = 0; i < npages; i++, anon_index++) {
2584 2574 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2585 2575 anon_array_enter(amp, anon_index, &cookie);
2586 2576 ap = anon_get_ptr(amp->ahp, anon_index);
2587 2577 ASSERT(ap);
2588 2578
2589 2579 swap_xlate(ap, &vp, &off);
2590 2580 anon_array_exit(&cookie);
2591 2581 pp = page_lookup(vp, off, SE_SHARED);
2592 2582 ASSERT(pp);
2593 2583 /*
2594 2584 * availrmem is decremented only for pages which are not
2595 2585 * in seg pcache, for pages in seg pcache availrmem was
2596 2586 * decremented in _dismpagelock()
2597 2587 */
2598 2588 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2599 2589 ASSERT(pp->p_lckcnt > 0);
2600 2590
2601 2591 /*
2602 2592 			 * unlock page but do not change availrmem, we do it
2603 2593 * ourselves every nlck loops.
2604 2594 */
2605 2595 page_pp_unlock(pp, 0, 1);
2606 2596 if (pp->p_lckcnt == 0) {
2607 2597 if (kernel == 0)
2608 2598 nlck++;
2609 2599 *unlocked += PAGESIZE;
2610 2600 }
2611 2601 page_unlock(pp);
2612 2602 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2613 2603 sptd->spt_ppa_lckcnt[anon_index]--;
2614 2604 shmd->shm_lckpgs--;
2615 2605 }
2616 2606
2617 2607 /*
2618 2608 * To reduce freemem_lock contention, do not update availrmem
2619 2609 * until at least NLCK pages have been unlocked.
2620 2610 * 1. No need to update if nlck is zero
2621 2611 * 2. Always update if the last iteration
2622 2612 */
2623 2613 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2624 2614 mutex_enter(&freemem_lock);
2625 2615 availrmem += nlck;
2626 2616 pages_locked -= nlck;
2627 2617 mutex_exit(&freemem_lock);
2628 2618 nlck = 0;
2629 2619 nlck_limit = NLCK + RAND_P2(NLCK);
2630 2620 }
2631 2621 }
2632 2622 	ANON_LOCK_EXIT(&amp->a_rwlock);
2633 2623
2634 2624 return (0);
2635 2625 }
2636 2626
2637 2627 /*ARGSUSED*/
2638 2628 static int
2639 2629 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2640 2630 int attr, int op, ulong_t *lockmap, size_t pos)
2641 2631 {
2642 2632 struct shm_data *shmd = seg->s_data;
2643 2633 struct seg *sptseg = shmd->shm_sptseg;
2644 2634 struct spt_data *sptd = sptseg->s_data;
2645 2635 struct kshmid *sp = sptd->spt_amp->a_sp;
2646 2636 pgcnt_t npages, a_npages;
2647 2637 page_t **ppa;
2648 2638 pgcnt_t an_idx, a_an_idx, ppa_idx;
2649 2639 caddr_t spt_addr, a_addr; /* spt and aligned address */
2650 2640 size_t a_len; /* aligned len */
2651 2641 size_t share_sz;
2652 2642 ulong_t i;
2653 2643 int sts = 0;
2654 2644 rctl_qty_t unlocked = 0;
2655 2645 rctl_qty_t locked = 0;
2656 2646 struct proc *p = curproc;
2657 2647 kproject_t *proj;
2658 2648
2659 2649 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2660 2650 ASSERT(sp != NULL);
2661 2651
2662 2652 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2663 2653 return (0);
2664 2654 }
2665 2655
2666 2656 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2667 2657 an_idx = seg_page(seg, addr);
2668 2658 npages = btopr(len);
2669 2659
2670 2660 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2671 2661 return (ENOMEM);
2672 2662 }
2673 2663
2674 2664 /*
2675 2665 * A shm's project never changes, so no lock needed.
2676 2666 * The shm has a hold on the project, so it will not go away.
2677 2667 * Since we have a mapping to shm within this zone, we know
2678 2668 * that the zone will not go away.
2679 2669 */
2680 2670 proj = sp->shm_perm.ipc_proj;
2681 2671
2682 2672 if (op == MC_LOCK) {
2683 2673
2684 2674 /*
2685 2675 * Need to align addr and size request if they are not
2686 2676 * aligned so we can always allocate large page(s) however
2687 2677 * we only lock what was requested in initial request.
2688 2678 */
2689 2679 share_sz = page_get_pagesize(sptseg->s_szc);
2690 2680 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2691 2681 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2692 2682 share_sz);
2693 2683 a_npages = btop(a_len);
2694 2684 a_an_idx = seg_page(seg, a_addr);
2695 2685 spt_addr = sptseg->s_base + ptob(a_an_idx);
2696 2686 ppa_idx = an_idx - a_an_idx;
2697 2687
2698 2688 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2699 2689 KM_NOSLEEP)) == NULL) {
2700 2690 return (ENOMEM);
2701 2691 }
2702 2692
2703 2693 /*
2704 2694 * Don't cache any new pages for IO and
2705 2695 * flush any cached pages.
2706 2696 */
2707 2697 mutex_enter(&sptd->spt_lock);
2708 2698 if (sptd->spt_ppa != NULL)
2709 2699 sptd->spt_flags |= DISM_PPA_CHANGED;
2710 2700
2711 2701 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2712 2702 if (sts != 0) {
2713 2703 mutex_exit(&sptd->spt_lock);
2714 2704 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2715 2705 return (sts);
2716 2706 }
2717 2707
2718 2708 mutex_enter(&sp->shm_mlock);
2719 2709 /* enforce locked memory rctl */
2720 2710 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2721 2711
2722 2712 mutex_enter(&p->p_lock);
2723 2713 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2724 2714 mutex_exit(&p->p_lock);
2725 2715 sts = EAGAIN;
2726 2716 } else {
2727 2717 mutex_exit(&p->p_lock);
2728 2718 sts = spt_lockpages(seg, an_idx, npages,
2729 2719 &ppa[ppa_idx], lockmap, pos, &locked);
2730 2720
2731 2721 /*
2732 2722 * correct locked count if not all pages could be
2733 2723 * locked
2734 2724 */
2735 2725 if ((unlocked - locked) > 0) {
2736 2726 rctl_decr_locked_mem(NULL, proj,
2737 2727 (unlocked - locked), 0);
2738 2728 }
2739 2729 }
2740 2730 /*
2741 2731 * unlock pages
2742 2732 */
2743 2733 for (i = 0; i < a_npages; i++)
2744 2734 page_unlock(ppa[i]);
2745 2735 if (sptd->spt_ppa != NULL)
2746 2736 sptd->spt_flags |= DISM_PPA_CHANGED;
2747 2737 mutex_exit(&sp->shm_mlock);
2748 2738 mutex_exit(&sptd->spt_lock);
2749 2739
2750 2740 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2751 2741
2752 2742 } else if (op == MC_UNLOCK) { /* unlock */
2753 2743 page_t **ppa;
2754 2744
2755 2745 mutex_enter(&sptd->spt_lock);
2756 2746 if (shmd->shm_lckpgs == 0) {
2757 2747 mutex_exit(&sptd->spt_lock);
2758 2748 return (0);
2759 2749 }
2760 2750 /*
2761 2751 * Don't cache new IO pages.
2762 2752 */
2763 2753 if (sptd->spt_ppa != NULL)
2764 2754 sptd->spt_flags |= DISM_PPA_CHANGED;
2765 2755
2766 2756 mutex_enter(&sp->shm_mlock);
2767 2757 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2768 2758 if ((ppa = sptd->spt_ppa) != NULL)
2769 2759 sptd->spt_flags |= DISM_PPA_CHANGED;
2770 2760 mutex_exit(&sptd->spt_lock);
2771 2761
2772 2762 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2773 2763 mutex_exit(&sp->shm_mlock);
2774 2764
2775 2765 if (ppa != NULL)
2776 2766 seg_ppurge_wiredpp(ppa);
2777 2767 }
2778 2768 return (sts);
2779 2769 }
2780 2770
2781 2771 /*ARGSUSED*/
2782 2772 int
2783 2773 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2784 2774 {
2785 2775 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2786 2776 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2787 2777 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2788 2778
2789 2779 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2790 2780
2791 2781 /*
2792 2782 * ISM segment is always rw.
2793 2783 */
2794 2784 while (--pgno >= 0)
2795 2785 *protv++ = sptd->spt_prot;
2796 2786 return (0);
2797 2787 }
2798 2788
2799 2789 /*ARGSUSED*/
2800 2790 u_offset_t
2801 2791 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2802 2792 {
2803 2793 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2804 2794
2805 2795 /* Offset does not matter in ISM memory */
2806 2796
2807 2797 return ((u_offset_t)0);
2808 2798 }
2809 2799
2810 2800 /* ARGSUSED */
2811 2801 int
2812 2802 segspt_shmgettype(struct seg *seg, caddr_t addr)
2813 2803 {
2814 2804 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2815 2805 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2816 2806
2817 2807 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2818 2808
2819 2809 /*
2820 2810 * The shared memory mapping is always MAP_SHARED, SWAP is only
2821 2811 * reserved for DISM
2822 2812 */
2823 2813 return (MAP_SHARED |
2824 2814 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2825 2815 }
2826 2816
2827 2817 /*ARGSUSED*/
2828 2818 int
2829 2819 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2830 2820 {
2831 2821 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2832 2822 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2833 2823
2834 2824 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2835 2825
2836 2826 *vpp = sptd->spt_vp;
2837 2827 return (0);
2838 2828 }
2839 2829
2840 2830 /*
2841 2831 * We need to wait for pending IO to complete to a DISM segment in order for
2842 2832 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2843 2833 * than enough time to wait.
2844 2834 */
2845 2835 static clock_t spt_pcache_wait = 120;
2846 2836
2847 2837 /*ARGSUSED*/
2848 2838 static int
2849 2839 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2850 2840 {
2851 2841 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2852 2842 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2853 2843 struct anon_map *amp;
2854 2844 pgcnt_t pg_idx;
2855 2845 ushort_t gen;
2856 2846 clock_t end_lbolt;
2857 2847 int writer;
2858 2848 page_t **ppa;
2859 2849
2860 2850 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2861 2851
2862 2852 if (behav == MADV_FREE) {
2863 2853 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2864 2854 return (0);
2865 2855
2866 2856 amp = sptd->spt_amp;
2867 2857 pg_idx = seg_page(seg, addr);
2868 2858
2869 2859 mutex_enter(&sptd->spt_lock);
2870 2860 if ((ppa = sptd->spt_ppa) == NULL) {
2871 2861 mutex_exit(&sptd->spt_lock);
2872 2862 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2873 2863 anon_disclaim(amp, pg_idx, len);
2874 2864 			ANON_LOCK_EXIT(&amp->a_rwlock);
2875 2865 return (0);
2876 2866 }
2877 2867
2878 2868 sptd->spt_flags |= DISM_PPA_CHANGED;
2879 2869 gen = sptd->spt_gen;
2880 2870
2881 2871 mutex_exit(&sptd->spt_lock);
2882 2872
2883 2873 /*
2884 2874 * Purge all DISM cached pages
2885 2875 */
2886 2876 seg_ppurge_wiredpp(ppa);
2887 2877
2888 2878 /*
2889 2879 * Drop the AS_LOCK so that other threads can grab it
2890 2880 * in the as_pageunlock path and hopefully get the segment
2891 2881 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2892 2882 * to keep this segment resident.
2893 2883 */
2894 2884 writer = AS_WRITE_HELD(seg->s_as);
2895 2885 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2896 2886 AS_LOCK_EXIT(seg->s_as);
2897 2887
2898 2888 mutex_enter(&sptd->spt_lock);
2899 2889
2900 2890 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2901 2891
2902 2892 /*
2903 2893 * Try to wait for pages to get kicked out of the seg_pcache.
2904 2894 */
2905 2895 while (sptd->spt_gen == gen &&
2906 2896 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2907 2897 ddi_get_lbolt() < end_lbolt) {
2908 2898 if (!cv_timedwait_sig(&sptd->spt_cv,
2909 2899 &sptd->spt_lock, end_lbolt)) {
2910 2900 break;
2911 2901 }
2912 2902 }
2913 2903
2914 2904 mutex_exit(&sptd->spt_lock);
2915 2905
2916 2906 /* Regrab the AS_LOCK and release our hold on the segment */
2917 2907 AS_LOCK_ENTER(seg->s_as, writer ? RW_WRITER : RW_READER);
2918 2908 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2919 2909 if (shmd->shm_softlockcnt <= 0) {
2920 2910 if (AS_ISUNMAPWAIT(seg->s_as)) {
2921 2911 mutex_enter(&seg->s_as->a_contents);
2922 2912 if (AS_ISUNMAPWAIT(seg->s_as)) {
2923 2913 AS_CLRUNMAPWAIT(seg->s_as);
2924 2914 cv_broadcast(&seg->s_as->a_cv);
2925 2915 }
2926 2916 mutex_exit(&seg->s_as->a_contents);
2927 2917 }
2928 2918 }
2929 2919
2930 2920 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2931 2921 anon_disclaim(amp, pg_idx, len);
2932 2922 		ANON_LOCK_EXIT(&amp->a_rwlock);
2933 2923 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2934 2924 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2935 2925 int already_set;
2936 2926 ulong_t anon_index;
2937 2927 lgrp_mem_policy_t policy;
2938 2928 caddr_t shm_addr;
2939 2929 size_t share_size;
2940 2930 size_t size;
2941 2931 struct seg *sptseg = shmd->shm_sptseg;
2942 2932 caddr_t sptseg_addr;
2943 2933
2944 2934 /*
2945 2935 * Align address and length to page size of underlying segment
2946 2936 */
2947 2937 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2948 2938 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2949 2939 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2950 2940 share_size);
2951 2941
2952 2942 amp = shmd->shm_amp;
2953 2943 anon_index = seg_page(seg, shm_addr);
2954 2944
2955 2945 /*
2956 2946 * And now we may have to adjust size downward if we have
2957 2947 * exceeded the realsize of the segment or initial anon
2958 2948 * allocations.
2959 2949 */
2960 2950 sptseg_addr = sptseg->s_base + ptob(anon_index);
2961 2951 if ((sptseg_addr + size) >
2962 2952 (sptseg->s_base + sptd->spt_realsize))
2963 2953 size = (sptseg->s_base + sptd->spt_realsize) -
2964 2954 sptseg_addr;
2965 2955
2966 2956 /*
2967 2957 * Set memory allocation policy for this segment
2968 2958 */
2969 2959 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2970 2960 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2971 2961 NULL, 0, len);
2972 2962
2973 2963 /*
2974 2964 * If random memory allocation policy set already,
2975 2965 * don't bother reapplying it.
2976 2966 */
2977 2967 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2978 2968 return (0);
2979 2969
2980 2970 /*
2981 2971 * Mark any existing pages in the given range for
2982 2972 * migration, flushing the I/O page cache, and using
2983 2973 * underlying segment to calculate anon index and get
2984 2974 * anonmap and vnode pointer from
2985 2975 */
2986 2976 if (shmd->shm_softlockcnt > 0)
2987 2977 segspt_purge(seg);
2988 2978
2989 2979 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2990 2980 }
2991 2981
2992 2982 return (0);
2993 2983 }
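
The MADV_FREE path above waits up to spt_pcache_wait seconds for the cached page array to be purged, bounded by an absolute deadline and rechecking the generation count on each wakeup. A hypothetical userland analogue of that bounded wait built on pthread_cond_timedwait() (wait_for_purge and its globals are invented names):

	#include <pthread.h>
	#include <time.h>

	static pthread_mutex_t spt_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t spt_cv = PTHREAD_COND_INITIALIZER;
	static unsigned int gen;
	static int ppa_changed;

	static void
	wait_for_purge(unsigned int start_gen, int timeout_sec)
	{
		struct timespec deadline;

		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += timeout_sec;

		pthread_mutex_lock(&spt_lock);
		while (gen == start_gen && ppa_changed) {
			if (pthread_cond_timedwait(&spt_cv, &spt_lock,
			    &deadline) != 0)
				break;	/* ETIMEDOUT or other error: give up */
		}
		pthread_mutex_unlock(&spt_lock);
	}
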
2994 2984
2995 2985 /*ARGSUSED*/
2996 2986 void
2997 2987 segspt_shmdump(struct seg *seg)
2998 2988 {
2999 2989 /* no-op for ISM segment */
3000 2990 }
3001 2991
3002 2992 /*ARGSUSED*/
3003 2993 static faultcode_t
3004 2994 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
3005 2995 {
3006 2996 return (ENOTSUP);
3007 2997 }
3008 2998
3009 2999 /*
3010 3000 * get a memory ID for an addr in a given segment
3011 3001 */
3012 3002 static int
3013 3003 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3014 3004 {
3015 3005 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3016 3006 struct anon *ap;
3017 3007 size_t anon_index;
3018 3008 struct anon_map *amp = shmd->shm_amp;
3019 3009 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3020 3010 struct seg *sptseg = shmd->shm_sptseg;
3021 3011 anon_sync_obj_t cookie;
3022 3012
3023 3013 anon_index = seg_page(seg, addr);
3024 3014
3025 3015 if (addr > (seg->s_base + sptd->spt_realsize)) {
3026 3016 return (EFAULT);
3027 3017 }
3028 3018
3029 3019 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3030 3020 anon_array_enter(amp, anon_index, &cookie);
3031 3021 ap = anon_get_ptr(amp->ahp, anon_index);
3032 3022 if (ap == NULL) {
3033 3023 struct page *pp;
3034 3024 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3035 3025
3036 3026 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3037 3027 if (pp == NULL) {
3038 3028 anon_array_exit(&cookie);
3039 3029 			ANON_LOCK_EXIT(&amp->a_rwlock);
3040 3030 return (ENOMEM);
3041 3031 }
3042 3032 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3043 3033 page_unlock(pp);
3044 3034 }
3045 3035 anon_array_exit(&cookie);
3046 3036 	ANON_LOCK_EXIT(&amp->a_rwlock);
3047 3037 memidp->val[0] = (uintptr_t)ap;
3048 3038 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3049 3039 return (0);
3050 3040 }
3051 3041
3052 3042 /*
3053 3043 * Get memory allocation policy info for specified address in given segment
3054 3044 */
3055 3045 static lgrp_mem_policy_info_t *
3056 3046 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3057 3047 {
3058 3048 struct anon_map *amp;
3059 3049 ulong_t anon_index;
3060 3050 lgrp_mem_policy_info_t *policy_info;
3061 3051 struct shm_data *shm_data;
3062 3052
3063 3053 ASSERT(seg != NULL);
3064 3054
3065 3055 /*
3066 3056 * Get anon_map from segshm
3067 3057 *
3068 3058 * Assume that no lock needs to be held on anon_map, since
3069 3059 * it should be protected by its reference count which must be
3070 3060 * nonzero for an existing segment
3071 3061 * Need to grab readers lock on policy tree though
3072 3062 */
3073 3063 shm_data = (struct shm_data *)seg->s_data;
3074 3064 if (shm_data == NULL)
3075 3065 return (NULL);
3076 3066 amp = shm_data->shm_amp;
3077 3067 ASSERT(amp->refcnt != 0);
3078 3068
3079 3069 /*
3080 3070 * Get policy info
3081 3071 *
3082 3072 * Assume starting anon index of 0
3083 3073 */
3084 3074 anon_index = seg_page(seg, addr);
3085 3075 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3086 3076
3087 3077 return (policy_info);
3088 3078 }
3089 3079
3090 3080 /*ARGSUSED*/
3091 3081 static int
3092 3082 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3093 3083 {
3094 3084 return (0);
3095 3085 }
837 lines elided