1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 #include <sys/param.h>
26 #include <sys/user.h>
27 #include <sys/mman.h>
28 #include <sys/kmem.h>
29 #include <sys/sysmacros.h>
30 #include <sys/cmn_err.h>
31 #include <sys/systm.h>
32 #include <sys/tuneable.h>
33 #include <vm/hat.h>
34 #include <vm/seg.h>
35 #include <vm/as.h>
36 #include <vm/anon.h>
37 #include <vm/page.h>
38 #include <sys/buf.h>
39 #include <sys/swap.h>
40 #include <sys/atomic.h>
41 #include <vm/seg_spt.h>
42 #include <sys/debug.h>
43 #include <sys/vtrace.h>
44 #include <sys/shm.h>
45 #include <sys/shm_impl.h>
46 #include <sys/lgrp.h>
47 #include <sys/vmsystm.h>
48 #include <sys/policy.h>
49 #include <sys/project.h>
50 #include <sys/tnf_probe.h>
51 #include <sys/zone.h>
52
53 #define SEGSPTADDR (caddr_t)0x0
54
55 /*
56 * # pages used for spt
57 */
58 size_t spt_used;
59
60 /*
61 * segspt_minfree is the memory left for the system after ISM
62 * locked its pages; it is set to 5% of availrmem in
63 * sptcreate when ISM is created. ISM should not use more
64 * than ~90% of availrmem; if it does, then the performance
65 * of the system may decrease. Machines with large memories may
66 * be able to use more memory for ISM, so we set the default
67 * segspt_minfree to 5% (which gives ISM at most 95% of availrmem).
68 * If somebody wants even more memory for ISM (risking hanging
69 * the system) they can patch segspt_minfree to a smaller number.
70 */
71 pgcnt_t segspt_minfree = 0;
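/*
 * Illustrative sketch (not part of the original source): segspt_minfree is
 * an ordinary kernel tunable, so the 5% default can be overridden before
 * any ISM segment is created.  The numbers below are assumptions for the
 * sake of the example, not recommendations.
 *
 *	In sptcreate() the default works out to:
 *		segspt_minfree = availrmem / 20;	(i.e. 5% of availrmem)
 *
 *	A smaller floor could be patched in, e.g. via /etc/system:
 *		set segspt_minfree = 0x1000
 *	which would let ISM lock all but 0x1000 pages of availrmem
 *	(risking the hang described above).
 */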
72
73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 static void segspt_free(struct seg *seg);
76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78
79 static void
80 segspt_badop()
81 {
82 panic("segspt_badop called");
83 /*NOTREACHED*/
84 }
85
86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
87
88 static const struct seg_ops segspt_ops = {
89 .dup = SEGSPT_BADOP(int),
90 .unmap = segspt_unmap,
91 .free = segspt_free,
92 .fault = SEGSPT_BADOP(int),
93 .faulta = SEGSPT_BADOP(faultcode_t),
94 .setprot = SEGSPT_BADOP(int),
95 .checkprot = SEGSPT_BADOP(int),
96 .kluster = SEGSPT_BADOP(int),
97 .swapout = SEGSPT_BADOP(size_t),
98 .sync = SEGSPT_BADOP(int),
99 .incore = SEGSPT_BADOP(size_t),
100 .lockop = SEGSPT_BADOP(int),
101 .getprot = SEGSPT_BADOP(int),
102 .getoffset = SEGSPT_BADOP(u_offset_t),
103 .gettype = SEGSPT_BADOP(int),
104 .getvp = SEGSPT_BADOP(int),
105 .advise = SEGSPT_BADOP(int),
106 .dump = SEGSPT_BADOP(void),
107 .pagelock = SEGSPT_BADOP(int),
108 .setpagesize = SEGSPT_BADOP(int),
109 .getmemid = SEGSPT_BADOP(int),
110 .getpolicy = segspt_getpolicy,
111 .capable = SEGSPT_BADOP(int),
112 };
113
114 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
115 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
116 static void segspt_shmfree(struct seg *seg);
117 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
118 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
119 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
120 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
121 register size_t len, register uint_t prot);
122 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
123 uint_t prot);
124 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
125 static size_t segspt_shmswapout(struct seg *seg);
126 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
127 register char *vec);
128 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
129 int attr, uint_t flags);
130 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
131 int attr, int op, ulong_t *lockmap, size_t pos);
132 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
133 uint_t *protv);
134 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
135 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
136 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
137 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
138 uint_t behav);
139 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
140 struct page ***, enum lock_type, enum seg_rw);
141 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
142 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
143
144 const struct seg_ops segspt_shmops = {
145 .dup = segspt_shmdup,
146 .unmap = segspt_shmunmap,
147 .free = segspt_shmfree,
148 .fault = segspt_shmfault,
149 .faulta = segspt_shmfaulta,
150 .setprot = segspt_shmsetprot,
151 .checkprot = segspt_shmcheckprot,
152 .kluster = segspt_shmkluster,
153 .swapout = segspt_shmswapout,
154 .sync = segspt_shmsync,
155 .incore = segspt_shmincore,
156 .lockop = segspt_shmlockop,
157 .getprot = segspt_shmgetprot,
158 .getoffset = segspt_shmgetoffset,
159 .gettype = segspt_shmgettype,
160 .getvp = segspt_shmgetvp,
161 .advise = segspt_shmadvise,
162 .pagelock = segspt_shmpagelock,
163 .getmemid = segspt_shmgetmemid,
164 .getpolicy = segspt_shmgetpolicy,
165 };
166
167 static void segspt_purge(struct seg *seg);
168 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
169 enum seg_rw, int);
170 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
171 page_t **ppa);
172
173
174
175 /*ARGSUSED*/
176 int
177 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
178 uint_t prot, uint_t flags, uint_t share_szc)
179 {
180 int err;
181 struct as *newas;
182 struct segspt_crargs sptcargs;
183
184 #ifdef DEBUG
185 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
186 tnf_ulong, size, size );
187 #endif
188 if (segspt_minfree == 0) /* leave min 5% of availrmem */
189 segspt_minfree = availrmem/20; /* for the system */
190
191 if (!hat_supported(HAT_SHARED_PT, (void *)0))
192 return (EINVAL);
193
194 /*
195 * get a new as for this shared memory segment
196 */
197 newas = as_alloc();
198 newas->a_proc = NULL;
199 sptcargs.amp = amp;
200 sptcargs.prot = prot;
201 sptcargs.flags = flags;
202 sptcargs.szc = share_szc;
203 /*
204 * create a shared page table (spt) segment
205 */
206
207 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
208 as_free(newas);
209 return (err);
210 }
211 *sptseg = sptcargs.seg_spt;
212 return (0);
213 }
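/*
 * Illustrative sketch (an assumption about the caller, not code from this
 * file): the System V shared memory layer is expected to drive this pair
 * roughly as follows, with amp being the segment's anon_map and the error
 * handling reduced to the bare minimum.  A flags value of 0 selects classic
 * ISM; passing SHM_PAGEABLE instead would select DISM.
 *
 *	struct seg *sptseg;
 *	int err;
 *
 *	err = sptcreate(amp->size, &sptseg, amp, PROT_ALL, 0, 0);
 *	if (err == 0) {
 *		// sptseg->s_as is the dummy address space whose page
 *		// tables are shared by every attaching process.
 *		...
 *		sptdestroy(sptseg->s_as, amp);	// tears down via as_unmap()
 *	}
 */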
214
215 void
216 sptdestroy(struct as *as, struct anon_map *amp)
217 {
218
219 #ifdef DEBUG
220 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
221 #endif
222 (void) as_unmap(as, SEGSPTADDR, amp->size);
223 as_free(as);
224 }
225
226 /*
227 * called from seg_free().
228 * free (i.e., unlock, unmap, return to free list)
229 * all the pages in the given seg.
230 */
231 void
232 segspt_free(struct seg *seg)
233 {
234 struct spt_data *sptd = (struct spt_data *)seg->s_data;
235
236 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
237
238 if (sptd != NULL) {
239 if (sptd->spt_realsize)
240 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
241
242 if (sptd->spt_ppa_lckcnt)
243 kmem_free(sptd->spt_ppa_lckcnt,
244 sizeof (*sptd->spt_ppa_lckcnt)
245 * btopr(sptd->spt_amp->size));
246 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
247 cv_destroy(&sptd->spt_cv);
248 mutex_destroy(&sptd->spt_lock);
249 kmem_free(sptd, sizeof (*sptd));
250 }
251 }
252
253 /*ARGSUSED*/
254 static int
255 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
256 uint_t flags)
257 {
258 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
259
260 return (0);
261 }
262
263 /*ARGSUSED*/
264 static size_t
265 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
266 {
267 caddr_t eo_seg;
268 pgcnt_t npages;
269 struct shm_data *shmd = (struct shm_data *)seg->s_data;
270 struct seg *sptseg;
271 struct spt_data *sptd;
272
273 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
274 #ifdef lint
275 seg = seg;
276 #endif
277 sptseg = shmd->shm_sptseg;
278 sptd = sptseg->s_data;
279
280 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
281 eo_seg = addr + len;
282 while (addr < eo_seg) {
283 /* page exists, and it's locked. */
284 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
285 SEG_PAGE_ANON;
286 addr += PAGESIZE;
287 }
288 return (len);
289 } else {
290 struct anon_map *amp = shmd->shm_amp;
291 struct anon *ap;
292 page_t *pp;
293 pgcnt_t anon_index;
294 struct vnode *vp;
295 u_offset_t off;
296 ulong_t i;
297 int ret;
298 anon_sync_obj_t cookie;
299
300 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
301 anon_index = seg_page(seg, addr);
302 npages = btopr(len);
303 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
304 return (EINVAL);
305 }
306 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
307 for (i = 0; i < npages; i++, anon_index++) {
308 ret = 0;
309 anon_array_enter(amp, anon_index, &cookie);
310 ap = anon_get_ptr(amp->ahp, anon_index);
311 if (ap != NULL) {
312 swap_xlate(ap, &vp, &off);
313 anon_array_exit(&cookie);
314 pp = page_lookup_nowait(vp, off, SE_SHARED);
315 if (pp != NULL) {
316 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
317 page_unlock(pp);
318 }
319 } else {
320 anon_array_exit(&cookie);
321 }
322 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
323 ret |= SEG_PAGE_LOCKED;
324 }
325 *vec++ = (char)ret;
326 }
327 ANON_LOCK_EXIT(&amp->a_rwlock);
328 return (len);
329 }
330 }
331
332 static int
333 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
334 {
335 size_t share_size;
336
337 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
338
339 /*
340 * seg.s_size may have been rounded up to the largest page size
341 * in shmat().
342 * XXX This should be cleaned up. sptdestroy should take a length
343 * argument which should be the same as sptcreate. Then
344 * this rounding would not be needed (or is done in shm.c)
345 * Only the check for full segment will be needed.
346 *
347 * XXX -- shouldn't raddr == 0 always? These tests don't seem
348 * to be useful at all.
349 */
350 share_size = page_get_pagesize(seg->s_szc);
351 ssize = P2ROUNDUP(ssize, share_size);
352
353 if (raddr == seg->s_base && ssize == seg->s_size) {
354 seg_free(seg);
355 return (0);
356 } else
357 return (EINVAL);
358 }
359
360 int
361 segspt_create(struct seg *seg, caddr_t argsp)
362 {
363 int err;
364 caddr_t addr = seg->s_base;
365 struct spt_data *sptd;
366 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
367 struct anon_map *amp = sptcargs->amp;
368 struct kshmid *sp = amp->a_sp;
369 struct cred *cred = CRED();
370 ulong_t i, j, anon_index = 0;
371 pgcnt_t npages = btopr(amp->size);
372 struct vnode *vp;
373 page_t **ppa;
374 uint_t hat_flags;
375 size_t pgsz;
376 pgcnt_t pgcnt;
377 caddr_t a;
378 pgcnt_t pidx;
379 size_t sz;
380 proc_t *procp = curproc;
381 rctl_qty_t lockedbytes = 0;
382 kproject_t *proj;
383
384 /*
385 * We are holding the a_lock on the underlying dummy as,
386 * so we can make calls to the HAT layer.
387 */
388 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
389 ASSERT(sp != NULL);
390
391 #ifdef DEBUG
392 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
393 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
394 #endif
395 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
396 if (err = anon_swap_adjust(npages))
397 return (err);
398 }
399 err = ENOMEM;
400
401 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
402 goto out1;
403
404 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
405 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
406 KM_NOSLEEP)) == NULL)
407 goto out2;
408 }
409
410 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
411
412 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
413 goto out3;
414
415 seg->s_ops = &segspt_ops;
416 sptd->spt_vp = vp;
417 sptd->spt_amp = amp;
418 sptd->spt_prot = sptcargs->prot;
419 sptd->spt_flags = sptcargs->flags;
420 seg->s_data = (caddr_t)sptd;
421 sptd->spt_ppa = NULL;
422 sptd->spt_ppa_lckcnt = NULL;
423 seg->s_szc = sptcargs->szc;
424 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
425 sptd->spt_gen = 0;
426
427 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
428 if (seg->s_szc > amp->a_szc) {
429 amp->a_szc = seg->s_szc;
430 }
431 ANON_LOCK_EXIT(&amp->a_rwlock);
432
433 /*
434 * Set policy to affect initial allocation of pages in
435 * anon_map_createpages()
436 */
437 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
438 NULL, 0, ptob(npages));
439
440 if (sptcargs->flags & SHM_PAGEABLE) {
441 size_t share_sz;
442 pgcnt_t new_npgs, more_pgs;
443 struct anon_hdr *nahp;
444 zone_t *zone;
445
446 share_sz = page_get_pagesize(seg->s_szc);
447 if (!IS_P2ALIGNED(amp->size, share_sz)) {
448 /*
449 * We round up the size of the anon array to a
450 * 4M boundary because we always create pages in
451 * 4M chunks when locking and faulting them in,
452 * so we don't have to check corner cases such as
453 * whether there is enough space to allocate a
454 * full 4M page.
455 */
456 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
457 more_pgs = new_npgs - npages;
458
459 /*
460 * The zone will never be NULL, as a fully created
461 * shm always has an owning zone.
462 */
463 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
464 ASSERT(zone != NULL);
465 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
466 err = ENOMEM;
467 goto out4;
468 }
469
470 nahp = anon_create(new_npgs, ANON_SLEEP);
471 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
472 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
473 ANON_SLEEP);
474 anon_release(amp->ahp, npages);
475 amp->ahp = nahp;
476 ASSERT(amp->swresv == ptob(npages));
477 amp->swresv = amp->size = ptob(new_npgs);
478 ANON_LOCK_EXIT(&amp->a_rwlock);
479 npages = new_npgs;
480 }
481
482 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
483 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
484 sptd->spt_pcachecnt = 0;
485 sptd->spt_realsize = ptob(npages);
486 sptcargs->seg_spt = seg;
487 return (0);
488 }
489
490 /*
491 * get array of pages for each anon slot in amp
492 */
493 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
494 seg, addr, S_CREATE, cred)) != 0)
495 goto out4;
496
497 mutex_enter(&sp->shm_mlock);
498
499 /* May be partially locked, so count bytes to charge for locking */
500 for (i = 0; i < npages; i++)
501 if (ppa[i]->p_lckcnt == 0)
502 lockedbytes += PAGESIZE;
503
504 proj = sp->shm_perm.ipc_proj;
505
506 if (lockedbytes > 0) {
507 mutex_enter(&procp->p_lock);
508 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
509 mutex_exit(&procp->p_lock);
510 mutex_exit(&sp->shm_mlock);
511 for (i = 0; i < npages; i++)
512 page_unlock(ppa[i]);
513 err = ENOMEM;
514 goto out4;
515 }
516 mutex_exit(&procp->p_lock);
517 }
518
519 /*
520 * addr is initial address corresponding to the first page on ppa list
521 */
522 for (i = 0; i < npages; i++) {
523 /* attempt to lock all pages */
524 if (page_pp_lock(ppa[i], 0, 1) == 0) {
525 /*
526 * if unable to lock any page, unlock all
527 * of them and return error
528 */
529 for (j = 0; j < i; j++)
530 page_pp_unlock(ppa[j], 0, 1);
531 for (i = 0; i < npages; i++)
532 page_unlock(ppa[i]);
533 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
534 mutex_exit(&sp->shm_mlock);
535 err = ENOMEM;
536 goto out4;
537 }
538 }
539 mutex_exit(&sp->shm_mlock);
540
541 /*
542 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
543 * for the entire life of the segment, for example platforms
544 * that do not support Dynamic Reconfiguration.
545 */
546 hat_flags = HAT_LOAD_SHARE;
547 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
548 hat_flags |= HAT_LOAD_LOCK;
549
550 /*
551 * Load translations one large page at a time
552 * to make sure we don't create mappings bigger than
553 * segment's size code in case underlying pages
554 * are shared with segvn's segment that uses bigger
555 * size code than we do.
556 */
557 pgsz = page_get_pagesize(seg->s_szc);
558 pgcnt = page_get_pagecnt(seg->s_szc);
559 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
560 sz = MIN(pgsz, ptob(npages - pidx));
561 hat_memload_array(seg->s_as->a_hat, a, sz,
562 &ppa[pidx], sptd->spt_prot, hat_flags);
563 }
564
565 /*
566 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
567 * we will leave the pages locked SE_SHARED for the life
568 * of the ISM segment. This will prevent any calls to
569 * hat_pageunload() on this ISM segment for those platforms.
570 */
571 if (!(hat_flags & HAT_LOAD_LOCK)) {
572 /*
573 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
574 * we no longer need to hold the SE_SHARED lock on the pages,
575 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
576 * SE_SHARED lock on the pages as necessary.
577 */
578 for (i = 0; i < npages; i++)
579 page_unlock(ppa[i]);
580 }
581 sptd->spt_pcachecnt = 0;
582 kmem_free(ppa, ((sizeof (page_t *)) * npages));
583 sptd->spt_realsize = ptob(npages);
584 atomic_add_long(&spt_used, npages);
585 sptcargs->seg_spt = seg;
586 return (0);
587
588 out4:
589 seg->s_data = NULL;
590 kmem_free(vp, sizeof (*vp));
591 cv_destroy(&sptd->spt_cv);
592 out3:
593 mutex_destroy(&sptd->spt_lock);
594 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
595 kmem_free(ppa, (sizeof (*ppa) * npages));
596 out2:
597 kmem_free(sptd, sizeof (*sptd));
598 out1:
599 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
600 anon_swap_restore(npages);
601 return (err);
602 }
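/*
 * Worked example for the hat_memload_array() loop above (values are
 * assumptions for illustration): with seg->s_szc selecting a 4M page size,
 * pgsz is 4M and pgcnt is 512 (for an 8K base page).  A 10M ISM segment
 * has npages = 1280, so the loop issues three calls:
 *
 *	pidx	a		sz = MIN(pgsz, ptob(npages - pidx))
 *	0	addr		4M
 *	512	addr + 4M	4M
 *	1024	addr + 8M	2M	(tail smaller than one large page)
 *
 * Each call maps at most one large-page-aligned chunk, so the HAT never
 * sees a mapping request larger than the segment's size code.
 */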
603
604 /*ARGSUSED*/
605 void
606 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
607 {
608 struct page *pp;
609 struct spt_data *sptd = (struct spt_data *)seg->s_data;
610 pgcnt_t npages;
611 ulong_t anon_idx;
612 struct anon_map *amp;
613 struct anon *ap;
614 struct vnode *vp;
615 u_offset_t off;
616 uint_t hat_flags;
617 int root = 0;
618 pgcnt_t pgs, curnpgs = 0;
619 page_t *rootpp;
620 rctl_qty_t unlocked_bytes = 0;
621 kproject_t *proj;
622 kshmid_t *sp;
623
624 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
625
626 len = P2ROUNDUP(len, PAGESIZE);
627
628 npages = btop(len);
629
630 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
631 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
632 (sptd->spt_flags & SHM_PAGEABLE)) {
633 hat_flags = HAT_UNLOAD_UNMAP;
634 }
635
636 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
637
638 amp = sptd->spt_amp;
639 if (sptd->spt_flags & SHM_PAGEABLE)
640 npages = btop(amp->size);
641
642 ASSERT(amp != NULL);
643
644 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
645 sp = amp->a_sp;
646 proj = sp->shm_perm.ipc_proj;
647 mutex_enter(&sp->shm_mlock);
648 }
649 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
650 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
651 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
652 panic("segspt_free_pages: null ap");
653 /*NOTREACHED*/
654 }
655 } else {
656 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
657 == NULL)
658 continue;
659 }
660 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
661 swap_xlate(ap, &vp, &off);
662
663 /*
664 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
665 * the pages will not be holding the SE_SHARED lock
666 * at this point.
667 *
668 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
669 * the pages are still held SE_SHARED locked from the
670 * original segspt_create().
671 *
672 * Our goal is to get SE_EXCL lock on each page, remove
673 * permanent lock on it and invalidate the page.
674 */
675 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
676 if (hat_flags == HAT_UNLOAD_UNMAP)
677 pp = page_lookup(vp, off, SE_EXCL);
678 else {
679 if ((pp = page_find(vp, off)) == NULL) {
680 panic("segspt_free_pages: "
681 "page not locked");
682 /*NOTREACHED*/
683 }
684 if (!page_tryupgrade(pp)) {
685 page_unlock(pp);
686 pp = page_lookup(vp, off, SE_EXCL);
687 }
688 }
689 if (pp == NULL) {
690 panic("segspt_free_pages: "
691 "page not in the system");
692 /*NOTREACHED*/
693 }
694 ASSERT(pp->p_lckcnt > 0);
695 page_pp_unlock(pp, 0, 1);
696 if (pp->p_lckcnt == 0)
697 unlocked_bytes += PAGESIZE;
698 } else {
699 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
700 continue;
701 }
702 /*
703 * It's logical to invalidate the pages here as in most cases
704 * these were created by segspt.
705 */
706 if (pp->p_szc != 0) {
707 if (root == 0) {
708 ASSERT(curnpgs == 0);
709 root = 1;
710 rootpp = pp;
711 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
712 ASSERT(pgs > 1);
713 ASSERT(IS_P2ALIGNED(pgs, pgs));
714 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
715 curnpgs--;
716 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
717 ASSERT(curnpgs == 1);
718 ASSERT(page_pptonum(pp) ==
719 page_pptonum(rootpp) + (pgs - 1));
720 page_destroy_pages(rootpp);
721 root = 0;
722 curnpgs = 0;
723 } else {
724 ASSERT(curnpgs > 1);
725 ASSERT(page_pptonum(pp) ==
726 page_pptonum(rootpp) + (pgs - curnpgs));
727 curnpgs--;
728 }
729 } else {
730 if (root != 0 || curnpgs != 0) {
731 panic("segspt_free_pages: bad large page");
732 /*NOTREACHED*/
733 }
734 /*
735 * Before destroying the pages, we need to take care
736 * of the rctl locked memory accounting. For that
737 * we need to calculate the unlocked_bytes.
738 */
739 if (pp->p_lckcnt > 0)
740 unlocked_bytes += PAGESIZE;
741 /*LINTED: constant in conditional context */
742 VN_DISPOSE(pp, B_INVAL, 0, kcred);
743 }
744 }
745 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
746 if (unlocked_bytes > 0)
747 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
748 mutex_exit(&sp->shm_mlock);
749 }
750 if (root != 0 || curnpgs != 0) {
751 panic("segspt_free_pages: bad large page");
752 /*NOTREACHED*/
753 }
754
755 /*
756 * mark that pages have been released
757 */
758 sptd->spt_realsize = 0;
759
760 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
761 atomic_add_long(&spt_used, -npages);
762 anon_swap_restore(npages);
763 }
764 }
765
766 /*
767 * Get memory allocation policy info for specified address in given segment
768 */
769 static lgrp_mem_policy_info_t *
770 segspt_getpolicy(struct seg *seg, caddr_t addr)
771 {
772 struct anon_map *amp;
773 ulong_t anon_index;
774 lgrp_mem_policy_info_t *policy_info;
775 struct spt_data *spt_data;
776
777 ASSERT(seg != NULL);
778
779 /*
780 * Get anon_map from segspt
781 *
782 * Assume that no lock needs to be held on anon_map, since
783 * it should be protected by its reference count which must be
784 * nonzero for an existing segment
785 * Need to grab readers lock on policy tree though
786 */
787 spt_data = (struct spt_data *)seg->s_data;
788 if (spt_data == NULL)
789 return (NULL);
790 amp = spt_data->spt_amp;
791 ASSERT(amp->refcnt != 0);
792
793 /*
794 * Get policy info
795 *
796 * Assume starting anon index of 0
797 */
798 anon_index = seg_page(seg, addr);
799 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
800
801 return (policy_info);
802 }
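/*
 * Illustrative sketch (assumed caller, not from this file): the generic
 * segment layer reaches this routine through the segment ops vector, e.g.
 *
 *	lgrp_mem_policy_info_t *pi;
 *
 *	pi = SEGOP_GETPOLICY(seg, addr);
 *	if (pi != NULL)
 *		policy = pi->mem_policy;
 *
 * SEGOP_GETPOLICY() and the mem_policy field are taken from vm/seg.h and
 * the lgrp headers respectively; treat the exact names as assumptions here.
 */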
803
804 /*
805 * DISM only.
806 * Return locked pages over a given range.
807 *
808 * We will cache all DISM locked pages and save the pplist for the
809 * entire segment in the ppa field of the underlying DISM segment structure.
810 * Later, during a call to segspt_reclaim() we will use this ppa array
811 * to page_unlock() all of the pages and then we will free this ppa list.
812 */
813 /*ARGSUSED*/
814 static int
815 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
816 struct page ***ppp, enum lock_type type, enum seg_rw rw)
817 {
818 struct shm_data *shmd = (struct shm_data *)seg->s_data;
819 struct seg *sptseg = shmd->shm_sptseg;
820 struct spt_data *sptd = sptseg->s_data;
821 pgcnt_t pg_idx, npages, tot_npages, npgs;
822 struct page **pplist, **pl, **ppa, *pp;
823 struct anon_map *amp;
824 spgcnt_t an_idx;
825 int ret = ENOTSUP;
826 uint_t pl_built = 0;
827 struct anon *ap;
828 struct vnode *vp;
829 u_offset_t off;
830 pgcnt_t claim_availrmem = 0;
831 uint_t szc;
832
833 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
834 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
835
836 /*
837 * We want to lock/unlock the entire ISM segment. Therefore,
838 * we will be using the underlying sptseg and its base address
839 * and length for the caching arguments.
840 */
841 ASSERT(sptseg);
842 ASSERT(sptd);
843
844 pg_idx = seg_page(seg, addr);
845 npages = btopr(len);
846
847 /*
848 * check if the request is larger than number of pages covered
849 * by amp
850 */
851 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
852 *ppp = NULL;
853 return (ENOTSUP);
854 }
855
856 if (type == L_PAGEUNLOCK) {
857 ASSERT(sptd->spt_ppa != NULL);
858
859 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
860 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
861
862 /*
863 * If someone is blocked while unmapping, we purge
864 * segment page cache and thus reclaim pplist synchronously
865 * without waiting for seg_pasync_thread. This speeds up
866 * unmapping in cases where munmap(2) is called, while
867 * raw async i/o is still in progress or where a thread
868 * exits on data fault in a multithreaded application.
869 */
870 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
871 (AS_ISUNMAPWAIT(seg->s_as) &&
872 shmd->shm_softlockcnt > 0)) {
873 segspt_purge(seg);
874 }
875 return (0);
876 }
877
878 /* The L_PAGELOCK case ... */
879
880 if (sptd->spt_flags & DISM_PPA_CHANGED) {
881 segspt_purge(seg);
882 /*
883 * For DISM the ppa array needs to be rebuilt, since
884 * the number of locked pages could have changed.
885 */
886 *ppp = NULL;
887 return (ENOTSUP);
888 }
889
890 /*
891 * First try to find pages in segment page cache, without
892 * holding the segment lock.
893 */
894 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
895 S_WRITE, SEGP_FORCE_WIRED);
896 if (pplist != NULL) {
897 ASSERT(sptd->spt_ppa != NULL);
898 ASSERT(sptd->spt_ppa == pplist);
899 ppa = sptd->spt_ppa;
900 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
901 if (ppa[an_idx] == NULL) {
902 seg_pinactive(seg, NULL, seg->s_base,
903 sptd->spt_amp->size, ppa,
904 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
905 *ppp = NULL;
906 return (ENOTSUP);
907 }
908 if ((szc = ppa[an_idx]->p_szc) != 0) {
909 npgs = page_get_pagecnt(szc);
910 an_idx = P2ROUNDUP(an_idx + 1, npgs);
911 } else {
912 an_idx++;
913 }
914 }
915 /*
916 * Since we cache the entire DISM segment, we want to
917 * set ppp to point to the first slot that corresponds
918 * to the requested addr, i.e. pg_idx.
919 */
920 *ppp = &(sptd->spt_ppa[pg_idx]);
921 return (0);
922 }
923
924 mutex_enter(&sptd->spt_lock);
925 /*
926 * try to find pages in segment page cache with mutex
927 */
928 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
929 S_WRITE, SEGP_FORCE_WIRED);
930 if (pplist != NULL) {
931 ASSERT(sptd->spt_ppa != NULL);
932 ASSERT(sptd->spt_ppa == pplist);
933 ppa = sptd->spt_ppa;
934 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
935 if (ppa[an_idx] == NULL) {
936 mutex_exit(&sptd->spt_lock);
937 seg_pinactive(seg, NULL, seg->s_base,
938 sptd->spt_amp->size, ppa,
939 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
940 *ppp = NULL;
941 return (ENOTSUP);
942 }
943 if ((szc = ppa[an_idx]->p_szc) != 0) {
944 npgs = page_get_pagecnt(szc);
945 an_idx = P2ROUNDUP(an_idx + 1, npgs);
946 } else {
947 an_idx++;
948 }
949 }
950 /*
951 * Since we cache the entire DISM segment, we want to
952 * set ppp to point to the first slot that corresponds
953 * to the requested addr, i.e. pg_idx.
954 */
955 mutex_exit(&sptd->spt_lock);
956 *ppp = &(sptd->spt_ppa[pg_idx]);
957 return (0);
958 }
959 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
960 SEGP_FORCE_WIRED) == SEGP_FAIL) {
961 mutex_exit(&sptd->spt_lock);
962 *ppp = NULL;
963 return (ENOTSUP);
964 }
965
966 /*
967 * No need to worry about protections because DISM pages are always rw.
968 */
969 pl = pplist = NULL;
970 amp = sptd->spt_amp;
971
972 /*
973 * Do we need to build the ppa array?
974 */
975 if (sptd->spt_ppa == NULL) {
976 pgcnt_t lpg_cnt = 0;
977
978 pl_built = 1;
979 tot_npages = btopr(sptd->spt_amp->size);
980
981 ASSERT(sptd->spt_pcachecnt == 0);
982 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
983 pl = pplist;
984
985 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
986 for (an_idx = 0; an_idx < tot_npages; ) {
987 ap = anon_get_ptr(amp->ahp, an_idx);
988 /*
989 * Cache only mlocked pages. For large pages,
990 * if one constituent page is mlocked then
991 * all pages of that large page are cached
992 * as well. This allows quick lookups in the
993 * ppa array.
994 */
995 if ((ap != NULL) && (lpg_cnt != 0 ||
996 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
997
998 swap_xlate(ap, &vp, &off);
999 pp = page_lookup(vp, off, SE_SHARED);
1000 ASSERT(pp != NULL);
1001 if (lpg_cnt == 0) {
1002 lpg_cnt++;
1003 /*
1004 * For a small page, we are done --
1005 * lpg_cnt is reset to 0 below.
1006 *
1007 * For a large page, we are guaranteed
1008 * to find the anon structures of all
1009 * constituent pages and a non-zero
1010 * lpg_cnt ensures that we don't test
1011 * for mlock for these. We are done
1012 * when lpg_cnt reaches (npgs + 1).
1013 * If we are not the first constituent
1014 * page, restart at the first one.
1015 */
1016 npgs = page_get_pagecnt(pp->p_szc);
1017 if (!IS_P2ALIGNED(an_idx, npgs)) {
1018 an_idx = P2ALIGN(an_idx, npgs);
1019 page_unlock(pp);
1020 continue;
1021 }
1022 }
1023 if (++lpg_cnt > npgs)
1024 lpg_cnt = 0;
1025
1026 /*
1027 * availrmem is decremented only
1028 * for unlocked pages
1029 */
1030 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1031 claim_availrmem++;
1032 pplist[an_idx] = pp;
1033 }
1034 an_idx++;
1035 }
1036 ANON_LOCK_EXIT(&amp->a_rwlock);
1037
1038 if (claim_availrmem) {
1039 mutex_enter(&freemem_lock);
1040 if (availrmem < tune.t_minarmem + claim_availrmem) {
1041 mutex_exit(&freemem_lock);
1042 ret = ENOTSUP;
1043 claim_availrmem = 0;
1044 goto insert_fail;
1045 } else {
1046 availrmem -= claim_availrmem;
1047 }
1048 mutex_exit(&freemem_lock);
1049 }
1050
1051 sptd->spt_ppa = pl;
1052 } else {
1053 /*
1054 * We already have a valid ppa[].
1055 */
1056 pl = sptd->spt_ppa;
1057 }
1058
1059 ASSERT(pl != NULL);
1060
1061 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1062 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1063 segspt_reclaim);
1064 if (ret == SEGP_FAIL) {
1065 /*
1066 * seg_pinsert failed. We return
1067 * ENOTSUP, so that the as_pagelock() code will
1068 * then try the slower F_SOFTLOCK path.
1069 */
1070 if (pl_built) {
1071 /*
1072 * No one else has referenced the ppa[].
1073 * We created it and we need to destroy it.
1074 */
1075 sptd->spt_ppa = NULL;
1076 }
1077 ret = ENOTSUP;
1078 goto insert_fail;
1079 }
1080
1081 /*
1082 * In either case, we increment softlockcnt on the 'real' segment.
1083 */
1084 sptd->spt_pcachecnt++;
1085 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1086
1087 ppa = sptd->spt_ppa;
1088 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1089 if (ppa[an_idx] == NULL) {
1090 mutex_exit(&sptd->spt_lock);
1091 seg_pinactive(seg, NULL, seg->s_base,
1092 sptd->spt_amp->size,
1093 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1094 *ppp = NULL;
1095 return (ENOTSUP);
1096 }
1097 if ((szc = ppa[an_idx]->p_szc) != 0) {
1098 npgs = page_get_pagecnt(szc);
1099 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1100 } else {
1101 an_idx++;
1102 }
1103 }
1104 /*
1105 * We can now drop the sptd->spt_lock since the ppa[]
1106 * exists and we have incremented pcachecnt.
1107 */
1108 mutex_exit(&sptd->spt_lock);
1109
1110 /*
1111 * Since we cache the entire segment, we want to
1112 * set ppp to point to the first slot that corresponds
1113 * to the requested addr, i.e. pg_idx.
1114 */
1115 *ppp = &(sptd->spt_ppa[pg_idx]);
1116 return (0);
1117
1118 insert_fail:
1119 /*
1120 * We will only reach this code if we tried and failed.
1121 *
1122 * And we can drop the lock on the dummy seg, once we've failed
1123 * to set up a new ppa[].
1124 */
1125 mutex_exit(&sptd->spt_lock);
1126
1127 if (pl_built) {
1128 if (claim_availrmem) {
1129 mutex_enter(&freemem_lock);
1130 availrmem += claim_availrmem;
1131 mutex_exit(&freemem_lock);
1132 }
1133
1134 /*
1135 * We created pl and we need to destroy it.
1136 */
1137 pplist = pl;
1138 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1139 if (pplist[an_idx] != NULL)
1140 page_unlock(pplist[an_idx]);
1141 }
1142 kmem_free(pl, sizeof (page_t *) * tot_npages);
1143 }
1144
1145 if (shmd->shm_softlockcnt <= 0) {
1146 if (AS_ISUNMAPWAIT(seg->s_as)) {
1147 mutex_enter(&seg->s_as->a_contents);
1148 if (AS_ISUNMAPWAIT(seg->s_as)) {
1149 AS_CLRUNMAPWAIT(seg->s_as);
1150 cv_broadcast(&seg->s_as->a_cv);
1151 }
1152 mutex_exit(&seg->s_as->a_contents);
1153 }
1154 }
1155 *ppp = NULL;
1156 return (ret);
1157 }
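/*
 * Illustrative sketch (assumed caller, simplified): a kernel client doing
 * raw I/O against a DISM segment exercises the L_PAGELOCK / L_PAGEUNLOCK
 * pair above through as_pagelock()/as_pageunlock():
 *
 *	struct page **pplist;
 *
 *	if (as_pagelock(as, &pplist, uaddr, len, S_WRITE) == 0) {
 *		// do the transfer against the locked pages
 *		as_pageunlock(as, pplist, uaddr, len, S_WRITE);
 *	}
 *
 * On the lock side this routine hands back &sptd->spt_ppa[pg_idx]; on the
 * unlock side seg_pinactive() eventually calls segspt_reclaim() to drop
 * the per-page locks and free the cached ppa array.
 */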
1158
1159
1160
1161 /*
1162 * return locked pages over a given range.
1163 *
1164 * We will cache the entire ISM segment and save the pplist for the
1165 * entire segment in the ppa field of the underlying ISM segment structure.
1166 * Later, during a call to segspt_reclaim() we will use this ppa array
1167 * to page_unlock() all of the pages and then we will free this ppa list.
1168 */
1169 /*ARGSUSED*/
1170 static int
1171 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1172 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1173 {
1174 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1175 struct seg *sptseg = shmd->shm_sptseg;
1176 struct spt_data *sptd = sptseg->s_data;
1177 pgcnt_t np, page_index, npages;
1178 caddr_t a, spt_base;
1179 struct page **pplist, **pl, *pp;
1180 struct anon_map *amp;
1181 ulong_t anon_index;
1182 int ret = ENOTSUP;
1183 uint_t pl_built = 0;
1184 struct anon *ap;
1185 struct vnode *vp;
1186 u_offset_t off;
1187
1188 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1189 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1190
1191
1192 /*
1193 * We want to lock/unlock the entire ISM segment. Therefore,
1194 * we will be using the underlying sptseg and its base address
1195 * and length for the caching arguments.
1196 */
1197 ASSERT(sptseg);
1198 ASSERT(sptd);
1199
1200 if (sptd->spt_flags & SHM_PAGEABLE) {
1201 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1202 }
1203
1204 page_index = seg_page(seg, addr);
1205 npages = btopr(len);
1206
1207 /*
1208 * check if the request is larger than number of pages covered
1209 * by amp
1210 */
1211 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1212 *ppp = NULL;
1213 return (ENOTSUP);
1214 }
1215
1216 if (type == L_PAGEUNLOCK) {
1217
1218 ASSERT(sptd->spt_ppa != NULL);
1219
1220 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1221 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1222
1223 /*
1224 * If someone is blocked while unmapping, we purge
1225 * segment page cache and thus reclaim pplist synchronously
1226 * without waiting for seg_pasync_thread. This speeds up
1227 * unmapping in cases where munmap(2) is called, while
1228 * raw async i/o is still in progress or where a thread
1229 * exits on data fault in a multithreaded application.
1230 */
1231 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1232 segspt_purge(seg);
1233 }
1234 return (0);
1235 }
1236
1237 /* The L_PAGELOCK case... */
1238
1239 /*
1240 * First try to find pages in segment page cache, without
1241 * holding the segment lock.
1242 */
1243 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1244 S_WRITE, SEGP_FORCE_WIRED);
1245 if (pplist != NULL) {
1246 ASSERT(sptd->spt_ppa == pplist);
1247 ASSERT(sptd->spt_ppa[page_index]);
1248 /*
1249 * Since we cache the entire ISM segment, we want to
1250 * set ppp to point to the first slot that corresponds
1251 * to the requested addr, i.e. page_index.
1252 */
1253 *ppp = &(sptd->spt_ppa[page_index]);
1254 return (0);
1255 }
1256
1257 mutex_enter(&sptd->spt_lock);
1258
1259 /*
1260 * try to find pages in segment page cache
1261 */
1262 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1263 S_WRITE, SEGP_FORCE_WIRED);
1264 if (pplist != NULL) {
1265 ASSERT(sptd->spt_ppa == pplist);
1266 /*
1267 * Since we cache the entire segment, we want to
1268 * set ppp to point to the first slot that corresponds
1269 * to the requested addr, i.e. page_index.
1270 */
1271 mutex_exit(&sptd->spt_lock);
1272 *ppp = &(sptd->spt_ppa[page_index]);
1273 return (0);
1274 }
1275
1276 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1277 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1278 mutex_exit(&sptd->spt_lock);
1279 *ppp = NULL;
1280 return (ENOTSUP);
1281 }
1282
1283 /*
1284 * No need to worry about protections because ISM pages
1285 * are always rw.
1286 */
1287 pl = pplist = NULL;
1288
1289 /*
1290 * Do we need to build the ppa array?
1291 */
1292 if (sptd->spt_ppa == NULL) {
1293 ASSERT(sptd->spt_ppa == pplist);
1294
1295 spt_base = sptseg->s_base;
1296 pl_built = 1;
1297
1298 /*
1299 * availrmem is decremented once during anon_swap_adjust()
1300 * and is incremented during the anon_unresv(), which is
1301 * called from shm_rm_amp() when the segment is destroyed.
1302 */
1303 amp = sptd->spt_amp;
1304 ASSERT(amp != NULL);
1305
1306 /* pcachecnt is protected by sptd->spt_lock */
1307 ASSERT(sptd->spt_pcachecnt == 0);
1308 pplist = kmem_zalloc(sizeof (page_t *)
1309 * btopr(sptd->spt_amp->size), KM_SLEEP);
1310 pl = pplist;
1311
1312 anon_index = seg_page(sptseg, spt_base);
1313
1314 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1315 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1316 a += PAGESIZE, anon_index++, pplist++) {
1317 ap = anon_get_ptr(amp->ahp, anon_index);
1318 ASSERT(ap != NULL);
1319 swap_xlate(ap, &vp, &off);
1320 pp = page_lookup(vp, off, SE_SHARED);
1321 ASSERT(pp != NULL);
1322 *pplist = pp;
1323 }
1324 ANON_LOCK_EXIT(&amp->a_rwlock);
1325
1326 if (a < (spt_base + sptd->spt_amp->size)) {
1327 ret = ENOTSUP;
1328 goto insert_fail;
1329 }
1330 sptd->spt_ppa = pl;
1331 } else {
1332 /*
1333 * We already have a valid ppa[].
1334 */
1335 pl = sptd->spt_ppa;
1336 }
1337
1338 ASSERT(pl != NULL);
1339
1340 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1341 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1342 segspt_reclaim);
1343 if (ret == SEGP_FAIL) {
1344 /*
1345 * seg_pinsert failed. We return
1346 * ENOTSUP, so that the as_pagelock() code will
1347 * then try the slower F_SOFTLOCK path.
1348 */
1349 if (pl_built) {
1350 /*
1351 * No one else has referenced the ppa[].
1352 * We created it and we need to destroy it.
1353 */
1354 sptd->spt_ppa = NULL;
1355 }
1356 ret = ENOTSUP;
1357 goto insert_fail;
1358 }
1359
1360 /*
1361 * In either case, we increment softlockcnt on the 'real' segment.
1362 */
1363 sptd->spt_pcachecnt++;
1364 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1365
1366 /*
1367 * We can now drop the sptd->spt_lock since the ppa[]
1368 * exists and we have incremented pcachecnt.
1369 */
1370 mutex_exit(&sptd->spt_lock);
1371
1372 /*
1373 * Since we cache the entire segment, we want to
1374 * set ppp to point to the first slot that corresponds
1375 * to the requested addr, i.e. page_index.
1376 */
1377 *ppp = &(sptd->spt_ppa[page_index]);
1378 return (0);
1379
1380 insert_fail:
1381 /*
1382 * We will only reach this code if we tried and failed.
1383 *
1384 * And we can drop the lock on the dummy seg, once we've failed
1385 * to set up a new ppa[].
1386 */
1387 mutex_exit(&sptd->spt_lock);
1388
1389 if (pl_built) {
1390 /*
1391 * We created pl and we need to destroy it.
1392 */
1393 pplist = pl;
1394 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1395 while (np) {
1396 page_unlock(*pplist);
1397 np--;
1398 pplist++;
1399 }
1400 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1401 }
1402 if (shmd->shm_softlockcnt <= 0) {
1403 if (AS_ISUNMAPWAIT(seg->s_as)) {
1404 mutex_enter(&seg->s_as->a_contents);
1405 if (AS_ISUNMAPWAIT(seg->s_as)) {
1406 AS_CLRUNMAPWAIT(seg->s_as);
1407 cv_broadcast(&seg->s_as->a_cv);
1408 }
1409 mutex_exit(&seg->s_as->a_contents);
1410 }
1411 }
1412 *ppp = NULL;
1413 return (ret);
1414 }
1415
1416 /*
1417 * purge any cached pages in the I/O page cache
1418 */
1419 static void
1420 segspt_purge(struct seg *seg)
1421 {
1422 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1423 }
1424
1425 static int
1426 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1427 enum seg_rw rw, int async)
1428 {
1429 struct seg *seg = (struct seg *)ptag;
1430 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1431 struct seg *sptseg;
1432 struct spt_data *sptd;
1433 pgcnt_t npages, i, free_availrmem = 0;
1434 int done = 0;
1435
1436 #ifdef lint
1437 addr = addr;
1438 #endif
1439 sptseg = shmd->shm_sptseg;
1440 sptd = sptseg->s_data;
1441 npages = (len >> PAGESHIFT);
1442 ASSERT(npages);
1443 ASSERT(sptd->spt_pcachecnt != 0);
1444 ASSERT(sptd->spt_ppa == pplist);
1445 ASSERT(npages == btopr(sptd->spt_amp->size));
1446 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1447
1448 /*
1449 * Acquire the lock on the dummy seg and destroy the
1450 * ppa array IF this is the last pcachecnt.
1451 */
1452 mutex_enter(&sptd->spt_lock);
1453 if (--sptd->spt_pcachecnt == 0) {
1454 for (i = 0; i < npages; i++) {
1455 if (pplist[i] == NULL) {
1456 continue;
1457 }
1458 if (rw == S_WRITE) {
1459 hat_setrefmod(pplist[i]);
1460 } else {
1461 hat_setref(pplist[i]);
1462 }
1463 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1464 (sptd->spt_ppa_lckcnt[i] == 0))
1465 free_availrmem++;
1466 page_unlock(pplist[i]);
1467 }
1468 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1469 mutex_enter(&freemem_lock);
1470 availrmem += free_availrmem;
1471 mutex_exit(&freemem_lock);
1472 }
1473 /*
1474 * Since we want to cache/uncache the entire ISM segment,
1475 * we will track the pplist in a segspt specific field
1476 * ppa, that is initialized at the time we add an entry to
1477 * the cache.
1478 */
1479 ASSERT(sptd->spt_pcachecnt == 0);
1480 kmem_free(pplist, sizeof (page_t *) * npages);
1481 sptd->spt_ppa = NULL;
1482 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1483 sptd->spt_gen++;
1484 cv_broadcast(&sptd->spt_cv);
1485 done = 1;
1486 }
1487 mutex_exit(&sptd->spt_lock);
1488
1489 /*
1490 * If we are the pcache async thread or were called via
1491 * seg_ppurge_wiredpp(), we may not hold the AS lock (in that case the
1492 * async argument is not 0). This means that if softlockcnt drops to 0
1493 * after the decrement below, the address space may get freed. We can't
1494 * allow that, since after the decrement to 0 we still need to access
1495 * the as structure for a possible wakeup of unmap waiters. To prevent
1496 * the as from disappearing we take this segment's shm_segfree_syncmtx;
1497 * segspt_shmfree() also takes this mutex as a barrier to make sure
1498 * this routine completes before the segment is freed.
1499 *
1500 * The second complication in the async case is the possibility of a
1501 * missed wakeup of the unmap wait thread. When we don't hold the as
1502 * lock here, we may take the a_contents lock before the unmap wait
1503 * thread that was first to see that softlockcnt was still not 0. As
1504 * a result we would fail to wake up the unmap wait thread. To avoid
1505 * this race we set the nounmapwait flag in the as structure if we
1506 * drop softlockcnt to 0 while async is not 0. The unmap wait thread
1507 * will not block if this flag is set.
1508 */
1509 if (async)
1510 mutex_enter(&shmd->shm_segfree_syncmtx);
1511
1512 /*
1513 * Now decrement softlockcnt.
1514 */
1515 ASSERT(shmd->shm_softlockcnt > 0);
1516 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1517
1518 if (shmd->shm_softlockcnt <= 0) {
1519 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1520 mutex_enter(&seg->s_as->a_contents);
1521 if (async)
1522 AS_SETNOUNMAPWAIT(seg->s_as);
1523 if (AS_ISUNMAPWAIT(seg->s_as)) {
1524 AS_CLRUNMAPWAIT(seg->s_as);
1525 cv_broadcast(&seg->s_as->a_cv);
1526 }
1527 mutex_exit(&seg->s_as->a_contents);
1528 }
1529 }
1530
1531 if (async)
1532 mutex_exit(&shmd->shm_segfree_syncmtx);
1533
1534 return (done);
1535 }
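/*
 * Accounting sketch (a summary of the code above, kept here for reference):
 * each successful seg_pinsert() in the pagelock paths is balanced by
 * exactly one segspt_reclaim() call from the pcache:
 *
 *	pagelock:	sptd->spt_pcachecnt++;  shmd->shm_softlockcnt++;
 *	reclaim:	--sptd->spt_pcachecnt;  shmd->shm_softlockcnt--;
 *
 * Only when spt_pcachecnt reaches 0 are the SE_SHARED page locks dropped,
 * availrmem credited back (DISM only), and spt_ppa freed; softlockcnt
 * reaching 0 is what finally lets a pending unmap proceed.
 */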
1536
1537 /*
1538 * Do a F_SOFTUNLOCK call over the range requested.
1539 * The range must have already been F_SOFTLOCK'ed.
1540 *
1541 * The calls to acquire and release the anon map lock mutex were
1542 * removed in order to avoid a deadly embrace during a DR
1543 * memory delete operation. (E.g. DR blocks while waiting for an
1544 * exclusive lock on a page that is being used for kaio; the
1545 * thread that will complete the kaio and call segspt_softunlock
1546 * blocks on the anon map lock; another thread holding the anon
1547 * map lock blocks on another page lock via the segspt_shmfault
1548 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1549 *
1550 * The appropriateness of the removal is based upon the following:
1551 * 1. If we are holding a segment's reader lock and the page is held
1552 * shared, then the corresponding element in anonmap which points to
1553 * anon struct cannot change and there is no need to acquire the
1554 * anonymous map lock.
1555 * 2. Threads in segspt_softunlock have a reader lock on the segment
1556 * and already have the shared page lock, so we are guaranteed that
1557 * the anon map slot cannot change and therefore can call anon_get_ptr()
1558 * without grabbing the anonymous map lock.
1559 * 3. Threads that softlock a shared page break copy-on-write, even if
1560 * it's a read. Thus cow faults can be ignored with respect to soft
1561 * unlocking, since the breaking of cow means that the anon slot(s) will
1562 * not be shared.
1563 */
1564 static void
1565 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1566 size_t len, enum seg_rw rw)
1567 {
1568 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1569 struct seg *sptseg;
1570 struct spt_data *sptd;
1571 page_t *pp;
1572 caddr_t adr;
1573 struct vnode *vp;
1574 u_offset_t offset;
1575 ulong_t anon_index;
1576 struct anon_map *amp; /* XXX - for locknest */
1577 struct anon *ap = NULL;
1578 pgcnt_t npages;
1579
1580 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1581
1582 sptseg = shmd->shm_sptseg;
1583 sptd = sptseg->s_data;
1584
1585 /*
1586 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1587 * and therefore their pages are SE_SHARED locked
1588 * for the entire life of the segment.
1589 */
1590 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1591 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1592 goto softlock_decrement;
1593 }
1594
1595 /*
1596 * Any thread is free to do a page_find and
1597 * page_unlock() on the pages within this seg.
1598 *
1599 * We are already holding the as->a_lock on the user's
1600 * real segment, but we need to hold the a_lock on the
1601 * underlying dummy as. This is mostly to satisfy the
1602 * underlying HAT layer.
1603 */
1604 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1605 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1606 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1607
1608 amp = sptd->spt_amp;
1609 ASSERT(amp != NULL);
1610 anon_index = seg_page(sptseg, sptseg_addr);
1611
1612 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1613 ap = anon_get_ptr(amp->ahp, anon_index++);
1614 ASSERT(ap != NULL);
1615 swap_xlate(ap, &vp, &offset);
1616
1617 /*
1618 * Use page_find() instead of page_lookup() to
1619 * find the page since we know that it has a
1620 * "shared" lock.
1621 */
1622 pp = page_find(vp, offset);
1623 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1624 if (pp == NULL) {
1625 panic("segspt_softunlock: "
1626 "addr %p, ap %p, vp %p, off %llx",
1627 (void *)adr, (void *)ap, (void *)vp, offset);
1628 /*NOTREACHED*/
1629 }
1630
1631 if (rw == S_WRITE) {
1632 hat_setrefmod(pp);
1633 } else if (rw != S_OTHER) {
1634 hat_setref(pp);
1635 }
1636 page_unlock(pp);
1637 }
1638
1639 softlock_decrement:
1640 npages = btopr(len);
1641 ASSERT(shmd->shm_softlockcnt >= npages);
1642 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1643 if (shmd->shm_softlockcnt == 0) {
1644 /*
1645 * All SOFTLOCKS are gone. Wakeup any waiting
1646 * unmappers so they can try again to unmap.
1647 * Check for waiters first without the mutex
1648 * held so we don't always grab the mutex on
1649 * softunlocks.
1650 */
1651 if (AS_ISUNMAPWAIT(seg->s_as)) {
1652 mutex_enter(&seg->s_as->a_contents);
1653 if (AS_ISUNMAPWAIT(seg->s_as)) {
1654 AS_CLRUNMAPWAIT(seg->s_as);
1655 cv_broadcast(&seg->s_as->a_cv);
1656 }
1657 mutex_exit(&seg->s_as->a_contents);
1658 }
1659 }
1660 }
1661
1662 int
1663 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1664 {
1665 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1666 struct shm_data *shmd;
1667 struct anon_map *shm_amp = shmd_arg->shm_amp;
1668 struct spt_data *sptd;
1669 int error = 0;
1670
1671 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1672
1673 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1674 if (shmd == NULL)
1675 return (ENOMEM);
1676
1677 shmd->shm_sptas = shmd_arg->shm_sptas;
1678 shmd->shm_amp = shm_amp;
1679 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1680
1681 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1682 NULL, 0, seg->s_size);
1683
1684 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1685
1686 seg->s_data = (void *)shmd;
1687 seg->s_ops = &segspt_shmops;
1688 seg->s_szc = shmd->shm_sptseg->s_szc;
1689 sptd = shmd->shm_sptseg->s_data;
1690
1691 if (sptd->spt_flags & SHM_PAGEABLE) {
1692 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1693 KM_NOSLEEP)) == NULL) {
1694 seg->s_data = (void *)NULL;
1695 kmem_free(shmd, (sizeof (*shmd)));
1696 return (ENOMEM);
1697 }
1698 shmd->shm_lckpgs = 0;
1699 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1700 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1701 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1702 seg->s_size, seg->s_szc)) != 0) {
1703 kmem_free(shmd->shm_vpage,
1704 btopr(shm_amp->size));
1705 }
1706 }
1707 } else {
1708 error = hat_share(seg->s_as->a_hat, seg->s_base,
1709 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1710 seg->s_size, seg->s_szc);
1711 }
1712 if (error) {
1713 seg->s_szc = 0;
1714 seg->s_data = (void *)NULL;
1715 kmem_free(shmd, (sizeof (*shmd)));
1716 } else {
1717 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1718 shm_amp->refcnt++;
1719 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1720 }
1721 return (error);
1722 }
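/*
 * Illustrative sketch (assumed caller, not from this file): shmat() is
 * expected to fill in a struct shm_data with the results of sptcreate()
 * and hand it to as_map() so that this attach routine runs for the
 * process's own address space:
 *
 *	struct shm_data ssd;
 *
 *	ssd.shm_sptas = sptseg->s_as;	// dummy as built by sptcreate()
 *	ssd.shm_amp = amp;
 *	ssd.shm_sptseg = sptseg;
 *	error = as_map(curproc->p_as, attach_addr, size, segspt_shmattach,
 *	    &ssd);
 *
 * attach_addr and size stand in for whatever shmat() computed; treat the
 * exact call site as an assumption.
 */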
1723
1724 int
1725 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1726 {
1727 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1728 int reclaim = 1;
1729
1730 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1731 retry:
1732 if (shmd->shm_softlockcnt > 0) {
1733 if (reclaim == 1) {
1734 segspt_purge(seg);
1735 reclaim = 0;
1736 goto retry;
1737 }
1738 return (EAGAIN);
1739 }
1740
1741 if (ssize != seg->s_size) {
1742 #ifdef DEBUG
1743 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1744 ssize, seg->s_size);
1745 #endif
1746 return (EINVAL);
1747 }
1748
1749 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1750 NULL, 0);
1751 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1752
1753 seg_free(seg);
1754
1755 return (0);
1756 }
1757
1758 void
1759 segspt_shmfree(struct seg *seg)
1760 {
1761 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1762 struct anon_map *shm_amp = shmd->shm_amp;
1763
1764 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1765
1766 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1767 MC_UNLOCK, NULL, 0);
1768
1769 /*
1770 * Need to increment refcnt when attaching
1771 * and decrement when detaching because of dup().
1772 */
1773 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1774 shm_amp->refcnt--;
1775 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1776
1777 if (shmd->shm_vpage) { /* only for DISM */
1778 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1779 shmd->shm_vpage = NULL;
1780 }
1781
1782 /*
1783 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1784 * still working with this segment without holding as lock.
1785 */
1786 ASSERT(shmd->shm_softlockcnt == 0);
1787 mutex_enter(&shmd->shm_segfree_syncmtx);
1788 mutex_destroy(&shmd->shm_segfree_syncmtx);
1789
1790 kmem_free(shmd, sizeof (*shmd));
1791 }
1792
1793 /*ARGSUSED*/
1794 int
1795 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1796 {
1797 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1798
1799 /*
1800 * Shared page table is more than shared mapping.
1801 * Individual process sharing page tables can't change prot
1802 * because there is only one set of page tables.
1803 * This will be allowed after private page table is
1804 * supported.
1805 */
1806 /* need to return correct status error? */
1807 return (0);
1808 }
1809
1810
1811 faultcode_t
1812 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1813 size_t len, enum fault_type type, enum seg_rw rw)
1814 {
1815 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1816 struct seg *sptseg = shmd->shm_sptseg;
1817 struct as *curspt = shmd->shm_sptas;
1818 struct spt_data *sptd = sptseg->s_data;
1819 pgcnt_t npages;
1820 size_t size;
1821 caddr_t segspt_addr, shm_addr;
1822 page_t **ppa;
1823 int i;
1824 ulong_t an_idx = 0;
1825 int err = 0;
1826 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1827 size_t pgsz;
1828 pgcnt_t pgcnt;
1829 caddr_t a;
1830 pgcnt_t pidx;
1831
1832 #ifdef lint
1833 hat = hat;
1834 #endif
1835 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1836
1837 /*
1838 * Because of the way spt is implemented
1839 * the realsize of the segment does not have to be
1840 * equal to the segment size itself. The segment size is
1841 * often in multiples of a page size larger than PAGESIZE.
1842 * The realsize is rounded up to the nearest PAGESIZE
1843 * based on what the user requested. This is a bit of
1844 * ugliness that is historical but not easily fixed
1845 * without re-designing the higher levels of ISM.
1846 */
1847 ASSERT(addr >= seg->s_base);
1848 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1849 return (FC_NOMAP);
1850 /*
1851 * For all of the following cases except F_PROT, we need to
1852 * make any necessary adjustments to addr and len
1853 * and get all of the necessary page_t's into an array called ppa[].
1854 *
1855 * The code in shmat() forces base addr and len of ISM segment
1856 * to be aligned to largest page size supported. Therefore,
1857 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1858 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1859 * in large pagesize chunks, or else we will screw up the HAT
1860 * layer by calling hat_memload_array() with differing page sizes
1861 * over a given virtual range.
1862 */
1863 pgsz = page_get_pagesize(sptseg->s_szc);
1864 pgcnt = page_get_pagecnt(sptseg->s_szc);
1865 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1866 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1867 npages = btopr(size);
1868
1869 /*
1870 * Now we need to convert from addr in segshm to addr in segspt.
1871 */
1872 an_idx = seg_page(seg, shm_addr);
1873 segspt_addr = sptseg->s_base + ptob(an_idx);
1874
1875 ASSERT((segspt_addr + ptob(npages)) <=
1876 (sptseg->s_base + sptd->spt_realsize));
1877 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1878
1879 switch (type) {
1880
1881 case F_SOFTLOCK:
1882
1883 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1884 /*
1885 * Fall through to the F_INVAL case to load up the hat layer
1886 * entries with the HAT_LOAD_LOCK flag.
1887 */
1888 /* FALLTHRU */
1889 case F_INVAL:
1890
1891 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1892 return (FC_NOMAP);
1893
1894 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1895
1896 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1897 if (err != 0) {
1898 if (type == F_SOFTLOCK) {
1899 atomic_add_long((ulong_t *)(
1900 &(shmd->shm_softlockcnt)), -npages);
1901 }
1902 goto dism_err;
1903 }
1904 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1905 a = segspt_addr;
1906 pidx = 0;
1907 if (type == F_SOFTLOCK) {
1908
1909 /*
1910 * Load up the translation keeping it
1911 * locked and don't unlock the page.
1912 */
1913 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1914 hat_memload_array(sptseg->s_as->a_hat,
1915 a, pgsz, &ppa[pidx], sptd->spt_prot,
1916 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1917 }
1918 } else {
1919 if (hat == seg->s_as->a_hat) {
1920
1921 /*
1922 * Migrate pages marked for migration
1923 */
1924 if (lgrp_optimizations())
1925 page_migrate(seg, shm_addr, ppa,
1926 npages);
1927
1928 /* CPU HAT */
1929 for (; pidx < npages;
1930 a += pgsz, pidx += pgcnt) {
1931 hat_memload_array(sptseg->s_as->a_hat,
1932 a, pgsz, &ppa[pidx],
1933 sptd->spt_prot,
1934 HAT_LOAD_SHARE);
1935 }
1936 } else {
1937 /* XHAT. Pass real address */
1938 hat_memload_array(hat, shm_addr,
1939 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);
1940 }
1941
1942 /*
1943 * And now drop the SE_SHARED lock(s).
1944 */
1945 if (dyn_ism_unmap) {
1946 for (i = 0; i < npages; i++) {
1947 page_unlock(ppa[i]);
1948 }
1949 }
1950 }
1951
1952 if (!dyn_ism_unmap) {
1953 if (hat_share(seg->s_as->a_hat, shm_addr,
1954 curspt->a_hat, segspt_addr, ptob(npages),
1955 seg->s_szc) != 0) {
1956 panic("hat_share err in DISM fault");
1957 /* NOTREACHED */
1958 }
1959 if (type == F_INVAL) {
1960 for (i = 0; i < npages; i++) {
1961 page_unlock(ppa[i]);
1962 }
1963 }
1964 }
1965 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1966 dism_err:
1967 kmem_free(ppa, npages * sizeof (page_t *));
1968 return (err);
1969
1970 case F_SOFTUNLOCK:
1971
1972 /*
1973 * This is a bit ugly, we pass in the real seg pointer,
1974 * but the segspt_addr is the virtual address within the
1975 * dummy seg.
1976 */
1977 segspt_softunlock(seg, segspt_addr, size, rw);
1978 return (0);
1979
1980 case F_PROT:
1981
1982 /*
1983 * This takes care of the unusual case where a user
1984 * allocates a stack in shared memory and a register
1985 * window overflow is written to that stack page before
1986 * it is otherwise modified.
1987 *
1988 * We can get away with this because ISM segments are
1989 * always rw. Other than this unusual case, there
1990 * should be no instances of protection violations.
1991 */
1992 return (0);
1993
1994 default:
1995 #ifdef DEBUG
1996 panic("segspt_dismfault default type?");
1997 #else
1998 return (FC_NOMAP);
1999 #endif
2000 }
2001 }
2002
2003
2004 faultcode_t
2005 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2006 size_t len, enum fault_type type, enum seg_rw rw)
2007 {
2008 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2009 struct seg *sptseg = shmd->shm_sptseg;
2010 struct as *curspt = shmd->shm_sptas;
2011 struct spt_data *sptd = sptseg->s_data;
2012 pgcnt_t npages;
2013 size_t size;
2014 caddr_t sptseg_addr, shm_addr;
2015 page_t *pp, **ppa;
2016 int i;
2017 u_offset_t offset;
2018 ulong_t anon_index = 0;
2019 struct vnode *vp;
2020 struct anon_map *amp; /* XXX - for locknest */
2021 struct anon *ap = NULL;
2022 size_t pgsz;
2023 pgcnt_t pgcnt;
2024 caddr_t a;
2025 pgcnt_t pidx;
2026 size_t sz;
2027
2028 #ifdef lint
2029 hat = hat;
2030 #endif
2031
2032 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2033
2034 if (sptd->spt_flags & SHM_PAGEABLE) {
2035 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2036 }
2037
	/*
	 * Because of the way spt is implemented, the realsize of the
	 * segment does not have to equal the segment size itself. The
	 * segment size is often a multiple of a page size larger than
	 * PAGESIZE, while the realsize is the user-requested size
	 * rounded up to the nearest PAGESIZE. This is a bit of
	 * ugliness that is historical but not easily fixed without
	 * re-designing the higher levels of ISM.
	 */
2048 ASSERT(addr >= seg->s_base);
2049 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2050 return (FC_NOMAP);
2051 /*
2052 * For all of the following cases except F_PROT, we need to
2053 * make any necessary adjustments to addr and len
2054 * and get all of the necessary page_t's into an array called ppa[].
2055 *
2056 * The code in shmat() forces base addr and len of ISM segment
2057 * to be aligned to largest page size supported. Therefore,
2058 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2059 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2060 * in large pagesize chunks, or else we will screw up the HAT
2061 * layer by calling hat_memload_array() with differing page sizes
2062 * over a given virtual range.
2063 */
2064 pgsz = page_get_pagesize(sptseg->s_szc);
2065 pgcnt = page_get_pagecnt(sptseg->s_szc);
2066 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2067 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2068 npages = btopr(size);
2069
2070 /*
2071 * Now we need to convert from addr in segshm to addr in segspt.
2072 */
2073 anon_index = seg_page(seg, shm_addr);
2074 sptseg_addr = sptseg->s_base + ptob(anon_index);
2075
2076 /*
2077 * And now we may have to adjust npages downward if we have
2078 * exceeded the realsize of the segment or initial anon
2079 * allocations.
2080 */
2081 if ((sptseg_addr + ptob(npages)) >
2082 (sptseg->s_base + sptd->spt_realsize))
2083 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2084
2085 npages = btopr(size);
2086
2087 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2088 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2089
2090 switch (type) {
2091
2092 case F_SOFTLOCK:
2093
2094 /*
2095 * availrmem is decremented once during anon_swap_adjust()
2096 * and is incremented during the anon_unresv(), which is
2097 * called from shm_rm_amp() when the segment is destroyed.
2098 */
2099 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2100 /*
2101 * Some platforms assume that ISM pages are SE_SHARED
2102 * locked for the entire life of the segment.
2103 */
2104 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2105 return (0);
2106 /*
2107 * Fall through to the F_INVAL case to load up the hat layer
2108 * entries with the HAT_LOAD_LOCK flag.
2109 */
2110
2111 /* FALLTHRU */
2112 case F_INVAL:
2113
2114 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2115 return (FC_NOMAP);
2116
2117 /*
2118 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2119 * may still rely on this call to hat_share(). That
	 * would imply that those HATs can fault on a
2121 * HAT_LOAD_LOCK translation, which would seem
2122 * contradictory.
2123 */
2124 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2125 if (hat_share(seg->s_as->a_hat, seg->s_base,
2126 curspt->a_hat, sptseg->s_base,
2127 sptseg->s_size, sptseg->s_szc) != 0) {
2128 panic("hat_share error in ISM fault");
2129 /*NOTREACHED*/
2130 }
2131 return (0);
2132 }
2133 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2134
		/*
		 * There is no need to lock the real seg here, because all
		 * of our work will be on the underlying dummy seg.
		 *
		 * sptseg_addr and npages now account for large pages.
		 */
2142 amp = sptd->spt_amp;
2143 ASSERT(amp != NULL);
2144 anon_index = seg_page(sptseg, sptseg_addr);
2145
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
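		/*
		 * Resolve each anon slot to its backing swap vnode/offset
		 * and grab the page SE_SHARED. Non-pageable ISM pages are
		 * expected to be resident for the life of the segment, so
		 * the lookups should always succeed (hence the ASSERTs).
		 */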
2147 for (i = 0; i < npages; i++) {
2148 ap = anon_get_ptr(amp->ahp, anon_index++);
2149 ASSERT(ap != NULL);
2150 swap_xlate(ap, &vp, &offset);
2151 pp = page_lookup(vp, offset, SE_SHARED);
2152 ASSERT(pp != NULL);
2153 ppa[i] = pp;
2154 }
		ANON_LOCK_EXIT(&amp->a_rwlock);
2156 ASSERT(i == npages);
2157
2158 /*
2159 * We are already holding the as->a_lock on the user's
2160 * real segment, but we need to hold the a_lock on the
2161 * underlying dummy as. This is mostly to satisfy the
2162 * underlying HAT layer.
2163 */
2164 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2165 a = sptseg_addr;
2166 pidx = 0;
2167 if (type == F_SOFTLOCK) {
2168 /*
2169 * Load up the translation keeping it
2170 * locked and don't unlock the page.
2171 */
2172 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2173 sz = MIN(pgsz, ptob(npages - pidx));
2174 hat_memload_array(sptseg->s_as->a_hat, a,
2175 sz, &ppa[pidx], sptd->spt_prot,
2176 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2177 }
2178 } else {
2179 if (hat == seg->s_as->a_hat) {
2180
2181 /*
2182 * Migrate pages marked for migration.
2183 */
2184 if (lgrp_optimizations())
2185 page_migrate(seg, shm_addr, ppa,
2186 npages);
2187
2188 /* CPU HAT */
2189 for (; pidx < npages;
2190 a += pgsz, pidx += pgcnt) {
2191 sz = MIN(pgsz, ptob(npages - pidx));
2192 hat_memload_array(sptseg->s_as->a_hat,
2193 a, sz, &ppa[pidx],
2194 sptd->spt_prot, HAT_LOAD_SHARE);
2195 }
2196 } else {
2197 /* XHAT. Pass real address */
2198 hat_memload_array(hat, shm_addr,
2199 ptob(npages), ppa, sptd->spt_prot,
2200 HAT_LOAD_SHARE);
2201 }
2202
2203 /*
2204 * And now drop the SE_SHARED lock(s).
2205 */
2206 for (i = 0; i < npages; i++)
2207 page_unlock(ppa[i]);
2208 }
2209 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2210
2211 kmem_free(ppa, sizeof (page_t *) * npages);
2212 return (0);
2213 case F_SOFTUNLOCK:
2214
2215 /*
2216 * This is a bit ugly, we pass in the real seg pointer,
2217 * but the sptseg_addr is the virtual address within the
2218 * dummy seg.
2219 */
2220 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2221 return (0);
2222
2223 case F_PROT:
2224
2225 /*
2226 * This takes care of the unusual case where a user
2227 * allocates a stack in shared memory and a register
2228 * window overflow is written to that stack page before
2229 * it is otherwise modified.
2230 *
2231 * We can get away with this because ISM segments are
2232 * always rw. Other than this unusual case, there
2233 * should be no instances of protection violations.
2234 */
2235 return (0);
2236
2237 default:
2238 #ifdef DEBUG
2239 cmn_err(CE_WARN, "segspt_shmfault default type?");
2240 #endif
2241 return (FC_NOMAP);
2242 }
2243 }
2244
2245 /*ARGSUSED*/
2246 static faultcode_t
2247 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2248 {
2249 return (0);
2250 }
2251
2252 /*ARGSUSED*/
2253 static int
2254 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2255 {
2256 return (0);
2257 }
2258
2259 /*ARGSUSED*/
2260 static size_t
2261 segspt_shmswapout(struct seg *seg)
2262 {
2263 return (0);
2264 }
2265
2266 /*
2267 * duplicate the shared page tables
2268 */
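/*
 * This is reached through the segment dup entry point, typically when an
 * address space is duplicated (e.g. on fork()): the new segment gets its own
 * shm_data but references the same underlying spt segment and anon_map,
 * whose reference count is bumped below.
 */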
2269 int
2270 segspt_shmdup(struct seg *seg, struct seg *newseg)
2271 {
2272 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2273 struct anon_map *amp = shmd->shm_amp;
2274 struct shm_data *shmd_new;
2275 struct seg *spt_seg = shmd->shm_sptseg;
2276 struct spt_data *sptd = spt_seg->s_data;
2277 int error = 0;
2278
2279 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2280
2281 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2282 newseg->s_data = (void *)shmd_new;
2283 shmd_new->shm_sptas = shmd->shm_sptas;
2284 shmd_new->shm_amp = amp;
2285 shmd_new->shm_sptseg = shmd->shm_sptseg;
2286 newseg->s_ops = &segspt_shmops;
2287 newseg->s_szc = seg->s_szc;
2288 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2289
	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2291 amp->refcnt++;
	ANON_LOCK_EXIT(&amp->a_rwlock);
2293
2294 if (sptd->spt_flags & SHM_PAGEABLE) {
2295 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2296 shmd_new->shm_lckpgs = 0;
2297 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2298 if ((error = hat_share(newseg->s_as->a_hat,
2299 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2300 seg->s_size, seg->s_szc)) != 0) {
2301 kmem_free(shmd_new->shm_vpage,
2302 btopr(amp->size));
2303 }
2304 }
2305 return (error);
2306 } else {
2307 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2308 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2309 seg->s_szc));
2310
2311 }
2312 }
2313
2314 /*ARGSUSED*/
2315 int
2316 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2317 {
2318 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2319 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2320
2321 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2322
2323 /*
2324 * ISM segment is always rw.
2325 */
2326 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2327 }
2328
2329 /*
2330 * Return an array of locked large pages, for empty slots allocate
2331 * private zero-filled anon pages.
2332 */
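/*
 * Caller sketch (see segspt_dismfault() and segspt_shmlockop() for the real
 * usage): the caller allocates a ppa[] array with one slot per PAGESIZE page
 * in the range, calls spt_anon_getpages(), and on success eventually drops
 * the SE_SHARED page locks by calling page_unlock() on each ppa[] entry. On
 * failure, pages already collected in ppa[] are unlocked before returning.
 */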
2333 static int
2334 spt_anon_getpages(
2335 struct seg *sptseg,
2336 caddr_t sptaddr,
2337 size_t len,
2338 page_t *ppa[])
2339 {
2340 struct spt_data *sptd = sptseg->s_data;
2341 struct anon_map *amp = sptd->spt_amp;
2342 enum seg_rw rw = sptd->spt_prot;
2343 uint_t szc = sptseg->s_szc;
2344 size_t pg_sz, share_sz = page_get_pagesize(szc);
2345 pgcnt_t lp_npgs;
2346 caddr_t lp_addr, e_sptaddr;
2347 uint_t vpprot, ppa_szc = 0;
2348 struct vpage *vpage = NULL;
2349 ulong_t j, ppa_idx;
2350 int err, ierr = 0;
2351 pgcnt_t an_idx;
2352 anon_sync_obj_t cookie;
2353 int anon_locked = 0;
2354 pgcnt_t amp_pgs;
2355
2356
2357 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2358 ASSERT(len != 0);
2359
2360 pg_sz = share_sz;
2361 lp_npgs = btop(pg_sz);
2362 lp_addr = sptaddr;
2363 e_sptaddr = sptaddr + len;
2364 an_idx = seg_page(sptseg, sptaddr);
2365 ppa_idx = 0;
2366
	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2368
2369 amp_pgs = page_get_pagecnt(amp->a_szc);
2370
2371 /*CONSTCOND*/
2372 while (1) {
2373 for (; lp_addr < e_sptaddr;
2374 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2375
			/*
			 * If we're currently locked and we cross into a new
			 * anon chunk, drop the lock on the current chunk.
			 */
2380 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2381 anon_array_exit(&cookie);
2382 anon_locked = 0;
2383 }
2384 if (!anon_locked) {
2385 anon_array_enter(amp, an_idx, &cookie);
2386 anon_locked = 1;
2387 }
2388 ppa_szc = (uint_t)-1;
2389 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2390 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2391 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2392
2393 if (ierr != 0) {
2394 if (ierr > 0) {
2395 err = FC_MAKE_ERR(ierr);
2396 goto lpgs_err;
2397 }
2398 break;
2399 }
2400 }
2401 if (lp_addr == e_sptaddr) {
2402 break;
2403 }
2404 ASSERT(lp_addr < e_sptaddr);
2405
		/*
		 * ierr == -1 means we failed to allocate a large page,
		 * so do a size-down operation.
		 *
		 * ierr == -2 means some other process that privately shares
		 * pages with this process has allocated a larger page and we
		 * need to retry with larger pages. So do a size-up
		 * operation. This relies on the fact that large pages are
		 * never partially shared, i.e. if we share any constituent
		 * page of a large page with another process we must share
		 * the entire large page. Note this cannot happen for the
		 * SOFTLOCK case, unless the current address (lp_addr) is at
		 * the beginning of the next page size boundary, because the
		 * other process couldn't have relocated locked pages.
		 */
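		/*
		 * For example (hypothetical values): if szc currently maps
		 * to 4M pages and anon_map_getpages() returns -1, the code
		 * below drops szc by one (when segvn_anypgsz is set) and the
		 * outer loop retries the remaining [lp_addr, e_sptaddr)
		 * range at the smaller page size.
		 */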
2421 ASSERT(ierr == -1 || ierr == -2);
2422 if (segvn_anypgsz) {
2423 ASSERT(ierr == -2 || szc != 0);
2424 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2425 szc = (ierr == -1) ? szc - 1 : szc + 1;
2426 } else {
			/*
			 * For faults with segvn_anypgsz == 0 we need to be
			 * careful not to loop forever if an existing page is
			 * found with a szc other than 0 or seg->s_szc. This
			 * could be due to page relocations on behalf of DR
			 * or, more likely, large page creation. For this
			 * case simply re-size to the existing page's szc as
			 * returned by anon_map_getpages().
			 */
2437 if (ppa_szc == (uint_t)-1) {
2438 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2439 } else {
2440 ASSERT(ppa_szc <= sptseg->s_szc);
2441 ASSERT(ierr == -2 || ppa_szc < szc);
2442 ASSERT(ierr == -1 || ppa_szc > szc);
2443 szc = ppa_szc;
2444 }
2445 }
2446 pg_sz = page_get_pagesize(szc);
2447 lp_npgs = btop(pg_sz);
2448 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2449 }
2450 if (anon_locked) {
2451 anon_array_exit(&cookie);
2452 }
	ANON_LOCK_EXIT(&amp->a_rwlock);
2454 return (0);
2455
2456 lpgs_err:
2457 if (anon_locked) {
2458 anon_array_exit(&cookie);
2459 }
	ANON_LOCK_EXIT(&amp->a_rwlock);
2461 for (j = 0; j < ppa_idx; j++)
2462 page_unlock(ppa[j]);
2463 return (err);
2464 }
2465
2466 /*
2467 * count the number of bytes in a set of spt pages that are currently not
2468 * locked
2469 */
2470 static rctl_qty_t
2471 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2472 {
2473 ulong_t i;
2474 rctl_qty_t unlocked = 0;
2475
2476 for (i = 0; i < npages; i++) {
2477 if (ppa[i]->p_lckcnt == 0)
2478 unlocked += PAGESIZE;
2479 }
2480 return (unlocked);
2481 }
2482
2483 extern u_longlong_t randtick(void);
2484 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2485 #define NLCK (NCPU_P2)
/* Random number in the range [0, n-1]; n must be a power of two */
2487 #define RAND_P2(n) \
2488 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
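/*
 * For example, on a machine where NCPU_P2 is 8 (an assumed value), NLCK is 8
 * and RAND_P2(NLCK) falls in [0, 7], so each advance reservation taken by
 * spt_lockpages() covers between 8 and 15 pages (capped by the pages left).
 */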
2489
2490 int
2491 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2492 page_t **ppa, ulong_t *lockmap, size_t pos,
2493 rctl_qty_t *locked)
2494 {
2495 struct shm_data *shmd = seg->s_data;
2496 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2497 ulong_t i;
2498 int kernel;
2499 pgcnt_t nlck = 0;
2500 int rv = 0;
2501 int use_reserved = 1;
2502
2503 /* return the number of bytes actually locked */
2504 *locked = 0;
2505
	/*
	 * To avoid contention on freemem_lock, the availrmem and
	 * pages_locked global counters are updated only once per nlck
	 * locked pages instead of once per page. Reserve nlck locks up
	 * front and deduct from this reservation for each page that
	 * requires a lock; when the reservation is consumed, reserve
	 * again. nlck is randomized so that competing threads do not
	 * fall into a cyclic lock contention pattern. When memory is
	 * low, the advance reservation is disabled and page_pp_lock()
	 * is used to account for pages individually.
	 */
2516 for (i = 0; i < npages; anon_index++, pos++, i++) {
2517 if (nlck == 0 && use_reserved == 1) {
2518 nlck = NLCK + RAND_P2(NLCK);
2519 /* if fewer loops left, decrease nlck */
2520 nlck = MIN(nlck, npages - i);
			/*
			 * Take the next batch of the reservation described
			 * above, unless memory is too low to reserve ahead.
			 */
2526 mutex_enter(&freemem_lock);
2527 if ((availrmem - nlck) < pages_pp_maximum) {
2528 /* Do not do advance memory reserves */
2529 use_reserved = 0;
2530 } else {
2531 availrmem -= nlck;
2532 pages_locked += nlck;
2533 }
2534 mutex_exit(&freemem_lock);
2535 }
2536 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2537 if (sptd->spt_ppa_lckcnt[anon_index] <
2538 (ushort_t)DISM_LOCK_MAX) {
2539 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2540 (ushort_t)DISM_LOCK_MAX) {
2541 cmn_err(CE_WARN,
2542 "DISM page lock limit "
2543 "reached on DISM offset 0x%lx\n",
2544 anon_index << PAGESHIFT);
2545 }
2546 kernel = (sptd->spt_ppa &&
2547 sptd->spt_ppa[anon_index]);
2548 if (!page_pp_lock(ppa[i], 0, kernel ||
2549 use_reserved)) {
2550 sptd->spt_ppa_lckcnt[anon_index]--;
2551 rv = EAGAIN;
2552 break;
2553 }
2554 /* if this is a newly locked page, count it */
2555 if (ppa[i]->p_lckcnt == 1) {
2556 if (kernel == 0 && use_reserved == 1)
2557 nlck--;
2558 *locked += PAGESIZE;
2559 }
2560 shmd->shm_lckpgs++;
2561 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2562 if (lockmap != NULL)
2563 BT_SET(lockmap, pos);
2564 }
2565 }
2566 }
2567 /* Return unused lock reservation */
2568 if (nlck != 0 && use_reserved == 1) {
2569 mutex_enter(&freemem_lock);
2570 availrmem += nlck;
2571 pages_locked -= nlck;
2572 mutex_exit(&freemem_lock);
2573 }
2574
2575 return (rv);
2576 }
2577
2578 int
2579 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2580 rctl_qty_t *unlocked)
2581 {
2582 struct shm_data *shmd = seg->s_data;
2583 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2584 struct anon_map *amp = sptd->spt_amp;
2585 struct anon *ap;
2586 struct vnode *vp;
2587 u_offset_t off;
2588 struct page *pp;
2589 int kernel;
2590 anon_sync_obj_t cookie;
2591 ulong_t i;
2592 pgcnt_t nlck = 0;
2593 pgcnt_t nlck_limit = NLCK;
2594
	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2596 for (i = 0; i < npages; i++, anon_index++) {
2597 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2598 anon_array_enter(amp, anon_index, &cookie);
2599 ap = anon_get_ptr(amp->ahp, anon_index);
2600 ASSERT(ap);
2601
2602 swap_xlate(ap, &vp, &off);
2603 anon_array_exit(&cookie);
2604 pp = page_lookup(vp, off, SE_SHARED);
2605 ASSERT(pp);
			/*
			 * availrmem is decremented only for pages which are
			 * not in the seg pcache; for pages in the seg pcache,
			 * availrmem was already decremented in
			 * _dismpagelock().
			 */
2611 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2612 ASSERT(pp->p_lckcnt > 0);
2613
			/*
			 * Unlock the page but do not change availrmem here;
			 * we update it ourselves every nlck loops.
			 */
2618 page_pp_unlock(pp, 0, 1);
2619 if (pp->p_lckcnt == 0) {
2620 if (kernel == 0)
2621 nlck++;
2622 *unlocked += PAGESIZE;
2623 }
2624 page_unlock(pp);
2625 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2626 sptd->spt_ppa_lckcnt[anon_index]--;
2627 shmd->shm_lckpgs--;
2628 }
2629
		/*
		 * To reduce freemem_lock contention, do not update availrmem
		 * until at least NLCK pages have been unlocked.
		 * 1. No need to update if nlck is zero.
		 * 2. Always update on the last iteration.
		 */
2636 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2637 mutex_enter(&freemem_lock);
2638 availrmem += nlck;
2639 pages_locked -= nlck;
2640 mutex_exit(&freemem_lock);
2641 nlck = 0;
2642 nlck_limit = NLCK + RAND_P2(NLCK);
2643 }
2644 }
	ANON_LOCK_EXIT(&amp->a_rwlock);
2646
2647 return (0);
2648 }
2649
2650 /*ARGSUSED*/
2651 static int
2652 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2653 int attr, int op, ulong_t *lockmap, size_t pos)
2654 {
2655 struct shm_data *shmd = seg->s_data;
2656 struct seg *sptseg = shmd->shm_sptseg;
2657 struct spt_data *sptd = sptseg->s_data;
2658 struct kshmid *sp = sptd->spt_amp->a_sp;
2659 pgcnt_t npages, a_npages;
2660 page_t **ppa;
2661 pgcnt_t an_idx, a_an_idx, ppa_idx;
2662 caddr_t spt_addr, a_addr; /* spt and aligned address */
2663 size_t a_len; /* aligned len */
2664 size_t share_sz;
2665 ulong_t i;
2666 int sts = 0;
2667 rctl_qty_t unlocked = 0;
2668 rctl_qty_t locked = 0;
2669 struct proc *p = curproc;
2670 kproject_t *proj;
2671
2672 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2673 ASSERT(sp != NULL);
2674
2675 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2676 return (0);
2677 }
2678
2679 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2680 an_idx = seg_page(seg, addr);
2681 npages = btopr(len);
2682
2683 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2684 return (ENOMEM);
2685 }
2686
2687 /*
2688 * A shm's project never changes, so no lock needed.
2689 * The shm has a hold on the project, so it will not go away.
2690 * Since we have a mapping to shm within this zone, we know
2691 * that the zone will not go away.
2692 */
2693 proj = sp->shm_perm.ipc_proj;
2694
2695 if (op == MC_LOCK) {
2696
		/*
		 * Align the addr and size request if they are not already
		 * aligned, so we can always allocate large page(s); however,
		 * we only lock what was requested in the initial request.
		 */
2702 share_sz = page_get_pagesize(sptseg->s_szc);
2703 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2704 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2705 share_sz);
2706 a_npages = btop(a_len);
2707 a_an_idx = seg_page(seg, a_addr);
2708 spt_addr = sptseg->s_base + ptob(a_an_idx);
2709 ppa_idx = an_idx - a_an_idx;
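		/*
		 * Illustrative example (hypothetical numbers, 4M shared page
		 * size, 8K PAGESIZE): an MC_LOCK request starting 3 small
		 * pages into a large page is widened so that a_addr/a_len
		 * cover the whole large page (a_npages = 512), while
		 * ppa_idx = 3 records where the caller's original range
		 * begins inside ppa[]; only those npages entries are locked.
		 */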
2710
2711 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2712 KM_NOSLEEP)) == NULL) {
2713 return (ENOMEM);
2714 }
2715
2716 /*
2717 * Don't cache any new pages for IO and
2718 * flush any cached pages.
2719 */
2720 mutex_enter(&sptd->spt_lock);
2721 if (sptd->spt_ppa != NULL)
2722 sptd->spt_flags |= DISM_PPA_CHANGED;
2723
2724 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2725 if (sts != 0) {
2726 mutex_exit(&sptd->spt_lock);
2727 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2728 return (sts);
2729 }
2730
2731 mutex_enter(&sp->shm_mlock);
2732 /* enforce locked memory rctl */
2733 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2734
2735 mutex_enter(&p->p_lock);
2736 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2737 mutex_exit(&p->p_lock);
2738 sts = EAGAIN;
2739 } else {
2740 mutex_exit(&p->p_lock);
2741 sts = spt_lockpages(seg, an_idx, npages,
2742 &ppa[ppa_idx], lockmap, pos, &locked);
2743
2744 /*
2745 * correct locked count if not all pages could be
2746 * locked
2747 */
2748 if ((unlocked - locked) > 0) {
2749 rctl_decr_locked_mem(NULL, proj,
2750 (unlocked - locked), 0);
2751 }
2752 }
2753 /*
2754 * unlock pages
2755 */
2756 for (i = 0; i < a_npages; i++)
2757 page_unlock(ppa[i]);
2758 if (sptd->spt_ppa != NULL)
2759 sptd->spt_flags |= DISM_PPA_CHANGED;
2760 mutex_exit(&sp->shm_mlock);
2761 mutex_exit(&sptd->spt_lock);
2762
2763 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2764
2765 } else if (op == MC_UNLOCK) { /* unlock */
2766 page_t **ppa;
2767
2768 mutex_enter(&sptd->spt_lock);
2769 if (shmd->shm_lckpgs == 0) {
2770 mutex_exit(&sptd->spt_lock);
2771 return (0);
2772 }
2773 /*
2774 * Don't cache new IO pages.
2775 */
2776 if (sptd->spt_ppa != NULL)
2777 sptd->spt_flags |= DISM_PPA_CHANGED;
2778
2779 mutex_enter(&sp->shm_mlock);
2780 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2781 if ((ppa = sptd->spt_ppa) != NULL)
2782 sptd->spt_flags |= DISM_PPA_CHANGED;
2783 mutex_exit(&sptd->spt_lock);
2784
2785 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2786 mutex_exit(&sp->shm_mlock);
2787
2788 if (ppa != NULL)
2789 seg_ppurge_wiredpp(ppa);
2790 }
2791 return (sts);
2792 }
2793
2794 /*ARGSUSED*/
2795 int
2796 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2797 {
2798 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2799 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2800 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2801
2802 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2803
2804 /*
2805 * ISM segment is always rw.
2806 */
2807 while (--pgno >= 0)
2808 *protv++ = sptd->spt_prot;
2809 return (0);
2810 }
2811
2812 /*ARGSUSED*/
2813 u_offset_t
2814 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2815 {
2816 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2817
2818 /* Offset does not matter in ISM memory */
2819
2820 return ((u_offset_t)0);
2821 }
2822
2823 /* ARGSUSED */
2824 int
2825 segspt_shmgettype(struct seg *seg, caddr_t addr)
2826 {
2827 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2828 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2829
2830 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2831
	/*
	 * The shared memory mapping is always MAP_SHARED; swap is only
	 * reserved for DISM.
	 */
2836 return (MAP_SHARED |
2837 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2838 }
2839
2840 /*ARGSUSED*/
2841 int
2842 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2843 {
2844 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2845 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2846
2847 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2848
2849 *vpp = sptd->spt_vp;
2850 return (0);
2851 }
2852
2853 /*
2854 * We need to wait for pending IO to complete to a DISM segment in order for
2855 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2856 * than enough time to wait.
2857 */
2858 static clock_t spt_pcache_wait = 120;
2859
2860 /*ARGSUSED*/
2861 static int
2862 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2863 {
2864 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2865 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2866 struct anon_map *amp;
2867 pgcnt_t pg_idx;
2868 ushort_t gen;
2869 clock_t end_lbolt;
2870 int writer;
2871 page_t **ppa;
2872
2873 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2874
2875 if (behav == MADV_FREE) {
2876 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2877 return (0);
2878
2879 amp = sptd->spt_amp;
2880 pg_idx = seg_page(seg, addr);
2881
2882 mutex_enter(&sptd->spt_lock);
2883 if ((ppa = sptd->spt_ppa) == NULL) {
2884 mutex_exit(&sptd->spt_lock);
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2886 anon_disclaim(amp, pg_idx, len);
			ANON_LOCK_EXIT(&amp->a_rwlock);
2888 return (0);
2889 }
2890
2891 sptd->spt_flags |= DISM_PPA_CHANGED;
2892 gen = sptd->spt_gen;
2893
2894 mutex_exit(&sptd->spt_lock);
2895
2896 /*
2897 * Purge all DISM cached pages
2898 */
2899 seg_ppurge_wiredpp(ppa);
2900
2901 /*
2902 * Drop the AS_LOCK so that other threads can grab it
2903 * in the as_pageunlock path and hopefully get the segment
2904 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2905 * to keep this segment resident.
2906 */
2907 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2908 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2909 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2910
2911 mutex_enter(&sptd->spt_lock);
2912
2913 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2914
2915 /*
2916 * Try to wait for pages to get kicked out of the seg_pcache.
2917 */
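		/*
		 * The loop below ends when the segment's generation number
		 * changes or DISM_PPA_CHANGED is cleared (the cached page
		 * list was reclaimed), when the spt_pcache_wait timeout
		 * expires, or when cv_timedwait_sig() is interrupted by a
		 * signal.
		 */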
2918 while (sptd->spt_gen == gen &&
2919 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2920 ddi_get_lbolt() < end_lbolt) {
2921 if (!cv_timedwait_sig(&sptd->spt_cv,
2922 &sptd->spt_lock, end_lbolt)) {
2923 break;
2924 }
2925 }
2926
2927 mutex_exit(&sptd->spt_lock);
2928
2929 /* Regrab the AS_LOCK and release our hold on the segment */
2930 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2931 writer ? RW_WRITER : RW_READER);
2932 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2933 if (shmd->shm_softlockcnt <= 0) {
2934 if (AS_ISUNMAPWAIT(seg->s_as)) {
2935 mutex_enter(&seg->s_as->a_contents);
2936 if (AS_ISUNMAPWAIT(seg->s_as)) {
2937 AS_CLRUNMAPWAIT(seg->s_as);
2938 cv_broadcast(&seg->s_as->a_cv);
2939 }
2940 mutex_exit(&seg->s_as->a_contents);
2941 }
2942 }
2943
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2945 anon_disclaim(amp, pg_idx, len);
		ANON_LOCK_EXIT(&amp->a_rwlock);
2947 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2948 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2949 int already_set;
2950 ulong_t anon_index;
2951 lgrp_mem_policy_t policy;
2952 caddr_t shm_addr;
2953 size_t share_size;
2954 size_t size;
2955 struct seg *sptseg = shmd->shm_sptseg;
2956 caddr_t sptseg_addr;
2957
2958 /*
2959 * Align address and length to page size of underlying segment
2960 */
2961 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2962 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2963 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2964 share_size);
2965
2966 amp = shmd->shm_amp;
2967 anon_index = seg_page(seg, shm_addr);
2968
2969 /*
2970 * And now we may have to adjust size downward if we have
2971 * exceeded the realsize of the segment or initial anon
2972 * allocations.
2973 */
2974 sptseg_addr = sptseg->s_base + ptob(anon_index);
2975 if ((sptseg_addr + size) >
2976 (sptseg->s_base + sptd->spt_realsize))
2977 size = (sptseg->s_base + sptd->spt_realsize) -
2978 sptseg_addr;
2979
2980 /*
2981 * Set memory allocation policy for this segment
2982 */
2983 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2984 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2985 NULL, 0, len);
2986
		/*
		 * If the memory allocation policy is already set and is not
		 * re-applicable, don't bother reapplying it.
		 */
2991 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2992 return (0);
2993
		/*
		 * Mark any existing pages in the given range for migration,
		 * flushing the I/O page cache and using the underlying
		 * segment to calculate the anon index and to obtain the
		 * anonmap and vnode pointers.
		 */
2999 */
3000 if (shmd->shm_softlockcnt > 0)
3001 segspt_purge(seg);
3002
3003 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3004 }
3005
3006 return (0);
3007 }
3008
3009 /*
3010 * get a memory ID for an addr in a given segment
3011 */
3012 static int
3013 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3014 {
3015 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3016 struct anon *ap;
3017 size_t anon_index;
3018 struct anon_map *amp = shmd->shm_amp;
3019 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3020 struct seg *sptseg = shmd->shm_sptseg;
3021 anon_sync_obj_t cookie;
3022
3023 anon_index = seg_page(seg, addr);
3024
3025 if (addr > (seg->s_base + sptd->spt_realsize)) {
3026 return (EFAULT);
3027 }
3028
	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3030 anon_array_enter(amp, anon_index, &cookie);
3031 ap = anon_get_ptr(amp->ahp, anon_index);
3032 if (ap == NULL) {
3033 struct page *pp;
3034 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3035
3036 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3037 if (pp == NULL) {
3038 anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
3040 return (ENOMEM);
3041 }
3042 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3043 page_unlock(pp);
3044 }
3045 anon_array_exit(&cookie);
	ANON_LOCK_EXIT(&amp->a_rwlock);
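	/*
	 * Encode the memid as the anon slot pointer (val[0]) plus the byte
	 * offset of addr within its page (val[1]).
	 */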
3047 memidp->val[0] = (uintptr_t)ap;
3048 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3049 return (0);
3050 }
3051
3052 /*
3053 * Get memory allocation policy info for specified address in given segment
3054 */
3055 static lgrp_mem_policy_info_t *
3056 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3057 {
3058 struct anon_map *amp;
3059 ulong_t anon_index;
3060 lgrp_mem_policy_info_t *policy_info;
3061 struct shm_data *shm_data;
3062
3063 ASSERT(seg != NULL);
3064
	/*
	 * Get anon_map from segshm.
	 *
	 * Assume that no lock needs to be held on the anon_map, since it
	 * should be protected by its reference count, which must be nonzero
	 * for an existing segment. The readers lock on the policy tree still
	 * needs to be grabbed, though.
	 */
3073 shm_data = (struct shm_data *)seg->s_data;
3074 if (shm_data == NULL)
3075 return (NULL);
3076 amp = shm_data->shm_amp;
3077 ASSERT(amp->refcnt != 0);
3078
3079 /*
3080 * Get policy info
3081 *
3082 * Assume starting anon index of 0
3083 */
3084 anon_index = seg_page(seg, addr);
3085 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3086
3087 return (policy_info);
3088 }