6152 use NULL dump segop as a shorthand for no-op
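In the diff below, the forward declaration of segspt_shmdump() and the .dump = segspt_shmdump entry in segspt_shmops are removed, leaving that segop NULL. Per the synopsis, the generic segment code treats a NULL dump segop as "nothing to dump", so per-driver no-op stubs become unnecessary. A minimal sketch of that convention follows (illustrative only; the helper name and its placement are assumptions, not part of this webrev):

/*
 * Sketch, not part of this change: the dispatcher skips a NULL dump
 * op instead of requiring an empty handler in every seg driver.
 */
static void
segop_dump(struct seg *seg)
{
	if (seg->s_ops->dump == NULL)
		return;		/* NULL dump segop == no-op */

	seg->s_ops->dump(seg);
}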
--- old/usr/src/uts/common/vm/seg_spt.c
+++ new/usr/src/uts/common/vm/seg_spt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/param.h>
26 26 #include <sys/user.h>
27 27 #include <sys/mman.h>
28 28 #include <sys/kmem.h>
29 29 #include <sys/sysmacros.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/tuneable.h>
33 33 #include <vm/hat.h>
34 34 #include <vm/seg.h>
35 35 #include <vm/as.h>
36 36 #include <vm/anon.h>
37 37 #include <vm/page.h>
38 38 #include <sys/buf.h>
39 39 #include <sys/swap.h>
40 40 #include <sys/atomic.h>
41 41 #include <vm/seg_spt.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/vtrace.h>
44 44 #include <sys/shm.h>
45 45 #include <sys/shm_impl.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/vmsystm.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/project.h>
50 50 #include <sys/tnf_probe.h>
51 51 #include <sys/zone.h>
52 52
53 53 #define SEGSPTADDR (caddr_t)0x0
54 54
55 55 /*
56 56 * # pages used for spt
57 57 */
58 58 size_t spt_used;
59 59
60 60 /*
61 61 * segspt_minfree is the memory left for system after ISM
62 62 * locked its pages; it is set up to 5% of availrmem in
63 63 * sptcreate when ISM is created. ISM should not use more
64 64 * than ~90% of availrmem; if it does, then the performance
65 65 * of the system may decrease. Machines with large memories may
66 66 * be able to use up more memory for ISM so we set the default
67 67 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
68 68 * If somebody wants even more memory for ISM (risking hanging
69 69 * the system) they can patch the segspt_minfree to smaller number.
70 70 */
71 71 pgcnt_t segspt_minfree = 0;
72 72
73 73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 75 static void segspt_free(struct seg *seg);
76 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78 78
79 79 static void
80 80 segspt_badop()
81 81 {
82 82 panic("segspt_badop called");
83 83 /*NOTREACHED*/
84 84 }
85 85
86 86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
87 87
88 88 struct seg_ops segspt_ops = {
89 89 .dup = SEGSPT_BADOP(int),
90 90 .unmap = segspt_unmap,
91 91 .free = segspt_free,
92 92 .fault = SEGSPT_BADOP(int),
93 93 .faulta = SEGSPT_BADOP(faultcode_t),
94 94 .setprot = SEGSPT_BADOP(int),
95 95 .checkprot = SEGSPT_BADOP(int),
96 96 .kluster = SEGSPT_BADOP(int),
97 97 .swapout = SEGSPT_BADOP(size_t),
98 98 .sync = SEGSPT_BADOP(int),
99 99 .incore = SEGSPT_BADOP(size_t),
100 100 .lockop = SEGSPT_BADOP(int),
101 101 .getprot = SEGSPT_BADOP(int),
102 102 .getoffset = SEGSPT_BADOP(u_offset_t),
103 103 .gettype = SEGSPT_BADOP(int),
104 104 .getvp = SEGSPT_BADOP(int),
105 105 .advise = SEGSPT_BADOP(int),
106 106 .dump = SEGSPT_BADOP(void),
107 107 .pagelock = SEGSPT_BADOP(int),
108 108 .setpagesize = SEGSPT_BADOP(int),
109 109 .getmemid = SEGSPT_BADOP(int),
110 110 .getpolicy = segspt_getpolicy,
111 111 .capable = SEGSPT_BADOP(int),
112 112 };
113 113
114 114 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
115 115 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
116 116 static void segspt_shmfree(struct seg *seg);
117 117 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
118 118 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
119 119 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
120 120 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
121 121 register size_t len, register uint_t prot);
122 122 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
123 123 uint_t prot);
124 124 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
125 125 static size_t segspt_shmswapout(struct seg *seg);
126 126 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
127 127 register char *vec);
128 128 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
129 129 int attr, uint_t flags);
130 130 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
131 131 int attr, int op, ulong_t *lockmap, size_t pos);
132 132 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
133 133 uint_t *protv);
134 134 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
135 135 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
136 136 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
137 137 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
138 138 uint_t behav);
139 -static void segspt_shmdump(struct seg *seg);
140 139 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
141 140 struct page ***, enum lock_type, enum seg_rw);
142 141 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
143 142 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
144 143
145 144 struct seg_ops segspt_shmops = {
146 145 .dup = segspt_shmdup,
147 146 .unmap = segspt_shmunmap,
148 147 .free = segspt_shmfree,
149 148 .fault = segspt_shmfault,
150 149 .faulta = segspt_shmfaulta,
151 150 .setprot = segspt_shmsetprot,
152 151 .checkprot = segspt_shmcheckprot,
153 152 .kluster = segspt_shmkluster,
154 153 .swapout = segspt_shmswapout,
155 154 .sync = segspt_shmsync,
156 155 .incore = segspt_shmincore,
157 156 .lockop = segspt_shmlockop,
158 157 .getprot = segspt_shmgetprot,
159 158 .getoffset = segspt_shmgetoffset,
160 159 .gettype = segspt_shmgettype,
161 160 .getvp = segspt_shmgetvp,
162 161 .advise = segspt_shmadvise,
163 - .dump = segspt_shmdump,
164 162 .pagelock = segspt_shmpagelock,
165 163 .getmemid = segspt_shmgetmemid,
166 164 .getpolicy = segspt_shmgetpolicy,
167 165 };
168 166
169 167 static void segspt_purge(struct seg *seg);
170 168 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
171 169 enum seg_rw, int);
172 170 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
173 171 page_t **ppa);
174 172
175 173
176 174
177 175 /*ARGSUSED*/
178 176 int
179 177 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
180 178 uint_t prot, uint_t flags, uint_t share_szc)
181 179 {
182 180 int err;
183 181 struct as *newas;
184 182 struct segspt_crargs sptcargs;
185 183
186 184 #ifdef DEBUG
187 185 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
188 186 tnf_ulong, size, size );
189 187 #endif
190 188 if (segspt_minfree == 0) /* leave min 5% of availrmem for */
191 189 segspt_minfree = availrmem/20; /* for the system */
192 190
193 191 if (!hat_supported(HAT_SHARED_PT, (void *)0))
194 192 return (EINVAL);
195 193
196 194 /*
197 195 * get a new as for this shared memory segment
198 196 */
199 197 newas = as_alloc();
200 198 newas->a_proc = NULL;
201 199 sptcargs.amp = amp;
202 200 sptcargs.prot = prot;
203 201 sptcargs.flags = flags;
204 202 sptcargs.szc = share_szc;
205 203 /*
206 204 * create a shared page table (spt) segment
207 205 */
208 206
209 207 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
210 208 as_free(newas);
211 209 return (err);
212 210 }
213 211 *sptseg = sptcargs.seg_spt;
214 212 return (0);
215 213 }
216 214
217 215 void
218 216 sptdestroy(struct as *as, struct anon_map *amp)
219 217 {
220 218
221 219 #ifdef DEBUG
222 220 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
223 221 #endif
224 222 (void) as_unmap(as, SEGSPTADDR, amp->size);
225 223 as_free(as);
226 224 }
227 225
228 226 /*
229 227 * called from seg_free().
230 228 * free (i.e., unlock, unmap, return to free list)
231 229 * all the pages in the given seg.
232 230 */
233 231 void
234 232 segspt_free(struct seg *seg)
235 233 {
236 234 struct spt_data *sptd = (struct spt_data *)seg->s_data;
237 235
238 236 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
239 237
240 238 if (sptd != NULL) {
241 239 if (sptd->spt_realsize)
242 240 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
243 241
244 242 if (sptd->spt_ppa_lckcnt)
245 243 kmem_free(sptd->spt_ppa_lckcnt,
246 244 sizeof (*sptd->spt_ppa_lckcnt)
247 245 * btopr(sptd->spt_amp->size));
248 246 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
249 247 cv_destroy(&sptd->spt_cv);
250 248 mutex_destroy(&sptd->spt_lock);
251 249 kmem_free(sptd, sizeof (*sptd));
252 250 }
253 251 }
254 252
255 253 /*ARGSUSED*/
256 254 static int
257 255 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
258 256 uint_t flags)
259 257 {
260 258 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
261 259
262 260 return (0);
263 261 }
264 262
265 263 /*ARGSUSED*/
266 264 static size_t
267 265 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
268 266 {
269 267 caddr_t eo_seg;
270 268 pgcnt_t npages;
271 269 struct shm_data *shmd = (struct shm_data *)seg->s_data;
272 270 struct seg *sptseg;
273 271 struct spt_data *sptd;
274 272
275 273 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
276 274 #ifdef lint
277 275 seg = seg;
278 276 #endif
279 277 sptseg = shmd->shm_sptseg;
280 278 sptd = sptseg->s_data;
281 279
282 280 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
283 281 eo_seg = addr + len;
284 282 while (addr < eo_seg) {
285 283 /* page exists, and it's locked. */
286 284 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
287 285 SEG_PAGE_ANON;
288 286 addr += PAGESIZE;
289 287 }
290 288 return (len);
291 289 } else {
292 290 struct anon_map *amp = shmd->shm_amp;
293 291 struct anon *ap;
294 292 page_t *pp;
295 293 pgcnt_t anon_index;
296 294 struct vnode *vp;
297 295 u_offset_t off;
298 296 ulong_t i;
299 297 int ret;
300 298 anon_sync_obj_t cookie;
301 299
302 300 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
303 301 anon_index = seg_page(seg, addr);
304 302 npages = btopr(len);
305 303 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
306 304 return (EINVAL);
307 305 }
308 306 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
309 307 for (i = 0; i < npages; i++, anon_index++) {
310 308 ret = 0;
311 309 anon_array_enter(amp, anon_index, &cookie);
312 310 ap = anon_get_ptr(amp->ahp, anon_index);
313 311 if (ap != NULL) {
314 312 swap_xlate(ap, &vp, &off);
315 313 anon_array_exit(&cookie);
316 314 pp = page_lookup_nowait(vp, off, SE_SHARED);
317 315 if (pp != NULL) {
318 316 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
319 317 page_unlock(pp);
320 318 }
321 319 } else {
322 320 anon_array_exit(&cookie);
323 321 }
324 322 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
325 323 ret |= SEG_PAGE_LOCKED;
326 324 }
327 325 *vec++ = (char)ret;
328 326 }
329 327 ANON_LOCK_EXIT(&amp->a_rwlock);
330 328 return (len);
331 329 }
332 330 }
333 331
334 332 static int
335 333 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
336 334 {
337 335 size_t share_size;
338 336
339 337 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
340 338
341 339 /*
342 340 * seg.s_size may have been rounded up to the largest page size
343 341 * in shmat().
344 342 * XXX This should be cleaned up. sptdestroy should take a length
345 343 * argument which should be the same as sptcreate. Then
346 344 * this rounding would not be needed (or is done in shm.c)
347 345 * Only the check for full segment will be needed.
348 346 *
349 347 * XXX -- shouldn't raddr == 0 always? These tests don't seem
350 348 * to be useful at all.
351 349 */
352 350 share_size = page_get_pagesize(seg->s_szc);
353 351 ssize = P2ROUNDUP(ssize, share_size);
354 352
355 353 if (raddr == seg->s_base && ssize == seg->s_size) {
356 354 seg_free(seg);
357 355 return (0);
358 356 } else
359 357 return (EINVAL);
360 358 }
361 359
362 360 int
363 361 segspt_create(struct seg *seg, caddr_t argsp)
364 362 {
365 363 int err;
366 364 caddr_t addr = seg->s_base;
367 365 struct spt_data *sptd;
368 366 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
369 367 struct anon_map *amp = sptcargs->amp;
370 368 struct kshmid *sp = amp->a_sp;
371 369 struct cred *cred = CRED();
372 370 ulong_t i, j, anon_index = 0;
373 371 pgcnt_t npages = btopr(amp->size);
374 372 struct vnode *vp;
375 373 page_t **ppa;
376 374 uint_t hat_flags;
377 375 size_t pgsz;
378 376 pgcnt_t pgcnt;
379 377 caddr_t a;
380 378 pgcnt_t pidx;
381 379 size_t sz;
382 380 proc_t *procp = curproc;
383 381 rctl_qty_t lockedbytes = 0;
384 382 kproject_t *proj;
385 383
386 384 /*
387 385 * We are holding the a_lock on the underlying dummy as,
388 386 * so we can make calls to the HAT layer.
389 387 */
390 388 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
391 389 ASSERT(sp != NULL);
392 390
393 391 #ifdef DEBUG
394 392 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
395 393 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
396 394 #endif
397 395 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
398 396 if (err = anon_swap_adjust(npages))
399 397 return (err);
400 398 }
401 399 err = ENOMEM;
402 400
403 401 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
404 402 goto out1;
405 403
406 404 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
407 405 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
408 406 KM_NOSLEEP)) == NULL)
409 407 goto out2;
410 408 }
411 409
412 410 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
413 411
414 412 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
415 413 goto out3;
416 414
417 415 seg->s_ops = &segspt_ops;
418 416 sptd->spt_vp = vp;
419 417 sptd->spt_amp = amp;
420 418 sptd->spt_prot = sptcargs->prot;
421 419 sptd->spt_flags = sptcargs->flags;
422 420 seg->s_data = (caddr_t)sptd;
423 421 sptd->spt_ppa = NULL;
424 422 sptd->spt_ppa_lckcnt = NULL;
425 423 seg->s_szc = sptcargs->szc;
426 424 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
427 425 sptd->spt_gen = 0;
428 426
429 427 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
430 428 if (seg->s_szc > amp->a_szc) {
431 429 amp->a_szc = seg->s_szc;
432 430 }
433 431 ANON_LOCK_EXIT(&amp->a_rwlock);
434 432
435 433 /*
436 434 * Set policy to affect initial allocation of pages in
437 435 * anon_map_createpages()
438 436 */
439 437 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
440 438 NULL, 0, ptob(npages));
441 439
442 440 if (sptcargs->flags & SHM_PAGEABLE) {
443 441 size_t share_sz;
444 442 pgcnt_t new_npgs, more_pgs;
445 443 struct anon_hdr *nahp;
446 444 zone_t *zone;
447 445
448 446 share_sz = page_get_pagesize(seg->s_szc);
449 447 if (!IS_P2ALIGNED(amp->size, share_sz)) {
450 448 /*
451 449 * We are rounding up the size of the anon array
452 450 * on 4 M boundary because we always create 4 M
453 451 * of page(s) when locking, faulting pages and we
454 452 * don't have to check for all corner cases e.g.
455 453 * if there is enough space to allocate 4 M
456 454 * page.
457 455 */
458 456 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
459 457 more_pgs = new_npgs - npages;
460 458
461 459 /*
462 460 * The zone will never be NULL, as a fully created
463 461 * shm always has an owning zone.
464 462 */
465 463 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
466 464 ASSERT(zone != NULL);
467 465 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
468 466 err = ENOMEM;
469 467 goto out4;
470 468 }
471 469
472 470 nahp = anon_create(new_npgs, ANON_SLEEP);
473 471 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
474 472 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
475 473 ANON_SLEEP);
476 474 anon_release(amp->ahp, npages);
477 475 amp->ahp = nahp;
478 476 ASSERT(amp->swresv == ptob(npages));
479 477 amp->swresv = amp->size = ptob(new_npgs);
480 478 ANON_LOCK_EXIT(&amp->a_rwlock);
481 479 npages = new_npgs;
482 480 }
483 481
484 482 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
485 483 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
486 484 sptd->spt_pcachecnt = 0;
487 485 sptd->spt_realsize = ptob(npages);
488 486 sptcargs->seg_spt = seg;
489 487 return (0);
490 488 }
491 489
492 490 /*
493 491 * get array of pages for each anon slot in amp
494 492 */
495 493 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
496 494 seg, addr, S_CREATE, cred)) != 0)
497 495 goto out4;
498 496
499 497 mutex_enter(&sp->shm_mlock);
500 498
501 499 /* May be partially locked, so, count bytes to charge for locking */
502 500 for (i = 0; i < npages; i++)
503 501 if (ppa[i]->p_lckcnt == 0)
504 502 lockedbytes += PAGESIZE;
505 503
506 504 proj = sp->shm_perm.ipc_proj;
507 505
508 506 if (lockedbytes > 0) {
509 507 mutex_enter(&procp->p_lock);
510 508 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
511 509 mutex_exit(&procp->p_lock);
512 510 mutex_exit(&sp->shm_mlock);
513 511 for (i = 0; i < npages; i++)
514 512 page_unlock(ppa[i]);
515 513 err = ENOMEM;
516 514 goto out4;
517 515 }
518 516 mutex_exit(&procp->p_lock);
519 517 }
520 518
521 519 /*
522 520 * addr is initial address corresponding to the first page on ppa list
523 521 */
524 522 for (i = 0; i < npages; i++) {
525 523 /* attempt to lock all pages */
526 524 if (page_pp_lock(ppa[i], 0, 1) == 0) {
527 525 /*
528 526 * if unable to lock any page, unlock all
529 527 * of them and return error
530 528 */
531 529 for (j = 0; j < i; j++)
532 530 page_pp_unlock(ppa[j], 0, 1);
533 531 for (i = 0; i < npages; i++)
534 532 page_unlock(ppa[i]);
535 533 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
536 534 mutex_exit(&sp->shm_mlock);
537 535 err = ENOMEM;
538 536 goto out4;
539 537 }
540 538 }
541 539 mutex_exit(&sp->shm_mlock);
542 540
543 541 /*
544 542 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
545 543 * for the entire life of the segment. For example platforms
546 544 * that do not support Dynamic Reconfiguration.
547 545 */
548 546 hat_flags = HAT_LOAD_SHARE;
549 547 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
550 548 hat_flags |= HAT_LOAD_LOCK;
551 549
552 550 /*
553 551 * Load translations one large page at a time
554 552 * to make sure we don't create mappings bigger than
555 553 * segment's size code in case underlying pages
556 554 * are shared with segvn's segment that uses bigger
557 555 * size code than we do.
558 556 */
559 557 pgsz = page_get_pagesize(seg->s_szc);
560 558 pgcnt = page_get_pagecnt(seg->s_szc);
561 559 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
562 560 sz = MIN(pgsz, ptob(npages - pidx));
563 561 hat_memload_array(seg->s_as->a_hat, a, sz,
564 562 &ppa[pidx], sptd->spt_prot, hat_flags);
565 563 }
566 564
567 565 /*
568 566 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
569 567 * we will leave the pages locked SE_SHARED for the life
570 568 * of the ISM segment. This will prevent any calls to
571 569 * hat_pageunload() on this ISM segment for those platforms.
572 570 */
573 571 if (!(hat_flags & HAT_LOAD_LOCK)) {
574 572 /*
575 573 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
576 574 * we no longer need to hold the SE_SHARED lock on the pages,
577 575 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
578 576 * SE_SHARED lock on the pages as necessary.
579 577 */
580 578 for (i = 0; i < npages; i++)
581 579 page_unlock(ppa[i]);
582 580 }
583 581 sptd->spt_pcachecnt = 0;
584 582 kmem_free(ppa, ((sizeof (page_t *)) * npages));
585 583 sptd->spt_realsize = ptob(npages);
586 584 atomic_add_long(&spt_used, npages);
587 585 sptcargs->seg_spt = seg;
588 586 return (0);
589 587
590 588 out4:
591 589 seg->s_data = NULL;
592 590 kmem_free(vp, sizeof (*vp));
593 591 cv_destroy(&sptd->spt_cv);
594 592 out3:
595 593 mutex_destroy(&sptd->spt_lock);
596 594 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
597 595 kmem_free(ppa, (sizeof (*ppa) * npages));
598 596 out2:
599 597 kmem_free(sptd, sizeof (*sptd));
600 598 out1:
601 599 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
602 600 anon_swap_restore(npages);
603 601 return (err);
604 602 }
605 603
606 604 /*ARGSUSED*/
607 605 void
608 606 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
609 607 {
610 608 struct page *pp;
611 609 struct spt_data *sptd = (struct spt_data *)seg->s_data;
612 610 pgcnt_t npages;
613 611 ulong_t anon_idx;
614 612 struct anon_map *amp;
615 613 struct anon *ap;
616 614 struct vnode *vp;
617 615 u_offset_t off;
618 616 uint_t hat_flags;
619 617 int root = 0;
620 618 pgcnt_t pgs, curnpgs = 0;
621 619 page_t *rootpp;
622 620 rctl_qty_t unlocked_bytes = 0;
623 621 kproject_t *proj;
624 622 kshmid_t *sp;
625 623
626 624 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
627 625
628 626 len = P2ROUNDUP(len, PAGESIZE);
629 627
630 628 npages = btop(len);
631 629
632 630 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
633 631 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
634 632 (sptd->spt_flags & SHM_PAGEABLE)) {
635 633 hat_flags = HAT_UNLOAD_UNMAP;
636 634 }
637 635
638 636 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
639 637
640 638 amp = sptd->spt_amp;
641 639 if (sptd->spt_flags & SHM_PAGEABLE)
642 640 npages = btop(amp->size);
643 641
644 642 ASSERT(amp != NULL);
645 643
646 644 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
647 645 sp = amp->a_sp;
648 646 proj = sp->shm_perm.ipc_proj;
649 647 mutex_enter(&sp->shm_mlock);
650 648 }
651 649 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
652 650 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
653 651 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
654 652 panic("segspt_free_pages: null app");
655 653 /*NOTREACHED*/
656 654 }
657 655 } else {
658 656 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
659 657 == NULL)
660 658 continue;
661 659 }
662 660 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
663 661 swap_xlate(ap, &vp, &off);
664 662
665 663 /*
666 664 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
667 665 * the pages won't be having SE_SHARED lock at this
668 666 * point.
669 667 *
670 668 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
671 669 * the pages are still held SE_SHARED locked from the
672 670 * original segspt_create()
673 671 *
674 672 * Our goal is to get SE_EXCL lock on each page, remove
675 673 * permanent lock on it and invalidate the page.
676 674 */
677 675 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
678 676 if (hat_flags == HAT_UNLOAD_UNMAP)
679 677 pp = page_lookup(vp, off, SE_EXCL);
680 678 else {
681 679 if ((pp = page_find(vp, off)) == NULL) {
682 680 panic("segspt_free_pages: "
683 681 "page not locked");
684 682 /*NOTREACHED*/
685 683 }
686 684 if (!page_tryupgrade(pp)) {
687 685 page_unlock(pp);
688 686 pp = page_lookup(vp, off, SE_EXCL);
689 687 }
690 688 }
691 689 if (pp == NULL) {
692 690 panic("segspt_free_pages: "
693 691 "page not in the system");
694 692 /*NOTREACHED*/
695 693 }
696 694 ASSERT(pp->p_lckcnt > 0);
697 695 page_pp_unlock(pp, 0, 1);
698 696 if (pp->p_lckcnt == 0)
699 697 unlocked_bytes += PAGESIZE;
700 698 } else {
701 699 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
702 700 continue;
703 701 }
704 702 /*
705 703 * It's logical to invalidate the pages here as in most cases
706 704 * these were created by segspt.
707 705 */
708 706 if (pp->p_szc != 0) {
709 707 if (root == 0) {
710 708 ASSERT(curnpgs == 0);
711 709 root = 1;
712 710 rootpp = pp;
713 711 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
714 712 ASSERT(pgs > 1);
715 713 ASSERT(IS_P2ALIGNED(pgs, pgs));
716 714 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
717 715 curnpgs--;
718 716 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
719 717 ASSERT(curnpgs == 1);
720 718 ASSERT(page_pptonum(pp) ==
721 719 page_pptonum(rootpp) + (pgs - 1));
722 720 page_destroy_pages(rootpp);
723 721 root = 0;
724 722 curnpgs = 0;
725 723 } else {
726 724 ASSERT(curnpgs > 1);
727 725 ASSERT(page_pptonum(pp) ==
728 726 page_pptonum(rootpp) + (pgs - curnpgs));
729 727 curnpgs--;
730 728 }
731 729 } else {
732 730 if (root != 0 || curnpgs != 0) {
733 731 panic("segspt_free_pages: bad large page");
734 732 /*NOTREACHED*/
735 733 }
736 734 /*
737 735 * Before destroying the pages, we need to take care
738 736 * of the rctl locked memory accounting. For that
739 737 * we need to calculate the unlocked_bytes.
740 738 */
741 739 if (pp->p_lckcnt > 0)
742 740 unlocked_bytes += PAGESIZE;
743 741 /*LINTED: constant in conditional context */
744 742 VN_DISPOSE(pp, B_INVAL, 0, kcred);
745 743 }
746 744 }
747 745 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
748 746 if (unlocked_bytes > 0)
749 747 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
750 748 mutex_exit(&sp->shm_mlock);
751 749 }
752 750 if (root != 0 || curnpgs != 0) {
753 751 panic("segspt_free_pages: bad large page");
754 752 /*NOTREACHED*/
755 753 }
756 754
757 755 /*
758 756 * mark that pages have been released
759 757 */
760 758 sptd->spt_realsize = 0;
761 759
762 760 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
763 761 atomic_add_long(&spt_used, -npages);
764 762 anon_swap_restore(npages);
765 763 }
766 764 }
767 765
768 766 /*
769 767 * Get memory allocation policy info for specified address in given segment
770 768 */
771 769 static lgrp_mem_policy_info_t *
772 770 segspt_getpolicy(struct seg *seg, caddr_t addr)
773 771 {
774 772 struct anon_map *amp;
775 773 ulong_t anon_index;
776 774 lgrp_mem_policy_info_t *policy_info;
777 775 struct spt_data *spt_data;
778 776
779 777 ASSERT(seg != NULL);
780 778
781 779 /*
782 780 * Get anon_map from segspt
783 781 *
784 782 * Assume that no lock needs to be held on anon_map, since
785 783 * it should be protected by its reference count which must be
786 784 * nonzero for an existing segment
787 785 * Need to grab readers lock on policy tree though
788 786 */
789 787 spt_data = (struct spt_data *)seg->s_data;
790 788 if (spt_data == NULL)
791 789 return (NULL);
792 790 amp = spt_data->spt_amp;
793 791 ASSERT(amp->refcnt != 0);
794 792
795 793 /*
796 794 * Get policy info
797 795 *
798 796 * Assume starting anon index of 0
799 797 */
800 798 anon_index = seg_page(seg, addr);
801 799 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
802 800
803 801 return (policy_info);
804 802 }
805 803
806 804 /*
807 805 * DISM only.
808 806 * Return locked pages over a given range.
809 807 *
810 808 * We will cache all DISM locked pages and save the pplist for the
811 809 * entire segment in the ppa field of the underlying DISM segment structure.
812 810 * Later, during a call to segspt_reclaim() we will use this ppa array
813 811 * to page_unlock() all of the pages and then we will free this ppa list.
814 812 */
815 813 /*ARGSUSED*/
816 814 static int
817 815 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
818 816 struct page ***ppp, enum lock_type type, enum seg_rw rw)
819 817 {
820 818 struct shm_data *shmd = (struct shm_data *)seg->s_data;
821 819 struct seg *sptseg = shmd->shm_sptseg;
822 820 struct spt_data *sptd = sptseg->s_data;
823 821 pgcnt_t pg_idx, npages, tot_npages, npgs;
824 822 struct page **pplist, **pl, **ppa, *pp;
825 823 struct anon_map *amp;
826 824 spgcnt_t an_idx;
827 825 int ret = ENOTSUP;
828 826 uint_t pl_built = 0;
829 827 struct anon *ap;
830 828 struct vnode *vp;
831 829 u_offset_t off;
832 830 pgcnt_t claim_availrmem = 0;
833 831 uint_t szc;
834 832
835 833 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
836 834 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
837 835
838 836 /*
839 837 * We want to lock/unlock the entire ISM segment. Therefore,
840 838 * we will be using the underlying sptseg and its base address
841 839 * and length for the caching arguments.
842 840 */
843 841 ASSERT(sptseg);
844 842 ASSERT(sptd);
845 843
846 844 pg_idx = seg_page(seg, addr);
847 845 npages = btopr(len);
848 846
849 847 /*
850 848 * check if the request is larger than number of pages covered
851 849 * by amp
852 850 */
853 851 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
854 852 *ppp = NULL;
855 853 return (ENOTSUP);
856 854 }
857 855
858 856 if (type == L_PAGEUNLOCK) {
859 857 ASSERT(sptd->spt_ppa != NULL);
860 858
861 859 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
862 860 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
863 861
864 862 /*
865 863 * If someone is blocked while unmapping, we purge
866 864 * segment page cache and thus reclaim pplist synchronously
867 865 * without waiting for seg_pasync_thread. This speeds up
868 866 * unmapping in cases where munmap(2) is called, while
869 867 * raw async i/o is still in progress or where a thread
870 868 * exits on data fault in a multithreaded application.
871 869 */
872 870 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
873 871 (AS_ISUNMAPWAIT(seg->s_as) &&
874 872 shmd->shm_softlockcnt > 0)) {
875 873 segspt_purge(seg);
876 874 }
877 875 return (0);
878 876 }
879 877
880 878 /* The L_PAGELOCK case ... */
881 879
882 880 if (sptd->spt_flags & DISM_PPA_CHANGED) {
883 881 segspt_purge(seg);
884 882 /*
885 883 * for DISM ppa needs to be rebuild since
886 884 * number of locked pages could be changed
887 885 */
888 886 *ppp = NULL;
889 887 return (ENOTSUP);
890 888 }
891 889
892 890 /*
893 891 * First try to find pages in segment page cache, without
894 892 * holding the segment lock.
895 893 */
896 894 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
897 895 S_WRITE, SEGP_FORCE_WIRED);
898 896 if (pplist != NULL) {
899 897 ASSERT(sptd->spt_ppa != NULL);
900 898 ASSERT(sptd->spt_ppa == pplist);
901 899 ppa = sptd->spt_ppa;
902 900 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
903 901 if (ppa[an_idx] == NULL) {
904 902 seg_pinactive(seg, NULL, seg->s_base,
905 903 sptd->spt_amp->size, ppa,
906 904 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
907 905 *ppp = NULL;
908 906 return (ENOTSUP);
909 907 }
910 908 if ((szc = ppa[an_idx]->p_szc) != 0) {
911 909 npgs = page_get_pagecnt(szc);
912 910 an_idx = P2ROUNDUP(an_idx + 1, npgs);
913 911 } else {
914 912 an_idx++;
915 913 }
916 914 }
917 915 /*
918 916 * Since we cache the entire DISM segment, we want to
919 917 * set ppp to point to the first slot that corresponds
920 918 * to the requested addr, i.e. pg_idx.
921 919 */
922 920 *ppp = &(sptd->spt_ppa[pg_idx]);
923 921 return (0);
924 922 }
925 923
926 924 mutex_enter(&sptd->spt_lock);
927 925 /*
928 926 * try to find pages in segment page cache with mutex
929 927 */
930 928 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
931 929 S_WRITE, SEGP_FORCE_WIRED);
932 930 if (pplist != NULL) {
933 931 ASSERT(sptd->spt_ppa != NULL);
934 932 ASSERT(sptd->spt_ppa == pplist);
935 933 ppa = sptd->spt_ppa;
936 934 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
937 935 if (ppa[an_idx] == NULL) {
938 936 mutex_exit(&sptd->spt_lock);
939 937 seg_pinactive(seg, NULL, seg->s_base,
940 938 sptd->spt_amp->size, ppa,
941 939 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
942 940 *ppp = NULL;
943 941 return (ENOTSUP);
944 942 }
945 943 if ((szc = ppa[an_idx]->p_szc) != 0) {
946 944 npgs = page_get_pagecnt(szc);
947 945 an_idx = P2ROUNDUP(an_idx + 1, npgs);
948 946 } else {
949 947 an_idx++;
950 948 }
951 949 }
952 950 /*
953 951 * Since we cache the entire DISM segment, we want to
954 952 * set ppp to point to the first slot that corresponds
955 953 * to the requested addr, i.e. pg_idx.
956 954 */
957 955 mutex_exit(&sptd->spt_lock);
958 956 *ppp = &(sptd->spt_ppa[pg_idx]);
959 957 return (0);
960 958 }
961 959 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
962 960 SEGP_FORCE_WIRED) == SEGP_FAIL) {
963 961 mutex_exit(&sptd->spt_lock);
964 962 *ppp = NULL;
965 963 return (ENOTSUP);
966 964 }
967 965
968 966 /*
969 967 * No need to worry about protections because DISM pages are always rw.
970 968 */
971 969 pl = pplist = NULL;
972 970 amp = sptd->spt_amp;
973 971
974 972 /*
975 973 * Do we need to build the ppa array?
976 974 */
977 975 if (sptd->spt_ppa == NULL) {
978 976 pgcnt_t lpg_cnt = 0;
979 977
980 978 pl_built = 1;
981 979 tot_npages = btopr(sptd->spt_amp->size);
982 980
983 981 ASSERT(sptd->spt_pcachecnt == 0);
984 982 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
985 983 pl = pplist;
986 984
987 985 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
988 986 for (an_idx = 0; an_idx < tot_npages; ) {
989 987 ap = anon_get_ptr(amp->ahp, an_idx);
990 988 /*
991 989 * Cache only mlocked pages. For large pages
992 990 * if one (constituent) page is mlocked
993 991 * all pages for that large page
994 992 * are cached also. This is for quick
995 993 * lookups of ppa array;
996 994 */
997 995 if ((ap != NULL) && (lpg_cnt != 0 ||
998 996 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
999 997
1000 998 swap_xlate(ap, &vp, &off);
1001 999 pp = page_lookup(vp, off, SE_SHARED);
1002 1000 ASSERT(pp != NULL);
1003 1001 if (lpg_cnt == 0) {
1004 1002 lpg_cnt++;
1005 1003 /*
1006 1004 * For a small page, we are done --
1007 1005 * lpg_count is reset to 0 below.
1008 1006 *
1009 1007 * For a large page, we are guaranteed
1010 1008 * to find the anon structures of all
1011 1009 * constituent pages and a non-zero
1012 1010 * lpg_cnt ensures that we don't test
1013 1011 * for mlock for these. We are done
1014 1012 * when lpg_count reaches (npgs + 1).
1015 1013 * If we are not the first constituent
1016 1014 * page, restart at the first one.
1017 1015 */
1018 1016 npgs = page_get_pagecnt(pp->p_szc);
1019 1017 if (!IS_P2ALIGNED(an_idx, npgs)) {
1020 1018 an_idx = P2ALIGN(an_idx, npgs);
1021 1019 page_unlock(pp);
1022 1020 continue;
1023 1021 }
1024 1022 }
1025 1023 if (++lpg_cnt > npgs)
1026 1024 lpg_cnt = 0;
1027 1025
1028 1026 /*
1029 1027 * availrmem is decremented only
1030 1028 * for unlocked pages
1031 1029 */
1032 1030 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1033 1031 claim_availrmem++;
1034 1032 pplist[an_idx] = pp;
1035 1033 }
1036 1034 an_idx++;
1037 1035 }
1038 1036 ANON_LOCK_EXIT(&amp->a_rwlock);
1039 1037
1040 1038 if (claim_availrmem) {
1041 1039 mutex_enter(&freemem_lock);
1042 1040 if (availrmem < tune.t_minarmem + claim_availrmem) {
1043 1041 mutex_exit(&freemem_lock);
1044 1042 ret = ENOTSUP;
1045 1043 claim_availrmem = 0;
1046 1044 goto insert_fail;
1047 1045 } else {
1048 1046 availrmem -= claim_availrmem;
1049 1047 }
1050 1048 mutex_exit(&freemem_lock);
1051 1049 }
1052 1050
1053 1051 sptd->spt_ppa = pl;
1054 1052 } else {
1055 1053 /*
1056 1054 * We already have a valid ppa[].
1057 1055 */
1058 1056 pl = sptd->spt_ppa;
1059 1057 }
1060 1058
1061 1059 ASSERT(pl != NULL);
1062 1060
1063 1061 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1064 1062 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1065 1063 segspt_reclaim);
1066 1064 if (ret == SEGP_FAIL) {
1067 1065 /*
1068 1066 * seg_pinsert failed. We return
1069 1067 * ENOTSUP, so that the as_pagelock() code will
1070 1068 * then try the slower F_SOFTLOCK path.
1071 1069 */
1072 1070 if (pl_built) {
1073 1071 /*
1074 1072 * No one else has referenced the ppa[].
1075 1073 * We created it and we need to destroy it.
1076 1074 */
1077 1075 sptd->spt_ppa = NULL;
1078 1076 }
1079 1077 ret = ENOTSUP;
1080 1078 goto insert_fail;
1081 1079 }
1082 1080
1083 1081 /*
1084 1082 * In either case, we increment softlockcnt on the 'real' segment.
1085 1083 */
1086 1084 sptd->spt_pcachecnt++;
1087 1085 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1088 1086
1089 1087 ppa = sptd->spt_ppa;
1090 1088 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1091 1089 if (ppa[an_idx] == NULL) {
1092 1090 mutex_exit(&sptd->spt_lock);
1093 1091 seg_pinactive(seg, NULL, seg->s_base,
1094 1092 sptd->spt_amp->size,
1095 1093 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1096 1094 *ppp = NULL;
1097 1095 return (ENOTSUP);
1098 1096 }
1099 1097 if ((szc = ppa[an_idx]->p_szc) != 0) {
1100 1098 npgs = page_get_pagecnt(szc);
1101 1099 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1102 1100 } else {
1103 1101 an_idx++;
1104 1102 }
1105 1103 }
1106 1104 /*
1107 1105 * We can now drop the sptd->spt_lock since the ppa[]
1108 1106 * exists and we have incremented pcachecnt.
1109 1107 */
1110 1108 mutex_exit(&sptd->spt_lock);
1111 1109
1112 1110 /*
1113 1111 * Since we cache the entire segment, we want to
1114 1112 * set ppp to point to the first slot that corresponds
1115 1113 * to the requested addr, i.e. pg_idx.
1116 1114 */
1117 1115 *ppp = &(sptd->spt_ppa[pg_idx]);
1118 1116 return (0);
1119 1117
1120 1118 insert_fail:
1121 1119 /*
1122 1120 * We will only reach this code if we tried and failed.
1123 1121 *
1124 1122 * And we can drop the lock on the dummy seg, once we've failed
1125 1123 * to set up a new ppa[].
1126 1124 */
1127 1125 mutex_exit(&sptd->spt_lock);
1128 1126
1129 1127 if (pl_built) {
1130 1128 if (claim_availrmem) {
1131 1129 mutex_enter(&freemem_lock);
1132 1130 availrmem += claim_availrmem;
1133 1131 mutex_exit(&freemem_lock);
1134 1132 }
1135 1133
1136 1134 /*
1137 1135 * We created pl and we need to destroy it.
1138 1136 */
1139 1137 pplist = pl;
1140 1138 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1141 1139 if (pplist[an_idx] != NULL)
1142 1140 page_unlock(pplist[an_idx]);
1143 1141 }
1144 1142 kmem_free(pl, sizeof (page_t *) * tot_npages);
1145 1143 }
1146 1144
1147 1145 if (shmd->shm_softlockcnt <= 0) {
1148 1146 if (AS_ISUNMAPWAIT(seg->s_as)) {
1149 1147 mutex_enter(&seg->s_as->a_contents);
1150 1148 if (AS_ISUNMAPWAIT(seg->s_as)) {
1151 1149 AS_CLRUNMAPWAIT(seg->s_as);
1152 1150 cv_broadcast(&seg->s_as->a_cv);
1153 1151 }
1154 1152 mutex_exit(&seg->s_as->a_contents);
1155 1153 }
1156 1154 }
1157 1155 *ppp = NULL;
1158 1156 return (ret);
1159 1157 }
1160 1158
1161 1159
1162 1160
1163 1161 /*
1164 1162 * return locked pages over a given range.
1165 1163 *
1166 1164 * We will cache the entire ISM segment and save the pplist for the
1167 1165 * entire segment in the ppa field of the underlying ISM segment structure.
1168 1166 * Later, during a call to segspt_reclaim() we will use this ppa array
1169 1167 * to page_unlock() all of the pages and then we will free this ppa list.
1170 1168 */
1171 1169 /*ARGSUSED*/
1172 1170 static int
1173 1171 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1174 1172 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1175 1173 {
1176 1174 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1177 1175 struct seg *sptseg = shmd->shm_sptseg;
1178 1176 struct spt_data *sptd = sptseg->s_data;
1179 1177 pgcnt_t np, page_index, npages;
1180 1178 caddr_t a, spt_base;
1181 1179 struct page **pplist, **pl, *pp;
1182 1180 struct anon_map *amp;
1183 1181 ulong_t anon_index;
1184 1182 int ret = ENOTSUP;
1185 1183 uint_t pl_built = 0;
1186 1184 struct anon *ap;
1187 1185 struct vnode *vp;
1188 1186 u_offset_t off;
1189 1187
1190 1188 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1191 1189 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1192 1190
1193 1191
1194 1192 /*
1195 1193 * We want to lock/unlock the entire ISM segment. Therefore,
1196 1194 * we will be using the underlying sptseg and its base address
1197 1195 * and length for the caching arguments.
1198 1196 */
1199 1197 ASSERT(sptseg);
1200 1198 ASSERT(sptd);
1201 1199
1202 1200 if (sptd->spt_flags & SHM_PAGEABLE) {
1203 1201 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1204 1202 }
1205 1203
1206 1204 page_index = seg_page(seg, addr);
1207 1205 npages = btopr(len);
1208 1206
1209 1207 /*
1210 1208 * check if the request is larger than number of pages covered
1211 1209 * by amp
1212 1210 */
1213 1211 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1214 1212 *ppp = NULL;
1215 1213 return (ENOTSUP);
1216 1214 }
1217 1215
1218 1216 if (type == L_PAGEUNLOCK) {
1219 1217
1220 1218 ASSERT(sptd->spt_ppa != NULL);
1221 1219
1222 1220 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1223 1221 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1224 1222
1225 1223 /*
1226 1224 * If someone is blocked while unmapping, we purge
1227 1225 * segment page cache and thus reclaim pplist synchronously
1228 1226 * without waiting for seg_pasync_thread. This speeds up
1229 1227 * unmapping in cases where munmap(2) is called, while
1230 1228 * raw async i/o is still in progress or where a thread
1231 1229 * exits on data fault in a multithreaded application.
1232 1230 */
1233 1231 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1234 1232 segspt_purge(seg);
1235 1233 }
1236 1234 return (0);
1237 1235 }
1238 1236
1239 1237 /* The L_PAGELOCK case... */
1240 1238
1241 1239 /*
1242 1240 * First try to find pages in segment page cache, without
1243 1241 * holding the segment lock.
1244 1242 */
1245 1243 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1246 1244 S_WRITE, SEGP_FORCE_WIRED);
1247 1245 if (pplist != NULL) {
1248 1246 ASSERT(sptd->spt_ppa == pplist);
1249 1247 ASSERT(sptd->spt_ppa[page_index]);
1250 1248 /*
1251 1249 * Since we cache the entire ISM segment, we want to
1252 1250 * set ppp to point to the first slot that corresponds
1253 1251 * to the requested addr, i.e. page_index.
1254 1252 */
1255 1253 *ppp = &(sptd->spt_ppa[page_index]);
1256 1254 return (0);
1257 1255 }
1258 1256
1259 1257 mutex_enter(&sptd->spt_lock);
1260 1258
1261 1259 /*
1262 1260 * try to find pages in segment page cache
1263 1261 */
1264 1262 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1265 1263 S_WRITE, SEGP_FORCE_WIRED);
1266 1264 if (pplist != NULL) {
1267 1265 ASSERT(sptd->spt_ppa == pplist);
1268 1266 /*
1269 1267 * Since we cache the entire segment, we want to
1270 1268 * set ppp to point to the first slot that corresponds
1271 1269 * to the requested addr, i.e. page_index.
1272 1270 */
1273 1271 mutex_exit(&sptd->spt_lock);
1274 1272 *ppp = &(sptd->spt_ppa[page_index]);
1275 1273 return (0);
1276 1274 }
1277 1275
1278 1276 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1279 1277 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1280 1278 mutex_exit(&sptd->spt_lock);
1281 1279 *ppp = NULL;
1282 1280 return (ENOTSUP);
1283 1281 }
1284 1282
1285 1283 /*
1286 1284 * No need to worry about protections because ISM pages
1287 1285 * are always rw.
1288 1286 */
1289 1287 pl = pplist = NULL;
1290 1288
1291 1289 /*
1292 1290 * Do we need to build the ppa array?
1293 1291 */
1294 1292 if (sptd->spt_ppa == NULL) {
1295 1293 ASSERT(sptd->spt_ppa == pplist);
1296 1294
1297 1295 spt_base = sptseg->s_base;
1298 1296 pl_built = 1;
1299 1297
1300 1298 /*
1301 1299 * availrmem is decremented once during anon_swap_adjust()
1302 1300 * and is incremented during the anon_unresv(), which is
1303 1301 * called from shm_rm_amp() when the segment is destroyed.
1304 1302 */
1305 1303 amp = sptd->spt_amp;
1306 1304 ASSERT(amp != NULL);
1307 1305
1308 1306 /* pcachecnt is protected by sptd->spt_lock */
1309 1307 ASSERT(sptd->spt_pcachecnt == 0);
1310 1308 pplist = kmem_zalloc(sizeof (page_t *)
1311 1309 * btopr(sptd->spt_amp->size), KM_SLEEP);
1312 1310 pl = pplist;
1313 1311
1314 1312 anon_index = seg_page(sptseg, spt_base);
1315 1313
1316 1314 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1317 1315 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1318 1316 a += PAGESIZE, anon_index++, pplist++) {
1319 1317 ap = anon_get_ptr(amp->ahp, anon_index);
1320 1318 ASSERT(ap != NULL);
1321 1319 swap_xlate(ap, &vp, &off);
1322 1320 pp = page_lookup(vp, off, SE_SHARED);
1323 1321 ASSERT(pp != NULL);
1324 1322 *pplist = pp;
1325 1323 }
1326 1324 ANON_LOCK_EXIT(&amp->a_rwlock);
1327 1325
1328 1326 if (a < (spt_base + sptd->spt_amp->size)) {
1329 1327 ret = ENOTSUP;
1330 1328 goto insert_fail;
1331 1329 }
1332 1330 sptd->spt_ppa = pl;
1333 1331 } else {
1334 1332 /*
1335 1333 * We already have a valid ppa[].
1336 1334 */
1337 1335 pl = sptd->spt_ppa;
1338 1336 }
1339 1337
1340 1338 ASSERT(pl != NULL);
1341 1339
1342 1340 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1343 1341 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1344 1342 segspt_reclaim);
1345 1343 if (ret == SEGP_FAIL) {
1346 1344 /*
1347 1345 * seg_pinsert failed. We return
1348 1346 * ENOTSUP, so that the as_pagelock() code will
1349 1347 * then try the slower F_SOFTLOCK path.
1350 1348 */
1351 1349 if (pl_built) {
1352 1350 /*
1353 1351 * No one else has referenced the ppa[].
1354 1352 * We created it and we need to destroy it.
1355 1353 */
1356 1354 sptd->spt_ppa = NULL;
1357 1355 }
1358 1356 ret = ENOTSUP;
1359 1357 goto insert_fail;
1360 1358 }
1361 1359
1362 1360 /*
1363 1361 * In either case, we increment softlockcnt on the 'real' segment.
1364 1362 */
1365 1363 sptd->spt_pcachecnt++;
1366 1364 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1367 1365
1368 1366 /*
1369 1367 * We can now drop the sptd->spt_lock since the ppa[]
1370 1368 * exists and we have incremented pcachecnt.
1371 1369 */
1372 1370 mutex_exit(&sptd->spt_lock);
1373 1371
1374 1372 /*
1375 1373 * Since we cache the entire segment, we want to
1376 1374 * set ppp to point to the first slot that corresponds
1377 1375 * to the requested addr, i.e. page_index.
1378 1376 */
1379 1377 *ppp = &(sptd->spt_ppa[page_index]);
1380 1378 return (0);
1381 1379
1382 1380 insert_fail:
1383 1381 /*
1384 1382 * We will only reach this code if we tried and failed.
1385 1383 *
1386 1384 * And we can drop the lock on the dummy seg, once we've failed
1387 1385 * to set up a new ppa[].
1388 1386 */
1389 1387 mutex_exit(&sptd->spt_lock);
1390 1388
1391 1389 if (pl_built) {
1392 1390 /*
1393 1391 * We created pl and we need to destroy it.
1394 1392 */
1395 1393 pplist = pl;
1396 1394 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1397 1395 while (np) {
1398 1396 page_unlock(*pplist);
1399 1397 np--;
1400 1398 pplist++;
1401 1399 }
1402 1400 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1403 1401 }
1404 1402 if (shmd->shm_softlockcnt <= 0) {
1405 1403 if (AS_ISUNMAPWAIT(seg->s_as)) {
1406 1404 mutex_enter(&seg->s_as->a_contents);
1407 1405 if (AS_ISUNMAPWAIT(seg->s_as)) {
1408 1406 AS_CLRUNMAPWAIT(seg->s_as);
1409 1407 cv_broadcast(&seg->s_as->a_cv);
1410 1408 }
1411 1409 mutex_exit(&seg->s_as->a_contents);
1412 1410 }
1413 1411 }
1414 1412 *ppp = NULL;
1415 1413 return (ret);
1416 1414 }
1417 1415
1418 1416 /*
1419 1417 * purge any cached pages in the I/O page cache
1420 1418 */
1421 1419 static void
1422 1420 segspt_purge(struct seg *seg)
1423 1421 {
1424 1422 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1425 1423 }
1426 1424
1427 1425 static int
1428 1426 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1429 1427 enum seg_rw rw, int async)
1430 1428 {
1431 1429 struct seg *seg = (struct seg *)ptag;
1432 1430 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1433 1431 struct seg *sptseg;
1434 1432 struct spt_data *sptd;
1435 1433 pgcnt_t npages, i, free_availrmem = 0;
1436 1434 int done = 0;
1437 1435
1438 1436 #ifdef lint
1439 1437 addr = addr;
1440 1438 #endif
1441 1439 sptseg = shmd->shm_sptseg;
1442 1440 sptd = sptseg->s_data;
1443 1441 npages = (len >> PAGESHIFT);
1444 1442 ASSERT(npages);
1445 1443 ASSERT(sptd->spt_pcachecnt != 0);
1446 1444 ASSERT(sptd->spt_ppa == pplist);
1447 1445 ASSERT(npages == btopr(sptd->spt_amp->size));
1448 1446 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1449 1447
1450 1448 /*
1451 1449 * Acquire the lock on the dummy seg and destroy the
1452 1450 * ppa array IF this is the last pcachecnt.
1453 1451 */
1454 1452 mutex_enter(&sptd->spt_lock);
1455 1453 if (--sptd->spt_pcachecnt == 0) {
1456 1454 for (i = 0; i < npages; i++) {
1457 1455 if (pplist[i] == NULL) {
1458 1456 continue;
1459 1457 }
1460 1458 if (rw == S_WRITE) {
1461 1459 hat_setrefmod(pplist[i]);
1462 1460 } else {
1463 1461 hat_setref(pplist[i]);
1464 1462 }
1465 1463 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1466 1464 (sptd->spt_ppa_lckcnt[i] == 0))
1467 1465 free_availrmem++;
1468 1466 page_unlock(pplist[i]);
1469 1467 }
1470 1468 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1471 1469 mutex_enter(&freemem_lock);
1472 1470 availrmem += free_availrmem;
1473 1471 mutex_exit(&freemem_lock);
1474 1472 }
1475 1473 /*
1476 1474 * Since we want to cache/uncache the entire ISM segment,
1477 1475 * we will track the pplist in a segspt specific field
1478 1476 * ppa, that is initialized at the time we add an entry to
1479 1477 * the cache.
1480 1478 */
1481 1479 ASSERT(sptd->spt_pcachecnt == 0);
1482 1480 kmem_free(pplist, sizeof (page_t *) * npages);
1483 1481 sptd->spt_ppa = NULL;
1484 1482 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1485 1483 sptd->spt_gen++;
1486 1484 cv_broadcast(&sptd->spt_cv);
1487 1485 done = 1;
1488 1486 }
1489 1487 mutex_exit(&sptd->spt_lock);
1490 1488
1491 1489 /*
1492 1490 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
1493 1491 * may not hold AS lock (in this case async argument is not 0). This
1494 1492 * means if softlockcnt drops to 0 after the decrement below address
1495 1493 * space may get freed. We can't allow it since after softlock
1496 1494 * decrement to 0 we still need to access as structure for possible
1497 1495 * wakeup of unmap waiters. To prevent the disappearance of as we take
1498 1496 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1499 1497 * this mutex as a barrier to make sure this routine completes before
1500 1498 * segment is freed.
1501 1499 *
1502 1500 * The second complication we have to deal with in async case is a
1503 1501 * possibility of missed wake up of unmap wait thread. When we don't
1504 1502 * hold as lock here we may take a_contents lock before unmap wait
1505 1503 * thread that was first to see softlockcnt was still not 0. As a
1506 1504 * result we'll fail to wake up an unmap wait thread. To avoid this
1507 1505 * race we set nounmapwait flag in as structure if we drop softlockcnt
1508 1506 * to 0 if async is not 0. unmapwait thread
1509 1507 * will not block if this flag is set.
1510 1508 */
1511 1509 if (async)
1512 1510 mutex_enter(&shmd->shm_segfree_syncmtx);
1513 1511
1514 1512 /*
1515 1513 * Now decrement softlockcnt.
1516 1514 */
1517 1515 ASSERT(shmd->shm_softlockcnt > 0);
1518 1516 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1519 1517
1520 1518 if (shmd->shm_softlockcnt <= 0) {
1521 1519 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1522 1520 mutex_enter(&seg->s_as->a_contents);
1523 1521 if (async)
1524 1522 AS_SETNOUNMAPWAIT(seg->s_as);
1525 1523 if (AS_ISUNMAPWAIT(seg->s_as)) {
1526 1524 AS_CLRUNMAPWAIT(seg->s_as);
1527 1525 cv_broadcast(&seg->s_as->a_cv);
1528 1526 }
1529 1527 mutex_exit(&seg->s_as->a_contents);
1530 1528 }
1531 1529 }
1532 1530
1533 1531 if (async)
1534 1532 mutex_exit(&shmd->shm_segfree_syncmtx);
1535 1533
1536 1534 return (done);
1537 1535 }
1538 1536
1539 1537 /*
1540 1538 * Do a F_SOFTUNLOCK call over the range requested.
1541 1539 * The range must have already been F_SOFTLOCK'ed.
1542 1540 *
1543 1541 * The calls to acquire and release the anon map lock mutex were
1544 1542 * removed in order to avoid a deadly embrace during a DR
1545 1543 * memory delete operation. (Eg. DR blocks while waiting for a
1546 1544 * exclusive lock on a page that is being used for kaio; the
1547 1545 * thread that will complete the kaio and call segspt_softunlock
1548 1546 * blocks on the anon map lock; another thread holding the anon
1549 1547 * map lock blocks on another page lock via the segspt_shmfault
1550 1548 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1551 1549 *
1552 1550 * The appropriateness of the removal is based upon the following:
1553 1551 * 1. If we are holding a segment's reader lock and the page is held
1554 1552 * shared, then the corresponding element in anonmap which points to
1555 1553 * anon struct cannot change and there is no need to acquire the
1556 1554 * anonymous map lock.
1557 1555 * 2. Threads in segspt_softunlock have a reader lock on the segment
1558 1556 * and already have the shared page lock, so we are guaranteed that
1559 1557 * the anon map slot cannot change and therefore can call anon_get_ptr()
1560 1558 * without grabbing the anonymous map lock.
1561 1559 * 3. Threads that softlock a shared page break copy-on-write, even if
1562 1560 * its a read. Thus cow faults can be ignored with respect to soft
1563 1561 * unlocking, since the breaking of cow means that the anon slot(s) will
1564 1562 * not be shared.
1565 1563 */
1566 1564 static void
1567 1565 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1568 1566 size_t len, enum seg_rw rw)
1569 1567 {
1570 1568 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1571 1569 struct seg *sptseg;
1572 1570 struct spt_data *sptd;
1573 1571 page_t *pp;
1574 1572 caddr_t adr;
1575 1573 struct vnode *vp;
1576 1574 u_offset_t offset;
1577 1575 ulong_t anon_index;
1578 1576 struct anon_map *amp; /* XXX - for locknest */
1579 1577 struct anon *ap = NULL;
1580 1578 pgcnt_t npages;
1581 1579
1582 1580 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1583 1581
1584 1582 sptseg = shmd->shm_sptseg;
1585 1583 sptd = sptseg->s_data;
1586 1584
1587 1585 /*
1588 1586 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1589 1587 * and therefore their pages are SE_SHARED locked
1590 1588 * for the entire life of the segment.
1591 1589 */
1592 1590 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1593 1591 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1594 1592 goto softlock_decrement;
1595 1593 }
1596 1594
1597 1595 /*
1598 1596 * Any thread is free to do a page_find and
1599 1597 * page_unlock() on the pages within this seg.
1600 1598 *
1601 1599 * We are already holding the as->a_lock on the user's
1602 1600 * real segment, but we need to hold the a_lock on the
1603 1601 * underlying dummy as. This is mostly to satisfy the
1604 1602 * underlying HAT layer.
1605 1603 */
1606 1604 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1607 1605 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1608 1606 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1609 1607
1610 1608 amp = sptd->spt_amp;
1611 1609 ASSERT(amp != NULL);
1612 1610 anon_index = seg_page(sptseg, sptseg_addr);
1613 1611
1614 1612 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1615 1613 ap = anon_get_ptr(amp->ahp, anon_index++);
1616 1614 ASSERT(ap != NULL);
1617 1615 swap_xlate(ap, &vp, &offset);
1618 1616
1619 1617 /*
1620 1618 * Use page_find() instead of page_lookup() to
1621 1619 * find the page since we know that it has a
1622 1620 * "shared" lock.
1623 1621 */
1624 1622 pp = page_find(vp, offset);
1625 1623 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1626 1624 if (pp == NULL) {
1627 1625 panic("segspt_softunlock: "
1628 1626 "addr %p, ap %p, vp %p, off %llx",
1629 1627 (void *)adr, (void *)ap, (void *)vp, offset);
1630 1628 /*NOTREACHED*/
1631 1629 }
1632 1630
1633 1631 if (rw == S_WRITE) {
1634 1632 hat_setrefmod(pp);
1635 1633 } else if (rw != S_OTHER) {
1636 1634 hat_setref(pp);
1637 1635 }
1638 1636 page_unlock(pp);
1639 1637 }
1640 1638
1641 1639 softlock_decrement:
1642 1640 npages = btopr(len);
1643 1641 ASSERT(shmd->shm_softlockcnt >= npages);
1644 1642 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1645 1643 if (shmd->shm_softlockcnt == 0) {
1646 1644 /*
1647 1645 * All SOFTLOCKS are gone. Wakeup any waiting
1648 1646 * unmappers so they can try again to unmap.
1649 1647 * Check for waiters first without the mutex
1650 1648 * held so we don't always grab the mutex on
1651 1649 * softunlocks.
1652 1650 */
1653 1651 if (AS_ISUNMAPWAIT(seg->s_as)) {
1654 1652 mutex_enter(&seg->s_as->a_contents);
1655 1653 if (AS_ISUNMAPWAIT(seg->s_as)) {
1656 1654 AS_CLRUNMAPWAIT(seg->s_as);
1657 1655 cv_broadcast(&seg->s_as->a_cv);
1658 1656 }
1659 1657 mutex_exit(&seg->s_as->a_contents);
1660 1658 }
1661 1659 }
1662 1660 }
1663 1661
1664 1662 int
1665 1663 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1666 1664 {
1667 1665 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1668 1666 struct shm_data *shmd;
1669 1667 struct anon_map *shm_amp = shmd_arg->shm_amp;
1670 1668 struct spt_data *sptd;
1671 1669 int error = 0;
1672 1670
1673 1671 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1674 1672
1675 1673 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1676 1674 if (shmd == NULL)
1677 1675 return (ENOMEM);
1678 1676
1679 1677 shmd->shm_sptas = shmd_arg->shm_sptas;
1680 1678 shmd->shm_amp = shm_amp;
1681 1679 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1682 1680
1683 1681 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1684 1682 NULL, 0, seg->s_size);
1685 1683
1686 1684 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1687 1685
1688 1686 seg->s_data = (void *)shmd;
1689 1687 seg->s_ops = &segspt_shmops;
1690 1688 seg->s_szc = shmd->shm_sptseg->s_szc;
1691 1689 sptd = shmd->shm_sptseg->s_data;
1692 1690
1693 1691 if (sptd->spt_flags & SHM_PAGEABLE) {
1694 1692 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1695 1693 KM_NOSLEEP)) == NULL) {
1696 1694 seg->s_data = (void *)NULL;
1697 1695 kmem_free(shmd, (sizeof (*shmd)));
1698 1696 return (ENOMEM);
1699 1697 }
1700 1698 shmd->shm_lckpgs = 0;
1701 1699 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1702 1700 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1703 1701 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1704 1702 seg->s_size, seg->s_szc)) != 0) {
1705 1703 kmem_free(shmd->shm_vpage,
1706 1704 btopr(shm_amp->size));
1707 1705 }
1708 1706 }
1709 1707 } else {
1710 1708 error = hat_share(seg->s_as->a_hat, seg->s_base,
1711 1709 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1712 1710 seg->s_size, seg->s_szc);
1713 1711 }
1714 1712 if (error) {
1715 1713 seg->s_szc = 0;
1716 1714 seg->s_data = (void *)NULL;
1717 1715 kmem_free(shmd, (sizeof (*shmd)));
1718 1716 } else {
1719 1717 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1720 1718 shm_amp->refcnt++;
1721 1719 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1722 1720 }
1723 1721 return (error);
1724 1722 }
1725 1723
1726 1724 int
1727 1725 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1728 1726 {
1729 1727 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1730 1728 int reclaim = 1;
1731 1729
1732 1730 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1733 1731 retry:
1734 1732 if (shmd->shm_softlockcnt > 0) {
1735 1733 if (reclaim == 1) {
1736 1734 segspt_purge(seg);
1737 1735 reclaim = 0;
1738 1736 goto retry;
1739 1737 }
1740 1738 return (EAGAIN);
1741 1739 }
1742 1740
1743 1741 if (ssize != seg->s_size) {
1744 1742 #ifdef DEBUG
1745 1743 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1746 1744 ssize, seg->s_size);
1747 1745 #endif
1748 1746 return (EINVAL);
1749 1747 }
1750 1748
1751 1749 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1752 1750 NULL, 0);
1753 1751 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1754 1752
1755 1753 seg_free(seg);
1756 1754
1757 1755 return (0);
1758 1756 }
1759 1757
1760 1758 void
1761 1759 segspt_shmfree(struct seg *seg)
1762 1760 {
1763 1761 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1764 1762 struct anon_map *shm_amp = shmd->shm_amp;
1765 1763
1766 1764 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1767 1765
1768 1766 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1769 1767 MC_UNLOCK, NULL, 0);
1770 1768
1771 1769 /*
1772 1770 * Need to increment refcnt when attaching
1773 1771 * and decrement when detaching because of dup().
1774 1772 */
1775 1773 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1776 1774 shm_amp->refcnt--;
1777 1775 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1778 1776
1779 1777 if (shmd->shm_vpage) { /* only for DISM */
1780 1778 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1781 1779 shmd->shm_vpage = NULL;
1782 1780 }
1783 1781
1784 1782 /*
1785 1783 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1786 1784 * still working with this segment without holding as lock.
1787 1785 */
1788 1786 ASSERT(shmd->shm_softlockcnt == 0);
1789 1787 mutex_enter(&shmd->shm_segfree_syncmtx);
1790 1788 mutex_destroy(&shmd->shm_segfree_syncmtx);
1791 1789
1792 1790 kmem_free(shmd, sizeof (*shmd));
1793 1791 }
1794 1792
1795 1793 /*ARGSUSED*/
1796 1794 int
1797 1795 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1798 1796 {
1799 1797 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1800 1798
1801 1799 /*
1802 1800 * Shared page table is more than shared mapping.
1803 1801 	 * Individual processes sharing page tables can't change prot
1804 1802 * because there is only one set of page tables.
1805 1803 * This will be allowed after private page table is
1806 1804 * supported.
1807 1805 */
1808 1806 /* need to return correct status error? */
1809 1807 return (0);
1810 1808 }
1811 1809
1812 1810
1813 1811 faultcode_t
1814 1812 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1815 1813 size_t len, enum fault_type type, enum seg_rw rw)
1816 1814 {
1817 1815 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1818 1816 struct seg *sptseg = shmd->shm_sptseg;
1819 1817 struct as *curspt = shmd->shm_sptas;
1820 1818 struct spt_data *sptd = sptseg->s_data;
1821 1819 pgcnt_t npages;
1822 1820 size_t size;
1823 1821 caddr_t segspt_addr, shm_addr;
1824 1822 page_t **ppa;
1825 1823 int i;
1826 1824 ulong_t an_idx = 0;
1827 1825 int err = 0;
1828 1826 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1829 1827 size_t pgsz;
1830 1828 pgcnt_t pgcnt;
1831 1829 caddr_t a;
1832 1830 pgcnt_t pidx;
1833 1831
1834 1832 #ifdef lint
1835 1833 hat = hat;
1836 1834 #endif
1837 1835 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1838 1836
1839 1837 /*
1840 1838 * Because of the way spt is implemented
1841 1839 * the realsize of the segment does not have to be
1842 1840 * equal to the segment size itself. The segment size is
1843 1841 * often in multiples of a page size larger than PAGESIZE.
1844 1842 * The realsize is rounded up to the nearest PAGESIZE
1845 1843 * based on what the user requested. This is a bit of
1846 1844 	 * ugliness that is historical but not easily fixed
1847 1845 * without re-designing the higher levels of ISM.
1848 1846 */
1849 1847 ASSERT(addr >= seg->s_base);
1850 1848 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1851 1849 return (FC_NOMAP);
1852 1850 /*
1853 1851 * For all of the following cases except F_PROT, we need to
1854 1852 * make any necessary adjustments to addr and len
1855 1853 * and get all of the necessary page_t's into an array called ppa[].
1856 1854 *
1857 1855 * The code in shmat() forces base addr and len of ISM segment
1858 1856 * to be aligned to largest page size supported. Therefore,
1859 1857 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1860 1858 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1861 1859 * in large pagesize chunks, or else we will screw up the HAT
1862 1860 * layer by calling hat_memload_array() with differing page sizes
1863 1861 * over a given virtual range.
1864 1862 */
1865 1863 pgsz = page_get_pagesize(sptseg->s_szc);
1866 1864 pgcnt = page_get_pagecnt(sptseg->s_szc);
1867 1865 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1868 1866 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1869 1867 npages = btopr(size);
1870 1868
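Since shmat() aligns an ISM segment's base and length to the largest supported page size, the fault path can safely widen the requested [addr, addr + len) range to whole large pages before calling hat_memload_array(). A small user-space sketch of the P2ALIGN/P2ROUNDUP arithmetic used above, with a made-up 4 MB page size and fault address; the macro bodies follow their sys/sysmacros.h definitions:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the kernel's sys/sysmacros.h P2* macros. */
#define	P2ALIGN(x, align)	((x) & -(align))
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

int
main(void)
{
	/* Assume a 4 MB large page for the ISM segment (illustrative only). */
	uintptr_t pgsz = 4 * 1024 * 1024;
	uintptr_t addr = 0x80123000;	/* faulting address */
	uintptr_t len  = 0x2000;	/* faulting length  */

	uintptr_t shm_addr = P2ALIGN(addr, pgsz);
	uintptr_t size = P2ROUNDUP(addr + len - shm_addr, pgsz);

	/* shm_addr = 0x80000000, size = 0x400000: one whole large page. */
	printf("shm_addr %lx size %lx\n", (unsigned long)shm_addr,
	    (unsigned long)size);
	return (0);
}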
1871 1869 /*
1872 1870 * Now we need to convert from addr in segshm to addr in segspt.
1873 1871 */
1874 1872 an_idx = seg_page(seg, shm_addr);
1875 1873 segspt_addr = sptseg->s_base + ptob(an_idx);
1876 1874
1877 1875 ASSERT((segspt_addr + ptob(npages)) <=
1878 1876 (sptseg->s_base + sptd->spt_realsize));
1879 1877 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1880 1878
1881 1879 switch (type) {
1882 1880
1883 1881 case F_SOFTLOCK:
1884 1882
1885 1883 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1886 1884 /*
1887 1885 * Fall through to the F_INVAL case to load up the hat layer
1888 1886 * entries with the HAT_LOAD_LOCK flag.
1889 1887 */
1890 1888 /* FALLTHRU */
1891 1889 case F_INVAL:
1892 1890
1893 1891 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1894 1892 return (FC_NOMAP);
1895 1893
1896 1894 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1897 1895
1898 1896 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1899 1897 if (err != 0) {
1900 1898 if (type == F_SOFTLOCK) {
1901 1899 atomic_add_long((ulong_t *)(
1902 1900 &(shmd->shm_softlockcnt)), -npages);
1903 1901 }
1904 1902 goto dism_err;
1905 1903 }
1906 1904 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1907 1905 a = segspt_addr;
1908 1906 pidx = 0;
1909 1907 if (type == F_SOFTLOCK) {
1910 1908
1911 1909 /*
1912 1910 * Load up the translation keeping it
1913 1911 * locked and don't unlock the page.
1914 1912 */
1915 1913 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1916 1914 hat_memload_array(sptseg->s_as->a_hat,
1917 1915 a, pgsz, &ppa[pidx], sptd->spt_prot,
1918 1916 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1919 1917 }
1920 1918 } else {
1921 1919 if (hat == seg->s_as->a_hat) {
1922 1920
1923 1921 /*
1924 1922 * Migrate pages marked for migration
1925 1923 */
1926 1924 if (lgrp_optimizations())
1927 1925 page_migrate(seg, shm_addr, ppa,
1928 1926 npages);
1929 1927
1930 1928 /* CPU HAT */
1931 1929 for (; pidx < npages;
1932 1930 a += pgsz, pidx += pgcnt) {
1933 1931 hat_memload_array(sptseg->s_as->a_hat,
1934 1932 a, pgsz, &ppa[pidx],
1935 1933 sptd->spt_prot,
1936 1934 HAT_LOAD_SHARE);
1937 1935 }
1938 1936 } else {
1939 1937 /* XHAT. Pass real address */
1940 1938 hat_memload_array(hat, shm_addr,
1941 1939 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);
1942 1940 }
1943 1941
1944 1942 /*
1945 1943 * And now drop the SE_SHARED lock(s).
1946 1944 */
1947 1945 if (dyn_ism_unmap) {
1948 1946 for (i = 0; i < npages; i++) {
1949 1947 page_unlock(ppa[i]);
1950 1948 }
1951 1949 }
1952 1950 }
1953 1951
1954 1952 if (!dyn_ism_unmap) {
1955 1953 if (hat_share(seg->s_as->a_hat, shm_addr,
1956 1954 curspt->a_hat, segspt_addr, ptob(npages),
1957 1955 seg->s_szc) != 0) {
1958 1956 panic("hat_share err in DISM fault");
1959 1957 /* NOTREACHED */
1960 1958 }
1961 1959 if (type == F_INVAL) {
1962 1960 for (i = 0; i < npages; i++) {
1963 1961 page_unlock(ppa[i]);
1964 1962 }
1965 1963 }
1966 1964 }
1967 1965 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1968 1966 dism_err:
1969 1967 kmem_free(ppa, npages * sizeof (page_t *));
1970 1968 return (err);
1971 1969
1972 1970 case F_SOFTUNLOCK:
1973 1971
1974 1972 /*
1975 1973 * This is a bit ugly, we pass in the real seg pointer,
1976 1974 * but the segspt_addr is the virtual address within the
1977 1975 * dummy seg.
1978 1976 */
1979 1977 segspt_softunlock(seg, segspt_addr, size, rw);
1980 1978 return (0);
1981 1979
1982 1980 case F_PROT:
1983 1981
1984 1982 /*
1985 1983 * This takes care of the unusual case where a user
1986 1984 * allocates a stack in shared memory and a register
1987 1985 * window overflow is written to that stack page before
1988 1986 * it is otherwise modified.
1989 1987 *
1990 1988 * We can get away with this because ISM segments are
1991 1989 * always rw. Other than this unusual case, there
1992 1990 * should be no instances of protection violations.
1993 1991 */
1994 1992 return (0);
1995 1993
1996 1994 default:
1997 1995 #ifdef DEBUG
1998 1996 panic("segspt_dismfault default type?");
1999 1997 #else
2000 1998 return (FC_NOMAP);
2001 1999 #endif
2002 2000 }
2003 2001 }
2004 2002
2005 2003
2006 2004 faultcode_t
2007 2005 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2008 2006 size_t len, enum fault_type type, enum seg_rw rw)
2009 2007 {
2010 2008 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2011 2009 struct seg *sptseg = shmd->shm_sptseg;
2012 2010 struct as *curspt = shmd->shm_sptas;
2013 2011 struct spt_data *sptd = sptseg->s_data;
2014 2012 pgcnt_t npages;
2015 2013 size_t size;
2016 2014 caddr_t sptseg_addr, shm_addr;
2017 2015 page_t *pp, **ppa;
2018 2016 int i;
2019 2017 u_offset_t offset;
2020 2018 ulong_t anon_index = 0;
2021 2019 struct vnode *vp;
2022 2020 struct anon_map *amp; /* XXX - for locknest */
2023 2021 struct anon *ap = NULL;
2024 2022 size_t pgsz;
2025 2023 pgcnt_t pgcnt;
2026 2024 caddr_t a;
2027 2025 pgcnt_t pidx;
2028 2026 size_t sz;
2029 2027
2030 2028 #ifdef lint
2031 2029 hat = hat;
2032 2030 #endif
2033 2031
2034 2032 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2035 2033
2036 2034 if (sptd->spt_flags & SHM_PAGEABLE) {
2037 2035 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2038 2036 }
2039 2037
2040 2038 /*
2041 2039 * Because of the way spt is implemented
2042 2040 * the realsize of the segment does not have to be
2043 2041 * equal to the segment size itself. The segment size is
2044 2042 * often in multiples of a page size larger than PAGESIZE.
2045 2043 * The realsize is rounded up to the nearest PAGESIZE
2046 2044 * based on what the user requested. This is a bit of
2047 2045 	 * ugliness that is historical but not easily fixed
2048 2046 * without re-designing the higher levels of ISM.
2049 2047 */
2050 2048 ASSERT(addr >= seg->s_base);
2051 2049 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2052 2050 return (FC_NOMAP);
2053 2051 /*
2054 2052 * For all of the following cases except F_PROT, we need to
2055 2053 * make any necessary adjustments to addr and len
2056 2054 * and get all of the necessary page_t's into an array called ppa[].
2057 2055 *
2058 2056 * The code in shmat() forces base addr and len of ISM segment
2059 2057 * to be aligned to largest page size supported. Therefore,
2060 2058 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2061 2059 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2062 2060 * in large pagesize chunks, or else we will screw up the HAT
2063 2061 * layer by calling hat_memload_array() with differing page sizes
2064 2062 * over a given virtual range.
2065 2063 */
2066 2064 pgsz = page_get_pagesize(sptseg->s_szc);
2067 2065 pgcnt = page_get_pagecnt(sptseg->s_szc);
2068 2066 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2069 2067 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2070 2068 npages = btopr(size);
2071 2069
2072 2070 /*
2073 2071 * Now we need to convert from addr in segshm to addr in segspt.
2074 2072 */
2075 2073 anon_index = seg_page(seg, shm_addr);
2076 2074 sptseg_addr = sptseg->s_base + ptob(anon_index);
2077 2075
2078 2076 /*
2079 2077 * And now we may have to adjust npages downward if we have
2080 2078 * exceeded the realsize of the segment or initial anon
2081 2079 * allocations.
2082 2080 */
2083 2081 if ((sptseg_addr + ptob(npages)) >
2084 2082 (sptseg->s_base + sptd->spt_realsize))
2085 2083 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2086 2084
2087 2085 npages = btopr(size);
2088 2086
2089 2087 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2090 2088 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2091 2089
2092 2090 switch (type) {
2093 2091
2094 2092 case F_SOFTLOCK:
2095 2093
2096 2094 /*
2097 2095 * availrmem is decremented once during anon_swap_adjust()
2098 2096 * and is incremented during the anon_unresv(), which is
2099 2097 * called from shm_rm_amp() when the segment is destroyed.
2100 2098 */
2101 2099 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2102 2100 /*
2103 2101 * Some platforms assume that ISM pages are SE_SHARED
2104 2102 * locked for the entire life of the segment.
2105 2103 */
2106 2104 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2107 2105 return (0);
2108 2106 /*
2109 2107 * Fall through to the F_INVAL case to load up the hat layer
2110 2108 * entries with the HAT_LOAD_LOCK flag.
2111 2109 */
2112 2110
2113 2111 /* FALLTHRU */
2114 2112 case F_INVAL:
2115 2113
2116 2114 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2117 2115 return (FC_NOMAP);
2118 2116
2119 2117 /*
2120 2118 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2121 2119 * may still rely on this call to hat_share(). That
2122 2120 	 * would imply that those hats can fault on a
2123 2121 * HAT_LOAD_LOCK translation, which would seem
2124 2122 * contradictory.
2125 2123 */
2126 2124 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2127 2125 if (hat_share(seg->s_as->a_hat, seg->s_base,
2128 2126 curspt->a_hat, sptseg->s_base,
2129 2127 sptseg->s_size, sptseg->s_szc) != 0) {
2130 2128 panic("hat_share error in ISM fault");
2131 2129 /*NOTREACHED*/
2132 2130 }
2133 2131 return (0);
2134 2132 }
2135 2133 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2136 2134
2137 2135 /*
2138 2136 * I see no need to lock the real seg,
2139 2137 * here, because all of our work will be on the underlying
2140 2138 * dummy seg.
2141 2139 *
2142 2140 * sptseg_addr and npages now account for large pages.
2143 2141 */
2144 2142 amp = sptd->spt_amp;
2145 2143 ASSERT(amp != NULL);
2146 2144 anon_index = seg_page(sptseg, sptseg_addr);
2147 2145
2148 2146 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2149 2147 for (i = 0; i < npages; i++) {
2150 2148 ap = anon_get_ptr(amp->ahp, anon_index++);
2151 2149 ASSERT(ap != NULL);
2152 2150 swap_xlate(ap, &vp, &offset);
2153 2151 pp = page_lookup(vp, offset, SE_SHARED);
2154 2152 ASSERT(pp != NULL);
2155 2153 ppa[i] = pp;
2156 2154 }
2157 2155 	ANON_LOCK_EXIT(&amp->a_rwlock);
2158 2156 ASSERT(i == npages);
2159 2157
2160 2158 /*
2161 2159 * We are already holding the as->a_lock on the user's
2162 2160 * real segment, but we need to hold the a_lock on the
2163 2161 * underlying dummy as. This is mostly to satisfy the
2164 2162 * underlying HAT layer.
2165 2163 */
2166 2164 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2167 2165 a = sptseg_addr;
2168 2166 pidx = 0;
2169 2167 if (type == F_SOFTLOCK) {
2170 2168 /*
2171 2169 * Load up the translation keeping it
2172 2170 * locked and don't unlock the page.
2173 2171 */
2174 2172 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2175 2173 sz = MIN(pgsz, ptob(npages - pidx));
2176 2174 hat_memload_array(sptseg->s_as->a_hat, a,
2177 2175 sz, &ppa[pidx], sptd->spt_prot,
2178 2176 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2179 2177 }
2180 2178 } else {
2181 2179 if (hat == seg->s_as->a_hat) {
2182 2180
2183 2181 /*
2184 2182 * Migrate pages marked for migration.
2185 2183 */
2186 2184 if (lgrp_optimizations())
2187 2185 page_migrate(seg, shm_addr, ppa,
2188 2186 npages);
2189 2187
2190 2188 /* CPU HAT */
2191 2189 for (; pidx < npages;
2192 2190 a += pgsz, pidx += pgcnt) {
2193 2191 sz = MIN(pgsz, ptob(npages - pidx));
2194 2192 hat_memload_array(sptseg->s_as->a_hat,
2195 2193 a, sz, &ppa[pidx],
2196 2194 sptd->spt_prot, HAT_LOAD_SHARE);
2197 2195 }
2198 2196 } else {
2199 2197 /* XHAT. Pass real address */
2200 2198 hat_memload_array(hat, shm_addr,
2201 2199 ptob(npages), ppa, sptd->spt_prot,
2202 2200 HAT_LOAD_SHARE);
2203 2201 }
2204 2202
2205 2203 /*
2206 2204 * And now drop the SE_SHARED lock(s).
2207 2205 */
2208 2206 for (i = 0; i < npages; i++)
2209 2207 page_unlock(ppa[i]);
2210 2208 }
2211 2209 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2212 2210
2213 2211 kmem_free(ppa, sizeof (page_t *) * npages);
2214 2212 return (0);
2215 2213 case F_SOFTUNLOCK:
2216 2214
2217 2215 /*
2218 2216 * This is a bit ugly, we pass in the real seg pointer,
2219 2217 * but the sptseg_addr is the virtual address within the
2220 2218 * dummy seg.
2221 2219 */
2222 2220 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2223 2221 return (0);
2224 2222
2225 2223 case F_PROT:
2226 2224
2227 2225 /*
2228 2226 * This takes care of the unusual case where a user
2229 2227 * allocates a stack in shared memory and a register
2230 2228 * window overflow is written to that stack page before
2231 2229 * it is otherwise modified.
2232 2230 *
2233 2231 * We can get away with this because ISM segments are
2234 2232 * always rw. Other than this unusual case, there
2235 2233 * should be no instances of protection violations.
2236 2234 */
2237 2235 return (0);
2238 2236
2239 2237 default:
2240 2238 #ifdef DEBUG
2241 2239 cmn_err(CE_WARN, "segspt_shmfault default type?");
2242 2240 #endif
2243 2241 return (FC_NOMAP);
2244 2242 }
2245 2243 }
2246 2244
2247 2245 /*ARGSUSED*/
2248 2246 static faultcode_t
2249 2247 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2250 2248 {
2251 2249 return (0);
2252 2250 }
2253 2251
2254 2252 /*ARGSUSED*/
2255 2253 static int
2256 2254 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2257 2255 {
2258 2256 return (0);
2259 2257 }
2260 2258
2261 2259 /*ARGSUSED*/
2262 2260 static size_t
2263 2261 segspt_shmswapout(struct seg *seg)
2264 2262 {
2265 2263 return (0);
2266 2264 }
2267 2265
2268 2266 /*
2269 2267 * duplicate the shared page tables
2270 2268 */
2271 2269 int
2272 2270 segspt_shmdup(struct seg *seg, struct seg *newseg)
2273 2271 {
2274 2272 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2275 2273 struct anon_map *amp = shmd->shm_amp;
2276 2274 struct shm_data *shmd_new;
2277 2275 struct seg *spt_seg = shmd->shm_sptseg;
2278 2276 struct spt_data *sptd = spt_seg->s_data;
2279 2277 int error = 0;
2280 2278
2281 2279 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2282 2280
2283 2281 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2284 2282 newseg->s_data = (void *)shmd_new;
2285 2283 shmd_new->shm_sptas = shmd->shm_sptas;
2286 2284 shmd_new->shm_amp = amp;
2287 2285 shmd_new->shm_sptseg = shmd->shm_sptseg;
2288 2286 newseg->s_ops = &segspt_shmops;
2289 2287 newseg->s_szc = seg->s_szc;
2290 2288 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2291 2289
2292 2290 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2293 2291 amp->refcnt++;
2294 2292 	ANON_LOCK_EXIT(&amp->a_rwlock);
2295 2293
2296 2294 if (sptd->spt_flags & SHM_PAGEABLE) {
2297 2295 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2298 2296 shmd_new->shm_lckpgs = 0;
2299 2297 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2300 2298 if ((error = hat_share(newseg->s_as->a_hat,
2301 2299 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2302 2300 seg->s_size, seg->s_szc)) != 0) {
2303 2301 kmem_free(shmd_new->shm_vpage,
2304 2302 btopr(amp->size));
2305 2303 }
2306 2304 }
2307 2305 return (error);
2308 2306 } else {
2309 2307 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2310 2308 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2311 2309 seg->s_szc));
2312 2310
2313 2311 }
2314 2312 }
2315 2313
2316 2314 /*ARGSUSED*/
2317 2315 int
2318 2316 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2319 2317 {
2320 2318 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2321 2319 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2322 2320
2323 2321 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2324 2322
2325 2323 /*
2326 2324 * ISM segment is always rw.
2327 2325 */
2328 2326 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2329 2327 }
2330 2328
2331 2329 /*
2332 2330 * Return an array of locked large pages, for empty slots allocate
2333 2331 * private zero-filled anon pages.
2334 2332 */
2335 2333 static int
2336 2334 spt_anon_getpages(
2337 2335 struct seg *sptseg,
2338 2336 caddr_t sptaddr,
2339 2337 size_t len,
2340 2338 page_t *ppa[])
2341 2339 {
2342 2340 struct spt_data *sptd = sptseg->s_data;
2343 2341 struct anon_map *amp = sptd->spt_amp;
2344 2342 enum seg_rw rw = sptd->spt_prot;
2345 2343 uint_t szc = sptseg->s_szc;
2346 2344 size_t pg_sz, share_sz = page_get_pagesize(szc);
2347 2345 pgcnt_t lp_npgs;
2348 2346 caddr_t lp_addr, e_sptaddr;
2349 2347 uint_t vpprot, ppa_szc = 0;
2350 2348 struct vpage *vpage = NULL;
2351 2349 ulong_t j, ppa_idx;
2352 2350 int err, ierr = 0;
2353 2351 pgcnt_t an_idx;
2354 2352 anon_sync_obj_t cookie;
2355 2353 int anon_locked = 0;
2356 2354 pgcnt_t amp_pgs;
2357 2355
2358 2356
2359 2357 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2360 2358 ASSERT(len != 0);
2361 2359
2362 2360 pg_sz = share_sz;
2363 2361 lp_npgs = btop(pg_sz);
2364 2362 lp_addr = sptaddr;
2365 2363 e_sptaddr = sptaddr + len;
2366 2364 an_idx = seg_page(sptseg, sptaddr);
2367 2365 ppa_idx = 0;
2368 2366
2369 2367 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2370 2368
2371 2369 amp_pgs = page_get_pagecnt(amp->a_szc);
2372 2370
2373 2371 /*CONSTCOND*/
2374 2372 while (1) {
2375 2373 for (; lp_addr < e_sptaddr;
2376 2374 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2377 2375
2378 2376 /*
2379 2377 * If we're currently locked, and we get to a new
2380 2378 * page, unlock our current anon chunk.
2381 2379 */
2382 2380 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2383 2381 anon_array_exit(&cookie);
2384 2382 anon_locked = 0;
2385 2383 }
2386 2384 if (!anon_locked) {
2387 2385 anon_array_enter(amp, an_idx, &cookie);
2388 2386 anon_locked = 1;
2389 2387 }
2390 2388 ppa_szc = (uint_t)-1;
2391 2389 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2392 2390 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2393 2391 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2394 2392
2395 2393 if (ierr != 0) {
2396 2394 if (ierr > 0) {
2397 2395 err = FC_MAKE_ERR(ierr);
2398 2396 goto lpgs_err;
2399 2397 }
2400 2398 break;
2401 2399 }
2402 2400 }
2403 2401 if (lp_addr == e_sptaddr) {
2404 2402 break;
2405 2403 }
2406 2404 ASSERT(lp_addr < e_sptaddr);
2407 2405
2408 2406 /*
2409 2407 * ierr == -1 means we failed to allocate a large page.
2410 2408 * so do a size down operation.
2411 2409 *
2412 2410 * ierr == -2 means some other process that privately shares
2413 2411 * pages with this process has allocated a larger page and we
2414 2412 * need to retry with larger pages. So do a size up
2415 2413 * operation. This relies on the fact that large pages are
2416 2414 * never partially shared i.e. if we share any constituent
2417 2415 * page of a large page with another process we must share the
2418 2416 * entire large page. Note this cannot happen for SOFTLOCK
2419 2417 * case, unless current address (lpaddr) is at the beginning
2420 2418 * of the next page size boundary because the other process
2421 2419 * couldn't have relocated locked pages.
2422 2420 */
2423 2421 ASSERT(ierr == -1 || ierr == -2);
2424 2422 if (segvn_anypgsz) {
2425 2423 ASSERT(ierr == -2 || szc != 0);
2426 2424 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2427 2425 szc = (ierr == -1) ? szc - 1 : szc + 1;
2428 2426 } else {
2429 2427 /*
2430 2428 * For faults and segvn_anypgsz == 0
2431 2429 * we need to be careful not to loop forever
2432 2430 * if existing page is found with szc other
2433 2431 * than 0 or seg->s_szc. This could be due
2434 2432 * to page relocations on behalf of DR or
2435 2433 * more likely large page creation. For this
2436 2434 * case simply re-size to existing page's szc
2437 2435 * if returned by anon_map_getpages().
2438 2436 */
2439 2437 if (ppa_szc == (uint_t)-1) {
2440 2438 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2441 2439 } else {
2442 2440 ASSERT(ppa_szc <= sptseg->s_szc);
2443 2441 ASSERT(ierr == -2 || ppa_szc < szc);
2444 2442 ASSERT(ierr == -1 || ppa_szc > szc);
2445 2443 szc = ppa_szc;
2446 2444 }
2447 2445 }
2448 2446 pg_sz = page_get_pagesize(szc);
2449 2447 lp_npgs = btop(pg_sz);
2450 2448 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2451 2449 }
2452 2450 if (anon_locked) {
2453 2451 anon_array_exit(&cookie);
2454 2452 }
2455 2453 	ANON_LOCK_EXIT(&amp->a_rwlock);
2456 2454 return (0);
2457 2455
2458 2456 lpgs_err:
2459 2457 if (anon_locked) {
2460 2458 anon_array_exit(&cookie);
2461 2459 }
2462 2460 	ANON_LOCK_EXIT(&amp->a_rwlock);
2463 2461 for (j = 0; j < ppa_idx; j++)
2464 2462 page_unlock(ppa[j]);
2465 2463 return (err);
2466 2464 }
2467 2465
2468 2466 /*
2469 2467 * count the number of bytes in a set of spt pages that are currently not
2470 2468 * locked
2471 2469 */
2472 2470 static rctl_qty_t
2473 2471 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2474 2472 {
2475 2473 ulong_t i;
2476 2474 rctl_qty_t unlocked = 0;
2477 2475
2478 2476 for (i = 0; i < npages; i++) {
2479 2477 if (ppa[i]->p_lckcnt == 0)
2480 2478 unlocked += PAGESIZE;
2481 2479 }
2482 2480 return (unlocked);
2483 2481 }
2484 2482
2485 2483 extern u_longlong_t randtick(void);
2486 2484 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2487 2485 #define NLCK (NCPU_P2)
2488 2486 /* Random number with a range [0, n-1], n must be power of two */
2489 2487 #define RAND_P2(n) \
2490 2488 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
2491 2489
2492 2490 int
2493 2491 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2494 2492 page_t **ppa, ulong_t *lockmap, size_t pos,
2495 2493 rctl_qty_t *locked)
2496 2494 {
2497 2495 struct shm_data *shmd = seg->s_data;
2498 2496 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2499 2497 ulong_t i;
2500 2498 int kernel;
2501 2499 pgcnt_t nlck = 0;
2502 2500 int rv = 0;
2503 2501 int use_reserved = 1;
2504 2502
2505 2503 /* return the number of bytes actually locked */
2506 2504 *locked = 0;
2507 2505
2508 2506 /*
2509 2507 * To avoid contention on freemem_lock, availrmem and pages_locked
2510 2508 * global counters are updated only every nlck locked pages instead of
2511 2509 * every time. Reserve nlck locks up front and deduct from this
2512 2510 * reservation for each page that requires a lock. When the reservation
2513 2511 * is consumed, reserve again. nlck is randomized, so the competing
2514 2512 * threads do not fall into a cyclic lock contention pattern. When
2515 2513 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2516 2514 * is used to lock pages.
2517 2515 */
2518 2516 for (i = 0; i < npages; anon_index++, pos++, i++) {
2519 2517 if (nlck == 0 && use_reserved == 1) {
2520 2518 nlck = NLCK + RAND_P2(NLCK);
2521 2519 /* if fewer loops left, decrease nlck */
2522 2520 nlck = MIN(nlck, npages - i);
2523 2521 /*
2524 2522 * Reserve nlck locks up front and deduct from this
2525 2523 * reservation for each page that requires a lock. When
2526 2524 * the reservation is consumed, reserve again.
2527 2525 */
2528 2526 mutex_enter(&freemem_lock);
2529 2527 if ((availrmem - nlck) < pages_pp_maximum) {
2530 2528 /* Do not do advance memory reserves */
2531 2529 use_reserved = 0;
2532 2530 } else {
2533 2531 availrmem -= nlck;
2534 2532 pages_locked += nlck;
2535 2533 }
2536 2534 mutex_exit(&freemem_lock);
2537 2535 }
2538 2536 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2539 2537 if (sptd->spt_ppa_lckcnt[anon_index] <
2540 2538 (ushort_t)DISM_LOCK_MAX) {
2541 2539 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2542 2540 (ushort_t)DISM_LOCK_MAX) {
2543 2541 cmn_err(CE_WARN,
2544 2542 "DISM page lock limit "
2545 2543 "reached on DISM offset 0x%lx\n",
2546 2544 anon_index << PAGESHIFT);
2547 2545 }
2548 2546 kernel = (sptd->spt_ppa &&
2549 2547 sptd->spt_ppa[anon_index]);
2550 2548 if (!page_pp_lock(ppa[i], 0, kernel ||
2551 2549 use_reserved)) {
2552 2550 sptd->spt_ppa_lckcnt[anon_index]--;
2553 2551 rv = EAGAIN;
2554 2552 break;
2555 2553 }
2556 2554 /* if this is a newly locked page, count it */
2557 2555 if (ppa[i]->p_lckcnt == 1) {
2558 2556 if (kernel == 0 && use_reserved == 1)
2559 2557 nlck--;
2560 2558 *locked += PAGESIZE;
2561 2559 }
2562 2560 shmd->shm_lckpgs++;
2563 2561 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2564 2562 if (lockmap != NULL)
2565 2563 BT_SET(lockmap, pos);
2566 2564 }
2567 2565 }
2568 2566 }
2569 2567 /* Return unused lock reservation */
2570 2568 if (nlck != 0 && use_reserved == 1) {
2571 2569 mutex_enter(&freemem_lock);
2572 2570 availrmem += nlck;
2573 2571 pages_locked -= nlck;
2574 2572 mutex_exit(&freemem_lock);
2575 2573 }
2576 2574
2577 2575 return (rv);
2578 2576 }
2579 2577
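spt_lockpages() above keeps freemem_lock traffic low by reserving a randomized batch of NLCK to 2*NLCK-1 locks at a time, drawing individual pages against that reservation, and returning whatever is left at the end. A rough user-space sketch of the batching idea only; the rand()-based randomizer, the fake availrmem counter, and charge_pages() are illustrative stand-ins, not the kernel code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define	NLCK		8			/* stand-in for NCPU_P2 */
#define	RAND_P2(n)	(rand() & ((n) - 1))	/* n must be a power of two */

static pthread_mutex_t freemem_lock = PTHREAD_MUTEX_INITIALIZER;
static long availrmem = 1024 * 1024;		/* fake global page counter */

/*
 * Charge npages pages against availrmem, taking freemem_lock roughly once
 * per batch instead of once per page.  Randomizing the batch size keeps
 * concurrent callers from hitting the lock in lock-step.
 */
static void
charge_pages(long npages)
{
	long reserved = 0;

	for (long i = 0; i < npages; i++) {
		if (reserved == 0) {
			long batch = NLCK + RAND_P2(NLCK);
			if (batch > npages - i)
				batch = npages - i;
			pthread_mutex_lock(&freemem_lock);
			availrmem -= batch;
			pthread_mutex_unlock(&freemem_lock);
			reserved = batch;
		}
		/* ...lock page i here; in spt_lockpages() the reservation */
		/* is consumed only for pages that become newly locked.    */
		reserved--;
	}
	if (reserved != 0) {			/* return the unused part */
		pthread_mutex_lock(&freemem_lock);
		availrmem += reserved;
		pthread_mutex_unlock(&freemem_lock);
	}
}

int
main(void)
{
	charge_pages(100);
	printf("availrmem now %ld\n", availrmem);
	return (0);
}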
2580 2578 int
2581 2579 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2582 2580 rctl_qty_t *unlocked)
2583 2581 {
2584 2582 struct shm_data *shmd = seg->s_data;
2585 2583 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2586 2584 struct anon_map *amp = sptd->spt_amp;
2587 2585 struct anon *ap;
2588 2586 struct vnode *vp;
2589 2587 u_offset_t off;
2590 2588 struct page *pp;
2591 2589 int kernel;
2592 2590 anon_sync_obj_t cookie;
2593 2591 ulong_t i;
2594 2592 pgcnt_t nlck = 0;
2595 2593 pgcnt_t nlck_limit = NLCK;
2596 2594
2597 2595 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2598 2596 for (i = 0; i < npages; i++, anon_index++) {
2599 2597 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2600 2598 anon_array_enter(amp, anon_index, &cookie);
2601 2599 ap = anon_get_ptr(amp->ahp, anon_index);
2602 2600 ASSERT(ap);
2603 2601
2604 2602 swap_xlate(ap, &vp, &off);
2605 2603 anon_array_exit(&cookie);
2606 2604 pp = page_lookup(vp, off, SE_SHARED);
2607 2605 ASSERT(pp);
2608 2606 /*
2609 2607 * availrmem is decremented only for pages which are not
2610 2608 * in seg pcache, for pages in seg pcache availrmem was
2611 2609 * decremented in _dismpagelock()
2612 2610 */
2613 2611 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2614 2612 ASSERT(pp->p_lckcnt > 0);
2615 2613
2616 2614 			 * unlock the page but do not change availrmem; we do it
2617 2615 * lock page but do not change availrmem, we do it
2618 2616 * ourselves every nlck loops.
2619 2617 */
2620 2618 page_pp_unlock(pp, 0, 1);
2621 2619 if (pp->p_lckcnt == 0) {
2622 2620 if (kernel == 0)
2623 2621 nlck++;
2624 2622 *unlocked += PAGESIZE;
2625 2623 }
2626 2624 page_unlock(pp);
2627 2625 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2628 2626 sptd->spt_ppa_lckcnt[anon_index]--;
2629 2627 shmd->shm_lckpgs--;
2630 2628 }
2631 2629
2632 2630 /*
2633 2631 * To reduce freemem_lock contention, do not update availrmem
2634 2632 * until at least NLCK pages have been unlocked.
2635 2633 * 1. No need to update if nlck is zero
2636 2634 * 2. Always update if the last iteration
2637 2635 */
2638 2636 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2639 2637 mutex_enter(&freemem_lock);
2640 2638 availrmem += nlck;
2641 2639 pages_locked -= nlck;
2642 2640 mutex_exit(&freemem_lock);
2643 2641 nlck = 0;
2644 2642 nlck_limit = NLCK + RAND_P2(NLCK);
2645 2643 }
2646 2644 }
2647 2645 	ANON_LOCK_EXIT(&amp->a_rwlock);
2648 2646
2649 2647 return (0);
2650 2648 }
2651 2649
2652 2650 /*ARGSUSED*/
2653 2651 static int
2654 2652 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2655 2653 int attr, int op, ulong_t *lockmap, size_t pos)
2656 2654 {
2657 2655 struct shm_data *shmd = seg->s_data;
2658 2656 struct seg *sptseg = shmd->shm_sptseg;
2659 2657 struct spt_data *sptd = sptseg->s_data;
2660 2658 struct kshmid *sp = sptd->spt_amp->a_sp;
2661 2659 pgcnt_t npages, a_npages;
2662 2660 page_t **ppa;
2663 2661 pgcnt_t an_idx, a_an_idx, ppa_idx;
2664 2662 caddr_t spt_addr, a_addr; /* spt and aligned address */
2665 2663 size_t a_len; /* aligned len */
2666 2664 size_t share_sz;
2667 2665 ulong_t i;
2668 2666 int sts = 0;
2669 2667 rctl_qty_t unlocked = 0;
2670 2668 rctl_qty_t locked = 0;
2671 2669 struct proc *p = curproc;
2672 2670 kproject_t *proj;
2673 2671
2674 2672 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2675 2673 ASSERT(sp != NULL);
2676 2674
2677 2675 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2678 2676 return (0);
2679 2677 }
2680 2678
2681 2679 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2682 2680 an_idx = seg_page(seg, addr);
2683 2681 npages = btopr(len);
2684 2682
2685 2683 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2686 2684 return (ENOMEM);
2687 2685 }
2688 2686
2689 2687 /*
2690 2688 * A shm's project never changes, so no lock needed.
2691 2689 * The shm has a hold on the project, so it will not go away.
2692 2690 * Since we have a mapping to shm within this zone, we know
2693 2691 * that the zone will not go away.
2694 2692 */
2695 2693 proj = sp->shm_perm.ipc_proj;
2696 2694
2697 2695 if (op == MC_LOCK) {
2698 2696
2699 2697 /*
2700 2698 * Need to align addr and size request if they are not
2701 2699 		 * aligned so we can always allocate large page(s); however,
2702 2700 		 * we only lock what was requested in the initial request.
2703 2701 */
2704 2702 share_sz = page_get_pagesize(sptseg->s_szc);
2705 2703 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2706 2704 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2707 2705 share_sz);
2708 2706 a_npages = btop(a_len);
2709 2707 a_an_idx = seg_page(seg, a_addr);
2710 2708 spt_addr = sptseg->s_base + ptob(a_an_idx);
2711 2709 ppa_idx = an_idx - a_an_idx;
2712 2710
2713 2711 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2714 2712 KM_NOSLEEP)) == NULL) {
2715 2713 return (ENOMEM);
2716 2714 }
2717 2715
2718 2716 /*
2719 2717 * Don't cache any new pages for IO and
2720 2718 * flush any cached pages.
2721 2719 */
2722 2720 mutex_enter(&sptd->spt_lock);
2723 2721 if (sptd->spt_ppa != NULL)
2724 2722 sptd->spt_flags |= DISM_PPA_CHANGED;
2725 2723
2726 2724 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2727 2725 if (sts != 0) {
2728 2726 mutex_exit(&sptd->spt_lock);
2729 2727 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2730 2728 return (sts);
2731 2729 }
2732 2730
2733 2731 mutex_enter(&sp->shm_mlock);
2734 2732 /* enforce locked memory rctl */
2735 2733 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2736 2734
2737 2735 mutex_enter(&p->p_lock);
2738 2736 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2739 2737 mutex_exit(&p->p_lock);
2740 2738 sts = EAGAIN;
2741 2739 } else {
2742 2740 mutex_exit(&p->p_lock);
2743 2741 sts = spt_lockpages(seg, an_idx, npages,
2744 2742 &ppa[ppa_idx], lockmap, pos, &locked);
2745 2743
2746 2744 /*
2747 2745 * correct locked count if not all pages could be
2748 2746 * locked
2749 2747 */
2750 2748 if ((unlocked - locked) > 0) {
2751 2749 rctl_decr_locked_mem(NULL, proj,
2752 2750 (unlocked - locked), 0);
2753 2751 }
2754 2752 }
2755 2753 /*
2756 2754 * unlock pages
2757 2755 */
2758 2756 for (i = 0; i < a_npages; i++)
2759 2757 page_unlock(ppa[i]);
2760 2758 if (sptd->spt_ppa != NULL)
2761 2759 sptd->spt_flags |= DISM_PPA_CHANGED;
2762 2760 mutex_exit(&sp->shm_mlock);
2763 2761 mutex_exit(&sptd->spt_lock);
2764 2762
2765 2763 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2766 2764
2767 2765 } else if (op == MC_UNLOCK) { /* unlock */
2768 2766 page_t **ppa;
2769 2767
2770 2768 mutex_enter(&sptd->spt_lock);
2771 2769 if (shmd->shm_lckpgs == 0) {
2772 2770 mutex_exit(&sptd->spt_lock);
2773 2771 return (0);
2774 2772 }
2775 2773 /*
2776 2774 * Don't cache new IO pages.
2777 2775 */
2778 2776 if (sptd->spt_ppa != NULL)
2779 2777 sptd->spt_flags |= DISM_PPA_CHANGED;
2780 2778
2781 2779 mutex_enter(&sp->shm_mlock);
2782 2780 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2783 2781 if ((ppa = sptd->spt_ppa) != NULL)
2784 2782 sptd->spt_flags |= DISM_PPA_CHANGED;
2785 2783 mutex_exit(&sptd->spt_lock);
2786 2784
2787 2785 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2788 2786 mutex_exit(&sp->shm_mlock);
2789 2787
2790 2788 if (ppa != NULL)
2791 2789 seg_ppurge_wiredpp(ppa);
2792 2790 }
2793 2791 return (sts);
2794 2792 }
2795 2793
2796 2794 /*ARGSUSED*/
2797 2795 int
2798 2796 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2799 2797 {
2800 2798 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2801 2799 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2802 2800 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2803 2801
2804 2802 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2805 2803
2806 2804 /*
2807 2805 * ISM segment is always rw.
2808 2806 */
2809 2807 while (--pgno >= 0)
2810 2808 *protv++ = sptd->spt_prot;
2811 2809 return (0);
2812 2810 }
2813 2811
2814 2812 /*ARGSUSED*/
2815 2813 u_offset_t
2816 2814 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2817 2815 {
2818 2816 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2819 2817
2820 2818 /* Offset does not matter in ISM memory */
2821 2819
2822 2820 return ((u_offset_t)0);
2823 2821 }
2824 2822
2825 2823 /* ARGSUSED */
2826 2824 int
2827 2825 segspt_shmgettype(struct seg *seg, caddr_t addr)
2828 2826 {
2829 2827 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2830 2828 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2831 2829
2832 2830 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2833 2831
2834 2832 /*
2835 2833 * The shared memory mapping is always MAP_SHARED, SWAP is only
2836 2834 * reserved for DISM
2837 2835 */
2838 2836 return (MAP_SHARED |
2839 2837 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2840 2838 }
2841 2839
2842 2840 /*ARGSUSED*/
2843 2841 int
2844 2842 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2845 2843 {
2846 2844 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2847 2845 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2848 2846
2849 2847 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2850 2848
2851 2849 *vpp = sptd->spt_vp;
2852 2850 return (0);
2853 2851 }
2854 2852
2855 2853 /*
2856 2854 * We need to wait for pending IO to complete to a DISM segment in order for
2857 2855 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2858 2856 * than enough time to wait.
2859 2857 */
2860 2858 static clock_t spt_pcache_wait = 120;
2861 2859
2862 2860 /*ARGSUSED*/
2863 2861 static int
2864 2862 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2865 2863 {
2866 2864 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2867 2865 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2868 2866 struct anon_map *amp;
2869 2867 pgcnt_t pg_idx;
2870 2868 ushort_t gen;
2871 2869 clock_t end_lbolt;
2872 2870 int writer;
2873 2871 page_t **ppa;
2874 2872
2875 2873 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2876 2874
2877 2875 if (behav == MADV_FREE) {
2878 2876 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2879 2877 return (0);
2880 2878
2881 2879 amp = sptd->spt_amp;
2882 2880 pg_idx = seg_page(seg, addr);
2883 2881
2884 2882 mutex_enter(&sptd->spt_lock);
2885 2883 if ((ppa = sptd->spt_ppa) == NULL) {
2886 2884 mutex_exit(&sptd->spt_lock);
2887 2885 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2888 2886 anon_disclaim(amp, pg_idx, len);
2889 2887 			ANON_LOCK_EXIT(&amp->a_rwlock);
2890 2888 return (0);
2891 2889 }
2892 2890
2893 2891 sptd->spt_flags |= DISM_PPA_CHANGED;
2894 2892 gen = sptd->spt_gen;
2895 2893
2896 2894 mutex_exit(&sptd->spt_lock);
2897 2895
2898 2896 /*
2899 2897 * Purge all DISM cached pages
2900 2898 */
2901 2899 seg_ppurge_wiredpp(ppa);
2902 2900
2903 2901 /*
2904 2902 * Drop the AS_LOCK so that other threads can grab it
2905 2903 * in the as_pageunlock path and hopefully get the segment
2906 2904 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2907 2905 * to keep this segment resident.
2908 2906 */
2909 2907 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2910 2908 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2911 2909 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2912 2910
2913 2911 mutex_enter(&sptd->spt_lock);
2914 2912
2915 2913 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2916 2914
2917 2915 /*
2918 2916 * Try to wait for pages to get kicked out of the seg_pcache.
2919 2917 */
2920 2918 while (sptd->spt_gen == gen &&
2921 2919 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2922 2920 ddi_get_lbolt() < end_lbolt) {
2923 2921 if (!cv_timedwait_sig(&sptd->spt_cv,
2924 2922 &sptd->spt_lock, end_lbolt)) {
2925 2923 break;
2926 2924 }
2927 2925 }
2928 2926
2929 2927 mutex_exit(&sptd->spt_lock);
2930 2928
2931 2929 /* Regrab the AS_LOCK and release our hold on the segment */
2932 2930 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2933 2931 writer ? RW_WRITER : RW_READER);
2934 2932 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2935 2933 if (shmd->shm_softlockcnt <= 0) {
2936 2934 if (AS_ISUNMAPWAIT(seg->s_as)) {
2937 2935 mutex_enter(&seg->s_as->a_contents);
2938 2936 if (AS_ISUNMAPWAIT(seg->s_as)) {
2939 2937 AS_CLRUNMAPWAIT(seg->s_as);
2940 2938 cv_broadcast(&seg->s_as->a_cv);
2941 2939 }
2942 2940 mutex_exit(&seg->s_as->a_contents);
2943 2941 }
2944 2942 }
2945 2943
2946 2944 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2947 2945 anon_disclaim(amp, pg_idx, len);
2948 2946 		ANON_LOCK_EXIT(&amp->a_rwlock);
2949 2947 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2950 2948 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2951 2949 int already_set;
2952 2950 ulong_t anon_index;
2953 2951 lgrp_mem_policy_t policy;
2954 2952 caddr_t shm_addr;
2955 2953 size_t share_size;
2956 2954 size_t size;
2957 2955 struct seg *sptseg = shmd->shm_sptseg;
2958 2956 caddr_t sptseg_addr;
2959 2957
2960 2958 /*
2961 2959 * Align address and length to page size of underlying segment
2962 2960 */
2963 2961 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2964 2962 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2965 2963 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2966 2964 share_size);
2967 2965
2968 2966 amp = shmd->shm_amp;
2969 2967 anon_index = seg_page(seg, shm_addr);
2970 2968
2971 2969 /*
2972 2970 * And now we may have to adjust size downward if we have
2973 2971 * exceeded the realsize of the segment or initial anon
2974 2972 * allocations.
2975 2973 */
2976 2974 sptseg_addr = sptseg->s_base + ptob(anon_index);
2977 2975 if ((sptseg_addr + size) >
2978 2976 (sptseg->s_base + sptd->spt_realsize))
2979 2977 size = (sptseg->s_base + sptd->spt_realsize) -
2980 2978 sptseg_addr;
2981 2979
2982 2980 /*
2983 2981 * Set memory allocation policy for this segment
2984 2982 */
2985 2983 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2986 2984 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2987 2985 NULL, 0, len);
2988 2986
2989 2987 /*
2990 2988 * If random memory allocation policy set already,
2991 2989 * don't bother reapplying it.
2992 2990 */
2993 2991 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2994 2992 return (0);
2995 2993
2996 2994 /*
2997 2995 * Mark any existing pages in the given range for
2998 2996 * migration, flushing the I/O page cache, and using
2999 2997 * underlying segment to calculate anon index and get
3000 2998 * anonmap and vnode pointer from
3001 2999 */
3002 3000 if (shmd->shm_softlockcnt > 0)
3003 3001 segspt_purge(seg);
3004 3002
3005 3003 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3006 3004 }
3007 3005
3008 3006 return (0);
3009 -}
3010 -
3011 -/*ARGSUSED*/
3012 -void
3013 -segspt_shmdump(struct seg *seg)
3014 -{
3015 - /* no-op for ISM segment */
3016 3007 }
3017 3008
3018 3009 /*
3019 3010 * get a memory ID for an addr in a given segment
3020 3011 */
3021 3012 static int
3022 3013 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3023 3014 {
3024 3015 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3025 3016 struct anon *ap;
3026 3017 size_t anon_index;
3027 3018 struct anon_map *amp = shmd->shm_amp;
3028 3019 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3029 3020 struct seg *sptseg = shmd->shm_sptseg;
3030 3021 anon_sync_obj_t cookie;
3031 3022
3032 3023 anon_index = seg_page(seg, addr);
3033 3024
3034 3025 if (addr > (seg->s_base + sptd->spt_realsize)) {
3035 3026 return (EFAULT);
3036 3027 }
3037 3028
3038 3029 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3039 3030 anon_array_enter(amp, anon_index, &cookie);
3040 3031 ap = anon_get_ptr(amp->ahp, anon_index);
3041 3032 if (ap == NULL) {
3042 3033 struct page *pp;
3043 3034 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3044 3035
3045 3036 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3046 3037 if (pp == NULL) {
3047 3038 anon_array_exit(&cookie);
3048 3039 			ANON_LOCK_EXIT(&amp->a_rwlock);
3049 3040 return (ENOMEM);
3050 3041 }
3051 3042 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3052 3043 page_unlock(pp);
3053 3044 }
3054 3045 anon_array_exit(&cookie);
3055 3046 	ANON_LOCK_EXIT(&amp->a_rwlock);
3056 3047 memidp->val[0] = (uintptr_t)ap;
3057 3048 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3058 3049 return (0);
3059 3050 }
3060 3051
3061 3052 /*
3062 3053 * Get memory allocation policy info for specified address in given segment
3063 3054 */
3064 3055 static lgrp_mem_policy_info_t *
3065 3056 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3066 3057 {
3067 3058 struct anon_map *amp;
3068 3059 ulong_t anon_index;
3069 3060 lgrp_mem_policy_info_t *policy_info;
3070 3061 struct shm_data *shm_data;
3071 3062
3072 3063 ASSERT(seg != NULL);
3073 3064
3074 3065 /*
3075 3066 * Get anon_map from segshm
3076 3067 *
3077 3068 * Assume that no lock needs to be held on anon_map, since
3078 3069 * it should be protected by its reference count which must be
3079 3070 * nonzero for an existing segment
3080 3071 * Need to grab readers lock on policy tree though
3081 3072 */
3082 3073 shm_data = (struct shm_data *)seg->s_data;
3083 3074 if (shm_data == NULL)
3084 3075 return (NULL);
3085 3076 amp = shm_data->shm_amp;
3086 3077 ASSERT(amp->refcnt != 0);
3087 3078
3088 3079 /*
3089 3080 * Get policy info
3090 3081 *
3091 3082 * Assume starting anon index of 0
3092 3083 */
3093 3084 anon_index = seg_page(seg, addr);
3094 3085 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3095 3086
3096 3087 return (policy_info);
3097 3088 }