patch SEGOP_SWAPOUT-delete
--- old/usr/src/uts/common/vm/seg_spt.c
+++ new/usr/src/uts/common/vm/seg_spt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/param.h>
26 26 #include <sys/user.h>
27 27 #include <sys/mman.h>
28 28 #include <sys/kmem.h>
29 29 #include <sys/sysmacros.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/tuneable.h>
33 33 #include <vm/hat.h>
34 34 #include <vm/seg.h>
35 35 #include <vm/as.h>
36 36 #include <vm/anon.h>
37 37 #include <vm/page.h>
38 38 #include <sys/buf.h>
39 39 #include <sys/swap.h>
40 40 #include <sys/atomic.h>
41 41 #include <vm/seg_spt.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/vtrace.h>
44 44 #include <sys/shm.h>
45 45 #include <sys/shm_impl.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/vmsystm.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/project.h>
50 50 #include <sys/tnf_probe.h>
51 51 #include <sys/zone.h>
52 52
53 53 #define SEGSPTADDR (caddr_t)0x0
54 54
55 55 /*
56 56 * # pages used for spt
57 57 */
58 58 size_t spt_used;
59 59
60 60 /*
61 61 * segspt_minfree is the memory left for system after ISM
62 62 * locked its pages; it is set up to 5% of availrmem in
63 63 * sptcreate when ISM is created. ISM should not use more
64 64 * than ~90% of availrmem; if it does, then the performance
65 65 * of the system may decrease. Machines with large memories may
66 66 * be able to use up more memory for ISM so we set the default
67 67 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
68 68 * If somebody wants even more memory for ISM (risking hanging
69 69 * the system) they can patch the segspt_minfree to smaller number.
70 70 */
71 71 pgcnt_t segspt_minfree = 0;
72 72
73 73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 75 static void segspt_free(struct seg *seg);
76 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78 78
79 79 static void
80 80 segspt_badop()
81 81 {
82 82 panic("segspt_badop called");
83 83 /*NOTREACHED*/
84 84 }
85 85
86 86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
87 87
88 88 struct seg_ops segspt_ops = {
89 89 SEGSPT_BADOP(int), /* dup */
90 90 segspt_unmap,
91 91 segspt_free,
92 92 SEGSPT_BADOP(int), /* fault */
93 93 SEGSPT_BADOP(faultcode_t), /* faulta */
94 94 SEGSPT_BADOP(int), /* setprot */
95 95 SEGSPT_BADOP(int), /* checkprot */
96 96 SEGSPT_BADOP(int), /* kluster */
97 - SEGSPT_BADOP(size_t), /* swapout */
98 97 SEGSPT_BADOP(int), /* sync */
99 98 SEGSPT_BADOP(size_t), /* incore */
100 99 SEGSPT_BADOP(int), /* lockop */
101 100 SEGSPT_BADOP(int), /* getprot */
102 101 SEGSPT_BADOP(u_offset_t), /* getoffset */
103 102 SEGSPT_BADOP(int), /* gettype */
104 103 SEGSPT_BADOP(int), /* getvp */
105 104 SEGSPT_BADOP(int), /* advise */
106 105 SEGSPT_BADOP(void), /* dump */
107 106 SEGSPT_BADOP(int), /* pagelock */
108 107 SEGSPT_BADOP(int), /* setpgsz */
109 108 SEGSPT_BADOP(int), /* getmemid */
110 109 segspt_getpolicy, /* getpolicy */
111 110 SEGSPT_BADOP(int), /* capable */
112 111 };
113 112
114 113 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
115 114 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
116 115 static void segspt_shmfree(struct seg *seg);
117 116 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
118 117 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
119 118 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
120 119 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
121 120 register size_t len, register uint_t prot);
122 121 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
123 122 uint_t prot);
124 123 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
125 -static size_t segspt_shmswapout(struct seg *seg);
126 124 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
127 125 register char *vec);
128 126 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
129 127 int attr, uint_t flags);
130 128 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
131 129 int attr, int op, ulong_t *lockmap, size_t pos);
132 130 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
133 131 uint_t *protv);
134 132 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
135 133 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
136 134 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
137 135 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
138 136 uint_t behav);
139 137 static void segspt_shmdump(struct seg *seg);
140 138 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
141 139 struct page ***, enum lock_type, enum seg_rw);
142 140 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
143 141 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
144 142 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
145 143 static int segspt_shmcapable(struct seg *, segcapability_t);
146 144
147 145 struct seg_ops segspt_shmops = {
148 146 segspt_shmdup,
149 147 segspt_shmunmap,
150 148 segspt_shmfree,
151 149 segspt_shmfault,
152 150 segspt_shmfaulta,
153 151 segspt_shmsetprot,
154 152 segspt_shmcheckprot,
155 153 segspt_shmkluster,
156 - segspt_shmswapout,
157 154 segspt_shmsync,
158 155 segspt_shmincore,
159 156 segspt_shmlockop,
160 157 segspt_shmgetprot,
161 158 segspt_shmgetoffset,
162 159 segspt_shmgettype,
163 160 segspt_shmgetvp,
164 161 segspt_shmadvise, /* advise */
165 162 segspt_shmdump,
166 163 segspt_shmpagelock,
167 164 segspt_shmsetpgsz,
168 165 segspt_shmgetmemid,
169 166 segspt_shmgetpolicy,
170 167 segspt_shmcapable,
171 168 };
172 169
173 170 static void segspt_purge(struct seg *seg);
174 171 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
175 172 enum seg_rw, int);
176 173 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
177 174 page_t **ppa);
178 175
179 176
180 177
181 178 /*ARGSUSED*/
182 179 int
183 180 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
184 181 uint_t prot, uint_t flags, uint_t share_szc)
185 182 {
186 183 int err;
187 184 struct as *newas;
188 185 struct segspt_crargs sptcargs;
189 186
190 187 #ifdef DEBUG
191 188 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
192 189 tnf_ulong, size, size );
193 190 #endif
194 191 if (segspt_minfree == 0) /* leave min 5% of availrmem for */
195 192 segspt_minfree = availrmem/20; /* for the system */
196 193
197 194 if (!hat_supported(HAT_SHARED_PT, (void *)0))
198 195 return (EINVAL);
199 196
200 197 /*
201 198 * get a new as for this shared memory segment
202 199 */
203 200 newas = as_alloc();
204 201 newas->a_proc = NULL;
205 202 sptcargs.amp = amp;
206 203 sptcargs.prot = prot;
207 204 sptcargs.flags = flags;
208 205 sptcargs.szc = share_szc;
209 206 /*
210 207 * create a shared page table (spt) segment
211 208 */
212 209
213 210 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
214 211 as_free(newas);
215 212 return (err);
216 213 }
217 214 *sptseg = sptcargs.seg_spt;
218 215 return (0);
219 216 }
220 217
221 218 void
222 219 sptdestroy(struct as *as, struct anon_map *amp)
223 220 {
224 221
225 222 #ifdef DEBUG
226 223 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
227 224 #endif
228 225 (void) as_unmap(as, SEGSPTADDR, amp->size);
229 226 as_free(as);
230 227 }
231 228
232 229 /*
233 230 * called from seg_free().
234 231 * free (i.e., unlock, unmap, return to free list)
235 232 * all the pages in the given seg.
236 233 */
237 234 void
238 235 segspt_free(struct seg *seg)
239 236 {
240 237 struct spt_data *sptd = (struct spt_data *)seg->s_data;
241 238
242 239 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
243 240
244 241 if (sptd != NULL) {
245 242 if (sptd->spt_realsize)
246 243 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
247 244
248 245 if (sptd->spt_ppa_lckcnt)
249 246 kmem_free(sptd->spt_ppa_lckcnt,
250 247 sizeof (*sptd->spt_ppa_lckcnt)
251 248 * btopr(sptd->spt_amp->size));
252 249 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
253 250 cv_destroy(&sptd->spt_cv);
254 251 mutex_destroy(&sptd->spt_lock);
255 252 kmem_free(sptd, sizeof (*sptd));
256 253 }
257 254 }
258 255
259 256 /*ARGSUSED*/
260 257 static int
261 258 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
262 259 uint_t flags)
263 260 {
264 261 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
265 262
266 263 return (0);
267 264 }
268 265
269 266 /*ARGSUSED*/
270 267 static size_t
271 268 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
272 269 {
273 270 caddr_t eo_seg;
274 271 pgcnt_t npages;
275 272 struct shm_data *shmd = (struct shm_data *)seg->s_data;
276 273 struct seg *sptseg;
277 274 struct spt_data *sptd;
278 275
279 276 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
280 277 #ifdef lint
281 278 seg = seg;
282 279 #endif
283 280 sptseg = shmd->shm_sptseg;
284 281 sptd = sptseg->s_data;
285 282
286 283 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
287 284 eo_seg = addr + len;
288 285 while (addr < eo_seg) {
289 286 /* page exists, and it's locked. */
290 287 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
291 288 SEG_PAGE_ANON;
292 289 addr += PAGESIZE;
293 290 }
294 291 return (len);
295 292 } else {
296 293 struct anon_map *amp = shmd->shm_amp;
297 294 struct anon *ap;
298 295 page_t *pp;
299 296 pgcnt_t anon_index;
300 297 struct vnode *vp;
301 298 u_offset_t off;
302 299 ulong_t i;
303 300 int ret;
304 301 anon_sync_obj_t cookie;
305 302
306 303 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
307 304 anon_index = seg_page(seg, addr);
308 305 npages = btopr(len);
309 306 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
310 307 return (EINVAL);
311 308 }
312 309 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
313 310 for (i = 0; i < npages; i++, anon_index++) {
314 311 ret = 0;
315 312 anon_array_enter(amp, anon_index, &cookie);
316 313 ap = anon_get_ptr(amp->ahp, anon_index);
317 314 if (ap != NULL) {
318 315 swap_xlate(ap, &vp, &off);
319 316 anon_array_exit(&cookie);
320 317 pp = page_lookup_nowait(vp, off, SE_SHARED);
321 318 if (pp != NULL) {
322 319 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
323 320 page_unlock(pp);
324 321 }
325 322 } else {
326 323 anon_array_exit(&cookie);
327 324 }
328 325 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
329 326 ret |= SEG_PAGE_LOCKED;
330 327 }
331 328 *vec++ = (char)ret;
332 329 }
333 330 ANON_LOCK_EXIT(&amp->a_rwlock);
334 331 return (len);
335 332 }
336 333 }
337 334
338 335 static int
339 336 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
340 337 {
341 338 size_t share_size;
342 339
343 340 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
344 341
345 342 /*
346 343 * seg.s_size may have been rounded up to the largest page size
347 344 * in shmat().
348 345 * XXX This should be cleaned up. sptdestroy should take a length
349 346 * argument which should be the same as sptcreate. Then
350 347 * this rounding would not be needed (or is done in shm.c)
351 348 * Only the check for full segment will be needed.
352 349 *
353 350 * XXX -- shouldn't raddr == 0 always? These tests don't seem
354 351 * to be useful at all.
355 352 */
356 353 share_size = page_get_pagesize(seg->s_szc);
357 354 ssize = P2ROUNDUP(ssize, share_size);
358 355
359 356 if (raddr == seg->s_base && ssize == seg->s_size) {
360 357 seg_free(seg);
361 358 return (0);
362 359 } else
363 360 return (EINVAL);
364 361 }
365 362
366 363 int
367 364 segspt_create(struct seg *seg, caddr_t argsp)
368 365 {
369 366 int err;
370 367 caddr_t addr = seg->s_base;
371 368 struct spt_data *sptd;
372 369 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
373 370 struct anon_map *amp = sptcargs->amp;
374 371 struct kshmid *sp = amp->a_sp;
375 372 struct cred *cred = CRED();
376 373 ulong_t i, j, anon_index = 0;
377 374 pgcnt_t npages = btopr(amp->size);
378 375 struct vnode *vp;
379 376 page_t **ppa;
380 377 uint_t hat_flags;
381 378 size_t pgsz;
382 379 pgcnt_t pgcnt;
383 380 caddr_t a;
384 381 pgcnt_t pidx;
385 382 size_t sz;
386 383 proc_t *procp = curproc;
387 384 rctl_qty_t lockedbytes = 0;
388 385 kproject_t *proj;
389 386
390 387 /*
391 388 * We are holding the a_lock on the underlying dummy as,
392 389 * so we can make calls to the HAT layer.
393 390 */
394 391 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
395 392 ASSERT(sp != NULL);
396 393
397 394 #ifdef DEBUG
398 395 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
399 396 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
400 397 #endif
401 398 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
402 399 if (err = anon_swap_adjust(npages))
403 400 return (err);
404 401 }
405 402 err = ENOMEM;
406 403
407 404 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
408 405 goto out1;
409 406
410 407 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
411 408 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
412 409 KM_NOSLEEP)) == NULL)
413 410 goto out2;
414 411 }
415 412
416 413 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
417 414
418 415 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
419 416 goto out3;
420 417
421 418 seg->s_ops = &segspt_ops;
422 419 sptd->spt_vp = vp;
423 420 sptd->spt_amp = amp;
424 421 sptd->spt_prot = sptcargs->prot;
425 422 sptd->spt_flags = sptcargs->flags;
426 423 seg->s_data = (caddr_t)sptd;
427 424 sptd->spt_ppa = NULL;
428 425 sptd->spt_ppa_lckcnt = NULL;
429 426 seg->s_szc = sptcargs->szc;
430 427 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
431 428 sptd->spt_gen = 0;
432 429
433 430 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
434 431 if (seg->s_szc > amp->a_szc) {
435 432 amp->a_szc = seg->s_szc;
436 433 }
437 434 ANON_LOCK_EXIT(&amp->a_rwlock);
438 435
439 436 /*
440 437 * Set policy to affect initial allocation of pages in
441 438 * anon_map_createpages()
442 439 */
443 440 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
444 441 NULL, 0, ptob(npages));
445 442
446 443 if (sptcargs->flags & SHM_PAGEABLE) {
447 444 size_t share_sz;
448 445 pgcnt_t new_npgs, more_pgs;
449 446 struct anon_hdr *nahp;
450 447 zone_t *zone;
451 448
452 449 share_sz = page_get_pagesize(seg->s_szc);
453 450 if (!IS_P2ALIGNED(amp->size, share_sz)) {
454 451 /*
455 452 * We are rounding up the size of the anon array
456 453 * on 4 M boundary because we always create 4 M
457 454 * of page(s) when locking, faulting pages and we
458 455 * don't have to check for all corner cases e.g.
459 456 * if there is enough space to allocate 4 M
460 457 * page.
461 458 */
462 459 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
463 460 more_pgs = new_npgs - npages;
464 461
465 462 /*
466 463 * The zone will never be NULL, as a fully created
467 464 * shm always has an owning zone.
468 465 */
469 466 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
470 467 ASSERT(zone != NULL);
471 468 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
472 469 err = ENOMEM;
473 470 goto out4;
474 471 }
475 472
476 473 nahp = anon_create(new_npgs, ANON_SLEEP);
477 474 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
478 475 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
479 476 ANON_SLEEP);
480 477 anon_release(amp->ahp, npages);
481 478 amp->ahp = nahp;
482 479 ASSERT(amp->swresv == ptob(npages));
483 480 amp->swresv = amp->size = ptob(new_npgs);
484 481 ANON_LOCK_EXIT(&amp->a_rwlock);
485 482 npages = new_npgs;
486 483 }
487 484
488 485 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
489 486 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
490 487 sptd->spt_pcachecnt = 0;
491 488 sptd->spt_realsize = ptob(npages);
492 489 sptcargs->seg_spt = seg;
493 490 return (0);
494 491 }
495 492
496 493 /*
497 494 * get array of pages for each anon slot in amp
498 495 */
499 496 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
500 497 seg, addr, S_CREATE, cred)) != 0)
501 498 goto out4;
502 499
503 500 mutex_enter(&sp->shm_mlock);
504 501
505 502 /* May be partially locked, so, count bytes to charge for locking */
506 503 for (i = 0; i < npages; i++)
507 504 if (ppa[i]->p_lckcnt == 0)
508 505 lockedbytes += PAGESIZE;
509 506
510 507 proj = sp->shm_perm.ipc_proj;
511 508
512 509 if (lockedbytes > 0) {
513 510 mutex_enter(&procp->p_lock);
514 511 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
515 512 mutex_exit(&procp->p_lock);
516 513 mutex_exit(&sp->shm_mlock);
517 514 for (i = 0; i < npages; i++)
518 515 page_unlock(ppa[i]);
519 516 err = ENOMEM;
520 517 goto out4;
521 518 }
522 519 mutex_exit(&procp->p_lock);
523 520 }
524 521
525 522 /*
526 523 * addr is initial address corresponding to the first page on ppa list
527 524 */
528 525 for (i = 0; i < npages; i++) {
529 526 /* attempt to lock all pages */
530 527 if (page_pp_lock(ppa[i], 0, 1) == 0) {
531 528 /*
532 529 * if unable to lock any page, unlock all
533 530 * of them and return error
534 531 */
535 532 for (j = 0; j < i; j++)
536 533 page_pp_unlock(ppa[j], 0, 1);
537 534 for (i = 0; i < npages; i++)
538 535 page_unlock(ppa[i]);
539 536 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
540 537 mutex_exit(&sp->shm_mlock);
541 538 err = ENOMEM;
542 539 goto out4;
543 540 }
544 541 }
545 542 mutex_exit(&sp->shm_mlock);
546 543
547 544 /*
548 545 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
549 546 * for the entire life of the segment. For example platforms
550 547 * that do not support Dynamic Reconfiguration.
551 548 */
552 549 hat_flags = HAT_LOAD_SHARE;
553 550 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
554 551 hat_flags |= HAT_LOAD_LOCK;
555 552
556 553 /*
557 554 * Load translations one large page at a time
558 555 * to make sure we don't create mappings bigger than
559 556 * segment's size code in case underlying pages
560 557 * are shared with segvn's segment that uses bigger
561 558 * size code than we do.
562 559 */
563 560 pgsz = page_get_pagesize(seg->s_szc);
564 561 pgcnt = page_get_pagecnt(seg->s_szc);
565 562 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
566 563 sz = MIN(pgsz, ptob(npages - pidx));
567 564 hat_memload_array(seg->s_as->a_hat, a, sz,
568 565 &ppa[pidx], sptd->spt_prot, hat_flags);
569 566 }
570 567
571 568 /*
572 569 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
573 570 * we will leave the pages locked SE_SHARED for the life
574 571 * of the ISM segment. This will prevent any calls to
575 572 * hat_pageunload() on this ISM segment for those platforms.
576 573 */
577 574 if (!(hat_flags & HAT_LOAD_LOCK)) {
578 575 /*
579 576 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
580 577 * we no longer need to hold the SE_SHARED lock on the pages,
581 578 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
582 579 * SE_SHARED lock on the pages as necessary.
583 580 */
584 581 for (i = 0; i < npages; i++)
585 582 page_unlock(ppa[i]);
586 583 }
587 584 sptd->spt_pcachecnt = 0;
588 585 kmem_free(ppa, ((sizeof (page_t *)) * npages));
589 586 sptd->spt_realsize = ptob(npages);
590 587 atomic_add_long(&spt_used, npages);
591 588 sptcargs->seg_spt = seg;
592 589 return (0);
593 590
594 591 out4:
595 592 seg->s_data = NULL;
596 593 kmem_free(vp, sizeof (*vp));
597 594 cv_destroy(&sptd->spt_cv);
598 595 out3:
599 596 mutex_destroy(&sptd->spt_lock);
600 597 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
601 598 kmem_free(ppa, (sizeof (*ppa) * npages));
602 599 out2:
603 600 kmem_free(sptd, sizeof (*sptd));
604 601 out1:
605 602 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
606 603 anon_swap_restore(npages);
607 604 return (err);
608 605 }
609 606
610 607 /*ARGSUSED*/
611 608 void
612 609 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
613 610 {
614 611 struct page *pp;
615 612 struct spt_data *sptd = (struct spt_data *)seg->s_data;
616 613 pgcnt_t npages;
617 614 ulong_t anon_idx;
618 615 struct anon_map *amp;
619 616 struct anon *ap;
620 617 struct vnode *vp;
621 618 u_offset_t off;
622 619 uint_t hat_flags;
623 620 int root = 0;
624 621 pgcnt_t pgs, curnpgs = 0;
625 622 page_t *rootpp;
626 623 rctl_qty_t unlocked_bytes = 0;
627 624 kproject_t *proj;
628 625 kshmid_t *sp;
629 626
630 627 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
631 628
632 629 len = P2ROUNDUP(len, PAGESIZE);
633 630
634 631 npages = btop(len);
635 632
636 633 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
637 634 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
638 635 (sptd->spt_flags & SHM_PAGEABLE)) {
639 636 hat_flags = HAT_UNLOAD_UNMAP;
640 637 }
641 638
642 639 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
643 640
644 641 amp = sptd->spt_amp;
645 642 if (sptd->spt_flags & SHM_PAGEABLE)
646 643 npages = btop(amp->size);
647 644
648 645 ASSERT(amp != NULL);
649 646
650 647 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
651 648 sp = amp->a_sp;
652 649 proj = sp->shm_perm.ipc_proj;
653 650 mutex_enter(&sp->shm_mlock);
654 651 }
655 652 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
656 653 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
657 654 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
658 655 panic("segspt_free_pages: null app");
659 656 /*NOTREACHED*/
660 657 }
661 658 } else {
662 659 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
663 660 == NULL)
664 661 continue;
665 662 }
666 663 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
667 664 swap_xlate(ap, &vp, &off);
668 665
669 666 /*
670 667 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
671 668 * the pages won't be having SE_SHARED lock at this
672 669 * point.
673 670 *
674 671 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
675 672 * the pages are still held SE_SHARED locked from the
676 673 * original segspt_create()
677 674 *
678 675 * Our goal is to get SE_EXCL lock on each page, remove
679 676 * permanent lock on it and invalidate the page.
680 677 */
681 678 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
682 679 if (hat_flags == HAT_UNLOAD_UNMAP)
683 680 pp = page_lookup(vp, off, SE_EXCL);
684 681 else {
685 682 if ((pp = page_find(vp, off)) == NULL) {
686 683 panic("segspt_free_pages: "
687 684 "page not locked");
688 685 /*NOTREACHED*/
689 686 }
690 687 if (!page_tryupgrade(pp)) {
691 688 page_unlock(pp);
692 689 pp = page_lookup(vp, off, SE_EXCL);
693 690 }
694 691 }
695 692 if (pp == NULL) {
696 693 panic("segspt_free_pages: "
697 694 "page not in the system");
698 695 /*NOTREACHED*/
699 696 }
700 697 ASSERT(pp->p_lckcnt > 0);
701 698 page_pp_unlock(pp, 0, 1);
702 699 if (pp->p_lckcnt == 0)
703 700 unlocked_bytes += PAGESIZE;
704 701 } else {
705 702 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
706 703 continue;
707 704 }
708 705 /*
709 706 * It's logical to invalidate the pages here as in most cases
710 707 * these were created by segspt.
711 708 */
712 709 if (pp->p_szc != 0) {
713 710 if (root == 0) {
714 711 ASSERT(curnpgs == 0);
715 712 root = 1;
716 713 rootpp = pp;
717 714 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
718 715 ASSERT(pgs > 1);
719 716 ASSERT(IS_P2ALIGNED(pgs, pgs));
720 717 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
721 718 curnpgs--;
722 719 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
723 720 ASSERT(curnpgs == 1);
724 721 ASSERT(page_pptonum(pp) ==
725 722 page_pptonum(rootpp) + (pgs - 1));
726 723 page_destroy_pages(rootpp);
727 724 root = 0;
728 725 curnpgs = 0;
729 726 } else {
730 727 ASSERT(curnpgs > 1);
731 728 ASSERT(page_pptonum(pp) ==
732 729 page_pptonum(rootpp) + (pgs - curnpgs));
733 730 curnpgs--;
734 731 }
735 732 } else {
736 733 if (root != 0 || curnpgs != 0) {
737 734 panic("segspt_free_pages: bad large page");
738 735 /*NOTREACHED*/
739 736 }
740 737 /*
741 738 * Before destroying the pages, we need to take care
742 739 * of the rctl locked memory accounting. For that
743 740 * we need to calculate the unlocked_bytes.
744 741 */
745 742 if (pp->p_lckcnt > 0)
746 743 unlocked_bytes += PAGESIZE;
747 744 /*LINTED: constant in conditional context */
748 745 VN_DISPOSE(pp, B_INVAL, 0, kcred);
749 746 }
750 747 }
751 748 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
752 749 if (unlocked_bytes > 0)
753 750 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
754 751 mutex_exit(&sp->shm_mlock);
755 752 }
756 753 if (root != 0 || curnpgs != 0) {
757 754 panic("segspt_free_pages: bad large page");
758 755 /*NOTREACHED*/
759 756 }
760 757
761 758 /*
762 759 * mark that pages have been released
763 760 */
764 761 sptd->spt_realsize = 0;
765 762
766 763 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
767 764 atomic_add_long(&spt_used, -npages);
768 765 anon_swap_restore(npages);
769 766 }
770 767 }
771 768
772 769 /*
773 770 * Get memory allocation policy info for specified address in given segment
774 771 */
775 772 static lgrp_mem_policy_info_t *
776 773 segspt_getpolicy(struct seg *seg, caddr_t addr)
777 774 {
778 775 struct anon_map *amp;
779 776 ulong_t anon_index;
780 777 lgrp_mem_policy_info_t *policy_info;
781 778 struct spt_data *spt_data;
782 779
783 780 ASSERT(seg != NULL);
784 781
785 782 /*
786 783 * Get anon_map from segspt
787 784 *
788 785 * Assume that no lock needs to be held on anon_map, since
789 786 * it should be protected by its reference count which must be
790 787 * nonzero for an existing segment
791 788 * Need to grab readers lock on policy tree though
792 789 */
793 790 spt_data = (struct spt_data *)seg->s_data;
794 791 if (spt_data == NULL)
795 792 return (NULL);
796 793 amp = spt_data->spt_amp;
797 794 ASSERT(amp->refcnt != 0);
798 795
799 796 /*
800 797 * Get policy info
801 798 *
802 799 * Assume starting anon index of 0
803 800 */
804 801 anon_index = seg_page(seg, addr);
805 802 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
806 803
807 804 return (policy_info);
808 805 }
809 806
810 807 /*
811 808 * DISM only.
812 809 * Return locked pages over a given range.
813 810 *
814 811 * We will cache all DISM locked pages and save the pplist for the
815 812 * entire segment in the ppa field of the underlying DISM segment structure.
816 813 * Later, during a call to segspt_reclaim() we will use this ppa array
817 814 * to page_unlock() all of the pages and then we will free this ppa list.
818 815 */
819 816 /*ARGSUSED*/
820 817 static int
821 818 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
822 819 struct page ***ppp, enum lock_type type, enum seg_rw rw)
823 820 {
824 821 struct shm_data *shmd = (struct shm_data *)seg->s_data;
825 822 struct seg *sptseg = shmd->shm_sptseg;
826 823 struct spt_data *sptd = sptseg->s_data;
827 824 pgcnt_t pg_idx, npages, tot_npages, npgs;
828 825 struct page **pplist, **pl, **ppa, *pp;
829 826 struct anon_map *amp;
830 827 spgcnt_t an_idx;
831 828 int ret = ENOTSUP;
832 829 uint_t pl_built = 0;
833 830 struct anon *ap;
834 831 struct vnode *vp;
835 832 u_offset_t off;
836 833 pgcnt_t claim_availrmem = 0;
837 834 uint_t szc;
838 835
839 836 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
840 837 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
841 838
842 839 /*
843 840 * We want to lock/unlock the entire ISM segment. Therefore,
844 841 * we will be using the underlying sptseg and its base address
845 842 * and length for the caching arguments.
846 843 */
847 844 ASSERT(sptseg);
848 845 ASSERT(sptd);
849 846
850 847 pg_idx = seg_page(seg, addr);
851 848 npages = btopr(len);
852 849
853 850 /*
854 851 * check if the request is larger than number of pages covered
855 852 * by amp
856 853 */
857 854 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
858 855 *ppp = NULL;
859 856 return (ENOTSUP);
860 857 }
861 858
862 859 if (type == L_PAGEUNLOCK) {
863 860 ASSERT(sptd->spt_ppa != NULL);
864 861
865 862 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
866 863 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
867 864
868 865 /*
869 866 * If someone is blocked while unmapping, we purge
870 867 * segment page cache and thus reclaim pplist synchronously
871 868 * without waiting for seg_pasync_thread. This speeds up
872 869 * unmapping in cases where munmap(2) is called, while
873 870 * raw async i/o is still in progress or where a thread
874 871 * exits on data fault in a multithreaded application.
875 872 */
876 873 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
877 874 (AS_ISUNMAPWAIT(seg->s_as) &&
878 875 shmd->shm_softlockcnt > 0)) {
879 876 segspt_purge(seg);
880 877 }
881 878 return (0);
882 879 }
883 880
884 881 /* The L_PAGELOCK case ... */
885 882
886 883 if (sptd->spt_flags & DISM_PPA_CHANGED) {
887 884 segspt_purge(seg);
888 885 /*
889 886 * for DISM ppa needs to be rebuilt since
890 887 * number of locked pages could be changed
891 888 */
892 889 *ppp = NULL;
893 890 return (ENOTSUP);
894 891 }
895 892
896 893 /*
897 894 * First try to find pages in segment page cache, without
898 895 * holding the segment lock.
899 896 */
900 897 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
901 898 S_WRITE, SEGP_FORCE_WIRED);
902 899 if (pplist != NULL) {
903 900 ASSERT(sptd->spt_ppa != NULL);
904 901 ASSERT(sptd->spt_ppa == pplist);
905 902 ppa = sptd->spt_ppa;
906 903 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
907 904 if (ppa[an_idx] == NULL) {
908 905 seg_pinactive(seg, NULL, seg->s_base,
909 906 sptd->spt_amp->size, ppa,
910 907 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
911 908 *ppp = NULL;
912 909 return (ENOTSUP);
913 910 }
914 911 if ((szc = ppa[an_idx]->p_szc) != 0) {
915 912 npgs = page_get_pagecnt(szc);
916 913 an_idx = P2ROUNDUP(an_idx + 1, npgs);
917 914 } else {
918 915 an_idx++;
919 916 }
920 917 }
921 918 /*
922 919 * Since we cache the entire DISM segment, we want to
923 920 * set ppp to point to the first slot that corresponds
924 921 * to the requested addr, i.e. pg_idx.
925 922 */
926 923 *ppp = &(sptd->spt_ppa[pg_idx]);
927 924 return (0);
928 925 }
929 926
930 927 mutex_enter(&sptd->spt_lock);
931 928 /*
932 929 * try to find pages in segment page cache with mutex
933 930 */
934 931 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
935 932 S_WRITE, SEGP_FORCE_WIRED);
936 933 if (pplist != NULL) {
937 934 ASSERT(sptd->spt_ppa != NULL);
938 935 ASSERT(sptd->spt_ppa == pplist);
939 936 ppa = sptd->spt_ppa;
940 937 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
941 938 if (ppa[an_idx] == NULL) {
942 939 mutex_exit(&sptd->spt_lock);
943 940 seg_pinactive(seg, NULL, seg->s_base,
944 941 sptd->spt_amp->size, ppa,
945 942 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
946 943 *ppp = NULL;
947 944 return (ENOTSUP);
948 945 }
949 946 if ((szc = ppa[an_idx]->p_szc) != 0) {
950 947 npgs = page_get_pagecnt(szc);
951 948 an_idx = P2ROUNDUP(an_idx + 1, npgs);
952 949 } else {
953 950 an_idx++;
954 951 }
955 952 }
956 953 /*
957 954 * Since we cache the entire DISM segment, we want to
958 955 * set ppp to point to the first slot that corresponds
959 956 * to the requested addr, i.e. pg_idx.
960 957 */
961 958 mutex_exit(&sptd->spt_lock);
962 959 *ppp = &(sptd->spt_ppa[pg_idx]);
963 960 return (0);
964 961 }
965 962 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
966 963 SEGP_FORCE_WIRED) == SEGP_FAIL) {
967 964 mutex_exit(&sptd->spt_lock);
968 965 *ppp = NULL;
969 966 return (ENOTSUP);
970 967 }
971 968
972 969 /*
973 970 * No need to worry about protections because DISM pages are always rw.
974 971 */
975 972 pl = pplist = NULL;
976 973 amp = sptd->spt_amp;
977 974
978 975 /*
979 976 * Do we need to build the ppa array?
980 977 */
981 978 if (sptd->spt_ppa == NULL) {
982 979 pgcnt_t lpg_cnt = 0;
983 980
984 981 pl_built = 1;
985 982 tot_npages = btopr(sptd->spt_amp->size);
986 983
987 984 ASSERT(sptd->spt_pcachecnt == 0);
988 985 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
989 986 pl = pplist;
990 987
991 988 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
992 989 for (an_idx = 0; an_idx < tot_npages; ) {
993 990 ap = anon_get_ptr(amp->ahp, an_idx);
994 991 /*
995 992 * Cache only mlocked pages. For large pages
996 993 * if one (constituent) page is mlocked
997 994 * all pages for that large page
998 995 * are cached also. This is for quick
999 996 * lookups of ppa array;
1000 997 */
1001 998 if ((ap != NULL) && (lpg_cnt != 0 ||
1002 999 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1003 1000
1004 1001 swap_xlate(ap, &vp, &off);
1005 1002 pp = page_lookup(vp, off, SE_SHARED);
1006 1003 ASSERT(pp != NULL);
1007 1004 if (lpg_cnt == 0) {
1008 1005 lpg_cnt++;
1009 1006 /*
1010 1007 * For a small page, we are done --
1011 1008 * lpg_count is reset to 0 below.
1012 1009 *
1013 1010 * For a large page, we are guaranteed
1014 1011 * to find the anon structures of all
1015 1012 * constituent pages and a non-zero
1016 1013 * lpg_cnt ensures that we don't test
1017 1014 * for mlock for these. We are done
1018 1015 * when lpg_count reaches (npgs + 1).
1019 1016 * If we are not the first constituent
1020 1017 * page, restart at the first one.
1021 1018 */
1022 1019 npgs = page_get_pagecnt(pp->p_szc);
1023 1020 if (!IS_P2ALIGNED(an_idx, npgs)) {
1024 1021 an_idx = P2ALIGN(an_idx, npgs);
1025 1022 page_unlock(pp);
1026 1023 continue;
1027 1024 }
1028 1025 }
1029 1026 if (++lpg_cnt > npgs)
1030 1027 lpg_cnt = 0;
1031 1028
1032 1029 /*
1033 1030 * availrmem is decremented only
1034 1031 * for unlocked pages
1035 1032 */
1036 1033 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1037 1034 claim_availrmem++;
1038 1035 pplist[an_idx] = pp;
1039 1036 }
1040 1037 an_idx++;
1041 1038 }
1042 1039 ANON_LOCK_EXIT(&amp->a_rwlock);
1043 1040
1044 1041 if (claim_availrmem) {
1045 1042 mutex_enter(&freemem_lock);
1046 1043 if (availrmem < tune.t_minarmem + claim_availrmem) {
1047 1044 mutex_exit(&freemem_lock);
1048 1045 ret = ENOTSUP;
1049 1046 claim_availrmem = 0;
1050 1047 goto insert_fail;
1051 1048 } else {
1052 1049 availrmem -= claim_availrmem;
1053 1050 }
1054 1051 mutex_exit(&freemem_lock);
1055 1052 }
1056 1053
1057 1054 sptd->spt_ppa = pl;
1058 1055 } else {
1059 1056 /*
1060 1057 * We already have a valid ppa[].
1061 1058 */
1062 1059 pl = sptd->spt_ppa;
1063 1060 }
1064 1061
1065 1062 ASSERT(pl != NULL);
1066 1063
1067 1064 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1068 1065 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1069 1066 segspt_reclaim);
1070 1067 if (ret == SEGP_FAIL) {
1071 1068 /*
1072 1069 * seg_pinsert failed. We return
1073 1070 * ENOTSUP, so that the as_pagelock() code will
1074 1071 * then try the slower F_SOFTLOCK path.
1075 1072 */
1076 1073 if (pl_built) {
1077 1074 /*
1078 1075 * No one else has referenced the ppa[].
1079 1076 * We created it and we need to destroy it.
1080 1077 */
1081 1078 sptd->spt_ppa = NULL;
1082 1079 }
1083 1080 ret = ENOTSUP;
1084 1081 goto insert_fail;
1085 1082 }
1086 1083
1087 1084 /*
1088 1085 * In either case, we increment softlockcnt on the 'real' segment.
1089 1086 */
1090 1087 sptd->spt_pcachecnt++;
1091 1088 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
1092 1089
1093 1090 ppa = sptd->spt_ppa;
1094 1091 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1095 1092 if (ppa[an_idx] == NULL) {
1096 1093 mutex_exit(&sptd->spt_lock);
1097 1094 seg_pinactive(seg, NULL, seg->s_base,
1098 1095 sptd->spt_amp->size,
1099 1096 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1100 1097 *ppp = NULL;
1101 1098 return (ENOTSUP);
1102 1099 }
1103 1100 if ((szc = ppa[an_idx]->p_szc) != 0) {
1104 1101 npgs = page_get_pagecnt(szc);
1105 1102 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1106 1103 } else {
1107 1104 an_idx++;
1108 1105 }
1109 1106 }
1110 1107 /*
1111 1108 * We can now drop the sptd->spt_lock since the ppa[]
1112 1109 * exists and we have incremented pcachecnt.
1113 1110 */
1114 1111 mutex_exit(&sptd->spt_lock);
1115 1112
1116 1113 /*
1117 1114 * Since we cache the entire segment, we want to
1118 1115 * set ppp to point to the first slot that corresponds
1119 1116 * to the requested addr, i.e. pg_idx.
1120 1117 */
1121 1118 *ppp = &(sptd->spt_ppa[pg_idx]);
1122 1119 return (0);
1123 1120
1124 1121 insert_fail:
1125 1122 /*
1126 1123 * We will only reach this code if we tried and failed.
1127 1124 *
1128 1125 * And we can drop the lock on the dummy seg, once we've failed
1129 1126 * to set up a new ppa[].
1130 1127 */
1131 1128 mutex_exit(&sptd->spt_lock);
1132 1129
1133 1130 if (pl_built) {
1134 1131 if (claim_availrmem) {
1135 1132 mutex_enter(&freemem_lock);
1136 1133 availrmem += claim_availrmem;
1137 1134 mutex_exit(&freemem_lock);
1138 1135 }
1139 1136
1140 1137 /*
1141 1138 * We created pl and we need to destroy it.
1142 1139 */
1143 1140 pplist = pl;
1144 1141 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1145 1142 if (pplist[an_idx] != NULL)
1146 1143 page_unlock(pplist[an_idx]);
1147 1144 }
1148 1145 kmem_free(pl, sizeof (page_t *) * tot_npages);
1149 1146 }
1150 1147
1151 1148 if (shmd->shm_softlockcnt <= 0) {
1152 1149 if (AS_ISUNMAPWAIT(seg->s_as)) {
1153 1150 mutex_enter(&seg->s_as->a_contents);
1154 1151 if (AS_ISUNMAPWAIT(seg->s_as)) {
1155 1152 AS_CLRUNMAPWAIT(seg->s_as);
1156 1153 cv_broadcast(&seg->s_as->a_cv);
1157 1154 }
1158 1155 mutex_exit(&seg->s_as->a_contents);
1159 1156 }
1160 1157 }
1161 1158 *ppp = NULL;
1162 1159 return (ret);
1163 1160 }
1164 1161
1165 1162
1166 1163
1167 1164 /*
1168 1165 * return locked pages over a given range.
1169 1166 *
1170 1167 * We will cache the entire ISM segment and save the pplist for the
1171 1168 * entire segment in the ppa field of the underlying ISM segment structure.
1172 1169 * Later, during a call to segspt_reclaim() we will use this ppa array
1173 1170 * to page_unlock() all of the pages and then we will free this ppa list.
1174 1171 */
1175 1172 /*ARGSUSED*/
1176 1173 static int
1177 1174 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1178 1175 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1179 1176 {
1180 1177 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1181 1178 struct seg *sptseg = shmd->shm_sptseg;
1182 1179 struct spt_data *sptd = sptseg->s_data;
1183 1180 pgcnt_t np, page_index, npages;
1184 1181 caddr_t a, spt_base;
1185 1182 struct page **pplist, **pl, *pp;
1186 1183 struct anon_map *amp;
1187 1184 ulong_t anon_index;
1188 1185 int ret = ENOTSUP;
1189 1186 uint_t pl_built = 0;
1190 1187 struct anon *ap;
1191 1188 struct vnode *vp;
1192 1189 u_offset_t off;
1193 1190
1194 1191 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1195 1192 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1196 1193
1197 1194
1198 1195 /*
1199 1196 * We want to lock/unlock the entire ISM segment. Therefore,
1200 1197 * we will be using the underlying sptseg and its base address
1201 1198 * and length for the caching arguments.
1202 1199 */
1203 1200 ASSERT(sptseg);
1204 1201 ASSERT(sptd);
1205 1202
1206 1203 if (sptd->spt_flags & SHM_PAGEABLE) {
1207 1204 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1208 1205 }
1209 1206
1210 1207 page_index = seg_page(seg, addr);
1211 1208 npages = btopr(len);
1212 1209
1213 1210 /*
1214 1211 * check if the request is larger than number of pages covered
1215 1212 * by amp
1216 1213 */
1217 1214 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1218 1215 *ppp = NULL;
1219 1216 return (ENOTSUP);
1220 1217 }
1221 1218
1222 1219 if (type == L_PAGEUNLOCK) {
1223 1220
1224 1221 ASSERT(sptd->spt_ppa != NULL);
1225 1222
1226 1223 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1227 1224 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1228 1225
1229 1226 /*
1230 1227 * If someone is blocked while unmapping, we purge
1231 1228 * segment page cache and thus reclaim pplist synchronously
1232 1229 * without waiting for seg_pasync_thread. This speeds up
1233 1230 * unmapping in cases where munmap(2) is called, while
1234 1231 * raw async i/o is still in progress or where a thread
1235 1232 * exits on data fault in a multithreaded application.
1236 1233 */
1237 1234 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1238 1235 segspt_purge(seg);
1239 1236 }
1240 1237 return (0);
1241 1238 }
1242 1239
1243 1240 /* The L_PAGELOCK case... */
1244 1241
1245 1242 /*
1246 1243 * First try to find pages in segment page cache, without
1247 1244 * holding the segment lock.
1248 1245 */
1249 1246 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1250 1247 S_WRITE, SEGP_FORCE_WIRED);
1251 1248 if (pplist != NULL) {
1252 1249 ASSERT(sptd->spt_ppa == pplist);
1253 1250 ASSERT(sptd->spt_ppa[page_index]);
1254 1251 /*
1255 1252 * Since we cache the entire ISM segment, we want to
1256 1253 * set ppp to point to the first slot that corresponds
1257 1254 * to the requested addr, i.e. page_index.
1258 1255 */
1259 1256 *ppp = &(sptd->spt_ppa[page_index]);
1260 1257 return (0);
1261 1258 }
1262 1259
1263 1260 mutex_enter(&sptd->spt_lock);
1264 1261
1265 1262 /*
1266 1263 * try to find pages in segment page cache
1267 1264 */
1268 1265 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1269 1266 S_WRITE, SEGP_FORCE_WIRED);
1270 1267 if (pplist != NULL) {
1271 1268 ASSERT(sptd->spt_ppa == pplist);
1272 1269 /*
1273 1270 * Since we cache the entire segment, we want to
1274 1271 * set ppp to point to the first slot that corresponds
1275 1272 * to the requested addr, i.e. page_index.
1276 1273 */
1277 1274 mutex_exit(&sptd->spt_lock);
1278 1275 *ppp = &(sptd->spt_ppa[page_index]);
1279 1276 return (0);
1280 1277 }
1281 1278
1282 1279 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1283 1280 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1284 1281 mutex_exit(&sptd->spt_lock);
1285 1282 *ppp = NULL;
1286 1283 return (ENOTSUP);
1287 1284 }
1288 1285
1289 1286 /*
1290 1287 * No need to worry about protections because ISM pages
1291 1288 * are always rw.
1292 1289 */
1293 1290 pl = pplist = NULL;
1294 1291
1295 1292 /*
1296 1293 * Do we need to build the ppa array?
1297 1294 */
1298 1295 if (sptd->spt_ppa == NULL) {
1299 1296 ASSERT(sptd->spt_ppa == pplist);
1300 1297
1301 1298 spt_base = sptseg->s_base;
1302 1299 pl_built = 1;
1303 1300
1304 1301 /*
1305 1302 * availrmem is decremented once during anon_swap_adjust()
1306 1303 * and is incremented during the anon_unresv(), which is
1307 1304 * called from shm_rm_amp() when the segment is destroyed.
1308 1305 */
1309 1306 amp = sptd->spt_amp;
1310 1307 ASSERT(amp != NULL);
1311 1308
1312 1309 /* pcachecnt is protected by sptd->spt_lock */
1313 1310 ASSERT(sptd->spt_pcachecnt == 0);
1314 1311 pplist = kmem_zalloc(sizeof (page_t *)
1315 1312 * btopr(sptd->spt_amp->size), KM_SLEEP);
1316 1313 pl = pplist;
1317 1314
1318 1315 anon_index = seg_page(sptseg, spt_base);
1319 1316
1320 1317 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1321 1318 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1322 1319 a += PAGESIZE, anon_index++, pplist++) {
1323 1320 ap = anon_get_ptr(amp->ahp, anon_index);
1324 1321 ASSERT(ap != NULL);
1325 1322 swap_xlate(ap, &vp, &off);
1326 1323 pp = page_lookup(vp, off, SE_SHARED);
1327 1324 ASSERT(pp != NULL);
1328 1325 *pplist = pp;
1329 1326 }
1330 1327 ANON_LOCK_EXIT(&amp->a_rwlock);
1331 1328
1332 1329 if (a < (spt_base + sptd->spt_amp->size)) {
1333 1330 ret = ENOTSUP;
1334 1331 goto insert_fail;
1335 1332 }
1336 1333 sptd->spt_ppa = pl;
1337 1334 } else {
1338 1335 /*
1339 1336 * We already have a valid ppa[].
1340 1337 */
1341 1338 pl = sptd->spt_ppa;
1342 1339 }
1343 1340
1344 1341 ASSERT(pl != NULL);
1345 1342
1346 1343 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1347 1344 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1348 1345 segspt_reclaim);
1349 1346 if (ret == SEGP_FAIL) {
1350 1347 /*
1351 1348 * seg_pinsert failed. We return
1352 1349 * ENOTSUP, so that the as_pagelock() code will
1353 1350 * then try the slower F_SOFTLOCK path.
1354 1351 */
1355 1352 if (pl_built) {
1356 1353 /*
1357 1354 * No one else has referenced the ppa[].
1358 1355 * We created it and we need to destroy it.
1359 1356 */
1360 1357 sptd->spt_ppa = NULL;
1361 1358 }
1362 1359 ret = ENOTSUP;
1363 1360 goto insert_fail;
1364 1361 }
1365 1362
1366 1363 /*
1367 1364 * In either case, we increment softlockcnt on the 'real' segment.
1368 1365 */
1369 1366 sptd->spt_pcachecnt++;
1370 1367 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
1371 1368
1372 1369 /*
1373 1370 * We can now drop the sptd->spt_lock since the ppa[]
1374 1371 * exists and we have incremented pcachecnt.
1375 1372 */
1376 1373 mutex_exit(&sptd->spt_lock);
1377 1374
1378 1375 /*
1379 1376 * Since we cache the entire segment, we want to
1380 1377 * set ppp to point to the first slot that corresponds
1381 1378 * to the requested addr, i.e. page_index.
1382 1379 */
1383 1380 *ppp = &(sptd->spt_ppa[page_index]);
1384 1381 return (0);
1385 1382
1386 1383 insert_fail:
1387 1384 /*
1388 1385 * We will only reach this code if we tried and failed.
1389 1386 *
1390 1387 * And we can drop the lock on the dummy seg, once we've failed
1391 1388 * to set up a new ppa[].
1392 1389 */
1393 1390 mutex_exit(&sptd->spt_lock);
1394 1391
1395 1392 if (pl_built) {
1396 1393 /*
1397 1394 * We created pl and we need to destroy it.
1398 1395 */
1399 1396 pplist = pl;
1400 1397 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1401 1398 while (np) {
1402 1399 page_unlock(*pplist);
1403 1400 np--;
1404 1401 pplist++;
1405 1402 }
1406 1403 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1407 1404 }
1408 1405 if (shmd->shm_softlockcnt <= 0) {
1409 1406 if (AS_ISUNMAPWAIT(seg->s_as)) {
1410 1407 mutex_enter(&seg->s_as->a_contents);
1411 1408 if (AS_ISUNMAPWAIT(seg->s_as)) {
1412 1409 AS_CLRUNMAPWAIT(seg->s_as);
1413 1410 cv_broadcast(&seg->s_as->a_cv);
1414 1411 }
1415 1412 mutex_exit(&seg->s_as->a_contents);
1416 1413 }
1417 1414 }
1418 1415 *ppp = NULL;
1419 1416 return (ret);
1420 1417 }
1421 1418
1422 1419 /*
1423 1420 * purge any cached pages in the I/O page cache
1424 1421 */
1425 1422 static void
1426 1423 segspt_purge(struct seg *seg)
1427 1424 {
1428 1425 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1429 1426 }
1430 1427
1431 1428 static int
1432 1429 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1433 1430 enum seg_rw rw, int async)
1434 1431 {
1435 1432 struct seg *seg = (struct seg *)ptag;
1436 1433 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1437 1434 struct seg *sptseg;
1438 1435 struct spt_data *sptd;
1439 1436 pgcnt_t npages, i, free_availrmem = 0;
1440 1437 int done = 0;
1441 1438
1442 1439 #ifdef lint
1443 1440 addr = addr;
1444 1441 #endif
1445 1442 sptseg = shmd->shm_sptseg;
1446 1443 sptd = sptseg->s_data;
1447 1444 npages = (len >> PAGESHIFT);
1448 1445 ASSERT(npages);
1449 1446 ASSERT(sptd->spt_pcachecnt != 0);
1450 1447 ASSERT(sptd->spt_ppa == pplist);
1451 1448 ASSERT(npages == btopr(sptd->spt_amp->size));
1452 1449 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1453 1450
1454 1451 /*
1455 1452 * Acquire the lock on the dummy seg and destroy the
1456 1453 * ppa array IF this is the last pcachecnt.
1457 1454 */
1458 1455 mutex_enter(&sptd->spt_lock);
1459 1456 if (--sptd->spt_pcachecnt == 0) {
1460 1457 for (i = 0; i < npages; i++) {
1461 1458 if (pplist[i] == NULL) {
1462 1459 continue;
1463 1460 }
1464 1461 if (rw == S_WRITE) {
1465 1462 hat_setrefmod(pplist[i]);
1466 1463 } else {
1467 1464 hat_setref(pplist[i]);
1468 1465 }
1469 1466 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1470 1467 (sptd->spt_ppa_lckcnt[i] == 0))
1471 1468 free_availrmem++;
1472 1469 page_unlock(pplist[i]);
1473 1470 }
1474 1471 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1475 1472 mutex_enter(&freemem_lock);
1476 1473 availrmem += free_availrmem;
1477 1474 mutex_exit(&freemem_lock);
1478 1475 }
1479 1476 /*
1480 1477 * Since we want to cache/uncache the entire ISM segment,
1481 1478 * we will track the pplist in a segspt specific field
1482 1479 * ppa, that is initialized at the time we add an entry to
1483 1480 * the cache.
1484 1481 */
1485 1482 ASSERT(sptd->spt_pcachecnt == 0);
1486 1483 kmem_free(pplist, sizeof (page_t *) * npages);
1487 1484 sptd->spt_ppa = NULL;
1488 1485 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1489 1486 sptd->spt_gen++;
1490 1487 cv_broadcast(&sptd->spt_cv);
1491 1488 done = 1;
1492 1489 }
1493 1490 mutex_exit(&sptd->spt_lock);
1494 1491
1495 1492 /*
1496 1493 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
1497 1494 * may not hold AS lock (in this case async argument is not 0). This
1498 1495 * means if softlockcnt drops to 0 after the decrement below address
1499 1496 * space may get freed. We can't allow it since after softlock
1500 1497 * decrement to 0 we still need to access as structure for possible
1501 1498 * wakeup of unmap waiters. To prevent the disappearance of as we take
1502 1499 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1503 1500 * this mutex as a barrier to make sure this routine completes before
1504 1501 * segment is freed.
1505 1502 *
1506 1503 * The second complication we have to deal with in async case is a
1507 1504 * possibility of missed wake up of unmap wait thread. When we don't
1508 1505 * hold as lock here we may take a_contents lock before unmap wait
1509 1506 * thread that was first to see softlockcnt was still not 0. As a
1510 1507 * result we'll fail to wake up an unmap wait thread. To avoid this
1511 1508 * race we set nounmapwait flag in as structure if we drop softlockcnt
1512 1509 * to 0 if async is not 0. unmapwait thread
1513 1510 * will not block if this flag is set.
1514 1511 */
1515 1512 if (async)
1516 1513 mutex_enter(&shmd->shm_segfree_syncmtx);
1517 1514
1518 1515 /*
1519 1516 * Now decrement softlockcnt.
1520 1517 */
1521 1518 ASSERT(shmd->shm_softlockcnt > 0);
1522 1519 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
1523 1520
1524 1521 if (shmd->shm_softlockcnt <= 0) {
1525 1522 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1526 1523 mutex_enter(&seg->s_as->a_contents);
1527 1524 if (async)
1528 1525 AS_SETNOUNMAPWAIT(seg->s_as);
1529 1526 if (AS_ISUNMAPWAIT(seg->s_as)) {
1530 1527 AS_CLRUNMAPWAIT(seg->s_as);
1531 1528 cv_broadcast(&seg->s_as->a_cv);
1532 1529 }
1533 1530 mutex_exit(&seg->s_as->a_contents);
1534 1531 }
1535 1532 }
1536 1533
1537 1534 if (async)
1538 1535 mutex_exit(&shmd->shm_segfree_syncmtx);
1539 1536
1540 1537 return (done);
1541 1538 }
1542 1539
1543 1540 /*
1544 1541 * Do a F_SOFTUNLOCK call over the range requested.
1545 1542 * The range must have already been F_SOFTLOCK'ed.
1546 1543 *
1547 1544 * The calls to acquire and release the anon map lock mutex were
1548 1545 * removed in order to avoid a deadly embrace during a DR
1549 1546 * memory delete operation. (E.g. DR blocks while waiting for an
1550 1547 * exclusive lock on a page that is being used for kaio; the
1551 1548 * thread that will complete the kaio and call segspt_softunlock
1552 1549 * blocks on the anon map lock; another thread holding the anon
1553 1550 * map lock blocks on another page lock via the segspt_shmfault
1554 1551 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1555 1552 *
1556 1553 * The appropriateness of the removal is based upon the following:
1557 1554 * 1. If we are holding a segment's reader lock and the page is held
1558 1555 * shared, then the corresponding element in anonmap which points to
1559 1556 * anon struct cannot change and there is no need to acquire the
1560 1557 * anonymous map lock.
1561 1558 * 2. Threads in segspt_softunlock have a reader lock on the segment
1562 1559 * and already have the shared page lock, so we are guaranteed that
1563 1560 * the anon map slot cannot change and therefore can call anon_get_ptr()
1564 1561 * without grabbing the anonymous map lock.
1565 1562 * 3. Threads that softlock a shared page break copy-on-write, even if
1566 1563 * it's a read. Thus cow faults can be ignored with respect to soft
1567 1564 * unlocking, since the breaking of cow means that the anon slot(s) will
1568 1565 * not be shared.
1569 1566 */
1570 1567 static void
1571 1568 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1572 1569 size_t len, enum seg_rw rw)
1573 1570 {
1574 1571 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1575 1572 struct seg *sptseg;
1576 1573 struct spt_data *sptd;
1577 1574 page_t *pp;
1578 1575 caddr_t adr;
1579 1576 struct vnode *vp;
1580 1577 u_offset_t offset;
1581 1578 ulong_t anon_index;
1582 1579 struct anon_map *amp; /* XXX - for locknest */
1583 1580 struct anon *ap = NULL;
1584 1581 pgcnt_t npages;
1585 1582
1586 1583 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1587 1584
1588 1585 sptseg = shmd->shm_sptseg;
1589 1586 sptd = sptseg->s_data;
1590 1587
1591 1588 /*
1592 1589 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1593 1590 * and therefore their pages are SE_SHARED locked
1594 1591 * for the entire life of the segment.
1595 1592 */
1596 1593 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1597 1594 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1598 1595 goto softlock_decrement;
1599 1596 }
1600 1597
1601 1598 /*
1602 1599 * Any thread is free to do a page_find and
1603 1600 * page_unlock() on the pages within this seg.
1604 1601 *
1605 1602 * We are already holding the as->a_lock on the user's
1606 1603 * real segment, but we need to hold the a_lock on the
1607 1604 * underlying dummy as. This is mostly to satisfy the
1608 1605 * underlying HAT layer.
1609 1606 */
1610 1607 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1611 1608 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1612 1609 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1613 1610
1614 1611 amp = sptd->spt_amp;
1615 1612 ASSERT(amp != NULL);
1616 1613 anon_index = seg_page(sptseg, sptseg_addr);
1617 1614
1618 1615 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1619 1616 ap = anon_get_ptr(amp->ahp, anon_index++);
1620 1617 ASSERT(ap != NULL);
1621 1618 swap_xlate(ap, &vp, &offset);
1622 1619
1623 1620 /*
1624 1621 * Use page_find() instead of page_lookup() to
1625 1622 * find the page since we know that it has a
1626 1623 * "shared" lock.
1627 1624 */
1628 1625 pp = page_find(vp, offset);
1629 1626 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1630 1627 if (pp == NULL) {
1631 1628 panic("segspt_softunlock: "
1632 1629 "addr %p, ap %p, vp %p, off %llx",
1633 1630 (void *)adr, (void *)ap, (void *)vp, offset);
1634 1631 /*NOTREACHED*/
1635 1632 }
1636 1633
1637 1634 if (rw == S_WRITE) {
1638 1635 hat_setrefmod(pp);
1639 1636 } else if (rw != S_OTHER) {
1640 1637 hat_setref(pp);
1641 1638 }
1642 1639 page_unlock(pp);
1643 1640 }
1644 1641
1645 1642 softlock_decrement:
1646 1643 npages = btopr(len);
1647 1644 ASSERT(shmd->shm_softlockcnt >= npages);
1648 1645 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1649 1646 if (shmd->shm_softlockcnt == 0) {
1650 1647 /*
1651 1648 * All SOFTLOCKS are gone. Wakeup any waiting
1652 1649 * unmappers so they can try again to unmap.
1653 1650 * Check for waiters first without the mutex
1654 1651 * held so we don't always grab the mutex on
1655 1652 * softunlocks.
1656 1653 */
1657 1654 if (AS_ISUNMAPWAIT(seg->s_as)) {
1658 1655 mutex_enter(&seg->s_as->a_contents);
1659 1656 if (AS_ISUNMAPWAIT(seg->s_as)) {
1660 1657 AS_CLRUNMAPWAIT(seg->s_as);
1661 1658 cv_broadcast(&seg->s_as->a_cv);
1662 1659 }
1663 1660 mutex_exit(&seg->s_as->a_contents);
1664 1661 }
1665 1662 }
1666 1663 }
1667 1664
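/*
 * Editorial note, not part of the original seg_spt.c: the softlock
 * bookkeeping above is one half of a pair.  An F_SOFTLOCK fault in
 * segspt_dismfault()/segspt_shmfault() below adds the number of pages
 * it loads with HAT_LOAD_LOCK to shm_softlockcnt; segspt_softunlock()
 * subtracts the same count and, only when the counter drains to zero,
 * wakes any unmapper parked on AS_ISUNMAPWAIT.  A minimal sketch of the
 * pairing, using only names from the surrounding code:
 *
 *	atomic_add_long((ulong_t *)&shmd->shm_softlockcnt, npages);
 *	...	pages held SE_SHARED and HAT-locked while in use ...
 *	atomic_add_long((ulong_t *)&shmd->shm_softlockcnt, -npages);
 *	if (shmd->shm_softlockcnt == 0 && AS_ISUNMAPWAIT(seg->s_as))
 *		cv_broadcast(&seg->s_as->a_cv);	   (under a_contents)
 */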
1668 1665 int
1669 1666 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1670 1667 {
1671 1668 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1672 1669 struct shm_data *shmd;
1673 1670 struct anon_map *shm_amp = shmd_arg->shm_amp;
1674 1671 struct spt_data *sptd;
1675 1672 int error = 0;
1676 1673
1677 1674 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1678 1675
1679 1676 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1680 1677 if (shmd == NULL)
1681 1678 return (ENOMEM);
1682 1679
1683 1680 shmd->shm_sptas = shmd_arg->shm_sptas;
1684 1681 shmd->shm_amp = shm_amp;
1685 1682 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1686 1683
1687 1684 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1688 1685 NULL, 0, seg->s_size);
1689 1686
1690 1687 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1691 1688
1692 1689 seg->s_data = (void *)shmd;
1693 1690 seg->s_ops = &segspt_shmops;
1694 1691 seg->s_szc = shmd->shm_sptseg->s_szc;
1695 1692 sptd = shmd->shm_sptseg->s_data;
1696 1693
1697 1694 if (sptd->spt_flags & SHM_PAGEABLE) {
1698 1695 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1699 1696 KM_NOSLEEP)) == NULL) {
1700 1697 seg->s_data = (void *)NULL;
1701 1698 kmem_free(shmd, (sizeof (*shmd)));
1702 1699 return (ENOMEM);
1703 1700 }
1704 1701 shmd->shm_lckpgs = 0;
1705 1702 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1706 1703 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1707 1704 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1708 1705 seg->s_size, seg->s_szc)) != 0) {
1709 1706 kmem_free(shmd->shm_vpage,
1710 1707 btopr(shm_amp->size));
1711 1708 }
1712 1709 }
1713 1710 } else {
1714 1711 error = hat_share(seg->s_as->a_hat, seg->s_base,
1715 1712 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1716 1713 seg->s_size, seg->s_szc);
1717 1714 }
1718 1715 if (error) {
1719 1716 seg->s_szc = 0;
1720 1717 seg->s_data = (void *)NULL;
1721 1718 kmem_free(shmd, (sizeof (*shmd)));
1722 1719 } else {
1723 1720 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1724 1721 shm_amp->refcnt++;
1725 1722 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1726 1723 }
1727 1724 return (error);
1728 1725 }
1729 1726
1730 1727 int
1731 1728 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1732 1729 {
1733 1730 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1734 1731 int reclaim = 1;
1735 1732
1736 1733 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1737 1734 retry:
1738 1735 if (shmd->shm_softlockcnt > 0) {
1739 1736 if (reclaim == 1) {
1740 1737 segspt_purge(seg);
1741 1738 reclaim = 0;
1742 1739 goto retry;
1743 1740 }
1744 1741 return (EAGAIN);
1745 1742 }
1746 1743
1747 1744 if (ssize != seg->s_size) {
1748 1745 #ifdef DEBUG
1749 1746 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1750 1747 ssize, seg->s_size);
1751 1748 #endif
1752 1749 return (EINVAL);
1753 1750 }
1754 1751
1755 1752 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1756 1753 NULL, 0);
1757 1754 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1758 1755
1759 1756 seg_free(seg);
1760 1757
1761 1758 return (0);
1762 1759 }
1763 1760
1764 1761 void
1765 1762 segspt_shmfree(struct seg *seg)
1766 1763 {
1767 1764 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1768 1765 struct anon_map *shm_amp = shmd->shm_amp;
1769 1766
1770 1767 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1771 1768
1772 1769 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1773 1770 MC_UNLOCK, NULL, 0);
1774 1771
1775 1772 /*
1776 1773 * Need to increment refcnt when attaching
1777 1774 * and decrement when detaching because of dup().
1778 1775 */
1779 1776 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1780 1777 shm_amp->refcnt--;
1781 1778 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1782 1779
1783 1780 if (shmd->shm_vpage) { /* only for DISM */
1784 1781 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1785 1782 shmd->shm_vpage = NULL;
1786 1783 }
1787 1784
1788 1785 /*
1789 1786 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1790 1787 * still working with this segment without holding as lock.
1791 1788 */
1792 1789 ASSERT(shmd->shm_softlockcnt == 0);
1793 1790 mutex_enter(&shmd->shm_segfree_syncmtx);
1794 1791 mutex_destroy(&shmd->shm_segfree_syncmtx);
1795 1792
1796 1793 kmem_free(shmd, sizeof (*shmd));
1797 1794 }
1798 1795
1799 1796 /*ARGSUSED*/
1800 1797 int
1801 1798 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1802 1799 {
1803 1800 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1804 1801
1805 1802 /*
1806 1803 * Shared page table is more than shared mapping.
1807 1804 * Individual process sharing page tables can't change prot
1808 1805 * because there is only one set of page tables.
1809 1806 * This will be allowed after private page table is
1810 1807 * supported.
1811 1808 */
1812 1809 /* need to return correct status error? */
1813 1810 return (0);
1814 1811 }
1815 1812
1816 1813
1817 1814 faultcode_t
1818 1815 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1819 1816 size_t len, enum fault_type type, enum seg_rw rw)
1820 1817 {
1821 1818 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1822 1819 struct seg *sptseg = shmd->shm_sptseg;
1823 1820 struct as *curspt = shmd->shm_sptas;
1824 1821 struct spt_data *sptd = sptseg->s_data;
1825 1822 pgcnt_t npages;
1826 1823 size_t size;
1827 1824 caddr_t segspt_addr, shm_addr;
1828 1825 page_t **ppa;
1829 1826 int i;
1830 1827 ulong_t an_idx = 0;
1831 1828 int err = 0;
1832 1829 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1833 1830 size_t pgsz;
1834 1831 pgcnt_t pgcnt;
1835 1832 caddr_t a;
1836 1833 pgcnt_t pidx;
1837 1834
1838 1835 #ifdef lint
1839 1836 hat = hat;
1840 1837 #endif
1841 1838 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1842 1839
1843 1840 /*
1844 1841 * Because of the way spt is implemented
1845 1842 * the realsize of the segment does not have to be
1846 1843 * equal to the segment size itself. The segment size is
1847 1844 * often in multiples of a page size larger than PAGESIZE.
1848 1845 * The realsize is rounded up to the nearest PAGESIZE
1849 1846 * based on what the user requested. This is a bit of
1850 1847 * ugliness that is historical but not easily fixed
1851 1848 * without re-designing the higher levels of ISM.
1852 1849 */
1853 1850 ASSERT(addr >= seg->s_base);
1854 1851 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1855 1852 return (FC_NOMAP);
1856 1853 /*
1857 1854 * For all of the following cases except F_PROT, we need to
1858 1855 * make any necessary adjustments to addr and len
1859 1856 * and get all of the necessary page_t's into an array called ppa[].
1860 1857 *
1861 1858 * The code in shmat() forces base addr and len of ISM segment
1862 1859 * to be aligned to largest page size supported. Therefore,
1863 1860 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1864 1861 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1865 1862 * in large pagesize chunks, or else we will screw up the HAT
1866 1863 * layer by calling hat_memload_array() with differing page sizes
1867 1864 * over a given virtual range.
1868 1865 */
1869 1866 pgsz = page_get_pagesize(sptseg->s_szc);
1870 1867 pgcnt = page_get_pagecnt(sptseg->s_szc);
1871 1868 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1872 1869 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1873 1870 npages = btopr(size);
1874 1871
1875 1872 /*
1876 1873 * Now we need to convert from addr in segshm to addr in segspt.
1877 1874 */
1878 1875 an_idx = seg_page(seg, shm_addr);
1879 1876 segspt_addr = sptseg->s_base + ptob(an_idx);
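/*
 * Editorial worked example, not part of the original source; the
 * numbers are invented.  Assume a 4M underlying page size (pgsz =
 * 0x400000) and a fault at addr = seg->s_base + 0x401000, len = 0x2000:
 *
 *	shm_addr    = P2ALIGN(addr, pgsz)	       = seg->s_base + 0x400000
 *	size        = P2ROUNDUP(addr + len - shm_addr, pgsz) = 0x400000
 *	npages      = btopr(size)		       = 1024 (4K PAGESIZE)
 *	an_idx      = seg_page(seg, shm_addr)	       = 1024
 *	segspt_addr = sptseg->s_base + ptob(an_idx)    = sptseg->s_base + 0x400000
 *
 * The fault is widened to whole large pages so that every
 * hat_memload_array() call below covers a uniformly sized chunk.
 */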
1880 1877
1881 1878 ASSERT((segspt_addr + ptob(npages)) <=
1882 1879 (sptseg->s_base + sptd->spt_realsize));
1883 1880 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1884 1881
1885 1882 switch (type) {
1886 1883
1887 1884 case F_SOFTLOCK:
1888 1885
1889 1886 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1890 1887 /*
1891 1888 * Fall through to the F_INVAL case to load up the hat layer
1892 1889 * entries with the HAT_LOAD_LOCK flag.
1893 1890 */
1894 1891 /* FALLTHRU */
1895 1892 case F_INVAL:
1896 1893
1897 1894 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1898 1895 return (FC_NOMAP);
1899 1896
1900 1897 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1901 1898
1902 1899 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1903 1900 if (err != 0) {
1904 1901 if (type == F_SOFTLOCK) {
1905 1902 atomic_add_long((ulong_t *)(
1906 1903 &(shmd->shm_softlockcnt)), -npages);
1907 1904 }
1908 1905 goto dism_err;
1909 1906 }
1910 1907 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1911 1908 a = segspt_addr;
1912 1909 pidx = 0;
1913 1910 if (type == F_SOFTLOCK) {
1914 1911
1915 1912 /*
1916 1913 * Load up the translation keeping it
1917 1914 * locked and don't unlock the page.
1918 1915 */
1919 1916 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1920 1917 hat_memload_array(sptseg->s_as->a_hat,
1921 1918 a, pgsz, &ppa[pidx], sptd->spt_prot,
1922 1919 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1923 1920 }
1924 1921 } else {
1925 1922 if (hat == seg->s_as->a_hat) {
1926 1923
1927 1924 /*
1928 1925 * Migrate pages marked for migration
1929 1926 */
1930 1927 if (lgrp_optimizations())
1931 1928 page_migrate(seg, shm_addr, ppa,
1932 1929 npages);
1933 1930
1934 1931 /* CPU HAT */
1935 1932 for (; pidx < npages;
1936 1933 a += pgsz, pidx += pgcnt) {
1937 1934 hat_memload_array(sptseg->s_as->a_hat,
1938 1935 a, pgsz, &ppa[pidx],
1939 1936 sptd->spt_prot,
1940 1937 HAT_LOAD_SHARE);
1941 1938 }
1942 1939 } else {
1943 1940 /* XHAT. Pass real address */
1944 1941 hat_memload_array(hat, shm_addr,
1945 1942 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);
1946 1943 }
1947 1944
1948 1945 /*
1949 1946 * And now drop the SE_SHARED lock(s).
1950 1947 */
1951 1948 if (dyn_ism_unmap) {
1952 1949 for (i = 0; i < npages; i++) {
1953 1950 page_unlock(ppa[i]);
1954 1951 }
1955 1952 }
1956 1953 }
1957 1954
1958 1955 if (!dyn_ism_unmap) {
1959 1956 if (hat_share(seg->s_as->a_hat, shm_addr,
1960 1957 curspt->a_hat, segspt_addr, ptob(npages),
1961 1958 seg->s_szc) != 0) {
1962 1959 panic("hat_share err in DISM fault");
1963 1960 /* NOTREACHED */
1964 1961 }
1965 1962 if (type == F_INVAL) {
1966 1963 for (i = 0; i < npages; i++) {
1967 1964 page_unlock(ppa[i]);
1968 1965 }
1969 1966 }
1970 1967 }
1971 1968 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1972 1969 dism_err:
1973 1970 kmem_free(ppa, npages * sizeof (page_t *));
1974 1971 return (err);
1975 1972
1976 1973 case F_SOFTUNLOCK:
1977 1974
1978 1975 /*
1979 1976 * This is a bit ugly, we pass in the real seg pointer,
1980 1977 * but the segspt_addr is the virtual address within the
1981 1978 * dummy seg.
1982 1979 */
1983 1980 segspt_softunlock(seg, segspt_addr, size, rw);
1984 1981 return (0);
1985 1982
1986 1983 case F_PROT:
1987 1984
1988 1985 /*
1989 1986 * This takes care of the unusual case where a user
1990 1987 * allocates a stack in shared memory and a register
1991 1988 * window overflow is written to that stack page before
1992 1989 * it is otherwise modified.
1993 1990 *
1994 1991 * We can get away with this because ISM segments are
1995 1992 * always rw. Other than this unusual case, there
1996 1993 * should be no instances of protection violations.
1997 1994 */
1998 1995 return (0);
1999 1996
2000 1997 default:
2001 1998 #ifdef DEBUG
2002 1999 panic("segspt_dismfault default type?");
2003 2000 #else
2004 2001 return (FC_NOMAP);
2005 2002 #endif
2006 2003 }
2007 2004 }
2008 2005
2009 2006
2010 2007 faultcode_t
2011 2008 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2012 2009 size_t len, enum fault_type type, enum seg_rw rw)
2013 2010 {
2014 2011 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2015 2012 struct seg *sptseg = shmd->shm_sptseg;
2016 2013 struct as *curspt = shmd->shm_sptas;
2017 2014 struct spt_data *sptd = sptseg->s_data;
2018 2015 pgcnt_t npages;
2019 2016 size_t size;
2020 2017 caddr_t sptseg_addr, shm_addr;
2021 2018 page_t *pp, **ppa;
2022 2019 int i;
2023 2020 u_offset_t offset;
2024 2021 ulong_t anon_index = 0;
2025 2022 struct vnode *vp;
2026 2023 struct anon_map *amp; /* XXX - for locknest */
2027 2024 struct anon *ap = NULL;
2028 2025 size_t pgsz;
2029 2026 pgcnt_t pgcnt;
2030 2027 caddr_t a;
2031 2028 pgcnt_t pidx;
2032 2029 size_t sz;
2033 2030
2034 2031 #ifdef lint
2035 2032 hat = hat;
2036 2033 #endif
2037 2034
2038 2035 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2039 2036
2040 2037 if (sptd->spt_flags & SHM_PAGEABLE) {
2041 2038 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2042 2039 }
2043 2040
2044 2041 /*
2045 2042 * Because of the way spt is implemented
2046 2043 * the realsize of the segment does not have to be
2047 2044 * equal to the segment size itself. The segment size is
2048 2045 * often in multiples of a page size larger than PAGESIZE.
2049 2046 * The realsize is rounded up to the nearest PAGESIZE
2050 2047 * based on what the user requested. This is a bit of
2051 2048 * ugliness that is historical but not easily fixed
2052 2049 * without re-designing the higher levels of ISM.
2053 2050 */
2054 2051 ASSERT(addr >= seg->s_base);
2055 2052 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2056 2053 return (FC_NOMAP);
2057 2054 /*
2058 2055 * For all of the following cases except F_PROT, we need to
2059 2056 * make any necessary adjustments to addr and len
2060 2057 * and get all of the necessary page_t's into an array called ppa[].
2061 2058 *
2062 2059 * The code in shmat() forces base addr and len of ISM segment
2063 2060 * to be aligned to largest page size supported. Therefore,
2064 2061 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2065 2062 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2066 2063 * in large pagesize chunks, or else we will screw up the HAT
2067 2064 * layer by calling hat_memload_array() with differing page sizes
2068 2065 * over a given virtual range.
2069 2066 */
2070 2067 pgsz = page_get_pagesize(sptseg->s_szc);
2071 2068 pgcnt = page_get_pagecnt(sptseg->s_szc);
2072 2069 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2073 2070 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2074 2071 npages = btopr(size);
2075 2072
2076 2073 /*
2077 2074 * Now we need to convert from addr in segshm to addr in segspt.
2078 2075 */
2079 2076 anon_index = seg_page(seg, shm_addr);
2080 2077 sptseg_addr = sptseg->s_base + ptob(anon_index);
2081 2078
2082 2079 /*
2083 2080 * And now we may have to adjust npages downward if we have
2084 2081 * exceeded the realsize of the segment or initial anon
2085 2082 * allocations.
2086 2083 */
2087 2084 if ((sptseg_addr + ptob(npages)) >
2088 2085 (sptseg->s_base + sptd->spt_realsize))
2089 2086 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2090 2087
2091 2088 npages = btopr(size);
2092 2089
2093 2090 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2094 2091 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2095 2092
2096 2093 switch (type) {
2097 2094
2098 2095 case F_SOFTLOCK:
2099 2096
2100 2097 /*
2101 2098 * availrmem is decremented once during anon_swap_adjust()
2102 2099 * and is incremented during the anon_unresv(), which is
2103 2100 * called from shm_rm_amp() when the segment is destroyed.
2104 2101 */
2105 2102 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2106 2103 /*
2107 2104 * Some platforms assume that ISM pages are SE_SHARED
2108 2105 * locked for the entire life of the segment.
2109 2106 */
2110 2107 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2111 2108 return (0);
2112 2109 /*
2113 2110 * Fall through to the F_INVAL case to load up the hat layer
2114 2111 * entries with the HAT_LOAD_LOCK flag.
2115 2112 */
2116 2113
2117 2114 /* FALLTHRU */
2118 2115 case F_INVAL:
2119 2116
2120 2117 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2121 2118 return (FC_NOMAP);
2122 2119
2123 2120 /*
2124 2121 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2125 2122 * may still rely on this call to hat_share(). That
2126 2123 * would imply that those hats can fault on a
2127 2124 * HAT_LOAD_LOCK translation, which would seem
2128 2125 * contradictory.
2129 2126 */
2130 2127 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2131 2128 if (hat_share(seg->s_as->a_hat, seg->s_base,
2132 2129 curspt->a_hat, sptseg->s_base,
2133 2130 sptseg->s_size, sptseg->s_szc) != 0) {
2134 2131 panic("hat_share error in ISM fault");
2135 2132 /*NOTREACHED*/
2136 2133 }
2137 2134 return (0);
2138 2135 }
2139 2136 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2140 2137
2141 2138 /*
2142 2139 * I see no need to lock the real seg
2143 2140 * here, because all of our work will be on the underlying
2144 2141 * dummy seg.
2145 2142 *
2146 2143 * sptseg_addr and npages now account for large pages.
2147 2144 */
2148 2145 amp = sptd->spt_amp;
2149 2146 ASSERT(amp != NULL);
2150 2147 anon_index = seg_page(sptseg, sptseg_addr);
2151 2148
2152 2149 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2153 2150 for (i = 0; i < npages; i++) {
2154 2151 ap = anon_get_ptr(amp->ahp, anon_index++);
2155 2152 ASSERT(ap != NULL);
2156 2153 swap_xlate(ap, &vp, &offset);
2157 2154 pp = page_lookup(vp, offset, SE_SHARED);
2158 2155 ASSERT(pp != NULL);
2159 2156 ppa[i] = pp;
2160 2157 }
2161 2158 ANON_LOCK_EXIT(&amp->a_rwlock);
2162 2159 ASSERT(i == npages);
2163 2160
2164 2161 /*
2165 2162 * We are already holding the as->a_lock on the user's
2166 2163 * real segment, but we need to hold the a_lock on the
2167 2164 * underlying dummy as. This is mostly to satisfy the
2168 2165 * underlying HAT layer.
2169 2166 */
2170 2167 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2171 2168 a = sptseg_addr;
2172 2169 pidx = 0;
2173 2170 if (type == F_SOFTLOCK) {
2174 2171 /*
2175 2172 * Load up the translation keeping it
2176 2173 * locked and don't unlock the page.
2177 2174 */
2178 2175 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2179 2176 sz = MIN(pgsz, ptob(npages - pidx));
2180 2177 hat_memload_array(sptseg->s_as->a_hat, a,
2181 2178 sz, &ppa[pidx], sptd->spt_prot,
2182 2179 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2183 2180 }
2184 2181 } else {
2185 2182 if (hat == seg->s_as->a_hat) {
2186 2183
2187 2184 /*
2188 2185 * Migrate pages marked for migration.
2189 2186 */
2190 2187 if (lgrp_optimizations())
2191 2188 page_migrate(seg, shm_addr, ppa,
2192 2189 npages);
2193 2190
2194 2191 /* CPU HAT */
2195 2192 for (; pidx < npages;
2196 2193 a += pgsz, pidx += pgcnt) {
2197 2194 sz = MIN(pgsz, ptob(npages - pidx));
2198 2195 hat_memload_array(sptseg->s_as->a_hat,
2199 2196 a, sz, &ppa[pidx],
2200 2197 sptd->spt_prot, HAT_LOAD_SHARE);
2201 2198 }
2202 2199 } else {
2203 2200 /* XHAT. Pass real address */
2204 2201 hat_memload_array(hat, shm_addr,
2205 2202 ptob(npages), ppa, sptd->spt_prot,
2206 2203 HAT_LOAD_SHARE);
2207 2204 }
2208 2205
2209 2206 /*
2210 2207 * And now drop the SE_SHARED lock(s).
2211 2208 */
2212 2209 for (i = 0; i < npages; i++)
2213 2210 page_unlock(ppa[i]);
2214 2211 }
2215 2212 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2216 2213
2217 2214 kmem_free(ppa, sizeof (page_t *) * npages);
2218 2215 return (0);
2219 2216 case F_SOFTUNLOCK:
2220 2217
2221 2218 /*
2222 2219 * This is a bit ugly, we pass in the real seg pointer,
2223 2220 * but the sptseg_addr is the virtual address within the
2224 2221 * dummy seg.
2225 2222 */
2226 2223 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2227 2224 return (0);
2228 2225
2229 2226 case F_PROT:
2230 2227
2231 2228 /*
2232 2229 * This takes care of the unusual case where a user
2233 2230 * allocates a stack in shared memory and a register
2234 2231 * window overflow is written to that stack page before
2235 2232 * it is otherwise modified.
2236 2233 *
2237 2234 * We can get away with this because ISM segments are
2238 2235 * always rw. Other than this unusual case, there
2239 2236 * should be no instances of protection violations.
2240 2237 */
2241 2238 return (0);
2242 2239
2243 2240 default:
2244 2241 #ifdef DEBUG
2245 2242 cmn_err(CE_WARN, "segspt_shmfault default type?");
2246 2243 #endif
2247 2244 return (FC_NOMAP);
2248 2245 }
2249 2246 }
2250 2247
2251 2248 /*ARGSUSED*/
2252 2249 static faultcode_t
2253 2250 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2254 2251 {
2255 2252 return (0);
2256 2253 }
2257 2254
2258 2255 /*ARGSUSED*/
2259 2256 static int
2260 2257 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2261 2258 {
2262 2259 return (0);
2263 2260 }
2264 2261
2265 -/*ARGSUSED*/
2266 -static size_t
2267 -segspt_shmswapout(struct seg *seg)
2268 -{
2269 - return (0);
2270 -}
2271 -
2272 2262 /*
2273 2263 * duplicate the shared page tables
2274 2264 */
2275 2265 int
2276 2266 segspt_shmdup(struct seg *seg, struct seg *newseg)
2277 2267 {
2278 2268 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2279 2269 struct anon_map *amp = shmd->shm_amp;
2280 2270 struct shm_data *shmd_new;
2281 2271 struct seg *spt_seg = shmd->shm_sptseg;
2282 2272 struct spt_data *sptd = spt_seg->s_data;
2283 2273 int error = 0;
2284 2274
2285 2275 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2286 2276
2287 2277 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2288 2278 newseg->s_data = (void *)shmd_new;
2289 2279 shmd_new->shm_sptas = shmd->shm_sptas;
2290 2280 shmd_new->shm_amp = amp;
2291 2281 shmd_new->shm_sptseg = shmd->shm_sptseg;
2292 2282 newseg->s_ops = &segspt_shmops;
2293 2283 newseg->s_szc = seg->s_szc;
2294 2284 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2295 2285
2296 2286 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2297 2287 amp->refcnt++;
2298 2288 ANON_LOCK_EXIT(&amp->a_rwlock);
2299 2289
2300 2290 if (sptd->spt_flags & SHM_PAGEABLE) {
2301 2291 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2302 2292 shmd_new->shm_lckpgs = 0;
2303 2293 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2304 2294 if ((error = hat_share(newseg->s_as->a_hat,
2305 2295 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2306 2296 seg->s_size, seg->s_szc)) != 0) {
2307 2297 kmem_free(shmd_new->shm_vpage,
2308 2298 btopr(amp->size));
2309 2299 }
2310 2300 }
2311 2301 return (error);
2312 2302 } else {
2313 2303 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2314 2304 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2315 2305 seg->s_szc));
2316 2306
2317 2307 }
2318 2308 }
2319 2309
2320 2310 /*ARGSUSED*/
2321 2311 int
2322 2312 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2323 2313 {
2324 2314 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2325 2315 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2326 2316
2327 2317 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2328 2318
2329 2319 /*
2330 2320 * ISM segment is always rw.
2331 2321 */
2332 2322 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2333 2323 }
2334 2324
2335 2325 /*
2336 2326 * Return an array of locked large pages; for empty slots allocate
2337 2327 * private zero-filled anon pages.
2338 2328 */
2339 2329 static int
2340 2330 spt_anon_getpages(
2341 2331 struct seg *sptseg,
2342 2332 caddr_t sptaddr,
2343 2333 size_t len,
2344 2334 page_t *ppa[])
2345 2335 {
2346 2336 struct spt_data *sptd = sptseg->s_data;
2347 2337 struct anon_map *amp = sptd->spt_amp;
2348 2338 enum seg_rw rw = sptd->spt_prot;
2349 2339 uint_t szc = sptseg->s_szc;
2350 2340 size_t pg_sz, share_sz = page_get_pagesize(szc);
2351 2341 pgcnt_t lp_npgs;
2352 2342 caddr_t lp_addr, e_sptaddr;
2353 2343 uint_t vpprot, ppa_szc = 0;
2354 2344 struct vpage *vpage = NULL;
2355 2345 ulong_t j, ppa_idx;
2356 2346 int err, ierr = 0;
2357 2347 pgcnt_t an_idx;
2358 2348 anon_sync_obj_t cookie;
2359 2349 int anon_locked = 0;
2360 2350 pgcnt_t amp_pgs;
2361 2351
2362 2352
2363 2353 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2364 2354 ASSERT(len != 0);
2365 2355
2366 2356 pg_sz = share_sz;
2367 2357 lp_npgs = btop(pg_sz);
2368 2358 lp_addr = sptaddr;
2369 2359 e_sptaddr = sptaddr + len;
2370 2360 an_idx = seg_page(sptseg, sptaddr);
2371 2361 ppa_idx = 0;
2372 2362
2373 2363 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2374 2364
2375 2365 amp_pgs = page_get_pagecnt(amp->a_szc);
2376 2366
2377 2367 /*CONSTCOND*/
2378 2368 while (1) {
2379 2369 for (; lp_addr < e_sptaddr;
2380 2370 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2381 2371
2382 2372 /*
2383 2373 * If we're currently locked, and we get to a new
2384 2374 * page, unlock our current anon chunk.
2385 2375 */
2386 2376 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2387 2377 anon_array_exit(&cookie);
2388 2378 anon_locked = 0;
2389 2379 }
2390 2380 if (!anon_locked) {
2391 2381 anon_array_enter(amp, an_idx, &cookie);
2392 2382 anon_locked = 1;
2393 2383 }
2394 2384 ppa_szc = (uint_t)-1;
2395 2385 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2396 2386 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2397 2387 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2398 2388
2399 2389 if (ierr != 0) {
2400 2390 if (ierr > 0) {
2401 2391 err = FC_MAKE_ERR(ierr);
2402 2392 goto lpgs_err;
2403 2393 }
2404 2394 break;
2405 2395 }
2406 2396 }
2407 2397 if (lp_addr == e_sptaddr) {
2408 2398 break;
2409 2399 }
2410 2400 ASSERT(lp_addr < e_sptaddr);
2411 2401
2412 2402 /*
2413 2403 * ierr == -1 means we failed to allocate a large page.
2414 2404 * So do a size down operation.
2415 2405 *
2416 2406 * ierr == -2 means some other process that privately shares
2417 2407 * pages with this process has allocated a larger page and we
2418 2408 * need to retry with larger pages. So do a size up
2419 2409 * operation. This relies on the fact that large pages are
2420 2410 * never partially shared i.e. if we share any constituent
2421 2411 * page of a large page with another process we must share the
2422 2412 * entire large page. Note this cannot happen for SOFTLOCK
2423 2413 * case, unless the current address (lp_addr) is at the beginning
2424 2414 * of the next page size boundary because the other process
2425 2415 * couldn't have relocated locked pages.
2426 2416 */
2427 2417 ASSERT(ierr == -1 || ierr == -2);
2428 2418 if (segvn_anypgsz) {
2429 2419 ASSERT(ierr == -2 || szc != 0);
2430 2420 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2431 2421 szc = (ierr == -1) ? szc - 1 : szc + 1;
2432 2422 } else {
2433 2423 /*
2434 2424 * For faults and segvn_anypgsz == 0
2435 2425 * we need to be careful not to loop forever
2436 2426 * if an existing page is found with szc other
2437 2427 * than 0 or seg->s_szc. This could be due
2438 2428 * to page relocations on behalf of DR or
2439 2429 * more likely large page creation. For this
2440 2430 * case simply re-size to existing page's szc
2441 2431 * if returned by anon_map_getpages().
2442 2432 */
2443 2433 if (ppa_szc == (uint_t)-1) {
2444 2434 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2445 2435 } else {
2446 2436 ASSERT(ppa_szc <= sptseg->s_szc);
2447 2437 ASSERT(ierr == -2 || ppa_szc < szc);
2448 2438 ASSERT(ierr == -1 || ppa_szc > szc);
2449 2439 szc = ppa_szc;
2450 2440 }
2451 2441 }
2452 2442 pg_sz = page_get_pagesize(szc);
2453 2443 lp_npgs = btop(pg_sz);
2454 2444 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
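/*
 * Editorial example, not part of the original source: one possible
 * retry sequence with segvn_anypgsz set.  Suppose sptseg->s_szc == 3.
 * If anon_map_getpages() cannot assemble a page of that size it returns
 * ierr == -1 and the loop re-enters with szc == 2, then 1, then 0 (the
 * ASSERT above encodes that -1 cannot happen at szc == 0).  If instead
 * another sharing process has already promoted the range, ierr == -2
 * steps szc back up toward sptseg->s_szc.
 */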
2455 2445 }
2456 2446 if (anon_locked) {
2457 2447 anon_array_exit(&cookie);
2458 2448 }
2459 2449 ANON_LOCK_EXIT(&amp->a_rwlock);
2460 2450 return (0);
2461 2451
2462 2452 lpgs_err:
2463 2453 if (anon_locked) {
2464 2454 anon_array_exit(&cookie);
2465 2455 }
2466 2456 ANON_LOCK_EXIT(&amp->a_rwlock);
2467 2457 for (j = 0; j < ppa_idx; j++)
2468 2458 page_unlock(ppa[j]);
2469 2459 return (err);
2470 2460 }
2471 2461
2472 2462 /*
2473 2463 * count the number of bytes in a set of spt pages that are currently not
2474 2464 * locked
2475 2465 */
2476 2466 static rctl_qty_t
2477 2467 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2478 2468 {
2479 2469 ulong_t i;
2480 2470 rctl_qty_t unlocked = 0;
2481 2471
2482 2472 for (i = 0; i < npages; i++) {
2483 2473 if (ppa[i]->p_lckcnt == 0)
2484 2474 unlocked += PAGESIZE;
2485 2475 }
2486 2476 return (unlocked);
2487 2477 }
2488 2478
2489 2479 extern u_longlong_t randtick(void);
2490 2480 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2491 2481 #define NLCK (NCPU_P2)
2492 2482 /* Random number with a range [0, n-1], n must be power of two */
2493 2483 #define RAND_P2(n) \
2494 2484 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
2495 2485
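/*
 * Editorial note, not part of the original source: for a power-of-two
 * n, RAND_P2(n) masks a scrambled per-thread value with (n - 1), so it
 * falls in [0, n-1].  spt_lockpages()/spt_unlockpages() therefore batch
 * their freemem_lock updates in chunks of
 *
 *	nlck = NLCK + RAND_P2(NLCK);	   i.e. [NLCK, 2*NLCK - 1]
 *
 * For example, on a machine where NCPU_P2 is 64 a batch covers 64-127
 * pages, and the randomization keeps concurrent lockers from hitting
 * freemem_lock in lock step.
 */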
2496 2486 int
2497 2487 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2498 2488 page_t **ppa, ulong_t *lockmap, size_t pos,
2499 2489 rctl_qty_t *locked)
2500 2490 {
2501 2491 struct shm_data *shmd = seg->s_data;
2502 2492 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2503 2493 ulong_t i;
2504 2494 int kernel;
2505 2495 pgcnt_t nlck = 0;
2506 2496 int rv = 0;
2507 2497 int use_reserved = 1;
2508 2498
2509 2499 /* return the number of bytes actually locked */
2510 2500 *locked = 0;
2511 2501
2512 2502 /*
2513 2503 * To avoid contention on freemem_lock, availrmem and pages_locked
2514 2504 * global counters are updated only every nlck locked pages instead of
2515 2505 * every time. Reserve nlck locks up front and deduct from this
2516 2506 * reservation for each page that requires a lock. When the reservation
2517 2507 * is consumed, reserve again. nlck is randomized, so the competing
2518 2508 * threads do not fall into a cyclic lock contention pattern. When
2519 2509 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2520 2510 * is used to lock pages.
2521 2511 */
2522 2512 for (i = 0; i < npages; anon_index++, pos++, i++) {
2523 2513 if (nlck == 0 && use_reserved == 1) {
2524 2514 nlck = NLCK + RAND_P2(NLCK);
2525 2515 /* if fewer loops left, decrease nlck */
2526 2516 nlck = MIN(nlck, npages - i);
2527 2517 /*
2528 2518 * Reserve nlck locks up front and deduct from this
2529 2519 * reservation for each page that requires a lock. When
2530 2520 * the reservation is consumed, reserve again.
2531 2521 */
2532 2522 mutex_enter(&freemem_lock);
2533 2523 if ((availrmem - nlck) < pages_pp_maximum) {
2534 2524 /* Do not do advance memory reserves */
2535 2525 use_reserved = 0;
2536 2526 } else {
2537 2527 availrmem -= nlck;
2538 2528 pages_locked += nlck;
2539 2529 }
2540 2530 mutex_exit(&freemem_lock);
2541 2531 }
2542 2532 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2543 2533 if (sptd->spt_ppa_lckcnt[anon_index] <
2544 2534 (ushort_t)DISM_LOCK_MAX) {
2545 2535 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2546 2536 (ushort_t)DISM_LOCK_MAX) {
2547 2537 cmn_err(CE_WARN,
2548 2538 "DISM page lock limit "
2549 2539 "reached on DISM offset 0x%lx\n",
2550 2540 anon_index << PAGESHIFT);
2551 2541 }
2552 2542 kernel = (sptd->spt_ppa &&
2553 2543 sptd->spt_ppa[anon_index]);
2554 2544 if (!page_pp_lock(ppa[i], 0, kernel ||
2555 2545 use_reserved)) {
2556 2546 sptd->spt_ppa_lckcnt[anon_index]--;
2557 2547 rv = EAGAIN;
2558 2548 break;
2559 2549 }
2560 2550 /* if this is a newly locked page, count it */
2561 2551 if (ppa[i]->p_lckcnt == 1) {
2562 2552 if (kernel == 0 && use_reserved == 1)
2563 2553 nlck--;
2564 2554 *locked += PAGESIZE;
2565 2555 }
2566 2556 shmd->shm_lckpgs++;
2567 2557 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2568 2558 if (lockmap != NULL)
2569 2559 BT_SET(lockmap, pos);
2570 2560 }
2571 2561 }
2572 2562 }
2573 2563 /* Return unused lock reservation */
2574 2564 if (nlck != 0 && use_reserved == 1) {
2575 2565 mutex_enter(&freemem_lock);
2576 2566 availrmem += nlck;
2577 2567 pages_locked -= nlck;
2578 2568 mutex_exit(&freemem_lock);
2579 2569 }
2580 2570
2581 2571 return (rv);
2582 2572 }
2583 2573
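/*
 * Editorial sketch, not part of the original seg_spt.c: the batching
 * strategy spt_lockpages() uses above, reduced to plain counters so the
 * accounting is easy to follow.  "pool" stands in for availrmem and the
 * helper below is hypothetical; a reservation is taken once per batch,
 * consumed only for newly locked pages, and the unused remainder is
 * handed back at the end.  The low-memory fallback (use_reserved == 0)
 * is not modeled.
 */
#if 0	/* illustration only, never compiled */
static long pool = 1 << 20;			/* stands in for availrmem */

static void
lock_pages_batched(int npages, const char *already_locked)
{
	long batch = 0;
	int i;

	for (i = 0; i < npages; i++) {
		if (batch == 0) {
			batch = 8 + (i & 7);	/* NLCK + RAND_P2(NLCK) in the real code */
			pool -= batch;		/* one global update per batch */
		}
		/* ... page_pp_lock() would be called here ... */
		if (!already_locked[i])
			batch--;		/* only newly locked pages consume a slot */
	}
	pool += batch;				/* return the unused remainder */
}
#endif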
2584 2574 int
2585 2575 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2586 2576 rctl_qty_t *unlocked)
2587 2577 {
2588 2578 struct shm_data *shmd = seg->s_data;
2589 2579 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2590 2580 struct anon_map *amp = sptd->spt_amp;
2591 2581 struct anon *ap;
2592 2582 struct vnode *vp;
2593 2583 u_offset_t off;
2594 2584 struct page *pp;
2595 2585 int kernel;
2596 2586 anon_sync_obj_t cookie;
2597 2587 ulong_t i;
2598 2588 pgcnt_t nlck = 0;
2599 2589 pgcnt_t nlck_limit = NLCK;
2600 2590
2601 2591 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2602 2592 for (i = 0; i < npages; i++, anon_index++) {
2603 2593 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2604 2594 anon_array_enter(amp, anon_index, &cookie);
2605 2595 ap = anon_get_ptr(amp->ahp, anon_index);
2606 2596 ASSERT(ap);
2607 2597
2608 2598 swap_xlate(ap, &vp, &off);
2609 2599 anon_array_exit(&cookie);
2610 2600 pp = page_lookup(vp, off, SE_SHARED);
2611 2601 ASSERT(pp);
2612 2602 /*
2613 2603 * availrmem is decremented only for pages which are not
2614 2604 * in seg pcache; for pages in seg pcache availrmem was
2615 2605 * decremented in _dismpagelock()
2616 2606 */
2617 2607 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2618 2608 ASSERT(pp->p_lckcnt > 0);
2619 2609
2620 2610 /*
2621 2611 * unlock page but do not change availrmem, we do it
2622 2612 * ourselves every nlck loops.
2623 2613 */
2624 2614 page_pp_unlock(pp, 0, 1);
2625 2615 if (pp->p_lckcnt == 0) {
2626 2616 if (kernel == 0)
2627 2617 nlck++;
2628 2618 *unlocked += PAGESIZE;
2629 2619 }
2630 2620 page_unlock(pp);
2631 2621 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2632 2622 sptd->spt_ppa_lckcnt[anon_index]--;
2633 2623 shmd->shm_lckpgs--;
2634 2624 }
2635 2625
2636 2626 /*
2637 2627 * To reduce freemem_lock contention, do not update availrmem
2638 2628 * until at least NLCK pages have been unlocked.
2639 2629 * 1. No need to update if nlck is zero
2640 2630 * 2. Always update if the last iteration
2641 2631 */
2642 2632 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2643 2633 mutex_enter(&freemem_lock);
2644 2634 availrmem += nlck;
2645 2635 pages_locked -= nlck;
2646 2636 mutex_exit(&freemem_lock);
2647 2637 nlck = 0;
2648 2638 nlck_limit = NLCK + RAND_P2(NLCK);
2649 2639 }
2650 2640 }
2651 2641 ANON_LOCK_EXIT(&amp->a_rwlock);
2652 2642
2653 2643 return (0);
2654 2644 }
2655 2645
2656 2646 /*ARGSUSED*/
2657 2647 static int
2658 2648 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2659 2649 int attr, int op, ulong_t *lockmap, size_t pos)
2660 2650 {
2661 2651 struct shm_data *shmd = seg->s_data;
2662 2652 struct seg *sptseg = shmd->shm_sptseg;
2663 2653 struct spt_data *sptd = sptseg->s_data;
2664 2654 struct kshmid *sp = sptd->spt_amp->a_sp;
2665 2655 pgcnt_t npages, a_npages;
2666 2656 page_t **ppa;
2667 2657 pgcnt_t an_idx, a_an_idx, ppa_idx;
2668 2658 caddr_t spt_addr, a_addr; /* spt and aligned address */
2669 2659 size_t a_len; /* aligned len */
2670 2660 size_t share_sz;
2671 2661 ulong_t i;
2672 2662 int sts = 0;
2673 2663 rctl_qty_t unlocked = 0;
2674 2664 rctl_qty_t locked = 0;
2675 2665 struct proc *p = curproc;
2676 2666 kproject_t *proj;
2677 2667
2678 2668 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2679 2669 ASSERT(sp != NULL);
2680 2670
2681 2671 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2682 2672 return (0);
2683 2673 }
2684 2674
2685 2675 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2686 2676 an_idx = seg_page(seg, addr);
2687 2677 npages = btopr(len);
2688 2678
2689 2679 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2690 2680 return (ENOMEM);
2691 2681 }
2692 2682
2693 2683 /*
2694 2684 * A shm's project never changes, so no lock needed.
2695 2685 * The shm has a hold on the project, so it will not go away.
2696 2686 * Since we have a mapping to shm within this zone, we know
2697 2687 * that the zone will not go away.
2698 2688 */
2699 2689 proj = sp->shm_perm.ipc_proj;
2700 2690
2701 2691 if (op == MC_LOCK) {
2702 2692
2703 2693 /*
2704 2694 * Need to align addr and size request if they are not
2705 2695 * aligned so we can always allocate large page(s); however,
2706 2696 * we only lock what was requested in the initial request.
2707 2697 */
2708 2698 share_sz = page_get_pagesize(sptseg->s_szc);
2709 2699 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2710 2700 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2711 2701 share_sz);
2712 2702 a_npages = btop(a_len);
2713 2703 a_an_idx = seg_page(seg, a_addr);
2714 2704 spt_addr = sptseg->s_base + ptob(a_an_idx);
2715 2705 ppa_idx = an_idx - a_an_idx;
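/*
 * Editorial worked example, not part of the original source; the
 * numbers are invented.  With a 4M underlying page size, a 4K PAGESIZE,
 * and an MC_LOCK request at addr = seg->s_base + 0x401000 for
 * len = 0x2000:
 *
 *	a_addr   = seg->s_base + 0x400000	(aligned start)
 *	a_len    = 0x400000			(one whole large page)
 *	a_an_idx = 0x400			an_idx = 0x401
 *	ppa_idx  = an_idx - a_an_idx = 1
 *
 * spt_anon_getpages() below fills ppa[] for the entire large page, but
 * only the btopr(len) pages starting at &ppa[ppa_idx] -- the ones the
 * caller actually asked for -- are locked and charged to the project
 * rctl.
 */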
2716 2706
2717 2707 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2718 2708 KM_NOSLEEP)) == NULL) {
2719 2709 return (ENOMEM);
2720 2710 }
2721 2711
2722 2712 /*
2723 2713 * Don't cache any new pages for IO and
2724 2714 * flush any cached pages.
2725 2715 */
2726 2716 mutex_enter(&sptd->spt_lock);
2727 2717 if (sptd->spt_ppa != NULL)
2728 2718 sptd->spt_flags |= DISM_PPA_CHANGED;
2729 2719
2730 2720 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2731 2721 if (sts != 0) {
2732 2722 mutex_exit(&sptd->spt_lock);
2733 2723 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2734 2724 return (sts);
2735 2725 }
2736 2726
2737 2727 mutex_enter(&sp->shm_mlock);
2738 2728 /* enforce locked memory rctl */
2739 2729 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2740 2730
2741 2731 mutex_enter(&p->p_lock);
2742 2732 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2743 2733 mutex_exit(&p->p_lock);
2744 2734 sts = EAGAIN;
2745 2735 } else {
2746 2736 mutex_exit(&p->p_lock);
2747 2737 sts = spt_lockpages(seg, an_idx, npages,
2748 2738 &ppa[ppa_idx], lockmap, pos, &locked);
2749 2739
2750 2740 /*
2751 2741 * correct locked count if not all pages could be
2752 2742 * locked
2753 2743 */
2754 2744 if ((unlocked - locked) > 0) {
2755 2745 rctl_decr_locked_mem(NULL, proj,
2756 2746 (unlocked - locked), 0);
2757 2747 }
2758 2748 }
2759 2749 /*
2760 2750 * unlock pages
2761 2751 */
2762 2752 for (i = 0; i < a_npages; i++)
2763 2753 page_unlock(ppa[i]);
2764 2754 if (sptd->spt_ppa != NULL)
2765 2755 sptd->spt_flags |= DISM_PPA_CHANGED;
2766 2756 mutex_exit(&sp->shm_mlock);
2767 2757 mutex_exit(&sptd->spt_lock);
2768 2758
2769 2759 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2770 2760
2771 2761 } else if (op == MC_UNLOCK) { /* unlock */
2772 2762 page_t **ppa;
2773 2763
2774 2764 mutex_enter(&sptd->spt_lock);
2775 2765 if (shmd->shm_lckpgs == 0) {
2776 2766 mutex_exit(&sptd->spt_lock);
2777 2767 return (0);
2778 2768 }
2779 2769 /*
2780 2770 * Don't cache new IO pages.
2781 2771 */
2782 2772 if (sptd->spt_ppa != NULL)
2783 2773 sptd->spt_flags |= DISM_PPA_CHANGED;
2784 2774
2785 2775 mutex_enter(&sp->shm_mlock);
2786 2776 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2787 2777 if ((ppa = sptd->spt_ppa) != NULL)
2788 2778 sptd->spt_flags |= DISM_PPA_CHANGED;
2789 2779 mutex_exit(&sptd->spt_lock);
2790 2780
2791 2781 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2792 2782 mutex_exit(&sp->shm_mlock);
2793 2783
2794 2784 if (ppa != NULL)
2795 2785 seg_ppurge_wiredpp(ppa);
2796 2786 }
2797 2787 return (sts);
2798 2788 }
2799 2789
2800 2790 /*ARGSUSED*/
2801 2791 int
2802 2792 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2803 2793 {
2804 2794 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2805 2795 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2806 2796 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2807 2797
2808 2798 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2809 2799
2810 2800 /*
2811 2801 * ISM segment is always rw.
2812 2802 */
2813 2803 while (--pgno >= 0)
2814 2804 *protv++ = sptd->spt_prot;
2815 2805 return (0);
2816 2806 }
2817 2807
2818 2808 /*ARGSUSED*/
2819 2809 u_offset_t
2820 2810 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2821 2811 {
2822 2812 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2823 2813
2824 2814 /* Offset does not matter in ISM memory */
2825 2815
2826 2816 return ((u_offset_t)0);
2827 2817 }
2828 2818
2829 2819 /* ARGSUSED */
2830 2820 int
2831 2821 segspt_shmgettype(struct seg *seg, caddr_t addr)
2832 2822 {
2833 2823 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2834 2824 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2835 2825
2836 2826 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2837 2827
2838 2828 /*
2839 2829 * The shared memory mapping is always MAP_SHARED; SWAP is only
2840 2830 * reserved for DISM.
2841 2831 */
2842 2832 return (MAP_SHARED |
2843 2833 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2844 2834 }
2845 2835
2846 2836 /*ARGSUSED*/
2847 2837 int
2848 2838 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2849 2839 {
2850 2840 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2851 2841 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2852 2842
2853 2843 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2854 2844
2855 2845 *vpp = sptd->spt_vp;
2856 2846 return (0);
2857 2847 }
2858 2848
2859 2849 /*
2860 2850 * We need to wait for pending IO to complete to a DISM segment in order for
2861 2851 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2862 2852 * than enough time to wait.
2863 2853 */
2864 2854 static clock_t spt_pcache_wait = 120;
2865 2855
2866 2856 /*ARGSUSED*/
2867 2857 static int
2868 2858 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2869 2859 {
2870 2860 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2871 2861 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2872 2862 struct anon_map *amp;
2873 2863 pgcnt_t pg_idx;
2874 2864 ushort_t gen;
2875 2865 clock_t end_lbolt;
2876 2866 int writer;
2877 2867 page_t **ppa;
2878 2868
2879 2869 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2880 2870
2881 2871 if (behav == MADV_FREE) {
2882 2872 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2883 2873 return (0);
2884 2874
2885 2875 amp = sptd->spt_amp;
2886 2876 pg_idx = seg_page(seg, addr);
2887 2877
2888 2878 mutex_enter(&sptd->spt_lock);
2889 2879 if ((ppa = sptd->spt_ppa) == NULL) {
2890 2880 mutex_exit(&sptd->spt_lock);
2891 2881 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2892 2882 anon_disclaim(amp, pg_idx, len);
2893 2883 ANON_LOCK_EXIT(&amp->a_rwlock);
2894 2884 return (0);
2895 2885 }
2896 2886
2897 2887 sptd->spt_flags |= DISM_PPA_CHANGED;
2898 2888 gen = sptd->spt_gen;
2899 2889
2900 2890 mutex_exit(&sptd->spt_lock);
2901 2891
2902 2892 /*
2903 2893 * Purge all DISM cached pages
2904 2894 */
2905 2895 seg_ppurge_wiredpp(ppa);
2906 2896
2907 2897 /*
2908 2898 * Drop the AS_LOCK so that other threads can grab it
2909 2899 * in the as_pageunlock path and hopefully get the segment
2910 2900 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2911 2901 * to keep this segment resident.
2912 2902 */
2913 2903 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2914 2904 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
2915 2905 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2916 2906
2917 2907 mutex_enter(&sptd->spt_lock);
2918 2908
2919 2909 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2920 2910
2921 2911 /*
2922 2912 * Try to wait for pages to get kicked out of the seg_pcache.
2923 2913 */
2924 2914 while (sptd->spt_gen == gen &&
2925 2915 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2926 2916 ddi_get_lbolt() < end_lbolt) {
2927 2917 if (!cv_timedwait_sig(&sptd->spt_cv,
2928 2918 &sptd->spt_lock, end_lbolt)) {
2929 2919 break;
2930 2920 }
2931 2921 }
2932 2922
2933 2923 mutex_exit(&sptd->spt_lock);
2934 2924
2935 2925 /* Regrab the AS_LOCK and release our hold on the segment */
2936 2926 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2937 2927 writer ? RW_WRITER : RW_READER);
2938 2928 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
2939 2929 if (shmd->shm_softlockcnt <= 0) {
2940 2930 if (AS_ISUNMAPWAIT(seg->s_as)) {
2941 2931 mutex_enter(&seg->s_as->a_contents);
2942 2932 if (AS_ISUNMAPWAIT(seg->s_as)) {
2943 2933 AS_CLRUNMAPWAIT(seg->s_as);
2944 2934 cv_broadcast(&seg->s_as->a_cv);
2945 2935 }
2946 2936 mutex_exit(&seg->s_as->a_contents);
2947 2937 }
2948 2938 }
2949 2939
2950 2940 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2951 2941 anon_disclaim(amp, pg_idx, len);
2952 2942 ANON_LOCK_EXIT(&amp->a_rwlock);
2953 2943 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2954 2944 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2955 2945 int already_set;
2956 2946 ulong_t anon_index;
2957 2947 lgrp_mem_policy_t policy;
2958 2948 caddr_t shm_addr;
2959 2949 size_t share_size;
2960 2950 size_t size;
2961 2951 struct seg *sptseg = shmd->shm_sptseg;
2962 2952 caddr_t sptseg_addr;
2963 2953
2964 2954 /*
2965 2955 * Align address and length to page size of underlying segment
2966 2956 */
2967 2957 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2968 2958 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2969 2959 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2970 2960 share_size);
2971 2961
2972 2962 amp = shmd->shm_amp;
2973 2963 anon_index = seg_page(seg, shm_addr);
2974 2964
2975 2965 /*
2976 2966 * And now we may have to adjust size downward if we have
2977 2967 * exceeded the realsize of the segment or initial anon
2978 2968 * allocations.
2979 2969 */
2980 2970 sptseg_addr = sptseg->s_base + ptob(anon_index);
2981 2971 if ((sptseg_addr + size) >
2982 2972 (sptseg->s_base + sptd->spt_realsize))
2983 2973 size = (sptseg->s_base + sptd->spt_realsize) -
2984 2974 sptseg_addr;
2985 2975
2986 2976 /*
2987 2977 * Set memory allocation policy for this segment
2988 2978 */
2989 2979 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2990 2980 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2991 2981 NULL, 0, len);
2992 2982
2993 2983 /*
2994 2984 * If random memory allocation policy set already,
2995 2985 * don't bother reapplying it.
2996 2986 */
2997 2987 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2998 2988 return (0);
2999 2989
3000 2990 /*
3001 2991 * Mark any existing pages in the given range for
3002 2992 * migration, flushing the I/O page cache, and using the
3003 2993 * underlying segment to calculate the anon index and to get
3004 2994 * the anon map and vnode pointer.
3005 2995 */
3006 2996 if (shmd->shm_softlockcnt > 0)
3007 2997 segspt_purge(seg);
3008 2998
3009 2999 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3010 3000 }
3011 3001
3012 3002 return (0);
3013 3003 }
3014 3004
3015 3005 /*ARGSUSED*/
3016 3006 void
3017 3007 segspt_shmdump(struct seg *seg)
3018 3008 {
3019 3009 /* no-op for ISM segment */
3020 3010 }
3021 3011
3022 3012 /*ARGSUSED*/
3023 3013 static faultcode_t
3024 3014 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
3025 3015 {
3026 3016 return (ENOTSUP);
3027 3017 }
3028 3018
3029 3019 /*
3030 3020 * get a memory ID for an addr in a given segment
3031 3021 */
3032 3022 static int
3033 3023 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3034 3024 {
3035 3025 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3036 3026 struct anon *ap;
3037 3027 size_t anon_index;
3038 3028 struct anon_map *amp = shmd->shm_amp;
3039 3029 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3040 3030 struct seg *sptseg = shmd->shm_sptseg;
3041 3031 anon_sync_obj_t cookie;
3042 3032
3043 3033 anon_index = seg_page(seg, addr);
3044 3034
3045 3035 if (addr > (seg->s_base + sptd->spt_realsize)) {
3046 3036 return (EFAULT);
3047 3037 }
3048 3038
3049 3039 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3050 3040 anon_array_enter(amp, anon_index, &cookie);
3051 3041 ap = anon_get_ptr(amp->ahp, anon_index);
3052 3042 if (ap == NULL) {
3053 3043 struct page *pp;
3054 3044 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3055 3045
3056 3046 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3057 3047 if (pp == NULL) {
3058 3048 anon_array_exit(&cookie);
3059 3049 ANON_LOCK_EXIT(&amp->a_rwlock);
3060 3050 return (ENOMEM);
3061 3051 }
3062 3052 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3063 3053 page_unlock(pp);
3064 3054 }
3065 3055 anon_array_exit(&cookie);
3066 3056 ANON_LOCK_EXIT(&amp->a_rwlock);
3067 3057 memidp->val[0] = (uintptr_t)ap;
3068 3058 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3069 3059 return (0);
3070 3060 }
3071 3061
3072 3062 /*
3073 3063 * Get memory allocation policy info for specified address in given segment
3074 3064 */
3075 3065 static lgrp_mem_policy_info_t *
3076 3066 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3077 3067 {
3078 3068 struct anon_map *amp;
3079 3069 ulong_t anon_index;
3080 3070 lgrp_mem_policy_info_t *policy_info;
3081 3071 struct shm_data *shm_data;
3082 3072
3083 3073 ASSERT(seg != NULL);
3084 3074
3085 3075 /*
3086 3076 * Get anon_map from segshm
3087 3077 *
3088 3078 * Assume that no lock needs to be held on anon_map, since
3089 3079 * it should be protected by its reference count which must be
3090 3080 * nonzero for an existing segment.
3091 3081 * Need to grab readers lock on policy tree though
3092 3082 */
3093 3083 shm_data = (struct shm_data *)seg->s_data;
3094 3084 if (shm_data == NULL)
3095 3085 return (NULL);
3096 3086 amp = shm_data->shm_amp;
3097 3087 ASSERT(amp->refcnt != 0);
3098 3088
3099 3089 /*
3100 3090 * Get policy info
3101 3091 *
3102 3092 * Assume starting anon index of 0
3103 3093 */
3104 3094 anon_index = seg_page(seg, addr);
3105 3095 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3106 3096
3107 3097 return (policy_info);
3108 3098 }
3109 3099
3110 3100 /*ARGSUSED*/
3111 3101 static int
3112 3102 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3113 3103 {
3114 3104 return (0);
3115 3105 }
↓ open down ↓ |
834 lines elided |
↑ open up ↑ |