6345 remove xhat support
--- old/usr/src/uts/common/vm/seg_spt.c
+++ new/usr/src/uts/common/vm/seg_spt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/param.h>
26 26 #include <sys/user.h>
27 27 #include <sys/mman.h>
28 28 #include <sys/kmem.h>
29 29 #include <sys/sysmacros.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/tuneable.h>
33 33 #include <vm/hat.h>
34 34 #include <vm/seg.h>
35 35 #include <vm/as.h>
36 36 #include <vm/anon.h>
37 37 #include <vm/page.h>
38 38 #include <sys/buf.h>
39 39 #include <sys/swap.h>
40 40 #include <sys/atomic.h>
41 41 #include <vm/seg_spt.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/vtrace.h>
44 44 #include <sys/shm.h>
45 45 #include <sys/shm_impl.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/vmsystm.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/project.h>
50 50 #include <sys/tnf_probe.h>
51 51 #include <sys/zone.h>
52 52
53 53 #define SEGSPTADDR (caddr_t)0x0
54 54
55 55 /*
56 56 * # pages used for spt
57 57 */
58 58 size_t spt_used;
59 59
60 60 /*
 61 61  * segspt_minfree is the memory left for the system after ISM
 62 62  * has locked its pages; it is set to 5% of availrmem in
 63 63  * sptcreate when ISM is created. ISM should not use more
 64 64  * than ~90% of availrmem; if it does, then the performance
 65 65  * of the system may decrease. Machines with large memories may
 66 66  * be able to use up more memory for ISM, so we set the default
 67 67  * segspt_minfree to 5% (which gives ISM a maximum of 95% of availrmem).
 68 68  * If somebody wants even more memory for ISM (risking hanging
 69 69  * the system) they can patch segspt_minfree to a smaller number.
70 70 */
71 71 pgcnt_t segspt_minfree = 0;
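/*
 * Illustrative arithmetic only (the figures below are assumed, not part
 * of this change): with the default established in sptcreate() below, a
 * machine whose availrmem is 4,000,000 pages reserves
 *	segspt_minfree = availrmem / 20 = 200,000 pages
 * for the rest of the system, leaving ISM at most 3,800,000 pages,
 * i.e. ~95% of availrmem.
 */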
72 72
73 73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 75 static void segspt_free(struct seg *seg);
76 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78 78
79 79 static void
80 80 segspt_badop()
81 81 {
82 82 panic("segspt_badop called");
83 83 /*NOTREACHED*/
84 84 }
85 85
86 86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
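/*
 * Expansion shown purely for illustration: SEGSPT_BADOP(int) expands to
 * (int (*)())segspt_badop, i.e. segspt_badop cast to a function returning
 * int, so any seg_ops entry below that a shared page table segment does
 * not support will panic via segspt_badop() if it is ever called.
 */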
87 87
88 88 struct seg_ops segspt_ops = {
89 89 SEGSPT_BADOP(int), /* dup */
90 90 segspt_unmap,
91 91 segspt_free,
92 92 SEGSPT_BADOP(int), /* fault */
93 93 SEGSPT_BADOP(faultcode_t), /* faulta */
94 94 SEGSPT_BADOP(int), /* setprot */
95 95 SEGSPT_BADOP(int), /* checkprot */
96 96 SEGSPT_BADOP(int), /* kluster */
97 97 SEGSPT_BADOP(size_t), /* swapout */
98 98 SEGSPT_BADOP(int), /* sync */
99 99 SEGSPT_BADOP(size_t), /* incore */
100 100 SEGSPT_BADOP(int), /* lockop */
101 101 SEGSPT_BADOP(int), /* getprot */
102 102 SEGSPT_BADOP(u_offset_t), /* getoffset */
103 103 SEGSPT_BADOP(int), /* gettype */
104 104 SEGSPT_BADOP(int), /* getvp */
105 105 SEGSPT_BADOP(int), /* advise */
106 106 SEGSPT_BADOP(void), /* dump */
107 107 SEGSPT_BADOP(int), /* pagelock */
108 108 SEGSPT_BADOP(int), /* setpgsz */
109 109 SEGSPT_BADOP(int), /* getmemid */
110 110 segspt_getpolicy, /* getpolicy */
111 111 SEGSPT_BADOP(int), /* capable */
112 112 seg_inherit_notsup /* inherit */
113 113 };
114 114
115 115 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
116 116 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
117 117 static void segspt_shmfree(struct seg *seg);
118 118 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
119 119 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
120 120 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
121 121 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
122 122 register size_t len, register uint_t prot);
123 123 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
124 124 uint_t prot);
125 125 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
126 126 static size_t segspt_shmswapout(struct seg *seg);
127 127 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
128 128 register char *vec);
129 129 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
130 130 int attr, uint_t flags);
131 131 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
132 132 int attr, int op, ulong_t *lockmap, size_t pos);
133 133 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
134 134 uint_t *protv);
135 135 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
136 136 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
137 137 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
138 138 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
139 139 uint_t behav);
140 140 static void segspt_shmdump(struct seg *seg);
141 141 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
142 142 struct page ***, enum lock_type, enum seg_rw);
143 143 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
144 144 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
145 145 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
146 146 static int segspt_shmcapable(struct seg *, segcapability_t);
147 147
148 148 struct seg_ops segspt_shmops = {
149 149 segspt_shmdup,
150 150 segspt_shmunmap,
151 151 segspt_shmfree,
152 152 segspt_shmfault,
153 153 segspt_shmfaulta,
154 154 segspt_shmsetprot,
155 155 segspt_shmcheckprot,
156 156 segspt_shmkluster,
157 157 segspt_shmswapout,
158 158 segspt_shmsync,
159 159 segspt_shmincore,
160 160 segspt_shmlockop,
161 161 segspt_shmgetprot,
162 162 segspt_shmgetoffset,
163 163 segspt_shmgettype,
164 164 segspt_shmgetvp,
165 165 segspt_shmadvise, /* advise */
166 166 segspt_shmdump,
167 167 segspt_shmpagelock,
168 168 segspt_shmsetpgsz,
169 169 segspt_shmgetmemid,
170 170 segspt_shmgetpolicy,
171 171 segspt_shmcapable,
172 172 seg_inherit_notsup
173 173 };
174 174
175 175 static void segspt_purge(struct seg *seg);
176 176 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
177 177 enum seg_rw, int);
178 178 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
179 179 page_t **ppa);
180 180
181 181
182 182
183 183 /*ARGSUSED*/
184 184 int
185 185 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
186 186 uint_t prot, uint_t flags, uint_t share_szc)
187 187 {
188 188 int err;
189 189 struct as *newas;
190 190 struct segspt_crargs sptcargs;
191 191
192 192 #ifdef DEBUG
193 193 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
194 194 tnf_ulong, size, size );
195 195 #endif
 196 196 	if (segspt_minfree == 0)	/* leave min 5% of availrmem */
197 197 segspt_minfree = availrmem/20; /* for the system */
198 198
199 199 if (!hat_supported(HAT_SHARED_PT, (void *)0))
200 200 return (EINVAL);
201 201
202 202 /*
203 203 * get a new as for this shared memory segment
204 204 */
205 205 newas = as_alloc();
206 206 newas->a_proc = NULL;
207 207 sptcargs.amp = amp;
208 208 sptcargs.prot = prot;
209 209 sptcargs.flags = flags;
210 210 sptcargs.szc = share_szc;
211 211 /*
212 212 * create a shared page table (spt) segment
213 213 */
214 214
215 215 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
216 216 as_free(newas);
217 217 return (err);
218 218 }
219 219 *sptseg = sptcargs.seg_spt;
220 220 return (0);
221 221 }
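/*
 * Usage sketch (hypothetical caller, not taken from this change): code on
 * the shmat() path would typically create the shared page table segment
 * roughly as follows, receiving the new segment back through sptseg:
 *
 *	struct seg *sptseg;
 *	int err;
 *
 *	err = sptcreate(size, &sptseg, amp, prot, flags, share_szc);
 *	if (err != 0)
 *		return (err);
 */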
222 222
223 223 void
224 224 sptdestroy(struct as *as, struct anon_map *amp)
225 225 {
226 226
227 227 #ifdef DEBUG
228 228 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
229 229 #endif
230 230 (void) as_unmap(as, SEGSPTADDR, amp->size);
231 231 as_free(as);
232 232 }
233 233
234 234 /*
235 235 * called from seg_free().
236 236 * free (i.e., unlock, unmap, return to free list)
237 237 * all the pages in the given seg.
238 238 */
239 239 void
240 240 segspt_free(struct seg *seg)
241 241 {
242 242 struct spt_data *sptd = (struct spt_data *)seg->s_data;
243 243
244 244 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
245 245
246 246 if (sptd != NULL) {
247 247 if (sptd->spt_realsize)
248 248 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
249 249
250 250 if (sptd->spt_ppa_lckcnt)
251 251 kmem_free(sptd->spt_ppa_lckcnt,
252 252 sizeof (*sptd->spt_ppa_lckcnt)
253 253 * btopr(sptd->spt_amp->size));
254 254 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
255 255 cv_destroy(&sptd->spt_cv);
256 256 mutex_destroy(&sptd->spt_lock);
257 257 kmem_free(sptd, sizeof (*sptd));
258 258 }
259 259 }
260 260
261 261 /*ARGSUSED*/
262 262 static int
263 263 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
264 264 uint_t flags)
265 265 {
266 266 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
267 267
268 268 return (0);
269 269 }
270 270
271 271 /*ARGSUSED*/
272 272 static size_t
273 273 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
274 274 {
275 275 caddr_t eo_seg;
276 276 pgcnt_t npages;
277 277 struct shm_data *shmd = (struct shm_data *)seg->s_data;
278 278 struct seg *sptseg;
279 279 struct spt_data *sptd;
280 280
281 281 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
282 282 #ifdef lint
283 283 seg = seg;
284 284 #endif
285 285 sptseg = shmd->shm_sptseg;
286 286 sptd = sptseg->s_data;
287 287
288 288 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
289 289 eo_seg = addr + len;
290 290 while (addr < eo_seg) {
291 291 /* page exists, and it's locked. */
292 292 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
293 293 SEG_PAGE_ANON;
294 294 addr += PAGESIZE;
295 295 }
296 296 return (len);
297 297 } else {
298 298 struct anon_map *amp = shmd->shm_amp;
299 299 struct anon *ap;
300 300 page_t *pp;
301 301 pgcnt_t anon_index;
302 302 struct vnode *vp;
303 303 u_offset_t off;
304 304 ulong_t i;
305 305 int ret;
306 306 anon_sync_obj_t cookie;
307 307
308 308 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
309 309 anon_index = seg_page(seg, addr);
310 310 npages = btopr(len);
311 311 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
312 312 return (EINVAL);
313 313 }
 314 314 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
315 315 for (i = 0; i < npages; i++, anon_index++) {
316 316 ret = 0;
317 317 anon_array_enter(amp, anon_index, &cookie);
318 318 ap = anon_get_ptr(amp->ahp, anon_index);
319 319 if (ap != NULL) {
320 320 swap_xlate(ap, &vp, &off);
321 321 anon_array_exit(&cookie);
322 322 pp = page_lookup_nowait(vp, off, SE_SHARED);
323 323 if (pp != NULL) {
324 324 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
325 325 page_unlock(pp);
326 326 }
327 327 } else {
328 328 anon_array_exit(&cookie);
329 329 }
330 330 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
331 331 ret |= SEG_PAGE_LOCKED;
332 332 }
333 333 *vec++ = (char)ret;
334 334 }
 335 335 	ANON_LOCK_EXIT(&amp->a_rwlock);
336 336 return (len);
337 337 }
338 338 }
339 339
340 340 static int
341 341 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
342 342 {
343 343 size_t share_size;
344 344
345 345 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
346 346
347 347 /*
348 348 * seg.s_size may have been rounded up to the largest page size
349 349 * in shmat().
 350 350 	 * XXX This should be cleaned up. sptdestroy should take a length
351 351 * argument which should be the same as sptcreate. Then
352 352 * this rounding would not be needed (or is done in shm.c)
353 353 * Only the check for full segment will be needed.
354 354 *
355 355 * XXX -- shouldn't raddr == 0 always? These tests don't seem
356 356 * to be useful at all.
357 357 */
358 358 share_size = page_get_pagesize(seg->s_szc);
359 359 ssize = P2ROUNDUP(ssize, share_size);
360 360
361 361 if (raddr == seg->s_base && ssize == seg->s_size) {
362 362 seg_free(seg);
363 363 return (0);
364 364 } else
365 365 return (EINVAL);
366 366 }
367 367
368 368 int
369 369 segspt_create(struct seg *seg, caddr_t argsp)
370 370 {
371 371 int err;
372 372 caddr_t addr = seg->s_base;
373 373 struct spt_data *sptd;
374 374 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
375 375 struct anon_map *amp = sptcargs->amp;
376 376 struct kshmid *sp = amp->a_sp;
377 377 struct cred *cred = CRED();
378 378 ulong_t i, j, anon_index = 0;
379 379 pgcnt_t npages = btopr(amp->size);
380 380 struct vnode *vp;
381 381 page_t **ppa;
382 382 uint_t hat_flags;
383 383 size_t pgsz;
384 384 pgcnt_t pgcnt;
385 385 caddr_t a;
386 386 pgcnt_t pidx;
387 387 size_t sz;
388 388 proc_t *procp = curproc;
389 389 rctl_qty_t lockedbytes = 0;
390 390 kproject_t *proj;
391 391
392 392 /*
393 393 * We are holding the a_lock on the underlying dummy as,
394 394 * so we can make calls to the HAT layer.
395 395 */
396 396 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
397 397 ASSERT(sp != NULL);
398 398
399 399 #ifdef DEBUG
400 400 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
401 401 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
402 402 #endif
403 403 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
404 404 if (err = anon_swap_adjust(npages))
405 405 return (err);
406 406 }
407 407 err = ENOMEM;
408 408
409 409 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
410 410 goto out1;
411 411
412 412 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
413 413 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
414 414 KM_NOSLEEP)) == NULL)
415 415 goto out2;
416 416 }
417 417
418 418 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
419 419
420 420 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
421 421 goto out3;
422 422
423 423 seg->s_ops = &segspt_ops;
424 424 sptd->spt_vp = vp;
425 425 sptd->spt_amp = amp;
426 426 sptd->spt_prot = sptcargs->prot;
427 427 sptd->spt_flags = sptcargs->flags;
428 428 seg->s_data = (caddr_t)sptd;
429 429 sptd->spt_ppa = NULL;
430 430 sptd->spt_ppa_lckcnt = NULL;
431 431 seg->s_szc = sptcargs->szc;
432 432 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
433 433 sptd->spt_gen = 0;
434 434
 435 435 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
436 436 if (seg->s_szc > amp->a_szc) {
437 437 amp->a_szc = seg->s_szc;
438 438 }
 439 439 	ANON_LOCK_EXIT(&amp->a_rwlock);
440 440
441 441 /*
442 442 * Set policy to affect initial allocation of pages in
443 443 * anon_map_createpages()
444 444 */
445 445 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
446 446 NULL, 0, ptob(npages));
447 447
448 448 if (sptcargs->flags & SHM_PAGEABLE) {
449 449 size_t share_sz;
450 450 pgcnt_t new_npgs, more_pgs;
451 451 struct anon_hdr *nahp;
452 452 zone_t *zone;
453 453
454 454 share_sz = page_get_pagesize(seg->s_szc);
455 455 if (!IS_P2ALIGNED(amp->size, share_sz)) {
456 456 /*
 457 457 			 * We round up the size of the anon array
 458 458 			 * to a 4 M boundary because we always create 4 M
 459 459 			 * of page(s) when locking and faulting pages, so we
 460 460 			 * don't have to check for all corner cases, e.g.
 461 461 			 * whether there is enough space to allocate a 4 M
 462 462 			 * page.
463 463 */
464 464 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
465 465 more_pgs = new_npgs - npages;
466 466
467 467 /*
468 468 * The zone will never be NULL, as a fully created
469 469 * shm always has an owning zone.
470 470 */
471 471 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
472 472 ASSERT(zone != NULL);
473 473 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
474 474 err = ENOMEM;
475 475 goto out4;
476 476 }
477 477
478 478 nahp = anon_create(new_npgs, ANON_SLEEP);
 479 479 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
480 480 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
481 481 ANON_SLEEP);
482 482 anon_release(amp->ahp, npages);
483 483 amp->ahp = nahp;
484 484 ASSERT(amp->swresv == ptob(npages));
485 485 amp->swresv = amp->size = ptob(new_npgs);
 486 486 		ANON_LOCK_EXIT(&amp->a_rwlock);
487 487 npages = new_npgs;
488 488 }
489 489
490 490 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
491 491 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
492 492 sptd->spt_pcachecnt = 0;
493 493 sptd->spt_realsize = ptob(npages);
494 494 sptcargs->seg_spt = seg;
495 495 return (0);
496 496 }
497 497
498 498 /*
499 499 * get array of pages for each anon slot in amp
500 500 */
501 501 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
502 502 seg, addr, S_CREATE, cred)) != 0)
503 503 goto out4;
504 504
505 505 mutex_enter(&sp->shm_mlock);
506 506
507 507 /* May be partially locked, so, count bytes to charge for locking */
508 508 for (i = 0; i < npages; i++)
509 509 if (ppa[i]->p_lckcnt == 0)
510 510 lockedbytes += PAGESIZE;
511 511
512 512 proj = sp->shm_perm.ipc_proj;
513 513
514 514 if (lockedbytes > 0) {
515 515 mutex_enter(&procp->p_lock);
516 516 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
517 517 mutex_exit(&procp->p_lock);
518 518 mutex_exit(&sp->shm_mlock);
519 519 for (i = 0; i < npages; i++)
520 520 page_unlock(ppa[i]);
521 521 err = ENOMEM;
522 522 goto out4;
523 523 }
524 524 mutex_exit(&procp->p_lock);
525 525 }
526 526
527 527 /*
528 528 * addr is initial address corresponding to the first page on ppa list
529 529 */
530 530 for (i = 0; i < npages; i++) {
531 531 /* attempt to lock all pages */
532 532 if (page_pp_lock(ppa[i], 0, 1) == 0) {
533 533 /*
534 534 * if unable to lock any page, unlock all
535 535 * of them and return error
536 536 */
537 537 for (j = 0; j < i; j++)
538 538 page_pp_unlock(ppa[j], 0, 1);
539 539 for (i = 0; i < npages; i++)
540 540 page_unlock(ppa[i]);
541 541 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
542 542 mutex_exit(&sp->shm_mlock);
543 543 err = ENOMEM;
544 544 goto out4;
545 545 }
546 546 }
547 547 mutex_exit(&sp->shm_mlock);
548 548
549 549 /*
550 550 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
551 551 * for the entire life of the segment. For example platforms
552 552 * that do not support Dynamic Reconfiguration.
553 553 */
554 554 hat_flags = HAT_LOAD_SHARE;
555 555 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
556 556 hat_flags |= HAT_LOAD_LOCK;
557 557
558 558 /*
 559 559 	 * Load translations one large page at a time
560 560 * to make sure we don't create mappings bigger than
561 561 * segment's size code in case underlying pages
562 562 * are shared with segvn's segment that uses bigger
563 563 * size code than we do.
564 564 */
565 565 pgsz = page_get_pagesize(seg->s_szc);
566 566 pgcnt = page_get_pagecnt(seg->s_szc);
567 567 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
568 568 sz = MIN(pgsz, ptob(npages - pidx));
569 569 hat_memload_array(seg->s_as->a_hat, a, sz,
570 570 &ppa[pidx], sptd->spt_prot, hat_flags);
571 571 }
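/*
 * Worked example (assumed figures): with an 8 K base page size and a 4 M
 * shared page size code, pgsz is 4 M and pgcnt is 512, so each
 * hat_memload_array() call above maps at most 512 entries of ppa[], and
 * the MIN() above trims the final call when npages is not a multiple of
 * pgcnt.
 */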
572 572
573 573 /*
574 574 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
575 575 * we will leave the pages locked SE_SHARED for the life
576 576 * of the ISM segment. This will prevent any calls to
577 577 * hat_pageunload() on this ISM segment for those platforms.
578 578 */
579 579 if (!(hat_flags & HAT_LOAD_LOCK)) {
580 580 /*
581 581 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
582 582 * we no longer need to hold the SE_SHARED lock on the pages,
583 583 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
584 584 * SE_SHARED lock on the pages as necessary.
585 585 */
586 586 for (i = 0; i < npages; i++)
587 587 page_unlock(ppa[i]);
588 588 }
589 589 sptd->spt_pcachecnt = 0;
590 590 kmem_free(ppa, ((sizeof (page_t *)) * npages));
591 591 sptd->spt_realsize = ptob(npages);
592 592 atomic_add_long(&spt_used, npages);
593 593 sptcargs->seg_spt = seg;
594 594 return (0);
595 595
596 596 out4:
597 597 seg->s_data = NULL;
598 598 kmem_free(vp, sizeof (*vp));
599 599 cv_destroy(&sptd->spt_cv);
600 600 out3:
601 601 mutex_destroy(&sptd->spt_lock);
602 602 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
603 603 kmem_free(ppa, (sizeof (*ppa) * npages));
604 604 out2:
605 605 kmem_free(sptd, sizeof (*sptd));
606 606 out1:
607 607 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
608 608 anon_swap_restore(npages);
609 609 return (err);
610 610 }
611 611
612 612 /*ARGSUSED*/
613 613 void
614 614 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
615 615 {
616 616 struct page *pp;
617 617 struct spt_data *sptd = (struct spt_data *)seg->s_data;
618 618 pgcnt_t npages;
619 619 ulong_t anon_idx;
620 620 struct anon_map *amp;
621 621 struct anon *ap;
622 622 struct vnode *vp;
623 623 u_offset_t off;
624 624 uint_t hat_flags;
625 625 int root = 0;
626 626 pgcnt_t pgs, curnpgs = 0;
627 627 page_t *rootpp;
628 628 rctl_qty_t unlocked_bytes = 0;
629 629 kproject_t *proj;
630 630 kshmid_t *sp;
631 631
632 632 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
633 633
634 634 len = P2ROUNDUP(len, PAGESIZE);
635 635
636 636 npages = btop(len);
637 637
638 638 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
639 639 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
640 640 (sptd->spt_flags & SHM_PAGEABLE)) {
641 641 hat_flags = HAT_UNLOAD_UNMAP;
642 642 }
643 643
644 644 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
645 645
646 646 amp = sptd->spt_amp;
647 647 if (sptd->spt_flags & SHM_PAGEABLE)
648 648 npages = btop(amp->size);
649 649
650 650 ASSERT(amp != NULL);
651 651
652 652 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
653 653 sp = amp->a_sp;
654 654 proj = sp->shm_perm.ipc_proj;
655 655 mutex_enter(&sp->shm_mlock);
656 656 }
657 657 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
658 658 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
659 659 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
660 660 panic("segspt_free_pages: null app");
661 661 /*NOTREACHED*/
662 662 }
663 663 } else {
664 664 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
665 665 == NULL)
666 666 continue;
667 667 }
668 668 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
669 669 swap_xlate(ap, &vp, &off);
670 670
671 671 /*
672 672 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
 673 673 		 * the pages won't have an SE_SHARED lock at this
674 674 * point.
675 675 *
676 676 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
677 677 * the pages are still held SE_SHARED locked from the
678 678 * original segspt_create()
679 679 *
680 680 * Our goal is to get SE_EXCL lock on each page, remove
681 681 * permanent lock on it and invalidate the page.
682 682 */
683 683 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
684 684 if (hat_flags == HAT_UNLOAD_UNMAP)
685 685 pp = page_lookup(vp, off, SE_EXCL);
686 686 else {
687 687 if ((pp = page_find(vp, off)) == NULL) {
688 688 panic("segspt_free_pages: "
689 689 "page not locked");
690 690 /*NOTREACHED*/
691 691 }
692 692 if (!page_tryupgrade(pp)) {
693 693 page_unlock(pp);
694 694 pp = page_lookup(vp, off, SE_EXCL);
695 695 }
696 696 }
697 697 if (pp == NULL) {
698 698 panic("segspt_free_pages: "
699 699 "page not in the system");
700 700 /*NOTREACHED*/
701 701 }
702 702 ASSERT(pp->p_lckcnt > 0);
703 703 page_pp_unlock(pp, 0, 1);
704 704 if (pp->p_lckcnt == 0)
705 705 unlocked_bytes += PAGESIZE;
706 706 } else {
707 707 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
708 708 continue;
709 709 }
710 710 /*
711 711 * It's logical to invalidate the pages here as in most cases
712 712 * these were created by segspt.
713 713 */
714 714 if (pp->p_szc != 0) {
715 715 if (root == 0) {
716 716 ASSERT(curnpgs == 0);
717 717 root = 1;
718 718 rootpp = pp;
719 719 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
720 720 ASSERT(pgs > 1);
721 721 ASSERT(IS_P2ALIGNED(pgs, pgs));
722 722 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
723 723 curnpgs--;
724 724 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
725 725 ASSERT(curnpgs == 1);
726 726 ASSERT(page_pptonum(pp) ==
727 727 page_pptonum(rootpp) + (pgs - 1));
728 728 page_destroy_pages(rootpp);
729 729 root = 0;
730 730 curnpgs = 0;
731 731 } else {
732 732 ASSERT(curnpgs > 1);
733 733 ASSERT(page_pptonum(pp) ==
734 734 page_pptonum(rootpp) + (pgs - curnpgs));
735 735 curnpgs--;
736 736 }
737 737 } else {
738 738 if (root != 0 || curnpgs != 0) {
739 739 panic("segspt_free_pages: bad large page");
740 740 /*NOTREACHED*/
741 741 }
742 742 /*
743 743 * Before destroying the pages, we need to take care
744 744 * of the rctl locked memory accounting. For that
 745 745 			 * we need to calculate the unlocked_bytes.
746 746 */
747 747 if (pp->p_lckcnt > 0)
748 748 unlocked_bytes += PAGESIZE;
749 749 /*LINTED: constant in conditional context */
750 750 VN_DISPOSE(pp, B_INVAL, 0, kcred);
751 751 }
752 752 }
753 753 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
754 754 if (unlocked_bytes > 0)
755 755 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
756 756 mutex_exit(&sp->shm_mlock);
757 757 }
758 758 if (root != 0 || curnpgs != 0) {
759 759 panic("segspt_free_pages: bad large page");
760 760 /*NOTREACHED*/
761 761 }
762 762
763 763 /*
764 764 * mark that pages have been released
765 765 */
766 766 sptd->spt_realsize = 0;
767 767
768 768 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
769 769 atomic_add_long(&spt_used, -npages);
770 770 anon_swap_restore(npages);
771 771 }
772 772 }
773 773
774 774 /*
775 775 * Get memory allocation policy info for specified address in given segment
776 776 */
777 777 static lgrp_mem_policy_info_t *
778 778 segspt_getpolicy(struct seg *seg, caddr_t addr)
779 779 {
780 780 struct anon_map *amp;
781 781 ulong_t anon_index;
782 782 lgrp_mem_policy_info_t *policy_info;
783 783 struct spt_data *spt_data;
784 784
785 785 ASSERT(seg != NULL);
786 786
787 787 /*
788 788 * Get anon_map from segspt
789 789 *
790 790 * Assume that no lock needs to be held on anon_map, since
791 791 * it should be protected by its reference count which must be
792 792 * nonzero for an existing segment
793 793 * Need to grab readers lock on policy tree though
794 794 */
795 795 spt_data = (struct spt_data *)seg->s_data;
796 796 if (spt_data == NULL)
797 797 return (NULL);
798 798 amp = spt_data->spt_amp;
799 799 ASSERT(amp->refcnt != 0);
800 800
801 801 /*
802 802 * Get policy info
803 803 *
804 804 * Assume starting anon index of 0
805 805 */
806 806 anon_index = seg_page(seg, addr);
807 807 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
808 808
809 809 return (policy_info);
810 810 }
811 811
812 812 /*
813 813 * DISM only.
814 814 * Return locked pages over a given range.
815 815 *
816 816 * We will cache all DISM locked pages and save the pplist for the
817 817 * entire segment in the ppa field of the underlying DISM segment structure.
818 818 * Later, during a call to segspt_reclaim() we will use this ppa array
819 819 * to page_unlock() all of the pages and then we will free this ppa list.
820 820 */
821 821 /*ARGSUSED*/
822 822 static int
823 823 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
824 824 struct page ***ppp, enum lock_type type, enum seg_rw rw)
825 825 {
826 826 struct shm_data *shmd = (struct shm_data *)seg->s_data;
827 827 struct seg *sptseg = shmd->shm_sptseg;
828 828 struct spt_data *sptd = sptseg->s_data;
829 829 pgcnt_t pg_idx, npages, tot_npages, npgs;
830 830 struct page **pplist, **pl, **ppa, *pp;
831 831 struct anon_map *amp;
832 832 spgcnt_t an_idx;
833 833 int ret = ENOTSUP;
834 834 uint_t pl_built = 0;
835 835 struct anon *ap;
836 836 struct vnode *vp;
837 837 u_offset_t off;
838 838 pgcnt_t claim_availrmem = 0;
839 839 uint_t szc;
840 840
841 841 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
842 842 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
843 843
844 844 /*
845 845 * We want to lock/unlock the entire ISM segment. Therefore,
 846 846 	 * we will be using the underlying sptseg and its base address
847 847 * and length for the caching arguments.
848 848 */
849 849 ASSERT(sptseg);
850 850 ASSERT(sptd);
851 851
852 852 pg_idx = seg_page(seg, addr);
853 853 npages = btopr(len);
854 854
855 855 /*
856 856 * check if the request is larger than number of pages covered
857 857 * by amp
858 858 */
859 859 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
860 860 *ppp = NULL;
861 861 return (ENOTSUP);
862 862 }
863 863
864 864 if (type == L_PAGEUNLOCK) {
865 865 ASSERT(sptd->spt_ppa != NULL);
866 866
867 867 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
868 868 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
869 869
870 870 /*
871 871 * If someone is blocked while unmapping, we purge
872 872 * segment page cache and thus reclaim pplist synchronously
873 873 * without waiting for seg_pasync_thread. This speeds up
874 874 * unmapping in cases where munmap(2) is called, while
875 875 * raw async i/o is still in progress or where a thread
876 876 * exits on data fault in a multithreaded application.
877 877 */
878 878 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
879 879 (AS_ISUNMAPWAIT(seg->s_as) &&
880 880 shmd->shm_softlockcnt > 0)) {
881 881 segspt_purge(seg);
882 882 }
883 883 return (0);
884 884 }
885 885
886 886 /* The L_PAGELOCK case ... */
887 887
888 888 if (sptd->spt_flags & DISM_PPA_CHANGED) {
889 889 segspt_purge(seg);
890 890 /*
 891 891 		 * for DISM the ppa[] needs to be rebuilt since
 892 892 		 * the number of locked pages could have changed
893 893 */
894 894 *ppp = NULL;
895 895 return (ENOTSUP);
896 896 }
897 897
898 898 /*
899 899 * First try to find pages in segment page cache, without
900 900 * holding the segment lock.
901 901 */
902 902 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
903 903 S_WRITE, SEGP_FORCE_WIRED);
904 904 if (pplist != NULL) {
905 905 ASSERT(sptd->spt_ppa != NULL);
906 906 ASSERT(sptd->spt_ppa == pplist);
907 907 ppa = sptd->spt_ppa;
908 908 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
909 909 if (ppa[an_idx] == NULL) {
910 910 seg_pinactive(seg, NULL, seg->s_base,
911 911 sptd->spt_amp->size, ppa,
912 912 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
913 913 *ppp = NULL;
914 914 return (ENOTSUP);
915 915 }
916 916 if ((szc = ppa[an_idx]->p_szc) != 0) {
917 917 npgs = page_get_pagecnt(szc);
918 918 an_idx = P2ROUNDUP(an_idx + 1, npgs);
919 919 } else {
920 920 an_idx++;
921 921 }
922 922 }
923 923 /*
924 924 * Since we cache the entire DISM segment, we want to
925 925 * set ppp to point to the first slot that corresponds
926 926 * to the requested addr, i.e. pg_idx.
927 927 */
928 928 *ppp = &(sptd->spt_ppa[pg_idx]);
929 929 return (0);
930 930 }
931 931
932 932 mutex_enter(&sptd->spt_lock);
933 933 /*
934 934 * try to find pages in segment page cache with mutex
935 935 */
936 936 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
937 937 S_WRITE, SEGP_FORCE_WIRED);
938 938 if (pplist != NULL) {
939 939 ASSERT(sptd->spt_ppa != NULL);
940 940 ASSERT(sptd->spt_ppa == pplist);
941 941 ppa = sptd->spt_ppa;
942 942 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
943 943 if (ppa[an_idx] == NULL) {
944 944 mutex_exit(&sptd->spt_lock);
945 945 seg_pinactive(seg, NULL, seg->s_base,
946 946 sptd->spt_amp->size, ppa,
947 947 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
948 948 *ppp = NULL;
949 949 return (ENOTSUP);
950 950 }
951 951 if ((szc = ppa[an_idx]->p_szc) != 0) {
952 952 npgs = page_get_pagecnt(szc);
953 953 an_idx = P2ROUNDUP(an_idx + 1, npgs);
954 954 } else {
955 955 an_idx++;
956 956 }
957 957 }
958 958 /*
959 959 * Since we cache the entire DISM segment, we want to
960 960 * set ppp to point to the first slot that corresponds
961 961 * to the requested addr, i.e. pg_idx.
962 962 */
963 963 mutex_exit(&sptd->spt_lock);
964 964 *ppp = &(sptd->spt_ppa[pg_idx]);
965 965 return (0);
966 966 }
967 967 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
968 968 SEGP_FORCE_WIRED) == SEGP_FAIL) {
969 969 mutex_exit(&sptd->spt_lock);
970 970 *ppp = NULL;
971 971 return (ENOTSUP);
972 972 }
973 973
974 974 /*
975 975 * No need to worry about protections because DISM pages are always rw.
976 976 */
977 977 pl = pplist = NULL;
978 978 amp = sptd->spt_amp;
979 979
980 980 /*
981 981 * Do we need to build the ppa array?
982 982 */
983 983 if (sptd->spt_ppa == NULL) {
984 984 pgcnt_t lpg_cnt = 0;
985 985
986 986 pl_built = 1;
987 987 tot_npages = btopr(sptd->spt_amp->size);
988 988
989 989 ASSERT(sptd->spt_pcachecnt == 0);
990 990 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
991 991 pl = pplist;
992 992
 993 993 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
994 994 for (an_idx = 0; an_idx < tot_npages; ) {
995 995 ap = anon_get_ptr(amp->ahp, an_idx);
996 996 /*
997 997 * Cache only mlocked pages. For large pages
998 998 * if one (constituent) page is mlocked
999 999 * all pages for that large page
1000 1000 * are cached also. This is for quick
1001 1001 * lookups of ppa array;
1002 1002 */
1003 1003 if ((ap != NULL) && (lpg_cnt != 0 ||
1004 1004 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1005 1005
1006 1006 swap_xlate(ap, &vp, &off);
1007 1007 pp = page_lookup(vp, off, SE_SHARED);
1008 1008 ASSERT(pp != NULL);
1009 1009 if (lpg_cnt == 0) {
1010 1010 lpg_cnt++;
1011 1011 /*
1012 1012 * For a small page, we are done --
1013 1013 * lpg_count is reset to 0 below.
1014 1014 *
1015 1015 * For a large page, we are guaranteed
1016 1016 * to find the anon structures of all
1017 1017 * constituent pages and a non-zero
1018 1018 * lpg_cnt ensures that we don't test
1019 1019 * for mlock for these. We are done
1020 1020 * when lpg_count reaches (npgs + 1).
1021 1021 * If we are not the first constituent
1022 1022 * page, restart at the first one.
1023 1023 */
1024 1024 npgs = page_get_pagecnt(pp->p_szc);
1025 1025 if (!IS_P2ALIGNED(an_idx, npgs)) {
1026 1026 an_idx = P2ALIGN(an_idx, npgs);
1027 1027 page_unlock(pp);
1028 1028 continue;
1029 1029 }
1030 1030 }
1031 1031 if (++lpg_cnt > npgs)
1032 1032 lpg_cnt = 0;
1033 1033
1034 1034 /*
1035 1035 * availrmem is decremented only
1036 1036 * for unlocked pages
1037 1037 */
1038 1038 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1039 1039 claim_availrmem++;
1040 1040 pplist[an_idx] = pp;
1041 1041 }
1042 1042 an_idx++;
1043 1043 }
 1044 1044 		ANON_LOCK_EXIT(&amp->a_rwlock);
1045 1045
1046 1046 if (claim_availrmem) {
1047 1047 mutex_enter(&freemem_lock);
1048 1048 if (availrmem < tune.t_minarmem + claim_availrmem) {
1049 1049 mutex_exit(&freemem_lock);
1050 1050 ret = ENOTSUP;
1051 1051 claim_availrmem = 0;
1052 1052 goto insert_fail;
1053 1053 } else {
1054 1054 availrmem -= claim_availrmem;
1055 1055 }
1056 1056 mutex_exit(&freemem_lock);
1057 1057 }
1058 1058
1059 1059 sptd->spt_ppa = pl;
1060 1060 } else {
1061 1061 /*
1062 1062 * We already have a valid ppa[].
1063 1063 */
1064 1064 pl = sptd->spt_ppa;
1065 1065 }
1066 1066
1067 1067 ASSERT(pl != NULL);
1068 1068
1069 1069 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1070 1070 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1071 1071 segspt_reclaim);
1072 1072 if (ret == SEGP_FAIL) {
1073 1073 /*
1074 1074 * seg_pinsert failed. We return
1075 1075 * ENOTSUP, so that the as_pagelock() code will
1076 1076 * then try the slower F_SOFTLOCK path.
1077 1077 */
1078 1078 if (pl_built) {
1079 1079 /*
1080 1080 * No one else has referenced the ppa[].
1081 1081 * We created it and we need to destroy it.
1082 1082 */
1083 1083 sptd->spt_ppa = NULL;
1084 1084 }
1085 1085 ret = ENOTSUP;
1086 1086 goto insert_fail;
1087 1087 }
1088 1088
1089 1089 /*
1090 1090 * In either case, we increment softlockcnt on the 'real' segment.
1091 1091 */
1092 1092 sptd->spt_pcachecnt++;
1093 1093 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1094 1094
1095 1095 ppa = sptd->spt_ppa;
1096 1096 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1097 1097 if (ppa[an_idx] == NULL) {
1098 1098 mutex_exit(&sptd->spt_lock);
1099 1099 seg_pinactive(seg, NULL, seg->s_base,
1100 1100 sptd->spt_amp->size,
1101 1101 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1102 1102 *ppp = NULL;
1103 1103 return (ENOTSUP);
1104 1104 }
1105 1105 if ((szc = ppa[an_idx]->p_szc) != 0) {
1106 1106 npgs = page_get_pagecnt(szc);
1107 1107 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1108 1108 } else {
1109 1109 an_idx++;
1110 1110 }
1111 1111 }
1112 1112 /*
1113 1113 * We can now drop the sptd->spt_lock since the ppa[]
 1114 1114 	 * exists and we have incremented pcachecnt.
1115 1115 */
1116 1116 mutex_exit(&sptd->spt_lock);
1117 1117
1118 1118 /*
1119 1119 * Since we cache the entire segment, we want to
1120 1120 * set ppp to point to the first slot that corresponds
1121 1121 * to the requested addr, i.e. pg_idx.
1122 1122 */
1123 1123 *ppp = &(sptd->spt_ppa[pg_idx]);
1124 1124 return (0);
1125 1125
1126 1126 insert_fail:
1127 1127 /*
1128 1128 * We will only reach this code if we tried and failed.
1129 1129 *
1130 1130 * And we can drop the lock on the dummy seg, once we've failed
1131 1131 * to set up a new ppa[].
1132 1132 */
1133 1133 mutex_exit(&sptd->spt_lock);
1134 1134
1135 1135 if (pl_built) {
1136 1136 if (claim_availrmem) {
1137 1137 mutex_enter(&freemem_lock);
1138 1138 availrmem += claim_availrmem;
1139 1139 mutex_exit(&freemem_lock);
1140 1140 }
1141 1141
1142 1142 /*
1143 1143 * We created pl and we need to destroy it.
1144 1144 */
1145 1145 pplist = pl;
1146 1146 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1147 1147 if (pplist[an_idx] != NULL)
1148 1148 page_unlock(pplist[an_idx]);
1149 1149 }
1150 1150 kmem_free(pl, sizeof (page_t *) * tot_npages);
1151 1151 }
1152 1152
1153 1153 if (shmd->shm_softlockcnt <= 0) {
1154 1154 if (AS_ISUNMAPWAIT(seg->s_as)) {
1155 1155 mutex_enter(&seg->s_as->a_contents);
1156 1156 if (AS_ISUNMAPWAIT(seg->s_as)) {
1157 1157 AS_CLRUNMAPWAIT(seg->s_as);
1158 1158 cv_broadcast(&seg->s_as->a_cv);
1159 1159 }
1160 1160 mutex_exit(&seg->s_as->a_contents);
1161 1161 }
1162 1162 }
1163 1163 *ppp = NULL;
1164 1164 return (ret);
1165 1165 }
1166 1166
1167 1167
1168 1168
1169 1169 /*
1170 1170 * return locked pages over a given range.
1171 1171 *
1172 1172 * We will cache the entire ISM segment and save the pplist for the
1173 1173 * entire segment in the ppa field of the underlying ISM segment structure.
1174 1174 * Later, during a call to segspt_reclaim() we will use this ppa array
1175 1175 * to page_unlock() all of the pages and then we will free this ppa list.
1176 1176 */
1177 1177 /*ARGSUSED*/
1178 1178 static int
1179 1179 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1180 1180 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1181 1181 {
1182 1182 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1183 1183 struct seg *sptseg = shmd->shm_sptseg;
1184 1184 struct spt_data *sptd = sptseg->s_data;
1185 1185 pgcnt_t np, page_index, npages;
1186 1186 caddr_t a, spt_base;
1187 1187 struct page **pplist, **pl, *pp;
1188 1188 struct anon_map *amp;
1189 1189 ulong_t anon_index;
1190 1190 int ret = ENOTSUP;
1191 1191 uint_t pl_built = 0;
1192 1192 struct anon *ap;
1193 1193 struct vnode *vp;
1194 1194 u_offset_t off;
1195 1195
1196 1196 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1197 1197 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1198 1198
1199 1199
1200 1200 /*
1201 1201 * We want to lock/unlock the entire ISM segment. Therefore,
 1202 1202 	 * we will be using the underlying sptseg and its base address
1203 1203 * and length for the caching arguments.
1204 1204 */
1205 1205 ASSERT(sptseg);
1206 1206 ASSERT(sptd);
1207 1207
1208 1208 if (sptd->spt_flags & SHM_PAGEABLE) {
1209 1209 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1210 1210 }
1211 1211
1212 1212 page_index = seg_page(seg, addr);
1213 1213 npages = btopr(len);
1214 1214
1215 1215 /*
1216 1216 * check if the request is larger than number of pages covered
1217 1217 * by amp
1218 1218 */
1219 1219 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1220 1220 *ppp = NULL;
1221 1221 return (ENOTSUP);
1222 1222 }
1223 1223
1224 1224 if (type == L_PAGEUNLOCK) {
1225 1225
1226 1226 ASSERT(sptd->spt_ppa != NULL);
1227 1227
1228 1228 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1229 1229 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1230 1230
1231 1231 /*
1232 1232 * If someone is blocked while unmapping, we purge
1233 1233 * segment page cache and thus reclaim pplist synchronously
1234 1234 * without waiting for seg_pasync_thread. This speeds up
1235 1235 * unmapping in cases where munmap(2) is called, while
1236 1236 * raw async i/o is still in progress or where a thread
1237 1237 * exits on data fault in a multithreaded application.
1238 1238 */
1239 1239 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1240 1240 segspt_purge(seg);
1241 1241 }
1242 1242 return (0);
1243 1243 }
1244 1244
1245 1245 /* The L_PAGELOCK case... */
1246 1246
1247 1247 /*
1248 1248 * First try to find pages in segment page cache, without
1249 1249 * holding the segment lock.
1250 1250 */
1251 1251 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1252 1252 S_WRITE, SEGP_FORCE_WIRED);
1253 1253 if (pplist != NULL) {
1254 1254 ASSERT(sptd->spt_ppa == pplist);
1255 1255 ASSERT(sptd->spt_ppa[page_index]);
1256 1256 /*
1257 1257 * Since we cache the entire ISM segment, we want to
1258 1258 * set ppp to point to the first slot that corresponds
1259 1259 * to the requested addr, i.e. page_index.
1260 1260 */
1261 1261 *ppp = &(sptd->spt_ppa[page_index]);
1262 1262 return (0);
1263 1263 }
1264 1264
1265 1265 mutex_enter(&sptd->spt_lock);
1266 1266
1267 1267 /*
1268 1268 * try to find pages in segment page cache
1269 1269 */
1270 1270 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1271 1271 S_WRITE, SEGP_FORCE_WIRED);
1272 1272 if (pplist != NULL) {
1273 1273 ASSERT(sptd->spt_ppa == pplist);
1274 1274 /*
1275 1275 * Since we cache the entire segment, we want to
1276 1276 * set ppp to point to the first slot that corresponds
1277 1277 * to the requested addr, i.e. page_index.
1278 1278 */
1279 1279 mutex_exit(&sptd->spt_lock);
1280 1280 *ppp = &(sptd->spt_ppa[page_index]);
1281 1281 return (0);
1282 1282 }
1283 1283
1284 1284 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1285 1285 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1286 1286 mutex_exit(&sptd->spt_lock);
1287 1287 *ppp = NULL;
1288 1288 return (ENOTSUP);
1289 1289 }
1290 1290
1291 1291 /*
1292 1292 * No need to worry about protections because ISM pages
1293 1293 * are always rw.
1294 1294 */
1295 1295 pl = pplist = NULL;
1296 1296
1297 1297 /*
1298 1298 * Do we need to build the ppa array?
1299 1299 */
1300 1300 if (sptd->spt_ppa == NULL) {
1301 1301 ASSERT(sptd->spt_ppa == pplist);
1302 1302
1303 1303 spt_base = sptseg->s_base;
1304 1304 pl_built = 1;
1305 1305
1306 1306 /*
1307 1307 * availrmem is decremented once during anon_swap_adjust()
1308 1308 * and is incremented during the anon_unresv(), which is
1309 1309 * called from shm_rm_amp() when the segment is destroyed.
1310 1310 */
1311 1311 amp = sptd->spt_amp;
1312 1312 ASSERT(amp != NULL);
1313 1313
1314 1314 /* pcachecnt is protected by sptd->spt_lock */
1315 1315 ASSERT(sptd->spt_pcachecnt == 0);
1316 1316 pplist = kmem_zalloc(sizeof (page_t *)
1317 1317 * btopr(sptd->spt_amp->size), KM_SLEEP);
1318 1318 pl = pplist;
1319 1319
1320 1320 anon_index = seg_page(sptseg, spt_base);
1321 1321
 1322 1322 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1323 1323 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1324 1324 a += PAGESIZE, anon_index++, pplist++) {
1325 1325 ap = anon_get_ptr(amp->ahp, anon_index);
1326 1326 ASSERT(ap != NULL);
1327 1327 swap_xlate(ap, &vp, &off);
1328 1328 pp = page_lookup(vp, off, SE_SHARED);
1329 1329 ASSERT(pp != NULL);
1330 1330 *pplist = pp;
1331 1331 }
 1332 1332 		ANON_LOCK_EXIT(&amp->a_rwlock);
1333 1333
1334 1334 if (a < (spt_base + sptd->spt_amp->size)) {
1335 1335 ret = ENOTSUP;
1336 1336 goto insert_fail;
1337 1337 }
1338 1338 sptd->spt_ppa = pl;
1339 1339 } else {
1340 1340 /*
1341 1341 * We already have a valid ppa[].
1342 1342 */
1343 1343 pl = sptd->spt_ppa;
1344 1344 }
1345 1345
1346 1346 ASSERT(pl != NULL);
1347 1347
1348 1348 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1349 1349 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1350 1350 segspt_reclaim);
1351 1351 if (ret == SEGP_FAIL) {
1352 1352 /*
1353 1353 * seg_pinsert failed. We return
1354 1354 * ENOTSUP, so that the as_pagelock() code will
1355 1355 * then try the slower F_SOFTLOCK path.
1356 1356 */
1357 1357 if (pl_built) {
1358 1358 /*
1359 1359 * No one else has referenced the ppa[].
1360 1360 * We created it and we need to destroy it.
1361 1361 */
1362 1362 sptd->spt_ppa = NULL;
1363 1363 }
1364 1364 ret = ENOTSUP;
1365 1365 goto insert_fail;
1366 1366 }
1367 1367
1368 1368 /*
1369 1369 * In either case, we increment softlockcnt on the 'real' segment.
1370 1370 */
1371 1371 sptd->spt_pcachecnt++;
1372 1372 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1373 1373
1374 1374 /*
1375 1375 * We can now drop the sptd->spt_lock since the ppa[]
 1376 1376 	 * exists and we have incremented pcachecnt.
1377 1377 */
1378 1378 mutex_exit(&sptd->spt_lock);
1379 1379
1380 1380 /*
1381 1381 * Since we cache the entire segment, we want to
1382 1382 * set ppp to point to the first slot that corresponds
1383 1383 * to the requested addr, i.e. page_index.
1384 1384 */
1385 1385 *ppp = &(sptd->spt_ppa[page_index]);
1386 1386 return (0);
1387 1387
1388 1388 insert_fail:
1389 1389 /*
1390 1390 * We will only reach this code if we tried and failed.
1391 1391 *
1392 1392 * And we can drop the lock on the dummy seg, once we've failed
1393 1393 * to set up a new ppa[].
1394 1394 */
1395 1395 mutex_exit(&sptd->spt_lock);
1396 1396
1397 1397 if (pl_built) {
1398 1398 /*
1399 1399 * We created pl and we need to destroy it.
1400 1400 */
1401 1401 pplist = pl;
1402 1402 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1403 1403 while (np) {
1404 1404 page_unlock(*pplist);
1405 1405 np--;
1406 1406 pplist++;
1407 1407 }
1408 1408 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1409 1409 }
1410 1410 if (shmd->shm_softlockcnt <= 0) {
1411 1411 if (AS_ISUNMAPWAIT(seg->s_as)) {
1412 1412 mutex_enter(&seg->s_as->a_contents);
1413 1413 if (AS_ISUNMAPWAIT(seg->s_as)) {
1414 1414 AS_CLRUNMAPWAIT(seg->s_as);
1415 1415 cv_broadcast(&seg->s_as->a_cv);
1416 1416 }
1417 1417 mutex_exit(&seg->s_as->a_contents);
1418 1418 }
1419 1419 }
1420 1420 *ppp = NULL;
1421 1421 return (ret);
1422 1422 }
1423 1423
1424 1424 /*
1425 1425 * purge any cached pages in the I/O page cache
1426 1426 */
1427 1427 static void
1428 1428 segspt_purge(struct seg *seg)
1429 1429 {
1430 1430 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1431 1431 }
1432 1432
1433 1433 static int
1434 1434 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1435 1435 enum seg_rw rw, int async)
1436 1436 {
1437 1437 struct seg *seg = (struct seg *)ptag;
1438 1438 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1439 1439 struct seg *sptseg;
1440 1440 struct spt_data *sptd;
1441 1441 pgcnt_t npages, i, free_availrmem = 0;
1442 1442 int done = 0;
1443 1443
1444 1444 #ifdef lint
1445 1445 addr = addr;
1446 1446 #endif
1447 1447 sptseg = shmd->shm_sptseg;
1448 1448 sptd = sptseg->s_data;
1449 1449 npages = (len >> PAGESHIFT);
1450 1450 ASSERT(npages);
1451 1451 ASSERT(sptd->spt_pcachecnt != 0);
1452 1452 ASSERT(sptd->spt_ppa == pplist);
1453 1453 ASSERT(npages == btopr(sptd->spt_amp->size));
1454 1454 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1455 1455
1456 1456 /*
1457 1457 * Acquire the lock on the dummy seg and destroy the
1458 1458 * ppa array IF this is the last pcachecnt.
1459 1459 */
1460 1460 mutex_enter(&sptd->spt_lock);
1461 1461 if (--sptd->spt_pcachecnt == 0) {
1462 1462 for (i = 0; i < npages; i++) {
1463 1463 if (pplist[i] == NULL) {
1464 1464 continue;
1465 1465 }
1466 1466 if (rw == S_WRITE) {
1467 1467 hat_setrefmod(pplist[i]);
1468 1468 } else {
1469 1469 hat_setref(pplist[i]);
1470 1470 }
1471 1471 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1472 1472 (sptd->spt_ppa_lckcnt[i] == 0))
1473 1473 free_availrmem++;
1474 1474 page_unlock(pplist[i]);
1475 1475 }
1476 1476 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1477 1477 mutex_enter(&freemem_lock);
1478 1478 availrmem += free_availrmem;
1479 1479 mutex_exit(&freemem_lock);
1480 1480 }
1481 1481 /*
 1482 1482 		 * Since we want to cache/uncache the entire ISM segment,
1483 1483 * we will track the pplist in a segspt specific field
1484 1484 * ppa, that is initialized at the time we add an entry to
1485 1485 * the cache.
1486 1486 */
1487 1487 ASSERT(sptd->spt_pcachecnt == 0);
1488 1488 kmem_free(pplist, sizeof (page_t *) * npages);
1489 1489 sptd->spt_ppa = NULL;
1490 1490 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1491 1491 sptd->spt_gen++;
1492 1492 cv_broadcast(&sptd->spt_cv);
1493 1493 done = 1;
1494 1494 }
1495 1495 mutex_exit(&sptd->spt_lock);
1496 1496
1497 1497 /*
1498 1498 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
1499 1499 * may not hold AS lock (in this case async argument is not 0). This
1500 1500 * means if softlockcnt drops to 0 after the decrement below address
1501 1501 * space may get freed. We can't allow it since after softlock
 1502 1502 	 * decrement to 0 we still need to access the as structure for possible
1503 1503 * wakeup of unmap waiters. To prevent the disappearance of as we take
1504 1504 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1505 1505 * this mutex as a barrier to make sure this routine completes before
1506 1506 * segment is freed.
1507 1507 *
1508 1508 * The second complication we have to deal with in async case is a
1509 1509 * possibility of missed wake up of unmap wait thread. When we don't
1510 1510 * hold as lock here we may take a_contents lock before unmap wait
1511 1511 * thread that was first to see softlockcnt was still not 0. As a
1512 1512 * result we'll fail to wake up an unmap wait thread. To avoid this
1513 1513 * race we set nounmapwait flag in as structure if we drop softlockcnt
1514 1514 * to 0 if async is not 0. unmapwait thread
1515 1515 * will not block if this flag is set.
1516 1516 */
1517 1517 if (async)
1518 1518 mutex_enter(&shmd->shm_segfree_syncmtx);
1519 1519
1520 1520 /*
1521 1521 * Now decrement softlockcnt.
1522 1522 */
1523 1523 ASSERT(shmd->shm_softlockcnt > 0);
1524 1524 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1525 1525
1526 1526 if (shmd->shm_softlockcnt <= 0) {
1527 1527 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1528 1528 mutex_enter(&seg->s_as->a_contents);
1529 1529 if (async)
1530 1530 AS_SETNOUNMAPWAIT(seg->s_as);
1531 1531 if (AS_ISUNMAPWAIT(seg->s_as)) {
1532 1532 AS_CLRUNMAPWAIT(seg->s_as);
1533 1533 cv_broadcast(&seg->s_as->a_cv);
1534 1534 }
1535 1535 mutex_exit(&seg->s_as->a_contents);
1536 1536 }
1537 1537 }
1538 1538
1539 1539 if (async)
1540 1540 mutex_exit(&shmd->shm_segfree_syncmtx);
1541 1541
1542 1542 return (done);
1543 1543 }
1544 1544
1545 1545 /*
1546 1546 * Do a F_SOFTUNLOCK call over the range requested.
1547 1547 * The range must have already been F_SOFTLOCK'ed.
1548 1548 *
1549 1549 * The calls to acquire and release the anon map lock mutex were
1550 1550 * removed in order to avoid a deadly embrace during a DR
 1551 1551  * memory delete operation. (E.g. DR blocks while waiting for an
1552 1552 * exclusive lock on a page that is being used for kaio; the
1553 1553 * thread that will complete the kaio and call segspt_softunlock
1554 1554 * blocks on the anon map lock; another thread holding the anon
1555 1555 * map lock blocks on another page lock via the segspt_shmfault
1556 1556 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1557 1557 *
1558 1558 * The appropriateness of the removal is based upon the following:
1559 1559 * 1. If we are holding a segment's reader lock and the page is held
1560 1560 * shared, then the corresponding element in anonmap which points to
1561 1561 * anon struct cannot change and there is no need to acquire the
1562 1562 * anonymous map lock.
1563 1563 * 2. Threads in segspt_softunlock have a reader lock on the segment
1564 1564 * and already have the shared page lock, so we are guaranteed that
1565 1565 * the anon map slot cannot change and therefore can call anon_get_ptr()
1566 1566 * without grabbing the anonymous map lock.
1567 1567 * 3. Threads that softlock a shared page break copy-on-write, even if
 1568 1568  * it's a read. Thus cow faults can be ignored with respect to soft
1569 1569 * unlocking, since the breaking of cow means that the anon slot(s) will
1570 1570 * not be shared.
1571 1571 */
1572 1572 static void
1573 1573 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1574 1574 size_t len, enum seg_rw rw)
1575 1575 {
1576 1576 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1577 1577 struct seg *sptseg;
1578 1578 struct spt_data *sptd;
1579 1579 page_t *pp;
1580 1580 caddr_t adr;
1581 1581 struct vnode *vp;
1582 1582 u_offset_t offset;
1583 1583 ulong_t anon_index;
1584 1584 struct anon_map *amp; /* XXX - for locknest */
1585 1585 struct anon *ap = NULL;
1586 1586 pgcnt_t npages;
1587 1587
1588 1588 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1589 1589
1590 1590 sptseg = shmd->shm_sptseg;
1591 1591 sptd = sptseg->s_data;
1592 1592
1593 1593 /*
1594 1594 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1595 1595 * and therefore their pages are SE_SHARED locked
1596 1596 * for the entire life of the segment.
1597 1597 */
1598 1598 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1599 1599 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1600 1600 goto softlock_decrement;
1601 1601 }
1602 1602
1603 1603 /*
1604 1604 * Any thread is free to do a page_find and
1605 1605 * page_unlock() on the pages within this seg.
1606 1606 *
1607 1607 * We are already holding the as->a_lock on the user's
1608 1608 * real segment, but we need to hold the a_lock on the
1609 1609 * underlying dummy as. This is mostly to satisfy the
1610 1610 * underlying HAT layer.
1611 1611 */
1612 1612 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1613 1613 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1614 1614 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1615 1615
1616 1616 amp = sptd->spt_amp;
1617 1617 ASSERT(amp != NULL);
1618 1618 anon_index = seg_page(sptseg, sptseg_addr);
1619 1619
1620 1620 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1621 1621 ap = anon_get_ptr(amp->ahp, anon_index++);
1622 1622 ASSERT(ap != NULL);
1623 1623 swap_xlate(ap, &vp, &offset);
1624 1624
1625 1625 /*
1626 1626 * Use page_find() instead of page_lookup() to
1627 1627 * find the page since we know that it has a
1628 1628 * "shared" lock.
1629 1629 */
1630 1630 pp = page_find(vp, offset);
1631 1631 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1632 1632 if (pp == NULL) {
1633 1633 panic("segspt_softunlock: "
1634 1634 "addr %p, ap %p, vp %p, off %llx",
1635 1635 (void *)adr, (void *)ap, (void *)vp, offset);
1636 1636 /*NOTREACHED*/
1637 1637 }
1638 1638
1639 1639 if (rw == S_WRITE) {
1640 1640 hat_setrefmod(pp);
1641 1641 } else if (rw != S_OTHER) {
1642 1642 hat_setref(pp);
1643 1643 }
1644 1644 page_unlock(pp);
1645 1645 }
1646 1646
1647 1647 softlock_decrement:
1648 1648 npages = btopr(len);
1649 1649 ASSERT(shmd->shm_softlockcnt >= npages);
1650 1650 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1651 1651 if (shmd->shm_softlockcnt == 0) {
1652 1652 /*
1653 1653 * All SOFTLOCKS are gone. Wakeup any waiting
1654 1654 * unmappers so they can try again to unmap.
1655 1655 * Check for waiters first without the mutex
1656 1656 * held so we don't always grab the mutex on
1657 1657 * softunlocks.
1658 1658 */
1659 1659 if (AS_ISUNMAPWAIT(seg->s_as)) {
1660 1660 mutex_enter(&seg->s_as->a_contents);
1661 1661 if (AS_ISUNMAPWAIT(seg->s_as)) {
1662 1662 AS_CLRUNMAPWAIT(seg->s_as);
1663 1663 cv_broadcast(&seg->s_as->a_cv);
1664 1664 }
1665 1665 mutex_exit(&seg->s_as->a_contents);
1666 1666 }
1667 1667 }
1668 1668 }
1669 1669
1670 1670 int
1671 1671 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1672 1672 {
1673 1673 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1674 1674 struct shm_data *shmd;
1675 1675 struct anon_map *shm_amp = shmd_arg->shm_amp;
1676 1676 struct spt_data *sptd;
1677 1677 int error = 0;
1678 1678
1679 1679 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1680 1680
1681 1681 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1682 1682 if (shmd == NULL)
1683 1683 return (ENOMEM);
1684 1684
1685 1685 shmd->shm_sptas = shmd_arg->shm_sptas;
1686 1686 shmd->shm_amp = shm_amp;
1687 1687 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1688 1688
1689 1689 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1690 1690 NULL, 0, seg->s_size);
1691 1691
1692 1692 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1693 1693
1694 1694 seg->s_data = (void *)shmd;
1695 1695 seg->s_ops = &segspt_shmops;
1696 1696 seg->s_szc = shmd->shm_sptseg->s_szc;
1697 1697 sptd = shmd->shm_sptseg->s_data;
1698 1698
1699 1699 if (sptd->spt_flags & SHM_PAGEABLE) {
1700 1700 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1701 1701 KM_NOSLEEP)) == NULL) {
1702 1702 seg->s_data = (void *)NULL;
1703 1703 kmem_free(shmd, (sizeof (*shmd)));
1704 1704 return (ENOMEM);
1705 1705 }
1706 1706 shmd->shm_lckpgs = 0;
1707 1707 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1708 1708 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1709 1709 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1710 1710 seg->s_size, seg->s_szc)) != 0) {
1711 1711 kmem_free(shmd->shm_vpage,
1712 1712 btopr(shm_amp->size));
1713 1713 }
1714 1714 }
1715 1715 } else {
1716 1716 error = hat_share(seg->s_as->a_hat, seg->s_base,
1717 1717 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1718 1718 seg->s_size, seg->s_szc);
1719 1719 }
1720 1720 if (error) {
1721 1721 seg->s_szc = 0;
1722 1722 seg->s_data = (void *)NULL;
1723 1723 kmem_free(shmd, (sizeof (*shmd)));
1724 1724 } else {
1725 1725 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1726 1726 shm_amp->refcnt++;
1727 1727 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1728 1728 }
1729 1729 return (error);
1730 1730 }
1731 1731
1732 1732 int
1733 1733 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1734 1734 {
1735 1735 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1736 1736 int reclaim = 1;
1737 1737
1738 1738 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1739 1739 retry:
1740 1740 if (shmd->shm_softlockcnt > 0) {
1741 1741 if (reclaim == 1) {
1742 1742 segspt_purge(seg);
1743 1743 reclaim = 0;
1744 1744 goto retry;
1745 1745 }
1746 1746 return (EAGAIN);
1747 1747 }
1748 1748
1749 1749 if (ssize != seg->s_size) {
1750 1750 #ifdef DEBUG
1751 1751 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1752 1752 ssize, seg->s_size);
1753 1753 #endif
1754 1754 return (EINVAL);
1755 1755 }
1756 1756
1757 1757 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1758 1758 NULL, 0);
1759 1759 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1760 1760
1761 1761 seg_free(seg);
1762 1762
1763 1763 return (0);
1764 1764 }
1765 1765
1766 1766 void
1767 1767 segspt_shmfree(struct seg *seg)
1768 1768 {
1769 1769 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1770 1770 struct anon_map *shm_amp = shmd->shm_amp;
1771 1771
1772 1772 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1773 1773
1774 1774 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1775 1775 MC_UNLOCK, NULL, 0);
1776 1776
1777 1777 /*
1778 1778 * Need to increment refcnt when attaching
1779 1779 * and decrement when detaching because of dup().
1780 1780 */
1781 1781 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1782 1782 shm_amp->refcnt--;
1783 1783 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1784 1784
1785 1785 if (shmd->shm_vpage) { /* only for DISM */
1786 1786 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1787 1787 shmd->shm_vpage = NULL;
1788 1788 }
1789 1789
1790 1790 /*
1791 1791 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1792 1792 * still working with this segment without holding as lock.
1793 1793 */
1794 1794 ASSERT(shmd->shm_softlockcnt == 0);
1795 1795 mutex_enter(&shmd->shm_segfree_syncmtx);
1796 1796 mutex_destroy(&shmd->shm_segfree_syncmtx);
1797 1797
1798 1798 kmem_free(shmd, sizeof (*shmd));
1799 1799 }
1800 1800
1801 1801 /*ARGSUSED*/
1802 1802 int
1803 1803 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1804 1804 {
1805 1805 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1806 1806
1807 1807 /*
1808 1808 * Shared page table is more than shared mapping.
1809 1809 * Individual process sharing page tables can't change prot
1810 1810 * because there is only one set of page tables.
1811 1811 * This will be allowed after private page table is
1812 1812 * supported.
1813 1813 */
1814 1814 /* need to return correct status error? */
1815 1815 return (0);
1816 1816 }
1817 1817
1818 1818
1819 1819 faultcode_t
1820 1820 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1821 1821 size_t len, enum fault_type type, enum seg_rw rw)
1822 1822 {
1823 1823 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1824 1824 struct seg *sptseg = shmd->shm_sptseg;
1825 1825 struct as *curspt = shmd->shm_sptas;
1826 1826 struct spt_data *sptd = sptseg->s_data;
1827 1827 pgcnt_t npages;
1828 1828 size_t size;
1829 1829 caddr_t segspt_addr, shm_addr;
1830 1830 page_t **ppa;
1831 1831 int i;
1832 1832 ulong_t an_idx = 0;
1833 1833 int err = 0;
1834 1834 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1835 1835 size_t pgsz;
1836 1836 pgcnt_t pgcnt;
1837 1837 caddr_t a;
1838 1838 pgcnt_t pidx;
1839 1839
1840 1840 #ifdef lint
1841 1841 hat = hat;
1842 1842 #endif
1843 1843 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1844 1844
1845 1845 /*
1846 1846 * Because of the way spt is implemented
1847 1847 * the realsize of the segment does not have to be
1848 1848 * equal to the segment size itself. The segment size is
1849 1849 * often in multiples of a page size larger than PAGESIZE.
1850 1850 * The realsize is rounded up to the nearest PAGESIZE
1851 1851 * based on what the user requested. This is a bit of
1852 1852 * ugliness that is historical but not easily fixed
1853 1853 * without re-designing the higher levels of ISM.
1854 1854 */
1855 1855 ASSERT(addr >= seg->s_base);
1856 1856 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1857 1857 return (FC_NOMAP);
1858 1858 /*
1859 1859 * For all of the following cases except F_PROT, we need to
1860 1860 * make any necessary adjustments to addr and len
1861 1861 * and get all of the necessary page_t's into an array called ppa[].
1862 1862 *
1863 1863 * The code in shmat() forces base addr and len of ISM segment
1864 1864 * to be aligned to largest page size supported. Therefore,
1865 1865 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1866 1866 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1867 1867 * in large pagesize chunks, or else we will screw up the HAT
1868 1868 * layer by calling hat_memload_array() with differing page sizes
1869 1869 * over a given virtual range.
1870 1870 */
1871 1871 pgsz = page_get_pagesize(sptseg->s_szc);
1872 1872 pgcnt = page_get_pagecnt(sptseg->s_szc);
1873 1873 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1874 1874 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1875 1875 npages = btopr(size);
1876 1876
1877 1877 /*
1878 1878 * Now we need to convert from addr in segshm to addr in segspt.
1879 1879 */
1880 1880 an_idx = seg_page(seg, shm_addr);
1881 1881 segspt_addr = sptseg->s_base + ptob(an_idx);
1882 1882
1883 1883 ASSERT((segspt_addr + ptob(npages)) <=
1884 1884 (sptseg->s_base + sptd->spt_realsize));
1885 1885 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
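/*
 * Worked example with illustrative numbers (not from the source): assume a
 * 4M underlying ISM page size, 4K PAGESIZE, and seg->s_base 4M-aligned as
 * shmat() guarantees.  For a fault at addr = seg->s_base + 0x5000 with
 * len = 0x3000:
 *
 *	shm_addr    = P2ALIGN(addr, 0x400000)              = seg->s_base
 *	size        = P2ROUNDUP(0x5000 + 0x3000, 0x400000) = 0x400000
 *	npages      = btopr(0x400000)                       = 1024
 *	an_idx      = seg_page(seg, shm_addr)               = 0
 *	segspt_addr = sptseg->s_base + ptob(0)              = sptseg->s_base
 *
 * so the entire 4M chunk containing the faulting range is handled below in
 * large-page-sized hat_memload_array() calls.
 */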
1886 1886
1887 1887 switch (type) {
1888 1888
1889 1889 case F_SOFTLOCK:
1890 1890
1891 1891 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1892 1892 /*
1893 1893 * Fall through to the F_INVAL case to load up the hat layer
1894 1894 * entries with the HAT_LOAD_LOCK flag.
1895 1895 */
1896 1896 /* FALLTHRU */
1897 1897 case F_INVAL:
1898 1898
1899 1899 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1900 1900 return (FC_NOMAP);
1901 1901
1902 1902 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1903 1903
1904 1904 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1905 1905 if (err != 0) {
1906 1906 if (type == F_SOFTLOCK) {
1907 1907 atomic_add_long((ulong_t *)(
1908 1908 &(shmd->shm_softlockcnt)), -npages);
1909 1909 }
1910 1910 goto dism_err;
1911 1911 }
1912 1912 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1913 1913 a = segspt_addr;
1914 1914 pidx = 0;
1915 1915 if (type == F_SOFTLOCK) {
1916 1916
1917 1917 /*
1918 1918 * Load up the translation keeping it
1919 1919 * locked and don't unlock the page.
1920 1920 */
1921 1921 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1922 1922 hat_memload_array(sptseg->s_as->a_hat,
1923 1923 a, pgsz, &ppa[pidx], sptd->spt_prot,
1924 1924 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1925 1925 }
1926 1926 } else {
1927 - if (hat == seg->s_as->a_hat) {
1927 + /*
1928 + * Migrate pages marked for migration
1929 + */
1930 + if (lgrp_optimizations())
1931 + page_migrate(seg, shm_addr, ppa, npages);
1928 1932
1929 - /*
1930 - * Migrate pages marked for migration
1931 - */
1932 - if (lgrp_optimizations())
1933 - page_migrate(seg, shm_addr, ppa,
1934 - npages);
1935 -
1936 - /* CPU HAT */
1937 - for (; pidx < npages;
1938 - a += pgsz, pidx += pgcnt) {
1939 - hat_memload_array(sptseg->s_as->a_hat,
1940 - a, pgsz, &ppa[pidx],
1941 - sptd->spt_prot,
1942 - HAT_LOAD_SHARE);
1943 - }
1944 - } else {
1945 - /* XHAT. Pass real address */
1946 - hat_memload_array(hat, shm_addr,
1947 - size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);
1933 + for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1934 + hat_memload_array(sptseg->s_as->a_hat,
1935 + a, pgsz, &ppa[pidx],
1936 + sptd->spt_prot,
1937 + HAT_LOAD_SHARE);
1948 1938 }
1949 1939
1950 1940 /*
1951 1941 * And now drop the SE_SHARED lock(s).
1952 1942 */
1953 1943 if (dyn_ism_unmap) {
1954 1944 for (i = 0; i < npages; i++) {
1955 1945 page_unlock(ppa[i]);
1956 1946 }
1957 1947 }
1958 1948 }
1959 1949
1960 1950 if (!dyn_ism_unmap) {
1961 1951 if (hat_share(seg->s_as->a_hat, shm_addr,
1962 1952 curspt->a_hat, segspt_addr, ptob(npages),
1963 1953 seg->s_szc) != 0) {
1964 1954 panic("hat_share err in DISM fault");
1965 1955 /* NOTREACHED */
1966 1956 }
1967 1957 if (type == F_INVAL) {
1968 1958 for (i = 0; i < npages; i++) {
1969 1959 page_unlock(ppa[i]);
1970 1960 }
1971 1961 }
1972 1962 }
1973 1963 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1974 1964 dism_err:
1975 1965 kmem_free(ppa, npages * sizeof (page_t *));
1976 1966 return (err);
1977 1967
1978 1968 case F_SOFTUNLOCK:
1979 1969
1980 1970 /*
1981 1971 * This is a bit ugly, we pass in the real seg pointer,
1982 1972 * but the segspt_addr is the virtual address within the
1983 1973 * dummy seg.
1984 1974 */
1985 1975 segspt_softunlock(seg, segspt_addr, size, rw);
1986 1976 return (0);
1987 1977
1988 1978 case F_PROT:
1989 1979
1990 1980 /*
1991 1981 * This takes care of the unusual case where a user
1992 1982 * allocates a stack in shared memory and a register
1993 1983 * window overflow is written to that stack page before
1994 1984 * it is otherwise modified.
1995 1985 *
1996 1986 * We can get away with this because ISM segments are
1997 1987 * always rw. Other than this unusual case, there
1998 1988 * should be no instances of protection violations.
1999 1989 */
2000 1990 return (0);
2001 1991
2002 1992 default:
2003 1993 #ifdef DEBUG
2004 1994 panic("segspt_dismfault default type?");
2005 1995 #else
2006 1996 return (FC_NOMAP);
2007 1997 #endif
2008 1998 }
2009 1999 }
2010 2000
2011 2001
2012 2002 faultcode_t
2013 2003 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2014 2004 size_t len, enum fault_type type, enum seg_rw rw)
2015 2005 {
2016 2006 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2017 2007 struct seg *sptseg = shmd->shm_sptseg;
2018 2008 struct as *curspt = shmd->shm_sptas;
2019 2009 struct spt_data *sptd = sptseg->s_data;
2020 2010 pgcnt_t npages;
2021 2011 size_t size;
2022 2012 caddr_t sptseg_addr, shm_addr;
2023 2013 page_t *pp, **ppa;
2024 2014 int i;
2025 2015 u_offset_t offset;
2026 2016 ulong_t anon_index = 0;
2027 2017 struct vnode *vp;
2028 2018 struct anon_map *amp; /* XXX - for locknest */
2029 2019 struct anon *ap = NULL;
2030 2020 size_t pgsz;
2031 2021 pgcnt_t pgcnt;
2032 2022 caddr_t a;
2033 2023 pgcnt_t pidx;
2034 2024 size_t sz;
2035 2025
2036 2026 #ifdef lint
2037 2027 hat = hat;
2038 2028 #endif
2039 2029
2040 2030 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2041 2031
2042 2032 if (sptd->spt_flags & SHM_PAGEABLE) {
2043 2033 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2044 2034 }
2045 2035
2046 2036 /*
2047 2037 * Because of the way spt is implemented
2048 2038 * the realsize of the segment does not have to be
2049 2039 * equal to the segment size itself. The segment size is
2050 2040 * often in multiples of a page size larger than PAGESIZE.
2051 2041 * The realsize is rounded up to the nearest PAGESIZE
2052 2042 * based on what the user requested. This is a bit of
2053 2043 * ugliness that is historical but not easily fixed
2054 2044 * without re-designing the higher levels of ISM.
2055 2045 */
2056 2046 ASSERT(addr >= seg->s_base);
2057 2047 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2058 2048 return (FC_NOMAP);
2059 2049 /*
2060 2050 * For all of the following cases except F_PROT, we need to
2061 2051 * make any necessary adjustments to addr and len
2062 2052 * and get all of the necessary page_t's into an array called ppa[].
2063 2053 *
2064 2054 * The code in shmat() forces base addr and len of ISM segment
2065 2055 * to be aligned to largest page size supported. Therefore,
2066 2056 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2067 2057 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2068 2058 * in large pagesize chunks, or else we will screw up the HAT
2069 2059 * layer by calling hat_memload_array() with differing page sizes
2070 2060 * over a given virtual range.
2071 2061 */
2072 2062 pgsz = page_get_pagesize(sptseg->s_szc);
2073 2063 pgcnt = page_get_pagecnt(sptseg->s_szc);
2074 2064 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2075 2065 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2076 2066 npages = btopr(size);
2077 2067
2078 2068 /*
2079 2069 * Now we need to convert from addr in segshm to addr in segspt.
2080 2070 */
2081 2071 anon_index = seg_page(seg, shm_addr);
2082 2072 sptseg_addr = sptseg->s_base + ptob(anon_index);
2083 2073
2084 2074 /*
2085 2075 * And now we may have to adjust npages downward if we have
2086 2076 * exceeded the realsize of the segment or initial anon
2087 2077 * allocations.
2088 2078 */
2089 2079 if ((sptseg_addr + ptob(npages)) >
2090 2080 (sptseg->s_base + sptd->spt_realsize))
2091 2081 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2092 2082
2093 2083 npages = btopr(size);
2094 2084
2095 2085 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2096 2086 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2097 2087
2098 2088 switch (type) {
2099 2089
2100 2090 case F_SOFTLOCK:
2101 2091
2102 2092 /*
2103 2093 * availrmem is decremented once during anon_swap_adjust()
2104 2094 * and is incremented during the anon_unresv(), which is
2105 2095 * called from shm_rm_amp() when the segment is destroyed.
2106 2096 */
2107 2097 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2108 2098 /*
2109 2099 * Some platforms assume that ISM pages are SE_SHARED
2110 2100 * locked for the entire life of the segment.
2111 2101 */
2112 2102 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2113 2103 return (0);
2114 2104 /*
2115 2105 * Fall through to the F_INVAL case to load up the hat layer
2116 2106 * entries with the HAT_LOAD_LOCK flag.
2117 2107 */
2118 2108
2119 2109 /* FALLTHRU */
2120 2110 case F_INVAL:
2121 2111
2122 2112 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2123 2113 return (FC_NOMAP);
2124 2114
2125 2115 /*
2126 2116 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2127 2117 * may still rely on this call to hat_share(). That
2128 2118 * would imply that those hats can fault on a
2129 2119 * HAT_LOAD_LOCK translation, which would seem
2130 2120 * contradictory.
2131 2121 */
2132 2122 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2133 2123 if (hat_share(seg->s_as->a_hat, seg->s_base,
2134 2124 curspt->a_hat, sptseg->s_base,
2135 2125 sptseg->s_size, sptseg->s_szc) != 0) {
2136 2126 panic("hat_share error in ISM fault");
2137 2127 /*NOTREACHED*/
2138 2128 }
2139 2129 return (0);
2140 2130 }
2141 2131 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2142 2132
2143 2133 /*
2144 2134 * I see no need to lock the real seg,
2145 2135 * here, because all of our work will be on the underlying
2146 2136 * dummy seg.
2147 2137 *
2148 2138 * sptseg_addr and npages now account for large pages.
2149 2139 */
2150 2140 amp = sptd->spt_amp;
2151 2141 ASSERT(amp != NULL);
2152 2142 anon_index = seg_page(sptseg, sptseg_addr);
2153 2143
2154 2144 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2155 2145 for (i = 0; i < npages; i++) {
2156 2146 ap = anon_get_ptr(amp->ahp, anon_index++);
2157 2147 ASSERT(ap != NULL);
2158 2148 swap_xlate(ap, &vp, &offset);
2159 2149 pp = page_lookup(vp, offset, SE_SHARED);
2160 2150 ASSERT(pp != NULL);
2161 2151 ppa[i] = pp;
2162 2152 }
2163 2153 ANON_LOCK_EXIT(&amp->a_rwlock);
2164 2154 ASSERT(i == npages);
2165 2155
2166 2156 /*
2167 2157 * We are already holding the as->a_lock on the user's
2168 2158 * real segment, but we need to hold the a_lock on the
2169 2159 * underlying dummy as. This is mostly to satisfy the
2170 2160 * underlying HAT layer.
2171 2161 */
2172 2162 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2173 2163 a = sptseg_addr;
2174 2164 pidx = 0;
2175 2165 if (type == F_SOFTLOCK) {
2176 2166 /*
2177 2167 * Load up the translation keeping it
2178 2168 * locked and don't unlock the page.
2179 2169 */
2180 2170 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2181 2171 sz = MIN(pgsz, ptob(npages - pidx));
2182 2172 hat_memload_array(sptseg->s_as->a_hat, a,
2183 2173 sz, &ppa[pidx], sptd->spt_prot,
2184 2174 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2185 2175 }
2186 2176 } else {
2187 - if (hat == seg->s_as->a_hat) {
2177 + /*
2178 + * Migrate pages marked for migration.
2179 + */
2180 + if (lgrp_optimizations())
2181 + page_migrate(seg, shm_addr, ppa, npages);
2188 2182
2189 - /*
2190 - * Migrate pages marked for migration.
2191 - */
2192 - if (lgrp_optimizations())
2193 - page_migrate(seg, shm_addr, ppa,
2194 - npages);
2195 -
2196 - /* CPU HAT */
2197 - for (; pidx < npages;
2198 - a += pgsz, pidx += pgcnt) {
2199 - sz = MIN(pgsz, ptob(npages - pidx));
2200 - hat_memload_array(sptseg->s_as->a_hat,
2201 - a, sz, &ppa[pidx],
2202 - sptd->spt_prot, HAT_LOAD_SHARE);
2203 - }
2204 - } else {
2205 - /* XHAT. Pass real address */
2206 - hat_memload_array(hat, shm_addr,
2207 - ptob(npages), ppa, sptd->spt_prot,
2208 - HAT_LOAD_SHARE);
2183 + for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2184 + sz = MIN(pgsz, ptob(npages - pidx));
2185 + hat_memload_array(sptseg->s_as->a_hat,
2186 + a, sz, &ppa[pidx],
2187 + sptd->spt_prot, HAT_LOAD_SHARE);
2209 2188 }
2210 2189
2211 2190 /*
2212 2191 * And now drop the SE_SHARED lock(s).
2213 2192 */
2214 2193 for (i = 0; i < npages; i++)
2215 2194 page_unlock(ppa[i]);
2216 2195 }
2217 2196 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2218 2197
2219 2198 kmem_free(ppa, sizeof (page_t *) * npages);
2220 2199 return (0);
2221 2200 case F_SOFTUNLOCK:
2222 2201
2223 2202 /*
2224 2203 * This is a bit ugly, we pass in the real seg pointer,
2225 2204 * but the sptseg_addr is the virtual address within the
2226 2205 * dummy seg.
2227 2206 */
2228 2207 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2229 2208 return (0);
2230 2209
2231 2210 case F_PROT:
2232 2211
2233 2212 /*
2234 2213 * This takes care of the unusual case where a user
2235 2214 * allocates a stack in shared memory and a register
2236 2215 * window overflow is written to that stack page before
2237 2216 * it is otherwise modified.
2238 2217 *
2239 2218 * We can get away with this because ISM segments are
2240 2219 * always rw. Other than this unusual case, there
2241 2220 * should be no instances of protection violations.
2242 2221 */
2243 2222 return (0);
2244 2223
2245 2224 default:
2246 2225 #ifdef DEBUG
2247 2226 cmn_err(CE_WARN, "segspt_shmfault default type?");
2248 2227 #endif
2249 2228 return (FC_NOMAP);
2250 2229 }
2251 2230 }
2252 2231
2253 2232 /*ARGSUSED*/
2254 2233 static faultcode_t
2255 2234 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2256 2235 {
2257 2236 return (0);
2258 2237 }
2259 2238
2260 2239 /*ARGSUSED*/
2261 2240 static int
2262 2241 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2263 2242 {
2264 2243 return (0);
2265 2244 }
2266 2245
2267 2246 /*ARGSUSED*/
2268 2247 static size_t
2269 2248 segspt_shmswapout(struct seg *seg)
2270 2249 {
2271 2250 return (0);
2272 2251 }
2273 2252
2274 2253 /*
2275 2254 * duplicate the shared page tables
2276 2255 */
2277 2256 int
2278 2257 segspt_shmdup(struct seg *seg, struct seg *newseg)
2279 2258 {
2280 2259 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2281 2260 struct anon_map *amp = shmd->shm_amp;
2282 2261 struct shm_data *shmd_new;
2283 2262 struct seg *spt_seg = shmd->shm_sptseg;
2284 2263 struct spt_data *sptd = spt_seg->s_data;
2285 2264 int error = 0;
2286 2265
2287 2266 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2288 2267
2289 2268 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2290 2269 newseg->s_data = (void *)shmd_new;
2291 2270 shmd_new->shm_sptas = shmd->shm_sptas;
2292 2271 shmd_new->shm_amp = amp;
2293 2272 shmd_new->shm_sptseg = shmd->shm_sptseg;
2294 2273 newseg->s_ops = &segspt_shmops;
2295 2274 newseg->s_szc = seg->s_szc;
2296 2275 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2297 2276
2298 2277 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2299 2278 amp->refcnt++;
2300 2279 ANON_LOCK_EXIT(&amp->a_rwlock);
2301 2280
2302 2281 if (sptd->spt_flags & SHM_PAGEABLE) {
2303 2282 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2304 2283 shmd_new->shm_lckpgs = 0;
2305 2284 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2306 2285 if ((error = hat_share(newseg->s_as->a_hat,
2307 2286 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2308 2287 seg->s_size, seg->s_szc)) != 0) {
2309 2288 kmem_free(shmd_new->shm_vpage,
2310 2289 btopr(amp->size));
2311 2290 }
2312 2291 }
2313 2292 return (error);
2314 2293 } else {
2315 2294 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2316 2295 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2317 2296 seg->s_szc));
2318 2297
2319 2298 }
2320 2299 }
2321 2300
2322 2301 /*ARGSUSED*/
2323 2302 int
2324 2303 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2325 2304 {
2326 2305 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2327 2306 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2328 2307
2329 2308 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2330 2309
2331 2310 /*
2332 2311 * ISM segment is always rw.
2333 2312 */
2334 2313 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2335 2314 }
2336 2315
2337 2316 /*
2338 2317 * Return an array of locked large pages, for empty slots allocate
2339 2318 * private zero-filled anon pages.
2340 2319 */
2341 2320 static int
2342 2321 spt_anon_getpages(
2343 2322 struct seg *sptseg,
2344 2323 caddr_t sptaddr,
2345 2324 size_t len,
2346 2325 page_t *ppa[])
2347 2326 {
2348 2327 struct spt_data *sptd = sptseg->s_data;
2349 2328 struct anon_map *amp = sptd->spt_amp;
2350 2329 enum seg_rw rw = sptd->spt_prot;
2351 2330 uint_t szc = sptseg->s_szc;
2352 2331 size_t pg_sz, share_sz = page_get_pagesize(szc);
2353 2332 pgcnt_t lp_npgs;
2354 2333 caddr_t lp_addr, e_sptaddr;
2355 2334 uint_t vpprot, ppa_szc = 0;
2356 2335 struct vpage *vpage = NULL;
2357 2336 ulong_t j, ppa_idx;
2358 2337 int err, ierr = 0;
2359 2338 pgcnt_t an_idx;
2360 2339 anon_sync_obj_t cookie;
2361 2340 int anon_locked = 0;
2362 2341 pgcnt_t amp_pgs;
2363 2342
2364 2343
2365 2344 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2366 2345 ASSERT(len != 0);
2367 2346
2368 2347 pg_sz = share_sz;
2369 2348 lp_npgs = btop(pg_sz);
2370 2349 lp_addr = sptaddr;
2371 2350 e_sptaddr = sptaddr + len;
2372 2351 an_idx = seg_page(sptseg, sptaddr);
2373 2352 ppa_idx = 0;
2374 2353
2375 2354 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2376 2355
2377 2356 amp_pgs = page_get_pagecnt(amp->a_szc);
2378 2357
2379 2358 /*CONSTCOND*/
2380 2359 while (1) {
2381 2360 for (; lp_addr < e_sptaddr;
2382 2361 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2383 2362
2384 2363 /*
2385 2364 * If we're currently locked, and we get to a new
2386 2365 * page, unlock our current anon chunk.
2387 2366 */
2388 2367 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2389 2368 anon_array_exit(&cookie);
2390 2369 anon_locked = 0;
2391 2370 }
2392 2371 if (!anon_locked) {
2393 2372 anon_array_enter(amp, an_idx, &cookie);
2394 2373 anon_locked = 1;
2395 2374 }
2396 2375 ppa_szc = (uint_t)-1;
2397 2376 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2398 2377 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2399 2378 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2400 2379
2401 2380 if (ierr != 0) {
2402 2381 if (ierr > 0) {
2403 2382 err = FC_MAKE_ERR(ierr);
2404 2383 goto lpgs_err;
2405 2384 }
2406 2385 break;
2407 2386 }
2408 2387 }
2409 2388 if (lp_addr == e_sptaddr) {
2410 2389 break;
2411 2390 }
2412 2391 ASSERT(lp_addr < e_sptaddr);
2413 2392
2414 2393 /*
2415 2394 * ierr == -1 means we failed to allocate a large page.
2416 2395 * so do a size down operation.
2417 2396 *
2418 2397 * ierr == -2 means some other process that privately shares
2419 2398 * pages with this process has allocated a larger page and we
2420 2399 * need to retry with larger pages. So do a size up
2421 2400 * operation. This relies on the fact that large pages are
2422 2401 * never partially shared i.e. if we share any constituent
2423 2402 * page of a large page with another process we must share the
2424 2403 * entire large page. Note this cannot happen for SOFTLOCK
2425 2404 * case, unless current address (lpaddr) is at the beginning
2426 2405 * of the next page size boundary because the other process
2427 2406 * couldn't have relocated locked pages.
2428 2407 */
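/*
 * Illustrative example (page sizes assumed, sun4u-style 8K/64K/512K/4M):
 * with szc 3 == 4M, ierr == -1 retries this chunk one size down (szc 2
 * when segvn_anypgsz allows stepping, otherwise szc 0 or the existing
 * ppa_szc), while ierr == -2 climbs back up toward the size the other
 * process has already allocated, as reported in ppa_szc.
 */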
2429 2408 ASSERT(ierr == -1 || ierr == -2);
2430 2409 if (segvn_anypgsz) {
2431 2410 ASSERT(ierr == -2 || szc != 0);
2432 2411 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2433 2412 szc = (ierr == -1) ? szc - 1 : szc + 1;
2434 2413 } else {
2435 2414 /*
2436 2415 * For faults and segvn_anypgsz == 0
2437 2416 * we need to be careful not to loop forever
2438 2417 * if existing page is found with szc other
2439 2418 * than 0 or seg->s_szc. This could be due
2440 2419 * to page relocations on behalf of DR or
2441 2420 * more likely large page creation. For this
2442 2421 * case simply re-size to existing page's szc
2443 2422 * if returned by anon_map_getpages().
2444 2423 */
2445 2424 if (ppa_szc == (uint_t)-1) {
2446 2425 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2447 2426 } else {
2448 2427 ASSERT(ppa_szc <= sptseg->s_szc);
2449 2428 ASSERT(ierr == -2 || ppa_szc < szc);
2450 2429 ASSERT(ierr == -1 || ppa_szc > szc);
2451 2430 szc = ppa_szc;
2452 2431 }
2453 2432 }
2454 2433 pg_sz = page_get_pagesize(szc);
2455 2434 lp_npgs = btop(pg_sz);
2456 2435 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2457 2436 }
2458 2437 if (anon_locked) {
2459 2438 anon_array_exit(&cookie);
2460 2439 }
2461 2440 ANON_LOCK_EXIT(&amp->a_rwlock);
2462 2441 return (0);
2463 2442
2464 2443 lpgs_err:
2465 2444 if (anon_locked) {
2466 2445 anon_array_exit(&cookie);
2467 2446 }
2468 2447 ANON_LOCK_EXIT(&amp->a_rwlock);
2469 2448 for (j = 0; j < ppa_idx; j++)
2470 2449 page_unlock(ppa[j]);
2471 2450 return (err);
2472 2451 }
2473 2452
2474 2453 /*
2475 2454 * count the number of bytes in a set of spt pages that are currently not
2476 2455 * locked
2477 2456 */
2478 2457 static rctl_qty_t
2479 2458 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2480 2459 {
2481 2460 ulong_t i;
2482 2461 rctl_qty_t unlocked = 0;
2483 2462
2484 2463 for (i = 0; i < npages; i++) {
2485 2464 if (ppa[i]->p_lckcnt == 0)
2486 2465 unlocked += PAGESIZE;
2487 2466 }
2488 2467 return (unlocked);
2489 2468 }
2490 2469
2491 2470 extern u_longlong_t randtick(void);
2492 2471 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2493 2472 #define NLCK (NCPU_P2)
2494 2473 /* Random number with a range [0, n-1], n must be power of two */
2495 2474 #define RAND_P2(n) \
2496 2475 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
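/*
 * Illustrative arithmetic (NCPU_P2 value assumed, not from the source): on
 * a system where NCPU_P2 is 64, NLCK is 64 and RAND_P2(NLCK) lies in
 * [0, 63], so each batch reserved by spt_lockpages() below covers between
 * 64 and 127 locks.  Mixing curthread bits with randtick() keeps competing
 * threads from picking identical batch sizes and re-colliding on
 * freemem_lock at the same cadence.
 */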
2497 2476
2498 2477 int
2499 2478 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2500 2479 page_t **ppa, ulong_t *lockmap, size_t pos,
2501 2480 rctl_qty_t *locked)
2502 2481 {
2503 2482 struct shm_data *shmd = seg->s_data;
2504 2483 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2505 2484 ulong_t i;
2506 2485 int kernel;
2507 2486 pgcnt_t nlck = 0;
2508 2487 int rv = 0;
2509 2488 int use_reserved = 1;
2510 2489
2511 2490 /* return the number of bytes actually locked */
2512 2491 *locked = 0;
2513 2492
2514 2493 /*
2515 2494 * To avoid contention on freemem_lock, availrmem and pages_locked
2516 2495 * global counters are updated only every nlck locked pages instead of
2517 2496 * every time. Reserve nlck locks up front and deduct from this
2518 2497 * reservation for each page that requires a lock. When the reservation
2519 2498 * is consumed, reserve again. nlck is randomized, so the competing
2520 2499 * threads do not fall into a cyclic lock contention pattern. When
2521 2500 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2522 2501 * is used to lock pages.
2523 2502 */
2524 2503 for (i = 0; i < npages; anon_index++, pos++, i++) {
2525 2504 if (nlck == 0 && use_reserved == 1) {
2526 2505 nlck = NLCK + RAND_P2(NLCK);
2527 2506 /* if fewer loops left, decrease nlck */
2528 2507 nlck = MIN(nlck, npages - i);
2529 2508 /*
2530 2509 * Reserve nlck locks up front and deduct from this
2531 2510 * reservation for each page that requires a lock. When
2532 2511 * the reservation is consumed, reserve again.
2533 2512 */
2534 2513 mutex_enter(&freemem_lock);
2535 2514 if ((availrmem - nlck) < pages_pp_maximum) {
2536 2515 /* Do not do advance memory reserves */
2537 2516 use_reserved = 0;
2538 2517 } else {
2539 2518 availrmem -= nlck;
2540 2519 pages_locked += nlck;
2541 2520 }
2542 2521 mutex_exit(&freemem_lock);
2543 2522 }
2544 2523 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2545 2524 if (sptd->spt_ppa_lckcnt[anon_index] <
2546 2525 (ushort_t)DISM_LOCK_MAX) {
2547 2526 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2548 2527 (ushort_t)DISM_LOCK_MAX) {
2549 2528 cmn_err(CE_WARN,
2550 2529 "DISM page lock limit "
2551 2530 "reached on DISM offset 0x%lx\n",
2552 2531 anon_index << PAGESHIFT);
2553 2532 }
2554 2533 kernel = (sptd->spt_ppa &&
2555 2534 sptd->spt_ppa[anon_index]);
2556 2535 if (!page_pp_lock(ppa[i], 0, kernel ||
2557 2536 use_reserved)) {
2558 2537 sptd->spt_ppa_lckcnt[anon_index]--;
2559 2538 rv = EAGAIN;
2560 2539 break;
2561 2540 }
2562 2541 /* if this is a newly locked page, count it */
2563 2542 if (ppa[i]->p_lckcnt == 1) {
2564 2543 if (kernel == 0 && use_reserved == 1)
2565 2544 nlck--;
2566 2545 *locked += PAGESIZE;
2567 2546 }
2568 2547 shmd->shm_lckpgs++;
2569 2548 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2570 2549 if (lockmap != NULL)
2571 2550 BT_SET(lockmap, pos);
2572 2551 }
2573 2552 }
2574 2553 }
2575 2554 /* Return unused lock reservation */
2576 2555 if (nlck != 0 && use_reserved == 1) {
2577 2556 mutex_enter(&freemem_lock);
2578 2557 availrmem += nlck;
2579 2558 pages_locked -= nlck;
2580 2559 mutex_exit(&freemem_lock);
2581 2560 }
2582 2561
2583 2562 return (rv);
2584 2563 }
2585 2564
2586 2565 int
2587 2566 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2588 2567 rctl_qty_t *unlocked)
2589 2568 {
2590 2569 struct shm_data *shmd = seg->s_data;
2591 2570 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2592 2571 struct anon_map *amp = sptd->spt_amp;
2593 2572 struct anon *ap;
2594 2573 struct vnode *vp;
2595 2574 u_offset_t off;
2596 2575 struct page *pp;
2597 2576 int kernel;
2598 2577 anon_sync_obj_t cookie;
2599 2578 ulong_t i;
2600 2579 pgcnt_t nlck = 0;
2601 2580 pgcnt_t nlck_limit = NLCK;
2602 2581
2603 2582 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2604 2583 for (i = 0; i < npages; i++, anon_index++) {
2605 2584 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2606 2585 anon_array_enter(amp, anon_index, &cookie);
2607 2586 ap = anon_get_ptr(amp->ahp, anon_index);
2608 2587 ASSERT(ap);
2609 2588
2610 2589 swap_xlate(ap, &vp, &off);
2611 2590 anon_array_exit(&cookie);
2612 2591 pp = page_lookup(vp, off, SE_SHARED);
2613 2592 ASSERT(pp);
2614 2593 /*
2615 2594 * availrmem is decremented only for pages which are not
2616 2595 * in seg pcache, for pages in seg pcache availrmem was
2617 2596 * decremented in _dismpagelock()
2618 2597 */
2619 2598 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2620 2599 ASSERT(pp->p_lckcnt > 0);
2621 2600
2622 2601 /*
2623 2602 * unlock page but do not change availrmem, we do it
2624 2603 * ourselves every nlck loops.
2625 2604 */
2626 2605 page_pp_unlock(pp, 0, 1);
2627 2606 if (pp->p_lckcnt == 0) {
2628 2607 if (kernel == 0)
2629 2608 nlck++;
2630 2609 *unlocked += PAGESIZE;
2631 2610 }
2632 2611 page_unlock(pp);
2633 2612 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2634 2613 sptd->spt_ppa_lckcnt[anon_index]--;
2635 2614 shmd->shm_lckpgs--;
2636 2615 }
2637 2616
2638 2617 /*
2639 2618 * To reduce freemem_lock contention, do not update availrmem
2640 2619 * until at least NLCK pages have been unlocked.
2641 2620 * 1. No need to update if nlck is zero
2642 2621 * 2. Always update if the last iteration
2643 2622 */
2644 2623 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2645 2624 mutex_enter(&freemem_lock);
2646 2625 availrmem += nlck;
2647 2626 pages_locked -= nlck;
2648 2627 mutex_exit(&freemem_lock);
2649 2628 nlck = 0;
2650 2629 nlck_limit = NLCK + RAND_P2(NLCK);
2651 2630 }
2652 2631 }
2653 2632 ANON_LOCK_EXIT(&amp->a_rwlock);
2654 2633
2655 2634 return (0);
2656 2635 }
2657 2636
2658 2637 /*ARGSUSED*/
2659 2638 static int
2660 2639 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2661 2640 int attr, int op, ulong_t *lockmap, size_t pos)
2662 2641 {
2663 2642 struct shm_data *shmd = seg->s_data;
2664 2643 struct seg *sptseg = shmd->shm_sptseg;
2665 2644 struct spt_data *sptd = sptseg->s_data;
2666 2645 struct kshmid *sp = sptd->spt_amp->a_sp;
2667 2646 pgcnt_t npages, a_npages;
2668 2647 page_t **ppa;
2669 2648 pgcnt_t an_idx, a_an_idx, ppa_idx;
2670 2649 caddr_t spt_addr, a_addr; /* spt and aligned address */
2671 2650 size_t a_len; /* aligned len */
2672 2651 size_t share_sz;
2673 2652 ulong_t i;
2674 2653 int sts = 0;
2675 2654 rctl_qty_t unlocked = 0;
2676 2655 rctl_qty_t locked = 0;
2677 2656 struct proc *p = curproc;
2678 2657 kproject_t *proj;
2679 2658
2680 2659 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2681 2660 ASSERT(sp != NULL);
2682 2661
2683 2662 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2684 2663 return (0);
2685 2664 }
2686 2665
2687 2666 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2688 2667 an_idx = seg_page(seg, addr);
2689 2668 npages = btopr(len);
2690 2669
2691 2670 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2692 2671 return (ENOMEM);
2693 2672 }
2694 2673
2695 2674 /*
2696 2675 * A shm's project never changes, so no lock needed.
2697 2676 * The shm has a hold on the project, so it will not go away.
2698 2677 * Since we have a mapping to shm within this zone, we know
2699 2678 * that the zone will not go away.
2700 2679 */
2701 2680 proj = sp->shm_perm.ipc_proj;
2702 2681
2703 2682 if (op == MC_LOCK) {
2704 2683
2705 2684 /*
2706 2685 * Need to align addr and size request if they are not
2707 2686 * aligned so we can always allocate large page(s) however
2708 2687 * we only lock what was requested in initial request.
2709 2688 */
2710 2689 share_sz = page_get_pagesize(sptseg->s_szc);
2711 2690 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2712 2691 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2713 2692 share_sz);
2714 2693 a_npages = btop(a_len);
2715 2694 a_an_idx = seg_page(seg, a_addr);
2716 2695 spt_addr = sptseg->s_base + ptob(a_an_idx);
2717 2696 ppa_idx = an_idx - a_an_idx;
2718 2697
2719 2698 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2720 2699 KM_NOSLEEP)) == NULL) {
2721 2700 return (ENOMEM);
2722 2701 }
2723 2702
2724 2703 /*
2725 2704 * Don't cache any new pages for IO and
2726 2705 * flush any cached pages.
2727 2706 */
2728 2707 mutex_enter(&sptd->spt_lock);
2729 2708 if (sptd->spt_ppa != NULL)
2730 2709 sptd->spt_flags |= DISM_PPA_CHANGED;
2731 2710
2732 2711 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2733 2712 if (sts != 0) {
2734 2713 mutex_exit(&sptd->spt_lock);
2735 2714 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2736 2715 return (sts);
2737 2716 }
2738 2717
2739 2718 mutex_enter(&sp->shm_mlock);
2740 2719 /* enforce locked memory rctl */
2741 2720 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2742 2721
2743 2722 mutex_enter(&p->p_lock);
2744 2723 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2745 2724 mutex_exit(&p->p_lock);
2746 2725 sts = EAGAIN;
2747 2726 } else {
2748 2727 mutex_exit(&p->p_lock);
2749 2728 sts = spt_lockpages(seg, an_idx, npages,
2750 2729 &ppa[ppa_idx], lockmap, pos, &locked);
2751 2730
2752 2731 /*
2753 2732 * correct locked count if not all pages could be
2754 2733 * locked
2755 2734 */
2756 2735 if ((unlocked - locked) > 0) {
2757 2736 rctl_decr_locked_mem(NULL, proj,
2758 2737 (unlocked - locked), 0);
2759 2738 }
2760 2739 }
2761 2740 /*
2762 2741 * unlock pages
2763 2742 */
2764 2743 for (i = 0; i < a_npages; i++)
2765 2744 page_unlock(ppa[i]);
2766 2745 if (sptd->spt_ppa != NULL)
2767 2746 sptd->spt_flags |= DISM_PPA_CHANGED;
2768 2747 mutex_exit(&sp->shm_mlock);
2769 2748 mutex_exit(&sptd->spt_lock);
2770 2749
2771 2750 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2772 2751
2773 2752 } else if (op == MC_UNLOCK) { /* unlock */
2774 2753 page_t **ppa;
2775 2754
2776 2755 mutex_enter(&sptd->spt_lock);
2777 2756 if (shmd->shm_lckpgs == 0) {
2778 2757 mutex_exit(&sptd->spt_lock);
2779 2758 return (0);
2780 2759 }
2781 2760 /*
2782 2761 * Don't cache new IO pages.
2783 2762 */
2784 2763 if (sptd->spt_ppa != NULL)
2785 2764 sptd->spt_flags |= DISM_PPA_CHANGED;
2786 2765
2787 2766 mutex_enter(&sp->shm_mlock);
2788 2767 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2789 2768 if ((ppa = sptd->spt_ppa) != NULL)
2790 2769 sptd->spt_flags |= DISM_PPA_CHANGED;
2791 2770 mutex_exit(&sptd->spt_lock);
2792 2771
2793 2772 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2794 2773 mutex_exit(&sp->shm_mlock);
2795 2774
2796 2775 if (ppa != NULL)
2797 2776 seg_ppurge_wiredpp(ppa);
2798 2777 }
2799 2778 return (sts);
2800 2779 }
2801 2780
2802 2781 /*ARGSUSED*/
2803 2782 int
2804 2783 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2805 2784 {
2806 2785 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2807 2786 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2808 2787 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2809 2788
2810 2789 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2811 2790
2812 2791 /*
2813 2792 * ISM segment is always rw.
2814 2793 */
2815 2794 while (--pgno >= 0)
2816 2795 *protv++ = sptd->spt_prot;
2817 2796 return (0);
2818 2797 }
2819 2798
2820 2799 /*ARGSUSED*/
2821 2800 u_offset_t
2822 2801 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2823 2802 {
2824 2803 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2825 2804
2826 2805 /* Offset does not matter in ISM memory */
2827 2806
2828 2807 return ((u_offset_t)0);
2829 2808 }
2830 2809
2831 2810 /* ARGSUSED */
2832 2811 int
2833 2812 segspt_shmgettype(struct seg *seg, caddr_t addr)
2834 2813 {
2835 2814 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2836 2815 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2837 2816
2838 2817 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2839 2818
2840 2819 /*
2841 2820 * The shared memory mapping is always MAP_SHARED, SWAP is only
2842 2821 * reserved for DISM
2843 2822 */
2844 2823 return (MAP_SHARED |
2845 2824 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2846 2825 }
2847 2826
2848 2827 /*ARGSUSED*/
2849 2828 int
2850 2829 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2851 2830 {
2852 2831 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2853 2832 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2854 2833
2855 2834 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2856 2835
2857 2836 *vpp = sptd->spt_vp;
2858 2837 return (0);
2859 2838 }
2860 2839
2861 2840 /*
2862 2841 * We need to wait for pending IO to complete to a DISM segment in order for
2863 2842 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2864 2843 * than enough time to wait.
2865 2844 */
2866 2845 static clock_t spt_pcache_wait = 120;
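/*
 * Illustrative arithmetic (hz value assumed): with the common hz of 100,
 * end_lbolt below becomes ddi_get_lbolt() + 100 * 120 = 12000 ticks, i.e.
 * roughly two minutes of waiting for the purge to take effect.
 */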
2867 2846
2868 2847 /*ARGSUSED*/
2869 2848 static int
2870 2849 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2871 2850 {
2872 2851 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2873 2852 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2874 2853 struct anon_map *amp;
2875 2854 pgcnt_t pg_idx;
2876 2855 ushort_t gen;
2877 2856 clock_t end_lbolt;
2878 2857 int writer;
2879 2858 page_t **ppa;
2880 2859
2881 2860 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2882 2861
2883 2862 if (behav == MADV_FREE) {
2884 2863 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2885 2864 return (0);
2886 2865
2887 2866 amp = sptd->spt_amp;
2888 2867 pg_idx = seg_page(seg, addr);
2889 2868
2890 2869 mutex_enter(&sptd->spt_lock);
2891 2870 if ((ppa = sptd->spt_ppa) == NULL) {
2892 2871 mutex_exit(&sptd->spt_lock);
2893 2872 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2894 2873 anon_disclaim(amp, pg_idx, len);
2895 2874 ANON_LOCK_EXIT(&amp->a_rwlock);
2896 2875 return (0);
2897 2876 }
2898 2877
2899 2878 sptd->spt_flags |= DISM_PPA_CHANGED;
2900 2879 gen = sptd->spt_gen;
2901 2880
2902 2881 mutex_exit(&sptd->spt_lock);
2903 2882
2904 2883 /*
2905 2884 * Purge all DISM cached pages
2906 2885 */
2907 2886 seg_ppurge_wiredpp(ppa);
2908 2887
2909 2888 /*
2910 2889 * Drop the AS_LOCK so that other threads can grab it
2911 2890 * in the as_pageunlock path and hopefully get the segment
2912 2891 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2913 2892 * to keep this segment resident.
2914 2893 */
2915 2894 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2916 2895 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2917 2896 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2918 2897
2919 2898 mutex_enter(&sptd->spt_lock);
2920 2899
2921 2900 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2922 2901
2923 2902 /*
2924 2903 * Try to wait for pages to get kicked out of the seg_pcache.
2925 2904 */
2926 2905 while (sptd->spt_gen == gen &&
2927 2906 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2928 2907 ddi_get_lbolt() < end_lbolt) {
2929 2908 if (!cv_timedwait_sig(&sptd->spt_cv,
2930 2909 &sptd->spt_lock, end_lbolt)) {
2931 2910 break;
2932 2911 }
2933 2912 }
2934 2913
2935 2914 mutex_exit(&sptd->spt_lock);
2936 2915
2937 2916 /* Regrab the AS_LOCK and release our hold on the segment */
2938 2917 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2939 2918 writer ? RW_WRITER : RW_READER);
2940 2919 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2941 2920 if (shmd->shm_softlockcnt <= 0) {
2942 2921 if (AS_ISUNMAPWAIT(seg->s_as)) {
2943 2922 mutex_enter(&seg->s_as->a_contents);
2944 2923 if (AS_ISUNMAPWAIT(seg->s_as)) {
2945 2924 AS_CLRUNMAPWAIT(seg->s_as);
2946 2925 cv_broadcast(&seg->s_as->a_cv);
2947 2926 }
2948 2927 mutex_exit(&seg->s_as->a_contents);
2949 2928 }
2950 2929 }
2951 2930
2952 2931 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2953 2932 anon_disclaim(amp, pg_idx, len);
2954 2933 ANON_LOCK_EXIT(&amp->a_rwlock);
2955 2934 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2956 2935 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2957 2936 int already_set;
2958 2937 ulong_t anon_index;
2959 2938 lgrp_mem_policy_t policy;
2960 2939 caddr_t shm_addr;
2961 2940 size_t share_size;
2962 2941 size_t size;
2963 2942 struct seg *sptseg = shmd->shm_sptseg;
2964 2943 caddr_t sptseg_addr;
2965 2944
2966 2945 /*
2967 2946 * Align address and length to page size of underlying segment
2968 2947 */
2969 2948 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2970 2949 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2971 2950 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2972 2951 share_size);
2973 2952
2974 2953 amp = shmd->shm_amp;
2975 2954 anon_index = seg_page(seg, shm_addr);
2976 2955
2977 2956 /*
2978 2957 * And now we may have to adjust size downward if we have
2979 2958 * exceeded the realsize of the segment or initial anon
2980 2959 * allocations.
2981 2960 */
2982 2961 sptseg_addr = sptseg->s_base + ptob(anon_index);
2983 2962 if ((sptseg_addr + size) >
2984 2963 (sptseg->s_base + sptd->spt_realsize))
2985 2964 size = (sptseg->s_base + sptd->spt_realsize) -
2986 2965 sptseg_addr;
2987 2966
2988 2967 /*
2989 2968 * Set memory allocation policy for this segment
2990 2969 */
2991 2970 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2992 2971 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2993 2972 NULL, 0, len);
2994 2973
2995 2974 /*
2996 2975 * If random memory allocation policy set already,
2997 2976 * don't bother reapplying it.
2998 2977 */
2999 2978 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
3000 2979 return (0);
3001 2980
3002 2981 /*
3003 2982 * Mark any existing pages in the given range for
3004 2983 * migration, flushing the I/O page cache, and using
3005 2984 * underlying segment to calculate anon index and get
3006 2985 * anonmap and vnode pointer from
3007 2986 */
3008 2987 if (shmd->shm_softlockcnt > 0)
3009 2988 segspt_purge(seg);
3010 2989
3011 2990 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3012 2991 }
3013 2992
3014 2993 return (0);
3015 2994 }
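/*
 * Illustrative user-level sketch, not part of the original file: the
 * MADV_FREE path above is what a DISM consumer reaches from userland.
 * Flag and advice names follow shmat(2)/madvise(3C); the helper name,
 * layout and (minimal) error handling are hypothetical.
 */
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>

int
dism_free_range(size_t segsz, size_t len)
{
	/* Create a pageable (DISM) System V segment and attach it. */
	int id = shmget(IPC_PRIVATE, segsz, IPC_CREAT | 0600);
	void *base;

	if (id == -1)
		return (-1);
	if ((base = shmat(id, NULL, SHM_PAGEABLE)) == (void *)-1)
		return (-1);
	/* ... touch and use the memory, then give a range back ... */
	return (madvise((caddr_t)base, len, MADV_FREE));
}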
3016 2995
3017 2996 /*ARGSUSED*/
3018 2997 void
3019 2998 segspt_shmdump(struct seg *seg)
3020 2999 {
3021 3000 /* no-op for ISM segment */
3022 3001 }
3023 3002
3024 3003 /*ARGSUSED*/
3025 3004 static faultcode_t
3026 3005 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
3027 3006 {
3028 3007 return (ENOTSUP);
3029 3008 }
3030 3009
3031 3010 /*
3032 3011 * get a memory ID for an addr in a given segment
3033 3012 */
3034 3013 static int
3035 3014 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3036 3015 {
3037 3016 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3038 3017 struct anon *ap;
3039 3018 size_t anon_index;
3040 3019 struct anon_map *amp = shmd->shm_amp;
3041 3020 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3042 3021 struct seg *sptseg = shmd->shm_sptseg;
3043 3022 anon_sync_obj_t cookie;
3044 3023
3045 3024 anon_index = seg_page(seg, addr);
3046 3025
3047 3026 if (addr > (seg->s_base + sptd->spt_realsize)) {
3048 3027 return (EFAULT);
3049 3028 }
3050 3029
3051 3030 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3052 3031 anon_array_enter(amp, anon_index, &cookie);
3053 3032 ap = anon_get_ptr(amp->ahp, anon_index);
3054 3033 if (ap == NULL) {
3055 3034 struct page *pp;
3056 3035 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3057 3036
3058 3037 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3059 3038 if (pp == NULL) {
3060 3039 anon_array_exit(&cookie);
3061 3040 ANON_LOCK_EXIT(&amp->a_rwlock);
3062 3041 return (ENOMEM);
3063 3042 }
3064 3043 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3065 3044 page_unlock(pp);
3066 3045 }
3067 3046 anon_array_exit(&cookie);
3068 3047 ANON_LOCK_EXIT(&amp->a_rwlock);
3069 3048 memidp->val[0] = (uintptr_t)ap;
3070 3049 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3071 3050 return (0);
3072 3051 }
3073 3052
3074 3053 /*
3075 3054 * Get memory allocation policy info for specified address in given segment
3076 3055 */
3077 3056 static lgrp_mem_policy_info_t *
3078 3057 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3079 3058 {
3080 3059 struct anon_map *amp;
3081 3060 ulong_t anon_index;
3082 3061 lgrp_mem_policy_info_t *policy_info;
3083 3062 struct shm_data *shm_data;
3084 3063
3085 3064 ASSERT(seg != NULL);
3086 3065
3087 3066 /*
3088 3067 * Get anon_map from segshm
3089 3068 *
3090 3069 * Assume that no lock needs to be held on anon_map, since
3091 3070 * it should be protected by its reference count which must be
3092 3071 * nonzero for an existing segment
3093 3072 * Need to grab readers lock on policy tree though
3094 3073 */
3095 3074 shm_data = (struct shm_data *)seg->s_data;
3096 3075 if (shm_data == NULL)
3097 3076 return (NULL);
3098 3077 amp = shm_data->shm_amp;
3099 3078 ASSERT(amp->refcnt != 0);
3100 3079
3101 3080 /*
3102 3081 * Get policy info
3103 3082 *
3104 3083 * Assume starting anon index of 0
3105 3084 */
3106 3085 anon_index = seg_page(seg, addr);
3107 3086 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3108 3087
3109 3088 return (policy_info);
3110 3089 }
3111 3090
3112 3091 /*ARGSUSED*/
3113 3092 static int
3114 3093 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3115 3094 {
3116 3095 return (0);
3117 3096 }