6146 seg_inherit_notsup is redundant
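Summary: the change in this webrev is the removal of the explicit .inherit = seg_inherit_notsup entries from the segspt_ops and segspt_shmops tables (old lines 112 and 172 in the listing below). Both tables use C99 designated initializers, so an omitted member is zero-filled and .inherit is simply left NULL. Dropping the stub is therefore safe provided the common segment-driver dispatch treats a NULL inherit op as "operation not supported" -- that fallback is assumed here and is not shown in this diff. The following minimal, self-contained sketch illustrates the pattern; the demo_* names and the demo_inherit wrapper are hypothetical stand-ins, not the real <vm/seg.h> types or the vm/seg.c dispatch code:

#include <stdio.h>
#include <errno.h>

struct seg;                             /* opaque stand-in for the real struct seg */

struct seg_ops {                        /* trimmed-down stand-in for the real ops table */
        int (*unmap)(struct seg *, void *, size_t);
        int (*inherit)(struct seg *, void *, size_t, unsigned int);
};

static int
demo_unmap(struct seg *seg, void *addr, size_t len)
{
        (void) seg; (void) addr; (void) len;
        return (0);
}

/*
 * Designated initializers zero-fill unnamed members, so .inherit is NULL
 * here -- the property the webrev relies on when the stub entry is dropped.
 */
static struct seg_ops demo_ops = {
        .unmap = demo_unmap,
};

/*
 * Hypothetical dispatch wrapper: a NULL op means "not supported".
 * This only sketches the assumed fallback; it is not the vm/seg.c code.
 */
static int
demo_inherit(const struct seg_ops *ops, struct seg *seg, void *addr,
    size_t len, unsigned int behav)
{
        if (ops->inherit == NULL)
                return (ENOTSUP);
        return (ops->inherit(seg, addr, len, behav));
}

int
main(void)
{
        (void) printf("inherit -> %d (ENOTSUP == %d)\n",
            demo_inherit(&demo_ops, NULL, NULL, 0, 0), ENOTSUP);
        return (0);
}

Under that assumption the zero-filled slot alone yields the "not supported" result, which is why the dedicated seg_inherit_notsup entries are redundant.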
--- old/usr/src/uts/common/vm/seg_spt.c
+++ new/usr/src/uts/common/vm/seg_spt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/param.h>
26 26 #include <sys/user.h>
27 27 #include <sys/mman.h>
28 28 #include <sys/kmem.h>
29 29 #include <sys/sysmacros.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/tuneable.h>
33 33 #include <vm/hat.h>
34 34 #include <vm/seg.h>
35 35 #include <vm/as.h>
36 36 #include <vm/anon.h>
37 37 #include <vm/page.h>
38 38 #include <sys/buf.h>
39 39 #include <sys/swap.h>
40 40 #include <sys/atomic.h>
41 41 #include <vm/seg_spt.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/vtrace.h>
44 44 #include <sys/shm.h>
45 45 #include <sys/shm_impl.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/vmsystm.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/project.h>
50 50 #include <sys/tnf_probe.h>
51 51 #include <sys/zone.h>
52 52
53 53 #define SEGSPTADDR (caddr_t)0x0
54 54
55 55 /*
56 56 * # pages used for spt
57 57 */
58 58 size_t spt_used;
59 59
60 60 /*
61 61 * segspt_minfree is the memory left for the system after ISM
62 62 * locked its pages; it is set up to 5% of availrmem in
63 63 * sptcreate when ISM is created. ISM should not use more
64 64 * than ~90% of availrmem; if it does, then the performance
65 65 * of the system may decrease. Machines with large memories may
66 66 * be able to use up more memory for ISM so we set the default
67 67 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
68 68 * If somebody wants even more memory for ISM (risking hanging
69 69 * the system) they can patch the segspt_minfree to smaller number.
70 70 */
71 71 pgcnt_t segspt_minfree = 0;
72 72
73 73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 75 static void segspt_free(struct seg *seg);
76 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78 78
79 79 static void
80 80 segspt_badop()
81 81 {
82 82 panic("segspt_badop called");
83 83 /*NOTREACHED*/
84 84 }
85 85
86 86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
87 87
88 88 struct seg_ops segspt_ops = {
89 89 .dup = SEGSPT_BADOP(int),
90 90 .unmap = segspt_unmap,
91 91 .free = segspt_free,
92 92 .fault = SEGSPT_BADOP(int),
93 93 .faulta = SEGSPT_BADOP(faultcode_t),
94 94 .setprot = SEGSPT_BADOP(int),
95 95 .checkprot = SEGSPT_BADOP(int),
96 96 .kluster = SEGSPT_BADOP(int),
97 97 .swapout = SEGSPT_BADOP(size_t),
98 98 .sync = SEGSPT_BADOP(int),
99 99 .incore = SEGSPT_BADOP(size_t),
100 100 .lockop = SEGSPT_BADOP(int),
101 101 .getprot = SEGSPT_BADOP(int),
102 102 .getoffset = SEGSPT_BADOP(u_offset_t),
103 103 .gettype = SEGSPT_BADOP(int),
104 104 .getvp = SEGSPT_BADOP(int),
105 105 .advise = SEGSPT_BADOP(int),
106 106 .dump = SEGSPT_BADOP(void),
107 107 .pagelock = SEGSPT_BADOP(int),
108 108 .setpagesize = SEGSPT_BADOP(int),
109 109 .getmemid = SEGSPT_BADOP(int),
110 110 .getpolicy = segspt_getpolicy,
111 111 .capable = SEGSPT_BADOP(int),
112 - .inherit = seg_inherit_notsup,
113 112 };
114 113
115 114 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
116 115 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
117 116 static void segspt_shmfree(struct seg *seg);
118 117 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
119 118 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
120 119 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
121 120 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
122 121 register size_t len, register uint_t prot);
123 122 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
124 123 uint_t prot);
125 124 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
126 125 static size_t segspt_shmswapout(struct seg *seg);
127 126 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
128 127 register char *vec);
129 128 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
130 129 int attr, uint_t flags);
131 130 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
132 131 int attr, int op, ulong_t *lockmap, size_t pos);
133 132 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
134 133 uint_t *protv);
135 134 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
136 135 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
137 136 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
138 137 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
139 138 uint_t behav);
140 139 static void segspt_shmdump(struct seg *seg);
141 140 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
142 141 struct page ***, enum lock_type, enum seg_rw);
143 142 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
144 143 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
145 144 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
146 145 static int segspt_shmcapable(struct seg *, segcapability_t);
147 146
148 147 struct seg_ops segspt_shmops = {
149 148 .dup = segspt_shmdup,
150 149 .unmap = segspt_shmunmap,
151 150 .free = segspt_shmfree,
152 151 .fault = segspt_shmfault,
153 152 .faulta = segspt_shmfaulta,
154 153 .setprot = segspt_shmsetprot,
155 154 .checkprot = segspt_shmcheckprot,
156 155 .kluster = segspt_shmkluster,
157 156 .swapout = segspt_shmswapout,
158 157 .sync = segspt_shmsync,
159 158 .incore = segspt_shmincore,
160 159 .lockop = segspt_shmlockop,
161 160 .getprot = segspt_shmgetprot,
162 161 .getoffset = segspt_shmgetoffset,
163 162 .gettype = segspt_shmgettype,
164 163 .getvp = segspt_shmgetvp,
165 164 .advise = segspt_shmadvise,
166 165 .dump = segspt_shmdump,
167 166 .pagelock = segspt_shmpagelock,
168 167 .setpagesize = segspt_shmsetpgsz,
169 168 .getmemid = segspt_shmgetmemid,
170 169 .getpolicy = segspt_shmgetpolicy,
171 170 .capable = segspt_shmcapable,
172 - .inherit = seg_inherit_notsup,
173 171 };
174 172
175 173 static void segspt_purge(struct seg *seg);
176 174 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
177 175 enum seg_rw, int);
178 176 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
179 177 page_t **ppa);
180 178
181 179
182 180
183 181 /*ARGSUSED*/
184 182 int
185 183 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
186 184 uint_t prot, uint_t flags, uint_t share_szc)
187 185 {
188 186 int err;
189 187 struct as *newas;
190 188 struct segspt_crargs sptcargs;
191 189
192 190 #ifdef DEBUG
193 191 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
194 192 tnf_ulong, size, size );
195 193 #endif
196 194 if (segspt_minfree == 0) /* leave min 5% of availrmem for */
197 195 segspt_minfree = availrmem/20; /* for the system */
198 196
199 197 if (!hat_supported(HAT_SHARED_PT, (void *)0))
200 198 return (EINVAL);
201 199
202 200 /*
203 201 * get a new as for this shared memory segment
204 202 */
205 203 newas = as_alloc();
206 204 newas->a_proc = NULL;
207 205 sptcargs.amp = amp;
208 206 sptcargs.prot = prot;
209 207 sptcargs.flags = flags;
210 208 sptcargs.szc = share_szc;
211 209 /*
212 210 * create a shared page table (spt) segment
213 211 */
214 212
215 213 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
216 214 as_free(newas);
217 215 return (err);
218 216 }
219 217 *sptseg = sptcargs.seg_spt;
220 218 return (0);
221 219 }
222 220
223 221 void
224 222 sptdestroy(struct as *as, struct anon_map *amp)
225 223 {
226 224
227 225 #ifdef DEBUG
228 226 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
229 227 #endif
230 228 (void) as_unmap(as, SEGSPTADDR, amp->size);
231 229 as_free(as);
232 230 }
233 231
234 232 /*
235 233 * called from seg_free().
236 234 * free (i.e., unlock, unmap, return to free list)
237 235 * all the pages in the given seg.
238 236 */
239 237 void
240 238 segspt_free(struct seg *seg)
241 239 {
242 240 struct spt_data *sptd = (struct spt_data *)seg->s_data;
243 241
244 242 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
245 243
246 244 if (sptd != NULL) {
247 245 if (sptd->spt_realsize)
248 246 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
249 247
250 248 if (sptd->spt_ppa_lckcnt)
251 249 kmem_free(sptd->spt_ppa_lckcnt,
252 250 sizeof (*sptd->spt_ppa_lckcnt)
253 251 * btopr(sptd->spt_amp->size));
254 252 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
255 253 cv_destroy(&sptd->spt_cv);
256 254 mutex_destroy(&sptd->spt_lock);
257 255 kmem_free(sptd, sizeof (*sptd));
258 256 }
259 257 }
260 258
261 259 /*ARGSUSED*/
262 260 static int
263 261 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
264 262 uint_t flags)
265 263 {
266 264 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
267 265
268 266 return (0);
269 267 }
270 268
271 269 /*ARGSUSED*/
272 270 static size_t
273 271 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
274 272 {
275 273 caddr_t eo_seg;
276 274 pgcnt_t npages;
277 275 struct shm_data *shmd = (struct shm_data *)seg->s_data;
278 276 struct seg *sptseg;
279 277 struct spt_data *sptd;
280 278
281 279 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
282 280 #ifdef lint
283 281 seg = seg;
284 282 #endif
285 283 sptseg = shmd->shm_sptseg;
286 284 sptd = sptseg->s_data;
287 285
288 286 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
289 287 eo_seg = addr + len;
290 288 while (addr < eo_seg) {
291 289 /* page exists, and it's locked. */
292 290 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
293 291 SEG_PAGE_ANON;
294 292 addr += PAGESIZE;
295 293 }
296 294 return (len);
297 295 } else {
298 296 struct anon_map *amp = shmd->shm_amp;
299 297 struct anon *ap;
300 298 page_t *pp;
301 299 pgcnt_t anon_index;
302 300 struct vnode *vp;
303 301 u_offset_t off;
304 302 ulong_t i;
305 303 int ret;
306 304 anon_sync_obj_t cookie;
307 305
308 306 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
309 307 anon_index = seg_page(seg, addr);
310 308 npages = btopr(len);
311 309 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
312 310 return (EINVAL);
313 311 }
314 312 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
315 313 for (i = 0; i < npages; i++, anon_index++) {
316 314 ret = 0;
317 315 anon_array_enter(amp, anon_index, &cookie);
318 316 ap = anon_get_ptr(amp->ahp, anon_index);
319 317 if (ap != NULL) {
320 318 swap_xlate(ap, &vp, &off);
321 319 anon_array_exit(&cookie);
322 320 pp = page_lookup_nowait(vp, off, SE_SHARED);
323 321 if (pp != NULL) {
324 322 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
325 323 page_unlock(pp);
326 324 }
327 325 } else {
328 326 anon_array_exit(&cookie);
329 327 }
330 328 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
331 329 ret |= SEG_PAGE_LOCKED;
332 330 }
333 331 *vec++ = (char)ret;
334 332 }
335 333 ANON_LOCK_EXIT(&amp->a_rwlock);
336 334 return (len);
337 335 }
338 336 }
339 337
340 338 static int
341 339 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
342 340 {
343 341 size_t share_size;
344 342
345 343 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
346 344
347 345 /*
348 346 * seg.s_size may have been rounded up to the largest page size
349 347 * in shmat().
350 348 * XXX This should be cleaned up. sptdestroy should take a length
351 349 * argument which should be the same as sptcreate. Then
352 350 * this rounding would not be needed (or is done in shm.c)
353 351 * Only the check for full segment will be needed.
354 352 *
355 353 * XXX -- shouldn't raddr == 0 always? These tests don't seem
356 354 * to be useful at all.
357 355 */
358 356 share_size = page_get_pagesize(seg->s_szc);
359 357 ssize = P2ROUNDUP(ssize, share_size);
360 358
361 359 if (raddr == seg->s_base && ssize == seg->s_size) {
362 360 seg_free(seg);
363 361 return (0);
364 362 } else
365 363 return (EINVAL);
366 364 }
367 365
368 366 int
369 367 segspt_create(struct seg *seg, caddr_t argsp)
370 368 {
371 369 int err;
372 370 caddr_t addr = seg->s_base;
373 371 struct spt_data *sptd;
374 372 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
375 373 struct anon_map *amp = sptcargs->amp;
376 374 struct kshmid *sp = amp->a_sp;
377 375 struct cred *cred = CRED();
378 376 ulong_t i, j, anon_index = 0;
379 377 pgcnt_t npages = btopr(amp->size);
380 378 struct vnode *vp;
381 379 page_t **ppa;
382 380 uint_t hat_flags;
383 381 size_t pgsz;
384 382 pgcnt_t pgcnt;
385 383 caddr_t a;
386 384 pgcnt_t pidx;
387 385 size_t sz;
388 386 proc_t *procp = curproc;
389 387 rctl_qty_t lockedbytes = 0;
390 388 kproject_t *proj;
391 389
392 390 /*
393 391 * We are holding the a_lock on the underlying dummy as,
394 392 * so we can make calls to the HAT layer.
395 393 */
396 394 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
397 395 ASSERT(sp != NULL);
398 396
399 397 #ifdef DEBUG
400 398 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
401 399 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
402 400 #endif
403 401 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
404 402 if (err = anon_swap_adjust(npages))
405 403 return (err);
406 404 }
407 405 err = ENOMEM;
408 406
409 407 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
410 408 goto out1;
411 409
412 410 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
413 411 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
414 412 KM_NOSLEEP)) == NULL)
415 413 goto out2;
416 414 }
417 415
418 416 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
419 417
420 418 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
421 419 goto out3;
422 420
423 421 seg->s_ops = &segspt_ops;
424 422 sptd->spt_vp = vp;
425 423 sptd->spt_amp = amp;
426 424 sptd->spt_prot = sptcargs->prot;
427 425 sptd->spt_flags = sptcargs->flags;
428 426 seg->s_data = (caddr_t)sptd;
429 427 sptd->spt_ppa = NULL;
430 428 sptd->spt_ppa_lckcnt = NULL;
431 429 seg->s_szc = sptcargs->szc;
432 430 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
433 431 sptd->spt_gen = 0;
434 432
435 433 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
436 434 if (seg->s_szc > amp->a_szc) {
437 435 amp->a_szc = seg->s_szc;
438 436 }
439 437 ANON_LOCK_EXIT(&amp->a_rwlock);
440 438
441 439 /*
442 440 * Set policy to affect initial allocation of pages in
443 441 * anon_map_createpages()
444 442 */
445 443 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
446 444 NULL, 0, ptob(npages));
447 445
448 446 if (sptcargs->flags & SHM_PAGEABLE) {
449 447 size_t share_sz;
450 448 pgcnt_t new_npgs, more_pgs;
451 449 struct anon_hdr *nahp;
452 450 zone_t *zone;
453 451
454 452 share_sz = page_get_pagesize(seg->s_szc);
455 453 if (!IS_P2ALIGNED(amp->size, share_sz)) {
456 454 /*
457 455 * We are rounding up the size of the anon array
458 456 * on 4 M boundary because we always create 4 M
459 457 * of page(s) when locking, faulting pages and we
460 458 * don't have to check for all corner cases e.g.
461 459 * if there is enough space to allocate 4 M
462 460 * page.
463 461 */
464 462 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
465 463 more_pgs = new_npgs - npages;
466 464
467 465 /*
468 466 * The zone will never be NULL, as a fully created
469 467 * shm always has an owning zone.
470 468 */
471 469 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
472 470 ASSERT(zone != NULL);
473 471 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
474 472 err = ENOMEM;
475 473 goto out4;
476 474 }
477 475
478 476 nahp = anon_create(new_npgs, ANON_SLEEP);
479 477 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
480 478 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
481 479 ANON_SLEEP);
482 480 anon_release(amp->ahp, npages);
483 481 amp->ahp = nahp;
484 482 ASSERT(amp->swresv == ptob(npages));
485 483 amp->swresv = amp->size = ptob(new_npgs);
486 484 ANON_LOCK_EXIT(&amp->a_rwlock);
487 485 npages = new_npgs;
488 486 }
489 487
490 488 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
491 489 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
492 490 sptd->spt_pcachecnt = 0;
493 491 sptd->spt_realsize = ptob(npages);
494 492 sptcargs->seg_spt = seg;
495 493 return (0);
496 494 }
497 495
498 496 /*
499 497 * get array of pages for each anon slot in amp
500 498 */
501 499 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
502 500 seg, addr, S_CREATE, cred)) != 0)
503 501 goto out4;
504 502
505 503 mutex_enter(&sp->shm_mlock);
506 504
507 505 /* May be partially locked, so count bytes to charge for locking */
508 506 for (i = 0; i < npages; i++)
509 507 if (ppa[i]->p_lckcnt == 0)
510 508 lockedbytes += PAGESIZE;
511 509
512 510 proj = sp->shm_perm.ipc_proj;
513 511
514 512 if (lockedbytes > 0) {
515 513 mutex_enter(&procp->p_lock);
516 514 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
517 515 mutex_exit(&procp->p_lock);
518 516 mutex_exit(&sp->shm_mlock);
519 517 for (i = 0; i < npages; i++)
520 518 page_unlock(ppa[i]);
521 519 err = ENOMEM;
522 520 goto out4;
523 521 }
524 522 mutex_exit(&procp->p_lock);
525 523 }
526 524
527 525 /*
528 526 * addr is initial address corresponding to the first page on ppa list
529 527 */
530 528 for (i = 0; i < npages; i++) {
531 529 /* attempt to lock all pages */
532 530 if (page_pp_lock(ppa[i], 0, 1) == 0) {
533 531 /*
534 532 * if unable to lock any page, unlock all
535 533 * of them and return error
536 534 */
537 535 for (j = 0; j < i; j++)
538 536 page_pp_unlock(ppa[j], 0, 1);
539 537 for (i = 0; i < npages; i++)
540 538 page_unlock(ppa[i]);
541 539 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
542 540 mutex_exit(&sp->shm_mlock);
543 541 err = ENOMEM;
544 542 goto out4;
545 543 }
546 544 }
547 545 mutex_exit(&sp->shm_mlock);
548 546
549 547 /*
550 548 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
551 549 * for the entire life of the segment. For example platforms
552 550 * that do not support Dynamic Reconfiguration.
553 551 */
554 552 hat_flags = HAT_LOAD_SHARE;
555 553 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
556 554 hat_flags |= HAT_LOAD_LOCK;
557 555
558 556 /*
559 557 * Load translations one large page at a time
560 558 * to make sure we don't create mappings bigger than
561 559 * segment's size code in case underlying pages
562 560 * are shared with segvn's segment that uses bigger
563 561 * size code than we do.
564 562 */
565 563 pgsz = page_get_pagesize(seg->s_szc);
566 564 pgcnt = page_get_pagecnt(seg->s_szc);
567 565 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
568 566 sz = MIN(pgsz, ptob(npages - pidx));
569 567 hat_memload_array(seg->s_as->a_hat, a, sz,
570 568 &ppa[pidx], sptd->spt_prot, hat_flags);
571 569 }
572 570
573 571 /*
574 572 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
575 573 * we will leave the pages locked SE_SHARED for the life
576 574 * of the ISM segment. This will prevent any calls to
577 575 * hat_pageunload() on this ISM segment for those platforms.
578 576 */
579 577 if (!(hat_flags & HAT_LOAD_LOCK)) {
580 578 /*
581 579 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
582 580 * we no longer need to hold the SE_SHARED lock on the pages,
583 581 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
584 582 * SE_SHARED lock on the pages as necessary.
585 583 */
586 584 for (i = 0; i < npages; i++)
587 585 page_unlock(ppa[i]);
588 586 }
589 587 sptd->spt_pcachecnt = 0;
590 588 kmem_free(ppa, ((sizeof (page_t *)) * npages));
591 589 sptd->spt_realsize = ptob(npages);
592 590 atomic_add_long(&spt_used, npages);
593 591 sptcargs->seg_spt = seg;
594 592 return (0);
595 593
596 594 out4:
597 595 seg->s_data = NULL;
598 596 kmem_free(vp, sizeof (*vp));
599 597 cv_destroy(&sptd->spt_cv);
600 598 out3:
601 599 mutex_destroy(&sptd->spt_lock);
602 600 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
603 601 kmem_free(ppa, (sizeof (*ppa) * npages));
604 602 out2:
605 603 kmem_free(sptd, sizeof (*sptd));
606 604 out1:
607 605 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
608 606 anon_swap_restore(npages);
609 607 return (err);
610 608 }
611 609
612 610 /*ARGSUSED*/
613 611 void
614 612 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
615 613 {
616 614 struct page *pp;
617 615 struct spt_data *sptd = (struct spt_data *)seg->s_data;
618 616 pgcnt_t npages;
619 617 ulong_t anon_idx;
620 618 struct anon_map *amp;
621 619 struct anon *ap;
622 620 struct vnode *vp;
623 621 u_offset_t off;
624 622 uint_t hat_flags;
625 623 int root = 0;
626 624 pgcnt_t pgs, curnpgs = 0;
627 625 page_t *rootpp;
628 626 rctl_qty_t unlocked_bytes = 0;
629 627 kproject_t *proj;
630 628 kshmid_t *sp;
631 629
632 630 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
633 631
634 632 len = P2ROUNDUP(len, PAGESIZE);
635 633
636 634 npages = btop(len);
637 635
638 636 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
639 637 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
640 638 (sptd->spt_flags & SHM_PAGEABLE)) {
641 639 hat_flags = HAT_UNLOAD_UNMAP;
642 640 }
643 641
644 642 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
645 643
646 644 amp = sptd->spt_amp;
647 645 if (sptd->spt_flags & SHM_PAGEABLE)
648 646 npages = btop(amp->size);
649 647
650 648 ASSERT(amp != NULL);
651 649
652 650 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
653 651 sp = amp->a_sp;
654 652 proj = sp->shm_perm.ipc_proj;
655 653 mutex_enter(&sp->shm_mlock);
656 654 }
657 655 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
658 656 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
659 657 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
660 658 panic("segspt_free_pages: null app");
661 659 /*NOTREACHED*/
662 660 }
663 661 } else {
664 662 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
665 663 == NULL)
666 664 continue;
667 665 }
668 666 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
669 667 swap_xlate(ap, &vp, &off);
670 668
671 669 /*
672 670 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
673 671 * the pages won't be having SE_SHARED lock at this
674 672 * point.
675 673 *
676 674 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
677 675 * the pages are still held SE_SHARED locked from the
678 676 * original segspt_create()
679 677 *
680 678 * Our goal is to get SE_EXCL lock on each page, remove
681 679 * permanent lock on it and invalidate the page.
682 680 */
683 681 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
684 682 if (hat_flags == HAT_UNLOAD_UNMAP)
685 683 pp = page_lookup(vp, off, SE_EXCL);
686 684 else {
687 685 if ((pp = page_find(vp, off)) == NULL) {
688 686 panic("segspt_free_pages: "
689 687 "page not locked");
690 688 /*NOTREACHED*/
691 689 }
692 690 if (!page_tryupgrade(pp)) {
693 691 page_unlock(pp);
694 692 pp = page_lookup(vp, off, SE_EXCL);
695 693 }
696 694 }
697 695 if (pp == NULL) {
698 696 panic("segspt_free_pages: "
699 697 "page not in the system");
700 698 /*NOTREACHED*/
701 699 }
702 700 ASSERT(pp->p_lckcnt > 0);
703 701 page_pp_unlock(pp, 0, 1);
704 702 if (pp->p_lckcnt == 0)
705 703 unlocked_bytes += PAGESIZE;
706 704 } else {
707 705 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
708 706 continue;
709 707 }
710 708 /*
711 709 * It's logical to invalidate the pages here as in most cases
712 710 * these were created by segspt.
713 711 */
714 712 if (pp->p_szc != 0) {
715 713 if (root == 0) {
716 714 ASSERT(curnpgs == 0);
717 715 root = 1;
718 716 rootpp = pp;
719 717 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
720 718 ASSERT(pgs > 1);
721 719 ASSERT(IS_P2ALIGNED(pgs, pgs));
722 720 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
723 721 curnpgs--;
724 722 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
725 723 ASSERT(curnpgs == 1);
726 724 ASSERT(page_pptonum(pp) ==
727 725 page_pptonum(rootpp) + (pgs - 1));
728 726 page_destroy_pages(rootpp);
729 727 root = 0;
730 728 curnpgs = 0;
731 729 } else {
732 730 ASSERT(curnpgs > 1);
733 731 ASSERT(page_pptonum(pp) ==
734 732 page_pptonum(rootpp) + (pgs - curnpgs));
735 733 curnpgs--;
736 734 }
737 735 } else {
738 736 if (root != 0 || curnpgs != 0) {
739 737 panic("segspt_free_pages: bad large page");
740 738 /*NOTREACHED*/
741 739 }
742 740 /*
743 741 * Before destroying the pages, we need to take care
744 742 * of the rctl locked memory accounting. For that
745 743 * we need to calculate the unlocked_bytes.
746 744 */
747 745 if (pp->p_lckcnt > 0)
748 746 unlocked_bytes += PAGESIZE;
749 747 /*LINTED: constant in conditional context */
750 748 VN_DISPOSE(pp, B_INVAL, 0, kcred);
751 749 }
752 750 }
753 751 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
754 752 if (unlocked_bytes > 0)
755 753 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
756 754 mutex_exit(&sp->shm_mlock);
757 755 }
758 756 if (root != 0 || curnpgs != 0) {
759 757 panic("segspt_free_pages: bad large page");
760 758 /*NOTREACHED*/
761 759 }
762 760
763 761 /*
764 762 * mark that pages have been released
765 763 */
766 764 sptd->spt_realsize = 0;
767 765
768 766 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
769 767 atomic_add_long(&spt_used, -npages);
770 768 anon_swap_restore(npages);
771 769 }
772 770 }
773 771
774 772 /*
775 773 * Get memory allocation policy info for specified address in given segment
776 774 */
777 775 static lgrp_mem_policy_info_t *
778 776 segspt_getpolicy(struct seg *seg, caddr_t addr)
779 777 {
780 778 struct anon_map *amp;
781 779 ulong_t anon_index;
782 780 lgrp_mem_policy_info_t *policy_info;
783 781 struct spt_data *spt_data;
784 782
785 783 ASSERT(seg != NULL);
786 784
787 785 /*
788 786 * Get anon_map from segspt
789 787 *
790 788 * Assume that no lock needs to be held on anon_map, since
791 789 * it should be protected by its reference count which must be
792 790 * nonzero for an existing segment
793 791 * Need to grab readers lock on policy tree though
794 792 */
795 793 spt_data = (struct spt_data *)seg->s_data;
796 794 if (spt_data == NULL)
797 795 return (NULL);
798 796 amp = spt_data->spt_amp;
799 797 ASSERT(amp->refcnt != 0);
800 798
801 799 /*
802 800 * Get policy info
803 801 *
804 802 * Assume starting anon index of 0
805 803 */
806 804 anon_index = seg_page(seg, addr);
807 805 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
808 806
809 807 return (policy_info);
810 808 }
811 809
812 810 /*
813 811 * DISM only.
814 812 * Return locked pages over a given range.
815 813 *
816 814 * We will cache all DISM locked pages and save the pplist for the
817 815 * entire segment in the ppa field of the underlying DISM segment structure.
818 816 * Later, during a call to segspt_reclaim() we will use this ppa array
819 817 * to page_unlock() all of the pages and then we will free this ppa list.
820 818 */
821 819 /*ARGSUSED*/
822 820 static int
823 821 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
824 822 struct page ***ppp, enum lock_type type, enum seg_rw rw)
825 823 {
826 824 struct shm_data *shmd = (struct shm_data *)seg->s_data;
827 825 struct seg *sptseg = shmd->shm_sptseg;
828 826 struct spt_data *sptd = sptseg->s_data;
829 827 pgcnt_t pg_idx, npages, tot_npages, npgs;
830 828 struct page **pplist, **pl, **ppa, *pp;
831 829 struct anon_map *amp;
832 830 spgcnt_t an_idx;
833 831 int ret = ENOTSUP;
834 832 uint_t pl_built = 0;
835 833 struct anon *ap;
836 834 struct vnode *vp;
837 835 u_offset_t off;
838 836 pgcnt_t claim_availrmem = 0;
839 837 uint_t szc;
840 838
841 839 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
842 840 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
843 841
844 842 /*
845 843 * We want to lock/unlock the entire ISM segment. Therefore,
846 844 * we will be using the underlying sptseg and its base address
847 845 * and length for the caching arguments.
848 846 */
849 847 ASSERT(sptseg);
850 848 ASSERT(sptd);
851 849
852 850 pg_idx = seg_page(seg, addr);
853 851 npages = btopr(len);
854 852
855 853 /*
856 854 * check if the request is larger than number of pages covered
857 855 * by amp
858 856 */
859 857 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
860 858 *ppp = NULL;
861 859 return (ENOTSUP);
862 860 }
863 861
864 862 if (type == L_PAGEUNLOCK) {
865 863 ASSERT(sptd->spt_ppa != NULL);
866 864
867 865 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
868 866 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
869 867
870 868 /*
871 869 * If someone is blocked while unmapping, we purge
872 870 * segment page cache and thus reclaim pplist synchronously
873 871 * without waiting for seg_pasync_thread. This speeds up
874 872 * unmapping in cases where munmap(2) is called, while
875 873 * raw async i/o is still in progress or where a thread
876 874 * exits on data fault in a multithreaded application.
877 875 */
878 876 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
879 877 (AS_ISUNMAPWAIT(seg->s_as) &&
880 878 shmd->shm_softlockcnt > 0)) {
881 879 segspt_purge(seg);
882 880 }
883 881 return (0);
884 882 }
885 883
886 884 /* The L_PAGELOCK case ... */
887 885
888 886 if (sptd->spt_flags & DISM_PPA_CHANGED) {
889 887 segspt_purge(seg);
890 888 /*
891 889 * for DISM ppa needs to be rebuild since
892 890 * number of locked pages could be changed
893 891 */
894 892 *ppp = NULL;
895 893 return (ENOTSUP);
896 894 }
897 895
898 896 /*
899 897 * First try to find pages in segment page cache, without
900 898 * holding the segment lock.
901 899 */
902 900 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
903 901 S_WRITE, SEGP_FORCE_WIRED);
904 902 if (pplist != NULL) {
905 903 ASSERT(sptd->spt_ppa != NULL);
906 904 ASSERT(sptd->spt_ppa == pplist);
907 905 ppa = sptd->spt_ppa;
908 906 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
909 907 if (ppa[an_idx] == NULL) {
910 908 seg_pinactive(seg, NULL, seg->s_base,
911 909 sptd->spt_amp->size, ppa,
912 910 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
913 911 *ppp = NULL;
914 912 return (ENOTSUP);
915 913 }
916 914 if ((szc = ppa[an_idx]->p_szc) != 0) {
917 915 npgs = page_get_pagecnt(szc);
918 916 an_idx = P2ROUNDUP(an_idx + 1, npgs);
919 917 } else {
920 918 an_idx++;
921 919 }
922 920 }
923 921 /*
924 922 * Since we cache the entire DISM segment, we want to
925 923 * set ppp to point to the first slot that corresponds
926 924 * to the requested addr, i.e. pg_idx.
927 925 */
928 926 *ppp = &(sptd->spt_ppa[pg_idx]);
929 927 return (0);
930 928 }
931 929
932 930 mutex_enter(&sptd->spt_lock);
933 931 /*
934 932 * try to find pages in segment page cache with mutex
935 933 */
936 934 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
937 935 S_WRITE, SEGP_FORCE_WIRED);
938 936 if (pplist != NULL) {
939 937 ASSERT(sptd->spt_ppa != NULL);
940 938 ASSERT(sptd->spt_ppa == pplist);
941 939 ppa = sptd->spt_ppa;
942 940 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
943 941 if (ppa[an_idx] == NULL) {
944 942 mutex_exit(&sptd->spt_lock);
945 943 seg_pinactive(seg, NULL, seg->s_base,
946 944 sptd->spt_amp->size, ppa,
947 945 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
948 946 *ppp = NULL;
949 947 return (ENOTSUP);
950 948 }
951 949 if ((szc = ppa[an_idx]->p_szc) != 0) {
952 950 npgs = page_get_pagecnt(szc);
953 951 an_idx = P2ROUNDUP(an_idx + 1, npgs);
954 952 } else {
955 953 an_idx++;
956 954 }
957 955 }
958 956 /*
959 957 * Since we cache the entire DISM segment, we want to
960 958 * set ppp to point to the first slot that corresponds
961 959 * to the requested addr, i.e. pg_idx.
962 960 */
963 961 mutex_exit(&sptd->spt_lock);
964 962 *ppp = &(sptd->spt_ppa[pg_idx]);
965 963 return (0);
966 964 }
967 965 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
968 966 SEGP_FORCE_WIRED) == SEGP_FAIL) {
969 967 mutex_exit(&sptd->spt_lock);
970 968 *ppp = NULL;
971 969 return (ENOTSUP);
972 970 }
973 971
974 972 /*
975 973 * No need to worry about protections because DISM pages are always rw.
976 974 */
977 975 pl = pplist = NULL;
978 976 amp = sptd->spt_amp;
979 977
980 978 /*
981 979 * Do we need to build the ppa array?
982 980 */
983 981 if (sptd->spt_ppa == NULL) {
984 982 pgcnt_t lpg_cnt = 0;
985 983
986 984 pl_built = 1;
987 985 tot_npages = btopr(sptd->spt_amp->size);
988 986
989 987 ASSERT(sptd->spt_pcachecnt == 0);
990 988 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
991 989 pl = pplist;
992 990
993 991 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
994 992 for (an_idx = 0; an_idx < tot_npages; ) {
995 993 ap = anon_get_ptr(amp->ahp, an_idx);
996 994 /*
997 995 * Cache only mlocked pages. For large pages
998 996 * if one (constituent) page is mlocked
999 997 * all pages for that large page
1000 998 * are cached also. This is for quick
1001 999 * lookups of ppa array;
1002 1000 */
1003 1001 if ((ap != NULL) && (lpg_cnt != 0 ||
1004 1002 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1005 1003
1006 1004 swap_xlate(ap, &vp, &off);
1007 1005 pp = page_lookup(vp, off, SE_SHARED);
1008 1006 ASSERT(pp != NULL);
1009 1007 if (lpg_cnt == 0) {
1010 1008 lpg_cnt++;
1011 1009 /*
1012 1010 * For a small page, we are done --
1013 1011 * lpg_count is reset to 0 below.
1014 1012 *
1015 1013 * For a large page, we are guaranteed
1016 1014 * to find the anon structures of all
1017 1015 * constituent pages and a non-zero
1018 1016 * lpg_cnt ensures that we don't test
1019 1017 * for mlock for these. We are done
1020 1018 * when lpg_count reaches (npgs + 1).
1021 1019 * If we are not the first constituent
1022 1020 * page, restart at the first one.
1023 1021 */
1024 1022 npgs = page_get_pagecnt(pp->p_szc);
1025 1023 if (!IS_P2ALIGNED(an_idx, npgs)) {
1026 1024 an_idx = P2ALIGN(an_idx, npgs);
1027 1025 page_unlock(pp);
1028 1026 continue;
1029 1027 }
1030 1028 }
1031 1029 if (++lpg_cnt > npgs)
1032 1030 lpg_cnt = 0;
1033 1031
1034 1032 /*
1035 1033 * availrmem is decremented only
1036 1034 * for unlocked pages
1037 1035 */
1038 1036 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1039 1037 claim_availrmem++;
1040 1038 pplist[an_idx] = pp;
1041 1039 }
1042 1040 an_idx++;
1043 1041 }
1044 1042 ANON_LOCK_EXIT(&amp->a_rwlock);
1045 1043
1046 1044 if (claim_availrmem) {
1047 1045 mutex_enter(&freemem_lock);
1048 1046 if (availrmem < tune.t_minarmem + claim_availrmem) {
1049 1047 mutex_exit(&freemem_lock);
1050 1048 ret = ENOTSUP;
1051 1049 claim_availrmem = 0;
1052 1050 goto insert_fail;
1053 1051 } else {
1054 1052 availrmem -= claim_availrmem;
1055 1053 }
1056 1054 mutex_exit(&freemem_lock);
1057 1055 }
1058 1056
1059 1057 sptd->spt_ppa = pl;
1060 1058 } else {
1061 1059 /*
1062 1060 * We already have a valid ppa[].
1063 1061 */
1064 1062 pl = sptd->spt_ppa;
1065 1063 }
1066 1064
1067 1065 ASSERT(pl != NULL);
1068 1066
1069 1067 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1070 1068 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1071 1069 segspt_reclaim);
1072 1070 if (ret == SEGP_FAIL) {
1073 1071 /*
1074 1072 * seg_pinsert failed. We return
1075 1073 * ENOTSUP, so that the as_pagelock() code will
1076 1074 * then try the slower F_SOFTLOCK path.
1077 1075 */
1078 1076 if (pl_built) {
1079 1077 /*
1080 1078 * No one else has referenced the ppa[].
1081 1079 * We created it and we need to destroy it.
1082 1080 */
1083 1081 sptd->spt_ppa = NULL;
1084 1082 }
1085 1083 ret = ENOTSUP;
1086 1084 goto insert_fail;
1087 1085 }
1088 1086
1089 1087 /*
1090 1088 * In either case, we increment softlockcnt on the 'real' segment.
1091 1089 */
1092 1090 sptd->spt_pcachecnt++;
1093 1091 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1094 1092
1095 1093 ppa = sptd->spt_ppa;
1096 1094 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1097 1095 if (ppa[an_idx] == NULL) {
1098 1096 mutex_exit(&sptd->spt_lock);
1099 1097 seg_pinactive(seg, NULL, seg->s_base,
1100 1098 sptd->spt_amp->size,
1101 1099 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1102 1100 *ppp = NULL;
1103 1101 return (ENOTSUP);
1104 1102 }
1105 1103 if ((szc = ppa[an_idx]->p_szc) != 0) {
1106 1104 npgs = page_get_pagecnt(szc);
1107 1105 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1108 1106 } else {
1109 1107 an_idx++;
1110 1108 }
1111 1109 }
1112 1110 /*
1113 1111 * We can now drop the sptd->spt_lock since the ppa[]
1114 1112 * exists and we have incremented pcachecnt.
1115 1113 */
1116 1114 mutex_exit(&sptd->spt_lock);
1117 1115
1118 1116 /*
1119 1117 * Since we cache the entire segment, we want to
1120 1118 * set ppp to point to the first slot that corresponds
1121 1119 * to the requested addr, i.e. pg_idx.
1122 1120 */
1123 1121 *ppp = &(sptd->spt_ppa[pg_idx]);
1124 1122 return (0);
1125 1123
1126 1124 insert_fail:
1127 1125 /*
1128 1126 * We will only reach this code if we tried and failed.
1129 1127 *
1130 1128 * And we can drop the lock on the dummy seg, once we've failed
1131 1129 * to set up a new ppa[].
1132 1130 */
1133 1131 mutex_exit(&sptd->spt_lock);
1134 1132
1135 1133 if (pl_built) {
1136 1134 if (claim_availrmem) {
1137 1135 mutex_enter(&freemem_lock);
1138 1136 availrmem += claim_availrmem;
1139 1137 mutex_exit(&freemem_lock);
1140 1138 }
1141 1139
1142 1140 /*
1143 1141 * We created pl and we need to destroy it.
1144 1142 */
1145 1143 pplist = pl;
1146 1144 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1147 1145 if (pplist[an_idx] != NULL)
1148 1146 page_unlock(pplist[an_idx]);
1149 1147 }
1150 1148 kmem_free(pl, sizeof (page_t *) * tot_npages);
1151 1149 }
1152 1150
1153 1151 if (shmd->shm_softlockcnt <= 0) {
1154 1152 if (AS_ISUNMAPWAIT(seg->s_as)) {
1155 1153 mutex_enter(&seg->s_as->a_contents);
1156 1154 if (AS_ISUNMAPWAIT(seg->s_as)) {
1157 1155 AS_CLRUNMAPWAIT(seg->s_as);
1158 1156 cv_broadcast(&seg->s_as->a_cv);
1159 1157 }
1160 1158 mutex_exit(&seg->s_as->a_contents);
1161 1159 }
1162 1160 }
1163 1161 *ppp = NULL;
1164 1162 return (ret);
1165 1163 }
1166 1164
1167 1165
1168 1166
1169 1167 /*
1170 1168 * return locked pages over a given range.
1171 1169 *
1172 1170 * We will cache the entire ISM segment and save the pplist for the
1173 1171 * entire segment in the ppa field of the underlying ISM segment structure.
1174 1172 * Later, during a call to segspt_reclaim() we will use this ppa array
1175 1173 * to page_unlock() all of the pages and then we will free this ppa list.
1176 1174 */
1177 1175 /*ARGSUSED*/
1178 1176 static int
1179 1177 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1180 1178 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1181 1179 {
1182 1180 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1183 1181 struct seg *sptseg = shmd->shm_sptseg;
1184 1182 struct spt_data *sptd = sptseg->s_data;
1185 1183 pgcnt_t np, page_index, npages;
1186 1184 caddr_t a, spt_base;
1187 1185 struct page **pplist, **pl, *pp;
1188 1186 struct anon_map *amp;
1189 1187 ulong_t anon_index;
1190 1188 int ret = ENOTSUP;
1191 1189 uint_t pl_built = 0;
1192 1190 struct anon *ap;
1193 1191 struct vnode *vp;
1194 1192 u_offset_t off;
1195 1193
1196 1194 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1197 1195 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1198 1196
1199 1197
1200 1198 /*
1201 1199 * We want to lock/unlock the entire ISM segment. Therefore,
1202 1200 * we will be using the underlying sptseg and its base address
1203 1201 * and length for the caching arguments.
1204 1202 */
1205 1203 ASSERT(sptseg);
1206 1204 ASSERT(sptd);
1207 1205
1208 1206 if (sptd->spt_flags & SHM_PAGEABLE) {
1209 1207 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1210 1208 }
1211 1209
1212 1210 page_index = seg_page(seg, addr);
1213 1211 npages = btopr(len);
1214 1212
1215 1213 /*
1216 1214 * check if the request is larger than number of pages covered
1217 1215 * by amp
1218 1216 */
1219 1217 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1220 1218 *ppp = NULL;
1221 1219 return (ENOTSUP);
1222 1220 }
1223 1221
1224 1222 if (type == L_PAGEUNLOCK) {
1225 1223
1226 1224 ASSERT(sptd->spt_ppa != NULL);
1227 1225
1228 1226 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1229 1227 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1230 1228
1231 1229 /*
1232 1230 * If someone is blocked while unmapping, we purge
1233 1231 * segment page cache and thus reclaim pplist synchronously
1234 1232 * without waiting for seg_pasync_thread. This speeds up
1235 1233 * unmapping in cases where munmap(2) is called, while
1236 1234 * raw async i/o is still in progress or where a thread
1237 1235 * exits on data fault in a multithreaded application.
1238 1236 */
1239 1237 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1240 1238 segspt_purge(seg);
1241 1239 }
1242 1240 return (0);
1243 1241 }
1244 1242
1245 1243 /* The L_PAGELOCK case... */
1246 1244
1247 1245 /*
1248 1246 * First try to find pages in segment page cache, without
1249 1247 * holding the segment lock.
1250 1248 */
1251 1249 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1252 1250 S_WRITE, SEGP_FORCE_WIRED);
1253 1251 if (pplist != NULL) {
1254 1252 ASSERT(sptd->spt_ppa == pplist);
1255 1253 ASSERT(sptd->spt_ppa[page_index]);
1256 1254 /*
1257 1255 * Since we cache the entire ISM segment, we want to
1258 1256 * set ppp to point to the first slot that corresponds
1259 1257 * to the requested addr, i.e. page_index.
1260 1258 */
1261 1259 *ppp = &(sptd->spt_ppa[page_index]);
1262 1260 return (0);
1263 1261 }
1264 1262
1265 1263 mutex_enter(&sptd->spt_lock);
1266 1264
1267 1265 /*
1268 1266 * try to find pages in segment page cache
1269 1267 */
1270 1268 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1271 1269 S_WRITE, SEGP_FORCE_WIRED);
1272 1270 if (pplist != NULL) {
1273 1271 ASSERT(sptd->spt_ppa == pplist);
1274 1272 /*
1275 1273 * Since we cache the entire segment, we want to
1276 1274 * set ppp to point to the first slot that corresponds
1277 1275 * to the requested addr, i.e. page_index.
1278 1276 */
1279 1277 mutex_exit(&sptd->spt_lock);
1280 1278 *ppp = &(sptd->spt_ppa[page_index]);
1281 1279 return (0);
1282 1280 }
1283 1281
1284 1282 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1285 1283 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1286 1284 mutex_exit(&sptd->spt_lock);
1287 1285 *ppp = NULL;
1288 1286 return (ENOTSUP);
1289 1287 }
1290 1288
1291 1289 /*
1292 1290 * No need to worry about protections because ISM pages
1293 1291 * are always rw.
1294 1292 */
1295 1293 pl = pplist = NULL;
1296 1294
1297 1295 /*
1298 1296 * Do we need to build the ppa array?
1299 1297 */
1300 1298 if (sptd->spt_ppa == NULL) {
1301 1299 ASSERT(sptd->spt_ppa == pplist);
1302 1300
1303 1301 spt_base = sptseg->s_base;
1304 1302 pl_built = 1;
1305 1303
1306 1304 /*
1307 1305 * availrmem is decremented once during anon_swap_adjust()
1308 1306 * and is incremented during the anon_unresv(), which is
1309 1307 * called from shm_rm_amp() when the segment is destroyed.
1310 1308 */
1311 1309 amp = sptd->spt_amp;
1312 1310 ASSERT(amp != NULL);
1313 1311
1314 1312 /* pcachecnt is protected by sptd->spt_lock */
1315 1313 ASSERT(sptd->spt_pcachecnt == 0);
1316 1314 pplist = kmem_zalloc(sizeof (page_t *)
1317 1315 * btopr(sptd->spt_amp->size), KM_SLEEP);
1318 1316 pl = pplist;
1319 1317
1320 1318 anon_index = seg_page(sptseg, spt_base);
1321 1319
1322 1320 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1323 1321 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1324 1322 a += PAGESIZE, anon_index++, pplist++) {
1325 1323 ap = anon_get_ptr(amp->ahp, anon_index);
1326 1324 ASSERT(ap != NULL);
1327 1325 swap_xlate(ap, &vp, &off);
1328 1326 pp = page_lookup(vp, off, SE_SHARED);
1329 1327 ASSERT(pp != NULL);
1330 1328 *pplist = pp;
1331 1329 }
1332 1330 ANON_LOCK_EXIT(&amp->a_rwlock);
1333 1331
1334 1332 if (a < (spt_base + sptd->spt_amp->size)) {
1335 1333 ret = ENOTSUP;
1336 1334 goto insert_fail;
1337 1335 }
1338 1336 sptd->spt_ppa = pl;
1339 1337 } else {
1340 1338 /*
1341 1339 * We already have a valid ppa[].
1342 1340 */
1343 1341 pl = sptd->spt_ppa;
1344 1342 }
1345 1343
1346 1344 ASSERT(pl != NULL);
1347 1345
1348 1346 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1349 1347 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1350 1348 segspt_reclaim);
1351 1349 if (ret == SEGP_FAIL) {
1352 1350 /*
1353 1351 * seg_pinsert failed. We return
1354 1352 * ENOTSUP, so that the as_pagelock() code will
1355 1353 * then try the slower F_SOFTLOCK path.
1356 1354 */
1357 1355 if (pl_built) {
1358 1356 /*
1359 1357 * No one else has referenced the ppa[].
1360 1358 * We created it and we need to destroy it.
1361 1359 */
1362 1360 sptd->spt_ppa = NULL;
1363 1361 }
1364 1362 ret = ENOTSUP;
1365 1363 goto insert_fail;
1366 1364 }
1367 1365
1368 1366 /*
1369 1367 * In either case, we increment softlockcnt on the 'real' segment.
1370 1368 */
1371 1369 sptd->spt_pcachecnt++;
1372 1370 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1373 1371
1374 1372 /*
1375 1373 * We can now drop the sptd->spt_lock since the ppa[]
1376 1374 * exists and we have incremented pcachecnt.
1377 1375 */
1378 1376 mutex_exit(&sptd->spt_lock);
1379 1377
1380 1378 /*
1381 1379 * Since we cache the entire segment, we want to
1382 1380 * set ppp to point to the first slot that corresponds
1383 1381 * to the requested addr, i.e. page_index.
1384 1382 */
1385 1383 *ppp = &(sptd->spt_ppa[page_index]);
1386 1384 return (0);
1387 1385
1388 1386 insert_fail:
1389 1387 /*
1390 1388 * We will only reach this code if we tried and failed.
1391 1389 *
1392 1390 * And we can drop the lock on the dummy seg, once we've failed
1393 1391 * to set up a new ppa[].
1394 1392 */
1395 1393 mutex_exit(&sptd->spt_lock);
1396 1394
1397 1395 if (pl_built) {
1398 1396 /*
1399 1397 * We created pl and we need to destroy it.
1400 1398 */
1401 1399 pplist = pl;
1402 1400 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1403 1401 while (np) {
1404 1402 page_unlock(*pplist);
1405 1403 np--;
1406 1404 pplist++;
1407 1405 }
1408 1406 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1409 1407 }
1410 1408 if (shmd->shm_softlockcnt <= 0) {
1411 1409 if (AS_ISUNMAPWAIT(seg->s_as)) {
1412 1410 mutex_enter(&seg->s_as->a_contents);
1413 1411 if (AS_ISUNMAPWAIT(seg->s_as)) {
1414 1412 AS_CLRUNMAPWAIT(seg->s_as);
1415 1413 cv_broadcast(&seg->s_as->a_cv);
1416 1414 }
1417 1415 mutex_exit(&seg->s_as->a_contents);
1418 1416 }
1419 1417 }
1420 1418 *ppp = NULL;
1421 1419 return (ret);
1422 1420 }
1423 1421
1424 1422 /*
1425 1423 * purge any cached pages in the I/O page cache
1426 1424 */
1427 1425 static void
1428 1426 segspt_purge(struct seg *seg)
1429 1427 {
1430 1428 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1431 1429 }
1432 1430
1433 1431 static int
1434 1432 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1435 1433 enum seg_rw rw, int async)
1436 1434 {
1437 1435 struct seg *seg = (struct seg *)ptag;
1438 1436 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1439 1437 struct seg *sptseg;
1440 1438 struct spt_data *sptd;
1441 1439 pgcnt_t npages, i, free_availrmem = 0;
1442 1440 int done = 0;
1443 1441
1444 1442 #ifdef lint
1445 1443 addr = addr;
1446 1444 #endif
1447 1445 sptseg = shmd->shm_sptseg;
1448 1446 sptd = sptseg->s_data;
1449 1447 npages = (len >> PAGESHIFT);
1450 1448 ASSERT(npages);
1451 1449 ASSERT(sptd->spt_pcachecnt != 0);
1452 1450 ASSERT(sptd->spt_ppa == pplist);
1453 1451 ASSERT(npages == btopr(sptd->spt_amp->size));
1454 1452 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1455 1453
1456 1454 /*
1457 1455 * Acquire the lock on the dummy seg and destroy the
1458 1456 * ppa array IF this is the last pcachecnt.
1459 1457 */
1460 1458 mutex_enter(&sptd->spt_lock);
1461 1459 if (--sptd->spt_pcachecnt == 0) {
1462 1460 for (i = 0; i < npages; i++) {
1463 1461 if (pplist[i] == NULL) {
1464 1462 continue;
1465 1463 }
1466 1464 if (rw == S_WRITE) {
1467 1465 hat_setrefmod(pplist[i]);
1468 1466 } else {
1469 1467 hat_setref(pplist[i]);
1470 1468 }
1471 1469 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1472 1470 (sptd->spt_ppa_lckcnt[i] == 0))
1473 1471 free_availrmem++;
1474 1472 page_unlock(pplist[i]);
1475 1473 }
1476 1474 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1477 1475 mutex_enter(&freemem_lock);
1478 1476 availrmem += free_availrmem;
1479 1477 mutex_exit(&freemem_lock);
1480 1478 }
1481 1479 /*
1482 1480 * Since we want to cache/uncache the entire ISM segment,
1483 1481 * we will track the pplist in a segspt specific field
1484 1482 * ppa, that is initialized at the time we add an entry to
1485 1483 * the cache.
1486 1484 */
1487 1485 ASSERT(sptd->spt_pcachecnt == 0);
1488 1486 kmem_free(pplist, sizeof (page_t *) * npages);
1489 1487 sptd->spt_ppa = NULL;
1490 1488 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1491 1489 sptd->spt_gen++;
1492 1490 cv_broadcast(&sptd->spt_cv);
1493 1491 done = 1;
1494 1492 }
1495 1493 mutex_exit(&sptd->spt_lock);
1496 1494
1497 1495 /*
1498 1496 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
1499 1497 * may not hold AS lock (in this case async argument is not 0). This
1500 1498 * means that if softlockcnt drops to 0 after the decrement below, the
1501 1499 * address space may get freed. We can't allow it since after the softlock
1502 1500 * decrement to 0 we still need to access the as structure for a possible
1503 1501 * wakeup of unmap waiters. To prevent the disappearance of as we take
1504 1502 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1505 1503 * this mutex as a barrier to make sure this routine completes before
1506 1504 * segment is freed.
1507 1505 *
1508 1506 * The second complication we have to deal with in async case is a
1509 1507 * possibility of missed wake up of unmap wait thread. When we don't
1510 1508 * hold as lock here we may take a_contents lock before unmap wait
1511 1509 * thread that was first to see softlockcnt was still not 0. As a
1512 1510 * result we'll fail to wake up an unmap wait thread. To avoid this
1513 1511 * race we set nounmapwait flag in as structure if we drop softlockcnt
1514 1512 * to 0 when async is not 0. The unmapwait thread
1515 1513 * will not block if this flag is set.
1516 1514 */
1517 1515 if (async)
1518 1516 mutex_enter(&shmd->shm_segfree_syncmtx);
1519 1517
1520 1518 /*
1521 1519 * Now decrement softlockcnt.
1522 1520 */
1523 1521 ASSERT(shmd->shm_softlockcnt > 0);
1524 1522 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1525 1523
1526 1524 if (shmd->shm_softlockcnt <= 0) {
1527 1525 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1528 1526 mutex_enter(&seg->s_as->a_contents);
1529 1527 if (async)
1530 1528 AS_SETNOUNMAPWAIT(seg->s_as);
1531 1529 if (AS_ISUNMAPWAIT(seg->s_as)) {
1532 1530 AS_CLRUNMAPWAIT(seg->s_as);
1533 1531 cv_broadcast(&seg->s_as->a_cv);
1534 1532 }
1535 1533 mutex_exit(&seg->s_as->a_contents);
1536 1534 }
1537 1535 }
1538 1536
1539 1537 if (async)
1540 1538 mutex_exit(&shmd->shm_segfree_syncmtx);
1541 1539
1542 1540 return (done);
1543 1541 }
1544 1542
1545 1543 /*
1546 1544 * Do a F_SOFTUNLOCK call over the range requested.
1547 1545 * The range must have already been F_SOFTLOCK'ed.
1548 1546 *
1549 1547 * The calls to acquire and release the anon map lock mutex were
1550 1548 * removed in order to avoid a deadly embrace during a DR
1551 1549 * memory delete operation. (Eg. DR blocks while waiting for a
1552 1550 * exclusive lock on a page that is being used for kaio; the
1553 1551 * thread that will complete the kaio and call segspt_softunlock
1554 1552 * blocks on the anon map lock; another thread holding the anon
1555 1553 * map lock blocks on another page lock via the segspt_shmfault
1556 1554 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1557 1555 *
1558 1556 * The appropriateness of the removal is based upon the following:
1559 1557 * 1. If we are holding a segment's reader lock and the page is held
1560 1558 * shared, then the corresponding element in anonmap which points to
1561 1559 * anon struct cannot change and there is no need to acquire the
1562 1560 * anonymous map lock.
1563 1561 * 2. Threads in segspt_softunlock have a reader lock on the segment
1564 1562 * and already have the shared page lock, so we are guaranteed that
1565 1563 * the anon map slot cannot change and therefore can call anon_get_ptr()
1566 1564 * without grabbing the anonymous map lock.
1567 1565 * 3. Threads that softlock a shared page break copy-on-write, even if
1568 1566 * its a read. Thus cow faults can be ignored with respect to soft
1569 1567 * unlocking, since the breaking of cow means that the anon slot(s) will
1570 1568 * not be shared.
1571 1569 */
1572 1570 static void
1573 1571 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1574 1572 size_t len, enum seg_rw rw)
1575 1573 {
1576 1574 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1577 1575 struct seg *sptseg;
1578 1576 struct spt_data *sptd;
1579 1577 page_t *pp;
1580 1578 caddr_t adr;
1581 1579 struct vnode *vp;
1582 1580 u_offset_t offset;
1583 1581 ulong_t anon_index;
1584 1582 struct anon_map *amp; /* XXX - for locknest */
1585 1583 struct anon *ap = NULL;
1586 1584 pgcnt_t npages;
1587 1585
1588 1586 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1589 1587
1590 1588 sptseg = shmd->shm_sptseg;
1591 1589 sptd = sptseg->s_data;
1592 1590
1593 1591 /*
1594 1592 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1595 1593 * and therefore their pages are SE_SHARED locked
1596 1594 * for the entire life of the segment.
1597 1595 */
1598 1596 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1599 1597 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1600 1598 goto softlock_decrement;
1601 1599 }
1602 1600
1603 1601 /*
1604 1602 * Any thread is free to do a page_find and
1605 1603 * page_unlock() on the pages within this seg.
1606 1604 *
1607 1605 * We are already holding the as->a_lock on the user's
1608 1606 * real segment, but we need to hold the a_lock on the
1609 1607 * underlying dummy as. This is mostly to satisfy the
1610 1608 * underlying HAT layer.
1611 1609 */
1612 1610 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1613 1611 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1614 1612 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1615 1613
1616 1614 amp = sptd->spt_amp;
1617 1615 ASSERT(amp != NULL);
1618 1616 anon_index = seg_page(sptseg, sptseg_addr);
1619 1617
1620 1618 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1621 1619 ap = anon_get_ptr(amp->ahp, anon_index++);
1622 1620 ASSERT(ap != NULL);
1623 1621 swap_xlate(ap, &vp, &offset);
1624 1622
1625 1623 /*
1626 1624 * Use page_find() instead of page_lookup() to
1627 1625 * find the page since we know that it has a
1628 1626 * "shared" lock.
1629 1627 */
1630 1628 pp = page_find(vp, offset);
1631 1629 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1632 1630 if (pp == NULL) {
1633 1631 panic("segspt_softunlock: "
1634 1632 "addr %p, ap %p, vp %p, off %llx",
1635 1633 (void *)adr, (void *)ap, (void *)vp, offset);
1636 1634 /*NOTREACHED*/
1637 1635 }
1638 1636
1639 1637 if (rw == S_WRITE) {
1640 1638 hat_setrefmod(pp);
1641 1639 } else if (rw != S_OTHER) {
1642 1640 hat_setref(pp);
1643 1641 }
1644 1642 page_unlock(pp);
1645 1643 }
1646 1644
1647 1645 softlock_decrement:
1648 1646 npages = btopr(len);
1649 1647 ASSERT(shmd->shm_softlockcnt >= npages);
1650 1648 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1651 1649 if (shmd->shm_softlockcnt == 0) {
1652 1650 /*
1653 1651 * All SOFTLOCKS are gone. Wakeup any waiting
1654 1652 * unmappers so they can try again to unmap.
1655 1653 * Check for waiters first without the mutex
1656 1654 * held so we don't always grab the mutex on
1657 1655 * softunlocks.
1658 1656 */
1659 1657 if (AS_ISUNMAPWAIT(seg->s_as)) {
1660 1658 mutex_enter(&seg->s_as->a_contents);
1661 1659 if (AS_ISUNMAPWAIT(seg->s_as)) {
1662 1660 AS_CLRUNMAPWAIT(seg->s_as);
1663 1661 cv_broadcast(&seg->s_as->a_cv);
1664 1662 }
1665 1663 mutex_exit(&seg->s_as->a_contents);
1666 1664 }
1667 1665 }
1668 1666 }
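The tail of segspt_softunlock() above only grabs a_contents when a waiter may be present. A minimal sketch of that check / lock / re-check wakeup pattern, assuming kernel context; the names waiters_present, wait_lock and wait_cv are hypothetical, not from this file:

#include <sys/mutex.h>
#include <sys/condvar.h>

static volatile int	waiters_present;	/* set by the waiting side */
static kmutex_t		wait_lock;		/* protects waiters_present */
static kcondvar_t	wait_cv;

static void
wakeup_if_needed(void)
{
	/* Cheap unlocked check so the common case takes no mutex. */
	if (waiters_present) {
		mutex_enter(&wait_lock);
		/* Re-check under the lock before waking anyone. */
		if (waiters_present) {
			waiters_present = 0;
			cv_broadcast(&wait_cv);
		}
		mutex_exit(&wait_lock);
	}
}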
1669 1667
1670 1668 int
1671 1669 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1672 1670 {
1673 1671 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1674 1672 struct shm_data *shmd;
1675 1673 struct anon_map *shm_amp = shmd_arg->shm_amp;
1676 1674 struct spt_data *sptd;
1677 1675 int error = 0;
1678 1676
1679 1677 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1680 1678
1681 1679 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1682 1680 if (shmd == NULL)
1683 1681 return (ENOMEM);
1684 1682
1685 1683 shmd->shm_sptas = shmd_arg->shm_sptas;
1686 1684 shmd->shm_amp = shm_amp;
1687 1685 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1688 1686
1689 1687 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1690 1688 NULL, 0, seg->s_size);
1691 1689
1692 1690 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1693 1691
1694 1692 seg->s_data = (void *)shmd;
1695 1693 seg->s_ops = &segspt_shmops;
1696 1694 seg->s_szc = shmd->shm_sptseg->s_szc;
1697 1695 sptd = shmd->shm_sptseg->s_data;
1698 1696
1699 1697 if (sptd->spt_flags & SHM_PAGEABLE) {
1700 1698 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1701 1699 KM_NOSLEEP)) == NULL) {
1702 1700 seg->s_data = (void *)NULL;
1703 1701 kmem_free(shmd, (sizeof (*shmd)));
1704 1702 return (ENOMEM);
1705 1703 }
1706 1704 shmd->shm_lckpgs = 0;
1707 1705 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1708 1706 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1709 1707 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1710 1708 seg->s_size, seg->s_szc)) != 0) {
1711 1709 kmem_free(shmd->shm_vpage,
1712 1710 btopr(shm_amp->size));
1713 1711 }
1714 1712 }
1715 1713 } else {
1716 1714 error = hat_share(seg->s_as->a_hat, seg->s_base,
1717 1715 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1718 1716 seg->s_size, seg->s_szc);
1719 1717 }
1720 1718 if (error) {
1721 1719 seg->s_szc = 0;
1722 1720 seg->s_data = (void *)NULL;
1723 1721 kmem_free(shmd, (sizeof (*shmd)));
1724 1722 } else {
1725 1723 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1726 1724 shm_amp->refcnt++;
1727 1725 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1728 1726 }
1729 1727 return (error);
1730 1728 }
1731 1729
1732 1730 int
1733 1731 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1734 1732 {
1735 1733 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1736 1734 int reclaim = 1;
1737 1735
1738 1736 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1739 1737 retry:
1740 1738 if (shmd->shm_softlockcnt > 0) {
1741 1739 if (reclaim == 1) {
1742 1740 segspt_purge(seg);
1743 1741 reclaim = 0;
1744 1742 goto retry;
1745 1743 }
1746 1744 return (EAGAIN);
1747 1745 }
1748 1746
1749 1747 if (ssize != seg->s_size) {
1750 1748 #ifdef DEBUG
1751 1749 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1752 1750 ssize, seg->s_size);
1753 1751 #endif
1754 1752 return (EINVAL);
1755 1753 }
1756 1754
1757 1755 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1758 1756 NULL, 0);
1759 1757 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1760 1758
1761 1759 seg_free(seg);
1762 1760
1763 1761 return (0);
1764 1762 }
1765 1763
1766 1764 void
1767 1765 segspt_shmfree(struct seg *seg)
1768 1766 {
1769 1767 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1770 1768 struct anon_map *shm_amp = shmd->shm_amp;
1771 1769
1772 1770 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1773 1771
1774 1772 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1775 1773 MC_UNLOCK, NULL, 0);
1776 1774
1777 1775 /*
1778 1776 * Need to increment refcnt when attaching
1779 1777 * and decrement when detaching because of dup().
1780 1778 */
1781 1779 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1782 1780 shm_amp->refcnt--;
1783 1781 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1784 1782
1785 1783 if (shmd->shm_vpage) { /* only for DISM */
1786 1784 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1787 1785 shmd->shm_vpage = NULL;
1788 1786 }
1789 1787
1790 1788 /*
1791 1789 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1792 1790 * still working with this segment without holding as lock.
1793 1791 */
1794 1792 ASSERT(shmd->shm_softlockcnt == 0);
1795 1793 mutex_enter(&shmd->shm_segfree_syncmtx);
1796 1794 mutex_destroy(&shmd->shm_segfree_syncmtx);
1797 1795
1798 1796 kmem_free(shmd, sizeof (*shmd));
1799 1797 }
1800 1798
1801 1799 /*ARGSUSED*/
1802 1800 int
1803 1801 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1804 1802 {
1805 1803 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1806 1804
1807 1805 /*
1808 1806 * A shared page table is more than a shared mapping.
1809 1807 * An individual process sharing the page tables can't change
1810 1808 * protections because there is only one set of page tables.
1811 1809 * This will be allowed once private page tables are
1812 1810 * supported.
1813 1811 */
1814 1812 /* need to return correct status error? */
1815 1813 return (0);
1816 1814 }
1817 1815
1818 1816
1819 1817 faultcode_t
1820 1818 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1821 1819 size_t len, enum fault_type type, enum seg_rw rw)
1822 1820 {
1823 1821 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1824 1822 struct seg *sptseg = shmd->shm_sptseg;
1825 1823 struct as *curspt = shmd->shm_sptas;
1826 1824 struct spt_data *sptd = sptseg->s_data;
1827 1825 pgcnt_t npages;
1828 1826 size_t size;
1829 1827 caddr_t segspt_addr, shm_addr;
1830 1828 page_t **ppa;
1831 1829 int i;
1832 1830 ulong_t an_idx = 0;
1833 1831 int err = 0;
1834 1832 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1835 1833 size_t pgsz;
1836 1834 pgcnt_t pgcnt;
1837 1835 caddr_t a;
1838 1836 pgcnt_t pidx;
1839 1837
1840 1838 #ifdef lint
1841 1839 hat = hat;
1842 1840 #endif
1843 1841 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1844 1842
1845 1843 /*
1846 1844 * Because of the way spt is implemented
1847 1845 * the realsize of the segment does not have to be
1848 1846 * equal to the segment size itself. The segment size is
1849 1847 * often in multiples of a page size larger than PAGESIZE.
1850 1848 * The realsize is rounded up to the nearest PAGESIZE
1851 1849 * based on what the user requested. This is a bit of
1852 1850 * ugliness that is historical but not easily fixed
1853 1851 * without re-designing the higher levels of ISM.
1854 1852 */
1855 1853 ASSERT(addr >= seg->s_base);
1856 1854 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1857 1855 return (FC_NOMAP);
1858 1856 /*
1859 1857 * For all of the following cases except F_PROT, we need to
1860 1858 * make any necessary adjustments to addr and len
1861 1859 * and get all of the necessary page_t's into an array called ppa[].
1862 1860 *
1863 1861 * The code in shmat() forces base addr and len of ISM segment
1864 1862 * to be aligned to largest page size supported. Therefore,
1865 1863 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1866 1864 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1867 1865 * in large pagesize chunks, or else we will screw up the HAT
1868 1866 * layer by calling hat_memload_array() with differing page sizes
1869 1867 * over a given virtual range.
1870 1868 */
1871 1869 pgsz = page_get_pagesize(sptseg->s_szc);
1872 1870 pgcnt = page_get_pagecnt(sptseg->s_szc);
1873 1871 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1874 1872 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1875 1873 npages = btopr(size);
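	/*
	 * For illustration (values assumed, not mandated by this code): with
	 * pgsz = 4M and PAGESIZE = 4K, a fault at addr = seg->s_base + 0x401000
	 * of len 0x2000 gives shm_addr = seg->s_base + 0x400000,
	 * size = P2ROUNDUP(0x3000, 4M) = 4M and npages = 1024, i.e. the whole
	 * large page containing the faulting range is handled at once.
	 */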
1876 1874
1877 1875 /*
1878 1876 * Now we need to convert from addr in segshm to addr in segspt.
1879 1877 */
1880 1878 an_idx = seg_page(seg, shm_addr);
1881 1879 segspt_addr = sptseg->s_base + ptob(an_idx);
1882 1880
1883 1881 ASSERT((segspt_addr + ptob(npages)) <=
1884 1882 (sptseg->s_base + sptd->spt_realsize));
1885 1883 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1886 1884
1887 1885 switch (type) {
1888 1886
1889 1887 case F_SOFTLOCK:
1890 1888
1891 1889 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1892 1890 /*
1893 1891 * Fall through to the F_INVAL case to load up the hat layer
1894 1892 * entries with the HAT_LOAD_LOCK flag.
1895 1893 */
1896 1894 /* FALLTHRU */
1897 1895 case F_INVAL:
1898 1896
1899 1897 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1900 1898 return (FC_NOMAP);
1901 1899
1902 1900 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1903 1901
1904 1902 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1905 1903 if (err != 0) {
1906 1904 if (type == F_SOFTLOCK) {
1907 1905 atomic_add_long((ulong_t *)(
1908 1906 &(shmd->shm_softlockcnt)), -npages);
1909 1907 }
1910 1908 goto dism_err;
1911 1909 }
1912 1910 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1913 1911 a = segspt_addr;
1914 1912 pidx = 0;
1915 1913 if (type == F_SOFTLOCK) {
1916 1914
1917 1915 /*
1918 1916 * Load up the translation keeping it
1919 1917 * locked and don't unlock the page.
1920 1918 */
1921 1919 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1922 1920 hat_memload_array(sptseg->s_as->a_hat,
1923 1921 a, pgsz, &ppa[pidx], sptd->spt_prot,
1924 1922 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1925 1923 }
1926 1924 } else {
1927 1925 if (hat == seg->s_as->a_hat) {
1928 1926
1929 1927 /*
1930 1928 * Migrate pages marked for migration
1931 1929 */
1932 1930 if (lgrp_optimizations())
1933 1931 page_migrate(seg, shm_addr, ppa,
1934 1932 npages);
1935 1933
1936 1934 /* CPU HAT */
1937 1935 for (; pidx < npages;
1938 1936 a += pgsz, pidx += pgcnt) {
1939 1937 hat_memload_array(sptseg->s_as->a_hat,
1940 1938 a, pgsz, &ppa[pidx],
1941 1939 sptd->spt_prot,
1942 1940 HAT_LOAD_SHARE);
1943 1941 }
1944 1942 } else {
1945 1943 /* XHAT. Pass real address */
1946 1944 hat_memload_array(hat, shm_addr,
1947 1945 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);
1948 1946 }
1949 1947
1950 1948 /*
1951 1949 * And now drop the SE_SHARED lock(s).
1952 1950 */
1953 1951 if (dyn_ism_unmap) {
1954 1952 for (i = 0; i < npages; i++) {
1955 1953 page_unlock(ppa[i]);
1956 1954 }
1957 1955 }
1958 1956 }
1959 1957
1960 1958 if (!dyn_ism_unmap) {
1961 1959 if (hat_share(seg->s_as->a_hat, shm_addr,
1962 1960 curspt->a_hat, segspt_addr, ptob(npages),
1963 1961 seg->s_szc) != 0) {
1964 1962 panic("hat_share err in DISM fault");
1965 1963 /* NOTREACHED */
1966 1964 }
1967 1965 if (type == F_INVAL) {
1968 1966 for (i = 0; i < npages; i++) {
1969 1967 page_unlock(ppa[i]);
1970 1968 }
1971 1969 }
1972 1970 }
1973 1971 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1974 1972 dism_err:
1975 1973 kmem_free(ppa, npages * sizeof (page_t *));
1976 1974 return (err);
1977 1975
1978 1976 case F_SOFTUNLOCK:
1979 1977
1980 1978 /*
1981 1979 * This is a bit ugly, we pass in the real seg pointer,
1982 1980 * but the segspt_addr is the virtual address within the
1983 1981 * dummy seg.
1984 1982 */
1985 1983 segspt_softunlock(seg, segspt_addr, size, rw);
1986 1984 return (0);
1987 1985
1988 1986 case F_PROT:
1989 1987
1990 1988 /*
1991 1989 * This takes care of the unusual case where a user
1992 1990 * allocates a stack in shared memory and a register
1993 1991 * window overflow is written to that stack page before
1994 1992 * it is otherwise modified.
1995 1993 *
1996 1994 * We can get away with this because ISM segments are
1997 1995 * always rw. Other than this unusual case, there
1998 1996 * should be no instances of protection violations.
1999 1997 */
2000 1998 return (0);
2001 1999
2002 2000 default:
2003 2001 #ifdef DEBUG
2004 2002 panic("segspt_dismfault default type?");
2005 2003 #else
2006 2004 return (FC_NOMAP);
2007 2005 #endif
2008 2006 }
2009 2007 }
2010 2008
2011 2009
2012 2010 faultcode_t
2013 2011 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2014 2012 size_t len, enum fault_type type, enum seg_rw rw)
2015 2013 {
2016 2014 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2017 2015 struct seg *sptseg = shmd->shm_sptseg;
2018 2016 struct as *curspt = shmd->shm_sptas;
2019 2017 struct spt_data *sptd = sptseg->s_data;
2020 2018 pgcnt_t npages;
2021 2019 size_t size;
2022 2020 caddr_t sptseg_addr, shm_addr;
2023 2021 page_t *pp, **ppa;
2024 2022 int i;
2025 2023 u_offset_t offset;
2026 2024 ulong_t anon_index = 0;
2027 2025 struct vnode *vp;
2028 2026 struct anon_map *amp; /* XXX - for locknest */
2029 2027 struct anon *ap = NULL;
2030 2028 size_t pgsz;
2031 2029 pgcnt_t pgcnt;
2032 2030 caddr_t a;
2033 2031 pgcnt_t pidx;
2034 2032 size_t sz;
2035 2033
2036 2034 #ifdef lint
2037 2035 hat = hat;
2038 2036 #endif
2039 2037
2040 2038 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2041 2039
2042 2040 if (sptd->spt_flags & SHM_PAGEABLE) {
2043 2041 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2044 2042 }
2045 2043
2046 2044 /*
2047 2045 * Because of the way spt is implemented
2048 2046 * the realsize of the segment does not have to be
2049 2047 * equal to the segment size itself. The segment size is
2050 2048 * often in multiples of a page size larger than PAGESIZE.
2051 2049 * The realsize is rounded up to the nearest PAGESIZE
2052 2050 * based on what the user requested. This is a bit of
2053 2051 * ugliness that is historical but not easily fixed
2054 2052 * without re-designing the higher levels of ISM.
2055 2053 */
2056 2054 ASSERT(addr >= seg->s_base);
2057 2055 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2058 2056 return (FC_NOMAP);
2059 2057 /*
2060 2058 * For all of the following cases except F_PROT, we need to
2061 2059 * make any necessary adjustments to addr and len
2062 2060 * and get all of the necessary page_t's into an array called ppa[].
2063 2061 *
2064 2062 * The code in shmat() forces base addr and len of ISM segment
2065 2063 * to be aligned to largest page size supported. Therefore,
2066 2064 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2067 2065 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2068 2066 * in large pagesize chunks, or else we will screw up the HAT
2069 2067 * layer by calling hat_memload_array() with differing page sizes
2070 2068 * over a given virtual range.
2071 2069 */
2072 2070 pgsz = page_get_pagesize(sptseg->s_szc);
2073 2071 pgcnt = page_get_pagecnt(sptseg->s_szc);
2074 2072 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2075 2073 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2076 2074 npages = btopr(size);
2077 2075
2078 2076 /*
2079 2077 * Now we need to convert from addr in segshm to addr in segspt.
2080 2078 */
2081 2079 anon_index = seg_page(seg, shm_addr);
2082 2080 sptseg_addr = sptseg->s_base + ptob(anon_index);
2083 2081
2084 2082 /*
2085 2083 * And now we may have to adjust npages downward if we have
2086 2084 * exceeded the realsize of the segment or initial anon
2087 2085 * allocations.
2088 2086 */
2089 2087 if ((sptseg_addr + ptob(npages)) >
2090 2088 (sptseg->s_base + sptd->spt_realsize))
2091 2089 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2092 2090
2093 2091 npages = btopr(size);
2094 2092
2095 2093 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2096 2094 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2097 2095
2098 2096 switch (type) {
2099 2097
2100 2098 case F_SOFTLOCK:
2101 2099
2102 2100 /*
2103 2101 * availrmem is decremented once during anon_swap_adjust()
2104 2102 * and is incremented during the anon_unresv(), which is
2105 2103 * called from shm_rm_amp() when the segment is destroyed.
2106 2104 */
2107 2105 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2108 2106 /*
2109 2107 * Some platforms assume that ISM pages are SE_SHARED
2110 2108 * locked for the entire life of the segment.
2111 2109 */
2112 2110 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2113 2111 return (0);
2114 2112 /*
2115 2113 * Fall through to the F_INVAL case to load up the hat layer
2116 2114 * entries with the HAT_LOAD_LOCK flag.
2117 2115 */
2118 2116
2119 2117 /* FALLTHRU */
2120 2118 case F_INVAL:
2121 2119
2122 2120 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2123 2121 return (FC_NOMAP);
2124 2122
2125 2123 /*
2126 2124 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2127 2125 * may still rely on this call to hat_share(). That
2128 2126 * would imply that those hats can fault on a
2129 2127 * HAT_LOAD_LOCK translation, which would seem
2130 2128 * contradictory.
2131 2129 */
2132 2130 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2133 2131 if (hat_share(seg->s_as->a_hat, seg->s_base,
2134 2132 curspt->a_hat, sptseg->s_base,
2135 2133 sptseg->s_size, sptseg->s_szc) != 0) {
2136 2134 panic("hat_share error in ISM fault");
2137 2135 /*NOTREACHED*/
2138 2136 }
2139 2137 return (0);
2140 2138 }
2141 2139 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2142 2140
2143 2141 /*
2144 2142 * I see no need to lock the real seg
2145 2143 * here, because all of our work will be on the underlying
2146 2144 * dummy seg.
2147 2145 *
2148 2146 * sptseg_addr and npages now account for large pages.
2149 2147 */
2150 2148 amp = sptd->spt_amp;
2151 2149 ASSERT(amp != NULL);
2152 2150 anon_index = seg_page(sptseg, sptseg_addr);
2153 2151
2154 2152 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2155 2153 for (i = 0; i < npages; i++) {
2156 2154 ap = anon_get_ptr(amp->ahp, anon_index++);
2157 2155 ASSERT(ap != NULL);
2158 2156 swap_xlate(ap, &vp, &offset);
2159 2157 pp = page_lookup(vp, offset, SE_SHARED);
2160 2158 ASSERT(pp != NULL);
2161 2159 ppa[i] = pp;
2162 2160 }
2163 2161 ANON_LOCK_EXIT(&amp->a_rwlock);
2164 2162 ASSERT(i == npages);
2165 2163
2166 2164 /*
2167 2165 * We are already holding the as->a_lock on the user's
2168 2166 * real segment, but we need to hold the a_lock on the
2169 2167 * underlying dummy as. This is mostly to satisfy the
2170 2168 * underlying HAT layer.
2171 2169 */
2172 2170 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2173 2171 a = sptseg_addr;
2174 2172 pidx = 0;
2175 2173 if (type == F_SOFTLOCK) {
2176 2174 /*
2177 2175 * Load up the translation keeping it
2178 2176 * locked and don't unlock the page.
2179 2177 */
2180 2178 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2181 2179 sz = MIN(pgsz, ptob(npages - pidx));
2182 2180 hat_memload_array(sptseg->s_as->a_hat, a,
2183 2181 sz, &ppa[pidx], sptd->spt_prot,
2184 2182 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2185 2183 }
2186 2184 } else {
2187 2185 if (hat == seg->s_as->a_hat) {
2188 2186
2189 2187 /*
2190 2188 * Migrate pages marked for migration.
2191 2189 */
2192 2190 if (lgrp_optimizations())
2193 2191 page_migrate(seg, shm_addr, ppa,
2194 2192 npages);
2195 2193
2196 2194 /* CPU HAT */
2197 2195 for (; pidx < npages;
2198 2196 a += pgsz, pidx += pgcnt) {
2199 2197 sz = MIN(pgsz, ptob(npages - pidx));
2200 2198 hat_memload_array(sptseg->s_as->a_hat,
2201 2199 a, sz, &ppa[pidx],
2202 2200 sptd->spt_prot, HAT_LOAD_SHARE);
2203 2201 }
2204 2202 } else {
2205 2203 /* XHAT. Pass real address */
2206 2204 hat_memload_array(hat, shm_addr,
2207 2205 ptob(npages), ppa, sptd->spt_prot,
2208 2206 HAT_LOAD_SHARE);
2209 2207 }
2210 2208
2211 2209 /*
2212 2210 * And now drop the SE_SHARED lock(s).
2213 2211 */
2214 2212 for (i = 0; i < npages; i++)
2215 2213 page_unlock(ppa[i]);
2216 2214 }
2217 2215 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2218 2216
2219 2217 kmem_free(ppa, sizeof (page_t *) * npages);
2220 2218 return (0);
2221 2219 case F_SOFTUNLOCK:
2222 2220
2223 2221 /*
2224 2222 * This is a bit ugly, we pass in the real seg pointer,
2225 2223 * but the sptseg_addr is the virtual address within the
2226 2224 * dummy seg.
2227 2225 */
2228 2226 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2229 2227 return (0);
2230 2228
2231 2229 case F_PROT:
2232 2230
2233 2231 /*
2234 2232 * This takes care of the unusual case where a user
2235 2233 * allocates a stack in shared memory and a register
2236 2234 * window overflow is written to that stack page before
2237 2235 * it is otherwise modified.
2238 2236 *
2239 2237 * We can get away with this because ISM segments are
2240 2238 * always rw. Other than this unusual case, there
2241 2239 * should be no instances of protection violations.
2242 2240 */
2243 2241 return (0);
2244 2242
2245 2243 default:
2246 2244 #ifdef DEBUG
2247 2245 cmn_err(CE_WARN, "segspt_shmfault default type?");
2248 2246 #endif
2249 2247 return (FC_NOMAP);
2250 2248 }
2251 2249 }
2252 2250
2253 2251 /*ARGSUSED*/
2254 2252 static faultcode_t
2255 2253 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2256 2254 {
2257 2255 return (0);
2258 2256 }
2259 2257
2260 2258 /*ARGSUSED*/
2261 2259 static int
2262 2260 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2263 2261 {
2264 2262 return (0);
2265 2263 }
2266 2264
2267 2265 /*ARGSUSED*/
2268 2266 static size_t
2269 2267 segspt_shmswapout(struct seg *seg)
2270 2268 {
2271 2269 return (0);
2272 2270 }
2273 2271
2274 2272 /*
2275 2273 * duplicate the shared page tables
2276 2274 */
2277 2275 int
2278 2276 segspt_shmdup(struct seg *seg, struct seg *newseg)
2279 2277 {
2280 2278 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2281 2279 struct anon_map *amp = shmd->shm_amp;
2282 2280 struct shm_data *shmd_new;
2283 2281 struct seg *spt_seg = shmd->shm_sptseg;
2284 2282 struct spt_data *sptd = spt_seg->s_data;
2285 2283 int error = 0;
2286 2284
2287 2285 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2288 2286
2289 2287 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2290 2288 newseg->s_data = (void *)shmd_new;
2291 2289 shmd_new->shm_sptas = shmd->shm_sptas;
2292 2290 shmd_new->shm_amp = amp;
2293 2291 shmd_new->shm_sptseg = shmd->shm_sptseg;
2294 2292 newseg->s_ops = &segspt_shmops;
2295 2293 newseg->s_szc = seg->s_szc;
2296 2294 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2297 2295
2298 2296 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2299 2297 amp->refcnt++;
2300 2298 ANON_LOCK_EXIT(&amp->a_rwlock);
2301 2299
2302 2300 if (sptd->spt_flags & SHM_PAGEABLE) {
2303 2301 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2304 2302 shmd_new->shm_lckpgs = 0;
2305 2303 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2306 2304 if ((error = hat_share(newseg->s_as->a_hat,
2307 2305 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2308 2306 seg->s_size, seg->s_szc)) != 0) {
2309 2307 kmem_free(shmd_new->shm_vpage,
2310 2308 btopr(amp->size));
2311 2309 }
2312 2310 }
2313 2311 return (error);
2314 2312 } else {
2315 2313 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2316 2314 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2317 2315 seg->s_szc));
2318 2316
2319 2317 }
2320 2318 }
2321 2319
2322 2320 /*ARGSUSED*/
2323 2321 int
2324 2322 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2325 2323 {
2326 2324 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2327 2325 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2328 2326
2329 2327 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2330 2328
2331 2329 /*
2332 2330 * ISM segment is always rw.
2333 2331 */
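	/*
	 * e.g. PROT_READ|PROT_WRITE is always granted, while a request that
	 * includes PROT_EXEC fails with EACCES if the segment was created
	 * without execute permission in spt_prot.
	 */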
2334 2332 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2335 2333 }
2336 2334
2337 2335 /*
2338 2336 * Return an array of locked large pages; for empty slots allocate
2339 2337 * private zero-filled anon pages.
2340 2338 */
2341 2339 static int
2342 2340 spt_anon_getpages(
2343 2341 struct seg *sptseg,
2344 2342 caddr_t sptaddr,
2345 2343 size_t len,
2346 2344 page_t *ppa[])
2347 2345 {
2348 2346 struct spt_data *sptd = sptseg->s_data;
2349 2347 struct anon_map *amp = sptd->spt_amp;
2350 2348 enum seg_rw rw = sptd->spt_prot;
2351 2349 uint_t szc = sptseg->s_szc;
2352 2350 size_t pg_sz, share_sz = page_get_pagesize(szc);
2353 2351 pgcnt_t lp_npgs;
2354 2352 caddr_t lp_addr, e_sptaddr;
2355 2353 uint_t vpprot, ppa_szc = 0;
2356 2354 struct vpage *vpage = NULL;
2357 2355 ulong_t j, ppa_idx;
2358 2356 int err, ierr = 0;
2359 2357 pgcnt_t an_idx;
2360 2358 anon_sync_obj_t cookie;
2361 2359 int anon_locked = 0;
2362 2360 pgcnt_t amp_pgs;
2363 2361
2364 2362
2365 2363 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2366 2364 ASSERT(len != 0);
2367 2365
2368 2366 pg_sz = share_sz;
2369 2367 lp_npgs = btop(pg_sz);
2370 2368 lp_addr = sptaddr;
2371 2369 e_sptaddr = sptaddr + len;
2372 2370 an_idx = seg_page(sptseg, sptaddr);
2373 2371 ppa_idx = 0;
2374 2372
2375 2373 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2376 2374
2377 2375 amp_pgs = page_get_pagecnt(amp->a_szc);
2378 2376
2379 2377 /*CONSTCOND*/
2380 2378 while (1) {
2381 2379 for (; lp_addr < e_sptaddr;
2382 2380 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2383 2381
2384 2382 /*
2385 2383 * If we're currently locked, and we get to a new
2386 2384 * page, unlock our current anon chunk.
2387 2385 */
2388 2386 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2389 2387 anon_array_exit(&cookie);
2390 2388 anon_locked = 0;
2391 2389 }
2392 2390 if (!anon_locked) {
2393 2391 anon_array_enter(amp, an_idx, &cookie);
2394 2392 anon_locked = 1;
2395 2393 }
2396 2394 ppa_szc = (uint_t)-1;
2397 2395 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2398 2396 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2399 2397 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2400 2398
2401 2399 if (ierr != 0) {
2402 2400 if (ierr > 0) {
2403 2401 err = FC_MAKE_ERR(ierr);
2404 2402 goto lpgs_err;
2405 2403 }
2406 2404 break;
2407 2405 }
2408 2406 }
2409 2407 if (lp_addr == e_sptaddr) {
2410 2408 break;
2411 2409 }
2412 2410 ASSERT(lp_addr < e_sptaddr);
2413 2411
2414 2412 /*
2415 2413 * ierr == -1 means we failed to allocate a large page.
2416 2414 * so do a size down operation.
2417 2415 *
2418 2416 * ierr == -2 means some other process that privately shares
2419 2417 * pages with this process has allocated a larger page and we
2420 2418 * need to retry with larger pages. So do a size up
2421 2419 * operation. This relies on the fact that large pages are
2422 2420 * never partially shared i.e. if we share any constituent
2423 2421 * page of a large page with another process we must share the
2424 2422 * entire large page. Note this cannot happen for SOFTLOCK
2425 2423 * case, unless current address (lpaddr) is at the beginning
2426 2424 * of the next page size boundary because the other process
2427 2425 * couldn't have relocated locked pages.
2428 2426 */
2429 2427 ASSERT(ierr == -1 || ierr == -2);
2430 2428 if (segvn_anypgsz) {
2431 2429 ASSERT(ierr == -2 || szc != 0);
2432 2430 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2433 2431 szc = (ierr == -1) ? szc - 1 : szc + 1;
2434 2432 } else {
2435 2433 /*
2436 2434 * For faults and segvn_anypgsz == 0
2437 2435 * we need to be careful not to loop forever
2438 2436 * if existing page is found with szc other
2439 2437 * than 0 or seg->s_szc. This could be due
2440 2438 * to page relocations on behalf of DR or
2441 2439 * more likely large page creation. For this
2442 2440 * case simply re-size to existing page's szc
2443 2441 * if returned by anon_map_getpages().
2444 2442 */
2445 2443 if (ppa_szc == (uint_t)-1) {
2446 2444 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2447 2445 } else {
2448 2446 ASSERT(ppa_szc <= sptseg->s_szc);
2449 2447 ASSERT(ierr == -2 || ppa_szc < szc);
2450 2448 ASSERT(ierr == -1 || ppa_szc > szc);
2451 2449 szc = ppa_szc;
2452 2450 }
2453 2451 }
2454 2452 pg_sz = page_get_pagesize(szc);
2455 2453 lp_npgs = btop(pg_sz);
2456 2454 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2457 2455 }
2458 2456 if (anon_locked) {
2459 2457 anon_array_exit(&cookie);
2460 2458 }
2461 2459 ANON_LOCK_EXIT(&amp->a_rwlock);
2462 2460 return (0);
2463 2461
2464 2462 lpgs_err:
2465 2463 if (anon_locked) {
2466 2464 anon_array_exit(&cookie);
2467 2465 }
2468 2466 ANON_LOCK_EXIT(&amp->a_rwlock);
2469 2467 for (j = 0; j < ppa_idx; j++)
2470 2468 page_unlock(ppa[j]);
2471 2469 return (err);
2472 2470 }
2473 2471
2474 2472 /*
2475 2473 * count the number of bytes in a set of spt pages that are currently not
2476 2474 * locked
2477 2475 */
2478 2476 static rctl_qty_t
2479 2477 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2480 2478 {
2481 2479 ulong_t i;
2482 2480 rctl_qty_t unlocked = 0;
2483 2481
2484 2482 for (i = 0; i < npages; i++) {
2485 2483 if (ppa[i]->p_lckcnt == 0)
2486 2484 unlocked += PAGESIZE;
2487 2485 }
2488 2486 return (unlocked);
2489 2487 }
2490 2488
2491 2489 extern u_longlong_t randtick(void);
2492 2490 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2493 2491 #define NLCK (NCPU_P2)
2494 2492 /* Random number with a range [0, n-1], n must be power of two */
2495 2493 #define RAND_P2(n) \
2496 2494 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
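/*
 * For example (illustrative only): on a machine where NCPU_P2 is 8,
 * RAND_P2(NLCK) yields a value in [0, 7], so spt_lockpages() below reserves
 * NLCK + RAND_P2(NLCK) = 8..15 locks at a time, staggering competing threads.
 */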
2497 2495
2498 2496 int
2499 2497 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2500 2498 page_t **ppa, ulong_t *lockmap, size_t pos,
2501 2499 rctl_qty_t *locked)
2502 2500 {
2503 2501 struct shm_data *shmd = seg->s_data;
2504 2502 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2505 2503 ulong_t i;
2506 2504 int kernel;
2507 2505 pgcnt_t nlck = 0;
2508 2506 int rv = 0;
2509 2507 int use_reserved = 1;
2510 2508
2511 2509 /* return the number of bytes actually locked */
2512 2510 *locked = 0;
2513 2511
2514 2512 /*
2515 2513 * To avoid contention on freemem_lock, availrmem and pages_locked
2516 2514 * global counters are updated only every nlck locked pages instead of
2517 2515 * every time. Reserve nlck locks up front and deduct from this
2518 2516 * reservation for each page that requires a lock. When the reservation
2519 2517 * is consumed, reserve again. nlck is randomized, so the competing
2520 2518 * threads do not fall into a cyclic lock contention pattern. When
2521 2519 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2522 2520 * is used to lock pages.
2523 2521 */
2524 2522 for (i = 0; i < npages; anon_index++, pos++, i++) {
2525 2523 if (nlck == 0 && use_reserved == 1) {
2526 2524 nlck = NLCK + RAND_P2(NLCK);
2527 2525 /* if fewer loops left, decrease nlck */
2528 2526 nlck = MIN(nlck, npages - i);
2529 2527 /*
2530 2528 * Reserve nlck locks up front and deduct from this
2531 2529 * reservation for each page that requires a lock. When
2532 2530 * the reservation is consumed, reserve again.
2533 2531 */
2534 2532 mutex_enter(&freemem_lock);
2535 2533 if ((availrmem - nlck) < pages_pp_maximum) {
2536 2534 /* Do not do advance memory reserves */
2537 2535 use_reserved = 0;
2538 2536 } else {
2539 2537 availrmem -= nlck;
2540 2538 pages_locked += nlck;
2541 2539 }
2542 2540 mutex_exit(&freemem_lock);
2543 2541 }
2544 2542 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2545 2543 if (sptd->spt_ppa_lckcnt[anon_index] <
2546 2544 (ushort_t)DISM_LOCK_MAX) {
2547 2545 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2548 2546 (ushort_t)DISM_LOCK_MAX) {
2549 2547 cmn_err(CE_WARN,
2550 2548 "DISM page lock limit "
2551 2549 "reached on DISM offset 0x%lx\n",
2552 2550 anon_index << PAGESHIFT);
2553 2551 }
2554 2552 kernel = (sptd->spt_ppa &&
2555 2553 sptd->spt_ppa[anon_index]);
2556 2554 if (!page_pp_lock(ppa[i], 0, kernel ||
2557 2555 use_reserved)) {
2558 2556 sptd->spt_ppa_lckcnt[anon_index]--;
2559 2557 rv = EAGAIN;
2560 2558 break;
2561 2559 }
2562 2560 /* if this is a newly locked page, count it */
2563 2561 if (ppa[i]->p_lckcnt == 1) {
2564 2562 if (kernel == 0 && use_reserved == 1)
2565 2563 nlck--;
2566 2564 *locked += PAGESIZE;
2567 2565 }
2568 2566 shmd->shm_lckpgs++;
2569 2567 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2570 2568 if (lockmap != NULL)
2571 2569 BT_SET(lockmap, pos);
2572 2570 }
2573 2571 }
2574 2572 }
2575 2573 /* Return unused lock reservation */
2576 2574 if (nlck != 0 && use_reserved == 1) {
2577 2575 mutex_enter(&freemem_lock);
2578 2576 availrmem += nlck;
2579 2577 pages_locked -= nlck;
2580 2578 mutex_exit(&freemem_lock);
2581 2579 }
2582 2580
2583 2581 return (rv);
2584 2582 }
2585 2583
2586 2584 int
2587 2585 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2588 2586 rctl_qty_t *unlocked)
2589 2587 {
2590 2588 struct shm_data *shmd = seg->s_data;
2591 2589 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2592 2590 struct anon_map *amp = sptd->spt_amp;
2593 2591 struct anon *ap;
2594 2592 struct vnode *vp;
2595 2593 u_offset_t off;
2596 2594 struct page *pp;
2597 2595 int kernel;
2598 2596 anon_sync_obj_t cookie;
2599 2597 ulong_t i;
2600 2598 pgcnt_t nlck = 0;
2601 2599 pgcnt_t nlck_limit = NLCK;
2602 2600
2603 2601 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2604 2602 for (i = 0; i < npages; i++, anon_index++) {
2605 2603 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2606 2604 anon_array_enter(amp, anon_index, &cookie);
2607 2605 ap = anon_get_ptr(amp->ahp, anon_index);
2608 2606 ASSERT(ap);
2609 2607
2610 2608 swap_xlate(ap, &vp, &off);
2611 2609 anon_array_exit(&cookie);
2612 2610 pp = page_lookup(vp, off, SE_SHARED);
2613 2611 ASSERT(pp);
2614 2612 /*
2615 2613 * availrmem is decremented only for pages which are not
2616 2614 * in seg pcache; for pages in seg pcache availrmem was
2617 2615 * decremented in _dismpagelock()
2618 2616 */
2619 2617 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2620 2618 ASSERT(pp->p_lckcnt > 0);
2621 2619
2622 2620 /*
2623 2621 * unlock the page but do not change availrmem; we update it
2624 2622 * ourselves every nlck loops.
2625 2623 */
2626 2624 page_pp_unlock(pp, 0, 1);
2627 2625 if (pp->p_lckcnt == 0) {
2628 2626 if (kernel == 0)
2629 2627 nlck++;
2630 2628 *unlocked += PAGESIZE;
2631 2629 }
2632 2630 page_unlock(pp);
2633 2631 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2634 2632 sptd->spt_ppa_lckcnt[anon_index]--;
2635 2633 shmd->shm_lckpgs--;
2636 2634 }
2637 2635
2638 2636 /*
2639 2637 * To reduce freemem_lock contention, do not update availrmem
2640 2638 * until at least NLCK pages have been unlocked.
2641 2639 * 1. No need to update if nlck is zero
2642 2640 * 2. Always update on the last iteration
2643 2641 */
2644 2642 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2645 2643 mutex_enter(&freemem_lock);
2646 2644 availrmem += nlck;
2647 2645 pages_locked -= nlck;
2648 2646 mutex_exit(&freemem_lock);
2649 2647 nlck = 0;
2650 2648 nlck_limit = NLCK + RAND_P2(NLCK);
2651 2649 }
2652 2650 }
2653 2651 ANON_LOCK_EXIT(&amp->a_rwlock);
2654 2652
2655 2653 return (0);
2656 2654 }
2657 2655
2658 2656 /*ARGSUSED*/
2659 2657 static int
2660 2658 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2661 2659 int attr, int op, ulong_t *lockmap, size_t pos)
2662 2660 {
2663 2661 struct shm_data *shmd = seg->s_data;
2664 2662 struct seg *sptseg = shmd->shm_sptseg;
2665 2663 struct spt_data *sptd = sptseg->s_data;
2666 2664 struct kshmid *sp = sptd->spt_amp->a_sp;
2667 2665 pgcnt_t npages, a_npages;
2668 2666 page_t **ppa;
2669 2667 pgcnt_t an_idx, a_an_idx, ppa_idx;
2670 2668 caddr_t spt_addr, a_addr; /* spt and aligned address */
2671 2669 size_t a_len; /* aligned len */
2672 2670 size_t share_sz;
2673 2671 ulong_t i;
2674 2672 int sts = 0;
2675 2673 rctl_qty_t unlocked = 0;
2676 2674 rctl_qty_t locked = 0;
2677 2675 struct proc *p = curproc;
2678 2676 kproject_t *proj;
2679 2677
2680 2678 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2681 2679 ASSERT(sp != NULL);
2682 2680
2683 2681 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2684 2682 return (0);
2685 2683 }
2686 2684
2687 2685 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2688 2686 an_idx = seg_page(seg, addr);
2689 2687 npages = btopr(len);
2690 2688
2691 2689 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2692 2690 return (ENOMEM);
2693 2691 }
2694 2692
2695 2693 /*
2696 2694 * A shm's project never changes, so no lock needed.
2697 2695 * The shm has a hold on the project, so it will not go away.
2698 2696 * Since we have a mapping to shm within this zone, we know
2699 2697 * that the zone will not go away.
2700 2698 */
2701 2699 proj = sp->shm_perm.ipc_proj;
2702 2700
2703 2701 if (op == MC_LOCK) {
2704 2702
2705 2703 /*
2706 2704 * Need to align addr and size request if they are not
2707 2705 * aligned so we can always allocate large page(s); however,
2708 2706 * we only lock what was requested in the initial request.
2709 2707 */
2710 2708 share_sz = page_get_pagesize(sptseg->s_szc);
2711 2709 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2712 2710 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2713 2711 share_sz);
2714 2712 a_npages = btop(a_len);
2715 2713 a_an_idx = seg_page(seg, a_addr);
2716 2714 spt_addr = sptseg->s_base + ptob(a_an_idx);
2717 2715 ppa_idx = an_idx - a_an_idx;
2718 2716
2719 2717 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2720 2718 KM_NOSLEEP)) == NULL) {
2721 2719 return (ENOMEM);
2722 2720 }
2723 2721
2724 2722 /*
2725 2723 * Don't cache any new pages for IO and
2726 2724 * flush any cached pages.
2727 2725 */
2728 2726 mutex_enter(&sptd->spt_lock);
2729 2727 if (sptd->spt_ppa != NULL)
2730 2728 sptd->spt_flags |= DISM_PPA_CHANGED;
2731 2729
2732 2730 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2733 2731 if (sts != 0) {
2734 2732 mutex_exit(&sptd->spt_lock);
2735 2733 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2736 2734 return (sts);
2737 2735 }
2738 2736
2739 2737 mutex_enter(&sp->shm_mlock);
2740 2738 /* enforce locked memory rctl */
2741 2739 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2742 2740
2743 2741 mutex_enter(&p->p_lock);
2744 2742 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2745 2743 mutex_exit(&p->p_lock);
2746 2744 sts = EAGAIN;
2747 2745 } else {
2748 2746 mutex_exit(&p->p_lock);
2749 2747 sts = spt_lockpages(seg, an_idx, npages,
2750 2748 &ppa[ppa_idx], lockmap, pos, &locked);
2751 2749
2752 2750 /*
2753 2751 * correct locked count if not all pages could be
2754 2752 * locked
2755 2753 */
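			/*
			 * For instance, if 64K was charged as "unlocked"
			 * above but only 48K could actually be locked, the
			 * 16K difference is returned to the project rctl.
			 */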
2756 2754 if ((unlocked - locked) > 0) {
2757 2755 rctl_decr_locked_mem(NULL, proj,
2758 2756 (unlocked - locked), 0);
2759 2757 }
2760 2758 }
2761 2759 /*
2762 2760 * unlock pages
2763 2761 */
2764 2762 for (i = 0; i < a_npages; i++)
2765 2763 page_unlock(ppa[i]);
2766 2764 if (sptd->spt_ppa != NULL)
2767 2765 sptd->spt_flags |= DISM_PPA_CHANGED;
2768 2766 mutex_exit(&sp->shm_mlock);
2769 2767 mutex_exit(&sptd->spt_lock);
2770 2768
2771 2769 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2772 2770
2773 2771 } else if (op == MC_UNLOCK) { /* unlock */
2774 2772 page_t **ppa;
2775 2773
2776 2774 mutex_enter(&sptd->spt_lock);
2777 2775 if (shmd->shm_lckpgs == 0) {
2778 2776 mutex_exit(&sptd->spt_lock);
2779 2777 return (0);
2780 2778 }
2781 2779 /*
2782 2780 * Don't cache new IO pages.
2783 2781 */
2784 2782 if (sptd->spt_ppa != NULL)
2785 2783 sptd->spt_flags |= DISM_PPA_CHANGED;
2786 2784
2787 2785 mutex_enter(&sp->shm_mlock);
2788 2786 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2789 2787 if ((ppa = sptd->spt_ppa) != NULL)
2790 2788 sptd->spt_flags |= DISM_PPA_CHANGED;
2791 2789 mutex_exit(&sptd->spt_lock);
2792 2790
2793 2791 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2794 2792 mutex_exit(&sp->shm_mlock);
2795 2793
2796 2794 if (ppa != NULL)
2797 2795 seg_ppurge_wiredpp(ppa);
2798 2796 }
2799 2797 return (sts);
2800 2798 }
2801 2799
2802 2800 /*ARGSUSED*/
2803 2801 int
2804 2802 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2805 2803 {
2806 2804 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2807 2805 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2808 2806 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2809 2807
2810 2808 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2811 2809
2812 2810 /*
2813 2811 * ISM segment is always rw.
2814 2812 */
2815 2813 while (--pgno >= 0)
2816 2814 *protv++ = sptd->spt_prot;
2817 2815 return (0);
2818 2816 }
2819 2817
2820 2818 /*ARGSUSED*/
2821 2819 u_offset_t
2822 2820 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2823 2821 {
2824 2822 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2825 2823
2826 2824 /* Offset does not matter in ISM memory */
2827 2825
2828 2826 return ((u_offset_t)0);
2829 2827 }
2830 2828
2831 2829 /* ARGSUSED */
2832 2830 int
2833 2831 segspt_shmgettype(struct seg *seg, caddr_t addr)
2834 2832 {
2835 2833 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2836 2834 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2837 2835
2838 2836 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2839 2837
2840 2838 /*
2841 2839 * The shared memory mapping is always MAP_SHARED; SWAP is only
2842 2840 * reserved for DISM
2843 2841 */
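	/*
	 * i.e. ISM (not SHM_PAGEABLE) reports MAP_SHARED|MAP_NORESERVE since
	 * its swap is never reserved, while DISM reports plain MAP_SHARED.
	 */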
2844 2842 return (MAP_SHARED |
2845 2843 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2846 2844 }
2847 2845
2848 2846 /*ARGSUSED*/
2849 2847 int
2850 2848 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2851 2849 {
2852 2850 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2853 2851 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2854 2852
2855 2853 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2856 2854
2857 2855 *vpp = sptd->spt_vp;
2858 2856 return (0);
2859 2857 }
2860 2858
2861 2859 /*
2862 2860 * We need to wait for pending IO to complete to a DISM segment in order for
2863 2861 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2864 2862 * than enough time to wait.
2865 2863 */
2866 2864 static clock_t spt_pcache_wait = 120;
2867 2865
2868 2866 /*ARGSUSED*/
2869 2867 static int
2870 2868 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2871 2869 {
2872 2870 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2873 2871 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2874 2872 struct anon_map *amp;
2875 2873 pgcnt_t pg_idx;
2876 2874 ushort_t gen;
2877 2875 clock_t end_lbolt;
2878 2876 int writer;
2879 2877 page_t **ppa;
2880 2878
2881 2879 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2882 2880
2883 2881 if (behav == MADV_FREE) {
2884 2882 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2885 2883 return (0);
2886 2884
2887 2885 amp = sptd->spt_amp;
2888 2886 pg_idx = seg_page(seg, addr);
2889 2887
2890 2888 mutex_enter(&sptd->spt_lock);
2891 2889 if ((ppa = sptd->spt_ppa) == NULL) {
2892 2890 mutex_exit(&sptd->spt_lock);
2893 2891 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2894 2892 anon_disclaim(amp, pg_idx, len);
2895 2893 ANON_LOCK_EXIT(&amp->a_rwlock);
2896 2894 return (0);
2897 2895 }
2898 2896
2899 2897 sptd->spt_flags |= DISM_PPA_CHANGED;
2900 2898 gen = sptd->spt_gen;
2901 2899
2902 2900 mutex_exit(&sptd->spt_lock);
2903 2901
2904 2902 /*
2905 2903 * Purge all DISM cached pages
2906 2904 */
2907 2905 seg_ppurge_wiredpp(ppa);
2908 2906
2909 2907 /*
2910 2908 * Drop the AS_LOCK so that other threads can grab it
2911 2909 * in the as_pageunlock path and hopefully get the segment
2912 2910 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2913 2911 * to keep this segment resident.
2914 2912 */
2915 2913 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2916 2914 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2917 2915 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2918 2916
2919 2917 mutex_enter(&sptd->spt_lock);
2920 2918
2921 2919 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2922 2920
2923 2921 /*
2924 2922 * Try to wait for pages to get kicked out of the seg_pcache.
2925 2923 */
2926 2924 while (sptd->spt_gen == gen &&
2927 2925 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2928 2926 ddi_get_lbolt() < end_lbolt) {
2929 2927 if (!cv_timedwait_sig(&sptd->spt_cv,
2930 2928 &sptd->spt_lock, end_lbolt)) {
2931 2929 break;
2932 2930 }
2933 2931 }
2934 2932
2935 2933 mutex_exit(&sptd->spt_lock);
2936 2934
2937 2935 /* Regrab the AS_LOCK and release our hold on the segment */
2938 2936 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2939 2937 writer ? RW_WRITER : RW_READER);
2940 2938 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2941 2939 if (shmd->shm_softlockcnt <= 0) {
2942 2940 if (AS_ISUNMAPWAIT(seg->s_as)) {
2943 2941 mutex_enter(&seg->s_as->a_contents);
2944 2942 if (AS_ISUNMAPWAIT(seg->s_as)) {
2945 2943 AS_CLRUNMAPWAIT(seg->s_as);
2946 2944 cv_broadcast(&seg->s_as->a_cv);
2947 2945 }
2948 2946 mutex_exit(&seg->s_as->a_contents);
2949 2947 }
2950 2948 }
2951 2949
2952 2950 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2953 2951 anon_disclaim(amp, pg_idx, len);
2954 2952 ANON_LOCK_EXIT(&amp->a_rwlock);
2955 2953 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2956 2954 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2957 2955 int already_set;
2958 2956 ulong_t anon_index;
2959 2957 lgrp_mem_policy_t policy;
2960 2958 caddr_t shm_addr;
2961 2959 size_t share_size;
2962 2960 size_t size;
2963 2961 struct seg *sptseg = shmd->shm_sptseg;
2964 2962 caddr_t sptseg_addr;
2965 2963
2966 2964 /*
2967 2965 * Align address and length to page size of underlying segment
2968 2966 */
2969 2967 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2970 2968 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2971 2969 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2972 2970 share_size);
2973 2971
2974 2972 amp = shmd->shm_amp;
2975 2973 anon_index = seg_page(seg, shm_addr);
2976 2974
2977 2975 /*
2978 2976 * And now we may have to adjust size downward if we have
2979 2977 * exceeded the realsize of the segment or initial anon
2980 2978 * allocations.
2981 2979 */
2982 2980 sptseg_addr = sptseg->s_base + ptob(anon_index);
2983 2981 if ((sptseg_addr + size) >
2984 2982 (sptseg->s_base + sptd->spt_realsize))
2985 2983 size = (sptseg->s_base + sptd->spt_realsize) -
2986 2984 sptseg_addr;
2987 2985
2988 2986 /*
2989 2987 * Set memory allocation policy for this segment
2990 2988 */
2991 2989 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2992 2990 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2993 2991 NULL, 0, len);
2994 2992
2995 2993 /*
2996 2994 * If random memory allocation policy set already,
2997 2995 * don't bother reapplying it.
2998 2996 */
2999 2997 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
3000 2998 return (0);
3001 2999
3002 3000 /*
3003 3001 * Mark any existing pages in the given range for
3004 3002 * migration, flushing the I/O page cache, and using the
3005 3003 * underlying segment to calculate the anon index and to get
3006 3004 * the anon map and vnode pointer.
3007 3005 */
3008 3006 if (shmd->shm_softlockcnt > 0)
3009 3007 segspt_purge(seg);
3010 3008
3011 3009 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3012 3010 }
3013 3011
3014 3012 return (0);
3015 3013 }
3016 3014
3017 3015 /*ARGSUSED*/
3018 3016 void
3019 3017 segspt_shmdump(struct seg *seg)
3020 3018 {
3021 3019 /* no-op for ISM segment */
3022 3020 }
3023 3021
3024 3022 /*ARGSUSED*/
3025 3023 static faultcode_t
3026 3024 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
3027 3025 {
3028 3026 return (ENOTSUP);
3029 3027 }
3030 3028
3031 3029 /*
3032 3030 * get a memory ID for an addr in a given segment
3033 3031 */
3034 3032 static int
3035 3033 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3036 3034 {
3037 3035 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3038 3036 struct anon *ap;
3039 3037 size_t anon_index;
3040 3038 struct anon_map *amp = shmd->shm_amp;
3041 3039 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3042 3040 struct seg *sptseg = shmd->shm_sptseg;
3043 3041 anon_sync_obj_t cookie;
3044 3042
3045 3043 anon_index = seg_page(seg, addr);
3046 3044
3047 3045 if (addr > (seg->s_base + sptd->spt_realsize)) {
3048 3046 return (EFAULT);
3049 3047 }
3050 3048
3051 3049 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3052 3050 anon_array_enter(amp, anon_index, &cookie);
3053 3051 ap = anon_get_ptr(amp->ahp, anon_index);
3054 3052 if (ap == NULL) {
3055 3053 struct page *pp;
3056 3054 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3057 3055
3058 3056 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3059 3057 if (pp == NULL) {
3060 3058 anon_array_exit(&cookie);
3061 3059 ANON_LOCK_EXIT(&amp->a_rwlock);
3062 3060 return (ENOMEM);
3063 3061 }
3064 3062 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3065 3063 page_unlock(pp);
3066 3064 }
3067 3065 anon_array_exit(&cookie);
3068 3066 ANON_LOCK_EXIT(&amp->a_rwlock);
3069 3067 memidp->val[0] = (uintptr_t)ap;
3070 3068 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
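	/* The memid is the anon pointer plus the byte offset within the page. */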
3071 3069 return (0);
3072 3070 }
3073 3071
3074 3072 /*
3075 3073 * Get memory allocation policy info for specified address in given segment
3076 3074 */
3077 3075 static lgrp_mem_policy_info_t *
3078 3076 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3079 3077 {
3080 3078 struct anon_map *amp;
3081 3079 ulong_t anon_index;
3082 3080 lgrp_mem_policy_info_t *policy_info;
3083 3081 struct shm_data *shm_data;
3084 3082
3085 3083 ASSERT(seg != NULL);
3086 3084
3087 3085 /*
3088 3086 * Get anon_map from segshm
3089 3087 *
3090 3088 * Assume that no lock needs to be held on anon_map, since
3091 3089 * it should be protected by its reference count which must be
3092 3090 * nonzero for an existing segment
3093 3091 * Need to grab readers lock on policy tree though
3094 3092 */
3095 3093 shm_data = (struct shm_data *)seg->s_data;
3096 3094 if (shm_data == NULL)
3097 3095 return (NULL);
3098 3096 amp = shm_data->shm_amp;
3099 3097 ASSERT(amp->refcnt != 0);
3100 3098
3101 3099 /*
3102 3100 * Get policy info
3103 3101 *
3104 3102 * Assume starting anon index of 0
3105 3103 */
3106 3104 anon_index = seg_page(seg, addr);
3107 3105 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3108 3106
3109 3107 return (policy_info);
3110 3108 }
3111 3109
3112 3110 /*ARGSUSED*/
3113 3111 static int
3114 3112 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3115 3113 {
3116 3114 return (0);
3117 3115 }