6149 use NULL capable segop as a shorthand for no-capabilities
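Context for the removals below: the forward declaration of segspt_shmcapable() and the .capable initializer in segspt_shmops are dropped, leaving that segop entry NULL. Per the synopsis above, the dispatch code is expected to treat a NULL capable entry as "this segment advertises no capabilities". A minimal sketch of such a NULL-tolerant dispatcher follows; the wrapper name and its placement are assumptions for illustration only, not part of this webrev.

	/*
	 * Sketch only (assumed dispatch-side helper, not taken from this diff):
	 * a NULL ops->capable entry means the segment has no capabilities,
	 * so a driver such as seg_spt may simply omit the member.
	 */
	static int
	segop_capable(struct seg *seg, segcapability_t capability)
	{
		if (seg->s_ops->capable == NULL)
			return (0);	/* no capabilities supported */
		return ((*seg->s_ops->capable)(seg, capability));
	}

With a helper along these lines, segspt_shmops no longer needs its own capable handler just to report "no capabilities".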
--- old/usr/src/uts/common/vm/seg_spt.c
+++ new/usr/src/uts/common/vm/seg_spt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/param.h>
26 26 #include <sys/user.h>
27 27 #include <sys/mman.h>
28 28 #include <sys/kmem.h>
29 29 #include <sys/sysmacros.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/tuneable.h>
33 33 #include <vm/hat.h>
34 34 #include <vm/seg.h>
35 35 #include <vm/as.h>
36 36 #include <vm/anon.h>
37 37 #include <vm/page.h>
38 38 #include <sys/buf.h>
39 39 #include <sys/swap.h>
40 40 #include <sys/atomic.h>
41 41 #include <vm/seg_spt.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/vtrace.h>
44 44 #include <sys/shm.h>
45 45 #include <sys/shm_impl.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/vmsystm.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/project.h>
50 50 #include <sys/tnf_probe.h>
51 51 #include <sys/zone.h>
52 52
53 53 #define SEGSPTADDR (caddr_t)0x0
54 54
55 55 /*
56 56 * # pages used for spt
57 57 */
58 58 size_t spt_used;
59 59
60 60 /*
61 61 * segspt_minfree is the memory left for the system after ISM
62 62 * has locked its pages; it is set to 5% of availrmem in
63 63 * sptcreate when ISM is created. ISM should not use more
64 64 * than ~90% of availrmem; if it does, then the performance
65 65 * of the system may decrease. Machines with large memories may
66 66 * be able to use up more memory for ISM so we set the default
67 67 * segspt_minfree to 5% (which gives ISM at most 95% of availrmem).
68 68 * If somebody wants even more memory for ISM (risking hanging
69 69 * the system) they can patch segspt_minfree to a smaller number.
70 70 */
71 71 pgcnt_t segspt_minfree = 0;
72 72
73 73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 75 static void segspt_free(struct seg *seg);
76 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78 78
79 79 static void
80 80 segspt_badop()
81 81 {
82 82 panic("segspt_badop called");
83 83 /*NOTREACHED*/
84 84 }
85 85
86 86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
87 87
88 88 struct seg_ops segspt_ops = {
89 89 .dup = SEGSPT_BADOP(int),
90 90 .unmap = segspt_unmap,
91 91 .free = segspt_free,
92 92 .fault = SEGSPT_BADOP(int),
93 93 .faulta = SEGSPT_BADOP(faultcode_t),
94 94 .setprot = SEGSPT_BADOP(int),
95 95 .checkprot = SEGSPT_BADOP(int),
96 96 .kluster = SEGSPT_BADOP(int),
97 97 .swapout = SEGSPT_BADOP(size_t),
98 98 .sync = SEGSPT_BADOP(int),
99 99 .incore = SEGSPT_BADOP(size_t),
100 100 .lockop = SEGSPT_BADOP(int),
101 101 .getprot = SEGSPT_BADOP(int),
102 102 .getoffset = SEGSPT_BADOP(u_offset_t),
103 103 .gettype = SEGSPT_BADOP(int),
104 104 .getvp = SEGSPT_BADOP(int),
105 105 .advise = SEGSPT_BADOP(int),
106 106 .dump = SEGSPT_BADOP(void),
107 107 .pagelock = SEGSPT_BADOP(int),
108 108 .setpagesize = SEGSPT_BADOP(int),
109 109 .getmemid = SEGSPT_BADOP(int),
110 110 .getpolicy = segspt_getpolicy,
111 111 .capable = SEGSPT_BADOP(int),
112 112 };
113 113
114 114 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
115 115 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
116 116 static void segspt_shmfree(struct seg *seg);
117 117 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
118 118 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
119 119 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
120 120 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
121 121 register size_t len, register uint_t prot);
122 122 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
123 123 uint_t prot);
124 124 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
125 125 static size_t segspt_shmswapout(struct seg *seg);
126 126 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
127 127 register char *vec);
128 128 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
129 129 int attr, uint_t flags);
130 130 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
131 131 int attr, int op, ulong_t *lockmap, size_t pos);
132 132 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
133 133 uint_t *protv);
134 134 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
135 135 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
136 136 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
137 137 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
138 138 uint_t behav);
139 139 static void segspt_shmdump(struct seg *seg);
140 140 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
141 141 struct page ***, enum lock_type, enum seg_rw);
142 142 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
143 143 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
144 144 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
145 -static int segspt_shmcapable(struct seg *, segcapability_t);
146 145
147 146 struct seg_ops segspt_shmops = {
148 147 .dup = segspt_shmdup,
149 148 .unmap = segspt_shmunmap,
150 149 .free = segspt_shmfree,
151 150 .fault = segspt_shmfault,
152 151 .faulta = segspt_shmfaulta,
153 152 .setprot = segspt_shmsetprot,
154 153 .checkprot = segspt_shmcheckprot,
155 154 .kluster = segspt_shmkluster,
156 155 .swapout = segspt_shmswapout,
157 156 .sync = segspt_shmsync,
158 157 .incore = segspt_shmincore,
159 158 .lockop = segspt_shmlockop,
160 159 .getprot = segspt_shmgetprot,
161 160 .getoffset = segspt_shmgetoffset,
162 161 .gettype = segspt_shmgettype,
163 162 .getvp = segspt_shmgetvp,
164 163 .advise = segspt_shmadvise,
165 164 .dump = segspt_shmdump,
166 165 .pagelock = segspt_shmpagelock,
167 166 .setpagesize = segspt_shmsetpgsz,
168 167 .getmemid = segspt_shmgetmemid,
169 168 .getpolicy = segspt_shmgetpolicy,
170 - .capable = segspt_shmcapable,
171 169 };
172 170
173 171 static void segspt_purge(struct seg *seg);
174 172 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
175 173 enum seg_rw, int);
176 174 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
177 175 page_t **ppa);
178 176
179 177
180 178
181 179 /*ARGSUSED*/
182 180 int
183 181 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
184 182 uint_t prot, uint_t flags, uint_t share_szc)
185 183 {
186 184 int err;
187 185 struct as *newas;
188 186 struct segspt_crargs sptcargs;
189 187
190 188 #ifdef DEBUG
191 189 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
192 190 tnf_ulong, size, size );
193 191 #endif
194 192 if (segspt_minfree == 0) /* leave min 5% of availrmem */
195 193 segspt_minfree = availrmem/20; /* for the system */
196 194
197 195 if (!hat_supported(HAT_SHARED_PT, (void *)0))
198 196 return (EINVAL);
199 197
200 198 /*
201 199 * get a new as for this shared memory segment
202 200 */
203 201 newas = as_alloc();
204 202 newas->a_proc = NULL;
205 203 sptcargs.amp = amp;
206 204 sptcargs.prot = prot;
207 205 sptcargs.flags = flags;
208 206 sptcargs.szc = share_szc;
209 207 /*
210 208 * create a shared page table (spt) segment
211 209 */
212 210
213 211 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
214 212 as_free(newas);
215 213 return (err);
216 214 }
217 215 *sptseg = sptcargs.seg_spt;
218 216 return (0);
219 217 }
220 218
221 219 void
222 220 sptdestroy(struct as *as, struct anon_map *amp)
223 221 {
224 222
225 223 #ifdef DEBUG
226 224 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
227 225 #endif
228 226 (void) as_unmap(as, SEGSPTADDR, amp->size);
229 227 as_free(as);
230 228 }
231 229
232 230 /*
233 231 * called from seg_free().
234 232 * free (i.e., unlock, unmap, return to free list)
235 233 * all the pages in the given seg.
236 234 */
237 235 void
238 236 segspt_free(struct seg *seg)
239 237 {
240 238 struct spt_data *sptd = (struct spt_data *)seg->s_data;
241 239
242 240 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
243 241
244 242 if (sptd != NULL) {
245 243 if (sptd->spt_realsize)
246 244 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
247 245
248 246 if (sptd->spt_ppa_lckcnt)
249 247 kmem_free(sptd->spt_ppa_lckcnt,
250 248 sizeof (*sptd->spt_ppa_lckcnt)
251 249 * btopr(sptd->spt_amp->size));
252 250 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
253 251 cv_destroy(&sptd->spt_cv);
254 252 mutex_destroy(&sptd->spt_lock);
255 253 kmem_free(sptd, sizeof (*sptd));
256 254 }
257 255 }
258 256
259 257 /*ARGSUSED*/
260 258 static int
261 259 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
262 260 uint_t flags)
263 261 {
264 262 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
265 263
266 264 return (0);
267 265 }
268 266
269 267 /*ARGSUSED*/
270 268 static size_t
271 269 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
272 270 {
273 271 caddr_t eo_seg;
274 272 pgcnt_t npages;
275 273 struct shm_data *shmd = (struct shm_data *)seg->s_data;
276 274 struct seg *sptseg;
277 275 struct spt_data *sptd;
278 276
279 277 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
280 278 #ifdef lint
281 279 seg = seg;
282 280 #endif
283 281 sptseg = shmd->shm_sptseg;
284 282 sptd = sptseg->s_data;
285 283
286 284 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
287 285 eo_seg = addr + len;
288 286 while (addr < eo_seg) {
289 287 /* page exists, and it's locked. */
290 288 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
291 289 SEG_PAGE_ANON;
292 290 addr += PAGESIZE;
293 291 }
294 292 return (len);
295 293 } else {
296 294 struct anon_map *amp = shmd->shm_amp;
297 295 struct anon *ap;
298 296 page_t *pp;
299 297 pgcnt_t anon_index;
300 298 struct vnode *vp;
301 299 u_offset_t off;
302 300 ulong_t i;
303 301 int ret;
304 302 anon_sync_obj_t cookie;
305 303
306 304 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
307 305 anon_index = seg_page(seg, addr);
308 306 npages = btopr(len);
309 307 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
310 308 return (EINVAL);
311 309 }
312 310 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
313 311 for (i = 0; i < npages; i++, anon_index++) {
314 312 ret = 0;
315 313 anon_array_enter(amp, anon_index, &cookie);
316 314 ap = anon_get_ptr(amp->ahp, anon_index);
317 315 if (ap != NULL) {
318 316 swap_xlate(ap, &vp, &off);
319 317 anon_array_exit(&cookie);
320 318 pp = page_lookup_nowait(vp, off, SE_SHARED);
321 319 if (pp != NULL) {
322 320 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
323 321 page_unlock(pp);
324 322 }
325 323 } else {
326 324 anon_array_exit(&cookie);
327 325 }
328 326 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
329 327 ret |= SEG_PAGE_LOCKED;
330 328 }
331 329 *vec++ = (char)ret;
332 330 }
333 331 ANON_LOCK_EXIT(&amp->a_rwlock);
334 332 return (len);
335 333 }
336 334 }
337 335
338 336 static int
339 337 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
340 338 {
341 339 size_t share_size;
342 340
343 341 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
344 342
345 343 /*
346 344 * seg.s_size may have been rounded up to the largest page size
347 345 * in shmat().
348 346 * XXX This should be cleaned up. sptdestroy should take a length
349 347 * argument which should be the same as sptcreate. Then
350 348 * this rounding would not be needed (or is done in shm.c)
351 349 * Only the check for full segment will be needed.
352 350 *
353 351 * XXX -- shouldn't raddr == 0 always? These tests don't seem
354 352 * to be useful at all.
355 353 */
356 354 share_size = page_get_pagesize(seg->s_szc);
357 355 ssize = P2ROUNDUP(ssize, share_size);
358 356
359 357 if (raddr == seg->s_base && ssize == seg->s_size) {
360 358 seg_free(seg);
361 359 return (0);
362 360 } else
363 361 return (EINVAL);
364 362 }
365 363
366 364 int
367 365 segspt_create(struct seg *seg, caddr_t argsp)
368 366 {
369 367 int err;
370 368 caddr_t addr = seg->s_base;
371 369 struct spt_data *sptd;
372 370 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
373 371 struct anon_map *amp = sptcargs->amp;
374 372 struct kshmid *sp = amp->a_sp;
375 373 struct cred *cred = CRED();
376 374 ulong_t i, j, anon_index = 0;
377 375 pgcnt_t npages = btopr(amp->size);
378 376 struct vnode *vp;
379 377 page_t **ppa;
380 378 uint_t hat_flags;
381 379 size_t pgsz;
382 380 pgcnt_t pgcnt;
383 381 caddr_t a;
384 382 pgcnt_t pidx;
385 383 size_t sz;
386 384 proc_t *procp = curproc;
387 385 rctl_qty_t lockedbytes = 0;
388 386 kproject_t *proj;
389 387
390 388 /*
391 389 * We are holding the a_lock on the underlying dummy as,
392 390 * so we can make calls to the HAT layer.
393 391 */
394 392 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
395 393 ASSERT(sp != NULL);
396 394
397 395 #ifdef DEBUG
398 396 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
399 397 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
400 398 #endif
401 399 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
402 400 if (err = anon_swap_adjust(npages))
403 401 return (err);
404 402 }
405 403 err = ENOMEM;
406 404
407 405 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
408 406 goto out1;
409 407
410 408 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
411 409 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
412 410 KM_NOSLEEP)) == NULL)
413 411 goto out2;
414 412 }
415 413
416 414 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
417 415
418 416 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
419 417 goto out3;
420 418
421 419 seg->s_ops = &segspt_ops;
422 420 sptd->spt_vp = vp;
423 421 sptd->spt_amp = amp;
424 422 sptd->spt_prot = sptcargs->prot;
425 423 sptd->spt_flags = sptcargs->flags;
426 424 seg->s_data = (caddr_t)sptd;
427 425 sptd->spt_ppa = NULL;
428 426 sptd->spt_ppa_lckcnt = NULL;
429 427 seg->s_szc = sptcargs->szc;
430 428 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
431 429 sptd->spt_gen = 0;
432 430
433 431 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
434 432 if (seg->s_szc > amp->a_szc) {
435 433 amp->a_szc = seg->s_szc;
436 434 }
437 435 ANON_LOCK_EXIT(&amp->a_rwlock);
438 436
439 437 /*
440 438 * Set policy to affect initial allocation of pages in
441 439 * anon_map_createpages()
442 440 */
443 441 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
444 442 NULL, 0, ptob(npages));
445 443
446 444 if (sptcargs->flags & SHM_PAGEABLE) {
447 445 size_t share_sz;
448 446 pgcnt_t new_npgs, more_pgs;
449 447 struct anon_hdr *nahp;
450 448 zone_t *zone;
451 449
452 450 share_sz = page_get_pagesize(seg->s_szc);
453 451 if (!IS_P2ALIGNED(amp->size, share_sz)) {
454 452 /*
455 453 * We are rounding up the size of the anon array
456 454 * to a 4 M boundary because we always create 4 M
457 455 * of page(s) when locking and faulting pages, so we
458 456 * don't have to check for all corner cases, e.g.
459 457 * whether there is enough space to allocate a 4 M
460 458 * page.
461 459 */
462 460 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
463 461 more_pgs = new_npgs - npages;
464 462
465 463 /*
466 464 * The zone will never be NULL, as a fully created
467 465 * shm always has an owning zone.
468 466 */
469 467 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
470 468 ASSERT(zone != NULL);
471 469 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
472 470 err = ENOMEM;
473 471 goto out4;
474 472 }
475 473
476 474 nahp = anon_create(new_npgs, ANON_SLEEP);
477 475 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
478 476 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
479 477 ANON_SLEEP);
480 478 anon_release(amp->ahp, npages);
481 479 amp->ahp = nahp;
482 480 ASSERT(amp->swresv == ptob(npages));
483 481 amp->swresv = amp->size = ptob(new_npgs);
484 482 ANON_LOCK_EXIT(&amp->a_rwlock);
485 483 npages = new_npgs;
486 484 }
487 485
488 486 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
489 487 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
490 488 sptd->spt_pcachecnt = 0;
491 489 sptd->spt_realsize = ptob(npages);
492 490 sptcargs->seg_spt = seg;
493 491 return (0);
494 492 }
495 493
496 494 /*
497 495 * get array of pages for each anon slot in amp
498 496 */
499 497 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
500 498 seg, addr, S_CREATE, cred)) != 0)
501 499 goto out4;
502 500
503 501 mutex_enter(&sp->shm_mlock);
504 502
505 503 /* May be partially locked, so count bytes to charge for locking */
506 504 for (i = 0; i < npages; i++)
507 505 if (ppa[i]->p_lckcnt == 0)
508 506 lockedbytes += PAGESIZE;
509 507
510 508 proj = sp->shm_perm.ipc_proj;
511 509
512 510 if (lockedbytes > 0) {
513 511 mutex_enter(&procp->p_lock);
514 512 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
515 513 mutex_exit(&procp->p_lock);
516 514 mutex_exit(&sp->shm_mlock);
517 515 for (i = 0; i < npages; i++)
518 516 page_unlock(ppa[i]);
519 517 err = ENOMEM;
520 518 goto out4;
521 519 }
522 520 mutex_exit(&procp->p_lock);
523 521 }
524 522
525 523 /*
526 524 * addr is initial address corresponding to the first page on ppa list
527 525 */
528 526 for (i = 0; i < npages; i++) {
529 527 /* attempt to lock all pages */
530 528 if (page_pp_lock(ppa[i], 0, 1) == 0) {
531 529 /*
532 530 * if unable to lock any page, unlock all
533 531 * of them and return error
534 532 */
535 533 for (j = 0; j < i; j++)
536 534 page_pp_unlock(ppa[j], 0, 1);
537 535 for (i = 0; i < npages; i++)
538 536 page_unlock(ppa[i]);
539 537 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
540 538 mutex_exit(&sp->shm_mlock);
541 539 err = ENOMEM;
542 540 goto out4;
543 541 }
544 542 }
545 543 mutex_exit(&sp->shm_mlock);
546 544
547 545 /*
548 546 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
549 547 * for the entire life of the segment; for example, platforms
550 548 * that do not support Dynamic Reconfiguration.
551 549 */
552 550 hat_flags = HAT_LOAD_SHARE;
553 551 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
554 552 hat_flags |= HAT_LOAD_LOCK;
555 553
556 554 /*
557 555 * Load translations one large page at a time
558 556 * to make sure we don't create mappings bigger than
559 557 * segment's size code in case underlying pages
560 558 * are shared with segvn's segment that uses bigger
561 559 * size code than we do.
562 560 */
563 561 pgsz = page_get_pagesize(seg->s_szc);
564 562 pgcnt = page_get_pagecnt(seg->s_szc);
565 563 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
566 564 sz = MIN(pgsz, ptob(npages - pidx));
567 565 hat_memload_array(seg->s_as->a_hat, a, sz,
568 566 &ppa[pidx], sptd->spt_prot, hat_flags);
569 567 }
570 568
571 569 /*
572 570 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
573 571 * we will leave the pages locked SE_SHARED for the life
574 572 * of the ISM segment. This will prevent any calls to
575 573 * hat_pageunload() on this ISM segment for those platforms.
576 574 */
577 575 if (!(hat_flags & HAT_LOAD_LOCK)) {
578 576 /*
579 577 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
580 578 * we no longer need to hold the SE_SHARED lock on the pages,
581 579 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
582 580 * SE_SHARED lock on the pages as necessary.
583 581 */
584 582 for (i = 0; i < npages; i++)
585 583 page_unlock(ppa[i]);
586 584 }
587 585 sptd->spt_pcachecnt = 0;
588 586 kmem_free(ppa, ((sizeof (page_t *)) * npages));
589 587 sptd->spt_realsize = ptob(npages);
590 588 atomic_add_long(&spt_used, npages);
591 589 sptcargs->seg_spt = seg;
592 590 return (0);
593 591
594 592 out4:
595 593 seg->s_data = NULL;
596 594 kmem_free(vp, sizeof (*vp));
597 595 cv_destroy(&sptd->spt_cv);
598 596 out3:
599 597 mutex_destroy(&sptd->spt_lock);
600 598 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
601 599 kmem_free(ppa, (sizeof (*ppa) * npages));
602 600 out2:
603 601 kmem_free(sptd, sizeof (*sptd));
604 602 out1:
605 603 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
606 604 anon_swap_restore(npages);
607 605 return (err);
608 606 }
609 607
610 608 /*ARGSUSED*/
611 609 void
612 610 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
613 611 {
614 612 struct page *pp;
615 613 struct spt_data *sptd = (struct spt_data *)seg->s_data;
616 614 pgcnt_t npages;
617 615 ulong_t anon_idx;
618 616 struct anon_map *amp;
619 617 struct anon *ap;
620 618 struct vnode *vp;
621 619 u_offset_t off;
622 620 uint_t hat_flags;
623 621 int root = 0;
624 622 pgcnt_t pgs, curnpgs = 0;
625 623 page_t *rootpp;
626 624 rctl_qty_t unlocked_bytes = 0;
627 625 kproject_t *proj;
628 626 kshmid_t *sp;
629 627
630 628 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
631 629
632 630 len = P2ROUNDUP(len, PAGESIZE);
633 631
634 632 npages = btop(len);
635 633
636 634 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
637 635 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
638 636 (sptd->spt_flags & SHM_PAGEABLE)) {
639 637 hat_flags = HAT_UNLOAD_UNMAP;
640 638 }
641 639
642 640 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
643 641
644 642 amp = sptd->spt_amp;
645 643 if (sptd->spt_flags & SHM_PAGEABLE)
646 644 npages = btop(amp->size);
647 645
648 646 ASSERT(amp != NULL);
649 647
650 648 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
651 649 sp = amp->a_sp;
652 650 proj = sp->shm_perm.ipc_proj;
653 651 mutex_enter(&sp->shm_mlock);
654 652 }
655 653 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
656 654 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
657 655 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
658 656 panic("segspt_free_pages: null app");
659 657 /*NOTREACHED*/
660 658 }
661 659 } else {
662 660 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
663 661 == NULL)
664 662 continue;
665 663 }
666 664 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
667 665 swap_xlate(ap, &vp, &off);
668 666
669 667 /*
670 668 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
671 669 * the pages won't be having SE_SHARED lock at this
672 670 * point.
673 671 *
674 672 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
675 673 * the pages are still held SE_SHARED locked from the
676 674 * original segspt_create()
677 675 *
678 676 * Our goal is to get SE_EXCL lock on each page, remove
679 677 * permanent lock on it and invalidate the page.
680 678 */
681 679 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
682 680 if (hat_flags == HAT_UNLOAD_UNMAP)
683 681 pp = page_lookup(vp, off, SE_EXCL);
684 682 else {
685 683 if ((pp = page_find(vp, off)) == NULL) {
686 684 panic("segspt_free_pages: "
687 685 "page not locked");
688 686 /*NOTREACHED*/
689 687 }
690 688 if (!page_tryupgrade(pp)) {
691 689 page_unlock(pp);
692 690 pp = page_lookup(vp, off, SE_EXCL);
693 691 }
694 692 }
695 693 if (pp == NULL) {
696 694 panic("segspt_free_pages: "
697 695 "page not in the system");
698 696 /*NOTREACHED*/
699 697 }
700 698 ASSERT(pp->p_lckcnt > 0);
701 699 page_pp_unlock(pp, 0, 1);
702 700 if (pp->p_lckcnt == 0)
703 701 unlocked_bytes += PAGESIZE;
704 702 } else {
705 703 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
706 704 continue;
707 705 }
708 706 /*
709 707 * It's logical to invalidate the pages here as in most cases
710 708 * these were created by segspt.
711 709 */
712 710 if (pp->p_szc != 0) {
713 711 if (root == 0) {
714 712 ASSERT(curnpgs == 0);
715 713 root = 1;
716 714 rootpp = pp;
717 715 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
718 716 ASSERT(pgs > 1);
719 717 ASSERT(IS_P2ALIGNED(pgs, pgs));
720 718 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
721 719 curnpgs--;
722 720 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
723 721 ASSERT(curnpgs == 1);
724 722 ASSERT(page_pptonum(pp) ==
725 723 page_pptonum(rootpp) + (pgs - 1));
726 724 page_destroy_pages(rootpp);
727 725 root = 0;
728 726 curnpgs = 0;
729 727 } else {
730 728 ASSERT(curnpgs > 1);
731 729 ASSERT(page_pptonum(pp) ==
732 730 page_pptonum(rootpp) + (pgs - curnpgs));
733 731 curnpgs--;
734 732 }
735 733 } else {
736 734 if (root != 0 || curnpgs != 0) {
737 735 panic("segspt_free_pages: bad large page");
738 736 /*NOTREACHED*/
739 737 }
740 738 /*
741 739 * Before destroying the pages, we need to take care
742 740 * of the rctl locked memory accounting. For that
743 741 * we need to calculate the unlocked_bytes.
744 742 */
745 743 if (pp->p_lckcnt > 0)
746 744 unlocked_bytes += PAGESIZE;
747 745 /*LINTED: constant in conditional context */
748 746 VN_DISPOSE(pp, B_INVAL, 0, kcred);
749 747 }
750 748 }
751 749 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
752 750 if (unlocked_bytes > 0)
753 751 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
754 752 mutex_exit(&sp->shm_mlock);
755 753 }
756 754 if (root != 0 || curnpgs != 0) {
757 755 panic("segspt_free_pages: bad large page");
758 756 /*NOTREACHED*/
759 757 }
760 758
761 759 /*
762 760 * mark that pages have been released
763 761 */
764 762 sptd->spt_realsize = 0;
765 763
766 764 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
767 765 atomic_add_long(&spt_used, -npages);
768 766 anon_swap_restore(npages);
769 767 }
770 768 }
771 769
772 770 /*
773 771 * Get memory allocation policy info for specified address in given segment
774 772 */
775 773 static lgrp_mem_policy_info_t *
776 774 segspt_getpolicy(struct seg *seg, caddr_t addr)
777 775 {
778 776 struct anon_map *amp;
779 777 ulong_t anon_index;
780 778 lgrp_mem_policy_info_t *policy_info;
781 779 struct spt_data *spt_data;
782 780
783 781 ASSERT(seg != NULL);
784 782
785 783 /*
786 784 * Get anon_map from segspt
787 785 *
788 786 * Assume that no lock needs to be held on anon_map, since
789 787 * it should be protected by its reference count which must be
790 788 * nonzero for an existing segment
791 789 * Need to grab readers lock on policy tree though
792 790 */
793 791 spt_data = (struct spt_data *)seg->s_data;
794 792 if (spt_data == NULL)
795 793 return (NULL);
796 794 amp = spt_data->spt_amp;
797 795 ASSERT(amp->refcnt != 0);
798 796
799 797 /*
800 798 * Get policy info
801 799 *
802 800 * Assume starting anon index of 0
803 801 */
804 802 anon_index = seg_page(seg, addr);
805 803 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
806 804
807 805 return (policy_info);
808 806 }
809 807
810 808 /*
811 809 * DISM only.
812 810 * Return locked pages over a given range.
813 811 *
814 812 * We will cache all DISM locked pages and save the pplist for the
815 813 * entire segment in the ppa field of the underlying DISM segment structure.
816 814 * Later, during a call to segspt_reclaim() we will use this ppa array
817 815 * to page_unlock() all of the pages and then we will free this ppa list.
818 816 */
819 817 /*ARGSUSED*/
820 818 static int
821 819 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
822 820 struct page ***ppp, enum lock_type type, enum seg_rw rw)
823 821 {
824 822 struct shm_data *shmd = (struct shm_data *)seg->s_data;
825 823 struct seg *sptseg = shmd->shm_sptseg;
826 824 struct spt_data *sptd = sptseg->s_data;
827 825 pgcnt_t pg_idx, npages, tot_npages, npgs;
828 826 struct page **pplist, **pl, **ppa, *pp;
829 827 struct anon_map *amp;
830 828 spgcnt_t an_idx;
831 829 int ret = ENOTSUP;
832 830 uint_t pl_built = 0;
833 831 struct anon *ap;
834 832 struct vnode *vp;
835 833 u_offset_t off;
836 834 pgcnt_t claim_availrmem = 0;
837 835 uint_t szc;
838 836
839 837 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
840 838 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
841 839
842 840 /*
843 841 * We want to lock/unlock the entire ISM segment. Therefore,
844 842 * we will be using the underlying sptseg and its base address
845 843 * and length for the caching arguments.
846 844 */
847 845 ASSERT(sptseg);
848 846 ASSERT(sptd);
849 847
850 848 pg_idx = seg_page(seg, addr);
851 849 npages = btopr(len);
852 850
853 851 /*
854 852 * check if the request is larger than number of pages covered
855 853 * by amp
856 854 */
857 855 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
858 856 *ppp = NULL;
859 857 return (ENOTSUP);
860 858 }
861 859
862 860 if (type == L_PAGEUNLOCK) {
863 861 ASSERT(sptd->spt_ppa != NULL);
864 862
865 863 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
866 864 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
867 865
868 866 /*
869 867 * If someone is blocked while unmapping, we purge
870 868 * segment page cache and thus reclaim pplist synchronously
871 869 * without waiting for seg_pasync_thread. This speeds up
872 870 * unmapping in cases where munmap(2) is called, while
873 871 * raw async i/o is still in progress or where a thread
874 872 * exits on data fault in a multithreaded application.
875 873 */
876 874 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
877 875 (AS_ISUNMAPWAIT(seg->s_as) &&
878 876 shmd->shm_softlockcnt > 0)) {
879 877 segspt_purge(seg);
880 878 }
881 879 return (0);
882 880 }
883 881
884 882 /* The L_PAGELOCK case ... */
885 883
886 884 if (sptd->spt_flags & DISM_PPA_CHANGED) {
887 885 segspt_purge(seg);
888 886 /*
889 887 * for DISM the ppa needs to be rebuilt since the
890 888 * number of locked pages could have changed
891 889 */
892 890 *ppp = NULL;
893 891 return (ENOTSUP);
894 892 }
895 893
896 894 /*
897 895 * First try to find pages in segment page cache, without
898 896 * holding the segment lock.
899 897 */
900 898 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
901 899 S_WRITE, SEGP_FORCE_WIRED);
902 900 if (pplist != NULL) {
903 901 ASSERT(sptd->spt_ppa != NULL);
904 902 ASSERT(sptd->spt_ppa == pplist);
905 903 ppa = sptd->spt_ppa;
906 904 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
907 905 if (ppa[an_idx] == NULL) {
908 906 seg_pinactive(seg, NULL, seg->s_base,
909 907 sptd->spt_amp->size, ppa,
910 908 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
911 909 *ppp = NULL;
912 910 return (ENOTSUP);
913 911 }
914 912 if ((szc = ppa[an_idx]->p_szc) != 0) {
915 913 npgs = page_get_pagecnt(szc);
916 914 an_idx = P2ROUNDUP(an_idx + 1, npgs);
917 915 } else {
918 916 an_idx++;
919 917 }
920 918 }
921 919 /*
922 920 * Since we cache the entire DISM segment, we want to
923 921 * set ppp to point to the first slot that corresponds
924 922 * to the requested addr, i.e. pg_idx.
925 923 */
926 924 *ppp = &(sptd->spt_ppa[pg_idx]);
927 925 return (0);
928 926 }
929 927
930 928 mutex_enter(&sptd->spt_lock);
931 929 /*
932 930 * try to find pages in segment page cache with mutex
933 931 */
934 932 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
935 933 S_WRITE, SEGP_FORCE_WIRED);
936 934 if (pplist != NULL) {
937 935 ASSERT(sptd->spt_ppa != NULL);
938 936 ASSERT(sptd->spt_ppa == pplist);
939 937 ppa = sptd->spt_ppa;
940 938 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
941 939 if (ppa[an_idx] == NULL) {
942 940 mutex_exit(&sptd->spt_lock);
943 941 seg_pinactive(seg, NULL, seg->s_base,
944 942 sptd->spt_amp->size, ppa,
945 943 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
946 944 *ppp = NULL;
947 945 return (ENOTSUP);
948 946 }
949 947 if ((szc = ppa[an_idx]->p_szc) != 0) {
950 948 npgs = page_get_pagecnt(szc);
951 949 an_idx = P2ROUNDUP(an_idx + 1, npgs);
952 950 } else {
953 951 an_idx++;
954 952 }
955 953 }
956 954 /*
957 955 * Since we cache the entire DISM segment, we want to
958 956 * set ppp to point to the first slot that corresponds
959 957 * to the requested addr, i.e. pg_idx.
960 958 */
961 959 mutex_exit(&sptd->spt_lock);
962 960 *ppp = &(sptd->spt_ppa[pg_idx]);
963 961 return (0);
964 962 }
965 963 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
966 964 SEGP_FORCE_WIRED) == SEGP_FAIL) {
967 965 mutex_exit(&sptd->spt_lock);
968 966 *ppp = NULL;
969 967 return (ENOTSUP);
970 968 }
971 969
972 970 /*
973 971 * No need to worry about protections because DISM pages are always rw.
974 972 */
975 973 pl = pplist = NULL;
976 974 amp = sptd->spt_amp;
977 975
978 976 /*
979 977 * Do we need to build the ppa array?
980 978 */
981 979 if (sptd->spt_ppa == NULL) {
982 980 pgcnt_t lpg_cnt = 0;
983 981
984 982 pl_built = 1;
985 983 tot_npages = btopr(sptd->spt_amp->size);
986 984
987 985 ASSERT(sptd->spt_pcachecnt == 0);
988 986 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
989 987 pl = pplist;
990 988
991 989 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
992 990 for (an_idx = 0; an_idx < tot_npages; ) {
993 991 ap = anon_get_ptr(amp->ahp, an_idx);
994 992 /*
995 993 * Cache only mlocked pages. For large pages
996 994 * if one (constituent) page is mlocked
997 995 * all pages for that large page
998 996 * are cached also. This is for quick
999 997 * lookups of ppa array;
1000 998 */
1001 999 if ((ap != NULL) && (lpg_cnt != 0 ||
1002 1000 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1003 1001
1004 1002 swap_xlate(ap, &vp, &off);
1005 1003 pp = page_lookup(vp, off, SE_SHARED);
1006 1004 ASSERT(pp != NULL);
1007 1005 if (lpg_cnt == 0) {
1008 1006 lpg_cnt++;
1009 1007 /*
1010 1008 * For a small page, we are done --
1011 1009 * lpg_count is reset to 0 below.
1012 1010 *
1013 1011 * For a large page, we are guaranteed
1014 1012 * to find the anon structures of all
1015 1013 * constituent pages and a non-zero
1016 1014 * lpg_cnt ensures that we don't test
1017 1015 * for mlock for these. We are done
1018 1016 * when lpg_count reaches (npgs + 1).
1019 1017 * If we are not the first constituent
1020 1018 * page, restart at the first one.
1021 1019 */
1022 1020 npgs = page_get_pagecnt(pp->p_szc);
1023 1021 if (!IS_P2ALIGNED(an_idx, npgs)) {
1024 1022 an_idx = P2ALIGN(an_idx, npgs);
1025 1023 page_unlock(pp);
1026 1024 continue;
1027 1025 }
1028 1026 }
1029 1027 if (++lpg_cnt > npgs)
1030 1028 lpg_cnt = 0;
1031 1029
1032 1030 /*
1033 1031 * availrmem is decremented only
1034 1032 * for unlocked pages
1035 1033 */
1036 1034 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1037 1035 claim_availrmem++;
1038 1036 pplist[an_idx] = pp;
1039 1037 }
1040 1038 an_idx++;
1041 1039 }
1042 1040 ANON_LOCK_EXIT(&amp->a_rwlock);
1043 1041
1044 1042 if (claim_availrmem) {
1045 1043 mutex_enter(&freemem_lock);
1046 1044 if (availrmem < tune.t_minarmem + claim_availrmem) {
1047 1045 mutex_exit(&freemem_lock);
1048 1046 ret = ENOTSUP;
1049 1047 claim_availrmem = 0;
1050 1048 goto insert_fail;
1051 1049 } else {
1052 1050 availrmem -= claim_availrmem;
1053 1051 }
1054 1052 mutex_exit(&freemem_lock);
1055 1053 }
1056 1054
1057 1055 sptd->spt_ppa = pl;
1058 1056 } else {
1059 1057 /*
1060 1058 * We already have a valid ppa[].
1061 1059 */
1062 1060 pl = sptd->spt_ppa;
1063 1061 }
1064 1062
1065 1063 ASSERT(pl != NULL);
1066 1064
1067 1065 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1068 1066 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1069 1067 segspt_reclaim);
1070 1068 if (ret == SEGP_FAIL) {
1071 1069 /*
1072 1070 * seg_pinsert failed. We return
1073 1071 * ENOTSUP, so that the as_pagelock() code will
1074 1072 * then try the slower F_SOFTLOCK path.
1075 1073 */
1076 1074 if (pl_built) {
1077 1075 /*
1078 1076 * No one else has referenced the ppa[].
1079 1077 * We created it and we need to destroy it.
1080 1078 */
1081 1079 sptd->spt_ppa = NULL;
1082 1080 }
1083 1081 ret = ENOTSUP;
1084 1082 goto insert_fail;
1085 1083 }
1086 1084
1087 1085 /*
1088 1086 * In either case, we increment softlockcnt on the 'real' segment.
1089 1087 */
1090 1088 sptd->spt_pcachecnt++;
1091 1089 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1092 1090
1093 1091 ppa = sptd->spt_ppa;
1094 1092 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1095 1093 if (ppa[an_idx] == NULL) {
1096 1094 mutex_exit(&sptd->spt_lock);
1097 1095 seg_pinactive(seg, NULL, seg->s_base,
1098 1096 sptd->spt_amp->size,
1099 1097 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1100 1098 *ppp = NULL;
1101 1099 return (ENOTSUP);
1102 1100 }
1103 1101 if ((szc = ppa[an_idx]->p_szc) != 0) {
1104 1102 npgs = page_get_pagecnt(szc);
1105 1103 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1106 1104 } else {
1107 1105 an_idx++;
1108 1106 }
1109 1107 }
1110 1108 /*
1111 1109 * We can now drop the sptd->spt_lock since the ppa[]
1112 1110 * exists and we have incremented pcachecnt.
1113 1111 */
1114 1112 mutex_exit(&sptd->spt_lock);
1115 1113
1116 1114 /*
1117 1115 * Since we cache the entire segment, we want to
1118 1116 * set ppp to point to the first slot that corresponds
1119 1117 * to the requested addr, i.e. pg_idx.
1120 1118 */
1121 1119 *ppp = &(sptd->spt_ppa[pg_idx]);
1122 1120 return (0);
1123 1121
1124 1122 insert_fail:
1125 1123 /*
1126 1124 * We will only reach this code if we tried and failed.
1127 1125 *
1128 1126 * And we can drop the lock on the dummy seg, once we've failed
1129 1127 * to set up a new ppa[].
1130 1128 */
1131 1129 mutex_exit(&sptd->spt_lock);
1132 1130
1133 1131 if (pl_built) {
1134 1132 if (claim_availrmem) {
1135 1133 mutex_enter(&freemem_lock);
1136 1134 availrmem += claim_availrmem;
1137 1135 mutex_exit(&freemem_lock);
1138 1136 }
1139 1137
1140 1138 /*
1141 1139 * We created pl and we need to destroy it.
1142 1140 */
1143 1141 pplist = pl;
1144 1142 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1145 1143 if (pplist[an_idx] != NULL)
1146 1144 page_unlock(pplist[an_idx]);
1147 1145 }
1148 1146 kmem_free(pl, sizeof (page_t *) * tot_npages);
1149 1147 }
1150 1148
1151 1149 if (shmd->shm_softlockcnt <= 0) {
1152 1150 if (AS_ISUNMAPWAIT(seg->s_as)) {
1153 1151 mutex_enter(&seg->s_as->a_contents);
1154 1152 if (AS_ISUNMAPWAIT(seg->s_as)) {
1155 1153 AS_CLRUNMAPWAIT(seg->s_as);
1156 1154 cv_broadcast(&seg->s_as->a_cv);
1157 1155 }
1158 1156 mutex_exit(&seg->s_as->a_contents);
1159 1157 }
1160 1158 }
1161 1159 *ppp = NULL;
1162 1160 return (ret);
1163 1161 }
1164 1162
1165 1163
1166 1164
1167 1165 /*
1168 1166 * return locked pages over a given range.
1169 1167 *
1170 1168 * We will cache the entire ISM segment and save the pplist for the
1171 1169 * entire segment in the ppa field of the underlying ISM segment structure.
1172 1170 * Later, during a call to segspt_reclaim() we will use this ppa array
1173 1171 * to page_unlock() all of the pages and then we will free this ppa list.
1174 1172 */
1175 1173 /*ARGSUSED*/
1176 1174 static int
1177 1175 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1178 1176 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1179 1177 {
1180 1178 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1181 1179 struct seg *sptseg = shmd->shm_sptseg;
1182 1180 struct spt_data *sptd = sptseg->s_data;
1183 1181 pgcnt_t np, page_index, npages;
1184 1182 caddr_t a, spt_base;
1185 1183 struct page **pplist, **pl, *pp;
1186 1184 struct anon_map *amp;
1187 1185 ulong_t anon_index;
1188 1186 int ret = ENOTSUP;
1189 1187 uint_t pl_built = 0;
1190 1188 struct anon *ap;
1191 1189 struct vnode *vp;
1192 1190 u_offset_t off;
1193 1191
1194 1192 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1195 1193 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1196 1194
1197 1195
1198 1196 /*
1199 1197 * We want to lock/unlock the entire ISM segment. Therefore,
1200 1198 * we will be using the underlying sptseg and its base address
1201 1199 * and length for the caching arguments.
1202 1200 */
1203 1201 ASSERT(sptseg);
1204 1202 ASSERT(sptd);
1205 1203
1206 1204 if (sptd->spt_flags & SHM_PAGEABLE) {
1207 1205 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1208 1206 }
1209 1207
1210 1208 page_index = seg_page(seg, addr);
1211 1209 npages = btopr(len);
1212 1210
1213 1211 /*
1214 1212 * check if the request is larger than number of pages covered
1215 1213 * by amp
1216 1214 */
1217 1215 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1218 1216 *ppp = NULL;
1219 1217 return (ENOTSUP);
1220 1218 }
1221 1219
1222 1220 if (type == L_PAGEUNLOCK) {
1223 1221
1224 1222 ASSERT(sptd->spt_ppa != NULL);
1225 1223
1226 1224 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1227 1225 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1228 1226
1229 1227 /*
1230 1228 * If someone is blocked while unmapping, we purge
1231 1229 * segment page cache and thus reclaim pplist synchronously
1232 1230 * without waiting for seg_pasync_thread. This speeds up
1233 1231 * unmapping in cases where munmap(2) is called, while
1234 1232 * raw async i/o is still in progress or where a thread
1235 1233 * exits on data fault in a multithreaded application.
1236 1234 */
1237 1235 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1238 1236 segspt_purge(seg);
1239 1237 }
1240 1238 return (0);
1241 1239 }
1242 1240
1243 1241 /* The L_PAGELOCK case... */
1244 1242
1245 1243 /*
1246 1244 * First try to find pages in segment page cache, without
1247 1245 * holding the segment lock.
1248 1246 */
1249 1247 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1250 1248 S_WRITE, SEGP_FORCE_WIRED);
1251 1249 if (pplist != NULL) {
1252 1250 ASSERT(sptd->spt_ppa == pplist);
1253 1251 ASSERT(sptd->spt_ppa[page_index]);
1254 1252 /*
1255 1253 * Since we cache the entire ISM segment, we want to
1256 1254 * set ppp to point to the first slot that corresponds
1257 1255 * to the requested addr, i.e. page_index.
1258 1256 */
1259 1257 *ppp = &(sptd->spt_ppa[page_index]);
1260 1258 return (0);
1261 1259 }
1262 1260
1263 1261 mutex_enter(&sptd->spt_lock);
1264 1262
1265 1263 /*
1266 1264 * try to find pages in segment page cache
1267 1265 */
1268 1266 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1269 1267 S_WRITE, SEGP_FORCE_WIRED);
1270 1268 if (pplist != NULL) {
1271 1269 ASSERT(sptd->spt_ppa == pplist);
1272 1270 /*
1273 1271 * Since we cache the entire segment, we want to
1274 1272 * set ppp to point to the first slot that corresponds
1275 1273 * to the requested addr, i.e. page_index.
1276 1274 */
1277 1275 mutex_exit(&sptd->spt_lock);
1278 1276 *ppp = &(sptd->spt_ppa[page_index]);
1279 1277 return (0);
1280 1278 }
1281 1279
1282 1280 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1283 1281 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1284 1282 mutex_exit(&sptd->spt_lock);
1285 1283 *ppp = NULL;
1286 1284 return (ENOTSUP);
1287 1285 }
1288 1286
1289 1287 /*
1290 1288 * No need to worry about protections because ISM pages
1291 1289 * are always rw.
1292 1290 */
1293 1291 pl = pplist = NULL;
1294 1292
1295 1293 /*
1296 1294 * Do we need to build the ppa array?
1297 1295 */
1298 1296 if (sptd->spt_ppa == NULL) {
1299 1297 ASSERT(sptd->spt_ppa == pplist);
1300 1298
1301 1299 spt_base = sptseg->s_base;
1302 1300 pl_built = 1;
1303 1301
1304 1302 /*
1305 1303 * availrmem is decremented once during anon_swap_adjust()
1306 1304 * and is incremented during the anon_unresv(), which is
1307 1305 * called from shm_rm_amp() when the segment is destroyed.
1308 1306 */
1309 1307 amp = sptd->spt_amp;
1310 1308 ASSERT(amp != NULL);
1311 1309
1312 1310 /* pcachecnt is protected by sptd->spt_lock */
1313 1311 ASSERT(sptd->spt_pcachecnt == 0);
1314 1312 pplist = kmem_zalloc(sizeof (page_t *)
1315 1313 * btopr(sptd->spt_amp->size), KM_SLEEP);
1316 1314 pl = pplist;
1317 1315
1318 1316 anon_index = seg_page(sptseg, spt_base);
1319 1317
1320 1318 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1321 1319 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1322 1320 a += PAGESIZE, anon_index++, pplist++) {
1323 1321 ap = anon_get_ptr(amp->ahp, anon_index);
1324 1322 ASSERT(ap != NULL);
1325 1323 swap_xlate(ap, &vp, &off);
1326 1324 pp = page_lookup(vp, off, SE_SHARED);
1327 1325 ASSERT(pp != NULL);
1328 1326 *pplist = pp;
1329 1327 }
1330 1328 ANON_LOCK_EXIT(&amp->a_rwlock);
1331 1329
1332 1330 if (a < (spt_base + sptd->spt_amp->size)) {
1333 1331 ret = ENOTSUP;
1334 1332 goto insert_fail;
1335 1333 }
1336 1334 sptd->spt_ppa = pl;
1337 1335 } else {
1338 1336 /*
1339 1337 * We already have a valid ppa[].
1340 1338 */
1341 1339 pl = sptd->spt_ppa;
1342 1340 }
1343 1341
1344 1342 ASSERT(pl != NULL);
1345 1343
1346 1344 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1347 1345 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1348 1346 segspt_reclaim);
1349 1347 if (ret == SEGP_FAIL) {
1350 1348 /*
1351 1349 * seg_pinsert failed. We return
1352 1350 * ENOTSUP, so that the as_pagelock() code will
1353 1351 * then try the slower F_SOFTLOCK path.
1354 1352 */
1355 1353 if (pl_built) {
1356 1354 /*
1357 1355 * No one else has referenced the ppa[].
1358 1356 * We created it and we need to destroy it.
1359 1357 */
1360 1358 sptd->spt_ppa = NULL;
1361 1359 }
1362 1360 ret = ENOTSUP;
1363 1361 goto insert_fail;
1364 1362 }
1365 1363
1366 1364 /*
1367 1365 * In either case, we increment softlockcnt on the 'real' segment.
1368 1366 */
1369 1367 sptd->spt_pcachecnt++;
1370 1368 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1371 1369
1372 1370 /*
1373 1371 * We can now drop the sptd->spt_lock since the ppa[]
1374 1372 * exists and we have incremented pcachecnt.
1375 1373 */
1376 1374 mutex_exit(&sptd->spt_lock);
1377 1375
1378 1376 /*
1379 1377 * Since we cache the entire segment, we want to
1380 1378 * set ppp to point to the first slot that corresponds
1381 1379 * to the requested addr, i.e. page_index.
1382 1380 */
1383 1381 *ppp = &(sptd->spt_ppa[page_index]);
1384 1382 return (0);
1385 1383
1386 1384 insert_fail:
1387 1385 /*
1388 1386 * We will only reach this code if we tried and failed.
1389 1387 *
1390 1388 * And we can drop the lock on the dummy seg, once we've failed
1391 1389 * to set up a new ppa[].
1392 1390 */
1393 1391 mutex_exit(&sptd->spt_lock);
1394 1392
1395 1393 if (pl_built) {
1396 1394 /*
1397 1395 * We created pl and we need to destroy it.
1398 1396 */
1399 1397 pplist = pl;
1400 1398 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1401 1399 while (np) {
1402 1400 page_unlock(*pplist);
1403 1401 np--;
1404 1402 pplist++;
1405 1403 }
1406 1404 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1407 1405 }
1408 1406 if (shmd->shm_softlockcnt <= 0) {
1409 1407 if (AS_ISUNMAPWAIT(seg->s_as)) {
1410 1408 mutex_enter(&seg->s_as->a_contents);
1411 1409 if (AS_ISUNMAPWAIT(seg->s_as)) {
1412 1410 AS_CLRUNMAPWAIT(seg->s_as);
1413 1411 cv_broadcast(&seg->s_as->a_cv);
1414 1412 }
1415 1413 mutex_exit(&seg->s_as->a_contents);
1416 1414 }
1417 1415 }
1418 1416 *ppp = NULL;
1419 1417 return (ret);
1420 1418 }
1421 1419
1422 1420 /*
1423 1421 * purge any cached pages in the I/O page cache
1424 1422 */
1425 1423 static void
1426 1424 segspt_purge(struct seg *seg)
1427 1425 {
1428 1426 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1429 1427 }
1430 1428
1431 1429 static int
1432 1430 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1433 1431 enum seg_rw rw, int async)
1434 1432 {
1435 1433 struct seg *seg = (struct seg *)ptag;
1436 1434 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1437 1435 struct seg *sptseg;
1438 1436 struct spt_data *sptd;
1439 1437 pgcnt_t npages, i, free_availrmem = 0;
1440 1438 int done = 0;
1441 1439
1442 1440 #ifdef lint
1443 1441 addr = addr;
1444 1442 #endif
1445 1443 sptseg = shmd->shm_sptseg;
1446 1444 sptd = sptseg->s_data;
1447 1445 npages = (len >> PAGESHIFT);
1448 1446 ASSERT(npages);
1449 1447 ASSERT(sptd->spt_pcachecnt != 0);
1450 1448 ASSERT(sptd->spt_ppa == pplist);
1451 1449 ASSERT(npages == btopr(sptd->spt_amp->size));
1452 1450 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1453 1451
1454 1452 /*
1455 1453 * Acquire the lock on the dummy seg and destroy the
1456 1454 * ppa array IF this is the last pcachecnt.
1457 1455 */
1458 1456 mutex_enter(&sptd->spt_lock);
1459 1457 if (--sptd->spt_pcachecnt == 0) {
1460 1458 for (i = 0; i < npages; i++) {
1461 1459 if (pplist[i] == NULL) {
1462 1460 continue;
1463 1461 }
1464 1462 if (rw == S_WRITE) {
1465 1463 hat_setrefmod(pplist[i]);
1466 1464 } else {
1467 1465 hat_setref(pplist[i]);
1468 1466 }
1469 1467 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1470 1468 (sptd->spt_ppa_lckcnt[i] == 0))
1471 1469 free_availrmem++;
1472 1470 page_unlock(pplist[i]);
1473 1471 }
1474 1472 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1475 1473 mutex_enter(&freemem_lock);
1476 1474 availrmem += free_availrmem;
1477 1475 mutex_exit(&freemem_lock);
1478 1476 }
1479 1477 /*
1480 1478 * Since we want to cache/uncache the entire ISM segment,
1481 1479 * we will track the pplist in a segspt specific field
1482 1480 * ppa, that is initialized at the time we add an entry to
1483 1481 * the cache.
1484 1482 */
1485 1483 ASSERT(sptd->spt_pcachecnt == 0);
1486 1484 kmem_free(pplist, sizeof (page_t *) * npages);
1487 1485 sptd->spt_ppa = NULL;
1488 1486 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1489 1487 sptd->spt_gen++;
1490 1488 cv_broadcast(&sptd->spt_cv);
1491 1489 done = 1;
1492 1490 }
1493 1491 mutex_exit(&sptd->spt_lock);
1494 1492
1495 1493 /*
1496 1494 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
1497 1495 * may not hold AS lock (in this case async argument is not 0). This
1498 1496 * means that if softlockcnt drops to 0 after the decrement below, the
1499 1497 * address space may get freed. We can't allow that since after the
1500 1498 * softlock decrement to 0 we still need to access the as structure for possible
1501 1499 * wakeup of unmap waiters. To prevent the disappearance of as we take
1502 1500 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1503 1501 * this mutex as a barrier to make sure this routine completes before
1504 1502 * segment is freed.
1505 1503 *
1506 1504 * The second complication we have to deal with in async case is a
1507 1505 * possibility of missed wake up of unmap wait thread. When we don't
1508 1506 * hold as lock here we may take a_contents lock before unmap wait
1509 1507 * thread that was first to see softlockcnt was still not 0. As a
1510 1508 * result we'll fail to wake up an unmap wait thread. To avoid this
1511 1509 * race we set nounmapwait flag in as structure if we drop softlockcnt
1512 1510 * to 0 when async is not 0. The unmapwait thread
1513 1511 * will not block if this flag is set.
1514 1512 */
1515 1513 if (async)
1516 1514 mutex_enter(&shmd->shm_segfree_syncmtx);
1517 1515
1518 1516 /*
1519 1517 * Now decrement softlockcnt.
1520 1518 */
1521 1519 ASSERT(shmd->shm_softlockcnt > 0);
1522 1520 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1523 1521
1524 1522 if (shmd->shm_softlockcnt <= 0) {
1525 1523 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1526 1524 mutex_enter(&seg->s_as->a_contents);
1527 1525 if (async)
1528 1526 AS_SETNOUNMAPWAIT(seg->s_as);
1529 1527 if (AS_ISUNMAPWAIT(seg->s_as)) {
1530 1528 AS_CLRUNMAPWAIT(seg->s_as);
1531 1529 cv_broadcast(&seg->s_as->a_cv);
1532 1530 }
1533 1531 mutex_exit(&seg->s_as->a_contents);
1534 1532 }
1535 1533 }
1536 1534
1537 1535 if (async)
1538 1536 mutex_exit(&shmd->shm_segfree_syncmtx);
1539 1537
1540 1538 return (done);
1541 1539 }
1542 1540
1543 1541 /*
1544 1542 * Do a F_SOFTUNLOCK call over the range requested.
1545 1543 * The range must have already been F_SOFTLOCK'ed.
1546 1544 *
1547 1545 * The calls to acquire and release the anon map lock mutex were
1548 1546 * removed in order to avoid a deadly embrace during a DR
1549 1547 * memory delete operation. (E.g. DR blocks while waiting for an
1550 1548 * exclusive lock on a page that is being used for kaio; the
1551 1549 * thread that will complete the kaio and call segspt_softunlock
1552 1550 * blocks on the anon map lock; another thread holding the anon
1553 1551 * map lock blocks on another page lock via the segspt_shmfault
1554 1552 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1555 1553 *
1556 1554 * The appropriateness of the removal is based upon the following:
1557 1555 * 1. If we are holding a segment's reader lock and the page is held
1558 1556 * shared, then the corresponding element in anonmap which points to
1559 1557 * anon struct cannot change and there is no need to acquire the
1560 1558 * anonymous map lock.
1561 1559 * 2. Threads in segspt_softunlock have a reader lock on the segment
1562 1560 * and already have the shared page lock, so we are guaranteed that
1563 1561 * the anon map slot cannot change and therefore can call anon_get_ptr()
1564 1562 * without grabbing the anonymous map lock.
1565 1563 * 3. Threads that softlock a shared page break copy-on-write, even if
1566 1564 * it's a read. Thus cow faults can be ignored with respect to soft
1567 1565 * unlocking, since the breaking of cow means that the anon slot(s) will
1568 1566 * not be shared.
1569 1567 */
1570 1568 static void
1571 1569 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1572 1570 size_t len, enum seg_rw rw)
1573 1571 {
1574 1572 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1575 1573 struct seg *sptseg;
1576 1574 struct spt_data *sptd;
1577 1575 page_t *pp;
1578 1576 caddr_t adr;
1579 1577 struct vnode *vp;
1580 1578 u_offset_t offset;
1581 1579 ulong_t anon_index;
1582 1580 struct anon_map *amp; /* XXX - for locknest */
1583 1581 struct anon *ap = NULL;
1584 1582 pgcnt_t npages;
1585 1583
1586 1584 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1587 1585
1588 1586 sptseg = shmd->shm_sptseg;
1589 1587 sptd = sptseg->s_data;
1590 1588
1591 1589 /*
1592 1590 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1593 1591 * and therefore their pages are SE_SHARED locked
1594 1592 * for the entire life of the segment.
1595 1593 */
1596 1594 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1597 1595 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1598 1596 goto softlock_decrement;
1599 1597 }
1600 1598
1601 1599 /*
1602 1600 * Any thread is free to do a page_find and
1603 1601 * page_unlock() on the pages within this seg.
1604 1602 *
1605 1603 * We are already holding the as->a_lock on the user's
1606 1604 * real segment, but we need to hold the a_lock on the
1607 1605 * underlying dummy as. This is mostly to satisfy the
1608 1606 * underlying HAT layer.
1609 1607 */
1610 1608 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1611 1609 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1612 1610 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1613 1611
1614 1612 amp = sptd->spt_amp;
1615 1613 ASSERT(amp != NULL);
1616 1614 anon_index = seg_page(sptseg, sptseg_addr);
1617 1615
1618 1616 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1619 1617 ap = anon_get_ptr(amp->ahp, anon_index++);
1620 1618 ASSERT(ap != NULL);
1621 1619 swap_xlate(ap, &vp, &offset);
1622 1620
1623 1621 /*
1624 1622 * Use page_find() instead of page_lookup() to
1625 1623 * find the page since we know that it has a
1626 1624 * "shared" lock.
1627 1625 */
1628 1626 pp = page_find(vp, offset);
1629 1627 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1630 1628 if (pp == NULL) {
1631 1629 panic("segspt_softunlock: "
1632 1630 "addr %p, ap %p, vp %p, off %llx",
1633 1631 (void *)adr, (void *)ap, (void *)vp, offset);
1634 1632 /*NOTREACHED*/
1635 1633 }
1636 1634
1637 1635 if (rw == S_WRITE) {
1638 1636 hat_setrefmod(pp);
1639 1637 } else if (rw != S_OTHER) {
1640 1638 hat_setref(pp);
1641 1639 }
1642 1640 page_unlock(pp);
1643 1641 }
1644 1642
1645 1643 softlock_decrement:
1646 1644 npages = btopr(len);
1647 1645 ASSERT(shmd->shm_softlockcnt >= npages);
1648 1646 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1649 1647 if (shmd->shm_softlockcnt == 0) {
1650 1648 /*
1651 1649 * All SOFTLOCKS are gone. Wakeup any waiting
1652 1650 * unmappers so they can try again to unmap.
1653 1651 * Check for waiters first without the mutex
1654 1652 * held so we don't always grab the mutex on
1655 1653 * softunlocks.
1656 1654 */
1657 1655 if (AS_ISUNMAPWAIT(seg->s_as)) {
1658 1656 mutex_enter(&seg->s_as->a_contents);
1659 1657 if (AS_ISUNMAPWAIT(seg->s_as)) {
1660 1658 AS_CLRUNMAPWAIT(seg->s_as);
1661 1659 cv_broadcast(&seg->s_as->a_cv);
1662 1660 }
1663 1661 mutex_exit(&seg->s_as->a_contents);
1664 1662 }
1665 1663 }
1666 1664 }
1667 1665
1668 1666 int
1669 1667 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1670 1668 {
1671 1669 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1672 1670 struct shm_data *shmd;
1673 1671 struct anon_map *shm_amp = shmd_arg->shm_amp;
1674 1672 struct spt_data *sptd;
1675 1673 int error = 0;
1676 1674
1677 1675 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1678 1676
1679 1677 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1680 1678 if (shmd == NULL)
1681 1679 return (ENOMEM);
1682 1680
1683 1681 shmd->shm_sptas = shmd_arg->shm_sptas;
1684 1682 shmd->shm_amp = shm_amp;
1685 1683 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1686 1684
1687 1685 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1688 1686 NULL, 0, seg->s_size);
1689 1687
1690 1688 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1691 1689
1692 1690 seg->s_data = (void *)shmd;
1693 1691 seg->s_ops = &segspt_shmops;
1694 1692 seg->s_szc = shmd->shm_sptseg->s_szc;
1695 1693 sptd = shmd->shm_sptseg->s_data;
1696 1694
1697 1695 if (sptd->spt_flags & SHM_PAGEABLE) {
1698 1696 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1699 1697 KM_NOSLEEP)) == NULL) {
1700 1698 seg->s_data = (void *)NULL;
1701 1699 kmem_free(shmd, (sizeof (*shmd)));
1702 1700 return (ENOMEM);
1703 1701 }
1704 1702 shmd->shm_lckpgs = 0;
1705 1703 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1706 1704 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1707 1705 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1708 1706 seg->s_size, seg->s_szc)) != 0) {
1709 1707 kmem_free(shmd->shm_vpage,
1710 1708 btopr(shm_amp->size));
1711 1709 }
1712 1710 }
1713 1711 } else {
1714 1712 error = hat_share(seg->s_as->a_hat, seg->s_base,
1715 1713 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1716 1714 seg->s_size, seg->s_szc);
1717 1715 }
1718 1716 if (error) {
1719 1717 seg->s_szc = 0;
1720 1718 seg->s_data = (void *)NULL;
1721 1719 kmem_free(shmd, (sizeof (*shmd)));
1722 1720 } else {
1723 1721 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1724 1722 shm_amp->refcnt++;
1725 1723 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1726 1724 }
1727 1725 return (error);
1728 1726 }
1729 1727
1730 1728 int
1731 1729 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1732 1730 {
1733 1731 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1734 1732 int reclaim = 1;
1735 1733
1736 1734 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1737 1735 retry:
1738 1736 if (shmd->shm_softlockcnt > 0) {
1739 1737 if (reclaim == 1) {
1740 1738 segspt_purge(seg);
1741 1739 reclaim = 0;
1742 1740 goto retry;
1743 1741 }
1744 1742 return (EAGAIN);
1745 1743 }
1746 1744
1747 1745 if (ssize != seg->s_size) {
1748 1746 #ifdef DEBUG
1749 1747 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1750 1748 ssize, seg->s_size);
1751 1749 #endif
1752 1750 return (EINVAL);
1753 1751 }
1754 1752
1755 1753 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1756 1754 NULL, 0);
1757 1755 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1758 1756
1759 1757 seg_free(seg);
1760 1758
1761 1759 return (0);
1762 1760 }
1763 1761
1764 1762 void
1765 1763 segspt_shmfree(struct seg *seg)
1766 1764 {
1767 1765 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1768 1766 struct anon_map *shm_amp = shmd->shm_amp;
1769 1767
1770 1768 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1771 1769
1772 1770 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1773 1771 MC_UNLOCK, NULL, 0);
1774 1772
1775 1773 /*
1776 1774 * Need to increment refcnt when attaching
1777 1775 * and decrement when detaching because of dup().
1778 1776 */
1779 1777 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1780 1778 shm_amp->refcnt--;
1781 1779 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1782 1780
1783 1781 if (shmd->shm_vpage) { /* only for DISM */
1784 1782 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1785 1783 shmd->shm_vpage = NULL;
1786 1784 }
1787 1785
1788 1786 /*
1789 1787 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1790 1788 * still working with this segment without holding as lock.
1791 1789 */
1792 1790 ASSERT(shmd->shm_softlockcnt == 0);
1793 1791 mutex_enter(&shmd->shm_segfree_syncmtx);
1794 1792 mutex_destroy(&shmd->shm_segfree_syncmtx);
1795 1793
1796 1794 kmem_free(shmd, sizeof (*shmd));
1797 1795 }
1798 1796
1799 1797 /*ARGSUSED*/
1800 1798 int
1801 1799 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1802 1800 {
1803 1801 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1804 1802
1805 1803 /*
1806 1804 * Shared page table is more than shared mapping.
1807 1805 * Individual process sharing page tables can't change prot
1808 1806 * because there is only one set of page tables.
1809 1807 * This will be allowed after private page table is
1810 1808 * supported.
1811 1809 */
1812 1810 /* need to return correct status error? */
1813 1811 return (0);
1814 1812 }
1815 1813
1816 1814
1817 1815 faultcode_t
1818 1816 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1819 1817 size_t len, enum fault_type type, enum seg_rw rw)
1820 1818 {
1821 1819 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1822 1820 struct seg *sptseg = shmd->shm_sptseg;
1823 1821 struct as *curspt = shmd->shm_sptas;
1824 1822 struct spt_data *sptd = sptseg->s_data;
1825 1823 pgcnt_t npages;
1826 1824 size_t size;
1827 1825 caddr_t segspt_addr, shm_addr;
1828 1826 page_t **ppa;
1829 1827 int i;
1830 1828 ulong_t an_idx = 0;
1831 1829 int err = 0;
1832 1830 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1833 1831 size_t pgsz;
1834 1832 pgcnt_t pgcnt;
1835 1833 caddr_t a;
1836 1834 pgcnt_t pidx;
1837 1835
1838 1836 #ifdef lint
1839 1837 hat = hat;
1840 1838 #endif
1841 1839 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1842 1840
1843 1841 /*
1844 1842 * Because of the way spt is implemented
1845 1843 * the realsize of the segment does not have to be
1846 1844 * equal to the segment size itself. The segment size is
1847 1845 * often in multiples of a page size larger than PAGESIZE.
1848 1846 * The realsize is rounded up to the nearest PAGESIZE
1849 1847 * based on what the user requested. This is a bit of
1850 1848 * ugliness that is historical but not easily fixed
1851 1849 * without re-designing the higher levels of ISM.
1852 1850 */
1853 1851 ASSERT(addr >= seg->s_base);
1854 1852 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1855 1853 return (FC_NOMAP);
1856 1854 /*
1857 1855 * For all of the following cases except F_PROT, we need to
1858 1856 * make any necessary adjustments to addr and len
1859 1857 * and get all of the necessary page_t's into an array called ppa[].
1860 1858 *
1861 1859 * The code in shmat() forces base addr and len of ISM segment
1862 1860 * to be aligned to largest page size supported. Therefore,
1863 1861 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1864 1862 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1865 1863 * in large pagesize chunks, or else we will screw up the HAT
1866 1864 * layer by calling hat_memload_array() with differing page sizes
1867 1865 * over a given virtual range.
1868 1866 */
1869 1867 pgsz = page_get_pagesize(sptseg->s_szc);
1870 1868 pgcnt = page_get_pagecnt(sptseg->s_szc);
1871 1869 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1872 1870 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1873 1871 npages = btopr(size);
1874 1872
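/*
 * Editor's note (illustrative, not part of the change): a worked example of
 * the alignment above, assuming 4K base pages and a 2M large page
 * (pgsz == 0x200000, pgcnt == 512).  For a fault at
 * addr == seg->s_base + 0x201000 with len == 0x2000:
 *
 *   shm_addr = P2ALIGN(addr, pgsz)                -> s_base + 0x200000
 *   size     = P2ROUNDUP(addr + len - shm_addr, pgsz)
 *            = P2ROUNDUP(0x3000, 0x200000)        -> 0x200000
 *   npages   = btopr(size)                        -> 512
 *
 * i.e. the fault is widened to the enclosing large page so that
 * hat_memload_array() always sees uniform, fully populated large-page
 * chunks over the faulted range.
 */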
1875 1873 /*
1876 1874 * Now we need to convert from addr in segshm to addr in segspt.
1877 1875 */
1878 1876 an_idx = seg_page(seg, shm_addr);
1879 1877 segspt_addr = sptseg->s_base + ptob(an_idx);
1880 1878
1881 1879 ASSERT((segspt_addr + ptob(npages)) <=
1882 1880 (sptseg->s_base + sptd->spt_realsize));
1883 1881 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1884 1882
1885 1883 switch (type) {
1886 1884
1887 1885 case F_SOFTLOCK:
1888 1886
1889 1887 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1890 1888 /*
1891 1889 * Fall through to the F_INVAL case to load up the hat layer
1892 1890 * entries with the HAT_LOAD_LOCK flag.
1893 1891 */
1894 1892 /* FALLTHRU */
1895 1893 case F_INVAL:
1896 1894
1897 1895 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1898 1896 return (FC_NOMAP);
1899 1897
1900 1898 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1901 1899
1902 1900 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1903 1901 if (err != 0) {
1904 1902 if (type == F_SOFTLOCK) {
1905 1903 atomic_add_long((ulong_t *)(
1906 1904 &(shmd->shm_softlockcnt)), -npages);
1907 1905 }
1908 1906 goto dism_err;
1909 1907 }
1910 1908 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1911 1909 a = segspt_addr;
1912 1910 pidx = 0;
1913 1911 if (type == F_SOFTLOCK) {
1914 1912
1915 1913 /*
1916 1914 * Load up the translation keeping it
1917 1915 * locked and don't unlock the page.
1918 1916 */
1919 1917 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1920 1918 hat_memload_array(sptseg->s_as->a_hat,
1921 1919 a, pgsz, &ppa[pidx], sptd->spt_prot,
1922 1920 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1923 1921 }
1924 1922 } else {
1925 1923 if (hat == seg->s_as->a_hat) {
1926 1924
1927 1925 /*
1928 1926 * Migrate pages marked for migration
1929 1927 */
1930 1928 if (lgrp_optimizations())
1931 1929 page_migrate(seg, shm_addr, ppa,
1932 1930 npages);
1933 1931
1934 1932 /* CPU HAT */
1935 1933 for (; pidx < npages;
1936 1934 a += pgsz, pidx += pgcnt) {
1937 1935 hat_memload_array(sptseg->s_as->a_hat,
1938 1936 a, pgsz, &ppa[pidx],
1939 1937 sptd->spt_prot,
1940 1938 HAT_LOAD_SHARE);
1941 1939 }
1942 1940 } else {
1943 1941 /* XHAT. Pass real address */
1944 1942 hat_memload_array(hat, shm_addr,
1945 1943 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);
1946 1944 }
1947 1945
1948 1946 /*
1949 1947 * And now drop the SE_SHARED lock(s).
1950 1948 */
1951 1949 if (dyn_ism_unmap) {
1952 1950 for (i = 0; i < npages; i++) {
1953 1951 page_unlock(ppa[i]);
1954 1952 }
1955 1953 }
1956 1954 }
1957 1955
1958 1956 if (!dyn_ism_unmap) {
1959 1957 if (hat_share(seg->s_as->a_hat, shm_addr,
1960 1958 curspt->a_hat, segspt_addr, ptob(npages),
1961 1959 seg->s_szc) != 0) {
1962 1960 panic("hat_share err in DISM fault");
1963 1961 /* NOTREACHED */
1964 1962 }
1965 1963 if (type == F_INVAL) {
1966 1964 for (i = 0; i < npages; i++) {
1967 1965 page_unlock(ppa[i]);
1968 1966 }
1969 1967 }
1970 1968 }
1971 1969 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1972 1970 dism_err:
1973 1971 kmem_free(ppa, npages * sizeof (page_t *));
1974 1972 return (err);
1975 1973
1976 1974 case F_SOFTUNLOCK:
1977 1975
1978 1976 /*
1979 1977 * This is a bit ugly, we pass in the real seg pointer,
1980 1978 * but the segspt_addr is the virtual address within the
1981 1979 * dummy seg.
1982 1980 */
1983 1981 segspt_softunlock(seg, segspt_addr, size, rw);
1984 1982 return (0);
1985 1983
1986 1984 case F_PROT:
1987 1985
1988 1986 /*
1989 1987 * This takes care of the unusual case where a user
1990 1988 * allocates a stack in shared memory and a register
1991 1989 * window overflow is written to that stack page before
1992 1990 * it is otherwise modified.
1993 1991 *
1994 1992 * We can get away with this because ISM segments are
1995 1993 * always rw. Other than this unusual case, there
1996 1994 * should be no instances of protection violations.
1997 1995 */
1998 1996 return (0);
1999 1997
2000 1998 default:
2001 1999 #ifdef DEBUG
2002 2000 panic("segspt_dismfault default type?");
2003 2001 #else
2004 2002 return (FC_NOMAP);
2005 2003 #endif
2006 2004 }
2007 2005 }
2008 2006
2009 2007
2010 2008 faultcode_t
2011 2009 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2012 2010 size_t len, enum fault_type type, enum seg_rw rw)
2013 2011 {
2014 2012 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2015 2013 struct seg *sptseg = shmd->shm_sptseg;
2016 2014 struct as *curspt = shmd->shm_sptas;
2017 2015 struct spt_data *sptd = sptseg->s_data;
2018 2016 pgcnt_t npages;
2019 2017 size_t size;
2020 2018 caddr_t sptseg_addr, shm_addr;
2021 2019 page_t *pp, **ppa;
2022 2020 int i;
2023 2021 u_offset_t offset;
2024 2022 ulong_t anon_index = 0;
2025 2023 struct vnode *vp;
2026 2024 struct anon_map *amp; /* XXX - for locknest */
2027 2025 struct anon *ap = NULL;
2028 2026 size_t pgsz;
2029 2027 pgcnt_t pgcnt;
2030 2028 caddr_t a;
2031 2029 pgcnt_t pidx;
2032 2030 size_t sz;
2033 2031
2034 2032 #ifdef lint
2035 2033 hat = hat;
2036 2034 #endif
2037 2035
2038 2036 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2039 2037
2040 2038 if (sptd->spt_flags & SHM_PAGEABLE) {
2041 2039 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2042 2040 }
2043 2041
2044 2042 /*
2045 2043 * Because of the way spt is implemented
2046 2044 * the realsize of the segment does not have to be
2047 2045 * equal to the segment size itself. The segment size is
2048 2046 * often in multiples of a page size larger than PAGESIZE.
2049 2047 * The realsize is rounded up to the nearest PAGESIZE
2050 2048 * based on what the user requested. This is a bit of
2051 2049 * ugliness that is historical but not easily fixed
2052 2050 * without re-designing the higher levels of ISM.
2053 2051 */
2054 2052 ASSERT(addr >= seg->s_base);
2055 2053 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2056 2054 return (FC_NOMAP);
2057 2055 /*
2058 2056 * For all of the following cases except F_PROT, we need to
2059 2057 * make any necessary adjustments to addr and len
2060 2058 * and get all of the necessary page_t's into an array called ppa[].
2061 2059 *
2062 2060 * The code in shmat() forces base addr and len of ISM segment
2063 2061 * to be aligned to largest page size supported. Therefore,
2064 2062 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2065 2063 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2066 2064 * in large pagesize chunks, or else we will screw up the HAT
2067 2065 * layer by calling hat_memload_array() with differing page sizes
2068 2066 * over a given virtual range.
2069 2067 */
2070 2068 pgsz = page_get_pagesize(sptseg->s_szc);
2071 2069 pgcnt = page_get_pagecnt(sptseg->s_szc);
2072 2070 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2073 2071 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2074 2072 npages = btopr(size);
2075 2073
2076 2074 /*
2077 2075 * Now we need to convert from addr in segshm to addr in segspt.
2078 2076 */
2079 2077 anon_index = seg_page(seg, shm_addr);
2080 2078 sptseg_addr = sptseg->s_base + ptob(anon_index);
2081 2079
2082 2080 /*
2083 2081 * And now we may have to adjust npages downward if we have
2084 2082 * exceeded the realsize of the segment or initial anon
2085 2083 * allocations.
2086 2084 */
2087 2085 if ((sptseg_addr + ptob(npages)) >
2088 2086 (sptseg->s_base + sptd->spt_realsize))
2089 2087 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2090 2088
2091 2089 npages = btopr(size);
2092 2090
2093 2091 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2094 2092 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2095 2093
2096 2094 switch (type) {
2097 2095
2098 2096 case F_SOFTLOCK:
2099 2097
2100 2098 /*
2101 2099 * availrmem is decremented once during anon_swap_adjust()
2102 2100 * and is incremented during the anon_unresv(), which is
2103 2101 * called from shm_rm_amp() when the segment is destroyed.
2104 2102 */
2105 2103 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2106 2104 /*
2107 2105 * Some platforms assume that ISM pages are SE_SHARED
2108 2106 * locked for the entire life of the segment.
2109 2107 */
2110 2108 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2111 2109 return (0);
2112 2110 /*
2113 2111 * Fall through to the F_INVAL case to load up the hat layer
2114 2112 * entries with the HAT_LOAD_LOCK flag.
2115 2113 */
2116 2114
2117 2115 /* FALLTHRU */
2118 2116 case F_INVAL:
2119 2117
2120 2118 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2121 2119 return (FC_NOMAP);
2122 2120
2123 2121 /*
2124 2122 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2125 2123 * may still rely on this call to hat_share(). That
2126 2124 * would imply that those hat's can fault on a
2127 2125 * HAT_LOAD_LOCK translation, which would seem
2128 2126 * contradictory.
2129 2127 */
2130 2128 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2131 2129 if (hat_share(seg->s_as->a_hat, seg->s_base,
2132 2130 curspt->a_hat, sptseg->s_base,
2133 2131 sptseg->s_size, sptseg->s_szc) != 0) {
2134 2132 panic("hat_share error in ISM fault");
2135 2133 /*NOTREACHED*/
2136 2134 }
2137 2135 return (0);
2138 2136 }
2139 2137 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2140 2138
2141 2139 /*
2142 2140 * I see no need to lock the real seg,
2143 2141 * here, because all of our work will be on the underlying
2144 2142 * dummy seg.
2145 2143 *
2146 2144 * sptseg_addr and npages now account for large pages.
2147 2145 */
2148 2146 amp = sptd->spt_amp;
2149 2147 ASSERT(amp != NULL);
2150 2148 anon_index = seg_page(sptseg, sptseg_addr);
2151 2149
2152 2150 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2153 2151 for (i = 0; i < npages; i++) {
2154 2152 ap = anon_get_ptr(amp->ahp, anon_index++);
2155 2153 ASSERT(ap != NULL);
2156 2154 swap_xlate(ap, &vp, &offset);
2157 2155 pp = page_lookup(vp, offset, SE_SHARED);
2158 2156 ASSERT(pp != NULL);
2159 2157 ppa[i] = pp;
2160 2158 }
2161 2159 ANON_LOCK_EXIT(&amp->a_rwlock);
2162 2160 ASSERT(i == npages);
2163 2161
2164 2162 /*
2165 2163 * We are already holding the as->a_lock on the user's
2166 2164 * real segment, but we need to hold the a_lock on the
2167 2165 * underlying dummy as. This is mostly to satisfy the
2168 2166 * underlying HAT layer.
2169 2167 */
2170 2168 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2171 2169 a = sptseg_addr;
2172 2170 pidx = 0;
2173 2171 if (type == F_SOFTLOCK) {
2174 2172 /*
2175 2173 * Load up the translation keeping it
2176 2174 * locked and don't unlock the page.
2177 2175 */
2178 2176 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2179 2177 sz = MIN(pgsz, ptob(npages - pidx));
2180 2178 hat_memload_array(sptseg->s_as->a_hat, a,
2181 2179 sz, &ppa[pidx], sptd->spt_prot,
2182 2180 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2183 2181 }
2184 2182 } else {
2185 2183 if (hat == seg->s_as->a_hat) {
2186 2184
2187 2185 /*
2188 2186 * Migrate pages marked for migration.
2189 2187 */
2190 2188 if (lgrp_optimizations())
2191 2189 page_migrate(seg, shm_addr, ppa,
2192 2190 npages);
2193 2191
2194 2192 /* CPU HAT */
2195 2193 for (; pidx < npages;
2196 2194 a += pgsz, pidx += pgcnt) {
2197 2195 sz = MIN(pgsz, ptob(npages - pidx));
2198 2196 hat_memload_array(sptseg->s_as->a_hat,
2199 2197 a, sz, &ppa[pidx],
2200 2198 sptd->spt_prot, HAT_LOAD_SHARE);
2201 2199 }
2202 2200 } else {
2203 2201 /* XHAT. Pass real address */
2204 2202 hat_memload_array(hat, shm_addr,
2205 2203 ptob(npages), ppa, sptd->spt_prot,
2206 2204 HAT_LOAD_SHARE);
2207 2205 }
2208 2206
2209 2207 /*
2210 2208 * And now drop the SE_SHARED lock(s).
2211 2209 */
2212 2210 for (i = 0; i < npages; i++)
2213 2211 page_unlock(ppa[i]);
2214 2212 }
2215 2213 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2216 2214
2217 2215 kmem_free(ppa, sizeof (page_t *) * npages);
2218 2216 return (0);
2219 2217 case F_SOFTUNLOCK:
2220 2218
2221 2219 /*
2222 2220 * This is a bit ugly, we pass in the real seg pointer,
2223 2221 * but the sptseg_addr is the virtual address within the
2224 2222 * dummy seg.
2225 2223 */
2226 2224 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2227 2225 return (0);
2228 2226
2229 2227 case F_PROT:
2230 2228
2231 2229 /*
2232 2230 * This takes care of the unusual case where a user
2233 2231 * allocates a stack in shared memory and a register
2234 2232 * window overflow is written to that stack page before
2235 2233 * it is otherwise modified.
2236 2234 *
2237 2235 * We can get away with this because ISM segments are
2238 2236 * always rw. Other than this unusual case, there
2239 2237 * should be no instances of protection violations.
2240 2238 */
2241 2239 return (0);
2242 2240
2243 2241 default:
2244 2242 #ifdef DEBUG
2245 2243 cmn_err(CE_WARN, "segspt_shmfault default type?");
2246 2244 #endif
2247 2245 return (FC_NOMAP);
2248 2246 }
2249 2247 }
2250 2248
2251 2249 /*ARGSUSED*/
2252 2250 static faultcode_t
2253 2251 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2254 2252 {
2255 2253 return (0);
2256 2254 }
2257 2255
2258 2256 /*ARGSUSED*/
2259 2257 static int
2260 2258 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2261 2259 {
2262 2260 return (0);
2263 2261 }
2264 2262
2265 2263 /*ARGSUSED*/
2266 2264 static size_t
2267 2265 segspt_shmswapout(struct seg *seg)
2268 2266 {
2269 2267 return (0);
2270 2268 }
2271 2269
2272 2270 /*
2273 2271 * duplicate the shared page tables
2274 2272 */
2275 2273 int
2276 2274 segspt_shmdup(struct seg *seg, struct seg *newseg)
2277 2275 {
2278 2276 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2279 2277 struct anon_map *amp = shmd->shm_amp;
2280 2278 struct shm_data *shmd_new;
2281 2279 struct seg *spt_seg = shmd->shm_sptseg;
2282 2280 struct spt_data *sptd = spt_seg->s_data;
2283 2281 int error = 0;
2284 2282
2285 2283 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2286 2284
2287 2285 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2288 2286 newseg->s_data = (void *)shmd_new;
2289 2287 shmd_new->shm_sptas = shmd->shm_sptas;
2290 2288 shmd_new->shm_amp = amp;
2291 2289 shmd_new->shm_sptseg = shmd->shm_sptseg;
2292 2290 newseg->s_ops = &segspt_shmops;
2293 2291 newseg->s_szc = seg->s_szc;
2294 2292 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2295 2293
2296 2294 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2297 2295 amp->refcnt++;
2298 2296 ANON_LOCK_EXIT(&amp->a_rwlock);
2299 2297
2300 2298 if (sptd->spt_flags & SHM_PAGEABLE) {
2301 2299 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2302 2300 shmd_new->shm_lckpgs = 0;
2303 2301 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2304 2302 if ((error = hat_share(newseg->s_as->a_hat,
2305 2303 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2306 2304 seg->s_size, seg->s_szc)) != 0) {
2307 2305 kmem_free(shmd_new->shm_vpage,
2308 2306 btopr(amp->size));
2309 2307 }
2310 2308 }
2311 2309 return (error);
2312 2310 } else {
2313 2311 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2314 2312 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2315 2313 seg->s_szc));
2316 2314
2317 2315 }
2318 2316 }
2319 2317
2320 2318 /*ARGSUSED*/
2321 2319 int
2322 2320 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2323 2321 {
2324 2322 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2325 2323 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2326 2324
2327 2325 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2328 2326
2329 2327 /*
2330 2328 * ISM segment is always rw.
2331 2329 */
2332 2330 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2333 2331 }
2334 2332
2335 2333 /*
2336 2334 * Return an array of locked large pages, for empty slots allocate
2337 2335 * private zero-filled anon pages.
2338 2336 */
2339 2337 static int
2340 2338 spt_anon_getpages(
2341 2339 struct seg *sptseg,
2342 2340 caddr_t sptaddr,
2343 2341 size_t len,
2344 2342 page_t *ppa[])
2345 2343 {
2346 2344 struct spt_data *sptd = sptseg->s_data;
2347 2345 struct anon_map *amp = sptd->spt_amp;
2348 2346 enum seg_rw rw = sptd->spt_prot;
2349 2347 uint_t szc = sptseg->s_szc;
2350 2348 size_t pg_sz, share_sz = page_get_pagesize(szc);
2351 2349 pgcnt_t lp_npgs;
2352 2350 caddr_t lp_addr, e_sptaddr;
2353 2351 uint_t vpprot, ppa_szc = 0;
2354 2352 struct vpage *vpage = NULL;
2355 2353 ulong_t j, ppa_idx;
2356 2354 int err, ierr = 0;
2357 2355 pgcnt_t an_idx;
2358 2356 anon_sync_obj_t cookie;
2359 2357 int anon_locked = 0;
2360 2358 pgcnt_t amp_pgs;
2361 2359
2362 2360
2363 2361 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2364 2362 ASSERT(len != 0);
2365 2363
2366 2364 pg_sz = share_sz;
2367 2365 lp_npgs = btop(pg_sz);
2368 2366 lp_addr = sptaddr;
2369 2367 e_sptaddr = sptaddr + len;
2370 2368 an_idx = seg_page(sptseg, sptaddr);
2371 2369 ppa_idx = 0;
2372 2370
2373 2371 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2374 2372
2375 2373 amp_pgs = page_get_pagecnt(amp->a_szc);
2376 2374
2377 2375 /*CONSTCOND*/
2378 2376 while (1) {
2379 2377 for (; lp_addr < e_sptaddr;
2380 2378 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2381 2379
2382 2380 /*
2383 2381 * If we're currently locked, and we get to a new
2384 2382 * page, unlock our current anon chunk.
2385 2383 */
2386 2384 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2387 2385 anon_array_exit(&cookie);
2388 2386 anon_locked = 0;
2389 2387 }
2390 2388 if (!anon_locked) {
2391 2389 anon_array_enter(amp, an_idx, &cookie);
2392 2390 anon_locked = 1;
2393 2391 }
2394 2392 ppa_szc = (uint_t)-1;
2395 2393 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2396 2394 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2397 2395 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2398 2396
2399 2397 if (ierr != 0) {
2400 2398 if (ierr > 0) {
2401 2399 err = FC_MAKE_ERR(ierr);
2402 2400 goto lpgs_err;
2403 2401 }
2404 2402 break;
2405 2403 }
2406 2404 }
2407 2405 if (lp_addr == e_sptaddr) {
2408 2406 break;
2409 2407 }
2410 2408 ASSERT(lp_addr < e_sptaddr);
2411 2409
2412 2410 /*
2413 2411 * ierr == -1 means we failed to allocate a large page.
2414 2412 * so do a size down operation.
2415 2413 *
2416 2414 * ierr == -2 means some other process that privately shares
2417 2415 * pages with this process has allocated a larger page and we
2418 2416 * need to retry with larger pages. So do a size up
2419 2417 * operation. This relies on the fact that large pages are
2420 2418 * never partially shared i.e. if we share any constituent
2421 2419 * page of a large page with another process we must share the
2422 2420 * entire large page. Note this cannot happen for SOFTLOCK
2423 2421 * case, unless current address (lpaddr) is at the beginning
2424 2422 * of the next page size boundary because the other process
2425 2423 * couldn't have relocated locked pages.
2426 2424 */
2427 2425 ASSERT(ierr == -1 || ierr == -2);
2428 2426 if (segvn_anypgsz) {
2429 2427 ASSERT(ierr == -2 || szc != 0);
2430 2428 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2431 2429 szc = (ierr == -1) ? szc - 1 : szc + 1;
2432 2430 } else {
2433 2431 /*
2434 2432 * For faults and segvn_anypgsz == 0
2435 2433 * we need to be careful not to loop forever
2436 2434 * if existing page is found with szc other
2437 2435 * than 0 or seg->s_szc. This could be due
2438 2436 * to page relocations on behalf of DR or
2439 2437 * more likely large page creation. For this
2440 2438 * case simply re-size to existing page's szc
2441 2439 * if returned by anon_map_getpages().
2442 2440 */
2443 2441 if (ppa_szc == (uint_t)-1) {
2444 2442 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2445 2443 } else {
2446 2444 ASSERT(ppa_szc <= sptseg->s_szc);
2447 2445 ASSERT(ierr == -2 || ppa_szc < szc);
2448 2446 ASSERT(ierr == -1 || ppa_szc > szc);
2449 2447 szc = ppa_szc;
2450 2448 }
2451 2449 }
2452 2450 pg_sz = page_get_pagesize(szc);
2453 2451 lp_npgs = btop(pg_sz);
2454 2452 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2455 2453 }
2456 2454 if (anon_locked) {
2457 2455 anon_array_exit(&cookie);
2458 2456 }
2459 2457 ANON_LOCK_EXIT(&amp->a_rwlock);
2460 2458 return (0);
2461 2459
2462 2460 lpgs_err:
2463 2461 if (anon_locked) {
2464 2462 anon_array_exit(&cookie);
2465 2463 }
2466 2464 ANON_LOCK_EXIT(&amp->a_rwlock);
2467 2465 for (j = 0; j < ppa_idx; j++)
2468 2466 page_unlock(ppa[j]);
2469 2467 return (err);
2470 2468 }
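/*
 * Editor's note (illustrative sketch, not part of the change): one possible
 * retry sequence through the loop above, assuming sptseg->s_szc == 2 and
 * segvn_anypgsz != 0.  If anon_map_getpages() returns -1 (large page
 * allocation failed), szc steps down 2 -> 1, pg_sz and lp_npgs are
 * recomputed, and the same lp_addr is retried with the smaller size; if a
 * later call returns -2 (another process already shares a larger page for
 * that range), szc steps back up 1 -> 2.  With segvn_anypgsz == 0 the code
 * instead jumps directly to the existing page's size (ppa_szc), or to
 * 0 / s_szc when no existing page size was reported.
 */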
2471 2469
2472 2470 /*
2473 2471 * count the number of bytes in a set of spt pages that are currently not
2474 2472 * locked
2475 2473 */
2476 2474 static rctl_qty_t
2477 2475 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2478 2476 {
2479 2477 ulong_t i;
2480 2478 rctl_qty_t unlocked = 0;
2481 2479
2482 2480 for (i = 0; i < npages; i++) {
2483 2481 if (ppa[i]->p_lckcnt == 0)
2484 2482 unlocked += PAGESIZE;
2485 2483 }
2486 2484 return (unlocked);
2487 2485 }
2488 2486
2489 2487 extern u_longlong_t randtick(void);
2490 2488 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2491 2489 #define NLCK (NCPU_P2)
2492 2490 /* Random number with a range [0, n-1], n must be power of two */
2493 2491 #define RAND_P2(n) \
2494 2492 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
2495 2493
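/*
 * Editor's note (illustrative, not part of the change): under the assumed
 * case NCPU_P2 == 64, NLCK == 64 and RAND_P2(NLCK) masks with 0x3f, so
 * spt_lockpages()/spt_unlockpages() reserve a batch of 64-127 lock
 * "credits" per freemem_lock acquisition instead of taking the mutex once
 * per page.  The randomized batch size keeps concurrent SHM_LOCK threads
 * from draining their reservations in lockstep and colliding on
 * freemem_lock at the same moment.
 */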
2496 2494 int
2497 2495 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2498 2496 page_t **ppa, ulong_t *lockmap, size_t pos,
2499 2497 rctl_qty_t *locked)
2500 2498 {
2501 2499 struct shm_data *shmd = seg->s_data;
2502 2500 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2503 2501 ulong_t i;
2504 2502 int kernel;
2505 2503 pgcnt_t nlck = 0;
2506 2504 int rv = 0;
2507 2505 int use_reserved = 1;
2508 2506
2509 2507 /* return the number of bytes actually locked */
2510 2508 *locked = 0;
2511 2509
2512 2510 /*
2513 2511 * To avoid contention on freemem_lock, availrmem and pages_locked
2514 2512 * global counters are updated only every nlck locked pages instead of
2515 2513 * every time. Reserve nlck locks up front and deduct from this
2516 2514 * reservation for each page that requires a lock. When the reservation
2517 2515 * is consumed, reserve again. nlck is randomized, so the competing
2518 2516 * threads do not fall into a cyclic lock contention pattern. When
2519 2517 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2520 2518 * is used to lock pages.
2521 2519 */
2522 2520 for (i = 0; i < npages; anon_index++, pos++, i++) {
2523 2521 if (nlck == 0 && use_reserved == 1) {
2524 2522 nlck = NLCK + RAND_P2(NLCK);
2525 2523 /* if fewer loops left, decrease nlck */
2526 2524 nlck = MIN(nlck, npages - i);
2527 2525 /*
2528 2526 * Reserve nlck locks up front and deduct from this
2529 2527 * reservation for each page that requires a lock. When
2530 2528 * the reservation is consumed, reserve again.
2531 2529 */
2532 2530 mutex_enter(&freemem_lock);
2533 2531 if ((availrmem - nlck) < pages_pp_maximum) {
2534 2532 /* Do not do advance memory reserves */
2535 2533 use_reserved = 0;
2536 2534 } else {
2537 2535 availrmem -= nlck;
2538 2536 pages_locked += nlck;
2539 2537 }
2540 2538 mutex_exit(&freemem_lock);
2541 2539 }
2542 2540 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2543 2541 if (sptd->spt_ppa_lckcnt[anon_index] <
2544 2542 (ushort_t)DISM_LOCK_MAX) {
2545 2543 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2546 2544 (ushort_t)DISM_LOCK_MAX) {
2547 2545 cmn_err(CE_WARN,
2548 2546 "DISM page lock limit "
2549 2547 "reached on DISM offset 0x%lx\n",
2550 2548 anon_index << PAGESHIFT);
2551 2549 }
2552 2550 kernel = (sptd->spt_ppa &&
2553 2551 sptd->spt_ppa[anon_index]);
2554 2552 if (!page_pp_lock(ppa[i], 0, kernel ||
2555 2553 use_reserved)) {
2556 2554 sptd->spt_ppa_lckcnt[anon_index]--;
2557 2555 rv = EAGAIN;
2558 2556 break;
2559 2557 }
2560 2558 /* if this is a newly locked page, count it */
2561 2559 if (ppa[i]->p_lckcnt == 1) {
2562 2560 if (kernel == 0 && use_reserved == 1)
2563 2561 nlck--;
2564 2562 *locked += PAGESIZE;
2565 2563 }
2566 2564 shmd->shm_lckpgs++;
2567 2565 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2568 2566 if (lockmap != NULL)
2569 2567 BT_SET(lockmap, pos);
2570 2568 }
2571 2569 }
2572 2570 }
2573 2571 /* Return unused lock reservation */
2574 2572 if (nlck != 0 && use_reserved == 1) {
2575 2573 mutex_enter(&freemem_lock);
2576 2574 availrmem += nlck;
2577 2575 pages_locked -= nlck;
2578 2576 mutex_exit(&freemem_lock);
2579 2577 }
2580 2578
2581 2579 return (rv);
2582 2580 }
2583 2581
2584 2582 int
2585 2583 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2586 2584 rctl_qty_t *unlocked)
2587 2585 {
2588 2586 struct shm_data *shmd = seg->s_data;
2589 2587 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2590 2588 struct anon_map *amp = sptd->spt_amp;
2591 2589 struct anon *ap;
2592 2590 struct vnode *vp;
2593 2591 u_offset_t off;
2594 2592 struct page *pp;
2595 2593 int kernel;
2596 2594 anon_sync_obj_t cookie;
2597 2595 ulong_t i;
2598 2596 pgcnt_t nlck = 0;
2599 2597 pgcnt_t nlck_limit = NLCK;
2600 2598
2601 2599 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2602 2600 for (i = 0; i < npages; i++, anon_index++) {
2603 2601 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2604 2602 anon_array_enter(amp, anon_index, &cookie);
2605 2603 ap = anon_get_ptr(amp->ahp, anon_index);
2606 2604 ASSERT(ap);
2607 2605
2608 2606 swap_xlate(ap, &vp, &off);
2609 2607 anon_array_exit(&cookie);
2610 2608 pp = page_lookup(vp, off, SE_SHARED);
2611 2609 ASSERT(pp);
2612 2610 /*
2613 2611 * availrmem is decremented only for pages which are not
2614 2612 * in seg pcache, for pages in seg pcache availrmem was
2615 2613 * decremented in _dismpagelock()
2616 2614 */
2617 2615 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2618 2616 ASSERT(pp->p_lckcnt > 0);
2619 2617
2620 2618 /*
2621 2619 * unlock page but do not change availrmem, we do it
2622 2620 * ourselves every nlck loops.
2623 2621 */
2624 2622 page_pp_unlock(pp, 0, 1);
2625 2623 if (pp->p_lckcnt == 0) {
2626 2624 if (kernel == 0)
2627 2625 nlck++;
2628 2626 *unlocked += PAGESIZE;
2629 2627 }
2630 2628 page_unlock(pp);
2631 2629 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2632 2630 sptd->spt_ppa_lckcnt[anon_index]--;
2633 2631 shmd->shm_lckpgs--;
2634 2632 }
2635 2633
2636 2634 /*
2637 2635 * To reduce freemem_lock contention, do not update availrmem
2638 2636 * until at least NLCK pages have been unlocked.
2639 2637 * 1. No need to update if nlck is zero
2640 2638 * 2. Always update if the last iteration
2641 2639 */
2642 2640 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2643 2641 mutex_enter(&freemem_lock);
2644 2642 availrmem += nlck;
2645 2643 pages_locked -= nlck;
2646 2644 mutex_exit(&freemem_lock);
2647 2645 nlck = 0;
2648 2646 nlck_limit = NLCK + RAND_P2(NLCK);
2649 2647 }
2650 2648 }
2651 2649 ANON_LOCK_EXIT(&amp->a_rwlock);
2652 2650
2653 2651 return (0);
2654 2652 }
2655 2653
2656 2654 /*ARGSUSED*/
2657 2655 static int
2658 2656 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2659 2657 int attr, int op, ulong_t *lockmap, size_t pos)
2660 2658 {
2661 2659 struct shm_data *shmd = seg->s_data;
2662 2660 struct seg *sptseg = shmd->shm_sptseg;
2663 2661 struct spt_data *sptd = sptseg->s_data;
2664 2662 struct kshmid *sp = sptd->spt_amp->a_sp;
2665 2663 pgcnt_t npages, a_npages;
2666 2664 page_t **ppa;
2667 2665 pgcnt_t an_idx, a_an_idx, ppa_idx;
2668 2666 caddr_t spt_addr, a_addr; /* spt and aligned address */
2669 2667 size_t a_len; /* aligned len */
2670 2668 size_t share_sz;
2671 2669 ulong_t i;
2672 2670 int sts = 0;
2673 2671 rctl_qty_t unlocked = 0;
2674 2672 rctl_qty_t locked = 0;
2675 2673 struct proc *p = curproc;
2676 2674 kproject_t *proj;
2677 2675
2678 2676 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2679 2677 ASSERT(sp != NULL);
2680 2678
2681 2679 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2682 2680 return (0);
2683 2681 }
2684 2682
2685 2683 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2686 2684 an_idx = seg_page(seg, addr);
2687 2685 npages = btopr(len);
2688 2686
2689 2687 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2690 2688 return (ENOMEM);
2691 2689 }
2692 2690
2693 2691 /*
2694 2692 * A shm's project never changes, so no lock needed.
2695 2693 * The shm has a hold on the project, so it will not go away.
2696 2694 * Since we have a mapping to shm within this zone, we know
2697 2695 * that the zone will not go away.
2698 2696 */
2699 2697 proj = sp->shm_perm.ipc_proj;
2700 2698
2701 2699 if (op == MC_LOCK) {
2702 2700
2703 2701 /*
2704 2702 * Need to align addr and size request if they are not
2705 2703 * aligned so we can always allocate large page(s); however,
2706 2704 * we only lock what was requested in the initial request.
2707 2705 */
2708 2706 share_sz = page_get_pagesize(sptseg->s_szc);
2709 2707 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2710 2708 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2711 2709 share_sz);
2712 2710 a_npages = btop(a_len);
2713 2711 a_an_idx = seg_page(seg, a_addr);
2714 2712 spt_addr = sptseg->s_base + ptob(a_an_idx);
2715 2713 ppa_idx = an_idx - a_an_idx;
2716 2714
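/*
 * Editor's note (illustrative, not part of the change): a worked example of
 * the index math above, assuming 4K base pages and a 2M share_sz.  For an
 * MC_LOCK request at addr == seg->s_base + 0x201000, len == 0x2000:
 *
 *   an_idx   = 0x201   npages   = 2
 *   a_addr   = s_base + 0x200000   a_len = 0x200000
 *   a_an_idx = 0x200   a_npages = 512   ppa_idx = 1
 *
 * spt_anon_getpages() fills ppa[0..511] for the whole aligned large page,
 * but only &ppa[ppa_idx] (covering the two requested pages) is handed to
 * spt_lockpages() below.
 */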
2717 2715 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2718 2716 KM_NOSLEEP)) == NULL) {
2719 2717 return (ENOMEM);
2720 2718 }
2721 2719
2722 2720 /*
2723 2721 * Don't cache any new pages for IO and
2724 2722 * flush any cached pages.
2725 2723 */
2726 2724 mutex_enter(&sptd->spt_lock);
2727 2725 if (sptd->spt_ppa != NULL)
2728 2726 sptd->spt_flags |= DISM_PPA_CHANGED;
2729 2727
2730 2728 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2731 2729 if (sts != 0) {
2732 2730 mutex_exit(&sptd->spt_lock);
2733 2731 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2734 2732 return (sts);
2735 2733 }
2736 2734
2737 2735 mutex_enter(&sp->shm_mlock);
2738 2736 /* enforce locked memory rctl */
2739 2737 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2740 2738
2741 2739 mutex_enter(&p->p_lock);
2742 2740 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2743 2741 mutex_exit(&p->p_lock);
2744 2742 sts = EAGAIN;
2745 2743 } else {
2746 2744 mutex_exit(&p->p_lock);
2747 2745 sts = spt_lockpages(seg, an_idx, npages,
2748 2746 &ppa[ppa_idx], lockmap, pos, &locked);
2749 2747
2750 2748 /*
2751 2749 * correct locked count if not all pages could be
2752 2750 * locked
2753 2751 */
2754 2752 if ((unlocked - locked) > 0) {
2755 2753 rctl_decr_locked_mem(NULL, proj,
2756 2754 (unlocked - locked), 0);
2757 2755 }
2758 2756 }
2759 2757 /*
2760 2758 * unlock pages
2761 2759 */
2762 2760 for (i = 0; i < a_npages; i++)
2763 2761 page_unlock(ppa[i]);
2764 2762 if (sptd->spt_ppa != NULL)
2765 2763 sptd->spt_flags |= DISM_PPA_CHANGED;
2766 2764 mutex_exit(&sp->shm_mlock);
2767 2765 mutex_exit(&sptd->spt_lock);
2768 2766
2769 2767 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2770 2768
2771 2769 } else if (op == MC_UNLOCK) { /* unlock */
2772 2770 page_t **ppa;
2773 2771
2774 2772 mutex_enter(&sptd->spt_lock);
2775 2773 if (shmd->shm_lckpgs == 0) {
2776 2774 mutex_exit(&sptd->spt_lock);
2777 2775 return (0);
2778 2776 }
2779 2777 /*
2780 2778 * Don't cache new IO pages.
2781 2779 */
2782 2780 if (sptd->spt_ppa != NULL)
2783 2781 sptd->spt_flags |= DISM_PPA_CHANGED;
2784 2782
2785 2783 mutex_enter(&sp->shm_mlock);
2786 2784 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2787 2785 if ((ppa = sptd->spt_ppa) != NULL)
2788 2786 sptd->spt_flags |= DISM_PPA_CHANGED;
2789 2787 mutex_exit(&sptd->spt_lock);
2790 2788
2791 2789 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2792 2790 mutex_exit(&sp->shm_mlock);
2793 2791
2794 2792 if (ppa != NULL)
2795 2793 seg_ppurge_wiredpp(ppa);
2796 2794 }
2797 2795 return (sts);
2798 2796 }
2799 2797
2800 2798 /*ARGSUSED*/
2801 2799 int
2802 2800 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2803 2801 {
2804 2802 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2805 2803 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2806 2804 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2807 2805
2808 2806 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2809 2807
2810 2808 /*
2811 2809 * ISM segment is always rw.
2812 2810 */
2813 2811 while (--pgno >= 0)
2814 2812 *protv++ = sptd->spt_prot;
2815 2813 return (0);
2816 2814 }
2817 2815
2818 2816 /*ARGSUSED*/
2819 2817 u_offset_t
2820 2818 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2821 2819 {
2822 2820 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2823 2821
2824 2822 /* Offset does not matter in ISM memory */
2825 2823
2826 2824 return ((u_offset_t)0);
2827 2825 }
2828 2826
2829 2827 /* ARGSUSED */
2830 2828 int
2831 2829 segspt_shmgettype(struct seg *seg, caddr_t addr)
2832 2830 {
2833 2831 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2834 2832 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2835 2833
2836 2834 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2837 2835
2838 2836 /*
2839 2837 * The shared memory mapping is always MAP_SHARED, SWAP is only
2840 2838 * reserved for DISM
2841 2839 */
2842 2840 return (MAP_SHARED |
2843 2841 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2844 2842 }
2845 2843
2846 2844 /*ARGSUSED*/
2847 2845 int
2848 2846 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2849 2847 {
2850 2848 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2851 2849 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2852 2850
2853 2851 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2854 2852
2855 2853 *vpp = sptd->spt_vp;
2856 2854 return (0);
2857 2855 }
2858 2856
2859 2857 /*
2860 2858 * We need to wait for pending IO to complete to a DISM segment in order for
2861 2859 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2862 2860 * than enough time to wait.
2863 2861 */
2864 2862 static clock_t spt_pcache_wait = 120;
2865 2863
2866 2864 /*ARGSUSED*/
2867 2865 static int
2868 2866 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2869 2867 {
2870 2868 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2871 2869 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2872 2870 struct anon_map *amp;
2873 2871 pgcnt_t pg_idx;
2874 2872 ushort_t gen;
2875 2873 clock_t end_lbolt;
2876 2874 int writer;
2877 2875 page_t **ppa;
2878 2876
2879 2877 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2880 2878
2881 2879 if (behav == MADV_FREE) {
2882 2880 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2883 2881 return (0);
2884 2882
2885 2883 amp = sptd->spt_amp;
2886 2884 pg_idx = seg_page(seg, addr);
2887 2885
2888 2886 mutex_enter(&sptd->spt_lock);
2889 2887 if ((ppa = sptd->spt_ppa) == NULL) {
2890 2888 mutex_exit(&sptd->spt_lock);
2891 2889 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2892 2890 anon_disclaim(amp, pg_idx, len);
2893 2891 ANON_LOCK_EXIT(&amp->a_rwlock);
2894 2892 return (0);
2895 2893 }
2896 2894
2897 2895 sptd->spt_flags |= DISM_PPA_CHANGED;
2898 2896 gen = sptd->spt_gen;
2899 2897
2900 2898 mutex_exit(&sptd->spt_lock);
2901 2899
2902 2900 /*
2903 2901 * Purge all DISM cached pages
2904 2902 */
2905 2903 seg_ppurge_wiredpp(ppa);
2906 2904
2907 2905 /*
2908 2906 * Drop the AS_LOCK so that other threads can grab it
2909 2907 * in the as_pageunlock path and hopefully get the segment
2910 2908 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2911 2909 * to keep this segment resident.
2912 2910 */
2913 2911 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2914 2912 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2915 2913 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2916 2914
2917 2915 mutex_enter(&sptd->spt_lock);
2918 2916
2919 2917 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2920 2918
2921 2919 /*
2922 2920 * Try to wait for pages to get kicked out of the seg_pcache.
2923 2921 */
2924 2922 while (sptd->spt_gen == gen &&
2925 2923 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2926 2924 ddi_get_lbolt() < end_lbolt) {
2927 2925 if (!cv_timedwait_sig(&sptd->spt_cv,
2928 2926 &sptd->spt_lock, end_lbolt)) {
2929 2927 break;
2930 2928 }
2931 2929 }
2932 2930
2933 2931 mutex_exit(&sptd->spt_lock);
2934 2932
2935 2933 /* Regrab the AS_LOCK and release our hold on the segment */
2936 2934 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2937 2935 writer ? RW_WRITER : RW_READER);
2938 2936 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2939 2937 if (shmd->shm_softlockcnt <= 0) {
2940 2938 if (AS_ISUNMAPWAIT(seg->s_as)) {
2941 2939 mutex_enter(&seg->s_as->a_contents);
2942 2940 if (AS_ISUNMAPWAIT(seg->s_as)) {
2943 2941 AS_CLRUNMAPWAIT(seg->s_as);
2944 2942 cv_broadcast(&seg->s_as->a_cv);
2945 2943 }
2946 2944 mutex_exit(&seg->s_as->a_contents);
2947 2945 }
2948 2946 }
2949 2947
2950 2948 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2951 2949 anon_disclaim(amp, pg_idx, len);
2952 2950 ANON_LOCK_EXIT(&amp->a_rwlock);
2953 2951 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2954 2952 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2955 2953 int already_set;
2956 2954 ulong_t anon_index;
2957 2955 lgrp_mem_policy_t policy;
2958 2956 caddr_t shm_addr;
2959 2957 size_t share_size;
2960 2958 size_t size;
2961 2959 struct seg *sptseg = shmd->shm_sptseg;
2962 2960 caddr_t sptseg_addr;
2963 2961
2964 2962 /*
2965 2963 * Align address and length to page size of underlying segment
2966 2964 */
2967 2965 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2968 2966 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2969 2967 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2970 2968 share_size);
2971 2969
2972 2970 amp = shmd->shm_amp;
2973 2971 anon_index = seg_page(seg, shm_addr);
2974 2972
2975 2973 /*
2976 2974 * And now we may have to adjust size downward if we have
2977 2975 * exceeded the realsize of the segment or initial anon
2978 2976 * allocations.
2979 2977 */
2980 2978 sptseg_addr = sptseg->s_base + ptob(anon_index);
2981 2979 if ((sptseg_addr + size) >
2982 2980 (sptseg->s_base + sptd->spt_realsize))
2983 2981 size = (sptseg->s_base + sptd->spt_realsize) -
2984 2982 sptseg_addr;
2985 2983
2986 2984 /*
2987 2985 * Set memory allocation policy for this segment
2988 2986 */
2989 2987 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2990 2988 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2991 2989 NULL, 0, len);
2992 2990
2993 2991 /*
2994 2992 * If random memory allocation policy set already,
2995 2993 * don't bother reapplying it.
2996 2994 */
2997 2995 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2998 2996 return (0);
2999 2997
3000 2998 /*
3001 2999 * Mark any existing pages in the given range for
3002 3000 * migration, flushing the I/O page cache, and using
3003 3001 * underlying segment to calculate anon index and get
3004 3002 * anonmap and vnode pointer from
3005 3003 */
3006 3004 if (shmd->shm_softlockcnt > 0)
3007 3005 segspt_purge(seg);
3008 3006
3009 3007 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3010 3008 }
3011 3009
3012 3010 return (0);
3013 3011 }
3014 3012
3015 3013 /*ARGSUSED*/
3016 3014 void
3017 3015 segspt_shmdump(struct seg *seg)
3018 3016 {
3019 3017 /* no-op for ISM segment */
3020 3018 }
3021 3019
3022 3020 /*ARGSUSED*/
3023 3021 static faultcode_t
3024 3022 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
3025 3023 {
3026 3024 return (ENOTSUP);
3027 3025 }
3028 3026
3029 3027 /*
3030 3028 * get a memory ID for an addr in a given segment
3031 3029 */
3032 3030 static int
3033 3031 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3034 3032 {
3035 3033 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3036 3034 struct anon *ap;
3037 3035 size_t anon_index;
3038 3036 struct anon_map *amp = shmd->shm_amp;
3039 3037 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3040 3038 struct seg *sptseg = shmd->shm_sptseg;
3041 3039 anon_sync_obj_t cookie;
3042 3040
3043 3041 anon_index = seg_page(seg, addr);
3044 3042
3045 3043 if (addr > (seg->s_base + sptd->spt_realsize)) {
3046 3044 return (EFAULT);
3047 3045 }
3048 3046
3049 3047 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3050 3048 anon_array_enter(amp, anon_index, &cookie);
3051 3049 ap = anon_get_ptr(amp->ahp, anon_index);
3052 3050 if (ap == NULL) {
3053 3051 struct page *pp;
3054 3052 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3055 3053
3056 3054 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3057 3055 if (pp == NULL) {
3058 3056 anon_array_exit(&cookie);
3059 3057 ANON_LOCK_EXIT(&amp->a_rwlock);
3060 3058 return (ENOMEM);
3061 3059 }
3062 3060 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3063 3061 page_unlock(pp);
3064 3062 }
3065 3063 anon_array_exit(&cookie);
3066 3064 ANON_LOCK_EXIT(&amp->a_rwlock);
3067 3065 memidp->val[0] = (uintptr_t)ap;
3068 3066 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3069 3067 return (0);
3070 3068 }
3071 3069
3072 3070 /*
3073 3071 * Get memory allocation policy info for specified address in given segment
3074 3072 */
3075 3073 static lgrp_mem_policy_info_t *
3076 3074 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3077 3075 {
3078 3076 struct anon_map *amp;
3079 3077 ulong_t anon_index;
3080 3078 lgrp_mem_policy_info_t *policy_info;
3081 3079 struct shm_data *shm_data;
3082 3080
3083 3081 ASSERT(seg != NULL);
3084 3082
3085 3083 /*
3086 3084 * Get anon_map from segshm
3087 3085 *
3088 3086 * Assume that no lock needs to be held on anon_map, since
3089 3087 * it should be protected by its reference count which must be
3090 3088 * nonzero for an existing segment
3091 3089 * Need to grab readers lock on policy tree though
3092 3090 */
3093 3091 shm_data = (struct shm_data *)seg->s_data;
3094 3092 if (shm_data == NULL)
3095 3093 return (NULL);
3096 3094 amp = shm_data->shm_amp;
3097 3095 ASSERT(amp->refcnt != 0);
3098 3096
3099 3097 /*
3100 3098 * Get policy info
3101 3099 *
3102 3100 * Assume starting anon index of 0
3103 3101 */
3104 3102 anon_index = seg_page(seg, addr);
3105 3103 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3106 3104
3107 3105 return (policy_info);
3108 -}
3109 -
3110 -/*ARGSUSED*/
3111 -static int
3112 -segspt_shmcapable(struct seg *seg, segcapability_t capability)
3113 -{
3114 - return (0);
3115 3106 }