 * trap with a tlb miss.
 *
 * In order to help limit the number of segments we merge adjacent nofault
 * segments into a single segment. If we get a large number of segments
 * we'll also try to delete a random other nf segment.
 */
/* ARGSUSED */
int
segnf_create(struct seg *seg, void *argsp)
{
        uint_t prot;
        pgcnt_t vacpgs;
        u_offset_t off = 0;
        caddr_t vaddr = NULL;
        int i, color;
        struct seg *s1;
        struct seg *s2;
        size_t size;
        struct as *as = seg->s_as;

        ASSERT(as && AS_WRITE_HELD(as, &as->a_lock));

        /*
         * Need a page per virtual color or just 1 if no vac.
         */
        mutex_enter(&segnf_lock);
        if (nfpp == NULL) {
                struct seg kseg;

                vacpgs = 1;
                if (shm_alignment > PAGESIZE) {
                        vacpgs = shm_alignment >> PAGESHIFT;
                }
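                /*
                 * For example, on a configuration with 8K pages
                 * (PAGESHIFT == 13) whose virtual address cache forces
                 * shm_alignment to 64K, vacpgs becomes 64K >> 13 = 8,
                 * i.e. one no-fault page per virtual color.
                 */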

                nfpp = kmem_alloc(sizeof (*nfpp) * vacpgs, KM_SLEEP);

                kseg.s_as = &kas;
                for (i = 0; i < vacpgs; i++, off += PAGESIZE,
                    vaddr += PAGESIZE) {
                        nfpp[i] = page_create_va(&nfvp, off, PAGESIZE,
                            PG_WAIT | PG_NORELOC, &kseg, vaddr);
                        /* ... */

        return (0);
}
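
/*
 * Illustrative sketch only (not part of the original file): a segment
 * backed by this driver is created the same way as any other seg driver's,
 * by handing the creation routine to as_map().  as_map() acquires the
 * address space lock as writer before calling segnf_create(), which is
 * what the AS_WRITE_HELD() assertion above relies on.  The SEGNF_EXAMPLE
 * guard and the wrapper name below are hypothetical; only as_map(), &kas,
 * and segnf_create() come from the surrounding code.
 */
#ifdef  SEGNF_EXAMPLE
static int
segnf_example_map(caddr_t addr, size_t len)
{
        /* segnf_create() takes no creation arguments, so argsp is NULL. */
        return (as_map(&kas, addr, len, segnf_create, NULL));
}
#endif  /* SEGNF_EXAMPLE */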

/*
 * Never really need "No fault" segments, so they aren't dup'd.
 */
/* ARGSUSED */
static int
segnf_dup(struct seg *seg, struct seg *newseg)
{
        panic("segnf_dup");
        return (0);
}

/*
 * Split a segment at addr for length len.
 */
static int
segnf_unmap(struct seg *seg, caddr_t addr, size_t len)
{
        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

        /*
         * Check for bad sizes.
         */
        if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
            (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
                cmn_err(CE_PANIC, "segnf_unmap: bad unmap size");
        }

        /*
         * Unload any hardware translations in the range to be taken out.
         */
        hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD_UNMAP);

        if (addr == seg->s_base && len == seg->s_size) {
                /*
                 * Freeing entire segment.
                 */
                seg_free(seg);
        } else if (addr == seg->s_base) {
                /* ... */
                /*
                 * s_data can't be NULL because of ASSERTs in common VM code.
                 */
                nseg->s_ops = seg->s_ops;
                nseg->s_data = nseg;
                nseg->s_flags |= S_PURGE;
                mutex_enter(&seg->s_as->a_contents);
                seg->s_as->a_flags |= AS_NEEDSPURGE;
                mutex_exit(&seg->s_as->a_contents);
        }

        return (0);
}

/*
 * Free a segment.
 */
static void
segnf_free(struct seg *seg)
{
        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
}

/*
 * No faults allowed on segnf.
 */
static faultcode_t
segnf_nomap(void)
{
        return (FC_NOMAP);
}

/* ARGSUSED */
static int
segnf_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
        return (EACCES);
}

/* ARGSUSED */
static int
segnf_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
        uint_t sprot;
        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

        sprot = seg->s_as == &kas ? PROT_READ : PROT_READ|PROT_USER;
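        /*
         * e.g. a request for PROT_READ succeeds, while PROT_READ|PROT_WRITE
         * (or PROT_EXEC) is refused with EACCES: no-fault pages are
         * read-only.
         */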
        return ((prot & sprot) == prot ? 0 : EACCES);
}

static void
segnf_badop(void)
{
        panic("segnf_badop");
        /*NOTREACHED*/
}

static int
segnf_nop(void)
{
        return (0);
}

static int
segnf_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
        size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
        size_t p;
        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

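        /*
         * Every page of a no-fault segment is reported read-only, matching
         * the PROT_READ-only policy enforced by segnf_checkprot() above.
         */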
        for (p = 0; p < pgno; ++p)
                protv[p] = PROT_READ;
        return (0);
}

/* ARGSUSED */
static u_offset_t
segnf_getoffset(struct seg *seg, caddr_t addr)
{
        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

        return ((u_offset_t)0);
}

/* ARGSUSED */
static int
segnf_gettype(struct seg *seg, caddr_t addr)
{
        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

        return (MAP_SHARED);
}

/* ARGSUSED */
static int
segnf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

        *vpp = &nfvp;
        return (0);
}

/*
 * segnf pages are not dumped, so we just return
 */
/* ARGSUSED */
static void
segnf_dump(struct seg *seg)
{}

/*ARGSUSED*/
static int
segnf_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
        return (ENOTSUP);
}