453 }
454
/*
 * seg_ops getvp entry point: report the vnode backing this segment.
 * VTOCVP presumably maps the segment's private vnode to its common
 * (specfs) vnode -- NOTE(review): confirm against the VTOCVP definition.
 * addr is unused, hence the ARGSUSED1 lint tag.
 */
455 /*ARGSUSED1*/
456 static int
457 segmf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
458 {
459 struct segmf_data *data = seg->s_data; /* driver-private segment data */
460
461 *vpp = VTOCVP(data->vp);
462 return (0); /* cannot fail */
463 }
464
/*
 * seg_ops advise entry point: memory-usage advice is ignored for this
 * segment type; unconditionally report success.
 */
465 /*ARGSUSED*/
466 static int
467 segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
468 {
469 return (0);
470 }
471
/*
 * seg_ops dump entry point: deliberately a no-op -- presumably this
 * segment type contributes nothing to a crash dump.
 */
472 /*ARGSUSED*/
473 static void
474 segmf_dump(struct seg *seg)
475 {}
476
/*
 * seg_ops pagelock entry point: page locking is not supported by this
 * segment driver, so every request fails with ENOTSUP.
 */
477 /*ARGSUSED*/
478 static int
479 segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
480 struct page ***ppp, enum lock_type type, enum seg_rw rw)
481 {
482 return (ENOTSUP);
483 }
484
/*
 * seg_ops getmemid entry point: construct a memory id for the page at
 * addr.  val[0] identifies the backing object (the common vnode via
 * VTOCVP); val[1] is taken from seg_page(), which presumably yields the
 * page index of addr within the segment -- TODO confirm.
 */
485 static int
486 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
487 {
488 struct segmf_data *data = seg->s_data; /* driver-private segment data */
489
490 memid->val[0] = (uintptr_t)VTOCVP(data->vp);
491 memid->val[1] = (uintptr_t)seg_page(seg, addr);
492 return (0); /* cannot fail */
493 }
494
495 /*
496 * Add a set of contiguous foreign MFNs to the segment, soft-locking them. The
497 * pre-faulting is necessary due to live migration; in particular we must
736 return (0);
737 }
738
/*
 * seg_ops vector for the segmf segment driver.  Designated initializers
 * are used, so any seg_ops member not listed here is implicitly NULL.
 */
739 static struct seg_ops segmf_ops = {
740 .dup = segmf_dup,
741 .unmap = segmf_unmap,
742 .free = segmf_free,
743 .fault = segmf_fault,
744 .faulta = segmf_faulta,
745 .setprot = segmf_setprot,
746 .checkprot = segmf_checkprot,
747 .kluster = segmf_kluster,
748 .sync = segmf_sync,
749 .incore = segmf_incore,
750 .lockop = segmf_lockop,
751 .getprot = segmf_getprot,
752 .getoffset = segmf_getoffset,
753 .gettype = segmf_gettype,
754 .getvp = segmf_getvp,
755 .advise = segmf_advise,
756 .dump = segmf_dump,
757 .pagelock = segmf_pagelock,
758 .getmemid = segmf_getmemid,
759 };
|
453 }
454
/*
 * seg_ops getvp entry point: report the vnode backing this segment.
 * VTOCVP presumably maps the segment's private vnode to its common
 * (specfs) vnode -- NOTE(review): confirm against the VTOCVP definition.
 * addr is unused, hence the ARGSUSED1 lint tag.
 */
455 /*ARGSUSED1*/
456 static int
457 segmf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
458 {
459 struct segmf_data *data = seg->s_data; /* driver-private segment data */
460
461 *vpp = VTOCVP(data->vp);
462 return (0); /* cannot fail */
463 }
464
/*
 * seg_ops advise entry point: memory-usage advice is ignored for this
 * segment type; unconditionally report success.
 */
465 /*ARGSUSED*/
466 static int
467 segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
468 {
469 return (0);
470 }
471
/*
 * seg_ops pagelock entry point: page locking is not supported by this
 * segment driver, so every request fails with ENOTSUP.
 */
472 /*ARGSUSED*/
473 static int
474 segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
475 struct page ***ppp, enum lock_type type, enum seg_rw rw)
476 {
477 return (ENOTSUP);
478 }
479
/*
 * seg_ops getmemid entry point: construct a memory id for the page at
 * addr.  val[0] identifies the backing object (the common vnode via
 * VTOCVP); val[1] is taken from seg_page(), which presumably yields the
 * page index of addr within the segment -- TODO confirm.
 */
480 static int
481 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
482 {
483 struct segmf_data *data = seg->s_data; /* driver-private segment data */
484
485 memid->val[0] = (uintptr_t)VTOCVP(data->vp);
486 memid->val[1] = (uintptr_t)seg_page(seg, addr);
487 return (0); /* cannot fail */
488 }
489
490 /*
491 * Add a set of contiguous foreign MFNs to the segment, soft-locking them. The
492 * pre-faulting is necessary due to live migration; in particular we must
731 return (0);
732 }
733
/*
 * seg_ops vector for the segmf segment driver.  Designated initializers
 * are used, so any seg_ops member not listed here is implicitly NULL
 * (note: no .dump entry is provided in this vector).
 */
734 static struct seg_ops segmf_ops = {
735 .dup = segmf_dup,
736 .unmap = segmf_unmap,
737 .free = segmf_free,
738 .fault = segmf_fault,
739 .faulta = segmf_faulta,
740 .setprot = segmf_setprot,
741 .checkprot = segmf_checkprot,
742 .kluster = segmf_kluster,
743 .sync = segmf_sync,
744 .incore = segmf_incore,
745 .lockop = segmf_lockop,
746 .getprot = segmf_getprot,
747 .getoffset = segmf_getoffset,
748 .gettype = segmf_gettype,
749 .getvp = segmf_getvp,
750 .advise = segmf_advise,
751 .pagelock = segmf_pagelock,
752 .getmemid = segmf_getmemid,
753 };
|