482 return (ENOTSUP);
483 }
484
485 /*ARGSUSED*/
486 static int
487 segmf_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
488 {
489 return (ENOTSUP);
490 }
491
492 static int
493 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
494 {
495 struct segmf_data *data = seg->s_data;
496
497 memid->val[0] = (uintptr_t)VTOCVP(data->vp);
498 memid->val[1] = (uintptr_t)seg_page(seg, addr);
499 return (0);
500 }
501
502 /*ARGSUSED*/
503 static int
504 segmf_capable(struct seg *seg, segcapability_t capability)
505 {
506 return (0);
507 }
508
509 /*
510 * Add a set of contiguous foreign MFNs to the segment, soft-locking them. The
511 * pre-faulting is necessary due to live migration; in particular we must
512 * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
513 * later on a bad MFN. Whilst this isn't necessary for the other MMAP
514 * ioctl()s, we lock them too, as they should be transitory.
515 */
516 int
517 segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
518 pgcnt_t pgcnt, domid_t domid)
519 {
520 struct segmf_data *data = seg->s_data;
521 pgcnt_t base;
522 faultcode_t fc;
523 pgcnt_t i;
524 int error = 0;
525
526 if (seg->s_ops != &segmf_ops)
527 return (EINVAL);
528
754 .dup = segmf_dup,
755 .unmap = segmf_unmap,
756 .free = segmf_free,
757 .fault = segmf_fault,
758 .faulta = segmf_faulta,
759 .setprot = segmf_setprot,
760 .checkprot = segmf_checkprot,
761 .kluster = segmf_kluster,
762 .sync = segmf_sync,
763 .incore = segmf_incore,
764 .lockop = segmf_lockop,
765 .getprot = segmf_getprot,
766 .getoffset = segmf_getoffset,
767 .gettype = segmf_gettype,
768 .getvp = segmf_getvp,
769 .advise = segmf_advise,
770 .dump = segmf_dump,
771 .pagelock = segmf_pagelock,
772 .setpagesize = segmf_setpagesize,
773 .getmemid = segmf_getmemid,
774 .capable = segmf_capable,
775 };
|
482 return (ENOTSUP);
483 }
484
485 /*ARGSUSED*/
486 static int
487 segmf_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
488 {
489 return (ENOTSUP);
490 }
491
492 static int
493 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
494 {
495 struct segmf_data *data = seg->s_data;
496
497 memid->val[0] = (uintptr_t)VTOCVP(data->vp);
498 memid->val[1] = (uintptr_t)seg_page(seg, addr);
499 return (0);
500 }
501
502 /*
503 * Add a set of contiguous foreign MFNs to the segment, soft-locking them. The
504 * pre-faulting is necessary due to live migration; in particular we must
505 * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
506 * later on a bad MFN. Whilst this isn't necessary for the other MMAP
507 * ioctl()s, we lock them too, as they should be transitory.
508 */
509 int
510 segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
511 pgcnt_t pgcnt, domid_t domid)
512 {
513 struct segmf_data *data = seg->s_data;
514 pgcnt_t base;
515 faultcode_t fc;
516 pgcnt_t i;
517 int error = 0;
518
519 if (seg->s_ops != &segmf_ops)
520 return (EINVAL);
521
747 .dup = segmf_dup,
748 .unmap = segmf_unmap,
749 .free = segmf_free,
750 .fault = segmf_fault,
751 .faulta = segmf_faulta,
752 .setprot = segmf_setprot,
753 .checkprot = segmf_checkprot,
754 .kluster = segmf_kluster,
755 .sync = segmf_sync,
756 .incore = segmf_incore,
757 .lockop = segmf_lockop,
758 .getprot = segmf_getprot,
759 .getoffset = segmf_getoffset,
760 .gettype = segmf_gettype,
761 .getvp = segmf_getvp,
762 .advise = segmf_advise,
763 .dump = segmf_dump,
764 .pagelock = segmf_pagelock,
765 .setpagesize = segmf_setpagesize,
766 .getmemid = segmf_getmemid,
767 };
|