/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Machine frame segment driver.  This segment driver allows dom0 processes to
 * map pages of other domains or Xen (e.g. during save/restore).  ioctl()s on
 * the privcmd driver provide the MFN values backing each mapping, and we map
 * them into the process's address space at this time.  Demand-faulting is not
 * supported by this driver due to the requirements upon some of the ioctl()s.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/vmsystm.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/lgrp.h>
#include <sys/hypervisor.h>

#include <vm/page.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>

#include <vm/hat_pte.h>
#include <vm/hat_i86.h>
#include <vm/seg_mf.h>

#include <sys/fs/snode.h>

#define VTOCVP(vp)      (VTOS(vp)->s_commonvp)

typedef struct segmf_mfn_s {
        mfn_t           m_mfn;
} segmf_mfn_t;

/* g_flags */
#define SEGMF_GFLAGS_WR         0x1
#define SEGMF_GFLAGS_MAPPED     0x2
typedef struct segmf_gref_s {
        uint64_t        g_ptep;
        grant_ref_t     g_gref;
        uint32_t        g_flags;
        grant_handle_t  g_handle;
} segmf_gref_t;

typedef union segmf_mu_u {
        segmf_mfn_t     m;
        segmf_gref_t    g;
} segmf_mu_t;

typedef enum {
        SEGMF_MAP_EMPTY = 0,
        SEGMF_MAP_MFN,
        SEGMF_MAP_GREF
} segmf_map_type_t;

typedef struct segmf_map_s {
        segmf_map_type_t        t_type;
        segmf_mu_t              u;
} segmf_map_t;

struct segmf_data {
        kmutex_t        lock;
        struct vnode    *vp;
        uchar_t         prot;
        uchar_t         maxprot;
        size_t          softlockcnt;
        domid_t         domid;
        segmf_map_t     *map;
};

static struct seg_ops segmf_ops;

static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);

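/*
 * Allocate and zero the per-segment private data, then attach it and our
 * segment ops vector to the given seg.
 */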
static struct segmf_data *
segmf_data_zalloc(struct seg *seg)
{
        struct segmf_data *data = kmem_zalloc(sizeof (*data), KM_SLEEP);

        mutex_init(&data->lock, "segmf.lock", MUTEX_DEFAULT, NULL);
        seg->s_ops = &segmf_ops;
        seg->s_data = data;
        return (data);
}

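/*
 * Create a new segment on behalf of the privcmd driver: reserve the VA range
 * with the HAT, set up the per-segment data (every page starts out
 * SEGMF_MAP_EMPTY), and register the mapping against the common vnode.
 */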
int
segmf_create(struct seg *seg, void *args)
{
        struct segmf_crargs *a = args;
        struct segmf_data *data;
        struct as *as = seg->s_as;
        pgcnt_t i, npages = seg_pages(seg);
        int error;

        hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);

        data = segmf_data_zalloc(seg);
        data->vp = specfind(a->dev, VCHR);
        data->prot = a->prot;
        data->maxprot = a->maxprot;

        data->map = kmem_alloc(npages * sizeof (segmf_map_t), KM_SLEEP);
        for (i = 0; i < npages; i++) {
                data->map[i].t_type = SEGMF_MAP_EMPTY;
        }

        error = VOP_ADDMAP(VTOCVP(data->vp), 0, as, seg->s_base, seg->s_size,
            data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);

        if (error != 0)
                hat_unload(as->a_hat,
                    seg->s_base, seg->s_size, HAT_UNLOAD_UNMAP);
        return (error);
}

/*
 * Duplicate a seg and return new segment in newseg.
 */
static int
segmf_dup(struct seg *seg, struct seg *newseg)
{
        struct segmf_data *data = seg->s_data;
        struct segmf_data *ndata;
        pgcnt_t npages = seg_pages(newseg);
        size_t sz;

        ndata = segmf_data_zalloc(newseg);

        VN_HOLD(data->vp);
        ndata->vp = data->vp;
        ndata->prot = data->prot;
        ndata->maxprot = data->maxprot;
        ndata->domid = data->domid;

        sz = npages * sizeof (segmf_map_t);
        ndata->map = kmem_alloc(sz, KM_SLEEP);
        bcopy(data->map, ndata->map, sz);

        return (VOP_ADDMAP(VTOCVP(ndata->vp), 0, newseg->s_as,
            newseg->s_base, newseg->s_size, ndata->prot, ndata->maxprot,
            MAP_SHARED, CRED(), NULL));
}

/*
 * We only support unmapping the whole segment, and we automatically unlock
 * what we previously soft-locked.
 */
static int
segmf_unmap(struct seg *seg, caddr_t addr, size_t len)
{
        struct segmf_data *data = seg->s_data;
        offset_t off;

        if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
            (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
                panic("segmf_unmap");

        if (addr != seg->s_base || len != seg->s_size)
                return (ENOTSUP);

        hat_unload(seg->s_as->a_hat, addr, len,
            HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);

        off = (offset_t)seg_page(seg, addr);

        ASSERT(data->vp != NULL);

        (void) VOP_DELMAP(VTOCVP(data->vp), off, seg->s_as, addr, len,
            data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);

        seg_free(seg);
        return (0);
}

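/*
 * Free the per-segment data allocated by segmf_create()/segmf_dup().
 */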
static void
segmf_free(struct seg *seg)
{
        struct segmf_data *data = seg->s_data;
        pgcnt_t npages = seg_pages(seg);

        kmem_free(data->map, npages * sizeof (segmf_map_t));
        VN_RELE(data->vp);
        mutex_destroy(&data->lock);
        kmem_free(data, sizeof (*data));
}

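/*
 * Establish a single-page mapping to a foreign MFN, soft-locking it if
 * requested.  segmf_faultpage_debug can be set non-zero to trace the next
 * few calls via uprintf().
 */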
static int segmf_faultpage_debug = 0;
/*ARGSUSED*/
static int
segmf_faultpage(struct hat *hat, struct seg *seg, caddr_t addr,
    enum fault_type type, uint_t prot)
{
        struct segmf_data *data = seg->s_data;
        uint_t hat_flags = HAT_LOAD_NOCONSIST;
        mfn_t mfn;
        x86pte_t pte;
        segmf_map_t *map;
        uint_t idx;

        idx = seg_page(seg, addr);
        map = &data->map[idx];
        ASSERT(map->t_type == SEGMF_MAP_MFN);

        mfn = map->u.m.m_mfn;

        if (type == F_SOFTLOCK) {
                mutex_enter(&freemem_lock);
                data->softlockcnt++;
                mutex_exit(&freemem_lock);
                hat_flags |= HAT_LOAD_LOCK;
        } else
                hat_flags |= HAT_LOAD;

        if (segmf_faultpage_debug > 0) {
                uprintf("segmf_faultpage: addr %p domid %x mfn %lx prot %x\n",
                    (void *)addr, data->domid, mfn, prot);
                segmf_faultpage_debug--;
        }

        /*
         * Ask the HAT to load a throwaway mapping to page zero, then
         * overwrite it with our foreign domain mapping.  It gets removed
         * later via hat_unload().
         */
        hat_devload(hat, addr, MMU_PAGESIZE, (pfn_t)0,
            PROT_READ | HAT_UNORDERED_OK, hat_flags);

        pte = mmu_ptob((x86pte_t)mfn) | PT_VALID | PT_USER | PT_FOREIGN;
        if (prot & PROT_WRITE)
                pte |= PT_WRITABLE;

        if (HYPERVISOR_update_va_mapping_otherdomain((uintptr_t)addr, pte,
            UVMF_INVLPG | UVMF_ALL, data->domid) != 0) {
                hat_flags = HAT_UNLOAD_UNMAP;

                if (type == F_SOFTLOCK) {
                        hat_flags |= HAT_UNLOAD_UNLOCK;
                        mutex_enter(&freemem_lock);
                        data->softlockcnt--;
                        mutex_exit(&freemem_lock);
                }

                hat_unload(hat, addr, MMU_PAGESIZE, hat_flags);
                return (FC_MAKE_ERR(EFAULT));
        }

        return (0);
}

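/*
 * Convert a seg_rw fault type into the protection bits we need to check.
 */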
static int
seg_rw_to_prot(enum seg_rw rw)
{
        switch (rw) {
        case S_READ:
                return (PROT_READ);
        case S_WRITE:
                return (PROT_WRITE);
        case S_EXEC:
                return (PROT_EXEC);
        case S_OTHER:
        default:
                break;
        }
        return (PROT_READ | PROT_WRITE | PROT_EXEC);
}

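/*
 * Drop the soft locks on a range previously locked via F_SOFTLOCK faults,
 * and wake up anyone waiting in as_unmap() once the count drops to zero.
 */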
static void
segmf_softunlock(struct hat *hat, struct seg *seg, caddr_t addr, size_t len)
{
        struct segmf_data *data = seg->s_data;

        hat_unlock(hat, addr, len);

        mutex_enter(&freemem_lock);
        ASSERT(data->softlockcnt >= btopr(len));
        data->softlockcnt -= btopr(len);
        mutex_exit(&freemem_lock);

        if (data->softlockcnt == 0) {
                struct as *as = seg->s_as;

                if (AS_ISUNMAPWAIT(as)) {
                        mutex_enter(&as->a_contents);
                        if (AS_ISUNMAPWAIT(as)) {
                                AS_CLRUNMAPWAIT(as);
                                cv_broadcast(&as->a_cv);
                        }
                        mutex_exit(&as->a_contents);
                }
        }
}

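/*
 * Fault in (and optionally soft-lock) a range of foreign MFN mappings,
 * backing out any pages already done if one of them fails.
 */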
static int
segmf_fault_range(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
        struct segmf_data *data = seg->s_data;
        int error = 0;
        caddr_t a;

        if ((data->prot & seg_rw_to_prot(rw)) == 0)
                return (FC_PROT);

        /* loop over the address range handling each fault */

        for (a = addr; a < addr + len; a += PAGESIZE) {
                error = segmf_faultpage(hat, seg, a, type, data->prot);
                if (error != 0)
                        break;
        }

        if (error != 0 && type == F_SOFTLOCK) {
                size_t done = (size_t)(a - addr);

                /*
                 * Undo what's been done so far.
                 */
                if (done > 0)
                        segmf_softunlock(hat, seg, addr, done);
        }

        return (error);
}

/*
 * We never demand-fault for seg_mf.
 */
/*ARGSUSED*/
static int
segmf_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
        return (FC_MAKE_ERR(EFAULT));
}

/*ARGSUSED*/
static int
segmf_faulta(struct seg *seg, caddr_t addr)
{
        return (0);
}

/*ARGSUSED*/
static int
segmf_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
        return (EINVAL);
}

/*ARGSUSED*/
static int
segmf_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
        return (EINVAL);
}

/*ARGSUSED*/
static int
segmf_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
        return (-1);
}

/*ARGSUSED*/
static int
segmf_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
        return (0);
}

/*
 * XXPV Hmm.  Should we say that mf mappings are "in core?"
 */

/*ARGSUSED*/
static size_t
segmf_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
        size_t v;

        for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len;
            len -= PAGESIZE, v += PAGESIZE)
                *vec++ = 1;
        return (v);
}

/*ARGSUSED*/
static int
segmf_lockop(struct seg *seg, caddr_t addr,
    size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
{
        return (0);
}

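/*
 * Every page in the segment carries the same protection, so just fill in
 * the caller's vector with data->prot.
 */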
static int
segmf_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
        struct segmf_data *data = seg->s_data;
        pgcnt_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

        if (pgno != 0) {
                do {
                        protv[--pgno] = data->prot;
                } while (pgno != 0);
        }
        return (0);
}

static u_offset_t
segmf_getoffset(struct seg *seg, caddr_t addr)
{
        return (addr - seg->s_base);
}

/*ARGSUSED*/
static int
segmf_gettype(struct seg *seg, caddr_t addr)
{
        return (MAP_SHARED);
}

/*ARGSUSED1*/
static int
segmf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
        struct segmf_data *data = seg->s_data;

        *vpp = VTOCVP(data->vp);
        return (0);
}

/*ARGSUSED*/
static int
segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
        return (0);
}

/*ARGSUSED*/
static int
segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
        return (ENOTSUP);
}

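/*
 * Return a unique memid for a page: the common vnode pointer plus the
 * page's index within the segment.
 */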
static int
segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
{
        struct segmf_data *data = seg->s_data;

        memid->val[0] = (uintptr_t)VTOCVP(data->vp);
        memid->val[1] = (uintptr_t)seg_page(seg, addr);
        return (0);
}

/*
 * Add a set of contiguous foreign MFNs to the segment, soft-locking them.  The
 * pre-faulting is necessary due to live migration; in particular we must
 * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
 * later on a bad MFN.  Whilst this isn't necessary for the other MMAP
 * ioctl()s, we lock them too, as they should be transitory.
 */
int
segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
    pgcnt_t pgcnt, domid_t domid)
{
        struct segmf_data *data = seg->s_data;
        pgcnt_t base;
        faultcode_t fc;
        pgcnt_t i;
        int error = 0;

        if (seg->s_ops != &segmf_ops)
                return (EINVAL);

        /*
         * Don't mess with dom0.
         *
         * Only allow the domid to be set once for the segment.
         * After that, attempts to add mappings to this segment for
         * other domains explicitly fail.
         */

        if (domid == 0 || domid == DOMID_SELF)
                return (EACCES);

        mutex_enter(&data->lock);

        if (data->domid == 0)
                data->domid = domid;

        if (data->domid != domid) {
                error = EINVAL;
                goto out;
        }

        base = seg_page(seg, addr);

        for (i = 0; i < pgcnt; i++) {
                data->map[base + i].t_type = SEGMF_MAP_MFN;
                data->map[base + i].u.m.m_mfn = mfn++;
        }

        fc = segmf_fault_range(seg->s_as->a_hat, seg, addr,
            pgcnt * MMU_PAGESIZE, F_SOFTLOCK, S_OTHER);

        if (fc != 0) {
                error = fc_decode(fc);
                for (i = 0; i < pgcnt; i++) {
                        data->map[base + i].t_type = SEGMF_MAP_EMPTY;
                }
        }

out:
        mutex_exit(&data->lock);
        return (error);
}

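/*
 * Map a set of grant references belonging to another domain into the segment
 * at addr.  The gref for each page is recorded in the map array and the
 * mappings are then faulted in via segmf_fault_gref_range().
 */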
int
segmf_add_grefs(struct seg *seg, caddr_t addr, uint_t flags,
    grant_ref_t *grefs, uint_t cnt, domid_t domid)
{
        struct segmf_data *data;
        segmf_map_t *map;
        faultcode_t fc;
        uint_t idx;
        uint_t i;
        int e;

        if (seg->s_ops != &segmf_ops)
                return (EINVAL);

        /*
         * Don't mess with dom0.
         *
         * Only allow the domid to be set once for the segment.
         * After that, attempts to add mappings to this segment for
         * other domains explicitly fail.
         */

        if (domid == 0 || domid == DOMID_SELF)
                return (EACCES);

        data = seg->s_data;
        idx = seg_page(seg, addr);
        map = &data->map[idx];
        e = 0;

        mutex_enter(&data->lock);

        if (data->domid == 0)
                data->domid = domid;

        if (data->domid != domid) {
                e = EINVAL;
                goto out;
        }

        /* store away the grefs passed in then fault in the pages */
        for (i = 0; i < cnt; i++) {
                map[i].t_type = SEGMF_MAP_GREF;
                map[i].u.g.g_gref = grefs[i];
                map[i].u.g.g_handle = 0;
                map[i].u.g.g_flags = 0;
                if (flags & SEGMF_GREF_WR) {
                        map[i].u.g.g_flags |= SEGMF_GFLAGS_WR;
                }
        }
        fc = segmf_fault_gref_range(seg, addr, cnt);
        if (fc != 0) {
                e = fc_decode(fc);
                /* back out the entries we set up above, relative to idx */
                for (i = 0; i < cnt; i++) {
                        map[i].t_type = SEGMF_MAP_EMPTY;
                }
        }

out:
        mutex_exit(&data->lock);
        return (e);
}

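/*
 * Tear down up to SEGMF_MAX_GREFS previously added grant references starting
 * at addr: every entry still mapped is handed to the hypervisor for
 * unmapping, and each entry in the range is marked empty.
 */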
int
segmf_release_grefs(struct seg *seg, caddr_t addr, uint_t cnt)
{
        gnttab_unmap_grant_ref_t mapop[SEGMF_MAX_GREFS];
        struct segmf_data *data;
        segmf_map_t *map;
        uint_t idx;
        long e;
        int i;
        int n;

        if (cnt > SEGMF_MAX_GREFS) {
                return (-1);
        }

        idx = seg_page(seg, addr);
        data = seg->s_data;
        map = &data->map[idx];

        bzero(mapop, sizeof (gnttab_unmap_grant_ref_t) * cnt);

        /*
         * for each entry which isn't empty and is currently mapped,
         * set it up for an unmap then mark them empty.
         */
        n = 0;
        for (i = 0; i < cnt; i++) {
                ASSERT(map[i].t_type != SEGMF_MAP_MFN);
                if ((map[i].t_type == SEGMF_MAP_GREF) &&
                    (map[i].u.g.g_flags & SEGMF_GFLAGS_MAPPED)) {
                        mapop[n].handle = map[i].u.g.g_handle;
                        mapop[n].host_addr = map[i].u.g.g_ptep;
                        mapop[n].dev_bus_addr = 0;
                        n++;
                }
                map[i].t_type = SEGMF_MAP_EMPTY;
        }

        /* if there's nothing to unmap, just return */
        if (n == 0) {
                return (0);
        }

        e = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &mapop, n);
        if (e != 0) {
                return (-1);
        }

        return (0);
}

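/*
 * Record the machine address of the PTE that will map the page at addr;
 * segmf_fault_gref_range() passes this to the hypervisor when it maps the
 * corresponding grant reference.
 */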
void
segmf_add_gref_pte(struct seg *seg, caddr_t addr, uint64_t pte_ma)
{
        struct segmf_data *data;
        uint_t idx;

        idx = seg_page(seg, addr);
        data = seg->s_data;

        data->map[idx].u.g.g_ptep = pte_ma;
}

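/*
 * Ask the hypervisor to map up to SEGMF_MAX_GREFS grant references into the
 * user address space, using the PTE machine addresses previously recorded by
 * segmf_add_gref_pte().
 */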
static int
segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t cnt)
{
        gnttab_map_grant_ref_t mapop[SEGMF_MAX_GREFS];
        struct segmf_data *data;
        segmf_map_t *map;
        uint_t idx;
        int e;
        int i;

        if (cnt > SEGMF_MAX_GREFS) {
                return (-1);
        }

        data = seg->s_data;
        idx = seg_page(seg, addr);
        map = &data->map[idx];

        bzero(mapop, sizeof (gnttab_map_grant_ref_t) * cnt);

        ASSERT(map->t_type == SEGMF_MAP_GREF);

        /*
         * Map each page passed in into the user app's AS.  We do this by
         * passing the MA of the actual pte of the mapping to the hypervisor.
         */
        for (i = 0; i < cnt; i++) {
                mapop[i].host_addr = map[i].u.g.g_ptep;
                mapop[i].dom = data->domid;
                mapop[i].ref = map[i].u.g.g_gref;
                mapop[i].flags = GNTMAP_host_map | GNTMAP_application_map |
                    GNTMAP_contains_pte;
                if (!(map[i].u.g.g_flags & SEGMF_GFLAGS_WR)) {
                        mapop[i].flags |= GNTMAP_readonly;
                }
        }
        e = xen_map_gref(GNTTABOP_map_grant_ref, mapop, cnt, B_TRUE);
        if ((e != 0) || (mapop[0].status != GNTST_okay)) {
                return (FC_MAKE_ERR(EFAULT));
        }

        /* save handle for segmf_release_grefs() and mark it as mapped */
        for (i = 0; i < cnt; i++) {
                ASSERT(mapop[i].status == GNTST_okay);
                map[i].u.g.g_handle = mapop[i].handle;
                map[i].u.g.g_flags |= SEGMF_GFLAGS_MAPPED;
        }

        return (0);
}

static struct seg_ops segmf_ops = {
        .dup            = segmf_dup,
        .unmap          = segmf_unmap,
        .free           = segmf_free,
        .fault          = segmf_fault,
        .faulta         = segmf_faulta,
        .setprot        = segmf_setprot,
        .checkprot      = segmf_checkprot,
        .kluster        = segmf_kluster,
        .sync           = segmf_sync,
        .incore         = segmf_incore,
        .lockop         = segmf_lockop,
        .getprot        = segmf_getprot,
        .getoffset      = segmf_getoffset,
        .gettype        = segmf_gettype,
        .getvp          = segmf_getvp,
        .advise         = segmf_advise,
        .pagelock       = segmf_pagelock,
        .getmemid       = segmf_getmemid,
};