/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Machine frame segment driver.  This segment driver allows dom0 processes to
 * map pages of other domains or Xen (e.g. during save/restore).  ioctl()s on
 * the privcmd driver provide the MFN values backing each mapping, and we map
 * them into the process's address space at this time.  Demand-faulting is not
 * supported by this driver due to the requirements upon some of the ioctl()s.
 */
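
/*
 * Rough call flow (illustrative; the consumer is the privcmd driver noted
 * above): the segment is created via segmf_create() when the process maps
 * the device, and the mapping ioctl()s then call segmf_add_mfns() or
 * segmf_add_grefs() to attach foreign MFNs or grant references, which are
 * faulted in and soft-locked immediately rather than on demand.
 */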

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/vmsystm.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/lgrp.h>
#include <sys/hypervisor.h>

#include <vm/page.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>

#include <vm/hat_pte.h>
#include <vm/hat_i86.h>
#include <vm/seg_mf.h>

#include <sys/fs/snode.h>

#define VTOCVP(vp)      (VTOS(vp)->s_commonvp)

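/*
 * Per-page bookkeeping.  Each page of the segment has a segmf_map_t entry
 * which is either empty, backed by a foreign MFN, or backed by a grant
 * reference (for which we also record the PTE machine address, the grant
 * handle and mapping flags so the grant can be unmapped later).
 */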
typedef struct segmf_mfn_s {
        mfn_t           m_mfn;
} segmf_mfn_t;

/* g_flags */
#define SEGMF_GFLAGS_WR         0x1
#define SEGMF_GFLAGS_MAPPED     0x2
typedef struct segmf_gref_s {
        uint64_t        g_ptep;
        grant_ref_t     g_gref;
        uint32_t        g_flags;
        grant_handle_t  g_handle;
} segmf_gref_t;

typedef union segmf_mu_u {
        segmf_mfn_t     m;
        segmf_gref_t    g;
} segmf_mu_t;

typedef enum {
        SEGMF_MAP_EMPTY = 0,
        SEGMF_MAP_MFN,
        SEGMF_MAP_GREF
} segmf_map_type_t;

typedef struct segmf_map_s {
        segmf_map_type_t        t_type;
        segmf_mu_t              u;
} segmf_map_t;

struct segmf_data {
        kmutex_t        lock;
        struct vnode    *vp;
        uchar_t         prot;
        uchar_t         maxprot;
        size_t          softlockcnt;
        domid_t         domid;
        segmf_map_t     *map;
};

static struct seg_ops segmf_ops;

static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);

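/*
 * Allocate the per-segment private data and attach it, along with our ops
 * vector, to the segment.
 */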
static struct segmf_data *
segmf_data_zalloc(struct seg *seg)
{
        struct segmf_data *data = kmem_zalloc(sizeof (*data), KM_SLEEP);

        mutex_init(&data->lock, "segmf.lock", MUTEX_DEFAULT, NULL);
        seg->s_ops = &segmf_ops;
        seg->s_data = data;
        return (data);
}

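/*
 * Create a segmf segment: reserve the VA range with the HAT, allocate an
 * all-empty per-page map, and register the mapping against the device's
 * common vnode with VOP_ADDMAP().
 */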
int
segmf_create(struct seg *seg, void *args)
{
        struct segmf_crargs *a = args;
        struct segmf_data *data;
        struct as *as = seg->s_as;
        pgcnt_t i, npages = seg_pages(seg);
        int error;

        hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);

        data = segmf_data_zalloc(seg);
        data->vp = specfind(a->dev, VCHR);
        data->prot = a->prot;
        data->maxprot = a->maxprot;

        data->map = kmem_alloc(npages * sizeof (segmf_map_t), KM_SLEEP);
        for (i = 0; i < npages; i++) {
                data->map[i].t_type = SEGMF_MAP_EMPTY;
        }

        error = VOP_ADDMAP(VTOCVP(data->vp), 0, as, seg->s_base, seg->s_size,
            data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);

        if (error != 0)
                hat_unload(as->a_hat,
                    seg->s_base, seg->s_size, HAT_UNLOAD_UNMAP);
        return (error);
}

/*
 * Duplicate a seg and return new segment in newseg.
 */
static int
segmf_dup(struct seg *seg, struct seg *newseg)
{
        struct segmf_data *data = seg->s_data;
        struct segmf_data *ndata;
        pgcnt_t npages = seg_pages(newseg);
        size_t sz;

        ndata = segmf_data_zalloc(newseg);

        VN_HOLD(data->vp);
        ndata->vp = data->vp;
        ndata->prot = data->prot;
        ndata->maxprot = data->maxprot;
        ndata->domid = data->domid;

        sz = npages * sizeof (segmf_map_t);
        ndata->map = kmem_alloc(sz, KM_SLEEP);
        bcopy(data->map, ndata->map, sz);

        return (VOP_ADDMAP(VTOCVP(ndata->vp), 0, newseg->s_as,
            newseg->s_base, newseg->s_size, ndata->prot, ndata->maxprot,
            MAP_SHARED, CRED(), NULL));
}

/*
 * We only support unmapping the whole segment, and we automatically unlock
 * what we previously soft-locked.
 */
static int
segmf_unmap(struct seg *seg, caddr_t addr, size_t len)
{
        struct segmf_data *data = seg->s_data;
        offset_t off;

        if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
            (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
                panic("segmf_unmap");

        if (addr != seg->s_base || len != seg->s_size)
                return (ENOTSUP);

        hat_unload(seg->s_as->a_hat, addr, len,
            HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);

        off = (offset_t)seg_page(seg, addr);

        ASSERT(data->vp != NULL);

        (void) VOP_DELMAP(VTOCVP(data->vp), off, seg->s_as, addr, len,
            data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);

        seg_free(seg);
        return (0);
}

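/*
 * Free the per-page map, drop the vnode hold taken at create/dup time, and
 * release the private data.
 */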
static void
segmf_free(struct seg *seg)
{
        struct segmf_data *data = seg->s_data;
        pgcnt_t npages = seg_pages(seg);

        kmem_free(data->map, npages * sizeof (segmf_map_t));
        VN_RELE(data->vp);
        mutex_destroy(&data->lock);
        kmem_free(data, sizeof (*data));
}

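/*
 * segmf_faultpage() establishes a single foreign MFN mapping at 'addr',
 * locking it in the HAT for F_SOFTLOCK faults.  Setting
 * segmf_faultpage_debug emits that many diagnostic uprintf()s.
 */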
static int segmf_faultpage_debug = 0;
/*ARGSUSED*/
static int
segmf_faultpage(struct hat *hat, struct seg *seg, caddr_t addr,
    enum fault_type type, uint_t prot)
{
        struct segmf_data *data = seg->s_data;
        uint_t hat_flags = HAT_LOAD_NOCONSIST;
        mfn_t mfn;
        x86pte_t pte;
        segmf_map_t *map;
        uint_t idx;

        idx = seg_page(seg, addr);
        map = &data->map[idx];
        ASSERT(map->t_type == SEGMF_MAP_MFN);

        mfn = map->u.m.m_mfn;

        if (type == F_SOFTLOCK) {
                mutex_enter(&freemem_lock);
                data->softlockcnt++;
                mutex_exit(&freemem_lock);
                hat_flags |= HAT_LOAD_LOCK;
        } else
                hat_flags |= HAT_LOAD;

        if (segmf_faultpage_debug > 0) {
                uprintf("segmf_faultpage: addr %p domid %x mfn %lx prot %x\n",
                    (void *)addr, data->domid, mfn, prot);
                segmf_faultpage_debug--;
        }

        /*
         * Ask the HAT to load a throwaway mapping to page zero, then
         * overwrite it with our foreign domain mapping.  It gets removed
         * later via hat_unload().
         */
        hat_devload(hat, addr, MMU_PAGESIZE, (pfn_t)0,
            PROT_READ | HAT_UNORDERED_OK, hat_flags);

        pte = mmu_ptob((x86pte_t)mfn) | PT_VALID | PT_USER | PT_FOREIGN;
        if (prot & PROT_WRITE)
                pte |= PT_WRITABLE;

        if (HYPERVISOR_update_va_mapping_otherdomain((uintptr_t)addr, pte,
            UVMF_INVLPG | UVMF_ALL, data->domid) != 0) {
                hat_flags = HAT_UNLOAD_UNMAP;

                if (type == F_SOFTLOCK) {
                        hat_flags |= HAT_UNLOAD_UNLOCK;
                        mutex_enter(&freemem_lock);
                        data->softlockcnt--;
                        mutex_exit(&freemem_lock);
                }

                hat_unload(hat, addr, MMU_PAGESIZE, hat_flags);
                return (FC_MAKE_ERR(EFAULT));
        }

        return (0);
}

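/*
 * Convert a seg_rw access type into protection bits; S_OTHER (and anything
 * unrecognized) maps to all of them.
 */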
static int
seg_rw_to_prot(enum seg_rw rw)
{
        switch (rw) {
        case S_READ:
                return (PROT_READ);
        case S_WRITE:
                return (PROT_WRITE);
        case S_EXEC:
                return (PROT_EXEC);
        case S_OTHER:
        default:
                break;
        }
        return (PROT_READ | PROT_WRITE | PROT_EXEC);
}

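/*
 * Undo a prior F_SOFTLOCK over the range: unlock the HAT mappings, drop
 * softlockcnt and, once it reaches zero, wake up anyone waiting to unmap
 * the address space.
 */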
static void
segmf_softunlock(struct hat *hat, struct seg *seg, caddr_t addr, size_t len)
{
        struct segmf_data *data = seg->s_data;

        hat_unlock(hat, addr, len);

        mutex_enter(&freemem_lock);
        ASSERT(data->softlockcnt >= btopr(len));
        data->softlockcnt -= btopr(len);
        mutex_exit(&freemem_lock);

        if (data->softlockcnt == 0) {
                struct as *as = seg->s_as;

                if (AS_ISUNMAPWAIT(as)) {
                        mutex_enter(&as->a_contents);
                        if (AS_ISUNMAPWAIT(as)) {
                                AS_CLRUNMAPWAIT(as);
                                cv_broadcast(&as->a_cv);
                        }
                        mutex_exit(&as->a_contents);
                }
        }
}

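/*
 * Fault in (and, for F_SOFTLOCK, lock) every page in the range.  If a page
 * fails under F_SOFTLOCK, the pages already locked are unlocked again
 * before the fault code is returned.
 */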
static int
segmf_fault_range(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
        struct segmf_data *data = seg->s_data;
        int error = 0;
        caddr_t a;

        if ((data->prot & seg_rw_to_prot(rw)) == 0)
                return (FC_PROT);

        /* loop over the address range handling each fault */

        for (a = addr; a < addr + len; a += PAGESIZE) {
                error = segmf_faultpage(hat, seg, a, type, data->prot);
                if (error != 0)
                        break;
        }

        if (error != 0 && type == F_SOFTLOCK) {
                size_t done = (size_t)(a - addr);

                /*
                 * Undo what's been done so far.
                 */
                if (done > 0)
                        segmf_softunlock(hat, seg, addr, done);
        }

        return (error);
}

/*
 * We never demand-fault for seg_mf.
 */
/*ARGSUSED*/
static int
segmf_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
        return (FC_MAKE_ERR(EFAULT));
}

/*ARGSUSED*/
static int
segmf_faulta(struct seg *seg, caddr_t addr)
{
        return (0);
}

/*ARGSUSED*/
static int
segmf_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
        return (EINVAL);
}

/*ARGSUSED*/
static int
segmf_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
        return (EINVAL);
}

/*ARGSUSED*/
static int
segmf_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
        return (-1);
}

/*ARGSUSED*/
static int
segmf_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
        return (0);
}

/*
 * XXPV Hmm.  Should we say that mf mappings are "in core?"
 */

/*ARGSUSED*/
static size_t
segmf_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
        size_t v;

        for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len;
            len -= PAGESIZE, v += PAGESIZE)
                *vec++ = 1;
        return (v);
}

/*ARGSUSED*/
static int
segmf_lockop(struct seg *seg, caddr_t addr,
    size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
{
        return (0);
}

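/*
 * Report the single, segment-wide protection for each page in the range.
 */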
static int
segmf_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
        struct segmf_data *data = seg->s_data;
        pgcnt_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

        if (pgno != 0) {
                do {
                        protv[--pgno] = data->prot;
                } while (pgno != 0);
        }
        return (0);
}

static u_offset_t
segmf_getoffset(struct seg *seg, caddr_t addr)
{
        return (addr - seg->s_base);
}

/*ARGSUSED*/
static int
segmf_gettype(struct seg *seg, caddr_t addr)
{
        return (MAP_SHARED);
}

/*ARGSUSED1*/
static int
segmf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
        struct segmf_data *data = seg->s_data;

        *vpp = VTOCVP(data->vp);
        return (0);
}

/*ARGSUSED*/
static int
segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
        return (0);
}

/*ARGSUSED*/
static void
segmf_dump(struct seg *seg)
{}

/*ARGSUSED*/
static int
segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
        return (ENOTSUP);
}

static int
segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
{
        struct segmf_data *data = seg->s_data;

        memid->val[0] = (uintptr_t)VTOCVP(data->vp);
        memid->val[1] = (uintptr_t)seg_page(seg, addr);
        return (0);
}

/*
 * Add a set of contiguous foreign MFNs to the segment, soft-locking them.
 * The pre-faulting is necessary due to live migration; in particular we must
 * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
 * later on a bad MFN.  Whilst this isn't necessary for the other MMAP
 * ioctl()s, we lock them too, as they should be transitory.
 */
int
segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
    pgcnt_t pgcnt, domid_t domid)
{
        struct segmf_data *data = seg->s_data;
        pgcnt_t base;
        faultcode_t fc;
        pgcnt_t i;
        int error = 0;

        if (seg->s_ops != &segmf_ops)
                return (EINVAL);

        /*
         * Don't mess with dom0.
         *
         * Only allow the domid to be set once for the segment.
         * After that, attempts to add mappings to this segment for
         * other domains explicitly fail.
         */

        if (domid == 0 || domid == DOMID_SELF)
                return (EACCES);

        mutex_enter(&data->lock);

        if (data->domid == 0)
                data->domid = domid;

        if (data->domid != domid) {
                error = EINVAL;
                goto out;
        }

        base = seg_page(seg, addr);

        for (i = 0; i < pgcnt; i++) {
                data->map[base + i].t_type = SEGMF_MAP_MFN;
                data->map[base + i].u.m.m_mfn = mfn++;
        }

        fc = segmf_fault_range(seg->s_as->a_hat, seg, addr,
            pgcnt * MMU_PAGESIZE, F_SOFTLOCK, S_OTHER);

        if (fc != 0) {
                error = fc_decode(fc);
                for (i = 0; i < pgcnt; i++) {
                        data->map[base + i].t_type = SEGMF_MAP_EMPTY;
                }
        }

out:
        mutex_exit(&data->lock);
        return (error);
}

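/*
 * Add a set of grant references to the segment at 'addr' and map them in
 * immediately via segmf_fault_gref_range().  As with MFNs, dom0 and
 * DOMID_SELF are rejected and the domid may only be set once per segment.
 */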
int
segmf_add_grefs(struct seg *seg, caddr_t addr, uint_t flags,
    grant_ref_t *grefs, uint_t cnt, domid_t domid)
{
        struct segmf_data *data;
        segmf_map_t *map;
        faultcode_t fc;
        uint_t idx;
        uint_t i;
        int e;

        if (seg->s_ops != &segmf_ops)
                return (EINVAL);

        /*
         * Don't mess with dom0.
         *
         * Only allow the domid to be set once for the segment.
         * After that, attempts to add mappings to this segment for
         * other domains explicitly fail.
         */

        if (domid == 0 || domid == DOMID_SELF)
                return (EACCES);

        data = seg->s_data;
        idx = seg_page(seg, addr);
        map = &data->map[idx];
        e = 0;

        mutex_enter(&data->lock);

        if (data->domid == 0)
                data->domid = domid;

        if (data->domid != domid) {
                e = EINVAL;
                goto out;
        }

        /* store away the grefs passed in then fault in the pages */
        for (i = 0; i < cnt; i++) {
                map[i].t_type = SEGMF_MAP_GREF;
                map[i].u.g.g_gref = grefs[i];
                map[i].u.g.g_handle = 0;
                map[i].u.g.g_flags = 0;
                if (flags & SEGMF_GREF_WR) {
                        map[i].u.g.g_flags |= SEGMF_GFLAGS_WR;
                }
        }
        fc = segmf_fault_gref_range(seg, addr, cnt);
        if (fc != 0) {
                e = fc_decode(fc);
                for (i = 0; i < cnt; i++) {
                        map[i].t_type = SEGMF_MAP_EMPTY;
                }
        }

out:
        mutex_exit(&data->lock);
        return (e);
}

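/*
 * Unmap up to SEGMF_MAX_GREFS previously mapped grant references starting
 * at 'addr' in a single GNTTABOP_unmap_grant_ref batch, marking the
 * corresponding map entries empty.
 */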
int
segmf_release_grefs(struct seg *seg, caddr_t addr, uint_t cnt)
{
        gnttab_unmap_grant_ref_t mapop[SEGMF_MAX_GREFS];
        struct segmf_data *data;
        segmf_map_t *map;
        uint_t idx;
        long e;
        int i;
        int n;

        if (cnt > SEGMF_MAX_GREFS) {
                return (-1);
        }

        idx = seg_page(seg, addr);
        data = seg->s_data;
        map = &data->map[idx];

        bzero(mapop, sizeof (gnttab_unmap_grant_ref_t) * cnt);

        /*
         * for each entry which isn't empty and is currently mapped,
         * set it up for an unmap then mark it empty.
         */
        n = 0;
        for (i = 0; i < cnt; i++) {
                ASSERT(map[i].t_type != SEGMF_MAP_MFN);
                if ((map[i].t_type == SEGMF_MAP_GREF) &&
                    (map[i].u.g.g_flags & SEGMF_GFLAGS_MAPPED)) {
                        mapop[n].handle = map[i].u.g.g_handle;
                        mapop[n].host_addr = map[i].u.g.g_ptep;
                        mapop[n].dev_bus_addr = 0;
                        n++;
                }
                map[i].t_type = SEGMF_MAP_EMPTY;
        }

        /* if there's nothing to unmap, just return */
        if (n == 0) {
                return (0);
        }

        e = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &mapop, n);
        if (e != 0) {
                return (-1);
        }

        return (0);
}

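/*
 * Record the machine address of the PTE backing 'addr'; it is passed to
 * the hypervisor (GNTMAP_contains_pte) when the grant is later mapped or
 * unmapped.
 */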
void
segmf_add_gref_pte(struct seg *seg, caddr_t addr, uint64_t pte_ma)
{
        struct segmf_data *data;
        uint_t idx;

        idx = seg_page(seg, addr);
        data = seg->s_data;

        data->map[idx].u.g.g_ptep = pte_ma;
}

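/*
 * Map 'cnt' grant references starting at 'addr' into the process's address
 * space with a single GNTTABOP_map_grant_ref batch, saving the returned
 * handles for segmf_release_grefs().
 */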
static int
segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t cnt)
{
        gnttab_map_grant_ref_t mapop[SEGMF_MAX_GREFS];
        struct segmf_data *data;
        segmf_map_t *map;
        uint_t idx;
        int e;
        int i;

        if (cnt > SEGMF_MAX_GREFS) {
                return (-1);
        }

        data = seg->s_data;
        idx = seg_page(seg, addr);
        map = &data->map[idx];

        bzero(mapop, sizeof (gnttab_map_grant_ref_t) * cnt);

        ASSERT(map->t_type == SEGMF_MAP_GREF);

        /*
         * map each of the pages passed in into the user app's AS.  We do
         * this by passing the MA of the actual pte of the mapping to the
         * hypervisor.
         */
        for (i = 0; i < cnt; i++) {
                mapop[i].host_addr = map[i].u.g.g_ptep;
                mapop[i].dom = data->domid;
                mapop[i].ref = map[i].u.g.g_gref;
                mapop[i].flags = GNTMAP_host_map | GNTMAP_application_map |
                    GNTMAP_contains_pte;
                if (!(map[i].u.g.g_flags & SEGMF_GFLAGS_WR)) {
                        mapop[i].flags |= GNTMAP_readonly;
                }
        }
        e = xen_map_gref(GNTTABOP_map_grant_ref, mapop, cnt, B_TRUE);
        if ((e != 0) || (mapop[0].status != GNTST_okay)) {
                return (FC_MAKE_ERR(EFAULT));
        }

        /* save handle for segmf_release_grefs() and mark it as mapped */
        for (i = 0; i < cnt; i++) {
                ASSERT(mapop[i].status == GNTST_okay);
                map[i].u.g.g_handle = mapop[i].handle;
                map[i].u.g.g_flags |= SEGMF_GFLAGS_MAPPED;
        }

        return (0);
}

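/* segmf segment driver operations vector */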
static struct seg_ops segmf_ops = {
        .dup            = segmf_dup,
        .unmap          = segmf_unmap,
        .free           = segmf_free,
        .fault          = segmf_fault,
        .faulta         = segmf_faulta,
        .setprot        = segmf_setprot,
        .checkprot      = segmf_checkprot,
        .kluster        = segmf_kluster,
        .sync           = segmf_sync,
        .incore         = segmf_incore,
        .lockop         = segmf_lockop,
        .getprot        = segmf_getprot,
        .getoffset      = segmf_getoffset,
        .gettype        = segmf_gettype,
        .getvp          = segmf_getvp,
        .advise         = segmf_advise,
        .dump           = segmf_dump,
        .pagelock       = segmf_pagelock,
        .getmemid       = segmf_getmemid,
};