6146 seg_inherit_notsup is redundant
--- old/usr/src/uts/i86xpv/vm/seg_mf.c
+++ new/usr/src/uts/i86xpv/vm/seg_mf.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Machine frame segment driver. This segment driver allows dom0 processes to
29 29 * map pages of other domains or Xen (e.g. during save/restore). ioctl()s on
30 30 * the privcmd driver provide the MFN values backing each mapping, and we map
31 31 * them into the process's address space at this time. Demand-faulting is not
32 32 * supported by this driver due to the requirements upon some of the ioctl()s.
33 33 */
34 34
35 35
36 36 #include <sys/types.h>
37 37 #include <sys/systm.h>
38 38 #include <sys/vmsystm.h>
39 39 #include <sys/mman.h>
40 40 #include <sys/errno.h>
41 41 #include <sys/kmem.h>
42 42 #include <sys/cmn_err.h>
43 43 #include <sys/vnode.h>
44 44 #include <sys/conf.h>
45 45 #include <sys/debug.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/hypervisor.h>
48 48
49 49 #include <vm/page.h>
50 50 #include <vm/hat.h>
51 51 #include <vm/as.h>
52 52 #include <vm/seg.h>
53 53
54 54 #include <vm/hat_pte.h>
55 55 #include <vm/hat_i86.h>
56 56 #include <vm/seg_mf.h>
57 57
58 58 #include <sys/fs/snode.h>
59 59
60 60 #define VTOCVP(vp) (VTOS(vp)->s_commonvp)
61 61
62 62 typedef struct segmf_mfn_s {
63 63 mfn_t m_mfn;
64 64 } segmf_mfn_t;
65 65
66 66 /* g_flags */
67 67 #define SEGMF_GFLAGS_WR 0x1
68 68 #define SEGMF_GFLAGS_MAPPED 0x2
69 69 typedef struct segmf_gref_s {
70 70 uint64_t g_ptep;
71 71 grant_ref_t g_gref;
72 72 uint32_t g_flags;
73 73 grant_handle_t g_handle;
74 74 } segmf_gref_t;
75 75
76 76 typedef union segmf_mu_u {
77 77 segmf_mfn_t m;
78 78 segmf_gref_t g;
79 79 } segmf_mu_t;
80 80
81 81 typedef enum {
82 82 SEGMF_MAP_EMPTY = 0,
83 83 SEGMF_MAP_MFN,
84 84 SEGMF_MAP_GREF
85 85 } segmf_map_type_t;
86 86
87 87 typedef struct segmf_map_s {
88 88 segmf_map_type_t t_type;
89 89 segmf_mu_t u;
90 90 } segmf_map_t;
91 91
92 92 struct segmf_data {
93 93 kmutex_t lock;
94 94 struct vnode *vp;
95 95 uchar_t prot;
96 96 uchar_t maxprot;
97 97 size_t softlockcnt;
98 98 domid_t domid;
99 99 segmf_map_t *map;
100 100 };
101 101
102 102 static struct seg_ops segmf_ops;
103 103
104 104 static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);
105 105
106 106 static struct segmf_data *
107 107 segmf_data_zalloc(struct seg *seg)
108 108 {
109 109 struct segmf_data *data = kmem_zalloc(sizeof (*data), KM_SLEEP);
110 110
111 111 mutex_init(&data->lock, "segmf.lock", MUTEX_DEFAULT, NULL);
112 112 seg->s_ops = &segmf_ops;
113 113 seg->s_data = data;
114 114 return (data);
115 115 }
116 116
117 117 int
118 118 segmf_create(struct seg *seg, void *args)
119 119 {
120 120 struct segmf_crargs *a = args;
121 121 struct segmf_data *data;
122 122 struct as *as = seg->s_as;
123 123 pgcnt_t i, npages = seg_pages(seg);
124 124 int error;
125 125
126 126 hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
127 127
128 128 data = segmf_data_zalloc(seg);
129 129 data->vp = specfind(a->dev, VCHR);
130 130 data->prot = a->prot;
131 131 data->maxprot = a->maxprot;
132 132
133 133 data->map = kmem_alloc(npages * sizeof (segmf_map_t), KM_SLEEP);
134 134 for (i = 0; i < npages; i++) {
135 135 data->map[i].t_type = SEGMF_MAP_EMPTY;
136 136 }
137 137
138 138 error = VOP_ADDMAP(VTOCVP(data->vp), 0, as, seg->s_base, seg->s_size,
139 139 data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);
140 140
141 141 if (error != 0)
142 142 hat_unload(as->a_hat,
143 143 seg->s_base, seg->s_size, HAT_UNLOAD_UNMAP);
144 144 return (error);
145 145 }
146 146
147 147 /*
148 148 * Duplicate a seg and return new segment in newseg.
149 149 */
150 150 static int
151 151 segmf_dup(struct seg *seg, struct seg *newseg)
152 152 {
153 153 struct segmf_data *data = seg->s_data;
154 154 struct segmf_data *ndata;
155 155 pgcnt_t npages = seg_pages(newseg);
156 156 size_t sz;
157 157
158 158 ndata = segmf_data_zalloc(newseg);
159 159
160 160 VN_HOLD(data->vp);
161 161 ndata->vp = data->vp;
162 162 ndata->prot = data->prot;
163 163 ndata->maxprot = data->maxprot;
164 164 ndata->domid = data->domid;
165 165
166 166 sz = npages * sizeof (segmf_map_t);
167 167 ndata->map = kmem_alloc(sz, KM_SLEEP);
168 168 bcopy(data->map, ndata->map, sz);
169 169
170 170 return (VOP_ADDMAP(VTOCVP(ndata->vp), 0, newseg->s_as,
171 171 newseg->s_base, newseg->s_size, ndata->prot, ndata->maxprot,
172 172 MAP_SHARED, CRED(), NULL));
173 173 }
174 174
175 175 /*
176 176 * We only support unmapping the whole segment, and we automatically unlock
177 177 * what we previously soft-locked.
178 178 */
179 179 static int
180 180 segmf_unmap(struct seg *seg, caddr_t addr, size_t len)
181 181 {
182 182 struct segmf_data *data = seg->s_data;
183 183 offset_t off;
184 184
185 185 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
186 186 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
187 187 panic("segmf_unmap");
188 188
189 189 if (addr != seg->s_base || len != seg->s_size)
190 190 return (ENOTSUP);
191 191
192 192 hat_unload(seg->s_as->a_hat, addr, len,
193 193 HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);
194 194
195 195 off = (offset_t)seg_page(seg, addr);
196 196
197 197 ASSERT(data->vp != NULL);
198 198
199 199 (void) VOP_DELMAP(VTOCVP(data->vp), off, seg->s_as, addr, len,
200 200 data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);
201 201
202 202 seg_free(seg);
203 203 return (0);
204 204 }
205 205
206 206 static void
207 207 segmf_free(struct seg *seg)
208 208 {
209 209 struct segmf_data *data = seg->s_data;
210 210 pgcnt_t npages = seg_pages(seg);
211 211
212 212 kmem_free(data->map, npages * sizeof (segmf_map_t));
213 213 VN_RELE(data->vp);
214 214 mutex_destroy(&data->lock);
215 215 kmem_free(data, sizeof (*data));
216 216 }
217 217
218 218 static int segmf_faultpage_debug = 0;
219 219 /*ARGSUSED*/
220 220 static int
221 221 segmf_faultpage(struct hat *hat, struct seg *seg, caddr_t addr,
222 222 enum fault_type type, uint_t prot)
223 223 {
224 224 struct segmf_data *data = seg->s_data;
225 225 uint_t hat_flags = HAT_LOAD_NOCONSIST;
226 226 mfn_t mfn;
227 227 x86pte_t pte;
228 228 segmf_map_t *map;
229 229 uint_t idx;
230 230
231 231
232 232 idx = seg_page(seg, addr);
233 233 map = &data->map[idx];
234 234 ASSERT(map->t_type == SEGMF_MAP_MFN);
235 235
236 236 mfn = map->u.m.m_mfn;
237 237
238 238 if (type == F_SOFTLOCK) {
239 239 mutex_enter(&freemem_lock);
240 240 data->softlockcnt++;
241 241 mutex_exit(&freemem_lock);
242 242 hat_flags |= HAT_LOAD_LOCK;
243 243 } else
244 244 hat_flags |= HAT_LOAD;
245 245
246 246 if (segmf_faultpage_debug > 0) {
247 247 uprintf("segmf_faultpage: addr %p domid %x mfn %lx prot %x\n",
248 248 (void *)addr, data->domid, mfn, prot);
249 249 segmf_faultpage_debug--;
250 250 }
251 251
252 252 /*
253 253 * Ask the HAT to load a throwaway mapping to page zero, then
254 254 * overwrite it with our foreign domain mapping. It gets removed
255 255 * later via hat_unload()
256 256 */
257 257 hat_devload(hat, addr, MMU_PAGESIZE, (pfn_t)0,
258 258 PROT_READ | HAT_UNORDERED_OK, hat_flags);
259 259
260 260 pte = mmu_ptob((x86pte_t)mfn) | PT_VALID | PT_USER | PT_FOREIGN;
261 261 if (prot & PROT_WRITE)
262 262 pte |= PT_WRITABLE;
263 263
264 264 if (HYPERVISOR_update_va_mapping_otherdomain((uintptr_t)addr, pte,
265 265 UVMF_INVLPG | UVMF_ALL, data->domid) != 0) {
266 266 hat_flags = HAT_UNLOAD_UNMAP;
267 267
268 268 if (type == F_SOFTLOCK) {
269 269 hat_flags |= HAT_UNLOAD_UNLOCK;
270 270 mutex_enter(&freemem_lock);
271 271 data->softlockcnt--;
272 272 mutex_exit(&freemem_lock);
273 273 }
274 274
275 275 hat_unload(hat, addr, MMU_PAGESIZE, hat_flags);
276 276 return (FC_MAKE_ERR(EFAULT));
277 277 }
278 278
279 279 return (0);
280 280 }
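
For reference, the foreign PTE built just above is an ordinary x86 PTE whose frame-number field holds the foreign MFN. A minimal sketch of that composition follows, assuming 4K base pages (MMU_PAGESHIFT == 12) and the architectural bit values PT_VALID == 0x1, PT_WRITABLE == 0x2, PT_USER == 0x4; PT_FOREIGN is an xpv-HAT software bit (see hat_pte.h) whose value is not reproduced here, and the helper name is purely illustrative.

#include <stdint.h>

typedef uint64_t x86pte_t;

#define MMU_PAGESHIFT   12              /* assumption: 4K base pages */
#define PT_VALID        0x1             /* x86 Present bit */
#define PT_WRITABLE     0x2             /* x86 R/W bit */
#define PT_USER         0x4             /* x86 U/S bit */
#define mmu_ptob(x)     ((x) << MMU_PAGESHIFT)

/* hypothetical helper mirroring the composition done in segmf_faultpage() */
static x86pte_t
foreign_pte(uint64_t mfn, int writable)
{
	x86pte_t pte = mmu_ptob((x86pte_t)mfn) | PT_VALID | PT_USER;

	if (writable)
		pte |= PT_WRITABLE;
	return (pte);           /* the driver additionally ORs in PT_FOREIGN */
}
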
281 281
282 282 static int
283 283 seg_rw_to_prot(enum seg_rw rw)
284 284 {
285 285 switch (rw) {
286 286 case S_READ:
287 287 return (PROT_READ);
288 288 case S_WRITE:
289 289 return (PROT_WRITE);
290 290 case S_EXEC:
291 291 return (PROT_EXEC);
292 292 case S_OTHER:
293 293 default:
294 294 break;
295 295 }
296 296 return (PROT_READ | PROT_WRITE | PROT_EXEC);
297 297 }
298 298
299 299 static void
300 300 segmf_softunlock(struct hat *hat, struct seg *seg, caddr_t addr, size_t len)
301 301 {
302 302 struct segmf_data *data = seg->s_data;
303 303
304 304 hat_unlock(hat, addr, len);
305 305
306 306 mutex_enter(&freemem_lock);
307 307 ASSERT(data->softlockcnt >= btopr(len));
308 308 data->softlockcnt -= btopr(len);
309 309 mutex_exit(&freemem_lock);
310 310
311 311 if (data->softlockcnt == 0) {
312 312 struct as *as = seg->s_as;
313 313
314 314 if (AS_ISUNMAPWAIT(as)) {
315 315 mutex_enter(&as->a_contents);
316 316 if (AS_ISUNMAPWAIT(as)) {
317 317 AS_CLRUNMAPWAIT(as);
318 318 cv_broadcast(&as->a_cv);
319 319 }
320 320 mutex_exit(&as->a_contents);
321 321 }
322 322 }
323 323 }
324 324
325 325 static int
326 326 segmf_fault_range(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
327 327 enum fault_type type, enum seg_rw rw)
328 328 {
329 329 struct segmf_data *data = seg->s_data;
330 330 int error = 0;
331 331 caddr_t a;
332 332
333 333 if ((data->prot & seg_rw_to_prot(rw)) == 0)
334 334 return (FC_PROT);
335 335
336 336 /* loop over the address range handling each fault */
337 337
338 338 for (a = addr; a < addr + len; a += PAGESIZE) {
339 339 error = segmf_faultpage(hat, seg, a, type, data->prot);
340 340 if (error != 0)
341 341 break;
342 342 }
343 343
344 344 if (error != 0 && type == F_SOFTLOCK) {
345 345 size_t done = (size_t)(a - addr);
346 346
347 347 /*
348 348 * Undo what's been done so far.
349 349 */
350 350 if (done > 0)
351 351 segmf_softunlock(hat, seg, addr, done);
352 352 }
353 353
354 354 return (error);
355 355 }
356 356
357 357 /*
358 358 * We never demand-fault for seg_mf.
359 359 */
360 360 /*ARGSUSED*/
361 361 static int
362 362 segmf_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
363 363 enum fault_type type, enum seg_rw rw)
364 364 {
365 365 return (FC_MAKE_ERR(EFAULT));
366 366 }
367 367
368 368 /*ARGSUSED*/
369 369 static int
370 370 segmf_faulta(struct seg *seg, caddr_t addr)
371 371 {
372 372 return (0);
373 373 }
374 374
375 375 /*ARGSUSED*/
376 376 static int
377 377 segmf_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
378 378 {
379 379 return (EINVAL);
380 380 }
381 381
382 382 /*ARGSUSED*/
383 383 static int
384 384 segmf_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
385 385 {
386 386 return (EINVAL);
387 387 }
388 388
389 389 /*ARGSUSED*/
390 390 static int
391 391 segmf_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
392 392 {
393 393 return (-1);
394 394 }
395 395
396 396 /*ARGSUSED*/
397 397 static int
398 398 segmf_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
399 399 {
400 400 return (0);
401 401 }
402 402
403 403 /*
404 404 * XXPV Hmm. Should we say that mf mappings are "in core?"
405 405 */
406 406
407 407 /*ARGSUSED*/
408 408 static size_t
409 409 segmf_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
410 410 {
411 411 size_t v;
412 412
413 413 for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len;
414 414 len -= PAGESIZE, v += PAGESIZE)
415 415 *vec++ = 1;
416 416 return (v);
417 417 }
418 418
419 419 /*ARGSUSED*/
420 420 static int
421 421 segmf_lockop(struct seg *seg, caddr_t addr,
422 422 size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
423 423 {
424 424 return (0);
425 425 }
426 426
427 427 static int
428 428 segmf_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
429 429 {
430 430 struct segmf_data *data = seg->s_data;
431 431 pgcnt_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
432 432
433 433 if (pgno != 0) {
434 434 do
435 435 protv[--pgno] = data->prot;
436 436 while (pgno != 0)
437 437 ;
438 438 }
439 439 return (0);
440 440 }
441 441
442 442 static u_offset_t
443 443 segmf_getoffset(struct seg *seg, caddr_t addr)
444 444 {
445 445 return (addr - seg->s_base);
446 446 }
447 447
448 448 /*ARGSUSED*/
449 449 static int
450 450 segmf_gettype(struct seg *seg, caddr_t addr)
451 451 {
452 452 return (MAP_SHARED);
453 453 }
454 454
455 455 /*ARGSUSED1*/
456 456 static int
457 457 segmf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
458 458 {
459 459 struct segmf_data *data = seg->s_data;
460 460
461 461 *vpp = VTOCVP(data->vp);
462 462 return (0);
463 463 }
464 464
465 465 /*ARGSUSED*/
466 466 static int
467 467 segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
468 468 {
469 469 return (0);
470 470 }
471 471
472 472 /*ARGSUSED*/
473 473 static void
474 474 segmf_dump(struct seg *seg)
475 475 {}
476 476
477 477 /*ARGSUSED*/
478 478 static int
479 479 segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
480 480 struct page ***ppp, enum lock_type type, enum seg_rw rw)
481 481 {
482 482 return (ENOTSUP);
483 483 }
484 484
485 485 /*ARGSUSED*/
486 486 static int
487 487 segmf_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
488 488 {
489 489 return (ENOTSUP);
490 490 }
491 491
492 492 static int
493 493 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
494 494 {
495 495 struct segmf_data *data = seg->s_data;
496 496
497 497 memid->val[0] = (uintptr_t)VTOCVP(data->vp);
498 498 memid->val[1] = (uintptr_t)seg_page(seg, addr);
499 499 return (0);
500 500 }
501 501
502 502 /*ARGSUSED*/
503 503 static lgrp_mem_policy_info_t *
504 504 segmf_getpolicy(struct seg *seg, caddr_t addr)
505 505 {
506 506 return (NULL);
507 507 }
508 508
509 509 /*ARGSUSED*/
510 510 static int
511 511 segmf_capable(struct seg *seg, segcapability_t capability)
512 512 {
513 513 return (0);
514 514 }
515 515
516 516 /*
517 517 * Add a set of contiguous foreign MFNs to the segment, soft-locking them. The
518 518 * pre-faulting is necessary due to live migration; in particular we must
519 519 * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
520 520 * later on a bad MFN. Whilst this isn't necessary for the other MMAP
521 521 * ioctl()s, we lock them too, as they should be transitory.
522 522 */
523 523 int
524 524 segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
525 525 pgcnt_t pgcnt, domid_t domid)
526 526 {
527 527 struct segmf_data *data = seg->s_data;
528 528 pgcnt_t base;
529 529 faultcode_t fc;
530 530 pgcnt_t i;
531 531 int error = 0;
532 532
533 533 if (seg->s_ops != &segmf_ops)
534 534 return (EINVAL);
535 535
536 536 /*
537 537 * Don't mess with dom0.
538 538 *
539 539 * Only allow the domid to be set once for the segment.
540 540 * After that, attempts to add mappings to this segment for
541 541 * other domains explicitly fail.
542 542 */
543 543
544 544 if (domid == 0 || domid == DOMID_SELF)
545 545 return (EACCES);
546 546
547 547 mutex_enter(&data->lock);
548 548
549 549 if (data->domid == 0)
550 550 data->domid = domid;
551 551
552 552 if (data->domid != domid) {
553 553 error = EINVAL;
554 554 goto out;
555 555 }
556 556
557 557 base = seg_page(seg, addr);
558 558
559 559 for (i = 0; i < pgcnt; i++) {
560 560 data->map[base + i].t_type = SEGMF_MAP_MFN;
561 561 data->map[base + i].u.m.m_mfn = mfn++;
562 562 }
563 563
564 564 fc = segmf_fault_range(seg->s_as->a_hat, seg, addr,
565 565 pgcnt * MMU_PAGESIZE, F_SOFTLOCK, S_OTHER);
566 566
567 567 if (fc != 0) {
568 568 error = fc_decode(fc);
569 569 for (i = 0; i < pgcnt; i++) {
570 570 data->map[base + i].t_type = SEGMF_MAP_EMPTY;
571 571 }
572 572 }
573 573
574 574 out:
575 575 mutex_exit(&data->lock);
576 576 return (error);
577 577 }
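
The caller side of this interface is the privcmd driver, which is not part of this file. Below is a rough outline of how it is presumably expected to drive segmf_create() and segmf_add_mfns(); the function name, the locking detail, and the protection values are illustrative assumptions, while the two segmf_*() signatures and the segmf_crargs members (dev, prot, maxprot) come from this source.

/* hypothetical caller, sketching the IOCTL_PRIVCMD_MMAPBATCH flow */
static int
privcmd_mmapbatch_sketch(struct as *as, caddr_t uaddr, size_t len,
    dev_t dev, domid_t domid, mfn_t first_mfn, pgcnt_t npages)
{
	struct segmf_crargs a;
	int error;

	a.dev = dev;
	a.prot = PROT_READ | PROT_WRITE;        /* illustrative */
	a.maxprot = PROT_READ | PROT_WRITE;

	/* create the segment over the user mapping */
	as_rangelock(as);
	error = as_map(as, uaddr, len, segmf_create, &a);
	as_rangeunlock(as);
	if (error != 0)
		return (error);

	/*
	 * Hand the batch of foreign MFNs to seg_mf.  A bad MFN is thus
	 * reported here, at ioctl() time, which is why segmf_add_mfns()
	 * pre-faults with F_SOFTLOCK instead of demand-faulting later.
	 */
	return (segmf_add_mfns(as_findseg(as, uaddr, 0), uaddr,
	    first_mfn, npages, domid));
}
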
578 578
579 579 int
580 580 segmf_add_grefs(struct seg *seg, caddr_t addr, uint_t flags,
581 581 grant_ref_t *grefs, uint_t cnt, domid_t domid)
582 582 {
583 583 struct segmf_data *data;
584 584 segmf_map_t *map;
585 585 faultcode_t fc;
586 586 uint_t idx;
587 587 uint_t i;
588 588 int e;
589 589
590 590 if (seg->s_ops != &segmf_ops)
591 591 return (EINVAL);
592 592
593 593 /*
594 594 * Don't mess with dom0.
595 595 *
596 596 * Only allow the domid to be set once for the segment.
597 597 * After that, attempts to add mappings to this segment for
598 598 * other domains explicitly fail.
599 599 */
600 600
601 601 if (domid == 0 || domid == DOMID_SELF)
602 602 return (EACCES);
603 603
604 604 data = seg->s_data;
605 605 idx = seg_page(seg, addr);
606 606 map = &data->map[idx];
607 607 e = 0;
608 608
609 609 mutex_enter(&data->lock);
610 610
611 611 if (data->domid == 0)
612 612 data->domid = domid;
613 613
614 614 if (data->domid != domid) {
615 615 e = EINVAL;
616 616 goto out;
617 617 }
618 618
619 619 /* store away the grefs passed in then fault in the pages */
620 620 for (i = 0; i < cnt; i++) {
621 621 map[i].t_type = SEGMF_MAP_GREF;
622 622 map[i].u.g.g_gref = grefs[i];
623 623 map[i].u.g.g_handle = 0;
624 624 map[i].u.g.g_flags = 0;
625 625 if (flags & SEGMF_GREF_WR) {
626 626 map[i].u.g.g_flags |= SEGMF_GFLAGS_WR;
627 627 }
628 628 }
629 629 fc = segmf_fault_gref_range(seg, addr, cnt);
630 630 if (fc != 0) {
631 631 e = fc_decode(fc);
632 632 for (i = 0; i < cnt; i++) {
633 633 data->map[i].t_type = SEGMF_MAP_EMPTY;
634 634 }
635 635 }
636 636
637 637 out:
638 638 mutex_exit(&data->lock);
639 639 return (e);
640 640 }
641 641
642 642 int
643 643 segmf_release_grefs(struct seg *seg, caddr_t addr, uint_t cnt)
644 644 {
645 645 gnttab_unmap_grant_ref_t mapop[SEGMF_MAX_GREFS];
646 646 struct segmf_data *data;
647 647 segmf_map_t *map;
648 648 uint_t idx;
649 649 long e;
650 650 int i;
651 651 int n;
652 652
653 653
654 654 if (cnt > SEGMF_MAX_GREFS) {
655 655 return (-1);
656 656 }
657 657
658 658 idx = seg_page(seg, addr);
659 659 data = seg->s_data;
660 660 map = &data->map[idx];
661 661
662 662 bzero(mapop, sizeof (gnttab_unmap_grant_ref_t) * cnt);
663 663
664 664 /*
665 665 * for each entry which isn't empty and is currently mapped,
666 666 * set it up for an unmap, then mark it empty.
667 667 */
668 668 n = 0;
669 669 for (i = 0; i < cnt; i++) {
670 670 ASSERT(map[i].t_type != SEGMF_MAP_MFN);
671 671 if ((map[i].t_type == SEGMF_MAP_GREF) &&
672 672 (map[i].u.g.g_flags & SEGMF_GFLAGS_MAPPED)) {
673 673 mapop[n].handle = map[i].u.g.g_handle;
674 674 mapop[n].host_addr = map[i].u.g.g_ptep;
675 675 mapop[n].dev_bus_addr = 0;
676 676 n++;
677 677 }
678 678 map[i].t_type = SEGMF_MAP_EMPTY;
679 679 }
680 680
681 681 /* if there's nothing to unmap, just return */
682 682 if (n == 0) {
683 683 return (0);
684 684 }
685 685
686 686 e = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &mapop, n);
687 687 if (e != 0) {
688 688 return (-1);
689 689 }
690 690
691 691 return (0);
692 692 }
693 693
694 694
695 695 void
696 696 segmf_add_gref_pte(struct seg *seg, caddr_t addr, uint64_t pte_ma)
697 697 {
698 698 struct segmf_data *data;
699 699 uint_t idx;
700 700
701 701 idx = seg_page(seg, addr);
702 702 data = seg->s_data;
703 703
704 704 data->map[idx].u.g.g_ptep = pte_ma;
705 705 }
706 706
707 707
708 708 static int
709 709 segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t cnt)
710 710 {
711 711 gnttab_map_grant_ref_t mapop[SEGMF_MAX_GREFS];
712 712 struct segmf_data *data;
713 713 segmf_map_t *map;
714 714 uint_t idx;
715 715 int e;
716 716 int i;
717 717
718 718
719 719 if (cnt > SEGMF_MAX_GREFS) {
720 720 return (-1);
721 721 }
722 722
723 723 data = seg->s_data;
724 724 idx = seg_page(seg, addr);
725 725 map = &data->map[idx];
726 726
727 727 bzero(mapop, sizeof (gnttab_map_grant_ref_t) * cnt);
728 728
729 729 ASSERT(map->t_type == SEGMF_MAP_GREF);
730 730
731 731 /*
732 732 * Map each page passed in into the user app's AS. We do this by
733 733 * passing the MA of the actual pte of the mapping to the hypervisor.
734 734 */
735 735 for (i = 0; i < cnt; i++) {
736 736 mapop[i].host_addr = map[i].u.g.g_ptep;
737 737 mapop[i].dom = data->domid;
738 738 mapop[i].ref = map[i].u.g.g_gref;
739 739 mapop[i].flags = GNTMAP_host_map | GNTMAP_application_map |
740 740 GNTMAP_contains_pte;
741 741 if (!(map[i].u.g.g_flags & SEGMF_GFLAGS_WR)) {
742 742 mapop[i].flags |= GNTMAP_readonly;
743 743 }
744 744 }
745 745 e = xen_map_gref(GNTTABOP_map_grant_ref, mapop, cnt, B_TRUE);
746 746 if ((e != 0) || (mapop[0].status != GNTST_okay)) {
747 747 return (FC_MAKE_ERR(EFAULT));
748 748 }
749 749
750 750 /* save handle for segmf_release_grefs() and mark it as mapped */
751 751 for (i = 0; i < cnt; i++) {
752 752 ASSERT(mapop[i].status == GNTST_okay);
753 753 map[i].u.g.g_handle = mapop[i].handle;
754 754 map[i].u.g.g_flags |= SEGMF_GFLAGS_MAPPED;
755 755 }
756 756
757 757 return (0);
758 758 }
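
Taken together, the grant-reference entry points imply a fixed call order for a consuming driver: the machine address of each user PTE must be registered via segmf_add_gref_pte() before segmf_add_grefs() runs, because segmf_fault_gref_range() hands that address (g_ptep, with GNTMAP_contains_pte) to the hypervisor. A sketch under that assumption follows; the wrapper name is hypothetical, while the segmf_*() calls, SEGMF_GREF_WR, and SEGMF_MAX_GREFS are taken from this file.

/* hypothetical wrapper showing the expected ordering of the gref calls */
static int
map_grefs_sketch(struct seg *seg, caddr_t uaddr, uint64_t *pte_mas,
    grant_ref_t *grefs, uint_t cnt, domid_t domid)
{
	uint_t i;

	if (cnt > SEGMF_MAX_GREFS)
		return (EINVAL);

	/* 1. record the machine address of each page's PTE */
	for (i = 0; i < cnt; i++)
		segmf_add_gref_pte(seg, uaddr + i * PAGESIZE, pte_mas[i]);

	/*
	 * 2. store the grant references and fault them all in (writable
	 * here); segmf_release_grefs(seg, uaddr, cnt) undoes this at
	 * teardown.
	 */
	return (segmf_add_grefs(seg, uaddr, SEGMF_GREF_WR, grefs, cnt,
	    domid));
}
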
759 759
760 760 static struct seg_ops segmf_ops = {
761 761 .dup = segmf_dup,
762 762 .unmap = segmf_unmap,
763 763 .free = segmf_free,
764 764 .fault = segmf_fault,
765 765 .faulta = segmf_faulta,
766 766 .setprot = segmf_setprot,
767 767 .checkprot = segmf_checkprot,
768 768 .kluster = segmf_kluster,
769 769 .sync = segmf_sync,
770 770 .incore = segmf_incore,
771 771 .lockop = segmf_lockop,
772 772 .getprot = segmf_getprot,
773 773 .getoffset = segmf_getoffset,
774 774 .gettype = segmf_gettype,
775 775 .getvp = segmf_getvp,
776 776 .advise = segmf_advise,
777 777 .dump = segmf_dump,
778 778 .pagelock = segmf_pagelock,
779 779 .setpagesize = segmf_setpagesize,
780 780 .getmemid = segmf_getmemid,
781 781 .getpolicy = segmf_getpolicy,
782 782 .capable = segmf_capable,
783 - .inherit = seg_inherit_notsup,
784 783 };
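
The only change in this review is the removal of the explicit .inherit entry above. At the C level this is safe because members omitted from a designated initializer are zero-initialized, so segmf_ops.inherit simply becomes NULL; the point of issue 6146 is, presumably, that the generic segment layer now supplies the not-supported behaviour itself when a driver leaves the op NULL, making the per-driver seg_inherit_notsup entries redundant. A minimal, self-contained illustration of the zero-initialization guarantee (illustrative names, not from this change):

#include <stdio.h>

struct ops {
	int (*fault)(void);
	int (*inherit)(void);
};

static int my_fault(void) { return (0); }

/* .inherit is not named, so it is zero-initialized (NULL) */
static struct ops o = {
	.fault = my_fault,
};

int
main(void)
{
	printf("inherit is %s\n", o.inherit == NULL ? "NULL" : "set");
	return (0);
}
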