Print this page
6152 use NULL dump segop as a shorthand for no-op
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86xpv/vm/seg_mf.c
+++ new/usr/src/uts/i86xpv/vm/seg_mf.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Machine frame segment driver. This segment driver allows dom0 processes to
29 29 * map pages of other domains or Xen (e.g. during save/restore). ioctl()s on
30 30 * the privcmd driver provide the MFN values backing each mapping, and we map
31 31 * them into the process's address space at this time. Demand-faulting is not
32 32 * supported by this driver due to the requirements upon some of the ioctl()s.
33 33 */
34 34
35 35
36 36 #include <sys/types.h>
37 37 #include <sys/systm.h>
38 38 #include <sys/vmsystm.h>
39 39 #include <sys/mman.h>
40 40 #include <sys/errno.h>
41 41 #include <sys/kmem.h>
42 42 #include <sys/cmn_err.h>
43 43 #include <sys/vnode.h>
44 44 #include <sys/conf.h>
45 45 #include <sys/debug.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/hypervisor.h>
48 48
49 49 #include <vm/page.h>
50 50 #include <vm/hat.h>
51 51 #include <vm/as.h>
52 52 #include <vm/seg.h>
53 53
54 54 #include <vm/hat_pte.h>
55 55 #include <vm/hat_i86.h>
56 56 #include <vm/seg_mf.h>
57 57
58 58 #include <sys/fs/snode.h>
59 59
60 60 #define VTOCVP(vp) (VTOS(vp)->s_commonvp)
61 61
/* A single foreign machine-frame mapping. */
typedef struct segmf_mfn_s {
	mfn_t	m_mfn;		/* machine frame number of the foreign page */
} segmf_mfn_t;

/* g_flags */
#define	SEGMF_GFLAGS_WR		0x1	/* grant is to be mapped writable */
#define	SEGMF_GFLAGS_MAPPED	0x2	/* grant is currently mapped */
/* A single grant-table reference mapping. */
typedef struct segmf_gref_s {
	uint64_t	g_ptep;		/* MA of the pte backing this page */
	grant_ref_t	g_gref;		/* grant reference from the granter */
	uint32_t	g_flags;	/* SEGMF_GFLAGS_* */
	grant_handle_t	g_handle;	/* handle returned by the map op */
} segmf_gref_t;

/* Per-page payload; interpretation chosen by segmf_map_t.t_type. */
typedef union segmf_mu_u {
	segmf_mfn_t	m;
	segmf_gref_t	g;
} segmf_mu_t;

typedef enum {
	SEGMF_MAP_EMPTY = 0,	/* slot not in use */
	SEGMF_MAP_MFN,		/* slot holds a foreign MFN mapping */
	SEGMF_MAP_GREF		/* slot holds a grant-reference mapping */
} segmf_map_type_t;

/* One entry per page of the segment. */
typedef struct segmf_map_s {
	segmf_map_type_t	t_type;
	segmf_mu_t		u;
} segmf_map_t;

/* Private per-segment state, hung off seg->s_data. */
struct segmf_data {
	kmutex_t	lock;		/* protects domid and map updates */
	struct vnode	*vp;		/* privcmd device vnode (held) */
	uchar_t		prot;		/* current protections */
	uchar_t		maxprot;	/* maximum allowed protections */
	size_t		softlockcnt;	/* pages currently soft-locked */
	domid_t		domid;		/* sole domain this segment maps */
	segmf_map_t	*map;		/* one entry per page */
};
101 101
102 102 static struct seg_ops segmf_ops;
103 103
104 104 static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);
105 105
106 106 static struct segmf_data *
107 107 segmf_data_zalloc(struct seg *seg)
108 108 {
109 109 struct segmf_data *data = kmem_zalloc(sizeof (*data), KM_SLEEP);
110 110
111 111 mutex_init(&data->lock, "segmf.lock", MUTEX_DEFAULT, NULL);
112 112 seg->s_ops = &segmf_ops;
113 113 seg->s_data = data;
114 114 return (data);
115 115 }
116 116
117 117 int
118 118 segmf_create(struct seg *seg, void *args)
119 119 {
120 120 struct segmf_crargs *a = args;
121 121 struct segmf_data *data;
122 122 struct as *as = seg->s_as;
123 123 pgcnt_t i, npages = seg_pages(seg);
124 124 int error;
125 125
126 126 hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
127 127
128 128 data = segmf_data_zalloc(seg);
129 129 data->vp = specfind(a->dev, VCHR);
130 130 data->prot = a->prot;
131 131 data->maxprot = a->maxprot;
132 132
133 133 data->map = kmem_alloc(npages * sizeof (segmf_map_t), KM_SLEEP);
134 134 for (i = 0; i < npages; i++) {
135 135 data->map[i].t_type = SEGMF_MAP_EMPTY;
136 136 }
137 137
138 138 error = VOP_ADDMAP(VTOCVP(data->vp), 0, as, seg->s_base, seg->s_size,
139 139 data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);
140 140
141 141 if (error != 0)
142 142 hat_unload(as->a_hat,
143 143 seg->s_base, seg->s_size, HAT_UNLOAD_UNMAP);
144 144 return (error);
145 145 }
146 146
147 147 /*
148 148 * Duplicate a seg and return new segment in newseg.
149 149 */
150 150 static int
151 151 segmf_dup(struct seg *seg, struct seg *newseg)
152 152 {
153 153 struct segmf_data *data = seg->s_data;
154 154 struct segmf_data *ndata;
155 155 pgcnt_t npages = seg_pages(newseg);
156 156 size_t sz;
157 157
158 158 ndata = segmf_data_zalloc(newseg);
159 159
160 160 VN_HOLD(data->vp);
161 161 ndata->vp = data->vp;
162 162 ndata->prot = data->prot;
163 163 ndata->maxprot = data->maxprot;
164 164 ndata->domid = data->domid;
165 165
166 166 sz = npages * sizeof (segmf_map_t);
167 167 ndata->map = kmem_alloc(sz, KM_SLEEP);
168 168 bcopy(data->map, ndata->map, sz);
169 169
170 170 return (VOP_ADDMAP(VTOCVP(ndata->vp), 0, newseg->s_as,
171 171 newseg->s_base, newseg->s_size, ndata->prot, ndata->maxprot,
172 172 MAP_SHARED, CRED(), NULL));
173 173 }
174 174
/*
 * We only support unmapping the whole segment, and we automatically unlock
 * what we previously soft-locked.
 */
static int
segmf_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	struct segmf_data *data = seg->s_data;
	offset_t off;

	/* the range must lie within the segment and be page-aligned */
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
		panic("segmf_unmap");

	/* partial unmaps are not supported */
	if (addr != seg->s_base || len != seg->s_size)
		return (ENOTSUP);

	/* drop translations, releasing any soft locks we still hold */
	hat_unload(seg->s_as->a_hat, addr, len,
	    HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);

	off = (offset_t)seg_page(seg, addr);

	ASSERT(data->vp != NULL);

	/* best-effort: deregister the mapping with the privcmd device */
	(void) VOP_DELMAP(VTOCVP(data->vp), off, seg->s_as, addr, len,
	    data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);

	seg_free(seg);
	return (0);
}
205 205
206 206 static void
207 207 segmf_free(struct seg *seg)
208 208 {
209 209 struct segmf_data *data = seg->s_data;
210 210 pgcnt_t npages = seg_pages(seg);
211 211
212 212 kmem_free(data->map, npages * sizeof (segmf_map_t));
213 213 VN_RELE(data->vp);
214 214 mutex_destroy(&data->lock);
215 215 kmem_free(data, sizeof (*data));
216 216 }
217 217
/* debug knob: uprintf() details for this many faults, then go quiet */
static int segmf_faultpage_debug = 0;
/*
 * Map a single page of a foreign domain's memory at 'addr'.  Only valid
 * for SEGMF_MAP_MFN slots.  For F_SOFTLOCK faults the mapping is locked
 * and softlockcnt is bumped.  Returns 0 or FC_MAKE_ERR(EFAULT).
 */
/*ARGSUSED*/
static int
segmf_faultpage(struct hat *hat, struct seg *seg, caddr_t addr,
    enum fault_type type, uint_t prot)
{
	struct segmf_data *data = seg->s_data;
	uint_t hat_flags = HAT_LOAD_NOCONSIST;
	mfn_t mfn;
	x86pte_t pte;
	segmf_map_t *map;
	uint_t idx;


	idx = seg_page(seg, addr);
	map = &data->map[idx];
	ASSERT(map->t_type == SEGMF_MAP_MFN);

	mfn = map->u.m.m_mfn;

	if (type == F_SOFTLOCK) {
		/* softlockcnt is protected by freemem_lock */
		mutex_enter(&freemem_lock);
		data->softlockcnt++;
		mutex_exit(&freemem_lock);
		hat_flags |= HAT_LOAD_LOCK;
	} else
		hat_flags |= HAT_LOAD;

	if (segmf_faultpage_debug > 0) {
		uprintf("segmf_faultpage: addr %p domid %x mfn %lx prot %x\n",
		    (void *)addr, data->domid, mfn, prot);
		segmf_faultpage_debug--;
	}

	/*
	 * Ask the HAT to load a throwaway mapping to page zero, then
	 * overwrite it with our foreign domain mapping. It gets removed
	 * later via hat_unload()
	 */
	hat_devload(hat, addr, MMU_PAGESIZE, (pfn_t)0,
	    PROT_READ | HAT_UNORDERED_OK, hat_flags);

	/* build the foreign pte by hand and install it via the hypervisor */
	pte = mmu_ptob((x86pte_t)mfn) | PT_VALID | PT_USER | PT_FOREIGN;
	if (prot & PROT_WRITE)
		pte |= PT_WRITABLE;

	if (HYPERVISOR_update_va_mapping_otherdomain((uintptr_t)addr, pte,
	    UVMF_INVLPG | UVMF_ALL, data->domid) != 0) {
		/* hypercall failed: undo the throwaway mapping and lock */
		hat_flags = HAT_UNLOAD_UNMAP;

		if (type == F_SOFTLOCK) {
			hat_flags |= HAT_UNLOAD_UNLOCK;
			mutex_enter(&freemem_lock);
			data->softlockcnt--;
			mutex_exit(&freemem_lock);
		}

		hat_unload(hat, addr, MMU_PAGESIZE, hat_flags);
		return (FC_MAKE_ERR(EFAULT));
	}

	return (0);
}
281 281
282 282 static int
283 283 seg_rw_to_prot(enum seg_rw rw)
284 284 {
285 285 switch (rw) {
286 286 case S_READ:
287 287 return (PROT_READ);
288 288 case S_WRITE:
289 289 return (PROT_WRITE);
290 290 case S_EXEC:
291 291 return (PROT_EXEC);
292 292 case S_OTHER:
293 293 default:
294 294 break;
295 295 }
296 296 return (PROT_READ | PROT_WRITE | PROT_EXEC);
297 297 }
298 298
/*
 * Drop the soft locks covering [addr, addr + len) and decrement
 * softlockcnt accordingly.  When the count hits zero, wake anyone
 * waiting in as_unmap() on this address space.
 */
static void
segmf_softunlock(struct hat *hat, struct seg *seg, caddr_t addr, size_t len)
{
	struct segmf_data *data = seg->s_data;

	hat_unlock(hat, addr, len);

	/* softlockcnt is protected by freemem_lock */
	mutex_enter(&freemem_lock);
	ASSERT(data->softlockcnt >= btopr(len));
	data->softlockcnt -= btopr(len);
	mutex_exit(&freemem_lock);

	if (data->softlockcnt == 0) {
		struct as *as = seg->s_as;

		/* double-checked under a_contents before waking waiters */
		if (AS_ISUNMAPWAIT(as)) {
			mutex_enter(&as->a_contents);
			if (AS_ISUNMAPWAIT(as)) {
				AS_CLRUNMAPWAIT(as);
				cv_broadcast(&as->a_cv);
			}
			mutex_exit(&as->a_contents);
		}
	}
}
324 324
325 325 static int
326 326 segmf_fault_range(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
327 327 enum fault_type type, enum seg_rw rw)
328 328 {
329 329 struct segmf_data *data = seg->s_data;
330 330 int error = 0;
331 331 caddr_t a;
332 332
333 333 if ((data->prot & seg_rw_to_prot(rw)) == 0)
334 334 return (FC_PROT);
335 335
336 336 /* loop over the address range handling each fault */
337 337
338 338 for (a = addr; a < addr + len; a += PAGESIZE) {
339 339 error = segmf_faultpage(hat, seg, a, type, data->prot);
340 340 if (error != 0)
341 341 break;
342 342 }
343 343
344 344 if (error != 0 && type == F_SOFTLOCK) {
345 345 size_t done = (size_t)(a - addr);
346 346
347 347 /*
348 348 * Undo what's been done so far.
349 349 */
350 350 if (done > 0)
351 351 segmf_softunlock(hat, seg, addr, done);
352 352 }
353 353
354 354 return (error);
355 355 }
356 356
357 357 /*
358 358 * We never demand-fault for seg_mf.
359 359 */
360 360 /*ARGSUSED*/
361 361 static int
362 362 segmf_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
363 363 enum fault_type type, enum seg_rw rw)
364 364 {
365 365 return (FC_MAKE_ERR(EFAULT));
366 366 }
367 367
368 368 /*ARGSUSED*/
369 369 static int
370 370 segmf_faulta(struct seg *seg, caddr_t addr)
371 371 {
372 372 return (0);
373 373 }
374 374
375 375 /*ARGSUSED*/
376 376 static int
377 377 segmf_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
378 378 {
379 379 return (EINVAL);
380 380 }
381 381
382 382 /*ARGSUSED*/
383 383 static int
384 384 segmf_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
385 385 {
386 386 return (EINVAL);
387 387 }
388 388
389 389 /*ARGSUSED*/
390 390 static int
391 391 segmf_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
392 392 {
393 393 return (-1);
394 394 }
395 395
396 396 /*ARGSUSED*/
397 397 static int
398 398 segmf_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
399 399 {
400 400 return (0);
401 401 }
402 402
403 403 /*
404 404 * XXPV Hmm. Should we say that mf mapping are "in core?"
405 405 */
406 406
407 407 /*ARGSUSED*/
408 408 static size_t
409 409 segmf_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
410 410 {
411 411 size_t v;
412 412
413 413 for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len;
414 414 len -= PAGESIZE, v += PAGESIZE)
415 415 *vec++ = 1;
416 416 return (v);
417 417 }
418 418
419 419 /*ARGSUSED*/
420 420 static int
421 421 segmf_lockop(struct seg *seg, caddr_t addr,
422 422 size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
423 423 {
424 424 return (0);
425 425 }
426 426
427 427 static int
428 428 segmf_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
429 429 {
430 430 struct segmf_data *data = seg->s_data;
431 431 pgcnt_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
432 432
433 433 if (pgno != 0) {
434 434 do
435 435 protv[--pgno] = data->prot;
436 436 while (pgno != 0)
437 437 ;
438 438 }
439 439 return (0);
440 440 }
441 441
442 442 static u_offset_t
443 443 segmf_getoffset(struct seg *seg, caddr_t addr)
444 444 {
445 445 return (addr - seg->s_base);
446 446 }
447 447
448 448 /*ARGSUSED*/
449 449 static int
450 450 segmf_gettype(struct seg *seg, caddr_t addr)
451 451 {
452 452 return (MAP_SHARED);
453 453 }
454 454
455 455 /*ARGSUSED1*/
456 456 static int
457 457 segmf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
458 458 {
459 459 struct segmf_data *data = seg->s_data;
460 460
461 461 *vpp = VTOCVP(data->vp);
462 462 return (0);
↓ open down ↓ |
462 lines elided |
↑ open up ↑ |
463 463 }
464 464
465 465 /*ARGSUSED*/
466 466 static int
467 467 segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
468 468 {
469 469 return (0);
470 470 }
471 471
472 472 /*ARGSUSED*/
473 -static void
474 -segmf_dump(struct seg *seg)
475 -{}
476 -
477 -/*ARGSUSED*/
478 473 static int
479 474 segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
480 475 struct page ***ppp, enum lock_type type, enum seg_rw rw)
481 476 {
482 477 return (ENOTSUP);
483 478 }
484 479
485 480 static int
486 481 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
487 482 {
488 483 struct segmf_data *data = seg->s_data;
489 484
490 485 memid->val[0] = (uintptr_t)VTOCVP(data->vp);
491 486 memid->val[1] = (uintptr_t)seg_page(seg, addr);
492 487 return (0);
493 488 }
494 489
/*
 * Add a set of contiguous foreign MFNs to the segment. soft-locking them. The
 * pre-faulting is necessary due to live migration; in particular we must
 * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
 * later on a bad MFN. Whilst this isn't necessary for the other MMAP
 * ioctl()s, we lock them too, as they should be transitory.
 *
 * Returns 0 on success, EINVAL for a foreign segment or domid mismatch,
 * EACCES for dom0/self, or the decoded fault code on mapping failure.
 */
int
segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
    pgcnt_t pgcnt, domid_t domid)
{
	struct segmf_data *data = seg->s_data;
	pgcnt_t base;
	faultcode_t fc;
	pgcnt_t i;
	int error = 0;

	if (seg->s_ops != &segmf_ops)
		return (EINVAL);

	/*
	 * Don't mess with dom0.
	 *
	 * Only allow the domid to be set once for the segment.
	 * After that attempts to add mappings to this segment for
	 * other domains explicitly fails.
	 */

	if (domid == 0 || domid == DOMID_SELF)
		return (EACCES);

	mutex_enter(&data->lock);

	/* first caller claims the segment's domid */
	if (data->domid == 0)
		data->domid = domid;

	if (data->domid != domid) {
		error = EINVAL;
		goto out;
	}

	base = seg_page(seg, addr);

	/* mark the slots, then fault the whole range in (soft-locked) */
	for (i = 0; i < pgcnt; i++) {
		data->map[base + i].t_type = SEGMF_MAP_MFN;
		data->map[base + i].u.m.m_mfn = mfn++;
	}

	fc = segmf_fault_range(seg->s_as->a_hat, seg, addr,
	    pgcnt * MMU_PAGESIZE, F_SOFTLOCK, S_OTHER);

	if (fc != 0) {
		/* segmf_fault_range() already unlocked partial progress */
		error = fc_decode(fc);
		for (i = 0; i < pgcnt; i++) {
			data->map[base + i].t_type = SEGMF_MAP_EMPTY;
		}
	}

out:
	mutex_exit(&data->lock);
	return (error);
}
557 552
558 553 int
559 554 segmf_add_grefs(struct seg *seg, caddr_t addr, uint_t flags,
560 555 grant_ref_t *grefs, uint_t cnt, domid_t domid)
561 556 {
562 557 struct segmf_data *data;
563 558 segmf_map_t *map;
564 559 faultcode_t fc;
565 560 uint_t idx;
566 561 uint_t i;
567 562 int e;
568 563
569 564 if (seg->s_ops != &segmf_ops)
570 565 return (EINVAL);
571 566
572 567 /*
573 568 * Don't mess with dom0.
574 569 *
575 570 * Only allow the domid to be set once for the segment.
576 571 * After that attempts to add mappings to this segment for
577 572 * other domains explicitly fails.
578 573 */
579 574
580 575 if (domid == 0 || domid == DOMID_SELF)
581 576 return (EACCES);
582 577
583 578 data = seg->s_data;
584 579 idx = seg_page(seg, addr);
585 580 map = &data->map[idx];
586 581 e = 0;
587 582
588 583 mutex_enter(&data->lock);
589 584
590 585 if (data->domid == 0)
591 586 data->domid = domid;
592 587
593 588 if (data->domid != domid) {
594 589 e = EINVAL;
595 590 goto out;
596 591 }
597 592
598 593 /* store away the grefs passed in then fault in the pages */
599 594 for (i = 0; i < cnt; i++) {
600 595 map[i].t_type = SEGMF_MAP_GREF;
601 596 map[i].u.g.g_gref = grefs[i];
602 597 map[i].u.g.g_handle = 0;
603 598 map[i].u.g.g_flags = 0;
604 599 if (flags & SEGMF_GREF_WR) {
605 600 map[i].u.g.g_flags |= SEGMF_GFLAGS_WR;
606 601 }
607 602 }
608 603 fc = segmf_fault_gref_range(seg, addr, cnt);
609 604 if (fc != 0) {
610 605 e = fc_decode(fc);
611 606 for (i = 0; i < cnt; i++) {
612 607 data->map[i].t_type = SEGMF_MAP_EMPTY;
613 608 }
614 609 }
615 610
616 611 out:
617 612 mutex_exit(&data->lock);
618 613 return (e);
619 614 }
620 615
/*
 * Release up to SEGMF_MAX_GREFS grant-reference mappings starting at
 * 'addr'.  Every slot in the range is marked empty regardless; only the
 * slots that were actually mapped are batched into a single
 * GNTTABOP_unmap_grant_ref hypercall.  Returns 0 on success, -1 on a
 * too-large count or a failed hypercall.
 */
int
segmf_release_grefs(struct seg *seg, caddr_t addr, uint_t cnt)
{
	gnttab_unmap_grant_ref_t mapop[SEGMF_MAX_GREFS];
	struct segmf_data *data;
	segmf_map_t *map;
	uint_t idx;
	long e;
	int i;
	int n;


	if (cnt > SEGMF_MAX_GREFS) {
		return (-1);
	}

	idx = seg_page(seg, addr);
	data = seg->s_data;
	map = &data->map[idx];

	bzero(mapop, sizeof (gnttab_unmap_grant_ref_t) * cnt);

	/*
	 * for each entry which isn't empty and is currently mapped,
	 * set it up for an unmap then mark them empty.
	 */
	n = 0;
	for (i = 0; i < cnt; i++) {
		ASSERT(map[i].t_type != SEGMF_MAP_MFN);
		if ((map[i].t_type == SEGMF_MAP_GREF) &&
		    (map[i].u.g.g_flags & SEGMF_GFLAGS_MAPPED)) {
			mapop[n].handle = map[i].u.g.g_handle;
			mapop[n].host_addr = map[i].u.g.g_ptep;
			mapop[n].dev_bus_addr = 0;
			n++;
		}
		/* NOTE(review): slots are emptied even if the unmap
		 * hypercall below later fails — confirm intended. */
		map[i].t_type = SEGMF_MAP_EMPTY;
	}

	/* if there's nothing to unmap, just return */
	if (n == 0) {
		return (0);
	}

	/* NOTE(review): &mapop is a pointer-to-array; same address as
	 * mapop but a different type — presumably intentional here. */
	e = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &mapop, n);
	if (e != 0) {
		return (-1);
	}

	return (0);
}
672 667
673 668
674 669 void
675 670 segmf_add_gref_pte(struct seg *seg, caddr_t addr, uint64_t pte_ma)
676 671 {
677 672 struct segmf_data *data;
678 673 uint_t idx;
679 674
680 675 idx = seg_page(seg, addr);
681 676 data = seg->s_data;
682 677
683 678 data->map[idx].u.g.g_ptep = pte_ma;
684 679 }
685 680
686 681
/*
 * Map up to SEGMF_MAX_GREFS grant references starting at 'addr' into the
 * user address space with a single batched GNTTABOP_map_grant_ref call.
 * On success the returned handles are saved for segmf_release_grefs()
 * and the slots are marked mapped.  Returns 0, -1 for a too-large count,
 * or FC_MAKE_ERR(EFAULT) on hypercall/map failure.
 */
static int
segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t cnt)
{
	gnttab_map_grant_ref_t mapop[SEGMF_MAX_GREFS];
	struct segmf_data *data;
	segmf_map_t *map;
	uint_t idx;
	int e;
	int i;


	if (cnt > SEGMF_MAX_GREFS) {
		return (-1);
	}

	data = seg->s_data;
	idx = seg_page(seg, addr);
	map = &data->map[idx];

	bzero(mapop, sizeof (gnttab_map_grant_ref_t) * cnt);

	ASSERT(map->t_type == SEGMF_MAP_GREF);

	/*
	 * map in each page passed in into the user apps AS. We do this by
	 * passing the MA of the actual pte of the mapping to the hypervisor.
	 */
	for (i = 0; i < cnt; i++) {
		mapop[i].host_addr = map[i].u.g.g_ptep;
		mapop[i].dom = data->domid;
		mapop[i].ref = map[i].u.g.g_gref;
		mapop[i].flags = GNTMAP_host_map | GNTMAP_application_map |
		    GNTMAP_contains_pte;
		if (!(map[i].u.g.g_flags & SEGMF_GFLAGS_WR)) {
			mapop[i].flags |= GNTMAP_readonly;
		}
	}
	e = xen_map_gref(GNTTABOP_map_grant_ref, mapop, cnt, B_TRUE);
	/*
	 * Only the first status is checked here; the loop below asserts
	 * the rest — presumably xen_map_gref() guarantees all-or-nothing
	 * on success.  TODO(review): confirm.
	 */
	if ((e != 0) || (mapop[0].status != GNTST_okay)) {
		return (FC_MAKE_ERR(EFAULT));
	}

	/* save handle for segmf_release_grefs() and mark it as mapped */
	for (i = 0; i < cnt; i++) {
		ASSERT(mapop[i].status == GNTST_okay);
		map[i].u.g.g_handle = mapop[i].handle;
		map[i].u.g.g_flags |= SEGMF_GFLAGS_MAPPED;
	}

	return (0);
}
738 733
/*
 * Segment driver entry points.  The .dump op is deliberately left NULL:
 * the segment framework treats a NULL dump op as a no-op, so the former
 * empty segmf_dump() stub is unnecessary.
 */
static struct seg_ops segmf_ops = {
	.dup		= segmf_dup,
	.unmap		= segmf_unmap,
	.free		= segmf_free,
	.fault		= segmf_fault,
	.faulta		= segmf_faulta,
	.setprot	= segmf_setprot,
	.checkprot	= segmf_checkprot,
	.kluster	= segmf_kluster,
	.sync		= segmf_sync,
	.incore		= segmf_incore,
	.lockop		= segmf_lockop,
	.getprot	= segmf_getprot,
	.getoffset	= segmf_getoffset,
	.gettype	= segmf_gettype,
	.getvp		= segmf_getvp,
	.advise		= segmf_advise,
	.pagelock	= segmf_pagelock,
	.getmemid	= segmf_getmemid,
};
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX