Print this page
6149 use NULL capable segop as a shorthand for no-capabilities
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86xpv/vm/seg_mf.c
+++ new/usr/src/uts/i86xpv/vm/seg_mf.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Machine frame segment driver. This segment driver allows dom0 processes to
29 29 * map pages of other domains or Xen (e.g. during save/restore). ioctl()s on
30 30 * the privcmd driver provide the MFN values backing each mapping, and we map
31 31 * them into the process's address space at this time. Demand-faulting is not
32 32 * supported by this driver due to the requirements upon some of the ioctl()s.
33 33 */
34 34
35 35
36 36 #include <sys/types.h>
37 37 #include <sys/systm.h>
38 38 #include <sys/vmsystm.h>
39 39 #include <sys/mman.h>
40 40 #include <sys/errno.h>
41 41 #include <sys/kmem.h>
42 42 #include <sys/cmn_err.h>
43 43 #include <sys/vnode.h>
44 44 #include <sys/conf.h>
45 45 #include <sys/debug.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/hypervisor.h>
48 48
49 49 #include <vm/page.h>
50 50 #include <vm/hat.h>
51 51 #include <vm/as.h>
52 52 #include <vm/seg.h>
53 53
54 54 #include <vm/hat_pte.h>
55 55 #include <vm/hat_i86.h>
56 56 #include <vm/seg_mf.h>
57 57
58 58 #include <sys/fs/snode.h>
59 59
60 60 #define VTOCVP(vp) (VTOS(vp)->s_commonvp)
61 61
62 62 typedef struct segmf_mfn_s {
63 63 mfn_t m_mfn;
64 64 } segmf_mfn_t;
65 65
66 66 /* g_flags */
67 67 #define SEGMF_GFLAGS_WR 0x1
68 68 #define SEGMF_GFLAGS_MAPPED 0x2
69 69 typedef struct segmf_gref_s {
70 70 uint64_t g_ptep;
71 71 grant_ref_t g_gref;
72 72 uint32_t g_flags;
73 73 grant_handle_t g_handle;
74 74 } segmf_gref_t;
75 75
76 76 typedef union segmf_mu_u {
77 77 segmf_mfn_t m;
78 78 segmf_gref_t g;
79 79 } segmf_mu_t;
80 80
81 81 typedef enum {
82 82 SEGMF_MAP_EMPTY = 0,
83 83 SEGMF_MAP_MFN,
84 84 SEGMF_MAP_GREF
85 85 } segmf_map_type_t;
86 86
87 87 typedef struct segmf_map_s {
88 88 segmf_map_type_t t_type;
89 89 segmf_mu_t u;
90 90 } segmf_map_t;
91 91
92 92 struct segmf_data {
93 93 kmutex_t lock;
94 94 struct vnode *vp;
95 95 uchar_t prot;
96 96 uchar_t maxprot;
97 97 size_t softlockcnt;
98 98 domid_t domid;
99 99 segmf_map_t *map;
100 100 };
101 101
102 102 static struct seg_ops segmf_ops;
103 103
104 104 static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);
105 105
106 106 static struct segmf_data *
107 107 segmf_data_zalloc(struct seg *seg)
108 108 {
109 109 struct segmf_data *data = kmem_zalloc(sizeof (*data), KM_SLEEP);
110 110
111 111 mutex_init(&data->lock, "segmf.lock", MUTEX_DEFAULT, NULL);
112 112 seg->s_ops = &segmf_ops;
113 113 seg->s_data = data;
114 114 return (data);
115 115 }
116 116
117 117 int
118 118 segmf_create(struct seg *seg, void *args)
119 119 {
120 120 struct segmf_crargs *a = args;
121 121 struct segmf_data *data;
122 122 struct as *as = seg->s_as;
123 123 pgcnt_t i, npages = seg_pages(seg);
124 124 int error;
125 125
126 126 hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
127 127
128 128 data = segmf_data_zalloc(seg);
129 129 data->vp = specfind(a->dev, VCHR);
130 130 data->prot = a->prot;
131 131 data->maxprot = a->maxprot;
132 132
133 133 data->map = kmem_alloc(npages * sizeof (segmf_map_t), KM_SLEEP);
134 134 for (i = 0; i < npages; i++) {
135 135 data->map[i].t_type = SEGMF_MAP_EMPTY;
136 136 }
137 137
138 138 error = VOP_ADDMAP(VTOCVP(data->vp), 0, as, seg->s_base, seg->s_size,
139 139 data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);
140 140
141 141 if (error != 0)
142 142 hat_unload(as->a_hat,
143 143 seg->s_base, seg->s_size, HAT_UNLOAD_UNMAP);
144 144 return (error);
145 145 }
146 146
147 147 /*
148 148 * Duplicate a seg and return new segment in newseg.
149 149 */
150 150 static int
151 151 segmf_dup(struct seg *seg, struct seg *newseg)
152 152 {
153 153 struct segmf_data *data = seg->s_data;
154 154 struct segmf_data *ndata;
155 155 pgcnt_t npages = seg_pages(newseg);
156 156 size_t sz;
157 157
158 158 ndata = segmf_data_zalloc(newseg);
159 159
160 160 VN_HOLD(data->vp);
161 161 ndata->vp = data->vp;
162 162 ndata->prot = data->prot;
163 163 ndata->maxprot = data->maxprot;
164 164 ndata->domid = data->domid;
165 165
166 166 sz = npages * sizeof (segmf_map_t);
167 167 ndata->map = kmem_alloc(sz, KM_SLEEP);
168 168 bcopy(data->map, ndata->map, sz);
169 169
170 170 return (VOP_ADDMAP(VTOCVP(ndata->vp), 0, newseg->s_as,
171 171 newseg->s_base, newseg->s_size, ndata->prot, ndata->maxprot,
172 172 MAP_SHARED, CRED(), NULL));
173 173 }
174 174
175 175 /*
176 176 * We only support unmapping the whole segment, and we automatically unlock
177 177 * what we previously soft-locked.
178 178 */
179 179 static int
180 180 segmf_unmap(struct seg *seg, caddr_t addr, size_t len)
181 181 {
182 182 struct segmf_data *data = seg->s_data;
183 183 offset_t off;
184 184
185 185 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
186 186 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
187 187 panic("segmf_unmap");
188 188
189 189 if (addr != seg->s_base || len != seg->s_size)
190 190 return (ENOTSUP);
191 191
192 192 hat_unload(seg->s_as->a_hat, addr, len,
193 193 HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);
194 194
195 195 off = (offset_t)seg_page(seg, addr);
196 196
197 197 ASSERT(data->vp != NULL);
198 198
199 199 (void) VOP_DELMAP(VTOCVP(data->vp), off, seg->s_as, addr, len,
200 200 data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);
201 201
202 202 seg_free(seg);
203 203 return (0);
204 204 }
205 205
206 206 static void
207 207 segmf_free(struct seg *seg)
208 208 {
209 209 struct segmf_data *data = seg->s_data;
210 210 pgcnt_t npages = seg_pages(seg);
211 211
212 212 kmem_free(data->map, npages * sizeof (segmf_map_t));
213 213 VN_RELE(data->vp);
214 214 mutex_destroy(&data->lock);
215 215 kmem_free(data, sizeof (*data));
216 216 }
217 217
/* when > 0, print a debug line per fault and decrement (debugging aid) */
static int segmf_faultpage_debug = 0;
/*
 * Map a single page of a foreign domain's memory at 'addr'. The page must
 * already have an MFN recorded in the segment's map (SEGMF_MAP_MFN).
 * For F_SOFTLOCK faults the mapping is also locked and softlockcnt is
 * bumped; on hypercall failure all of that is rolled back.
 * Returns 0 on success or FC_MAKE_ERR(EFAULT) on failure.
 */
/*ARGSUSED*/
static int
segmf_faultpage(struct hat *hat, struct seg *seg, caddr_t addr,
    enum fault_type type, uint_t prot)
{
	struct segmf_data *data = seg->s_data;
	uint_t hat_flags = HAT_LOAD_NOCONSIST;
	mfn_t mfn;
	x86pte_t pte;
	segmf_map_t *map;
	uint_t idx;


	idx = seg_page(seg, addr);
	map = &data->map[idx];
	ASSERT(map->t_type == SEGMF_MAP_MFN);

	mfn = map->u.m.m_mfn;

	if (type == F_SOFTLOCK) {
		/* softlockcnt is protected by freemem_lock */
		mutex_enter(&freemem_lock);
		data->softlockcnt++;
		mutex_exit(&freemem_lock);
		hat_flags |= HAT_LOAD_LOCK;
	} else
		hat_flags |= HAT_LOAD;

	if (segmf_faultpage_debug > 0) {
		uprintf("segmf_faultpage: addr %p domid %x mfn %lx prot %x\n",
		    (void *)addr, data->domid, mfn, prot);
		segmf_faultpage_debug--;
	}

	/*
	 * Ask the HAT to load a throwaway mapping to page zero, then
	 * overwrite it with our foreign domain mapping. It gets removed
	 * later via hat_unload()
	 */
	hat_devload(hat, addr, MMU_PAGESIZE, (pfn_t)0,
	    PROT_READ | HAT_UNORDERED_OK, hat_flags);

	/* build the foreign PTE by hand; write access only if requested */
	pte = mmu_ptob((x86pte_t)mfn) | PT_VALID | PT_USER | PT_FOREIGN;
	if (prot & PROT_WRITE)
		pte |= PT_WRITABLE;

	if (HYPERVISOR_update_va_mapping_otherdomain((uintptr_t)addr, pte,
	    UVMF_INVLPG | UVMF_ALL, data->domid) != 0) {
		/* hypercall failed: undo the throwaway mapping and the lock */
		hat_flags = HAT_UNLOAD_UNMAP;

		if (type == F_SOFTLOCK) {
			hat_flags |= HAT_UNLOAD_UNLOCK;
			mutex_enter(&freemem_lock);
			data->softlockcnt--;
			mutex_exit(&freemem_lock);
		}

		hat_unload(hat, addr, MMU_PAGESIZE, hat_flags);
		return (FC_MAKE_ERR(EFAULT));
	}

	return (0);
}
281 281
282 282 static int
283 283 seg_rw_to_prot(enum seg_rw rw)
284 284 {
285 285 switch (rw) {
286 286 case S_READ:
287 287 return (PROT_READ);
288 288 case S_WRITE:
289 289 return (PROT_WRITE);
290 290 case S_EXEC:
291 291 return (PROT_EXEC);
292 292 case S_OTHER:
293 293 default:
294 294 break;
295 295 }
296 296 return (PROT_READ | PROT_WRITE | PROT_EXEC);
297 297 }
298 298
/*
 * Undo the soft locks taken by segmf_faultpage() over [addr, addr + len):
 * unlock the HAT mappings, decrement the segment's soft-lock count, and
 * wake any thread waiting in as_unmap() once the count drops to zero.
 */
static void
segmf_softunlock(struct hat *hat, struct seg *seg, caddr_t addr, size_t len)
{
	struct segmf_data *data = seg->s_data;

	hat_unlock(hat, addr, len);

	/* softlockcnt is protected by freemem_lock (see segmf_faultpage) */
	mutex_enter(&freemem_lock);
	ASSERT(data->softlockcnt >= btopr(len));
	data->softlockcnt -= btopr(len);
	mutex_exit(&freemem_lock);

	if (data->softlockcnt == 0) {
		struct as *as = seg->s_as;

		if (AS_ISUNMAPWAIT(as)) {
			/* re-check under a_contents to avoid a lost wakeup */
			mutex_enter(&as->a_contents);
			if (AS_ISUNMAPWAIT(as)) {
				AS_CLRUNMAPWAIT(as);
				cv_broadcast(&as->a_cv);
			}
			mutex_exit(&as->a_contents);
		}
	}
}
324 324
325 325 static int
326 326 segmf_fault_range(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
327 327 enum fault_type type, enum seg_rw rw)
328 328 {
329 329 struct segmf_data *data = seg->s_data;
330 330 int error = 0;
331 331 caddr_t a;
332 332
333 333 if ((data->prot & seg_rw_to_prot(rw)) == 0)
334 334 return (FC_PROT);
335 335
336 336 /* loop over the address range handling each fault */
337 337
338 338 for (a = addr; a < addr + len; a += PAGESIZE) {
339 339 error = segmf_faultpage(hat, seg, a, type, data->prot);
340 340 if (error != 0)
341 341 break;
342 342 }
343 343
344 344 if (error != 0 && type == F_SOFTLOCK) {
345 345 size_t done = (size_t)(a - addr);
346 346
347 347 /*
348 348 * Undo what's been done so far.
349 349 */
350 350 if (done > 0)
351 351 segmf_softunlock(hat, seg, addr, done);
352 352 }
353 353
354 354 return (error);
355 355 }
356 356
357 357 /*
358 358 * We never demand-fault for seg_mf.
359 359 */
360 360 /*ARGSUSED*/
361 361 static int
362 362 segmf_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
363 363 enum fault_type type, enum seg_rw rw)
364 364 {
365 365 return (FC_MAKE_ERR(EFAULT));
366 366 }
367 367
368 368 /*ARGSUSED*/
369 369 static int
370 370 segmf_faulta(struct seg *seg, caddr_t addr)
371 371 {
372 372 return (0);
373 373 }
374 374
375 375 /*ARGSUSED*/
376 376 static int
377 377 segmf_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
378 378 {
379 379 return (EINVAL);
380 380 }
381 381
382 382 /*ARGSUSED*/
383 383 static int
384 384 segmf_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
385 385 {
386 386 return (EINVAL);
387 387 }
388 388
389 389 /*ARGSUSED*/
390 390 static int
391 391 segmf_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
392 392 {
393 393 return (-1);
394 394 }
395 395
396 396 /*ARGSUSED*/
397 397 static int
398 398 segmf_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
399 399 {
400 400 return (0);
401 401 }
402 402
403 403 /*
404 404 * XXPV Hmm. Should we say that mf mapping are "in core?"
405 405 */
406 406
407 407 /*ARGSUSED*/
408 408 static size_t
409 409 segmf_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
410 410 {
411 411 size_t v;
412 412
413 413 for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len;
414 414 len -= PAGESIZE, v += PAGESIZE)
415 415 *vec++ = 1;
416 416 return (v);
417 417 }
418 418
419 419 /*ARGSUSED*/
420 420 static int
421 421 segmf_lockop(struct seg *seg, caddr_t addr,
422 422 size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
423 423 {
424 424 return (0);
425 425 }
426 426
427 427 static int
428 428 segmf_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
429 429 {
430 430 struct segmf_data *data = seg->s_data;
431 431 pgcnt_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
432 432
433 433 if (pgno != 0) {
434 434 do
435 435 protv[--pgno] = data->prot;
436 436 while (pgno != 0)
437 437 ;
438 438 }
439 439 return (0);
440 440 }
441 441
442 442 static u_offset_t
443 443 segmf_getoffset(struct seg *seg, caddr_t addr)
444 444 {
445 445 return (addr - seg->s_base);
446 446 }
447 447
448 448 /*ARGSUSED*/
449 449 static int
450 450 segmf_gettype(struct seg *seg, caddr_t addr)
451 451 {
452 452 return (MAP_SHARED);
453 453 }
454 454
455 455 /*ARGSUSED1*/
456 456 static int
457 457 segmf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
458 458 {
459 459 struct segmf_data *data = seg->s_data;
460 460
461 461 *vpp = VTOCVP(data->vp);
462 462 return (0);
463 463 }
464 464
465 465 /*ARGSUSED*/
466 466 static int
467 467 segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
468 468 {
469 469 return (0);
470 470 }
471 471
472 472 /*ARGSUSED*/
473 473 static void
474 474 segmf_dump(struct seg *seg)
475 475 {}
476 476
477 477 /*ARGSUSED*/
478 478 static int
479 479 segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
480 480 struct page ***ppp, enum lock_type type, enum seg_rw rw)
481 481 {
482 482 return (ENOTSUP);
483 483 }
484 484
485 485 /*ARGSUSED*/
486 486 static int
487 487 segmf_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
488 488 {
489 489 return (ENOTSUP);
490 490 }
491 491
↓ open down ↓ |
491 lines elided |
↑ open up ↑ |
492 492 static int
493 493 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
494 494 {
495 495 struct segmf_data *data = seg->s_data;
496 496
497 497 memid->val[0] = (uintptr_t)VTOCVP(data->vp);
498 498 memid->val[1] = (uintptr_t)seg_page(seg, addr);
499 499 return (0);
500 500 }
501 501
502 -/*ARGSUSED*/
503 -static int
504 -segmf_capable(struct seg *seg, segcapability_t capability)
505 -{
506 - return (0);
507 -}
508 -
/*
 * Add a set of contiguous foreign MFNs to the segment. soft-locking them. The
 * pre-faulting is necessary due to live migration; in particular we must
 * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
 * later on a bad MFN. Whilst this isn't necessary for the other MMAP
 * ioctl()s, we lock them too, as they should be transitory.
 *
 * Returns 0 on success, EINVAL/EACCES on a bad request, or the decoded
 * fault code if the pre-fault fails (in which case the map entries are
 * rolled back to empty).
 */
int
segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
    pgcnt_t pgcnt, domid_t domid)
{
	struct segmf_data *data = seg->s_data;
	pgcnt_t base;
	faultcode_t fc;
	pgcnt_t i;
	int error = 0;

	/* only operate on segments owned by this driver */
	if (seg->s_ops != &segmf_ops)
		return (EINVAL);

	/*
	 * Don't mess with dom0.
	 *
	 * Only allow the domid to be set once for the segment.
	 * After that attempts to add mappings to this segment for
	 * other domains explicitly fails.
	 */

	if (domid == 0 || domid == DOMID_SELF)
		return (EACCES);

	mutex_enter(&data->lock);

	if (data->domid == 0)
		data->domid = domid;

	if (data->domid != domid) {
		error = EINVAL;
		goto out;
	}

	base = seg_page(seg, addr);

	/* record the contiguous MFN for each page in the range */
	for (i = 0; i < pgcnt; i++) {
		data->map[base + i].t_type = SEGMF_MAP_MFN;
		data->map[base + i].u.m.m_mfn = mfn++;
	}

	/* pre-fault and soft-lock the whole range now (see comment above) */
	fc = segmf_fault_range(seg->s_as->a_hat, seg, addr,
	    pgcnt * MMU_PAGESIZE, F_SOFTLOCK, S_OTHER);

	if (fc != 0) {
		error = fc_decode(fc);
		/* roll back: mark the entries empty again */
		for (i = 0; i < pgcnt; i++) {
			data->map[base + i].t_type = SEGMF_MAP_EMPTY;
		}
	}

out:
	mutex_exit(&data->lock);
	return (error);
}
571 564
572 565 int
573 566 segmf_add_grefs(struct seg *seg, caddr_t addr, uint_t flags,
574 567 grant_ref_t *grefs, uint_t cnt, domid_t domid)
575 568 {
576 569 struct segmf_data *data;
577 570 segmf_map_t *map;
578 571 faultcode_t fc;
579 572 uint_t idx;
580 573 uint_t i;
581 574 int e;
582 575
583 576 if (seg->s_ops != &segmf_ops)
584 577 return (EINVAL);
585 578
586 579 /*
587 580 * Don't mess with dom0.
588 581 *
589 582 * Only allow the domid to be set once for the segment.
590 583 * After that attempts to add mappings to this segment for
591 584 * other domains explicitly fails.
592 585 */
593 586
594 587 if (domid == 0 || domid == DOMID_SELF)
595 588 return (EACCES);
596 589
597 590 data = seg->s_data;
598 591 idx = seg_page(seg, addr);
599 592 map = &data->map[idx];
600 593 e = 0;
601 594
602 595 mutex_enter(&data->lock);
603 596
604 597 if (data->domid == 0)
605 598 data->domid = domid;
606 599
607 600 if (data->domid != domid) {
608 601 e = EINVAL;
609 602 goto out;
610 603 }
611 604
612 605 /* store away the grefs passed in then fault in the pages */
613 606 for (i = 0; i < cnt; i++) {
614 607 map[i].t_type = SEGMF_MAP_GREF;
615 608 map[i].u.g.g_gref = grefs[i];
616 609 map[i].u.g.g_handle = 0;
617 610 map[i].u.g.g_flags = 0;
618 611 if (flags & SEGMF_GREF_WR) {
619 612 map[i].u.g.g_flags |= SEGMF_GFLAGS_WR;
620 613 }
621 614 }
622 615 fc = segmf_fault_gref_range(seg, addr, cnt);
623 616 if (fc != 0) {
624 617 e = fc_decode(fc);
625 618 for (i = 0; i < cnt; i++) {
626 619 data->map[i].t_type = SEGMF_MAP_EMPTY;
627 620 }
628 621 }
629 622
630 623 out:
631 624 mutex_exit(&data->lock);
632 625 return (e);
633 626 }
634 627
635 628 int
636 629 segmf_release_grefs(struct seg *seg, caddr_t addr, uint_t cnt)
637 630 {
638 631 gnttab_unmap_grant_ref_t mapop[SEGMF_MAX_GREFS];
639 632 struct segmf_data *data;
640 633 segmf_map_t *map;
641 634 uint_t idx;
642 635 long e;
643 636 int i;
644 637 int n;
645 638
646 639
647 640 if (cnt > SEGMF_MAX_GREFS) {
648 641 return (-1);
649 642 }
650 643
651 644 idx = seg_page(seg, addr);
652 645 data = seg->s_data;
653 646 map = &data->map[idx];
654 647
655 648 bzero(mapop, sizeof (gnttab_unmap_grant_ref_t) * cnt);
656 649
657 650 /*
658 651 * for each entry which isn't empty and is currently mapped,
659 652 * set it up for an unmap then mark them empty.
660 653 */
661 654 n = 0;
662 655 for (i = 0; i < cnt; i++) {
663 656 ASSERT(map[i].t_type != SEGMF_MAP_MFN);
664 657 if ((map[i].t_type == SEGMF_MAP_GREF) &&
665 658 (map[i].u.g.g_flags & SEGMF_GFLAGS_MAPPED)) {
666 659 mapop[n].handle = map[i].u.g.g_handle;
667 660 mapop[n].host_addr = map[i].u.g.g_ptep;
668 661 mapop[n].dev_bus_addr = 0;
669 662 n++;
670 663 }
671 664 map[i].t_type = SEGMF_MAP_EMPTY;
672 665 }
673 666
674 667 /* if there's nothing to unmap, just return */
675 668 if (n == 0) {
676 669 return (0);
677 670 }
678 671
679 672 e = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &mapop, n);
680 673 if (e != 0) {
681 674 return (-1);
682 675 }
683 676
684 677 return (0);
685 678 }
686 679
687 680
688 681 void
689 682 segmf_add_gref_pte(struct seg *seg, caddr_t addr, uint64_t pte_ma)
690 683 {
691 684 struct segmf_data *data;
692 685 uint_t idx;
693 686
694 687 idx = seg_page(seg, addr);
695 688 data = seg->s_data;
696 689
697 690 data->map[idx].u.g.g_ptep = pte_ma;
698 691 }
699 692
700 693
701 694 static int
702 695 segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t cnt)
703 696 {
704 697 gnttab_map_grant_ref_t mapop[SEGMF_MAX_GREFS];
705 698 struct segmf_data *data;
706 699 segmf_map_t *map;
707 700 uint_t idx;
708 701 int e;
709 702 int i;
710 703
711 704
712 705 if (cnt > SEGMF_MAX_GREFS) {
713 706 return (-1);
714 707 }
715 708
716 709 data = seg->s_data;
717 710 idx = seg_page(seg, addr);
718 711 map = &data->map[idx];
719 712
720 713 bzero(mapop, sizeof (gnttab_map_grant_ref_t) * cnt);
721 714
722 715 ASSERT(map->t_type == SEGMF_MAP_GREF);
723 716
724 717 /*
725 718 * map in each page passed in into the user apps AS. We do this by
726 719 * passing the MA of the actual pte of the mapping to the hypervisor.
727 720 */
728 721 for (i = 0; i < cnt; i++) {
729 722 mapop[i].host_addr = map[i].u.g.g_ptep;
730 723 mapop[i].dom = data->domid;
731 724 mapop[i].ref = map[i].u.g.g_gref;
732 725 mapop[i].flags = GNTMAP_host_map | GNTMAP_application_map |
733 726 GNTMAP_contains_pte;
734 727 if (!(map[i].u.g.g_flags & SEGMF_GFLAGS_WR)) {
735 728 mapop[i].flags |= GNTMAP_readonly;
736 729 }
737 730 }
738 731 e = xen_map_gref(GNTTABOP_map_grant_ref, mapop, cnt, B_TRUE);
739 732 if ((e != 0) || (mapop[0].status != GNTST_okay)) {
740 733 return (FC_MAKE_ERR(EFAULT));
741 734 }
742 735
743 736 /* save handle for segmf_release_grefs() and mark it as mapped */
744 737 for (i = 0; i < cnt; i++) {
745 738 ASSERT(mapop[i].status == GNTST_okay);
746 739 map[i].u.g.g_handle = mapop[i].handle;
747 740 map[i].u.g.g_flags |= SEGMF_GFLAGS_MAPPED;
748 741 }
749 742
750 743 return (0);
751 744 }
752 745
/*
 * seg_mf segment operations vector. There is deliberately no .capable
 * entry: a NULL capable segop is shorthand for "no capabilities", which
 * is why the old segmf_capable() stub (always returned 0) was removed.
 */
static struct seg_ops segmf_ops = {
	.dup = segmf_dup,
	.unmap = segmf_unmap,
	.free = segmf_free,
	.fault = segmf_fault,
	.faulta = segmf_faulta,
	.setprot = segmf_setprot,
	.checkprot = segmf_checkprot,
	.kluster = segmf_kluster,
	.sync = segmf_sync,
	.incore = segmf_incore,
	.lockop = segmf_lockop,
	.getprot = segmf_getprot,
	.getoffset = segmf_getoffset,
	.gettype = segmf_gettype,
	.getvp = segmf_getvp,
	.advise = segmf_advise,
	.dump = segmf_dump,
	.pagelock = segmf_pagelock,
	.setpagesize = segmf_setpagesize,
	.getmemid = segmf_getmemid,
};
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX