6152 use NULL dump segop as a shorthand for no-op
--- old/usr/src/uts/common/vm/seg_dev.c
+++ new/usr/src/uts/common/vm/seg_dev.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 32 * The Regents of the University of California
33 33 * All Rights Reserved
34 34 *
35 35 * University Acknowledgment- Portions of this document are derived from
36 36 * software developed by the University of California, Berkeley, and its
37 37 * contributors.
38 38 */
39 39
40 40 /*
41 41 * VM - segment of a mapped device.
42 42 *
43 43 * This segment driver is used when mapping character special devices.
44 44 */
45 45
46 46 #include <sys/types.h>
47 47 #include <sys/t_lock.h>
48 48 #include <sys/sysmacros.h>
49 49 #include <sys/vtrace.h>
50 50 #include <sys/systm.h>
51 51 #include <sys/vmsystm.h>
52 52 #include <sys/mman.h>
53 53 #include <sys/errno.h>
54 54 #include <sys/kmem.h>
55 55 #include <sys/cmn_err.h>
56 56 #include <sys/vnode.h>
57 57 #include <sys/proc.h>
58 58 #include <sys/conf.h>
59 59 #include <sys/debug.h>
60 60 #include <sys/ddidevmap.h>
61 61 #include <sys/ddi_implfuncs.h>
62 62 #include <sys/lgrp.h>
63 63
64 64 #include <vm/page.h>
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_dev.h>
69 69 #include <vm/seg_kp.h>
70 70 #include <vm/seg_kmem.h>
71 71 #include <vm/vpage.h>
72 72
73 73 #include <sys/sunddi.h>
74 74 #include <sys/esunddi.h>
75 75 #include <sys/fs/snode.h>
76 76
77 77
78 78 #if DEBUG
79 79 int segdev_debug;
80 80 #define DEBUGF(level, args) { if (segdev_debug >= (level)) cmn_err args; }
81 81 #else
82 82 #define DEBUGF(level, args)
83 83 #endif
84 84
85 85 /* Default timeout for devmap context management */
86 86 #define CTX_TIMEOUT_VALUE 0
87 87
88 88 #define HOLD_DHP_LOCK(dhp) if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) \
89 89 { mutex_enter(&dhp->dh_lock); }
90 90
91 91 #define RELE_DHP_LOCK(dhp) if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) \
92 92 { mutex_exit(&dhp->dh_lock); }
93 93
94 94 #define round_down_p2(a, s) ((a) & ~((s) - 1))
95 95 #define round_up_p2(a, s) (((a) + (s) - 1) & ~((s) - 1))
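/*
 * Illustrative examples (annotation, not part of the webrev): with
 * s = 0x1000, round_down_p2(0x1234, 0x1000) == 0x1000 and
 * round_up_p2(0x1234, 0x1000) == 0x2000. Both macros assume s is a
 * power of two.
 */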
96 96
97 97 /*
98 98 * VA_PA_ALIGNED checks to see if both VA and PA are on a pgsize boundary
99 99 * VA_PA_PGSIZE_ALIGNED checks to see if VA is aligned with PA w.r.t. pgsize
100 100 */
101 101 #define VA_PA_ALIGNED(uvaddr, paddr, pgsize) \
102 102 (((uvaddr | paddr) & (pgsize - 1)) == 0)
103 103 #define VA_PA_PGSIZE_ALIGNED(uvaddr, paddr, pgsize) \
104 104 (((uvaddr ^ paddr) & (pgsize - 1)) == 0)
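/*
 * Worked example (annotation, not part of the webrev): with
 * pgsize = 0x10000, uvaddr = 0x12000 and paddr = 0x32000,
 * VA_PA_ALIGNED is false ((0x12000 | 0x32000) & 0xffff == 0x2000),
 * while VA_PA_PGSIZE_ALIGNED is true ((0x12000 ^ 0x32000) & 0xffff == 0):
 * VA and PA share the same offset within a pgsize page, so a
 * large-page mapping can still be used.
 */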
105 105
106 106 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
107 107
108 108 #define VTOCVP(vp) (VTOS(vp)->s_commonvp) /* we "know" it's an snode */
109 109
110 110 static struct devmap_ctx *devmapctx_list = NULL;
111 111 static struct devmap_softlock *devmap_slist = NULL;
112 112
113 113 /*
114 114 * mutex, vnode and page for the page of zeros we use for the trash mappings.
115 115 * One trash page is allocated on the first ddi_umem_setup call that uses it
116 116 * XXX Eventually, we may want to combine this with what segnf does when all
117 117 * hat layers implement HAT_NOFAULT.
118 118 *
119 119 * The trash page is used when the backing store for a userland mapping is
120 120 * removed but the application semantics do not take kindly to a SIGBUS.
121 121 * In that scenario, the application's pages are mapped to some dummy page
122 122 * which returns garbage on reads, and writes go into a common place.
123 123 * (Perfect for NO_FAULT semantics)
124 124 * The device driver is responsible for communicating to the app, via some
125 125 * other mechanism, that such remapping has happened and that the app
126 126 * should take corrective action.
127 127 * We can also use an anonymous memory page, as there is no requirement to
128 128 * keep the page locked; however, this complicates the fault code. RFE.
129 129 */
130 130 static struct vnode trashvp;
131 131 static struct page *trashpp;
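/*
 * Annotation (not part of the webrev): the fault-side half of this
 * scheme is the UMEM_TRASH case in segdev_faultpage() below, which
 * resolves the fault to page_pptonum(trashpp).
 */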
132 132
133 133 /* Non-pageable kernel memory is allocated from the umem_np_arena. */
134 134 static vmem_t *umem_np_arena;
135 135
136 136 /* Set the cookie to a value we know will never be a valid umem_cookie */
137 137 #define DEVMAP_DEVMEM_COOKIE ((ddi_umem_cookie_t)0x1)
138 138
139 139 /*
140 140 * Macros to check if type of devmap handle
141 141 */
142 142 #define cookie_is_devmem(c) \
143 143 ((c) == (struct ddi_umem_cookie *)DEVMAP_DEVMEM_COOKIE)
144 144
145 145 #define cookie_is_pmem(c) \
146 146 ((c) == (struct ddi_umem_cookie *)DEVMAP_PMEM_COOKIE)
147 147
148 148 #define cookie_is_kpmem(c) (!cookie_is_devmem(c) && !cookie_is_pmem(c) &&\
149 149 ((c)->type == KMEM_PAGEABLE))
150 150
151 151 #define dhp_is_devmem(dhp) \
152 152 (cookie_is_devmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
153 153
154 154 #define dhp_is_pmem(dhp) \
155 155 (cookie_is_pmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
156 156
157 157 #define dhp_is_kpmem(dhp) \
158 158 (cookie_is_kpmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
159 159
160 160 /*
161 161 * Private seg op routines.
162 162 */
163 163 static int segdev_dup(struct seg *, struct seg *);
164 164 static int segdev_unmap(struct seg *, caddr_t, size_t);
165 165 static void segdev_free(struct seg *);
166 166 static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
167 167 enum fault_type, enum seg_rw);
168 168 static faultcode_t segdev_faulta(struct seg *, caddr_t);
169 169 static int segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
170 170 static int segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
171 171 static void segdev_badop(void);
172 172 static int segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
173 173 static size_t segdev_incore(struct seg *, caddr_t, size_t, char *);
174 174 static int segdev_lockop(struct seg *, caddr_t, size_t, int, int,
175 175 ulong_t *, size_t);
176 176 static int segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
177 177 static u_offset_t segdev_getoffset(struct seg *, caddr_t);
178 178 static int segdev_gettype(struct seg *, caddr_t);
179 179 static int segdev_getvp(struct seg *, caddr_t, struct vnode **);
180 180 static int segdev_advise(struct seg *, caddr_t, size_t, uint_t);
181 -static void segdev_dump(struct seg *);
182 181 static int segdev_pagelock(struct seg *, caddr_t, size_t,
183 182 struct page ***, enum lock_type, enum seg_rw);
184 183 static int segdev_getmemid(struct seg *, caddr_t, memid_t *);
185 184
186 185 /*
187 186 * XXX this struct is used by rootnex_map_fault to identify
188 187 * the segment it has been passed. So if you make it
189 188 * "static" you'll need to fix rootnex_map_fault.
190 189 */
191 190 struct seg_ops segdev_ops = {
192 191 .dup = segdev_dup,
193 192 .unmap = segdev_unmap,
194 193 .free = segdev_free,
195 194 .fault = segdev_fault,
196 195 .faulta = segdev_faulta,
197 196 .setprot = segdev_setprot,
198 197 .checkprot = segdev_checkprot,
199 198 .kluster = (int (*)())segdev_badop,
200 199 .sync = segdev_sync,
201 200 .incore = segdev_incore,
202 201 .lockop = segdev_lockop,
203 202 .getprot = segdev_getprot,
204 203 .getoffset = segdev_getoffset,
205 204 .gettype = segdev_gettype,
206 205 .getvp = segdev_getvp,
207 206 .advise = segdev_advise,
208 - .dump = segdev_dump,
209 207 .pagelock = segdev_pagelock,
210 208 .getmemid = segdev_getmemid,
211 209 };
212 210
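/*
 * Editor's sketch (annotation, not part of the webrev): with
 * segdev_dump removed above, segdev_ops simply leaves its .dump
 * member NULL. The point of 6152 is that the generic segop dispatch
 * can treat a NULL dump op as a no-op, so drivers with nothing to
 * dump no longer need a stub. A minimal sketch of such a dispatcher,
 * assuming a seg_ops layout like the one above; the real dispatch
 * code in the common seg layer may differ in detail:
 */
#if 0	/* illustrative only */
static void
segop_dump_sketch(struct seg *seg)
{
	/* A NULL dump segop is shorthand for "nothing to dump". */
	if (seg->s_ops->dump == NULL)
		return;

	(*seg->s_ops->dump)(seg);
}
#endif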
213 211 /*
214 212 * Private segdev support routines
215 213 */
216 214 static struct segdev_data *sdp_alloc(void);
217 215
218 216 static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
219 217 size_t, enum seg_rw);
220 218
221 219 static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
222 220 struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);
223 221
224 222 static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
225 223 size_t, enum fault_type, enum seg_rw, devmap_handle_t *);
226 224
227 225 static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
228 226 static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
229 227 static void devmap_softlock_rele(devmap_handle_t *);
230 228 static void devmap_ctx_rele(devmap_handle_t *);
231 229
232 230 static void devmap_ctxto(void *);
233 231
234 232 static devmap_handle_t *devmap_find_handle(devmap_handle_t *dhp_head,
235 233 caddr_t addr);
236 234
237 235 static ulong_t devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
238 236 ulong_t *opfn, ulong_t *pagesize);
239 237
240 238 static void free_devmap_handle(devmap_handle_t *dhp);
241 239
242 240 static int devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
243 241 struct seg *newseg);
244 242
245 243 static devmap_handle_t *devmap_handle_unmap(devmap_handle_t *dhp);
246 244
247 245 static void devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len);
248 246
249 247 static void devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr);
250 248
251 249 static int devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
252 250 offset_t off, size_t len, uint_t flags);
253 251
254 252 static void devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len,
255 253 caddr_t addr, size_t *llen, caddr_t *laddr);
256 254
257 255 static void devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len);
258 256
259 257 static void *devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag);
260 258 static void devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size);
261 259
262 260 static void *devmap_umem_alloc_np(size_t size, size_t flags);
263 261 static void devmap_umem_free_np(void *addr, size_t size);
264 262
265 263 /*
266 264 * routines to lock and unlock underlying segkp segment for
267 265 * KMEM_PAGEABLE type cookies.
268 266 */
269 267 static faultcode_t acquire_kpmem_lock(struct ddi_umem_cookie *, size_t);
270 268 static void release_kpmem_lock(struct ddi_umem_cookie *, size_t);
271 269
272 270 /*
273 271 * Routines to synchronize F_SOFTLOCK and F_INVAL faults for
274 272 * drivers with devmap_access callbacks
275 273 */
276 274 static int devmap_softlock_enter(struct devmap_softlock *, size_t,
277 275 enum fault_type);
278 276 static void devmap_softlock_exit(struct devmap_softlock *, size_t,
279 277 enum fault_type);
280 278
281 279 static kmutex_t devmapctx_lock;
282 280
283 281 static kmutex_t devmap_slock;
284 282
285 283 /*
286 284 * Initialize the thread callbacks and thread private data.
287 285 */
288 286 static struct devmap_ctx *
289 287 devmap_ctxinit(dev_t dev, ulong_t id)
290 288 {
291 289 struct devmap_ctx *devctx;
292 290 struct devmap_ctx *tmp;
293 291 dev_info_t *dip;
294 292
295 293 tmp = kmem_zalloc(sizeof (struct devmap_ctx), KM_SLEEP);
296 294
297 295 mutex_enter(&devmapctx_lock);
298 296
299 297 dip = e_ddi_hold_devi_by_dev(dev, 0);
300 298 ASSERT(dip != NULL);
301 299 ddi_release_devi(dip);
302 300
303 301 for (devctx = devmapctx_list; devctx != NULL; devctx = devctx->next)
304 302 if ((devctx->dip == dip) && (devctx->id == id))
305 303 break;
306 304
307 305 if (devctx == NULL) {
308 306 devctx = tmp;
309 307 devctx->dip = dip;
310 308 devctx->id = id;
311 309 mutex_init(&devctx->lock, NULL, MUTEX_DEFAULT, NULL);
312 310 cv_init(&devctx->cv, NULL, CV_DEFAULT, NULL);
313 311 devctx->next = devmapctx_list;
314 312 devmapctx_list = devctx;
315 313 } else
316 314 kmem_free(tmp, sizeof (struct devmap_ctx));
317 315
318 316 mutex_enter(&devctx->lock);
319 317 devctx->refcnt++;
320 318 mutex_exit(&devctx->lock);
321 319 mutex_exit(&devmapctx_lock);
322 320
323 321 return (devctx);
324 322 }
325 323
326 324 /*
327 325 * Timeout callback called if a CPU has not given up the device context
328 326 * within dhp->dh_timeout_length ticks
329 327 */
330 328 static void
331 329 devmap_ctxto(void *data)
332 330 {
333 331 struct devmap_ctx *devctx = data;
334 332
335 333 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_CTXTO,
336 334 "devmap_ctxto:timeout expired, devctx=%p", (void *)devctx);
337 335 mutex_enter(&devctx->lock);
338 336 /*
339 337 * Set oncpu = 0 so the next mapping trying to get the device context
340 338 * can.
341 339 */
342 340 devctx->oncpu = 0;
343 341 devctx->timeout = 0;
344 342 cv_signal(&devctx->cv);
345 343 mutex_exit(&devctx->lock);
346 344 }
347 345
348 346 /*
349 347 * Create a device segment.
350 348 */
351 349 int
352 350 segdev_create(struct seg *seg, void *argsp)
353 351 {
354 352 struct segdev_data *sdp;
355 353 struct segdev_crargs *a = (struct segdev_crargs *)argsp;
356 354 devmap_handle_t *dhp = (devmap_handle_t *)a->devmap_data;
357 355 int error;
358 356
359 357 /*
360 358 * Since the address space is "write" locked, we
361 359 * don't need the segment lock to protect "segdev" data.
362 360 */
363 361 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
364 362
365 363 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
366 364
367 365 sdp = sdp_alloc();
368 366
369 367 sdp->mapfunc = a->mapfunc;
370 368 sdp->offset = a->offset;
371 369 sdp->prot = a->prot;
372 370 sdp->maxprot = a->maxprot;
373 371 sdp->type = a->type;
374 372 sdp->pageprot = 0;
375 373 sdp->softlockcnt = 0;
376 374 sdp->vpage = NULL;
377 375
378 376 if (sdp->mapfunc == NULL)
379 377 sdp->devmap_data = dhp;
380 378 else
381 379 sdp->devmap_data = dhp = NULL;
382 380
383 381 sdp->hat_flags = a->hat_flags;
384 382 sdp->hat_attr = a->hat_attr;
385 383
386 384 /*
387 385 * Currently, hat_flags supports only HAT_LOAD_NOCONSIST
388 386 */
389 387 ASSERT(!(sdp->hat_flags & ~HAT_LOAD_NOCONSIST));
390 388
391 389 /*
392 390 * Hold shadow vnode -- segdev only deals with
393 391 * character (VCHR) devices. We use the common
394 392 * vp to hang pages on.
395 393 */
396 394 sdp->vp = specfind(a->dev, VCHR);
397 395 ASSERT(sdp->vp != NULL);
398 396
399 397 seg->s_ops = &segdev_ops;
400 398 seg->s_data = sdp;
401 399
402 400 while (dhp != NULL) {
403 401 dhp->dh_seg = seg;
404 402 dhp = dhp->dh_next;
405 403 }
406 404
407 405 /*
408 406 * Inform the vnode of the new mapping.
409 407 */
410 408 /*
411 409 * It is ok to pass sdp->maxprot to ADDMAP rather than the
412 410 * dhp-specific maxprot because spec_addmap does not use maxprot.
413 411 */
414 412 error = VOP_ADDMAP(VTOCVP(sdp->vp), sdp->offset,
415 413 seg->s_as, seg->s_base, seg->s_size,
416 414 sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
417 415
418 416 if (error != 0) {
419 417 sdp->devmap_data = NULL;
420 418 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
421 419 HAT_UNLOAD_UNMAP);
422 420 } else {
423 421 /*
424 422 * Mappings of /dev/null don't count towards the VSZ of a
425 423 * process. Mappings of /dev/null have no mapping type.
426 424 */
427 425 if ((segop_gettype(seg, seg->s_base) & (MAP_SHARED |
428 426 MAP_PRIVATE)) == 0) {
429 427 seg->s_as->a_resvsize -= seg->s_size;
430 428 }
431 429 }
432 430
433 431 return (error);
434 432 }
435 433
436 434 static struct segdev_data *
437 435 sdp_alloc(void)
438 436 {
439 437 struct segdev_data *sdp;
440 438
441 439 sdp = kmem_zalloc(sizeof (struct segdev_data), KM_SLEEP);
442 440 rw_init(&sdp->lock, NULL, RW_DEFAULT, NULL);
443 441
444 442 return (sdp);
445 443 }
446 444
447 445 /*
448 446 * Duplicate seg and return new segment in newseg.
449 447 */
450 448 static int
451 449 segdev_dup(struct seg *seg, struct seg *newseg)
452 450 {
453 451 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
454 452 struct segdev_data *newsdp;
455 453 devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
456 454 size_t npages;
457 455 int ret;
458 456
459 457 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DUP,
460 458 "segdev_dup:start dhp=%p, seg=%p", (void *)dhp, (void *)seg);
461 459
462 460 DEBUGF(3, (CE_CONT, "segdev_dup: dhp %p seg %p\n",
463 461 (void *)dhp, (void *)seg));
464 462
465 463 /*
466 464 * Since the address space is "write" locked, we
467 465 * don't need the segment lock to protect "segdev" data.
468 466 */
469 467 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
470 468
471 469 newsdp = sdp_alloc();
472 470
473 471 newseg->s_ops = seg->s_ops;
474 472 newseg->s_data = (void *)newsdp;
475 473
476 474 VN_HOLD(sdp->vp);
477 475 newsdp->vp = sdp->vp;
478 476 newsdp->mapfunc = sdp->mapfunc;
479 477 newsdp->offset = sdp->offset;
480 478 newsdp->pageprot = sdp->pageprot;
481 479 newsdp->prot = sdp->prot;
482 480 newsdp->maxprot = sdp->maxprot;
483 481 newsdp->type = sdp->type;
484 482 newsdp->hat_attr = sdp->hat_attr;
485 483 newsdp->hat_flags = sdp->hat_flags;
486 484 newsdp->softlockcnt = 0;
487 485
488 486 /*
489 487 * Initialize per page data if the segment we are
490 488 * dup'ing has per page information.
491 489 */
492 490 npages = seg_pages(newseg);
493 491
494 492 if (sdp->vpage != NULL) {
495 493 size_t nbytes = vpgtob(npages);
496 494
497 495 newsdp->vpage = kmem_zalloc(nbytes, KM_SLEEP);
498 496 bcopy(sdp->vpage, newsdp->vpage, nbytes);
499 497 } else
500 498 newsdp->vpage = NULL;
501 499
502 500 /*
503 501 * duplicate devmap handles
504 502 */
505 503 if (dhp != NULL) {
506 504 ret = devmap_handle_dup(dhp,
507 505 (devmap_handle_t **)&newsdp->devmap_data, newseg);
508 506 if (ret != 0) {
509 507 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DUP_CK1,
510 508 "segdev_dup:ret1 ret=%x, dhp=%p seg=%p",
511 509 ret, (void *)dhp, (void *)seg);
512 510 DEBUGF(1, (CE_CONT,
513 511 "segdev_dup: ret %x dhp %p seg %p\n",
514 512 ret, (void *)dhp, (void *)seg));
515 513 return (ret);
516 514 }
517 515 }
518 516
519 517 /*
520 518 * Inform the common vnode of the new mapping.
521 519 */
522 520 return (VOP_ADDMAP(VTOCVP(newsdp->vp),
523 521 newsdp->offset, newseg->s_as,
524 522 newseg->s_base, newseg->s_size, newsdp->prot,
525 523 newsdp->maxprot, sdp->type, CRED(), NULL));
526 524 }
527 525
528 526 /*
529 527 * duplicate devmap handles
530 528 */
531 529 static int
532 530 devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
533 531 struct seg *newseg)
534 532 {
535 533 devmap_handle_t *newdhp_save = NULL;
536 534 devmap_handle_t *newdhp = NULL;
537 535 struct devmap_callback_ctl *callbackops;
538 536
539 537 while (dhp != NULL) {
540 538 newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
541 539
542 540 /* Need to lock the original dhp while copying if REMAP */
543 541 HOLD_DHP_LOCK(dhp);
544 542 bcopy(dhp, newdhp, sizeof (devmap_handle_t));
545 543 RELE_DHP_LOCK(dhp);
546 544 newdhp->dh_seg = newseg;
547 545 newdhp->dh_next = NULL;
548 546 if (newdhp_save != NULL)
549 547 newdhp_save->dh_next = newdhp;
550 548 else
551 549 *new_dhp = newdhp;
552 550 newdhp_save = newdhp;
553 551
554 552 callbackops = &newdhp->dh_callbackops;
555 553
556 554 if (dhp->dh_softlock != NULL)
557 555 newdhp->dh_softlock = devmap_softlock_init(
558 556 newdhp->dh_dev,
559 557 (ulong_t)callbackops->devmap_access);
560 558 if (dhp->dh_ctx != NULL)
561 559 newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
562 560 (ulong_t)callbackops->devmap_access);
563 561
564 562 /*
565 563 * Initialize dh_lock if we want to do remap.
566 564 */
567 565 if (newdhp->dh_flags & DEVMAP_ALLOW_REMAP) {
568 566 mutex_init(&newdhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
569 567 newdhp->dh_flags |= DEVMAP_LOCK_INITED;
570 568 }
571 569
572 570 if (callbackops->devmap_dup != NULL) {
573 571 int ret;
574 572
575 573 /*
576 574 * Call the dup callback so that the driver can
577 575 * duplicate its private data.
578 576 */
579 577 ret = (*callbackops->devmap_dup)(dhp, dhp->dh_pvtp,
580 578 (devmap_cookie_t *)newdhp, &newdhp->dh_pvtp);
581 579
582 580 if (ret != 0) {
583 581 /*
584 582 * We want to free up this segment as the driver
585 583 * has indicated that we can't dup it. But we
586 584 * don't want to call the driver's devmap_unmap
587 585 * callback function, as the driver does not
588 586 * think this segment exists. The caller of
589 587 * devmap_dup will call seg_free on newseg
590 588 * as it was the caller that allocated the
591 589 * segment.
592 590 */
593 591 DEBUGF(1, (CE_CONT, "devmap_handle_dup ERROR: "
594 592 "newdhp %p dhp %p\n", (void *)newdhp,
595 593 (void *)dhp));
596 594 callbackops->devmap_unmap = NULL;
597 595 return (ret);
598 596 }
599 597 }
600 598
601 599 dhp = dhp->dh_next;
602 600 }
603 601
604 602 return (0);
605 603 }
606 604
607 605 /*
608 606 * Split a segment at addr for length len.
609 607 */
610 608 /*ARGSUSED*/
611 609 static int
612 610 segdev_unmap(struct seg *seg, caddr_t addr, size_t len)
613 611 {
614 612 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
615 613 register struct segdev_data *nsdp;
616 614 register struct seg *nseg;
617 615 register size_t opages; /* old segment size in pages */
618 616 register size_t npages; /* new segment size in pages */
619 617 register size_t dpages; /* pages being deleted (unmapped) */
620 618 register size_t nbytes;
621 619 devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
622 620 devmap_handle_t *dhpp;
623 621 devmap_handle_t *newdhp;
624 622 struct devmap_callback_ctl *callbackops;
625 623 caddr_t nbase;
626 624 offset_t off;
627 625 ulong_t nsize;
628 626 size_t mlen, sz;
629 627
630 628 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP,
631 629 "segdev_unmap:start dhp=%p, seg=%p addr=%p len=%lx",
632 630 (void *)dhp, (void *)seg, (void *)addr, len);
633 631
634 632 DEBUGF(3, (CE_CONT, "segdev_unmap: dhp %p seg %p addr %p len %lx\n",
635 633 (void *)dhp, (void *)seg, (void *)addr, len));
636 634
637 635 /*
638 636 * Since the address space is "write" locked, we
639 637 * don't need the segment lock to protect "segdev" data.
640 638 */
641 639 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
642 640
643 641 if ((sz = sdp->softlockcnt) > 0) {
644 642 /*
645 643 * Fail the unmap if pages are SOFTLOCKed through this mapping.
646 644 * softlockcnt is protected from change by the as write lock.
647 645 */
648 646 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK1,
649 647 "segdev_unmap:error softlockcnt = %ld", sz);
650 648 DEBUGF(1, (CE_CONT, "segdev_unmap: softlockcnt %ld\n", sz));
651 649 return (EAGAIN);
652 650 }
653 651
654 652 /*
655 653 * Check for bad sizes
656 654 */
657 655 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
658 656 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
659 657 panic("segdev_unmap");
660 658
661 659 if (dhp != NULL) {
662 660 devmap_handle_t *tdhp;
663 661 /*
664 662 * If large page size was used in hat_devload(),
665 663 * the same page size must be used in hat_unload().
666 664 */
667 665 dhpp = tdhp = devmap_find_handle(dhp, addr);
668 666 while (tdhp != NULL) {
669 667 if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
670 668 break;
671 669 }
672 670 tdhp = tdhp->dh_next;
673 671 }
674 672 if (tdhp != NULL) { /* found a dhp using large pages */
675 673 size_t slen = len;
676 674 size_t mlen;
677 675 size_t soff;
678 676
679 677 soff = (ulong_t)(addr - dhpp->dh_uvaddr);
680 678 while (slen != 0) {
681 679 mlen = MIN(slen, (dhpp->dh_len - soff));
682 680 hat_unload(seg->s_as->a_hat, dhpp->dh_uvaddr,
683 681 dhpp->dh_len, HAT_UNLOAD_UNMAP);
684 682 dhpp = dhpp->dh_next;
685 683 ASSERT(slen >= mlen);
686 684 slen -= mlen;
687 685 soff = 0;
688 686 }
689 687 } else
690 688 hat_unload(seg->s_as->a_hat, addr, len,
691 689 HAT_UNLOAD_UNMAP);
692 690 } else {
693 691 /*
694 692 * Unload any hardware translations in the range
695 693 * to be taken out.
696 694 */
697 695 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD_UNMAP);
698 696 }
699 697
700 698 /*
701 699 * get the user offset which will be used in the driver callbacks
702 700 */
703 701 off = sdp->offset + (offset_t)(addr - seg->s_base);
704 702
705 703 /*
706 704 * Inform the vnode of the unmapping.
707 705 */
708 706 ASSERT(sdp->vp != NULL);
709 707 (void) VOP_DELMAP(VTOCVP(sdp->vp), off, seg->s_as, addr, len,
710 708 sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
711 709
712 710 /*
713 711 * Check for entire segment
714 712 */
715 713 if (addr == seg->s_base && len == seg->s_size) {
716 714 seg_free(seg);
717 715 return (0);
718 716 }
719 717
720 718 opages = seg_pages(seg);
721 719 dpages = btop(len);
722 720 npages = opages - dpages;
723 721
724 722 /*
725 723 * Check for beginning of segment
726 724 */
727 725 if (addr == seg->s_base) {
728 726 if (sdp->vpage != NULL) {
729 727 register struct vpage *ovpage;
730 728
731 729 ovpage = sdp->vpage; /* keep pointer to vpage */
732 730
733 731 nbytes = vpgtob(npages);
734 732 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
735 733 bcopy(&ovpage[dpages], sdp->vpage, nbytes);
736 734
737 735 /* free up old vpage */
738 736 kmem_free(ovpage, vpgtob(opages));
739 737 }
740 738
741 739 /*
742 740 * free devmap handles from the beginning of the mapping.
743 741 */
744 742 if (dhp != NULL)
745 743 devmap_handle_unmap_head(dhp, len);
746 744
747 745 sdp->offset += (offset_t)len;
748 746
749 747 seg->s_base += len;
750 748 seg->s_size -= len;
751 749
752 750 return (0);
753 751 }
754 752
755 753 /*
756 754 * Check for end of segment
757 755 */
758 756 if (addr + len == seg->s_base + seg->s_size) {
759 757 if (sdp->vpage != NULL) {
760 758 register struct vpage *ovpage;
761 759
762 760 ovpage = sdp->vpage; /* keep pointer to vpage */
763 761
764 762 nbytes = vpgtob(npages);
765 763 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
766 764 bcopy(ovpage, sdp->vpage, nbytes);
767 765
768 766 /* free up old vpage */
769 767 kmem_free(ovpage, vpgtob(opages));
770 768 }
771 769 seg->s_size -= len;
772 770
773 771 /*
774 772 * free devmap handles from addr to the end of the mapping.
775 773 */
776 774 if (dhp != NULL)
777 775 devmap_handle_unmap_tail(dhp, addr);
778 776
779 777 return (0);
780 778 }
781 779
782 780 /*
783 781 * The section to go is in the middle of the segment,
784 782 * have to make it into two segments. nseg is made for
785 783 * the high end while seg is cut down at the low end.
786 784 */
787 785 nbase = addr + len; /* new seg base */
788 786 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
789 787 seg->s_size = addr - seg->s_base; /* shrink old seg */
790 788 nseg = seg_alloc(seg->s_as, nbase, nsize);
791 789 if (nseg == NULL)
792 790 panic("segdev_unmap seg_alloc");
793 791
794 792 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK2,
795 793 "segdev_unmap: seg=%p nseg=%p", (void *)seg, (void *)nseg);
796 794 DEBUGF(3, (CE_CONT, "segdev_unmap: segdev_dup seg %p nseg %p\n",
797 795 (void *)seg, (void *)nseg));
798 796 nsdp = sdp_alloc();
799 797
800 798 nseg->s_ops = seg->s_ops;
801 799 nseg->s_data = (void *)nsdp;
802 800
803 801 VN_HOLD(sdp->vp);
804 802 nsdp->mapfunc = sdp->mapfunc;
805 803 nsdp->offset = sdp->offset + (offset_t)(nseg->s_base - seg->s_base);
806 804 nsdp->vp = sdp->vp;
807 805 nsdp->pageprot = sdp->pageprot;
808 806 nsdp->prot = sdp->prot;
809 807 nsdp->maxprot = sdp->maxprot;
810 808 nsdp->type = sdp->type;
811 809 nsdp->hat_attr = sdp->hat_attr;
812 810 nsdp->hat_flags = sdp->hat_flags;
813 811 nsdp->softlockcnt = 0;
814 812
815 813 /*
816 814 * Initialize per page data if the segment we are
817 815 * dup'ing has per page information.
818 816 */
819 817 if (sdp->vpage != NULL) {
820 818 /* need to split vpage into two arrays */
821 819 register size_t nnbytes;
822 820 register size_t nnpages;
823 821 register struct vpage *ovpage;
824 822
825 823 ovpage = sdp->vpage; /* keep pointer to vpage */
826 824
827 825 npages = seg_pages(seg); /* seg has shrunk */
828 826 nbytes = vpgtob(npages);
829 827 nnpages = seg_pages(nseg);
830 828 nnbytes = vpgtob(nnpages);
831 829
832 830 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
833 831 bcopy(ovpage, sdp->vpage, nbytes);
834 832
835 833 nsdp->vpage = kmem_alloc(nnbytes, KM_SLEEP);
836 834 bcopy(&ovpage[npages + dpages], nsdp->vpage, nnbytes);
837 835
838 836 /* free up old vpage */
839 837 kmem_free(ovpage, vpgtob(opages));
840 838 } else
841 839 nsdp->vpage = NULL;
842 840
843 841 /*
844 842 * unmap dhps.
845 843 */
846 844 if (dhp == NULL) {
847 845 nsdp->devmap_data = NULL;
848 846 return (0);
849 847 }
850 848 while (dhp != NULL) {
851 849 callbackops = &dhp->dh_callbackops;
852 850 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK3,
853 851 "segdev_unmap: dhp=%p addr=%p", dhp, addr);
854 852 DEBUGF(3, (CE_CONT, "unmap: dhp %p addr %p uvaddr %p len %lx\n",
855 853 (void *)dhp, (void *)addr,
856 854 (void *)dhp->dh_uvaddr, dhp->dh_len));
857 855
858 856 if (addr == (dhp->dh_uvaddr + dhp->dh_len)) {
859 857 dhpp = dhp->dh_next;
860 858 dhp->dh_next = NULL;
861 859 dhp = dhpp;
862 860 } else if (addr > (dhp->dh_uvaddr + dhp->dh_len)) {
863 861 dhp = dhp->dh_next;
864 862 } else if (addr > dhp->dh_uvaddr &&
865 863 (addr + len) < (dhp->dh_uvaddr + dhp->dh_len)) {
866 864 /*
867 865 * <addr, addr+len> is enclosed by dhp.
868 866 * create a newdhp that begins at addr+len and
869 867 * ends at dhp->dh_uvaddr+dhp->dh_len.
870 868 */
871 869 newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
872 870 HOLD_DHP_LOCK(dhp);
873 871 bcopy(dhp, newdhp, sizeof (devmap_handle_t));
874 872 RELE_DHP_LOCK(dhp);
875 873 newdhp->dh_seg = nseg;
876 874 newdhp->dh_next = dhp->dh_next;
877 875 if (dhp->dh_softlock != NULL)
878 876 newdhp->dh_softlock = devmap_softlock_init(
879 877 newdhp->dh_dev,
880 878 (ulong_t)callbackops->devmap_access);
881 879 if (dhp->dh_ctx != NULL)
882 880 newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
883 881 (ulong_t)callbackops->devmap_access);
884 882 if (newdhp->dh_flags & DEVMAP_LOCK_INITED) {
885 883 mutex_init(&newdhp->dh_lock,
886 884 NULL, MUTEX_DEFAULT, NULL);
887 885 }
888 886 if (callbackops->devmap_unmap != NULL)
889 887 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
890 888 off, len, dhp, &dhp->dh_pvtp,
891 889 newdhp, &newdhp->dh_pvtp);
892 890 mlen = len + (addr - dhp->dh_uvaddr);
893 891 devmap_handle_reduce_len(newdhp, mlen);
894 892 nsdp->devmap_data = newdhp;
895 893 /* XX Changing len should recalculate LARGE flag */
896 894 dhp->dh_len = addr - dhp->dh_uvaddr;
897 895 dhpp = dhp->dh_next;
898 896 dhp->dh_next = NULL;
899 897 dhp = dhpp;
900 898 } else if ((addr > dhp->dh_uvaddr) &&
901 899 ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len))) {
902 900 mlen = dhp->dh_len + dhp->dh_uvaddr - addr;
903 901 /*
904 902 * <addr, addr+len> spans over dhps.
905 903 */
906 904 if (callbackops->devmap_unmap != NULL)
907 905 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
908 906 off, mlen, (devmap_cookie_t *)dhp,
909 907 &dhp->dh_pvtp, NULL, NULL);
910 908 /* XX Changing len should recalculate LARGE flag */
911 909 dhp->dh_len = addr - dhp->dh_uvaddr;
912 910 dhpp = dhp->dh_next;
913 911 dhp->dh_next = NULL;
914 912 dhp = dhpp;
915 913 nsdp->devmap_data = dhp;
916 914 } else if ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len)) {
917 915 /*
918 916 * dhp is enclosed by <addr, addr+len>.
919 917 */
920 918 dhp->dh_seg = nseg;
921 919 nsdp->devmap_data = dhp;
922 920 dhp = devmap_handle_unmap(dhp);
923 921 nsdp->devmap_data = dhp; /* XX redundant? */
924 922 } else if (((addr + len) > dhp->dh_uvaddr) &&
925 923 ((addr + len) < (dhp->dh_uvaddr + dhp->dh_len))) {
926 924 mlen = addr + len - dhp->dh_uvaddr;
927 925 if (callbackops->devmap_unmap != NULL)
928 926 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
929 927 dhp->dh_uoff, mlen, NULL,
930 928 NULL, dhp, &dhp->dh_pvtp);
931 929 devmap_handle_reduce_len(dhp, mlen);
932 930 nsdp->devmap_data = dhp;
933 931 dhp->dh_seg = nseg;
934 932 dhp = dhp->dh_next;
935 933 } else {
936 934 dhp->dh_seg = nseg;
937 935 dhp = dhp->dh_next;
938 936 }
939 937 }
940 938 return (0);
941 939 }
942 940
943 941 /*
944 942 * Utility function handles reducing the length of a devmap handle during unmap
945 943 * Note that it is only used for unmapping the front portion of the handle,
946 944 * i.e., we bump the offset/pfn etc. up by len.
947 945 * Do not use if reducing length at the tail.
948 946 */
949 947 static void
950 948 devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len)
951 949 {
952 950 struct ddi_umem_cookie *cp;
953 951 struct devmap_pmem_cookie *pcp;
954 952 /*
955 953 * adjust devmap handle fields
956 954 */
957 955 ASSERT(len < dhp->dh_len);
958 956
959 957 /* Make sure only page-aligned changes are done */
960 958 ASSERT((len & PAGEOFFSET) == 0);
961 959
962 960 dhp->dh_len -= len;
963 961 dhp->dh_uoff += (offset_t)len;
964 962 dhp->dh_roff += (offset_t)len;
965 963 dhp->dh_uvaddr += len;
966 964 /* Need to grab dhp lock if REMAP */
967 965 HOLD_DHP_LOCK(dhp);
968 966 cp = dhp->dh_cookie;
969 967 if (!(dhp->dh_flags & DEVMAP_MAPPING_INVALID)) {
970 968 if (cookie_is_devmem(cp)) {
971 969 dhp->dh_pfn += btop(len);
972 970 } else if (cookie_is_pmem(cp)) {
973 971 pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
974 972 ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
975 973 dhp->dh_roff < ptob(pcp->dp_npages));
976 974 } else {
977 975 ASSERT(dhp->dh_roff < cp->size);
978 976 ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
979 977 dhp->dh_cvaddr < (cp->cvaddr + cp->size));
980 978 ASSERT((dhp->dh_cvaddr + len) <=
981 979 (cp->cvaddr + cp->size));
982 980
983 981 dhp->dh_cvaddr += len;
984 982 }
985 983 }
986 984 /* XXX - Should recalculate the DEVMAP_FLAG_LARGE after changes */
987 985 RELE_DHP_LOCK(dhp);
988 986 }
989 987
990 988 /*
991 989 * Free devmap handle, dhp.
992 990 * Return the next devmap handle on the linked list.
993 991 */
994 992 static devmap_handle_t *
995 993 devmap_handle_unmap(devmap_handle_t *dhp)
996 994 {
997 995 struct devmap_callback_ctl *callbackops = &dhp->dh_callbackops;
998 996 struct segdev_data *sdp = (struct segdev_data *)dhp->dh_seg->s_data;
999 997 devmap_handle_t *dhpp = (devmap_handle_t *)sdp->devmap_data;
1000 998
1001 999 ASSERT(dhp != NULL);
1002 1000
1003 1001 /*
1004 1002 * before we free up dhp, call the driver's devmap_unmap entry point
1005 1003 * to free resources allocated for this dhp.
1006 1004 */
1007 1005 if (callbackops->devmap_unmap != NULL) {
1008 1006 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp, dhp->dh_uoff,
1009 1007 dhp->dh_len, NULL, NULL, NULL, NULL);
1010 1008 }
1011 1009
1012 1010 if (dhpp == dhp) { /* releasing first dhp, change sdp data */
1013 1011 sdp->devmap_data = dhp->dh_next;
1014 1012 } else {
1015 1013 while (dhpp->dh_next != dhp) {
1016 1014 dhpp = dhpp->dh_next;
1017 1015 }
1018 1016 dhpp->dh_next = dhp->dh_next;
1019 1017 }
1020 1018 dhpp = dhp->dh_next; /* return value is next dhp in chain */
1021 1019
1022 1020 if (dhp->dh_softlock != NULL)
1023 1021 devmap_softlock_rele(dhp);
1024 1022
1025 1023 if (dhp->dh_ctx != NULL)
1026 1024 devmap_ctx_rele(dhp);
1027 1025
1028 1026 if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
1029 1027 mutex_destroy(&dhp->dh_lock);
1030 1028 }
1031 1029 kmem_free(dhp, sizeof (devmap_handle_t));
1032 1030
1033 1031 return (dhpp);
1034 1032 }
1035 1033
1036 1034 /*
1037 1035 * Free complete devmap handles from dhp for len bytes
1038 1036 * dhp can be either the first handle or a subsequent handle
1039 1037 */
1040 1038 static void
1041 1039 devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len)
1042 1040 {
1043 1041 struct devmap_callback_ctl *callbackops;
1044 1042
1045 1043 /*
1046 1044 * free the devmap handles covered by len.
1047 1045 */
1048 1046 while (len >= dhp->dh_len) {
1049 1047 len -= dhp->dh_len;
1050 1048 dhp = devmap_handle_unmap(dhp);
1051 1049 }
1052 1050 if (len != 0) { /* partial unmap at head of first remaining dhp */
1053 1051 callbackops = &dhp->dh_callbackops;
1054 1052
1055 1053 /*
1056 1054 * Call the unmap callback so the driver can make
1057 1055 * adjustments to its private data.
1058 1056 */
1059 1057 if (callbackops->devmap_unmap != NULL)
1060 1058 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
1061 1059 dhp->dh_uoff, len, NULL, NULL, dhp, &dhp->dh_pvtp);
1062 1060 devmap_handle_reduce_len(dhp, len);
1063 1061 }
1064 1062 }
1065 1063
1066 1064 /*
1067 1065 * Free devmap handles to truncate the mapping after addr
1068 1066 * RFE: Simpler to pass in dhp pointing at correct dhp (avoid find again)
1069 1067 * Also could then use the routine in middle unmap case too
1070 1068 */
1071 1069 static void
1072 1070 devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr)
1073 1071 {
1074 1072 register struct seg *seg = dhp->dh_seg;
1075 1073 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1076 1074 register devmap_handle_t *dhph = (devmap_handle_t *)sdp->devmap_data;
1077 1075 struct devmap_callback_ctl *callbackops;
1078 1076 register devmap_handle_t *dhpp;
1079 1077 size_t maplen;
1080 1078 ulong_t off;
1081 1079 size_t len;
1082 1080
1083 1081 maplen = (size_t)(addr - dhp->dh_uvaddr);
1084 1082 dhph = devmap_find_handle(dhph, addr);
1085 1083
1086 1084 while (dhph != NULL) {
1087 1085 if (maplen == 0) {
1088 1086 dhph = devmap_handle_unmap(dhph);
1089 1087 } else {
1090 1088 callbackops = &dhph->dh_callbackops;
1091 1089 len = dhph->dh_len - maplen;
1092 1090 off = (ulong_t)sdp->offset + (addr - seg->s_base);
1093 1091 /*
1094 1092 * Call the unmap callback so the driver
1095 1093 * can make adjustments to its private data.
1096 1094 */
1097 1095 if (callbackops->devmap_unmap != NULL)
1098 1096 (*callbackops->devmap_unmap)(dhph,
1099 1097 dhph->dh_pvtp, off, len,
1100 1098 (devmap_cookie_t *)dhph,
1101 1099 &dhph->dh_pvtp, NULL, NULL);
1102 1100 /* XXX Reducing len needs to recalculate LARGE flag */
1103 1101 dhph->dh_len = maplen;
1104 1102 maplen = 0;
1105 1103 dhpp = dhph->dh_next;
1106 1104 dhph->dh_next = NULL;
1107 1105 dhph = dhpp;
1108 1106 }
1109 1107 } /* end while */
1110 1108 }
1111 1109
1112 1110 /*
1113 1111 * Free a segment.
1114 1112 */
1115 1113 static void
1116 1114 segdev_free(struct seg *seg)
1117 1115 {
1118 1116 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1119 1117 devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
1120 1118
1121 1119 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FREE,
1122 1120 "segdev_free: dhp=%p seg=%p", (void *)dhp, (void *)seg);
1123 1121 DEBUGF(3, (CE_CONT, "segdev_free: dhp %p seg %p\n",
1124 1122 (void *)dhp, (void *)seg));
1125 1123
1126 1124 /*
1127 1125 * Since the address space is "write" locked, we
1128 1126 * don't need the segment lock to protect "segdev" data.
1129 1127 */
1130 1128 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1131 1129
1132 1130 while (dhp != NULL)
1133 1131 dhp = devmap_handle_unmap(dhp);
1134 1132
1135 1133 VN_RELE(sdp->vp);
1136 1134 if (sdp->vpage != NULL)
1137 1135 kmem_free(sdp->vpage, vpgtob(seg_pages(seg)));
1138 1136
1139 1137 rw_destroy(&sdp->lock);
1140 1138 kmem_free(sdp, sizeof (*sdp));
1141 1139 }
1142 1140
1143 1141 static void
1144 1142 free_devmap_handle(devmap_handle_t *dhp)
1145 1143 {
1146 1144 register devmap_handle_t *dhpp;
1147 1145
1148 1146 /*
1149 1147 * free up devmap handle
1150 1148 */
1151 1149 while (dhp != NULL) {
1152 1150 dhpp = dhp->dh_next;
1153 1151 if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
1154 1152 mutex_destroy(&dhp->dh_lock);
1155 1153 }
1156 1154
1157 1155 if (dhp->dh_softlock != NULL)
1158 1156 devmap_softlock_rele(dhp);
1159 1157
1160 1158 if (dhp->dh_ctx != NULL)
1161 1159 devmap_ctx_rele(dhp);
1162 1160
1163 1161 kmem_free(dhp, sizeof (devmap_handle_t));
1164 1162 dhp = dhpp;
1165 1163 }
1166 1164 }
1167 1165
1168 1166 /*
1169 1167 * routines to lock and unlock underlying segkp segment for
1170 1168 * KMEM_PAGEABLE type cookies.
1171 1169 * segkp only allows a single pending F_SOFTLOCK
1172 1170 * we keep track of the number of locks in the cookie so we can
1173 1171 * have multiple pending faults and manage the calls to segkp.
1174 1172 * RFE: if segkp supports either pagelock or can support multiple
1175 1173 * calls to F_SOFTLOCK, then these routines can go away.
1176 1174 * If pagelock, segdev_faultpage can fault on a page-by-page basis,
1177 1175 * which simplifies the code quite a bit.
1178 1176 * If multiple calls are allowed but not partial ranges, then the need for
1179 1177 * cookie->lock and the locked count goes away; code can call as_fault directly
1180 1178 */
1181 1179 static faultcode_t
1182 1180 acquire_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
1183 1181 {
1184 1182 int err = 0;
1185 1183 ASSERT(cookie_is_kpmem(cookie));
1186 1184 /*
1187 1185 * Fault in pages in segkp with F_SOFTLOCK.
1188 1186 * We want to hold the lock until all pages have been loaded.
1189 1187 * segkp only allows a single caller to hold SOFTLOCK, so the cookie
1190 1188 * holds a count so we don't call into segkp multiple times
1191 1189 */
1192 1190 mutex_enter(&cookie->lock);
1193 1191
1194 1192 /*
1195 1193 * Check for overflow in locked field
1196 1194 */
1197 1195 if ((UINT32_MAX - cookie->locked) < npages) {
1198 1196 err = FC_MAKE_ERR(ENOMEM);
1199 1197 } else if (cookie->locked == 0) {
1200 1198 /* First time locking */
1201 1199 err = as_fault(kas.a_hat, &kas, cookie->cvaddr,
1202 1200 cookie->size, F_SOFTLOCK, PROT_READ|PROT_WRITE);
1203 1201 }
1204 1202 if (!err) {
1205 1203 cookie->locked += npages;
1206 1204 }
1207 1205 mutex_exit(&cookie->lock);
1208 1206 return (err);
1209 1207 }
1210 1208
1211 1209 static void
1212 1210 release_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
1213 1211 {
1214 1212 mutex_enter(&cookie->lock);
1215 1213 ASSERT(cookie_is_kpmem(cookie));
1216 1214 ASSERT(cookie->locked >= npages);
1217 1215 cookie->locked -= (uint_t)npages;
1218 1216 if (cookie->locked == 0) {
1219 1217 /* Last unlock */
1220 1218 if (as_fault(kas.a_hat, &kas, cookie->cvaddr,
1221 1219 cookie->size, F_SOFTUNLOCK, PROT_READ|PROT_WRITE))
1222 1220 panic("segdev releasing kpmem lock %p", (void *)cookie);
1223 1221 }
1224 1222 mutex_exit(&cookie->lock);
1225 1223 }
1226 1224
1227 1225 /*
1228 1226 * Routines to synchronize F_SOFTLOCK and F_INVAL faults for
1229 1227 * drivers with devmap_access callbacks
1230 1228 * slock->softlocked basically works like a rw lock
1231 1229 * -ve counts => F_SOFTLOCK in progress
1232 1230 * +ve counts => F_INVAL/F_PROT in progress
1233 1231 * We allow only one F_SOFTLOCK at a time
1234 1232 * but can have multiple pending F_INVAL/F_PROT calls
1235 1233 *
1236 1234 * This routine waits using cv_wait_sig so killing processes is more graceful
1237 1235 * Returns EINTR if coming out of this routine due to a signal, 0 otherwise
1238 1236 */
1239 1237 static int devmap_softlock_enter(
1240 1238 struct devmap_softlock *slock,
1241 1239 size_t npages,
1242 1240 enum fault_type type)
1243 1241 {
1244 1242 if (npages == 0)
1245 1243 return (0);
1246 1244 mutex_enter(&(slock->lock));
1247 1245 switch (type) {
1248 1246 case F_SOFTLOCK :
1249 1247 while (slock->softlocked) {
1250 1248 if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
1251 1249 /* signalled */
1252 1250 mutex_exit(&(slock->lock));
1253 1251 return (EINTR);
1254 1252 }
1255 1253 }
1256 1254 slock->softlocked -= npages; /* -ve count => locked */
1257 1255 break;
1258 1256 case F_INVAL :
1259 1257 case F_PROT :
1260 1258 while (slock->softlocked < 0)
1261 1259 if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
1262 1260 /* signalled */
1263 1261 mutex_exit(&(slock->lock));
1264 1262 return (EINTR);
1265 1263 }
1266 1264 slock->softlocked += npages; /* +ve count => f_invals */
1267 1265 break;
1268 1266 default:
1269 1267 ASSERT(0);
1270 1268 }
1271 1269 mutex_exit(&(slock->lock));
1272 1270 return (0);
1273 1271 }
1274 1272
1275 1273 static void devmap_softlock_exit(
1276 1274 struct devmap_softlock *slock,
1277 1275 size_t npages,
1278 1276 enum fault_type type)
1279 1277 {
1280 1278 if (slock == NULL)
1281 1279 return;
1282 1280 mutex_enter(&(slock->lock));
1283 1281 switch (type) {
1284 1282 case F_SOFTLOCK :
1285 1283 ASSERT(-slock->softlocked >= npages);
1286 1284 slock->softlocked += npages; /* -ve count is softlocked */
1287 1285 if (slock->softlocked == 0)
1288 1286 cv_signal(&slock->cv);
1289 1287 break;
1290 1288 case F_INVAL :
1291 1289 case F_PROT:
1292 1290 ASSERT(slock->softlocked >= npages);
1293 1291 slock->softlocked -= npages;
1294 1292 if (slock->softlocked == 0)
1295 1293 cv_signal(&slock->cv);
1296 1294 break;
1297 1295 default:
1298 1296 ASSERT(0);
1299 1297 }
1300 1298 mutex_exit(&(slock->lock));
1301 1299 }
1302 1300
1303 1301 /*
1304 1302 * Do a F_SOFTUNLOCK call over the range requested.
1305 1303 * The range must have already been F_SOFTLOCK'ed.
1306 1304 * The segment lock should be held, (but not the segment private lock?)
1307 1305 * The softunlock code below does not adjust for large page sizes; it
1308 1306 * assumes the caller already did any addr/len adjustments for
1309 1307 * pagesize mappings before calling.
1310 1308 */
1311 1309 /*ARGSUSED*/
1312 1310 static void
1313 1311 segdev_softunlock(
1314 1312 struct hat *hat, /* the hat */
1315 1313 struct seg *seg, /* seg_dev of interest */
1316 1314 caddr_t addr, /* base address of range */
1317 1315 size_t len, /* number of bytes */
1318 1316 enum seg_rw rw) /* type of access at fault */
1319 1317 {
1320 1318 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1321 1319 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
1322 1320
1323 1321 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SOFTUNLOCK,
1324 1322 "segdev_softunlock:dhp_head=%p sdp=%p addr=%p len=%lx",
1325 1323 dhp_head, sdp, addr, len);
1326 1324 DEBUGF(3, (CE_CONT, "segdev_softunlock: dhp %p lockcnt %lx "
1327 1325 "addr %p len %lx\n",
1328 1326 (void *)dhp_head, sdp->softlockcnt, (void *)addr, len));
1329 1327
1330 1328 hat_unlock(hat, addr, len);
1331 1329
1332 1330 if (dhp_head != NULL) {
1333 1331 devmap_handle_t *dhp;
1334 1332 size_t mlen;
1335 1333 size_t tlen = len;
1336 1334 ulong_t off;
1337 1335
1338 1336 dhp = devmap_find_handle(dhp_head, addr);
1339 1337 ASSERT(dhp != NULL);
1340 1338
1341 1339 off = (ulong_t)(addr - dhp->dh_uvaddr);
1342 1340 while (tlen != 0) {
1343 1341 mlen = MIN(tlen, (dhp->dh_len - off));
1344 1342
1345 1343 /*
1346 1344 * unlock segkp memory, locked during F_SOFTLOCK
1347 1345 */
1348 1346 if (dhp_is_kpmem(dhp)) {
1349 1347 release_kpmem_lock(
1350 1348 (struct ddi_umem_cookie *)dhp->dh_cookie,
1351 1349 btopr(mlen));
1352 1350 }
1353 1351
1354 1352 /*
1355 1353 * Do the softlock accounting for devmap_access
1356 1354 */
1357 1355 if (dhp->dh_callbackops.devmap_access != NULL) {
1358 1356 devmap_softlock_exit(dhp->dh_softlock,
1359 1357 btopr(mlen), F_SOFTLOCK);
1360 1358 }
1361 1359
1362 1360 tlen -= mlen;
1363 1361 dhp = dhp->dh_next;
1364 1362 off = 0;
1365 1363 }
1366 1364 }
1367 1365
1368 1366 mutex_enter(&freemem_lock);
1369 1367 ASSERT(sdp->softlockcnt >= btopr(len));
1370 1368 sdp->softlockcnt -= btopr(len);
1371 1369 mutex_exit(&freemem_lock);
1372 1370 if (sdp->softlockcnt == 0) {
1373 1371 /*
1374 1372 * All SOFTLOCKS are gone. Wake up any waiting
1375 1373 * unmappers so they can try again to unmap.
1376 1374 * Check for waiters first without the mutex
1377 1375 * held so we don't always grab the mutex on
1378 1376 * softunlocks.
1379 1377 */
1380 1378 if (AS_ISUNMAPWAIT(seg->s_as)) {
1381 1379 mutex_enter(&seg->s_as->a_contents);
1382 1380 if (AS_ISUNMAPWAIT(seg->s_as)) {
1383 1381 AS_CLRUNMAPWAIT(seg->s_as);
1384 1382 cv_broadcast(&seg->s_as->a_cv);
1385 1383 }
1386 1384 mutex_exit(&seg->s_as->a_contents);
1387 1385 }
1388 1386 }
1389 1387
1390 1388 }
1391 1389
1392 1390 /*
1393 1391 * Handle fault for a single page.
1394 1392 * Done in a separate routine so we can handle errors more easily.
1395 1393 * This routine is called only from segdev_faultpages()
1396 1394 * when looping over the range of addresses requested. The segment lock is held.
1397 1395 */
1398 1396 static faultcode_t
1399 1397 segdev_faultpage(
1400 1398 struct hat *hat, /* the hat */
1401 1399 struct seg *seg, /* seg_dev of interest */
1402 1400 caddr_t addr, /* address in as */
1403 1401 struct vpage *vpage, /* pointer to vpage for seg, addr */
1404 1402 enum fault_type type, /* type of fault */
1405 1403 enum seg_rw rw, /* type of access at fault */
1406 1404 devmap_handle_t *dhp) /* devmap handle if any for this page */
1407 1405 {
1408 1406 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1409 1407 uint_t prot;
1410 1408 pfn_t pfnum = PFN_INVALID;
1411 1409 u_offset_t offset;
1412 1410 uint_t hat_flags;
1413 1411 dev_info_t *dip;
1414 1412
1415 1413 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE,
1416 1414 "segdev_faultpage: dhp=%p seg=%p addr=%p", dhp, seg, addr);
1417 1415 DEBUGF(8, (CE_CONT, "segdev_faultpage: dhp %p seg %p addr %p \n",
1418 1416 (void *)dhp, (void *)seg, (void *)addr));
1419 1417
1420 1418 /*
1421 1419 * Initialize protection value for this page.
1422 1420 * If we have per page protection values check it now.
1423 1421 */
1424 1422 if (sdp->pageprot) {
1425 1423 uint_t protchk;
1426 1424
1427 1425 switch (rw) {
1428 1426 case S_READ:
1429 1427 protchk = PROT_READ;
1430 1428 break;
1431 1429 case S_WRITE:
1432 1430 protchk = PROT_WRITE;
1433 1431 break;
1434 1432 case S_EXEC:
1435 1433 protchk = PROT_EXEC;
1436 1434 break;
1437 1435 case S_OTHER:
1438 1436 default:
1439 1437 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
1440 1438 break;
1441 1439 }
1442 1440
1443 1441 prot = VPP_PROT(vpage);
1444 1442 if ((prot & protchk) == 0)
1445 1443 return (FC_PROT); /* illegal access type */
1446 1444 } else {
1447 1445 prot = sdp->prot;
1448 1446 /* caller has already done segment level protection check */
1449 1447 }
1450 1448
1451 1449 if (type == F_SOFTLOCK) {
1452 1450 mutex_enter(&freemem_lock);
1453 1451 sdp->softlockcnt++;
1454 1452 mutex_exit(&freemem_lock);
1455 1453 }
1456 1454
1457 1455 hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
1458 1456 offset = sdp->offset + (u_offset_t)(addr - seg->s_base);
1459 1457 /*
1460 1458 * In the devmap framework, sdp->mapfunc is set to NULL. We can get
1461 1459 * pfnum from dhp->dh_pfn (at beginning of segment) and offset from
1462 1460 * seg->s_base.
1463 1461 */
1464 1462 if (dhp == NULL) {
1465 1463 /* If segment has devmap_data, then dhp should be non-NULL */
1466 1464 ASSERT(sdp->devmap_data == NULL);
1467 1465 pfnum = (pfn_t)cdev_mmap(sdp->mapfunc, sdp->vp->v_rdev,
1468 1466 (off_t)offset, prot);
1469 1467 prot |= sdp->hat_attr;
1470 1468 } else {
1471 1469 ulong_t off;
1472 1470 struct ddi_umem_cookie *cp;
1473 1471 struct devmap_pmem_cookie *pcp;
1474 1472
1475 1473 /* ensure the dhp passed in contains addr. */
1476 1474 ASSERT(dhp == devmap_find_handle(
1477 1475 (devmap_handle_t *)sdp->devmap_data, addr));
1478 1476
1479 1477 off = addr - dhp->dh_uvaddr;
1480 1478
1481 1479 /*
1482 1480 * This routine assumes that the caller makes sure that the
1483 1481 * fields in dhp used below are unchanged due to remap during
1484 1482 * this call. Caller does HOLD_DHP_LOCK if needed
1485 1483 */
1486 1484 cp = dhp->dh_cookie;
1487 1485 if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
1488 1486 pfnum = PFN_INVALID;
1489 1487 } else if (cookie_is_devmem(cp)) {
1490 1488 pfnum = dhp->dh_pfn + btop(off);
1491 1489 } else if (cookie_is_pmem(cp)) {
1492 1490 pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
1493 1491 ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
1494 1492 dhp->dh_roff < ptob(pcp->dp_npages));
1495 1493 pfnum = page_pptonum(
1496 1494 pcp->dp_pparray[btop(off + dhp->dh_roff)]);
1497 1495 } else {
1498 1496 ASSERT(dhp->dh_roff < cp->size);
1499 1497 ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
1500 1498 dhp->dh_cvaddr < (cp->cvaddr + cp->size));
1501 1499 ASSERT((dhp->dh_cvaddr + off) <=
1502 1500 (cp->cvaddr + cp->size));
1503 1501 ASSERT((dhp->dh_cvaddr + off + PAGESIZE) <=
1504 1502 (cp->cvaddr + cp->size));
1505 1503
1506 1504 switch (cp->type) {
1507 1505 case UMEM_LOCKED :
1508 1506 if (cp->pparray != NULL) {
1509 1507 ASSERT((dhp->dh_roff &
1510 1508 PAGEOFFSET) == 0);
1511 1509 pfnum = page_pptonum(
1512 1510 cp->pparray[btop(off +
1513 1511 dhp->dh_roff)]);
1514 1512 } else {
1515 1513 pfnum = hat_getpfnum(
1516 1514 ((proc_t *)cp->procp)->p_as->a_hat,
1517 1515 cp->cvaddr + off);
1518 1516 }
1519 1517 break;
1520 1518 case UMEM_TRASH :
1521 1519 pfnum = page_pptonum(trashpp);
1522 1520 /*
1523 1521 * We should set hat_flags to HAT_NOFAULT also
1524 1522 * However, not all hat layers implement this
1525 1523 */
1526 1524 break;
1527 1525 case KMEM_PAGEABLE:
1528 1526 case KMEM_NON_PAGEABLE:
1529 1527 pfnum = hat_getpfnum(kas.a_hat,
1530 1528 dhp->dh_cvaddr + off);
1531 1529 break;
1532 1530 default :
1533 1531 pfnum = PFN_INVALID;
1534 1532 break;
1535 1533 }
1536 1534 }
1537 1535 prot |= dhp->dh_hat_attr;
1538 1536 }
1539 1537 if (pfnum == PFN_INVALID) {
1540 1538 return (FC_MAKE_ERR(EFAULT));
1541 1539 }
1542 1540 /* prot should already be OR'ed in with hat_attributes if needed */
1543 1541
1544 1542 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE_CK1,
1545 1543 "segdev_faultpage: pfnum=%lx memory=%x prot=%x flags=%x",
1546 1544 pfnum, pf_is_memory(pfnum), prot, hat_flags);
1547 1545 DEBUGF(9, (CE_CONT, "segdev_faultpage: pfnum %lx memory %x "
1548 1546 "prot %x flags %x\n", pfnum, pf_is_memory(pfnum), prot, hat_flags));
1549 1547
1550 1548 if (pf_is_memory(pfnum) || (dhp != NULL)) {
1551 1549 /*
1552 1550 * It's not _really_ required here to pass sdp->hat_flags
1553 1551 * to hat_devload even though we do it.
1554 1552 * This is because the hat figures out that DEVMEM mappings
1555 1553 * are non-consistent anyway.
1556 1554 */
1557 1555 hat_devload(hat, addr, PAGESIZE, pfnum,
1558 1556 prot, hat_flags | sdp->hat_flags);
1559 1557 return (0);
1560 1558 }
1561 1559
1562 1560 /*
1563 1561 * Fall through to the case where devmap is not used and we need to call
1564 1562 * up the device tree to set up the mapping
1565 1563 */
1566 1564
1567 1565 dip = VTOS(VTOCVP(sdp->vp))->s_dip;
1568 1566 ASSERT(dip);
1569 1567
1570 1568 /*
1571 1569 * When calling ddi_map_fault, we do not OR in sdp->hat_attr
1572 1570 * This is because this calls drivers which may not expect
1573 1571 * prot to have any values other than PROT_ALL
1574 1572 * The root nexus driver has a hack to peek into the segment
1575 1573 * structure and then OR in sdp->hat_attr.
1576 1574 * XX In case the bus_ops interfaces are ever revisited
1577 1575 * we need to fix this. prot should include other hat attributes
1578 1576 */
1579 1577 if (ddi_map_fault(dip, hat, seg, addr, NULL, pfnum, prot & PROT_ALL,
1580 1578 (uint_t)(type == F_SOFTLOCK)) != DDI_SUCCESS) {
1581 1579 return (FC_MAKE_ERR(EFAULT));
1582 1580 }
1583 1581 return (0);
1584 1582 }
1585 1583
1586 1584 static faultcode_t
1587 1585 segdev_fault(
1588 1586 struct hat *hat, /* the hat */
1589 1587 struct seg *seg, /* the seg_dev of interest */
1590 1588 caddr_t addr, /* the address of the fault */
1591 1589 size_t len, /* the length of the range */
1592 1590 enum fault_type type, /* type of fault */
1593 1591 enum seg_rw rw) /* type of access at fault */
1594 1592 {
1595 1593 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1596 1594 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
1597 1595 devmap_handle_t *dhp;
1598 1596 struct devmap_softlock *slock = NULL;
1599 1597 ulong_t slpage = 0;
1600 1598 ulong_t off;
1601 1599 caddr_t maddr = addr;
1602 1600 int err;
1603 1601 int err_is_faultcode = 0;
1604 1602
1605 1603 TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_FAULT,
1606 1604 "segdev_fault: dhp_head=%p seg=%p addr=%p len=%lx type=%x",
1607 1605 (void *)dhp_head, (void *)seg, (void *)addr, len, type);
1608 1606 DEBUGF(7, (CE_CONT, "segdev_fault: dhp_head %p seg %p "
1609 1607 "addr %p len %lx type %x\n",
1610 1608 (void *)dhp_head, (void *)seg, (void *)addr, len, type));
1611 1609
1612 1610 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1613 1611
1614 1612 /* Handle non-devmap case */
1615 1613 if (dhp_head == NULL)
1616 1614 return (segdev_faultpages(hat, seg, addr, len, type, rw, NULL));
1617 1615
1618 1616 /* Find devmap handle */
1619 1617 if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
1620 1618 return (FC_NOMAP);
1621 1619
1622 1620 /*
1623 1621 * The seg_dev driver does not implement copy-on-write,
1624 1622 * and always loads translations with maximal allowed permissions
1625 1623	 * but we got a fault trying to access the device.
1626 1624	 * Servicing the fault will not produce a better result.
1627 1625	 * RFE: If we want devmap_access callbacks to be involved in F_PROT
1628 1626	 * faults, then the code below is written with that in mind,
1629 1627	 * pending resolution of the following:
1630 1628	 *   - determine whether the F_INVAL/F_SOFTLOCK syncing
1631 1629	 *     is also needed for F_PROT; the code below assumes it is
1632 1630	 *   - if the driver sees F_PROT and calls devmap_load with the same
1633 1631	 *     type, then segdev_faultpages will fail with FC_PROT anyway; we
1634 1632	 *     need to change that so calls from devmap_load to segdev_faultpages
1635 1633	 *     for F_PROT type are retagged to F_INVAL.
1636 1634	 * RFE: Today we don't have drivers that use devmap and want to handle
1637 1635	 * F_PROT calls. The code in segdev_fault* is written to allow
1638 1636	 * this case but is not tested. A driver that needs this capability
1639 1637	 * should be able to remove the short-circuit case, resolve the
1640 1638	 * above issues, and "should" work.
1641 1639 */
1642 1640 if (type == F_PROT) {
1643 1641 return (FC_PROT);
1644 1642 }
1645 1643
1646 1644 /*
1647 1645 * Loop through dhp list calling devmap_access or segdev_faultpages for
1648 1646 * each devmap handle.
1649 1647 * drivers which implement devmap_access can interpose on faults and do
1650 1648 * device-appropriate special actions before calling devmap_load.
1651 1649 */
1652 1650
1653 1651 /*
1654 1652 * Unfortunately, this simple loop has turned out to expose a variety
1655 1653 * of complex problems which results in the following convoluted code.
1656 1654 *
1657 1655	 * First, a desire to serialize F_SOFTLOCK calls
1658 1656	 * to the driver within the framework.
1659 1657	 * This results in a dh_softlock structure that is on a per-device
1660 1658	 * (or device-instance) basis and serializes devmap_access calls.
1661 1659	 * Ideally we would do this per underlying
1662 1660	 * memory/device region being faulted on,
1663 1661	 * but that is hard to identify, and with REMAP, harder still.
1664 1662	 * Second, a desire to serialize F_INVAL (and F_PROT) calls
1665 1663	 * w.r.t. F_SOFTLOCK calls to the driver.
1666 1664 * These serializations are to simplify the driver programmer model.
1667 1665 * To support these two features, the code first goes through the
1668 1666 * devmap handles and counts the pages (slpage) that are covered
1669 1667 * by devmap_access callbacks.
1670 1668 * This part ends with a devmap_softlock_enter call
1671 1669 * which allows only one F_SOFTLOCK active on a device instance,
1672 1670 * but multiple F_INVAL/F_PROTs can be active except when a
1673 1671 * F_SOFTLOCK is active
1674 1672 *
1675 1673	 * Next, we don't short-circuit the fault code upfront to call
1676 1674	 * segdev_softunlock for F_SOFTUNLOCK, because we must use
1677 1675	 * the same length when we softlock and softunlock:
1678 1676	 *
1679 1677	 * - Hat layers may not support softunlocking lengths less than the
1680 1678	 *   original length when there is large page support.
1681 1679	 * - kpmem locking depends on keeping the lengths the same.
1682 1680	 * - If drivers handle F_SOFTLOCK, they probably also expect to
1683 1681	 *   see an F_SOFTUNLOCK of the same length.
1684 1682	 * Hence, if lengths are extended during softlock,
1685 1683	 * softunlock has to make the same adjustments and go through
1686 1684	 * the same loop calling segdev_faultpages/segdev_softunlock,
1687 1685	 * but some of the synchronization and error handling is different.
1688 1686 */
1689 1687
1690 1688 if (type != F_SOFTUNLOCK) {
1691 1689 devmap_handle_t *dhpp = dhp;
1692 1690 size_t slen = len;
1693 1691
1694 1692 /*
1695 1693		 * Calculate the count of pages that are:
1696 1694 * a) within the (potentially extended) fault region
1697 1695 * b) AND covered by devmap handle with devmap_access
1698 1696 */
1699 1697 off = (ulong_t)(addr - dhpp->dh_uvaddr);
1700 1698 while (slen != 0) {
1701 1699 size_t mlen;
1702 1700
1703 1701 /*
1704 1702 * Softlocking on a region that allows remap is
1705 1703 * unsupported due to unresolved locking issues
1706 1704 * XXX: unclear what these are?
1707 1705			 * One potential issue is that if there is a pending
1708 1706			 * softlock, then a remap should not be allowed
1709 1707			 * until the unlock is done. This is easily
1710 1708			 * fixed by returning an error from devmap_*_remap
1711 1709			 * after checking the dh->dh_softlock->softlocked value.
1712 1710 */
1713 1711 if ((type == F_SOFTLOCK) &&
1714 1712 (dhpp->dh_flags & DEVMAP_ALLOW_REMAP)) {
1715 1713 return (FC_NOSUPPORT);
1716 1714 }
1717 1715
1718 1716 mlen = MIN(slen, (dhpp->dh_len - off));
1719 1717 if (dhpp->dh_callbackops.devmap_access) {
1720 1718 size_t llen;
1721 1719 caddr_t laddr;
1722 1720 /*
1723 1721 * use extended length for large page mappings
1724 1722 */
1725 1723 HOLD_DHP_LOCK(dhpp);
1726 1724 if ((sdp->pageprot == 0) &&
1727 1725 (dhpp->dh_flags & DEVMAP_FLAG_LARGE)) {
1728 1726 devmap_get_large_pgsize(dhpp,
1729 1727 mlen, maddr, &llen, &laddr);
1730 1728 } else {
1731 1729 llen = mlen;
1732 1730 }
1733 1731 RELE_DHP_LOCK(dhpp);
1734 1732
1735 1733 slpage += btopr(llen);
1736 1734 slock = dhpp->dh_softlock;
1737 1735 }
1738 1736 maddr += mlen;
1739 1737 ASSERT(slen >= mlen);
1740 1738 slen -= mlen;
1741 1739 dhpp = dhpp->dh_next;
1742 1740 off = 0;
1743 1741 }
1744 1742 /*
1745 1743		 * synchronize with other faulting threads and wait till safe;
1746 1744		 * devmap_softlock_enter might return due to a signal in cv_wait
1747 1745		 *
1748 1746		 * devmap_softlock_enter has to be called outside of the while loop
1749 1747		 * to prevent a deadlock if len spans multiple dhps.
1750 1748		 * dh_softlock is based on device instance and if multiple dhps
1751 1749		 * use the same device instance, the second dhp's LOCK call
1752 1750		 * will hang waiting on the first to complete.
1753 1751		 * devmap_setup verifies that slocks in a dhp_chain are the same.
1754 1752		 * RFE: this deadlock only holds true for F_SOFTLOCK. For
1755 1753 * F_INVAL/F_PROT, since we now allow multiple in parallel,
1756 1754 * we could have done the softlock_enter inside the loop
1757 1755 * and supported multi-dhp mappings with dissimilar devices
1758 1756 */
1759 1757 if (err = devmap_softlock_enter(slock, slpage, type))
1760 1758 return (FC_MAKE_ERR(err));
1761 1759 }
1762 1760
1763 1761	/* reset 'maddr' to the start address of the fault range. */
1764 1762 maddr = addr;
1765 1763
1766 1764	/* calculate the offset corresponding to 'addr' in the first dhp. */
1767 1765 off = (ulong_t)(addr - dhp->dh_uvaddr);
1768 1766
1769 1767 /*
1770 1768	 * The fault length may span multiple dhps.
1771 1769 * Loop until the total length is satisfied.
1772 1770 */
1773 1771 while (len != 0) {
1774 1772 size_t llen;
1775 1773 size_t mlen;
1776 1774 caddr_t laddr;
1777 1775
1778 1776 /*
1779 1777 * mlen is the smaller of 'len' and the length
1780 1778		 * from addr to the end of the mapping defined by dhp.
1781 1779 */
1782 1780 mlen = MIN(len, (dhp->dh_len - off));
1783 1781
1784 1782 HOLD_DHP_LOCK(dhp);
1785 1783 /*
1786 1784 * Pass the extended length and address to devmap_access
1787 1785 * if large pagesize is used for loading address translations.
1788 1786 */
1789 1787 if ((sdp->pageprot == 0) &&
1790 1788 (dhp->dh_flags & DEVMAP_FLAG_LARGE)) {
1791 1789 devmap_get_large_pgsize(dhp, mlen, maddr,
1792 1790 &llen, &laddr);
1793 1791 ASSERT(maddr == addr || laddr == maddr);
1794 1792 } else {
1795 1793 llen = mlen;
1796 1794 laddr = maddr;
1797 1795 }
1798 1796
1799 1797 if (dhp->dh_callbackops.devmap_access != NULL) {
1800 1798 offset_t aoff;
1801 1799
1802 1800 aoff = sdp->offset + (offset_t)(laddr - seg->s_base);
1803 1801
1804 1802 /*
1805 1803			 * call the driver's devmap_access entry point, which will
1806 1804			 * call devmap_load/contextmgmt to load the translations
1807 1805			 *
1808 1806			 * We drop the dhp_lock before calling devmap_access so
1809 1807			 * drivers can call devmap_*_remap within it
1810 1808 */
1811 1809 RELE_DHP_LOCK(dhp);
1812 1810
1813 1811 err = (*dhp->dh_callbackops.devmap_access)(
1814 1812 dhp, (void *)dhp->dh_pvtp, aoff, llen, type, rw);
1815 1813 } else {
1816 1814 /*
1817 1815			 * If there is no devmap_access entry point, load the
1818 1816			 * mappings; hold dhp_lock across faultpages if REMAP
1819 1817 */
1820 1818 err = segdev_faultpages(hat, seg, laddr, llen,
1821 1819 type, rw, dhp);
1822 1820 err_is_faultcode = 1;
1823 1821 RELE_DHP_LOCK(dhp);
1824 1822 }
1825 1823
1826 1824 if (err) {
1827 1825 if ((type == F_SOFTLOCK) && (maddr > addr)) {
1828 1826 /*
1829 1827				 * If not the first dhp, use
1830 1828				 * segdev_fault(F_SOFTUNLOCK) for prior dhps.
1831 1829				 * While this is recursion, it is incorrect to
1832 1830				 * call just segdev_softunlock
1833 1831				 * if we are using either large pages
1834 1832				 * or devmap_access. It is more correct
1835 1833				 * to go through the same loop as above
1836 1834				 * rather than call segdev_softunlock directly;
1837 1835				 * it will use the right lengths as well as
1838 1836				 * call into the driver devmap_access routines.
1839 1837 */
1840 1838 size_t done = (size_t)(maddr - addr);
1841 1839 (void) segdev_fault(hat, seg, addr, done,
1842 1840 F_SOFTUNLOCK, S_OTHER);
1843 1841 /*
1844 1842 * reduce slpage by number of pages
1845 1843 * released by segdev_softunlock
1846 1844 */
1847 1845 ASSERT(slpage >= btopr(done));
1848 1846 devmap_softlock_exit(slock,
1849 1847 slpage - btopr(done), type);
1850 1848 } else {
1851 1849 devmap_softlock_exit(slock, slpage, type);
1852 1850 }
1853 1851
1854 1852
1855 1853 /*
1856 1854 * Segdev_faultpages() already returns a faultcode,
1857 1855		 * hence the result from segdev_faultpages() should be
1858 1856 * returned directly.
1859 1857 */
1860 1858 if (err_is_faultcode)
1861 1859 return (err);
1862 1860 return (FC_MAKE_ERR(err));
1863 1861 }
1864 1862
1865 1863 maddr += mlen;
1866 1864 ASSERT(len >= mlen);
1867 1865 len -= mlen;
1868 1866 dhp = dhp->dh_next;
1869 1867 off = 0;
1870 1868
1871 1869 ASSERT(!dhp || len == 0 || maddr == dhp->dh_uvaddr);
1872 1870 }
1873 1871 /*
1874 1872 * release the softlock count at end of fault
1875 1873	 * For F_SOFTLOCK this is done in the later F_SOFTUNLOCK.
1876 1874 */
1877 1875 if ((type == F_INVAL) || (type == F_PROT))
1878 1876 devmap_softlock_exit(slock, slpage, type);
1879 1877 return (0);
1880 1878 }
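
A minimal sketch of the driver side of the loop above, assuming a
hypothetical xx driver: a devmap_access(9E) callback interposes any
device-specific work and then validates the mapping with devmap_load(9F),
whose definition appears later in this file.

	/*ARGSUSED*/
	static int
	xx_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
	    size_t len, uint_t type, uint_t rw)
	{
		/* device-specific interposition would go here */
		return (devmap_load(dhp, off, len, type, rw));
	}
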
1881 1879
1882 1880 /*
1883 1881 * segdev_faultpages
1884 1882 *
1885 1883	 * Used to fault in seg_dev segment pages. Called by segdev_fault or
1886 1884	 * devmap_load. This routine assumes that the caller makes sure that the
1887 1885	 * fields in dhp used below are not changed due to remap during this call.
1888 1886	 * Caller does HOLD_DHP_LOCK if needed.
1889 1887 * This routine returns a faultcode_t as a return value for segdev_fault.
1890 1888 */
1891 1889 static faultcode_t
1892 1890 segdev_faultpages(
1893 1891 struct hat *hat, /* the hat */
1894 1892 struct seg *seg, /* the seg_dev of interest */
1895 1893 caddr_t addr, /* the address of the fault */
1896 1894 size_t len, /* the length of the range */
1897 1895 enum fault_type type, /* type of fault */
1898 1896 enum seg_rw rw, /* type of access at fault */
1899 1897 devmap_handle_t *dhp) /* devmap handle */
1900 1898 {
1901 1899 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1902 1900 register caddr_t a;
1903 1901 struct vpage *vpage;
1904 1902 struct ddi_umem_cookie *kpmem_cookie = NULL;
1905 1903 int err;
1906 1904
1907 1905 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGES,
1908 1906 "segdev_faultpages: dhp=%p seg=%p addr=%p len=%lx",
1909 1907 (void *)dhp, (void *)seg, (void *)addr, len);
1910 1908 DEBUGF(5, (CE_CONT, "segdev_faultpages: "
1911 1909 "dhp %p seg %p addr %p len %lx\n",
1912 1910 (void *)dhp, (void *)seg, (void *)addr, len));
1913 1911
1914 1912 /*
1915 1913 * The seg_dev driver does not implement copy-on-write,
1916 1914 * and always loads translations with maximal allowed permissions
1917 1915	 * but we got a fault trying to access the device.
1918 1916	 * Servicing the fault will not produce a better result.
1919 1917	 * XXX: If we want to allow devmap_access to handle F_PROT calls,
1920 1918	 * this code should be removed to let the normal fault handling
1921 1919	 * take care of finding the error.
1922 1920 */
1923 1921 if (type == F_PROT) {
1924 1922 return (FC_PROT);
1925 1923 }
1926 1924
1927 1925 if (type == F_SOFTUNLOCK) {
1928 1926 segdev_softunlock(hat, seg, addr, len, rw);
1929 1927 return (0);
1930 1928 }
1931 1929
1932 1930 /*
1933 1931	 * For kernel pageable memory, fault in and lock segkp pages.
1934 1932	 * We hold this lock until the completion of this
1935 1933	 * fault (INVAL/PROT) or until unlock (SOFTLOCK).
1936 1934 */
1937 1935 if ((dhp != NULL) && dhp_is_kpmem(dhp)) {
1938 1936 kpmem_cookie = (struct ddi_umem_cookie *)dhp->dh_cookie;
1939 1937 if (err = acquire_kpmem_lock(kpmem_cookie, btopr(len)))
1940 1938 return (err);
1941 1939 }
1942 1940
1943 1941 /*
1944 1942 * If we have the same protections for the entire segment,
1945 1943	 * ensure that the access being attempted is legitimate.
1946 1944 */
1947 1945 rw_enter(&sdp->lock, RW_READER);
1948 1946 if (sdp->pageprot == 0) {
1949 1947 uint_t protchk;
1950 1948
1951 1949 switch (rw) {
1952 1950 case S_READ:
1953 1951 protchk = PROT_READ;
1954 1952 break;
1955 1953 case S_WRITE:
1956 1954 protchk = PROT_WRITE;
1957 1955 break;
1958 1956 case S_EXEC:
1959 1957 protchk = PROT_EXEC;
1960 1958 break;
1961 1959 case S_OTHER:
1962 1960 default:
1963 1961 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
1964 1962 break;
1965 1963 }
1966 1964
1967 1965 if ((sdp->prot & protchk) == 0) {
1968 1966 rw_exit(&sdp->lock);
1969 1967 /* undo kpmem locking */
1970 1968 if (kpmem_cookie != NULL) {
1971 1969 release_kpmem_lock(kpmem_cookie, btopr(len));
1972 1970 }
1973 1971 return (FC_PROT); /* illegal access type */
1974 1972 }
1975 1973 }
1976 1974
1977 1975 /*
1978 1976	 * we do a single hat_devload for the range if:
1979 1977	 * - the devmap framework is used (dhp is not NULL),
1980 1978	 * - pageprot == 0, i.e., no per-page protection is set, and
1981 1979	 * - the pages are device pages, irrespective of large page use
1982 1980 */
1983 1981 if ((sdp->pageprot == 0) && (dhp != NULL) && dhp_is_devmem(dhp)) {
1984 1982 pfn_t pfnum;
1985 1983 uint_t hat_flags;
1986 1984
1987 1985 if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
1988 1986 rw_exit(&sdp->lock);
1989 1987 return (FC_NOMAP);
1990 1988 }
1991 1989
1992 1990 if (type == F_SOFTLOCK) {
1993 1991 mutex_enter(&freemem_lock);
1994 1992 sdp->softlockcnt += btopr(len);
1995 1993 mutex_exit(&freemem_lock);
1996 1994 }
1997 1995
1998 1996 hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
1999 1997 pfnum = dhp->dh_pfn + btop((uintptr_t)(addr - dhp->dh_uvaddr));
2000 1998 ASSERT(!pf_is_memory(pfnum));
2001 1999
2002 2000 hat_devload(hat, addr, len, pfnum, sdp->prot | dhp->dh_hat_attr,
2003 2001 hat_flags | sdp->hat_flags);
2004 2002 rw_exit(&sdp->lock);
2005 2003 return (0);
2006 2004 }
2007 2005
2008 2006 /* Handle cases where we have to loop through fault handling per-page */
2009 2007
2010 2008 if (sdp->vpage == NULL)
2011 2009 vpage = NULL;
2012 2010 else
2013 2011 vpage = &sdp->vpage[seg_page(seg, addr)];
2014 2012
2015 2013 /* loop over the address range handling each fault */
2016 2014 for (a = addr; a < addr + len; a += PAGESIZE) {
2017 2015 if (err = segdev_faultpage(hat, seg, a, vpage, type, rw, dhp)) {
2018 2016 break;
2019 2017 }
2020 2018 if (vpage != NULL)
2021 2019 vpage++;
2022 2020 }
2023 2021 rw_exit(&sdp->lock);
2024 2022 if (err && (type == F_SOFTLOCK)) { /* error handling for F_SOFTLOCK */
2025 2023		size_t done = (size_t)(a - addr);	/* bytes faulted successfully */
2026 2024 if (done > 0) {
2027 2025 /* use softunlock for those pages */
2028 2026 segdev_softunlock(hat, seg, addr, done, S_OTHER);
2029 2027 }
2030 2028 if (kpmem_cookie != NULL) {
2031 2029 /* release kpmem lock for rest of pages */
2032 2030 ASSERT(len >= done);
2033 2031 release_kpmem_lock(kpmem_cookie, btopr(len - done));
2034 2032 }
2035 2033 } else if ((kpmem_cookie != NULL) && (type != F_SOFTLOCK)) {
2036 2034 /* for non-SOFTLOCK cases, release kpmem */
2037 2035 release_kpmem_lock(kpmem_cookie, btopr(len));
2038 2036 }
2039 2037 return (err);
2040 2038 }
2041 2039
2042 2040 /*
2043 2041 * Asynchronous page fault. We simply do nothing since this
2044 2042 * entry point is not supposed to load up the translation.
2045 2043 */
2046 2044 /*ARGSUSED*/
2047 2045 static faultcode_t
2048 2046 segdev_faulta(struct seg *seg, caddr_t addr)
2049 2047 {
2050 2048 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FAULTA,
2051 2049 "segdev_faulta: seg=%p addr=%p", (void *)seg, (void *)addr);
2052 2050 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2053 2051
2054 2052 return (0);
2055 2053 }
2056 2054
2057 2055 static int
2058 2056 segdev_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2059 2057 {
2060 2058 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2061 2059 register devmap_handle_t *dhp;
2062 2060 register struct vpage *vp, *evp;
2063 2061 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
2064 2062 ulong_t off;
2065 2063 size_t mlen, sz;
2066 2064
2067 2065 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT,
2068 2066 "segdev_setprot:start seg=%p addr=%p len=%lx prot=%x",
2069 2067 (void *)seg, (void *)addr, len, prot);
2070 2068 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2071 2069
2072 2070 if ((sz = sdp->softlockcnt) > 0 && dhp_head != NULL) {
2073 2071 /*
2074 2072 * Fail the setprot if pages are SOFTLOCKed through this
2075 2073 * mapping.
2076 2074 * Softlockcnt is protected from change by the as read lock.
2077 2075 */
2078 2076 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT_CK1,
2079 2077 "segdev_setprot:error softlockcnt=%lx", sz);
2080 2078 DEBUGF(1, (CE_CONT, "segdev_setprot: softlockcnt %ld\n", sz));
2081 2079 return (EAGAIN);
2082 2080 }
2083 2081
2084 2082 if (dhp_head != NULL) {
2085 2083 if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
2086 2084 return (EINVAL);
2087 2085
2088 2086 /*
2089 2087		 * check whether prot violates maxprot.
2090 2088 */
2091 2089 off = (ulong_t)(addr - dhp->dh_uvaddr);
2092 2090 mlen = len;
2093 2091 while (dhp) {
2094 2092 if ((dhp->dh_maxprot & prot) != prot)
2095 2093 return (EACCES); /* violated maxprot */
2096 2094
2097 2095 if (mlen > (dhp->dh_len - off)) {
2098 2096 mlen -= dhp->dh_len - off;
2099 2097 dhp = dhp->dh_next;
2100 2098 off = 0;
2101 2099 } else
2102 2100 break;
2103 2101 }
2104 2102 } else {
2105 2103 if ((sdp->maxprot & prot) != prot)
2106 2104 return (EACCES);
2107 2105 }
2108 2106
2109 2107 rw_enter(&sdp->lock, RW_WRITER);
2110 2108 if (addr == seg->s_base && len == seg->s_size && sdp->pageprot == 0) {
2111 2109 if (sdp->prot == prot) {
2112 2110 rw_exit(&sdp->lock);
2113 2111 return (0); /* all done */
2114 2112 }
2115 2113 sdp->prot = (uchar_t)prot;
2116 2114 } else {
2117 2115 sdp->pageprot = 1;
2118 2116 if (sdp->vpage == NULL) {
2119 2117 /*
2120 2118			 * First time through setting per-page permissions;
2121 2119			 * initialize all the vpage structures to prot.
2122 2120 */
2123 2121 sdp->vpage = kmem_zalloc(vpgtob(seg_pages(seg)),
2124 2122 KM_SLEEP);
2125 2123 evp = &sdp->vpage[seg_pages(seg)];
2126 2124 for (vp = sdp->vpage; vp < evp; vp++)
2127 2125 VPP_SETPROT(vp, sdp->prot);
2128 2126 }
2129 2127 /*
2130 2128		 * Now go change the needed vpage protections.
2131 2129 */
2132 2130 evp = &sdp->vpage[seg_page(seg, addr + len)];
2133 2131 for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++)
2134 2132 VPP_SETPROT(vp, prot);
2135 2133 }
2136 2134 rw_exit(&sdp->lock);
2137 2135
2138 2136 if (dhp_head != NULL) {
2139 2137 devmap_handle_t *tdhp;
2140 2138 /*
2141 2139 * If large page size was used in hat_devload(),
2142 2140 * the same page size must be used in hat_unload().
2143 2141 */
2144 2142 dhp = tdhp = devmap_find_handle(dhp_head, addr);
2145 2143 while (tdhp != NULL) {
2146 2144 if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
2147 2145 break;
2148 2146 }
2149 2147 tdhp = tdhp->dh_next;
2150 2148 }
2151 2149 if (tdhp) {
2152 2150 size_t slen = len;
2153 2151 size_t mlen;
2154 2152 size_t soff;
2155 2153
2156 2154 soff = (ulong_t)(addr - dhp->dh_uvaddr);
2157 2155 while (slen != 0) {
2158 2156 mlen = MIN(slen, (dhp->dh_len - soff));
2159 2157 hat_unload(seg->s_as->a_hat, dhp->dh_uvaddr,
2160 2158 dhp->dh_len, HAT_UNLOAD);
2161 2159 dhp = dhp->dh_next;
2162 2160 ASSERT(slen >= mlen);
2163 2161 slen -= mlen;
2164 2162 soff = 0;
2165 2163 }
2166 2164 return (0);
2167 2165 }
2168 2166 }
2169 2167
2170 2168 if ((prot & ~PROT_USER) == PROT_NONE) {
2171 2169 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
2172 2170 } else {
2173 2171 /*
2174 2172 * RFE: the segment should keep track of all attributes
2175 2173 * allowing us to remove the deprecated hat_chgprot
2176 2174 * and use hat_chgattr.
2177 2175 */
2178 2176 hat_chgprot(seg->s_as->a_hat, addr, len, prot);
2179 2177 }
2180 2178
2181 2179 return (0);
2182 2180 }
2183 2181
2184 2182 static int
2185 2183 segdev_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2186 2184 {
2187 2185 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2188 2186 struct vpage *vp, *evp;
2189 2187
2190 2188 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_CHECKPROT,
2191 2189 "segdev_checkprot:start seg=%p addr=%p len=%lx prot=%x",
2192 2190 (void *)seg, (void *)addr, len, prot);
2193 2191 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2194 2192
2195 2193 /*
2196 2194	 * If segment protections can be used, simply check against them.
2197 2195 */
2198 2196 rw_enter(&sdp->lock, RW_READER);
2199 2197 if (sdp->pageprot == 0) {
2200 2198 register int err;
2201 2199
2202 2200 err = ((sdp->prot & prot) != prot) ? EACCES : 0;
2203 2201 rw_exit(&sdp->lock);
2204 2202 return (err);
2205 2203 }
2206 2204
2207 2205 /*
2208 2206 * Have to check down to the vpage level
2209 2207 */
2210 2208 evp = &sdp->vpage[seg_page(seg, addr + len)];
2211 2209 for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
2212 2210 if ((VPP_PROT(vp) & prot) != prot) {
2213 2211 rw_exit(&sdp->lock);
2214 2212 return (EACCES);
2215 2213 }
2216 2214 }
2217 2215 rw_exit(&sdp->lock);
2218 2216 return (0);
2219 2217 }
2220 2218
2221 2219 static int
2222 2220 segdev_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2223 2221 {
2224 2222 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2225 2223 size_t pgno;
2226 2224
2227 2225 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_GETPROT,
2228 2226 "segdev_getprot:start seg=%p addr=%p len=%lx protv=%p",
2229 2227 (void *)seg, (void *)addr, len, (void *)protv);
2230 2228 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2231 2229
2232 2230 pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
2233 2231 if (pgno != 0) {
2234 2232 rw_enter(&sdp->lock, RW_READER);
2235 2233 if (sdp->pageprot == 0) {
2236 2234 do {
2237 2235 protv[--pgno] = sdp->prot;
2238 2236 } while (pgno != 0);
2239 2237 } else {
2240 2238 size_t pgoff = seg_page(seg, addr);
2241 2239
2242 2240 do {
2243 2241 pgno--;
2244 2242 protv[pgno] =
2245 2243 VPP_PROT(&sdp->vpage[pgno + pgoff]);
2246 2244 } while (pgno != 0);
2247 2245 }
2248 2246 rw_exit(&sdp->lock);
2249 2247 }
2250 2248 return (0);
2251 2249 }
2252 2250
2253 2251 static u_offset_t
2254 2252 segdev_getoffset(register struct seg *seg, caddr_t addr)
2255 2253 {
2256 2254 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2257 2255
2258 2256 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETOFFSET,
2259 2257 "segdev_getoffset:start seg=%p addr=%p", (void *)seg, (void *)addr);
2260 2258
2261 2259 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2262 2260
2263 2261 return ((u_offset_t)sdp->offset + (addr - seg->s_base));
2264 2262 }
2265 2263
2266 2264 /*ARGSUSED*/
2267 2265 static int
2268 2266 segdev_gettype(register struct seg *seg, caddr_t addr)
2269 2267 {
2270 2268 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2271 2269
2272 2270 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETTYPE,
2273 2271 "segdev_gettype:start seg=%p addr=%p", (void *)seg, (void *)addr);
2274 2272
2275 2273 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2276 2274
2277 2275 return (sdp->type);
2278 2276 }
2279 2277
2280 2278
2281 2279 /*ARGSUSED*/
2282 2280 static int
2283 2281 segdev_getvp(register struct seg *seg, caddr_t addr, struct vnode **vpp)
2284 2282 {
2285 2283 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2286 2284
2287 2285 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETVP,
2288 2286 "segdev_getvp:start seg=%p addr=%p", (void *)seg, (void *)addr);
2289 2287
2290 2288 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2291 2289
2292 2290 /*
2293 2291 * Note that this vp is the common_vp of the device, where the
2294 2292	 * pages are hung.
2295 2293 */
2296 2294 *vpp = VTOCVP(sdp->vp);
2297 2295
2298 2296 return (0);
2299 2297 }
2300 2298
2301 2299 static void
2302 2300 segdev_badop(void)
2303 2301 {
2304 2302 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGDEV_BADOP,
2305 2303 "segdev_badop:start");
2306 2304 panic("segdev_badop");
2307 2305 /*NOTREACHED*/
2308 2306 }
2309 2307
2310 2308 /*
2311 2309 * segdev pages are not in the cache, and thus can't really be controlled.
2312 2310 * Hence, syncs are simply always successful.
2313 2311 */
2314 2312 /*ARGSUSED*/
2315 2313 static int
2316 2314 segdev_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
2317 2315 {
2318 2316 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SYNC, "segdev_sync:start");
2319 2317
2320 2318 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2321 2319
2322 2320 return (0);
2323 2321 }
2324 2322
2325 2323 /*
2326 2324 * segdev pages are always "in core".
2327 2325 */
2328 2326 /*ARGSUSED*/
2329 2327 static size_t
2330 2328 segdev_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
2331 2329 {
2332 2330 size_t v = 0;
2333 2331
2334 2332 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_INCORE, "segdev_incore:start");
2335 2333
2336 2334 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2337 2335
2338 2336 for (len = (len + PAGEOFFSET) & PAGEMASK; len; len -= PAGESIZE,
2339 2337 v += PAGESIZE)
2340 2338 *vec++ = 1;
2341 2339 return (v);
2342 2340 }
2343 2341
2344 2342 /*
2345 2343 * segdev pages are not in the cache, and thus can't really be controlled.
2346 2344 * Hence, locks are simply always successful.
2347 2345 */
2348 2346 /*ARGSUSED*/
2349 2347 static int
2350 2348 segdev_lockop(struct seg *seg, caddr_t addr,
2351 2349 size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
2352 2350 {
2353 2351 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_LOCKOP, "segdev_lockop:start");
2354 2352
2355 2353 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2356 2354
2357 2355 return (0);
2358 2356 }
2359 2357
2360 2358 /*
2361 2359 * segdev pages are not in the cache, and thus can't really be controlled.
2362 2360 * Hence, advise is simply always successful.
2363 2361 */
2364 2362 /*ARGSUSED*/
2365 2363 static int
2366 2364 segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2367 2365 {
2368 2366 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");
2369 2367
2370 2368 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2371 2369
2372 2370 return (0);
2373 2371 }
2374 -
2375 -/*
2376 - * segdev pages are not dumped, so we just return
2377 - */
2378 -/*ARGSUSED*/
2379 -static void
2380 -segdev_dump(struct seg *seg)
2381 -{}
2382 2372
2383 2373 /*
2384 2374	 * ddi_segmap_setup: Used by drivers who wish to specify mapping attributes
2385 2375	 * for a segment. Called from a driver's segmap(9E)
2386 2376	 * routine.
2387 2377 */
2388 2378 /*ARGSUSED*/
2389 2379 int
2390 2380 ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
2391 2381 off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
2392 2382 ddi_device_acc_attr_t *accattrp, uint_t rnumber)
2393 2383 {
2394 2384 struct segdev_crargs dev_a;
2395 2385 int (*mapfunc)(dev_t dev, off_t off, int prot);
2396 2386 uint_t hat_attr;
2397 2387 pfn_t pfn;
2398 2388 int error, i;
2399 2389
2400 2390 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP_SETUP,
2401 2391 "ddi_segmap_setup:start");
2402 2392
2403 2393 if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
2404 2394 return (ENODEV);
2405 2395
2406 2396 /*
2407 2397 * Character devices that support the d_mmap
2408 2398 * interface can only be mmap'ed shared.
2409 2399 */
2410 2400 if ((flags & MAP_TYPE) != MAP_SHARED)
2411 2401 return (EINVAL);
2412 2402
2413 2403 /*
2414 2404 * Check that this region is indeed mappable on this platform.
2415 2405 * Use the mapping function.
2416 2406 */
2417 2407 if (ddi_device_mapping_check(dev, accattrp, rnumber, &hat_attr) == -1)
2418 2408 return (ENXIO);
2419 2409
2420 2410 /*
2421 2411 * Check to ensure that the entire range is
2422 2412 * legal and we are not trying to map in
2423 2413 * more than the device will let us.
2424 2414 */
2425 2415 for (i = 0; i < len; i += PAGESIZE) {
2426 2416 if (i == 0) {
2427 2417 /*
2428 2418 * Save the pfn at offset here. This pfn will be
2429 2419 * used later to get user address.
2430 2420 */
2431 2421 if ((pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset,
2432 2422 maxprot)) == PFN_INVALID)
2433 2423 return (ENXIO);
2434 2424 } else {
2435 2425 if (cdev_mmap(mapfunc, dev, offset + i, maxprot) ==
2436 2426 PFN_INVALID)
2437 2427 return (ENXIO);
2438 2428 }
2439 2429 }
2440 2430
2441 2431 as_rangelock(as);
2442 2432 /* Pick an address w/o worrying about any vac alignment constraints. */
2443 2433 error = choose_addr(as, addrp, len, ptob(pfn), ADDR_NOVACALIGN, flags);
2444 2434 if (error != 0) {
2445 2435 as_rangeunlock(as);
2446 2436 return (error);
2447 2437 }
2448 2438
2449 2439 dev_a.mapfunc = mapfunc;
2450 2440 dev_a.dev = dev;
2451 2441 dev_a.offset = (offset_t)offset;
2452 2442 dev_a.type = flags & MAP_TYPE;
2453 2443 dev_a.prot = (uchar_t)prot;
2454 2444 dev_a.maxprot = (uchar_t)maxprot;
2455 2445 dev_a.hat_attr = hat_attr;
2456 2446 dev_a.hat_flags = 0;
2457 2447 dev_a.devmap_data = NULL;
2458 2448
2459 2449 error = as_map(as, *addrp, len, segdev_create, &dev_a);
2460 2450 as_rangeunlock(as);
2461 2451 return (error);
2462 2452
2463 2453 }
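
A hedged usage sketch, not from this file: a driver's segmap(9E) entry
point typically forwards to ddi_segmap_setup() with its device access
attributes and register number; the xx names and register number 0 are
assumptions.

	static ddi_device_acc_attr_t xx_acc_attr = {
		DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
		DDI_NEVERSWAP_ACC,	/* devacc_attr_endian_flags */
		DDI_STRICTORDER_ACC	/* devacc_attr_dataorder */
	};

	static int
	xx_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp,
	    off_t len, uint_t prot, uint_t maxprot, uint_t flags,
	    cred_t *cred)
	{
		return (ddi_segmap_setup(dev, off, as, addrp, len, prot,
		    maxprot, flags, cred, &xx_acc_attr, 0));
	}
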
2464 2454
2465 2455 /*ARGSUSED*/
2466 2456 static int
2467 2457 segdev_pagelock(struct seg *seg, caddr_t addr, size_t len,
2468 2458 struct page ***ppp, enum lock_type type, enum seg_rw rw)
2469 2459 {
2470 2460 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_PAGELOCK,
2471 2461 "segdev_pagelock:start");
2472 2462 return (ENOTSUP);
2473 2463 }
2474 2464
2475 2465 /*
2476 2466	 * devmap_device: Used by the devmap framework to establish a mapping;
2477 2467	 * called by devmap_setup(9F) during map setup time.
2478 2468 */
2479 2469 /*ARGSUSED*/
2480 2470 static int
2481 2471 devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
2482 2472 offset_t off, size_t len, uint_t flags)
2483 2473 {
2484 2474 devmap_handle_t *rdhp, *maxdhp;
2485 2475 struct segdev_crargs dev_a;
2486 2476 int err;
2487 2477 uint_t maxprot = PROT_ALL;
2488 2478 offset_t offset = 0;
2489 2479 pfn_t pfn;
2490 2480 struct devmap_pmem_cookie *pcp;
2491 2481
2492 2482 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVICE,
2493 2483 "devmap_device:start dhp=%p addr=%p off=%llx, len=%lx",
2494 2484 (void *)dhp, (void *)addr, off, len);
2495 2485
2496 2486 DEBUGF(2, (CE_CONT, "devmap_device: dhp %p addr %p off %llx len %lx\n",
2497 2487 (void *)dhp, (void *)addr, off, len));
2498 2488
2499 2489 as_rangelock(as);
2500 2490 if ((flags & MAP_FIXED) == 0) {
2501 2491 offset_t aligned_off;
2502 2492
2503 2493 rdhp = maxdhp = dhp;
2504 2494 while (rdhp != NULL) {
2505 2495 maxdhp = (maxdhp->dh_len > rdhp->dh_len) ?
2506 2496 maxdhp : rdhp;
2507 2497 rdhp = rdhp->dh_next;
2508 2498 maxprot |= dhp->dh_maxprot;
2509 2499 }
2510 2500 offset = maxdhp->dh_uoff - dhp->dh_uoff;
2511 2501
2512 2502 /*
2513 2503 * Use the dhp that has the
2514 2504 * largest len to get user address.
2515 2505 */
2516 2506 /*
2517 2507 * If MAPPING_INVALID, cannot use dh_pfn/dh_cvaddr,
2518 2508 * use 0 which is as good as any other.
2519 2509 */
2520 2510 if (maxdhp->dh_flags & DEVMAP_MAPPING_INVALID) {
2521 2511 aligned_off = (offset_t)0;
2522 2512 } else if (dhp_is_devmem(maxdhp)) {
2523 2513 aligned_off = (offset_t)ptob(maxdhp->dh_pfn) - offset;
2524 2514 } else if (dhp_is_pmem(maxdhp)) {
2525 2515 pcp = (struct devmap_pmem_cookie *)maxdhp->dh_pcookie;
2526 2516 pfn = page_pptonum(
2527 2517 pcp->dp_pparray[btop(maxdhp->dh_roff)]);
2528 2518 aligned_off = (offset_t)ptob(pfn) - offset;
2529 2519 } else {
2530 2520 aligned_off = (offset_t)(uintptr_t)maxdhp->dh_cvaddr -
2531 2521 offset;
2532 2522 }
2533 2523
2534 2524 /*
2535 2525 * Pick an address aligned to dh_cookie.
2536 2526		 * For kernel/user memory, the cookie is cvaddr;
2537 2527		 * for device memory, the cookie is the physical address.
2538 2528 */
2539 2529 map_addr(addr, len, aligned_off, 1, flags);
2540 2530 if (*addr == NULL) {
2541 2531 as_rangeunlock(as);
2542 2532 return (ENOMEM);
2543 2533 }
2544 2534 } else {
2545 2535 /*
2546 2536 * User-specified address; blow away any previous mappings.
2547 2537 */
2548 2538 (void) as_unmap(as, *addr, len);
2549 2539 }
2550 2540
2551 2541 dev_a.mapfunc = NULL;
2552 2542 dev_a.dev = dhp->dh_dev;
2553 2543 dev_a.type = flags & MAP_TYPE;
2554 2544 dev_a.offset = off;
2555 2545 /*
2556 2546	 * sdp->maxprot has the least restrictive protection of all dhps.
2557 2547 */
2558 2548 dev_a.maxprot = maxprot;
2559 2549 dev_a.prot = dhp->dh_prot;
2560 2550 /*
2561 2551 * devmap uses dhp->dh_hat_attr for hat.
2562 2552 */
2563 2553 dev_a.hat_flags = 0;
2564 2554 dev_a.hat_attr = 0;
2565 2555 dev_a.devmap_data = (void *)dhp;
2566 2556
2567 2557 err = as_map(as, *addr, len, segdev_create, &dev_a);
2568 2558 as_rangeunlock(as);
2569 2559 return (err);
2570 2560 }
2571 2561
2572 2562 int
2573 2563 devmap_do_ctxmgt(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
2574 2564 uint_t type, uint_t rw, int (*ctxmgt)(devmap_cookie_t, void *, offset_t,
2575 2565 size_t, uint_t, uint_t))
2576 2566 {
2577 2567 register devmap_handle_t *dhp = (devmap_handle_t *)dhc;
2578 2568 struct devmap_ctx *devctx;
2579 2569 int do_timeout = 0;
2580 2570 int ret;
2581 2571
2582 2572 #ifdef lint
2583 2573 pvtp = pvtp;
2584 2574 #endif
2585 2575
2586 2576 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT,
2587 2577 "devmap_do_ctxmgt:start dhp=%p off=%llx, len=%lx",
2588 2578 (void *)dhp, off, len);
2589 2579 DEBUGF(7, (CE_CONT, "devmap_do_ctxmgt: dhp %p off %llx len %lx\n",
2590 2580 (void *)dhp, off, len));
2591 2581
2592 2582 if (ctxmgt == NULL)
2593 2583 return (FC_HWERR);
2594 2584
2595 2585 devctx = dhp->dh_ctx;
2596 2586
2597 2587 /*
2598 2588 * If we are on an MP system with more than one cpu running
2599 2589 * and if a thread on some CPU already has the context, wait
2600 2590 * for it to finish if there is a hysteresis timeout.
2601 2591 *
2602 2592 * We call cv_wait() instead of cv_wait_sig() because
2603 2593	 * it does not matter much whether it returns due to a signal
2604 2594	 * or due to a cv_signal() or cv_broadcast(). In either event
2605 2595	 * we need to complete the mapping, otherwise the process
2606 2596	 * will die with a SEGV.
2607 2597 */
2608 2598 if ((dhp->dh_timeout_length > 0) && (ncpus > 1)) {
2609 2599 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK1,
2610 2600 "devmap_do_ctxmgt:doing hysteresis, devctl %p dhp %p",
2611 2601 devctx, dhp);
2612 2602 do_timeout = 1;
2613 2603 mutex_enter(&devctx->lock);
2614 2604 while (devctx->oncpu)
2615 2605 cv_wait(&devctx->cv, &devctx->lock);
2616 2606 devctx->oncpu = 1;
2617 2607 mutex_exit(&devctx->lock);
2618 2608 }
2619 2609
2620 2610 /*
2621 2611 * Call the contextmgt callback so that the driver can handle
2622 2612 * the fault.
2623 2613 */
2624 2614 ret = (*ctxmgt)(dhp, dhp->dh_pvtp, off, len, type, rw);
2625 2615
2626 2616 /*
2627 2617 * If devmap_access() returned -1, then there was a hardware
2628 2618 * error so we need to convert the return value to something
2629 2619 * that trap() will understand. Otherwise, the return value
2630 2620 * is already a fault code generated by devmap_unload()
2631 2621 * or devmap_load().
2632 2622 */
2633 2623 if (ret) {
2634 2624 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK2,
2635 2625 "devmap_do_ctxmgt: ret=%x dhp=%p devctx=%p",
2636 2626 ret, dhp, devctx);
2637 2627 DEBUGF(1, (CE_CONT, "devmap_do_ctxmgt: ret %x dhp %p\n",
2638 2628 ret, (void *)dhp));
2639 2629 if (devctx->oncpu) {
2640 2630 mutex_enter(&devctx->lock);
2641 2631 devctx->oncpu = 0;
2642 2632 cv_signal(&devctx->cv);
2643 2633 mutex_exit(&devctx->lock);
2644 2634 }
2645 2635 return (FC_HWERR);
2646 2636 }
2647 2637
2648 2638 /*
2649 2639 * Setup the timeout if we need to
2650 2640 */
2651 2641 if (do_timeout) {
2652 2642 mutex_enter(&devctx->lock);
2653 2643 if (dhp->dh_timeout_length > 0) {
2654 2644 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK3,
2655 2645 "devmap_do_ctxmgt:timeout set");
2656 2646 devctx->timeout = timeout(devmap_ctxto,
2657 2647 devctx, dhp->dh_timeout_length);
2658 2648 } else {
2659 2649 /*
2660 2650 * We don't want to wait so set oncpu to
2661 2651 * 0 and wake up anyone waiting.
2662 2652 */
2663 2653 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK4,
2664 2654 "devmap_do_ctxmgt:timeout not set");
2665 2655 devctx->oncpu = 0;
2666 2656 cv_signal(&devctx->cv);
2667 2657 }
2668 2658 mutex_exit(&devctx->lock);
2669 2659 }
2670 2660
2671 2661 return (DDI_SUCCESS);
2672 2662 }
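
A sketch of the expected caller, assuming a hypothetical xx driver: a
driver that needs context management passes its context-switching callback
to devmap_do_ctxmgt() from its devmap_access(9E) entry point. An example
xx_ctxmgt callback is sketched after devmap_load() later in this file.

	static int xx_ctxmgt(devmap_cookie_t, void *, offset_t, size_t,
	    uint_t, uint_t);

	static int
	xx_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
	    size_t len, uint_t type, uint_t rw)
	{
		return (devmap_do_ctxmgt(dhp, pvtp, off, len, type, rw,
		    xx_ctxmgt));
	}
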
2673 2663
2674 2664 /*
2675 2665 * end of mapping
2676 2666 * poff fault_offset |
2677 2667 * base | | |
2678 2668 * | | | |
2679 2669 * V V V V
2680 2670 * +-----------+---------------+-------+---------+-------+
2681 2671 * ^ ^ ^ ^
2682 2672 * |<--- offset--->|<-len->| |
2683 2673 * |<--- dh_len(size of mapping) --->|
2684 2674 * |<-- pg -->|
2685 2675 * -->|rlen|<--
2686 2676 */
2687 2677 static ulong_t
2688 2678 devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
2689 2679 ulong_t *opfn, ulong_t *pagesize)
2690 2680 {
2691 2681 register int level;
2692 2682 ulong_t pg;
2693 2683 ulong_t poff;
2694 2684 ulong_t base;
2695 2685 caddr_t uvaddr;
2696 2686 long rlen;
2697 2687
2698 2688 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP,
2699 2689 "devmap_roundup:start dhp=%p off=%lx len=%lx",
2700 2690 (void *)dhp, offset, len);
2701 2691 DEBUGF(2, (CE_CONT, "devmap_roundup: dhp %p off %lx len %lx\n",
2702 2692 (void *)dhp, offset, len));
2703 2693
2704 2694 /*
2705 2695 * get the max. pagesize that is aligned within the range
2706 2696 * <dh_pfn, dh_pfn+offset>.
2707 2697 *
2708 2698	 * The calculations below use the physical address to determine
2709 2699 * the page size to use. The same calculations can use the
2710 2700 * virtual address to determine the page size.
2711 2701 */
2712 2702 base = (ulong_t)ptob(dhp->dh_pfn);
2713 2703 for (level = dhp->dh_mmulevel; level >= 0; level--) {
2714 2704 pg = page_get_pagesize(level);
2715 2705 poff = ((base + offset) & ~(pg - 1));
2716 2706 uvaddr = dhp->dh_uvaddr + (poff - base);
2717 2707 if ((poff >= base) &&
2718 2708 ((poff + pg) <= (base + dhp->dh_len)) &&
2719 2709 VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg))
2720 2710 break;
2721 2711 }
2722 2712
2723 2713 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK1,
2724 2714 "devmap_roundup: base=%lx poff=%lx dhp=%p",
2725 2715 base, poff, dhp);
2726 2716 DEBUGF(2, (CE_CONT, "devmap_roundup: base %lx poff %lx pfn %lx\n",
2727 2717 base, poff, dhp->dh_pfn));
2728 2718
2729 2719 ASSERT(VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg));
2730 2720 ASSERT(level >= 0);
2731 2721
2732 2722 *pagesize = pg;
2733 2723 *opfn = dhp->dh_pfn + btop(poff - base);
2734 2724
2735 2725 rlen = len + offset - (poff - base + pg);
2736 2726
2737 2727 ASSERT(rlen < (long)len);
2738 2728
2739 2729 TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK2,
2740 2730 "devmap_roundup:ret dhp=%p level=%x rlen=%lx psiz=%p opfn=%p",
2741 2731 (void *)dhp, level, rlen, pagesize, opfn);
2742 2732 DEBUGF(1, (CE_CONT, "devmap_roundup: dhp %p "
2743 2733 "level %x rlen %lx psize %lx opfn %lx\n",
2744 2734 (void *)dhp, level, rlen, *pagesize, *opfn));
2745 2735
2746 2736 return ((ulong_t)((rlen > 0) ? rlen : 0));
2747 2737 }
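
A worked example under assumed values: with a 4 MB large-page level
available, base = ptob(dh_pfn) 4 MB aligned, dh_len = 8 MB, offset = 5 MB,
len = 8 KB, and the VA/PA alignment check passing, the loop selects
pg = 4 MB and poff = base + 4 MB, which lies within <base, base + dh_len>.
*opfn then names the pfn at offset 4 MB, *pagesize = 4 MB, and
rlen = len + offset - (poff - base + pg) is negative, so 0 is returned:
the whole fault is covered by a single large page.
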
2748 2738
2749 2739 /*
2750 2740 * find the dhp that contains addr.
2751 2741 */
2752 2742 static devmap_handle_t *
2753 2743 devmap_find_handle(devmap_handle_t *dhp_head, caddr_t addr)
2754 2744 {
2755 2745 devmap_handle_t *dhp;
2756 2746
2757 2747 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_FIND_HANDLE,
2758 2748 "devmap_find_handle:start");
2759 2749
2760 2750 dhp = dhp_head;
2761 2751 while (dhp) {
2762 2752 if (addr >= dhp->dh_uvaddr &&
2763 2753 addr < (dhp->dh_uvaddr + dhp->dh_len))
2764 2754 return (dhp);
2765 2755 dhp = dhp->dh_next;
2766 2756 }
2767 2757
2768 2758 return ((devmap_handle_t *)NULL);
2769 2759 }
2770 2760
2771 2761 /*
2772 2762 * devmap_unload:
2773 2763	 * Marks a segdev segment (or just its pages, if offset->offset+len
2774 2764	 * is not the entire segment) as intercept and unloads the
2775 2765	 * pages in the range offset -> offset+len.
2776 2766 */
2777 2767 int
2778 2768 devmap_unload(devmap_cookie_t dhc, offset_t offset, size_t len)
2779 2769 {
2780 2770 register devmap_handle_t *dhp = (devmap_handle_t *)dhc;
2781 2771 caddr_t addr;
2782 2772 ulong_t size;
2783 2773 ssize_t soff;
2784 2774
2785 2775 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_UNLOAD,
2786 2776 "devmap_unload:start dhp=%p offset=%llx len=%lx",
2787 2777 (void *)dhp, offset, len);
2788 2778 DEBUGF(7, (CE_CONT, "devmap_unload: dhp %p offset %llx len %lx\n",
2789 2779 (void *)dhp, offset, len));
2790 2780
2791 2781 soff = (ssize_t)(offset - dhp->dh_uoff);
2792 2782 soff = round_down_p2(soff, PAGESIZE);
2793 2783 if (soff < 0 || soff >= dhp->dh_len)
2794 2784 return (FC_MAKE_ERR(EINVAL));
2795 2785
2796 2786 /*
2797 2787 * Address and size must be page aligned. Len is set to the
2798 2788	 * number of bytes in the pages that are required to
2799 2789 * support len. Offset is set to the byte offset of the first byte
2800 2790 * of the page that contains offset.
2801 2791 */
2802 2792 len = round_up_p2(len, PAGESIZE);
2803 2793
2804 2794 /*
2805 2795	 * If len == 0, then calculate the size by getting
2806 2796 * the number of bytes from offset to the end of the segment.
2807 2797 */
2808 2798 if (len == 0)
2809 2799 size = dhp->dh_len - soff;
2810 2800 else {
2811 2801 size = len;
2812 2802 if ((soff + size) > dhp->dh_len)
2813 2803 return (FC_MAKE_ERR(EINVAL));
2814 2804 }
2815 2805
2816 2806 /*
2817 2807 * The address is offset bytes from the base address of
2818 2808 * the dhp.
2819 2809 */
2820 2810 addr = (caddr_t)(soff + dhp->dh_uvaddr);
2821 2811
2822 2812 /*
2823 2813 * If large page size was used in hat_devload(),
2824 2814 * the same page size must be used in hat_unload().
2825 2815 */
2826 2816 if (dhp->dh_flags & DEVMAP_FLAG_LARGE) {
2827 2817 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
2828 2818 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
2829 2819 } else {
2830 2820 hat_unload(dhp->dh_seg->s_as->a_hat, addr, size,
2831 2821 HAT_UNLOAD|HAT_UNLOAD_OTHER);
2832 2822 }
2833 2823
2834 2824 return (0);
2835 2825 }
2836 2826
2837 2827 /*
2838 2828 * calculates the optimal page size that will be used for hat_devload().
2839 2829 */
2840 2830 static void
2841 2831 devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len, caddr_t addr,
2842 2832 size_t *llen, caddr_t *laddr)
2843 2833 {
2844 2834 ulong_t off;
2845 2835 ulong_t pfn;
2846 2836 ulong_t pgsize;
2847 2837 uint_t first = 1;
2848 2838
2849 2839 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GET_LARGE_PGSIZE,
2850 2840 "devmap_get_large_pgsize:start");
2851 2841
2852 2842 /*
2853 2843 * RFE - Code only supports large page mappings for devmem
2854 2844	 * This code could be changed in the future if we want to support
2855 2845 * large page mappings for kernel exported memory.
2856 2846 */
2857 2847 ASSERT(dhp_is_devmem(dhp));
2858 2848 ASSERT(!(dhp->dh_flags & DEVMAP_MAPPING_INVALID));
2859 2849
2860 2850 *llen = 0;
2861 2851 off = (ulong_t)(addr - dhp->dh_uvaddr);
2862 2852 while ((long)len > 0) {
2863 2853 /*
2864 2854 * get the optimal pfn to minimize address translations.
2865 2855 * devmap_roundup() returns residue bytes for next round
2866 2856 * calculations.
2867 2857 */
2868 2858 len = devmap_roundup(dhp, off, len, &pfn, &pgsize);
2869 2859
2870 2860 if (first) {
2871 2861 *laddr = dhp->dh_uvaddr + ptob(pfn - dhp->dh_pfn);
2872 2862 first = 0;
2873 2863 }
2874 2864
2875 2865 *llen += pgsize;
2876 2866 off = ptob(pfn - dhp->dh_pfn) + pgsize;
2877 2867 }
2878 2868	/* Large page mapping len/addr covers more range than the original fault */
2879 2869 ASSERT(*llen >= len && *laddr <= addr);
2880 2870 ASSERT((*laddr + *llen) >= (addr + len));
2881 2871 }
2882 2872
2883 2873 /*
2884 2874 * Initialize the devmap_softlock structure.
2885 2875 */
2886 2876 static struct devmap_softlock *
2887 2877 devmap_softlock_init(dev_t dev, ulong_t id)
2888 2878 {
2889 2879 struct devmap_softlock *slock;
2890 2880 struct devmap_softlock *tmp;
2891 2881
2892 2882 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_INIT,
2893 2883 "devmap_softlock_init:start");
2894 2884
2895 2885 tmp = kmem_zalloc(sizeof (struct devmap_softlock), KM_SLEEP);
2896 2886 mutex_enter(&devmap_slock);
2897 2887
2898 2888 for (slock = devmap_slist; slock != NULL; slock = slock->next)
2899 2889 if ((slock->dev == dev) && (slock->id == id))
2900 2890 break;
2901 2891
2902 2892 if (slock == NULL) {
2903 2893 slock = tmp;
2904 2894 slock->dev = dev;
2905 2895 slock->id = id;
2906 2896 mutex_init(&slock->lock, NULL, MUTEX_DEFAULT, NULL);
2907 2897 cv_init(&slock->cv, NULL, CV_DEFAULT, NULL);
2908 2898 slock->next = devmap_slist;
2909 2899 devmap_slist = slock;
2910 2900 } else
2911 2901 kmem_free(tmp, sizeof (struct devmap_softlock));
2912 2902
2913 2903 mutex_enter(&slock->lock);
2914 2904 slock->refcnt++;
2915 2905 mutex_exit(&slock->lock);
2916 2906 mutex_exit(&devmap_slock);
2917 2907
2918 2908 return (slock);
2919 2909 }
2920 2910
2921 2911 /*
2922 2912 * Wake up processes that sleep on softlocked.
2923 2913 * Free dh_softlock if refcnt is 0.
2924 2914 */
2925 2915 static void
2926 2916 devmap_softlock_rele(devmap_handle_t *dhp)
2927 2917 {
2928 2918 struct devmap_softlock *slock = dhp->dh_softlock;
2929 2919 struct devmap_softlock *tmp;
2930 2920 struct devmap_softlock *parent;
2931 2921
2932 2922 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_RELE,
2933 2923 "devmap_softlock_rele:start");
2934 2924
2935 2925 mutex_enter(&devmap_slock);
2936 2926 mutex_enter(&slock->lock);
2937 2927
2938 2928 ASSERT(slock->refcnt > 0);
2939 2929
2940 2930 slock->refcnt--;
2941 2931
2942 2932 /*
2943 2933 * If no one is using the device, free up the slock data.
2944 2934 */
2945 2935 if (slock->refcnt == 0) {
2946 2936 slock->softlocked = 0;
2947 2937 cv_signal(&slock->cv);
2948 2938
2949 2939 if (devmap_slist == slock)
2950 2940 devmap_slist = slock->next;
2951 2941 else {
2952 2942 parent = devmap_slist;
2953 2943 for (tmp = devmap_slist->next; tmp != NULL;
2954 2944 tmp = tmp->next) {
2955 2945 if (tmp == slock) {
2956 2946 parent->next = tmp->next;
2957 2947 break;
2958 2948 }
2959 2949 parent = tmp;
2960 2950 }
2961 2951 }
2962 2952 mutex_exit(&slock->lock);
2963 2953 mutex_destroy(&slock->lock);
2964 2954 cv_destroy(&slock->cv);
2965 2955 kmem_free(slock, sizeof (struct devmap_softlock));
2966 2956 } else
2967 2957 mutex_exit(&slock->lock);
2968 2958
2969 2959 mutex_exit(&devmap_slock);
2970 2960 }
2971 2961
2972 2962 /*
2973 2963 * Wake up processes that sleep on dh_ctx->locked.
2974 2964 * Free dh_ctx if refcnt is 0.
2975 2965 */
2976 2966 static void
2977 2967 devmap_ctx_rele(devmap_handle_t *dhp)
2978 2968 {
2979 2969 struct devmap_ctx *devctx = dhp->dh_ctx;
2980 2970 struct devmap_ctx *tmp;
2981 2971 struct devmap_ctx *parent;
2982 2972 timeout_id_t tid;
2983 2973
2984 2974 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE,
2985 2975 "devmap_ctx_rele:start");
2986 2976
2987 2977 mutex_enter(&devmapctx_lock);
2988 2978 mutex_enter(&devctx->lock);
2989 2979
2990 2980 ASSERT(devctx->refcnt > 0);
2991 2981
2992 2982 devctx->refcnt--;
2993 2983
2994 2984 /*
2995 2985 * If no one is using the device, free up the devctx data.
2996 2986 */
2997 2987 if (devctx->refcnt == 0) {
2998 2988 /*
2999 2989 * Untimeout any threads using this mapping as they are about
3000 2990 * to go away.
3001 2991 */
3002 2992 if (devctx->timeout != 0) {
3003 2993 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE_CK1,
3004 2994 "devmap_ctx_rele:untimeout ctx->timeout");
3005 2995
3006 2996 tid = devctx->timeout;
3007 2997 mutex_exit(&devctx->lock);
3008 2998 (void) untimeout(tid);
3009 2999 mutex_enter(&devctx->lock);
3010 3000 }
3011 3001
3012 3002 devctx->oncpu = 0;
3013 3003 cv_signal(&devctx->cv);
3014 3004
3015 3005 if (devmapctx_list == devctx)
3016 3006 devmapctx_list = devctx->next;
3017 3007 else {
3018 3008 parent = devmapctx_list;
3019 3009 for (tmp = devmapctx_list->next; tmp != NULL;
3020 3010 tmp = tmp->next) {
3021 3011 if (tmp == devctx) {
3022 3012 parent->next = tmp->next;
3023 3013 break;
3024 3014 }
3025 3015 parent = tmp;
3026 3016 }
3027 3017 }
3028 3018 mutex_exit(&devctx->lock);
3029 3019 mutex_destroy(&devctx->lock);
3030 3020 cv_destroy(&devctx->cv);
3031 3021 kmem_free(devctx, sizeof (struct devmap_ctx));
3032 3022 } else
3033 3023 mutex_exit(&devctx->lock);
3034 3024
3035 3025 mutex_exit(&devmapctx_lock);
3036 3026 }
3037 3027
3038 3028 /*
3039 3029 * devmap_load:
3040 3030	 * Marks a segdev segment (or just its pages, if offset->offset+len
3041 3031	 * is not the entire segment) as nointercept and faults in
3042 3032	 * the pages in the range offset -> offset+len.
3043 3033 */
3044 3034 int
3045 3035 devmap_load(devmap_cookie_t dhc, offset_t offset, size_t len, uint_t type,
3046 3036 uint_t rw)
3047 3037 {
3048 3038 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3049 3039 struct as *asp = dhp->dh_seg->s_as;
3050 3040 caddr_t addr;
3051 3041 ulong_t size;
3052 3042 ssize_t soff; /* offset from the beginning of the segment */
3053 3043 int rc;
3054 3044
3055 3045 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_LOAD,
3056 3046 "devmap_load:start dhp=%p offset=%llx len=%lx",
3057 3047 (void *)dhp, offset, len);
3058 3048
3059 3049 DEBUGF(7, (CE_CONT, "devmap_load: dhp %p offset %llx len %lx\n",
3060 3050 (void *)dhp, offset, len));
3061 3051
3062 3052 /*
3063 3053	 * The hat layer only supports devload to a process' context for which
3064 3054	 * the as lock is held. Verify that here and return an error if a driver
3065 3055	 * inadvertently calls devmap_load on the wrong devmap handle.
3066 3056 */
3067 3057 if ((asp != &kas) && !AS_LOCK_HELD(asp, &asp->a_lock))
3068 3058 return (FC_MAKE_ERR(EINVAL));
3069 3059
3070 3060 soff = (ssize_t)(offset - dhp->dh_uoff);
3071 3061 soff = round_down_p2(soff, PAGESIZE);
3072 3062 if (soff < 0 || soff >= dhp->dh_len)
3073 3063 return (FC_MAKE_ERR(EINVAL));
3074 3064
3075 3065 /*
3076 3066 * Address and size must be page aligned. Len is set to the
3077 3067	 * number of bytes in the pages that are required to
3078 3068 * support len. Offset is set to the byte offset of the first byte
3079 3069 * of the page that contains offset.
3080 3070 */
3081 3071 len = round_up_p2(len, PAGESIZE);
3082 3072
3083 3073 /*
3084 3074 * If len == 0, then calculate the size by getting
3085 3075 * the number of bytes from offset to the end of the segment.
3086 3076 */
3087 3077 if (len == 0)
3088 3078 size = dhp->dh_len - soff;
3089 3079 else {
3090 3080 size = len;
3091 3081 if ((soff + size) > dhp->dh_len)
3092 3082 return (FC_MAKE_ERR(EINVAL));
3093 3083 }
3094 3084
3095 3085 /*
3096 3086 * The address is offset bytes from the base address of
3097 3087 * the segment.
3098 3088 */
3099 3089 addr = (caddr_t)(soff + dhp->dh_uvaddr);
3100 3090
3101 3091 HOLD_DHP_LOCK(dhp);
3102 3092 rc = segdev_faultpages(asp->a_hat,
3103 3093 dhp->dh_seg, addr, size, type, rw, dhp);
3104 3094 RELE_DHP_LOCK(dhp);
3105 3095 return (rc);
3106 3096 }
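
A sketch of how devmap_unload() and devmap_load() pair up in a
context-management callback, following the devmap_do_ctxmgt(9F) pattern;
the xx names and the single saved cookie are assumptions. A one-context
device unloads the mapping that currently owns the hardware context before
loading the faulting one, so later accesses through the old mapping fault
back into the driver.

	static devmap_cookie_t xx_cur_ctx;	/* mapping owning the context */

	/*ARGSUSED*/
	static int
	xx_ctxmgt(devmap_cookie_t dhp, void *pvtp, offset_t off,
	    size_t len, uint_t type, uint_t rw)
	{
		int err;

		/* invalidate the previous owner's translations first */
		if (xx_cur_ctx != NULL && xx_cur_ctx != dhp &&
		    (err = devmap_unload(xx_cur_ctx, off, len)) != 0)
			return (err);
		/* program the device context for dhp here */
		xx_cur_ctx = dhp;
		return (devmap_load(dhp, off, len, type, rw));
	}
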
3107 3097
3108 3098 int
3109 3099 devmap_setup(dev_t dev, offset_t off, struct as *as, caddr_t *addrp,
3110 3100 size_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
3111 3101 {
3112 3102 register devmap_handle_t *dhp;
3113 3103 int (*devmap)(dev_t, devmap_cookie_t, offset_t, size_t,
3114 3104 size_t *, uint_t);
3115 3105 int (*mmap)(dev_t, off_t, int);
3116 3106 struct devmap_callback_ctl *callbackops;
3117 3107 devmap_handle_t *dhp_head = NULL;
3118 3108 devmap_handle_t *dhp_prev = NULL;
3119 3109 devmap_handle_t *dhp_curr;
3120 3110 caddr_t addr;
3121 3111 int map_flag;
3122 3112 int ret;
3123 3113 ulong_t total_len;
3124 3114 size_t map_len;
3125 3115 size_t resid_len = len;
3126 3116 offset_t map_off = off;
3127 3117 struct devmap_softlock *slock = NULL;
3128 3118
3129 3119 #ifdef lint
3130 3120 cred = cred;
3131 3121 #endif
3132 3122
3133 3123 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SETUP,
3134 3124 "devmap_setup:start off=%llx len=%lx", off, len);
3135 3125 DEBUGF(3, (CE_CONT, "devmap_setup: off %llx len %lx\n",
3136 3126 off, len));
3137 3127
3138 3128 devmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_devmap;
3139 3129 mmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap;
3140 3130
3141 3131 /*
3142 3132	 * The driver must provide a devmap(9E) entry point in cb_ops to use
3143 3133	 * the devmap framework.
3144 3134 */
3145 3135 if (devmap == NULL || devmap == nulldev || devmap == nodev)
3146 3136 return (EINVAL);
3147 3137
3148 3138 /*
3149 3139	 * To protect against an inadvertent entry because the devmap entry
3150 3140	 * point is not NULL, return an error if the D_DEVMAP bit is not set
3151 3141	 * in cb_flag and mmap is NULL.
3152 3142 */
3153 3143 map_flag = devopsp[getmajor(dev)]->devo_cb_ops->cb_flag;
3154 3144 if ((map_flag & D_DEVMAP) == 0 && (mmap == NULL || mmap == nulldev))
3155 3145 return (EINVAL);
3156 3146
3157 3147 /*
3158 3148 * devmap allows mmap(2) to map multiple registers.
3159 3149	 * One devmap_handle is created for each register mapped.
3160 3150 */
3161 3151 for (total_len = 0; total_len < len; total_len += map_len) {
3162 3152 dhp = kmem_zalloc(sizeof (devmap_handle_t), KM_SLEEP);
3163 3153
3164 3154 if (dhp_prev != NULL)
3165 3155 dhp_prev->dh_next = dhp;
3166 3156 else
3167 3157 dhp_head = dhp;
3168 3158 dhp_prev = dhp;
3169 3159
3170 3160 dhp->dh_prot = prot;
3171 3161 dhp->dh_orig_maxprot = dhp->dh_maxprot = maxprot;
3172 3162 dhp->dh_dev = dev;
3173 3163 dhp->dh_timeout_length = CTX_TIMEOUT_VALUE;
3174 3164 dhp->dh_uoff = map_off;
3175 3165
3176 3166 /*
3177 3167 * Get mapping specific info from
3178 3168 * the driver, such as rnumber, roff, len, callbackops,
3179 3169 * accattrp and, if the mapping is for kernel memory,
3180 3170 * ddi_umem_cookie.
3181 3171 */
3182 3172 if ((ret = cdev_devmap(dev, dhp, map_off,
3183 3173 resid_len, &map_len, get_udatamodel())) != 0) {
3184 3174 free_devmap_handle(dhp_head);
3185 3175 return (ENXIO);
3186 3176 }
3187 3177
3188 3178 if (map_len & PAGEOFFSET) {
3189 3179 free_devmap_handle(dhp_head);
3190 3180 return (EINVAL);
3191 3181 }
3192 3182
3193 3183 callbackops = &dhp->dh_callbackops;
3194 3184
3195 3185 if ((callbackops->devmap_access == NULL) ||
3196 3186 (callbackops->devmap_access == nulldev) ||
3197 3187 (callbackops->devmap_access == nodev)) {
3198 3188 /*
3199 3189 * Normally devmap does not support MAP_PRIVATE unless
3200 3190 * the drivers provide a valid devmap_access routine.
3201 3191 */
3202 3192 if ((flags & MAP_PRIVATE) != 0) {
3203 3193 free_devmap_handle(dhp_head);
3204 3194 return (EINVAL);
3205 3195 }
3206 3196 } else {
3207 3197 /*
3208 3198 * Initialize dhp_softlock and dh_ctx if the drivers
3209 3199 * provide devmap_access.
3210 3200 */
3211 3201 dhp->dh_softlock = devmap_softlock_init(dev,
3212 3202 (ulong_t)callbackops->devmap_access);
3213 3203 dhp->dh_ctx = devmap_ctxinit(dev,
3214 3204 (ulong_t)callbackops->devmap_access);
3215 3205
3216 3206 /*
3217 3207 * segdev_fault can only work when all
3218 3208 * dh_softlock in a multi-dhp mapping
3219 3209 * are same. see comments in segdev_fault
3220 3210 * This code keeps track of the first
3221 3211 * dh_softlock allocated in slock and
3222 3212 * compares all later allocations and if
3223 3213 * not similar, returns an error.
3224 3214 */
3225 3215 if (slock == NULL)
3226 3216 slock = dhp->dh_softlock;
3227 3217 if (slock != dhp->dh_softlock) {
3228 3218 free_devmap_handle(dhp_head);
3229 3219 return (ENOTSUP);
3230 3220 }
3231 3221 }
3232 3222
3233 3223 map_off += map_len;
3234 3224 resid_len -= map_len;
3235 3225 }
3236 3226
3237 3227 /*
3238 3228 * get the user virtual address and establish the mapping between
3239 3229 * uvaddr and device physical address.
3240 3230 */
3241 3231 if ((ret = devmap_device(dhp_head, as, addrp, off, len, flags))
3242 3232 != 0) {
3243 3233 /*
3244 3234 * Free the devmap handles if an error occurred during the mapping.
3245 3235 */
3246 3236 free_devmap_handle(dhp_head);
3247 3237
3248 3238 return (ret);
3249 3239 }
3250 3240
3251 3241 /*
3252 3242 * Call the driver's devmap_map callback to do more work after the
3253 3243 * mapping, such as allocating driver-private data for context management.
3254 3244 */
3255 3245 dhp = dhp_head;
3256 3246 map_off = off;
3257 3247 addr = *addrp;
3258 3248 while (dhp != NULL) {
3259 3249 callbackops = &dhp->dh_callbackops;
3260 3250 dhp->dh_uvaddr = addr;
3261 3251 dhp_curr = dhp;
3262 3252 if (callbackops->devmap_map != NULL) {
3263 3253 ret = (*callbackops->devmap_map)((devmap_cookie_t)dhp,
3264 3254 dev, flags, map_off,
3265 3255 dhp->dh_len, &dhp->dh_pvtp);
3266 3256 if (ret != 0) {
3267 3257 struct segdev_data *sdp;
3268 3258
3269 3259 /*
3270 3260 * Call the driver's devmap_unmap entry point
3271 3261 * to free driver resources.
3272 3262 */
3273 3263 dhp = dhp_head;
3274 3264 map_off = off;
3275 3265 while (dhp != dhp_curr) {
3276 3266 callbackops = &dhp->dh_callbackops;
3277 3267 if (callbackops->devmap_unmap != NULL) {
3278 3268 (*callbackops->devmap_unmap)(
3279 3269 dhp, dhp->dh_pvtp,
3280 3270 map_off, dhp->dh_len,
3281 3271 NULL, NULL, NULL, NULL);
3282 3272 }
3283 3273 map_off += dhp->dh_len;
3284 3274 dhp = dhp->dh_next;
3285 3275 }
3286 3276 sdp = dhp_head->dh_seg->s_data;
3287 3277 sdp->devmap_data = NULL;
3288 3278 free_devmap_handle(dhp_head);
3289 3279 return (ENXIO);
3290 3280 }
3291 3281 }
3292 3282 map_off += dhp->dh_len;
3293 3283 addr += dhp->dh_len;
3294 3284 dhp = dhp->dh_next;
3295 3285 }
3296 3286
3297 3287 return (0);
3298 3288 }
3299 3289
3300 3290 int
3301 3291 ddi_devmap_segmap(dev_t dev, off_t off, ddi_as_handle_t as, caddr_t *addrp,
3302 3292 off_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
3303 3293 {
3304 3294 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP,
3305 3295 "devmap_segmap:start");
3306 3296 return (devmap_setup(dev, (offset_t)off, (struct as *)as, addrp,
3307 3297 (size_t)len, prot, maxprot, flags, cred));
3308 3298 }
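
For illustration, a minimal sketch of how a character driver's segmap(9E) entry can hand the whole job to ddi_devmap_segmap() above; xx_segmap is a hypothetical driver function, not part of this change:

static int
xx_segmap(dev_t dev, off_t off, struct as *asp, caddr_t *addrp, off_t len,
    unsigned int prot, unsigned int maxprot, unsigned int flags,
    cred_t *credp)
{
	/* Forward to the devmap framework; devmap_setup() does the work. */
	return (ddi_devmap_segmap(dev, off, (ddi_as_handle_t)asp, addrp,
	    len, prot, maxprot, flags, credp));
}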
3309 3299
3310 3300 /*
3311 3301 * Called from devmap_devmem_setup/remap to see if large pages can be used
3312 3302 * for this device mapping.
3313 3303 * Also calculate the maximum page size for this mapping;
3314 3304 * this page size will be used in the fault routine for
3315 3305 * optimal page size calculations.
3316 3306 */
3317 3307 static void
3318 3308 devmap_devmem_large_page_setup(devmap_handle_t *dhp)
3319 3309 {
3320 3310 ASSERT(dhp_is_devmem(dhp));
3321 3311 dhp->dh_mmulevel = 0;
3322 3312
3323 3313 /*
3324 3314 * Use a large page size only if:
3325 3315 * 1. this is device memory,
3326 3316 * 2. the mmu supports multiple page sizes,
3327 3317 * 3. the driver did not disallow it,
3328 3318 * 4. the dhp length is at least as big as the large pagesize, and
3329 3319 * 5. the uvaddr and pfn are large-pagesize aligned.
3330 3320 */
3331 3321 if (page_num_pagesizes() > 1 &&
3332 3322 !(dhp->dh_flags & (DEVMAP_USE_PAGESIZE | DEVMAP_MAPPING_INVALID))) {
3333 3323 ulong_t base;
3334 3324 int level;
3335 3325
3336 3326 base = (ulong_t)ptob(dhp->dh_pfn);
3337 3327 for (level = 1; level < page_num_pagesizes(); level++) {
3338 3328 size_t pgsize = page_get_pagesize(level);
3339 3329 if ((dhp->dh_len < pgsize) ||
3340 3330 (!VA_PA_PGSIZE_ALIGNED((uintptr_t)dhp->dh_uvaddr,
3341 3331 base, pgsize))) {
3342 3332 break;
3343 3333 }
3344 3334 }
3345 3335 dhp->dh_mmulevel = level - 1;
3346 3336 }
3347 3337 if (dhp->dh_mmulevel > 0) {
3348 3338 dhp->dh_flags |= DEVMAP_FLAG_LARGE;
3349 3339 } else {
3350 3340 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3351 3341 }
3352 3342 }
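
A worked illustration of criterion 5 above, with hypothetical addresses: the user VA and the physical address need not be equal, only congruent modulo the candidate page size (this mirrors the VA_PA_PGSIZE_ALIGNED check):

	uintptr_t uvaddr = (uintptr_t)0x20400000;	/* hypothetical user VA */
	uint64_t paddr = 0x80400000ULL;		/* hypothetical ptob(dh_pfn) */
	size_t pgsize = 0x400000;		/* 4M large page */

	/* Same offset within the large page => this pgsize is usable. */
	int ok = ((uvaddr & (pgsize - 1)) == (paddr & (pgsize - 1)));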
3353 3343
3354 3344 /*
3355 3345 * Called by the driver's devmap routine to pass device-specific info to
3356 3346 * the framework. Used for device memory mapping only.
3357 3347 */
3358 3348 int
3359 3349 devmap_devmem_setup(devmap_cookie_t dhc, dev_info_t *dip,
3360 3350 struct devmap_callback_ctl *callbackops, uint_t rnumber, offset_t roff,
3361 3351 size_t len, uint_t maxprot, uint_t flags, ddi_device_acc_attr_t *accattrp)
3362 3352 {
3363 3353 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3364 3354 ddi_acc_handle_t handle;
3365 3355 ddi_map_req_t mr;
3366 3356 ddi_acc_hdl_t *hp;
3367 3357 int err;
3368 3358
3369 3359 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_SETUP,
3370 3360 "devmap_devmem_setup:start dhp=%p offset=%llx rnum=%d len=%lx",
3371 3361 (void *)dhp, roff, rnumber, (uint_t)len);
3372 3362 DEBUGF(2, (CE_CONT, "devmap_devmem_setup: dhp %p offset %llx "
3373 3363 "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len));
3374 3364
3375 3365 /*
3376 3366 * First, check whether this function has already been called for this dhp.
3377 3367 */
3378 3368 if (dhp->dh_flags & DEVMAP_SETUP_DONE)
3379 3369 return (DDI_FAILURE);
3380 3370
3381 3371 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3382 3372 return (DDI_FAILURE);
3383 3373
3384 3374 if (flags & DEVMAP_MAPPING_INVALID) {
3385 3375 /*
3386 3376 * Don't go up the tree to get pfn if the driver specifies
3387 3377 * DEVMAP_MAPPING_INVALID in flags.
3388 3378 *
3389 3379 * If DEVMAP_MAPPING_INVALID is specified, we have to grant
3390 3380 * remap permission.
3391 3381 */
3392 3382 if (!(flags & DEVMAP_ALLOW_REMAP)) {
3393 3383 return (DDI_FAILURE);
3394 3384 }
3395 3385 dhp->dh_pfn = PFN_INVALID;
3396 3386 } else {
3397 3387 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
3398 3388 if (handle == NULL)
3399 3389 return (DDI_FAILURE);
3400 3390
3401 3391 hp = impl_acc_hdl_get(handle);
3402 3392 hp->ah_vers = VERS_ACCHDL;
3403 3393 hp->ah_dip = dip;
3404 3394 hp->ah_rnumber = rnumber;
3405 3395 hp->ah_offset = roff;
3406 3396 hp->ah_len = len;
3407 3397 if (accattrp != NULL)
3408 3398 hp->ah_acc = *accattrp;
3409 3399
3410 3400 mr.map_op = DDI_MO_MAP_LOCKED;
3411 3401 mr.map_type = DDI_MT_RNUMBER;
3412 3402 mr.map_obj.rnumber = rnumber;
3413 3403 mr.map_prot = maxprot & dhp->dh_orig_maxprot;
3414 3404 mr.map_flags = DDI_MF_DEVICE_MAPPING;
3415 3405 mr.map_handlep = hp;
3416 3406 mr.map_vers = DDI_MAP_VERSION;
3417 3407
3418 3408 /*
3419 3409 * Go up the device tree to get the pfn.
3420 3410 * The rootnex_map_regspec() routine in nexus drivers has been
3421 3411 * modified to return pfn if map_flags is DDI_MF_DEVICE_MAPPING.
3422 3412 */
3423 3413 err = ddi_map(dip, &mr, roff, len, (caddr_t *)&dhp->dh_pfn);
3424 3414 dhp->dh_hat_attr = hp->ah_hat_flags;
3425 3415 impl_acc_hdl_free(handle);
3426 3416
3427 3417 if (err)
3428 3418 return (DDI_FAILURE);
3429 3419 }
3430 3420 /* Should not be using devmem setup for memory pages */
3431 3421 ASSERT(!pf_is_memory(dhp->dh_pfn));
3432 3422
3433 3423 /* Only some of the flags bits are settable by the driver */
3434 3424 dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS);
3435 3425 dhp->dh_len = ptob(btopr(len));
3436 3426
3437 3427 dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
3438 3428 dhp->dh_roff = ptob(btop(roff));
3439 3429
3440 3430 /* setup the dh_mmulevel and DEVMAP_FLAG_LARGE */
3441 3431 devmap_devmem_large_page_setup(dhp);
3442 3432 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3443 3433 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3444 3434
3445 3435
3446 3436 if (callbackops != NULL) {
3447 3437 bcopy(callbackops, &dhp->dh_callbackops,
3448 3438 sizeof (struct devmap_callback_ctl));
3449 3439 }
3450 3440
3451 3441 /*
3452 3442 * Initialize dh_lock if we want to do remap.
3453 3443 */
3454 3444 if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
3455 3445 mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
3456 3446 dhp->dh_flags |= DEVMAP_LOCK_INITED;
3457 3447 }
3458 3448
3459 3449 dhp->dh_flags |= DEVMAP_SETUP_DONE;
3460 3450
3461 3451 return (DDI_SUCCESS);
3462 3452 }
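
A minimal sketch of a devmap(9E) entry point built on devmap_devmem_setup(), mapping register set 0; xx_dip and xx_acc_attr are hypothetical driver state, and the error handling is abbreviated:

static int
xx_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model)
{
	size_t length = ptob(btopr(len));	/* round up to a page multiple */

	/* No callbacks, register set 0, driver-chosen access attributes. */
	if (devmap_devmem_setup(dhp, xx_dip, NULL, 0, off, length,
	    PROT_ALL, 0, &xx_acc_attr) != DDI_SUCCESS)
		return (ENXIO);

	*maplen = length;
	return (0);
}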
3463 3453
3464 3454 int
3465 3455 devmap_devmem_remap(devmap_cookie_t dhc, dev_info_t *dip,
3466 3456 uint_t rnumber, offset_t roff, size_t len, uint_t maxprot,
3467 3457 uint_t flags, ddi_device_acc_attr_t *accattrp)
3468 3458 {
3469 3459 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3470 3460 ddi_acc_handle_t handle;
3471 3461 ddi_map_req_t mr;
3472 3462 ddi_acc_hdl_t *hp;
3473 3463 pfn_t pfn;
3474 3464 uint_t hat_flags;
3475 3465 int err;
3476 3466
3477 3467 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_REMAP,
3478 3468 "devmap_devmem_setup:start dhp=%p offset=%llx rnum=%d len=%lx",
3479 3469 (void *)dhp, roff, rnumber, (uint_t)len);
3480 3470 DEBUGF(2, (CE_CONT, "devmap_devmem_remap: dhp %p offset %llx "
3481 3471 "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len));
3482 3472
3483 3473 /*
3484 3474 * Return failure if setup has not been done or no remap permission
3485 3475 * has been granted during the setup.
3486 3476 */
3487 3477 if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
3488 3478 (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
3489 3479 return (DDI_FAILURE);
3490 3480
3491 3481 /* Only DEVMAP_MAPPING_INVALID flag supported for remap */
3492 3482 if ((flags != 0) && (flags != DEVMAP_MAPPING_INVALID))
3493 3483 return (DDI_FAILURE);
3494 3484
3495 3485 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3496 3486 return (DDI_FAILURE);
3497 3487
3498 3488 if (!(flags & DEVMAP_MAPPING_INVALID)) {
3499 3489 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
3500 3490 if (handle == NULL)
3501 3491 return (DDI_FAILURE);
3502 3492 }
3503 3493
3504 3494 HOLD_DHP_LOCK(dhp);
3505 3495
3506 3496 /*
3507 3497 * Unload the old mapping, so the next fault will set up the new mappings.
3508 3498 * Do this while holding the dhp lock so other faults don't reestablish
3509 3499 * the mappings.
3510 3500 */
3511 3501 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
3512 3502 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
3513 3503
3514 3504 if (flags & DEVMAP_MAPPING_INVALID) {
3515 3505 dhp->dh_flags |= DEVMAP_MAPPING_INVALID;
3516 3506 dhp->dh_pfn = PFN_INVALID;
3517 3507 } else {
3518 3508 /* clear any prior DEVMAP_MAPPING_INVALID flag */
3519 3509 dhp->dh_flags &= ~DEVMAP_MAPPING_INVALID;
3520 3510 hp = impl_acc_hdl_get(handle);
3521 3511 hp->ah_vers = VERS_ACCHDL;
3522 3512 hp->ah_dip = dip;
3523 3513 hp->ah_rnumber = rnumber;
3524 3514 hp->ah_offset = roff;
3525 3515 hp->ah_len = len;
3526 3516 if (accattrp != NULL)
3527 3517 hp->ah_acc = *accattrp;
3528 3518
3529 3519 mr.map_op = DDI_MO_MAP_LOCKED;
3530 3520 mr.map_type = DDI_MT_RNUMBER;
3531 3521 mr.map_obj.rnumber = rnumber;
3532 3522 mr.map_prot = maxprot & dhp->dh_orig_maxprot;
3533 3523 mr.map_flags = DDI_MF_DEVICE_MAPPING;
3534 3524 mr.map_handlep = hp;
3535 3525 mr.map_vers = DDI_MAP_VERSION;
3536 3526
3537 3527 /*
3538 3528 * Go up the device tree to get the pfn.
3539 3529 * The rootnex_map_regspec() routine in nexus drivers has been
3540 3530 * modified to return pfn if map_flags is DDI_MF_DEVICE_MAPPING.
3541 3531 */
3542 3532 err = ddi_map(dip, &mr, roff, len, (caddr_t *)&pfn);
3543 3533 hat_flags = hp->ah_hat_flags;
3544 3534 impl_acc_hdl_free(handle);
3545 3535 if (err) {
3546 3536 RELE_DHP_LOCK(dhp);
3547 3537 return (DDI_FAILURE);
3548 3538 }
3549 3539 /*
3550 3540 * The results of ddi_map were stored in local variables first, so
3551 3541 * that a failure would not overwrite the existing dhp with bad data.
3552 3542 */
3553 3543 dhp->dh_pfn = pfn;
3554 3544 dhp->dh_hat_attr = hat_flags;
3555 3545 }
3556 3546
3557 3547 /* clear the large page size flag */
3558 3548 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3559 3549
3560 3550 dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
3561 3551 dhp->dh_roff = ptob(btop(roff));
3562 3552
3563 3553 /* setup the dh_mmulevel and DEVMAP_FLAG_LARGE */
3564 3554 devmap_devmem_large_page_setup(dhp);
3565 3555 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3566 3556 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3567 3557
3568 3558 RELE_DHP_LOCK(dhp);
3569 3559 return (DDI_SUCCESS);
3570 3560 }
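
A sketch of the remap path used from a context-management callback: detach a handle that was set up with DEVMAP_ALLOW_REMAP so the next access faults back into the driver. other_dhp, xx_dip and other_len are hypothetical:

	/*
	 * Invalidate the other context's mapping; its next fault will
	 * come back through the driver's devmap_access() callback.
	 */
	if (devmap_devmem_remap(other_dhp, xx_dip, 0, 0, other_len,
	    PROT_ALL, DEVMAP_MAPPING_INVALID, NULL) != DDI_SUCCESS)
		return (ENXIO);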
3571 3561
3572 3562 /*
3573 3563 * Called by the driver's devmap routine to pass kernel virtual address
3574 3564 * mapping info to the framework. Used only for kernel memory
3575 3565 * allocated from ddi_umem_alloc().
3576 3566 */
3577 3567 int
3578 3568 devmap_umem_setup(devmap_cookie_t dhc, dev_info_t *dip,
3579 3569 struct devmap_callback_ctl *callbackops, ddi_umem_cookie_t cookie,
3580 3570 offset_t off, size_t len, uint_t maxprot, uint_t flags,
3581 3571 ddi_device_acc_attr_t *accattrp)
3582 3572 {
3583 3573 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3584 3574 struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;
3585 3575
3586 3576 #ifdef lint
3587 3577 dip = dip;
3588 3578 #endif
3589 3579
3590 3580 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_SETUP,
3591 3581 "devmap_umem_setup:start dhp=%p offset=%llx cookie=%p len=%lx",
3592 3582 (void *)dhp, off, cookie, len);
3593 3583 DEBUGF(2, (CE_CONT, "devmap_umem_setup: dhp %p offset %llx "
3594 3584 "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));
3595 3585
3596 3586 if (cookie == NULL)
3597 3587 return (DDI_FAILURE);
3598 3588
3599 3589 /* For UMEM_TRASH, this restriction is not needed */
3600 3590 if ((off + len) > cp->size)
3601 3591 return (DDI_FAILURE);
3602 3592
3603 3593 /* check if the cache attributes are supported */
3604 3594 if (i_ddi_check_cache_attr(flags) == B_FALSE)
3605 3595 return (DDI_FAILURE);
3606 3596
3607 3597 /*
3608 3598 * First, check whether this function has already been called for this dhp.
3609 3599 */
3610 3600 if (dhp->dh_flags & DEVMAP_SETUP_DONE)
3611 3601 return (DDI_FAILURE);
3612 3602
3613 3603 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3614 3604 return (DDI_FAILURE);
3615 3605
3616 3606 if (flags & DEVMAP_MAPPING_INVALID) {
3617 3607 /*
3618 3608 * If DEVMAP_MAPPING_INVALID is specified, we have to grant
3619 3609 * remap permission.
3620 3610 */
3621 3611 if (!(flags & DEVMAP_ALLOW_REMAP)) {
3622 3612 return (DDI_FAILURE);
3623 3613 }
3624 3614 } else {
3625 3615 dhp->dh_cookie = cookie;
3626 3616 dhp->dh_roff = ptob(btop(off));
3627 3617 dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;
3628 3618 /* set HAT cache attributes */
3629 3619 i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
3630 3620 /* set HAT endianness attributes */
3631 3621 i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);
3632 3622 }
3633 3623
3634 3624 /*
3635 3625 * The default is _not_ to pass HAT_LOAD_NOCONSIST to hat_devload();
3636 3626 * we pass HAT_LOAD_NOCONSIST _only_ in cases where hat tries to
3637 3627 * create consistent mappings but our intention was to create
3638 3628 * non-consistent mappings.
3639 3629 *
3640 3630 * DEVMEM: hat figures out that it's DEVMEM and creates non-consistent
3641 3631 * mappings.
3642 3632 *
3643 3633 * kernel exported memory: hat figures out that it's memory and always
3644 3634 * creates consistent mappings.
3645 3635 *
3646 3636 * /dev/mem: non-consistent mappings. See comments in common/io/mem.c
3647 3637 *
3648 3638 * /dev/kmem: consistent mappings are created unless they are
3649 3639 * MAP_FIXED. We _explicitly_ tell hat to create non-consistent
3650 3640 * mappings by passing HAT_LOAD_NOCONSIST in case of MAP_FIXED
3651 3641 * mappings of /dev/kmem. See common/io/mem.c
3652 3642 */
3653 3643
3654 3644 /* Only some of the flags bits are settable by the driver */
3655 3645 dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS);
3656 3646
3657 3647 dhp->dh_len = ptob(btopr(len));
3658 3648 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3659 3649 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3660 3650
3661 3651 if (callbackops != NULL) {
3662 3652 bcopy(callbackops, &dhp->dh_callbackops,
3663 3653 sizeof (struct devmap_callback_ctl));
3664 3654 }
3665 3655 /*
3666 3656 * Initialize dh_lock if we want to do remap.
3667 3657 */
3668 3658 if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
3669 3659 mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
3670 3660 dhp->dh_flags |= DEVMAP_LOCK_INITED;
3671 3661 }
3672 3662
3673 3663 dhp->dh_flags |= DEVMAP_SETUP_DONE;
3674 3664
3675 3665 return (DDI_SUCCESS);
3676 3666 }
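
A minimal sketch of the kernel-memory counterpart: a devmap(9E) entry exporting memory previously allocated with ddi_umem_alloc() (defined later in this file). xx_dip, xx_cookie and xx_size are hypothetical driver state, and passing 0/NULL for the flags and access attributes is an assumption of the sketch:

static int
xx_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model)
{
	size_t length = ptob(btopr(len));

	if (off + length > xx_size)	/* stay within the cookie */
		return (EINVAL);

	if (devmap_umem_setup(dhp, xx_dip, NULL, xx_cookie, off, length,
	    PROT_ALL, 0, NULL) != DDI_SUCCESS)
		return (ENXIO);

	*maplen = length;
	return (0);
}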
3677 3667
3678 3668 int
3679 3669 devmap_umem_remap(devmap_cookie_t dhc, dev_info_t *dip,
3680 3670 ddi_umem_cookie_t cookie, offset_t off, size_t len, uint_t maxprot,
3681 3671 uint_t flags, ddi_device_acc_attr_t *accattrp)
3682 3672 {
3683 3673 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3684 3674 struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;
3685 3675
3686 3676 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_REMAP,
3687 3677 "devmap_umem_remap:start dhp=%p offset=%llx cookie=%p len=%lx",
3688 3678 (void *)dhp, off, cookie, len);
3689 3679 DEBUGF(2, (CE_CONT, "devmap_umem_remap: dhp %p offset %llx "
3690 3680 "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));
3691 3681
3692 3682 #ifdef lint
3693 3683 dip = dip;
3694 3684 accattrp = accattrp;
3695 3685 #endif
3696 3686 /*
3697 3687 * Return failure if setup has not been done or no remap permission
3698 3688 * has been granted during the setup.
3699 3689 */
3700 3690 if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
3701 3691 (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
3702 3692 return (DDI_FAILURE);
3703 3693
3704 3694 /* No flags supported for remap yet */
3705 3695 if (flags != 0)
3706 3696 return (DDI_FAILURE);
3707 3697
3708 3698 /* check if the cache attributes are supported */
3709 3699 if (i_ddi_check_cache_attr(flags) == B_FALSE)
3710 3700 return (DDI_FAILURE);
3711 3701
3712 3702 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3713 3703 return (DDI_FAILURE);
3714 3704
3715 3705 /* For UMEM_TRASH, this restriction is not needed */
3716 3706 if ((off + len) > cp->size)
3717 3707 return (DDI_FAILURE);
3718 3708
3719 3709 HOLD_DHP_LOCK(dhp);
3720 3710 /*
3721 3711 * Unload the old mapping, so the next fault will set up the new mappings.
3722 3712 * Do this while holding the dhp lock so other faults don't reestablish
3723 3713 * the mappings.
3724 3714 */
3725 3715 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
3726 3716 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
3727 3717
3728 3718 dhp->dh_cookie = cookie;
3729 3719 dhp->dh_roff = ptob(btop(off));
3730 3720 dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;
3731 3721 /* set HAT cache attributes */
3732 3722 i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
3733 3723 /* set HAT endianness attributes */
3734 3724 i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);
3735 3725
3736 3726 /* clear the large page size flag */
3737 3727 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3738 3728
3739 3729 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3740 3730 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3741 3731 RELE_DHP_LOCK(dhp);
3742 3732 return (DDI_SUCCESS);
3743 3733 }
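
A sketch of retargeting a live mapping at a different umem cookie, assuming DEVMAP_ALLOW_REMAP was granted at setup; new_cookie, xx_dip and xx_size are hypothetical:

	/* flags must be 0 for umem remap (see the check above). */
	if (devmap_umem_remap(dhp, xx_dip, new_cookie, 0, xx_size,
	    PROT_ALL, 0, NULL) != DDI_SUCCESS)
		return (ENXIO);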
3744 3734
3745 3735 /*
3746 3736 * Set the timeout value for the driver's context management callback, e.g.
3747 3737 * devmap_access().
3748 3738 */
3749 3739 void
3750 3740 devmap_set_ctx_timeout(devmap_cookie_t dhc, clock_t ticks)
3751 3741 {
3752 3742 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3753 3743
3754 3744 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SET_CTX_TIMEOUT,
3755 3745 "devmap_set_ctx_timeout:start dhp=%p ticks=%x",
3756 3746 (void *)dhp, ticks);
3757 3747 dhp->dh_timeout_length = ticks;
3758 3748 }
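
Typically called from the driver's devmap_map callback; a sketch using drv_usectohz(9F) to convert a microsecond budget into ticks (50000 is an arbitrary example value):

	/* Let this mapping's context stay loaded for roughly 50 ms. */
	devmap_set_ctx_timeout(dhp, drv_usectohz(50000));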
3759 3749
3760 3750 int
3761 3751 devmap_default_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
3762 3752 size_t len, uint_t type, uint_t rw)
3763 3753 {
3764 3754 #ifdef lint
3765 3755 pvtp = pvtp;
3766 3756 #endif
3767 3757
3768 3758 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DEFAULT_ACCESS,
3769 3759 "devmap_default_access:start");
3770 3760 return (devmap_load(dhp, off, len, type, rw));
3771 3761 }
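
A driver that registers devmap_access but needs no special context handling for a range can simply forward to devmap_default_access(); xx_access is a hypothetical callback:

static int
xx_access(devmap_cookie_t dhp, void *pvtp, offset_t off, size_t len,
    uint_t type, uint_t rw)
{
	/* No per-context work to do; validate and load the range. */
	return (devmap_default_access(dhp, pvtp, off, len, type, rw));
}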
3772 3762
3773 3763 /*
3774 3764 * segkmem_alloc() wrapper to allocate memory which is both
3775 3765 * non-relocatable (for DR) and sharelocked, since the rest
3776 3766 * of this segment driver requires it.
3777 3767 */
3778 3768 static void *
3779 3769 devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag)
3780 3770 {
3781 3771 ASSERT(vmp != NULL);
3782 3772 ASSERT(kvseg.s_base != NULL);
3783 3773 vmflag |= (VM_NORELOC | SEGKMEM_SHARELOCKED);
3784 3774 return (segkmem_alloc(vmp, size, vmflag));
3785 3775 }
3786 3776
3787 3777 /*
3788 3778 * This is where things are a bit incestuous with seg_kmem: unlike
3789 3779 * seg_kp, seg_kmem does not keep its pages long-term sharelocked, so
3790 3780 * we need to do a bit of a dance around that to prevent duplication of
3791 3781 * code until we decide to bite the bullet and implement a new kernel
3792 3782 * segment for driver-allocated memory that is exported to user space.
3793 3783 */
3794 3784 static void
3795 3785 devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size)
3796 3786 {
3797 3787 page_t *pp;
3798 3788 caddr_t addr = inaddr;
3799 3789 caddr_t eaddr;
3800 3790 pgcnt_t npages = btopr(size);
3801 3791
3802 3792 ASSERT(vmp != NULL);
3803 3793 ASSERT(kvseg.s_base != NULL);
3804 3794 ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
3805 3795
3806 3796 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
3807 3797
3808 3798 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
3809 3799 /*
3810 3800 * Use page_find() instead of page_lookup() to find the page
3811 3801 * since we know that it is hashed and has a shared lock.
3812 3802 */
3813 3803 pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);
3814 3804
3815 3805 if (pp == NULL)
3816 3806 panic("devmap_free_pages: page not found");
3817 3807 if (!page_tryupgrade(pp)) {
3818 3808 page_unlock(pp);
3819 3809 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr,
3820 3810 SE_EXCL);
3821 3811 if (pp == NULL)
3822 3812 panic("devmap_free_pages: page already freed");
3823 3813 }
3824 3814 /* Clear p_lckcnt so page_destroy() doesn't update availrmem */
3825 3815 pp->p_lckcnt = 0;
3826 3816 page_destroy(pp, 0);
3827 3817 }
3828 3818 page_unresv(npages);
3829 3819
3830 3820 if (vmp != NULL)
3831 3821 vmem_free(vmp, inaddr, size);
3832 3822 }
3833 3823
3834 3824 /*
3835 3825 * devmap_umem_alloc_np() replaces kmem_zalloc() as the method for
3836 3826 * allocating non-pageable kmem in response to a ddi_umem_alloc()
3837 3827 * default request. For now we allocate our own pages and we keep
3838 3828 * them long-term sharelocked, since: A) the fault routines expect the
3839 3829 * memory to already be locked; B) pageable umem is already long-term
3840 3830 * locked; C) it's a lot of work to make it otherwise, particularly
3841 3831 * since the nexus layer expects the pages to never fault. An RFE is to
3842 3832 * not keep the pages long-term locked, but instead to be able to
3843 3833 * take faults on them and simply look them up in kvp when we
3844 3834 * do. Even then, we must take care not to let pageout
3845 3835 * steal them from us since the data must remain resident; if we
3846 3836 * do this we must come up with some way to pin the pages to prevent
3847 3837 * faults while a driver is doing DMA to/from them.
3848 3838 */
3849 3839 static void *
3850 3840 devmap_umem_alloc_np(size_t size, size_t flags)
3851 3841 {
3852 3842 void *buf;
3853 3843 int vmflags = (flags & DDI_UMEM_NOSLEEP)? VM_NOSLEEP : VM_SLEEP;
3854 3844
3855 3845 buf = vmem_alloc(umem_np_arena, size, vmflags);
3856 3846 if (buf != NULL)
3857 3847 bzero(buf, size);
3858 3848 return (buf);
3859 3849 }
3860 3850
3861 3851 static void
3862 3852 devmap_umem_free_np(void *addr, size_t size)
3863 3853 {
3864 3854 vmem_free(umem_np_arena, addr, size);
3865 3855 }
3866 3856
3867 3857 /*
3868 3858 * Allocate page-aligned kernel memory for exporting to user land.
3869 3859 * The devmap framework will use the cookie allocated by ddi_umem_alloc()
3870 3860 * to find a user virtual address that is of the same color as the address
3871 3861 * allocated here.
3872 3862 */
3873 3863 void *
3874 3864 ddi_umem_alloc(size_t size, int flags, ddi_umem_cookie_t *cookie)
3875 3865 {
3876 3866 register size_t len = ptob(btopr(size));
3877 3867 void *buf = NULL;
3878 3868 struct ddi_umem_cookie *cp;
3879 3869 int iflags = 0;
3880 3870
3881 3871 *cookie = NULL;
3882 3872
3883 3873 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_ALLOC,
3884 3874 "devmap_umem_alloc:start");
3885 3875 if (len == 0)
3886 3876 return ((void *)NULL);
3887 3877
3888 3878 /*
3889 3879 * allocate cookie
3890 3880 */
3891 3881 if ((cp = kmem_zalloc(sizeof (struct ddi_umem_cookie),
3892 3882 flags & DDI_UMEM_NOSLEEP ? KM_NOSLEEP : KM_SLEEP)) == NULL) {
3893 3883 ASSERT(flags & DDI_UMEM_NOSLEEP);
3894 3884 return ((void *)NULL);
3895 3885 }
3896 3886
3897 3887 if (flags & DDI_UMEM_PAGEABLE) {
3898 3888 /* Only one of the flags is allowed */
3899 3889 ASSERT(!(flags & DDI_UMEM_TRASH));
3900 3890 /* initialize resource with 0 */
3901 3891 iflags = KPD_ZERO;
3902 3892
3903 3893 /*
3904 3894 * To allocate unlocked pageable memory, use segkp_get() to
3905 3895 * create a segkp segment. Since segkp can only service kas,
3906 3896 * other segment drivers such as segdev have to do
3907 3897 * as_fault(segkp, SOFTLOCK) in their fault routines.
3908 3898 */
3909 3899 if (flags & DDI_UMEM_NOSLEEP)
3910 3900 iflags |= KPD_NOWAIT;
3911 3901
3912 3902 if ((buf = segkp_get(segkp, len, iflags)) == NULL) {
3913 3903 kmem_free(cp, sizeof (struct ddi_umem_cookie));
3914 3904 return ((void *)NULL);
3915 3905 }
3916 3906 cp->type = KMEM_PAGEABLE;
3917 3907 mutex_init(&cp->lock, NULL, MUTEX_DEFAULT, NULL);
3918 3908 cp->locked = 0;
3919 3909 } else if (flags & DDI_UMEM_TRASH) {
3920 3910 /* Only one of the flags is allowed */
3921 3911 ASSERT(!(flags & DDI_UMEM_PAGEABLE));
3922 3912 cp->type = UMEM_TRASH;
3923 3913 buf = NULL;
3924 3914 } else {
3925 3915 if ((buf = devmap_umem_alloc_np(len, flags)) == NULL) {
3926 3916 kmem_free(cp, sizeof (struct ddi_umem_cookie));
3927 3917 return ((void *)NULL);
3928 3918 }
3929 3919
3930 3920 cp->type = KMEM_NON_PAGEABLE;
3931 3921 }
3932 3922
3933 3923 /*
3934 3924 * Save the size here; it will be needed when
3935 3925 * we do the kmem_free.
3936 3926 */
3937 3927 cp->size = len;
3938 3928 cp->cvaddr = (caddr_t)buf;
3939 3929
3940 3930 *cookie = (void *)cp;
3941 3931 return (buf);
3942 3932 }
3943 3933
3944 3934 void
3945 3935 ddi_umem_free(ddi_umem_cookie_t cookie)
3946 3936 {
3947 3937 struct ddi_umem_cookie *cp;
3948 3938
3949 3939 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_FREE,
3950 3940 "devmap_umem_free:start");
3951 3941
3952 3942 /*
3953 3943 * If the cookie is NULL, this call has no effect.
3954 3944 */
3955 3945 if (cookie == NULL)
3956 3946 return;
3957 3947
3958 3948 cp = (struct ddi_umem_cookie *)cookie;
3959 3949
3960 3950 switch (cp->type) {
3961 3951 case KMEM_PAGEABLE :
3962 3952 ASSERT(cp->cvaddr != NULL && cp->size != 0);
3963 3953 /*
3964 3954 * Check if there are still any pending faults on the cookie
3965 3955 * while the driver is deleting it.
3966 3956 * XXX - could change to an ASSERT but won't catch errant drivers
3967 3957 */
3968 3958 mutex_enter(&cp->lock);
3969 3959 if (cp->locked) {
3970 3960 mutex_exit(&cp->lock);
3971 3961 panic("ddi_umem_free for cookie with pending faults %p",
3972 3962 (void *)cp);
3973 3963 return;
3974 3964 }
3975 3965
3976 3966 segkp_release(segkp, cp->cvaddr);
3977 3967
3978 3968 /*
3979 3969 * release mutex associated with this cookie.
3980 3970 */
3981 3971 mutex_destroy(&cp->lock);
3982 3972 break;
3983 3973 case KMEM_NON_PAGEABLE :
3984 3974 ASSERT(cp->cvaddr != NULL && cp->size != 0);
3985 3975 devmap_umem_free_np(cp->cvaddr, cp->size);
3986 3976 break;
3987 3977 case UMEM_TRASH :
3988 3978 break;
3989 3979 case UMEM_LOCKED :
3990 3980 /* Callers should use ddi_umem_unlock for this type */
3991 3981 ddi_umem_unlock(cookie);
3992 3982 /* Frees the cookie too */
3993 3983 return;
3994 3984 default:
3995 3985 /* panic so we can diagnose the underlying cause */
3996 3986 panic("ddi_umem_free: illegal cookie type 0x%x\n",
3997 3987 cp->type);
3998 3988 }
3999 3989
4000 3990 kmem_free(cookie, sizeof (struct ddi_umem_cookie));
4001 3991 }
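
A usage sketch of the allocate/free pair, assuming a sleeping allocation is acceptable at this point in the driver:

	ddi_umem_cookie_t cookie;
	void *kva;

	/* Four pages of zeroed, non-pageable kernel memory. */
	kva = ddi_umem_alloc(ptob(4), DDI_UMEM_SLEEP, &cookie);

	/* ... export via devmap_umem_setup(), use, then tear down ... */

	ddi_umem_free(cookie);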
4002 3992
4003 3993
4004 3994 static int
4005 3995 segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
4006 3996 {
4007 3997 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4008 3998
4009 3999 /*
4010 4000 * It looks as if it is always mapped shared
4011 4001 */
4012 4002 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4013 4003 "segdev_getmemid:start");
4014 4004 memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
4015 4005 memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4016 4006 return (0);
4017 4007 }
4018 4008
4019 4009 /*
4020 4010 * ddi_umem_alloc() non-pageable quantum cache max size.
4021 4011 * This is just a SWAG.
4022 4012 */
4023 4013 #define DEVMAP_UMEM_QUANTUM (8*PAGESIZE)
4024 4014
4025 4015 /*
4026 4016 * Initialize seg_dev from boot. This routine sets up the trash page
4027 4017 * and creates the umem_np_arena used to back non-pageable memory
4028 4018 * requests.
4029 4019 */
4030 4020 void
4031 4021 segdev_init(void)
4032 4022 {
4033 4023 struct seg kseg;
4034 4024
4035 4025 umem_np_arena = vmem_create("umem_np", NULL, 0, PAGESIZE,
4036 4026 devmap_alloc_pages, devmap_free_pages, heap_arena,
4037 4027 DEVMAP_UMEM_QUANTUM, VM_SLEEP);
4038 4028
4039 4029 kseg.s_as = &kas;
4040 4030 trashpp = page_create_va(&trashvp, 0, PAGESIZE,
4041 4031 PG_NORELOC | PG_EXCL | PG_WAIT, &kseg, NULL);
4042 4032 if (trashpp == NULL)
4043 4033 panic("segdev_init: failed to create trash page");
4044 4034 pagezero(trashpp, 0, PAGESIZE);
4045 4035 page_downgrade(trashpp);
4046 4036 }
4047 4037
4048 4038 /*
4049 4039 * Invoke platform-dependent support routines so that /proc can have
4050 4040 * the platform code deal with curious hardware.
4051 4041 */
4052 4042 int
4053 4043 segdev_copyfrom(struct seg *seg,
4054 4044 caddr_t uaddr, const void *devaddr, void *kaddr, size_t len)
4055 4045 {
4056 4046 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4057 4047 struct snode *sp = VTOS(VTOCVP(sdp->vp));
4058 4048
4059 4049 return (e_ddi_copyfromdev(sp->s_dip,
4060 4050 (off_t)(uaddr - seg->s_base), devaddr, kaddr, len));
4061 4051 }
4062 4052
4063 4053 int
4064 4054 segdev_copyto(struct seg *seg,
4065 4055 caddr_t uaddr, const void *kaddr, void *devaddr, size_t len)
4066 4056 {
4067 4057 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4068 4058 struct snode *sp = VTOS(VTOCVP(sdp->vp));
4069 4059
4070 4060 return (e_ddi_copytodev(sp->s_dip,
4071 4061 (off_t)(uaddr - seg->s_base), kaddr, devaddr, len));
4072 4062 }
1681 lines elided