6144 use C99 initializers in segment ops structures
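For context: this change converts the positional (order-dependent) initializer for the seg_ops table into C99 designated initializers. A minimal sketch of the difference, using a hypothetical two-member ops structure (my_open/my_close are illustrative names, not from this file):

    struct ops {
            int (*open)(void);
            int (*close)(void);
    };

    /* Positional: entries must appear in declaration order. */
    struct ops a = { my_open, my_close };

    /*
     * C99 designated: members are named, order does not matter, and any
     * member not mentioned is implicitly initialized to zero/NULL.
     */
    struct ops b = { .close = my_close, .open = my_open };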
--- old/usr/src/uts/common/vm/seg_dev.c
+++ new/usr/src/uts/common/vm/seg_dev.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 32 * The Regents of the University of California
33 33 * All Rights Reserved
34 34 *
35 35 * University Acknowledgment- Portions of this document are derived from
36 36 * software developed by the University of California, Berkeley, and its
37 37 * contributors.
38 38 */
39 39
40 40 /*
41 41 * VM - segment of a mapped device.
42 42 *
43 43 * This segment driver is used when mapping character special devices.
44 44 */
45 45
46 46 #include <sys/types.h>
47 47 #include <sys/t_lock.h>
48 48 #include <sys/sysmacros.h>
49 49 #include <sys/vtrace.h>
50 50 #include <sys/systm.h>
51 51 #include <sys/vmsystm.h>
52 52 #include <sys/mman.h>
53 53 #include <sys/errno.h>
54 54 #include <sys/kmem.h>
55 55 #include <sys/cmn_err.h>
56 56 #include <sys/vnode.h>
57 57 #include <sys/proc.h>
58 58 #include <sys/conf.h>
59 59 #include <sys/debug.h>
60 60 #include <sys/ddidevmap.h>
61 61 #include <sys/ddi_implfuncs.h>
62 62 #include <sys/lgrp.h>
63 63
64 64 #include <vm/page.h>
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_dev.h>
69 69 #include <vm/seg_kp.h>
70 70 #include <vm/seg_kmem.h>
71 71 #include <vm/vpage.h>
72 72
73 73 #include <sys/sunddi.h>
74 74 #include <sys/esunddi.h>
75 75 #include <sys/fs/snode.h>
76 76
77 77
78 78 #if DEBUG
79 79 int segdev_debug;
80 80 #define DEBUGF(level, args) { if (segdev_debug >= (level)) cmn_err args; }
81 81 #else
82 82 #define DEBUGF(level, args)
83 83 #endif
84 84
85 85 /* Default timeout for devmap context management */
86 86 #define CTX_TIMEOUT_VALUE 0
87 87
88 88 #define HOLD_DHP_LOCK(dhp) if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) \
89 89 { mutex_enter(&dhp->dh_lock); }
90 90
91 91 #define RELE_DHP_LOCK(dhp) if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) \
92 92 { mutex_exit(&dhp->dh_lock); }
93 93
94 94 #define round_down_p2(a, s) ((a) & ~((s) - 1))
95 95 #define round_up_p2(a, s) (((a) + (s) - 1) & ~((s) - 1))
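Both rounding macros assume s is a power of two; they round by masking off the low bits. With illustrative values:

    round_down_p2(0x1234, 0x1000)   /* 0x1234 & ~0xfff           == 0x1000 */
    round_up_p2(0x1234, 0x1000)     /* (0x1234 + 0xfff) & ~0xfff == 0x2000 */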
96 96
97 97 /*
98 98 * VA_PA_ALIGNED checks to see if both VA and PA are on a pgsize boundary
99 99 * VA_PA_PGSIZE_ALIGNED checks to see if VA is aligned with PA w.r.t. pgsize
100 100 */
101 101 #define VA_PA_ALIGNED(uvaddr, paddr, pgsize) \
102 102 (((uvaddr | paddr) & (pgsize - 1)) == 0)
103 103 #define VA_PA_PGSIZE_ALIGNED(uvaddr, paddr, pgsize) \
104 104 (((uvaddr ^ paddr) & (pgsize - 1)) == 0)
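The OR form tests that both addresses sit on a pgsize boundary themselves, while the XOR form only tests that they share the same offset within a pgsize page (i.e., are relatively aligned). With illustrative values:

    VA_PA_ALIGNED(0x10400, 0x2400, 0x1000)          /* 0: neither is 4K-aligned */
    VA_PA_PGSIZE_ALIGNED(0x10400, 0x2400, 0x1000)   /* 1: both at offset 0x400 */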
105 105
106 106 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
107 107
108 108 #define VTOCVP(vp) (VTOS(vp)->s_commonvp) /* we "know" it's an snode */
109 109
110 110 static struct devmap_ctx *devmapctx_list = NULL;
111 111 static struct devmap_softlock *devmap_slist = NULL;
112 112
113 113 /*
114 114 * mutex, vnode and page for the page of zeros we use for the trash mappings.
115 115 * One trash page is allocated on the first ddi_umem_setup call that uses it
116 116 * XXX Eventually, we may want to combine this with what segnf does when all
117 117 * hat layers implement HAT_NOFAULT.
118 118 *
119 119 * The trash page is used when the backing store for a userland mapping is
120 120 * removed but the application semantics do not take kindly to a SIGBUS.
121 121 * In that scenario, the application's pages are mapped to some dummy page
122 122 * which returns garbage on read and writes go into a common place.
123 123 * (Perfect for NO_FAULT semantics)
124 124 * The device driver is responsible for communicating to the app, via some
125 125 * other mechanism, that such a remapping has happened and that the app
126 126 * should take corrective action.
127 127 * We could also use an anonymous memory page, as there is no requirement to
128 128 * keep the page locked; however, this complicates the fault code. RFE.
129 129 */
130 130 static struct vnode trashvp;
131 131 static struct page *trashpp;
132 132
133 133 /* Non-pageable kernel memory is allocated from the umem_np_arena. */
134 134 static vmem_t *umem_np_arena;
135 135
136 136 /* Set the cookie to a value we know will never be a valid umem_cookie */
137 137 #define DEVMAP_DEVMEM_COOKIE ((ddi_umem_cookie_t)0x1)
138 138
139 139 /*
140 140 * Macros to check the type of a devmap handle
141 141 */
142 142 #define cookie_is_devmem(c) \
143 143 ((c) == (struct ddi_umem_cookie *)DEVMAP_DEVMEM_COOKIE)
144 144
145 145 #define cookie_is_pmem(c) \
146 146 ((c) == (struct ddi_umem_cookie *)DEVMAP_PMEM_COOKIE)
147 147
148 148 #define cookie_is_kpmem(c) (!cookie_is_devmem(c) && !cookie_is_pmem(c) &&\
149 149 ((c)->type == KMEM_PAGEABLE))
150 150
151 151 #define dhp_is_devmem(dhp) \
152 152 (cookie_is_devmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
153 153
154 154 #define dhp_is_pmem(dhp) \
155 155 (cookie_is_pmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
156 156
157 157 #define dhp_is_kpmem(dhp) \
158 158 (cookie_is_kpmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
159 159
160 160 /*
161 161 * Private seg op routines.
162 162 */
163 163 static int segdev_dup(struct seg *, struct seg *);
164 164 static int segdev_unmap(struct seg *, caddr_t, size_t);
165 165 static void segdev_free(struct seg *);
166 166 static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
167 167 enum fault_type, enum seg_rw);
168 168 static faultcode_t segdev_faulta(struct seg *, caddr_t);
169 169 static int segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
170 170 static int segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
171 171 static void segdev_badop(void);
172 172 static int segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
173 173 static size_t segdev_incore(struct seg *, caddr_t, size_t, char *);
174 174 static int segdev_lockop(struct seg *, caddr_t, size_t, int, int,
175 175 ulong_t *, size_t);
176 176 static int segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
177 177 static u_offset_t segdev_getoffset(struct seg *, caddr_t);
178 178 static int segdev_gettype(struct seg *, caddr_t);
179 179 static int segdev_getvp(struct seg *, caddr_t, struct vnode **);
180 180 static int segdev_advise(struct seg *, caddr_t, size_t, uint_t);
181 181 static void segdev_dump(struct seg *);
182 182 static int segdev_pagelock(struct seg *, caddr_t, size_t,
183 183 struct page ***, enum lock_type, enum seg_rw);
184 184 static int segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
185 185 static int segdev_getmemid(struct seg *, caddr_t, memid_t *);
186 186 static lgrp_mem_policy_info_t *segdev_getpolicy(struct seg *, caddr_t);
187 187 static int segdev_capable(struct seg *, segcapability_t);
188 188
189 189 /*
190 190 * XXX this struct is used by rootnex_map_fault to identify
191 191 * the segment it has been passed. So if you make it
192 192 * "static" you'll need to fix rootnex_map_fault.
193 193 */
194 194 struct seg_ops segdev_ops = {
195 - segdev_dup,
196 - segdev_unmap,
197 - segdev_free,
198 - segdev_fault,
199 - segdev_faulta,
200 - segdev_setprot,
201 - segdev_checkprot,
202 - (int (*)())segdev_badop, /* kluster */
203 - (size_t (*)(struct seg *))NULL, /* swapout */
204 - segdev_sync, /* sync */
205 - segdev_incore,
206 - segdev_lockop, /* lockop */
207 - segdev_getprot,
208 - segdev_getoffset,
209 - segdev_gettype,
210 - segdev_getvp,
211 - segdev_advise,
212 - segdev_dump,
213 - segdev_pagelock,
214 - segdev_setpagesize,
215 - segdev_getmemid,
216 - segdev_getpolicy,
217 - segdev_capable,
218 - seg_inherit_notsup
195 + .dup = segdev_dup,
196 + .unmap = segdev_unmap,
197 + .free = segdev_free,
198 + .fault = segdev_fault,
199 + .faulta = segdev_faulta,
200 + .setprot = segdev_setprot,
201 + .checkprot = segdev_checkprot,
202 + .kluster = (int (*)())segdev_badop,
203 + .sync = segdev_sync,
204 + .incore = segdev_incore,
205 + .lockop = segdev_lockop,
206 + .getprot = segdev_getprot,
207 + .getoffset = segdev_getoffset,
208 + .gettype = segdev_gettype,
209 + .getvp = segdev_getvp,
210 + .advise = segdev_advise,
211 + .dump = segdev_dump,
212 + .pagelock = segdev_pagelock,
213 + .setpagesize = segdev_setpagesize,
214 + .getmemid = segdev_getmemid,
215 + .getpolicy = segdev_getpolicy,
216 + .capable = segdev_capable,
217 + .inherit = seg_inherit_notsup,
219 218 };
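A note on the conversion above: with designated initializers the table no longer depends on the member order of struct seg_ops, and placeholder slots need not be spelled out; the old explicit NULL swapout entry is simply omitted, since C99 zero-initializes any member that is not named.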
220 219
221 220 /*
222 221 * Private segdev support routines
223 222 */
224 223 static struct segdev_data *sdp_alloc(void);
225 224
226 225 static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
227 226 size_t, enum seg_rw);
228 227
229 228 static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
230 229 struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);
231 230
232 231 static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
233 232 size_t, enum fault_type, enum seg_rw, devmap_handle_t *);
234 233
235 234 static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
236 235 static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
237 236 static void devmap_softlock_rele(devmap_handle_t *);
238 237 static void devmap_ctx_rele(devmap_handle_t *);
239 238
240 239 static void devmap_ctxto(void *);
241 240
242 241 static devmap_handle_t *devmap_find_handle(devmap_handle_t *dhp_head,
243 242 caddr_t addr);
244 243
245 244 static ulong_t devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
246 245 ulong_t *opfn, ulong_t *pagesize);
247 246
248 247 static void free_devmap_handle(devmap_handle_t *dhp);
249 248
250 249 static int devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
251 250 struct seg *newseg);
252 251
253 252 static devmap_handle_t *devmap_handle_unmap(devmap_handle_t *dhp);
254 253
255 254 static void devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len);
256 255
257 256 static void devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr);
258 257
259 258 static int devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
260 259 offset_t off, size_t len, uint_t flags);
261 260
262 261 static void devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len,
263 262 caddr_t addr, size_t *llen, caddr_t *laddr);
264 263
265 264 static void devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len);
266 265
267 266 static void *devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag);
268 267 static void devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size);
269 268
270 269 static void *devmap_umem_alloc_np(size_t size, size_t flags);
271 270 static void devmap_umem_free_np(void *addr, size_t size);
272 271
273 272 /*
274 273 * routines to lock and unlock underlying segkp segment for
275 274 * KMEM_PAGEABLE type cookies.
276 275 */
277 276 static faultcode_t acquire_kpmem_lock(struct ddi_umem_cookie *, size_t);
278 277 static void release_kpmem_lock(struct ddi_umem_cookie *, size_t);
279 278
280 279 /*
281 280 * Routines to synchronize F_SOFTLOCK and F_INVAL faults for
282 281 * drivers with devmap_access callbacks
283 282 */
284 283 static int devmap_softlock_enter(struct devmap_softlock *, size_t,
285 284 enum fault_type);
286 285 static void devmap_softlock_exit(struct devmap_softlock *, size_t,
287 286 enum fault_type);
288 287
289 288 static kmutex_t devmapctx_lock;
290 289
291 290 static kmutex_t devmap_slock;
292 291
293 292 /*
294 293 * Initialize the thread callbacks and thread private data.
295 294 */
296 295 static struct devmap_ctx *
297 296 devmap_ctxinit(dev_t dev, ulong_t id)
298 297 {
299 298 struct devmap_ctx *devctx;
300 299 struct devmap_ctx *tmp;
301 300 dev_info_t *dip;
302 301
303 302 tmp = kmem_zalloc(sizeof (struct devmap_ctx), KM_SLEEP);
304 303
305 304 mutex_enter(&devmapctx_lock);
306 305
307 306 dip = e_ddi_hold_devi_by_dev(dev, 0);
308 307 ASSERT(dip != NULL);
309 308 ddi_release_devi(dip);
310 309
311 310 for (devctx = devmapctx_list; devctx != NULL; devctx = devctx->next)
312 311 if ((devctx->dip == dip) && (devctx->id == id))
313 312 break;
314 313
315 314 if (devctx == NULL) {
316 315 devctx = tmp;
317 316 devctx->dip = dip;
318 317 devctx->id = id;
319 318 mutex_init(&devctx->lock, NULL, MUTEX_DEFAULT, NULL);
320 319 cv_init(&devctx->cv, NULL, CV_DEFAULT, NULL);
321 320 devctx->next = devmapctx_list;
322 321 devmapctx_list = devctx;
323 322 } else
324 323 kmem_free(tmp, sizeof (struct devmap_ctx));
325 324
326 325 mutex_enter(&devctx->lock);
327 326 devctx->refcnt++;
328 327 mutex_exit(&devctx->lock);
329 328 mutex_exit(&devmapctx_lock);
330 329
331 330 return (devctx);
332 331 }
333 332
334 333 /*
335 334 * Timeout callback called if a CPU has not given up the device context
336 335 * within dhp->dh_timeout_length ticks
337 336 */
338 337 static void
339 338 devmap_ctxto(void *data)
340 339 {
341 340 struct devmap_ctx *devctx = data;
342 341
343 342 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_CTXTO,
344 343 "devmap_ctxto:timeout expired, devctx=%p", (void *)devctx);
345 344 mutex_enter(&devctx->lock);
346 345 /*
347 346 * Set oncpu = 0 so the next mapping trying to get the device context
348 347 * can do so.
349 348 */
350 349 devctx->oncpu = 0;
351 350 devctx->timeout = 0;
352 351 cv_signal(&devctx->cv);
353 352 mutex_exit(&devctx->lock);
354 353 }
355 354
356 355 /*
357 356 * Create a device segment.
358 357 */
359 358 int
360 359 segdev_create(struct seg *seg, void *argsp)
361 360 {
362 361 struct segdev_data *sdp;
363 362 struct segdev_crargs *a = (struct segdev_crargs *)argsp;
364 363 devmap_handle_t *dhp = (devmap_handle_t *)a->devmap_data;
365 364 int error;
366 365
367 366 /*
368 367 * Since the address space is "write" locked, we
369 368 * don't need the segment lock to protect "segdev" data.
370 369 */
371 370 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
372 371
373 372 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
374 373
375 374 sdp = sdp_alloc();
376 375
377 376 sdp->mapfunc = a->mapfunc;
378 377 sdp->offset = a->offset;
379 378 sdp->prot = a->prot;
380 379 sdp->maxprot = a->maxprot;
381 380 sdp->type = a->type;
382 381 sdp->pageprot = 0;
383 382 sdp->softlockcnt = 0;
384 383 sdp->vpage = NULL;
385 384
386 385 if (sdp->mapfunc == NULL)
387 386 sdp->devmap_data = dhp;
388 387 else
389 388 sdp->devmap_data = dhp = NULL;
390 389
391 390 sdp->hat_flags = a->hat_flags;
392 391 sdp->hat_attr = a->hat_attr;
393 392
394 393 /*
395 394 * Currently, hat_flags supports only HAT_LOAD_NOCONSIST
396 395 */
397 396 ASSERT(!(sdp->hat_flags & ~HAT_LOAD_NOCONSIST));
398 397
399 398 /*
400 399 * Hold shadow vnode -- segdev only deals with
401 400 * character (VCHR) devices. We use the common
402 401 * vp to hang pages on.
403 402 */
404 403 sdp->vp = specfind(a->dev, VCHR);
405 404 ASSERT(sdp->vp != NULL);
406 405
407 406 seg->s_ops = &segdev_ops;
408 407 seg->s_data = sdp;
409 408
410 409 while (dhp != NULL) {
411 410 dhp->dh_seg = seg;
412 411 dhp = dhp->dh_next;
413 412 }
414 413
415 414 /*
416 415 * Inform the vnode of the new mapping.
417 416 */
418 417 /*
419 418 * It is ok to pass sdp->maxprot to ADDMAP rather than to use
420 419 * dhp specific maxprot because spec_addmap does not use maxprot.
421 420 */
422 421 error = VOP_ADDMAP(VTOCVP(sdp->vp), sdp->offset,
423 422 seg->s_as, seg->s_base, seg->s_size,
424 423 sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
425 424
426 425 if (error != 0) {
427 426 sdp->devmap_data = NULL;
428 427 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
429 428 HAT_UNLOAD_UNMAP);
430 429 } else {
431 430 /*
432 431 * Mappings of /dev/null don't count towards the VSZ of a
433 432 * process. Mappings of /dev/null have no mapping type.
434 433 */
435 434 if ((SEGOP_GETTYPE(seg, (seg)->s_base) & (MAP_SHARED |
436 435 MAP_PRIVATE)) == 0) {
437 436 seg->s_as->a_resvsize -= seg->s_size;
438 437 }
439 438 }
440 439
441 440 return (error);
442 441 }
443 442
444 443 static struct segdev_data *
445 444 sdp_alloc(void)
446 445 {
447 446 struct segdev_data *sdp;
448 447
449 448 sdp = kmem_zalloc(sizeof (struct segdev_data), KM_SLEEP);
450 449 rw_init(&sdp->lock, NULL, RW_DEFAULT, NULL);
451 450
452 451 return (sdp);
453 452 }
454 453
455 454 /*
456 455 * Duplicate seg and return new segment in newseg.
457 456 */
458 457 static int
459 458 segdev_dup(struct seg *seg, struct seg *newseg)
460 459 {
461 460 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
462 461 struct segdev_data *newsdp;
463 462 devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
464 463 size_t npages;
465 464 int ret;
466 465
467 466 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DUP,
468 467 "segdev_dup:start dhp=%p, seg=%p", (void *)dhp, (void *)seg);
469 468
470 469 DEBUGF(3, (CE_CONT, "segdev_dup: dhp %p seg %p\n",
471 470 (void *)dhp, (void *)seg));
472 471
473 472 /*
474 473 * Since the address space is "write" locked, we
475 474 * don't need the segment lock to protect "segdev" data.
476 475 */
477 476 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
478 477
479 478 newsdp = sdp_alloc();
480 479
481 480 newseg->s_ops = seg->s_ops;
482 481 newseg->s_data = (void *)newsdp;
483 482
484 483 VN_HOLD(sdp->vp);
485 484 newsdp->vp = sdp->vp;
486 485 newsdp->mapfunc = sdp->mapfunc;
487 486 newsdp->offset = sdp->offset;
488 487 newsdp->pageprot = sdp->pageprot;
489 488 newsdp->prot = sdp->prot;
490 489 newsdp->maxprot = sdp->maxprot;
491 490 newsdp->type = sdp->type;
492 491 newsdp->hat_attr = sdp->hat_attr;
493 492 newsdp->hat_flags = sdp->hat_flags;
494 493 newsdp->softlockcnt = 0;
495 494
496 495 /*
497 496 * Initialize per page data if the segment we are
498 497 * dup'ing has per page information.
499 498 */
500 499 npages = seg_pages(newseg);
501 500
502 501 if (sdp->vpage != NULL) {
503 502 size_t nbytes = vpgtob(npages);
504 503
505 504 newsdp->vpage = kmem_zalloc(nbytes, KM_SLEEP);
506 505 bcopy(sdp->vpage, newsdp->vpage, nbytes);
507 506 } else
508 507 newsdp->vpage = NULL;
509 508
510 509 /*
511 510 * duplicate devmap handles
512 511 */
513 512 if (dhp != NULL) {
514 513 ret = devmap_handle_dup(dhp,
515 514 (devmap_handle_t **)&newsdp->devmap_data, newseg);
516 515 if (ret != 0) {
517 516 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DUP_CK1,
518 517 "segdev_dup:ret1 ret=%x, dhp=%p seg=%p",
519 518 ret, (void *)dhp, (void *)seg);
520 519 DEBUGF(1, (CE_CONT,
521 520 "segdev_dup: ret %x dhp %p seg %p\n",
522 521 ret, (void *)dhp, (void *)seg));
523 522 return (ret);
524 523 }
525 524 }
526 525
527 526 /*
528 527 * Inform the common vnode of the new mapping.
529 528 */
530 529 return (VOP_ADDMAP(VTOCVP(newsdp->vp),
531 530 newsdp->offset, newseg->s_as,
532 531 newseg->s_base, newseg->s_size, newsdp->prot,
533 532 newsdp->maxprot, sdp->type, CRED(), NULL));
534 533 }
535 534
536 535 /*
537 536 * duplicate devmap handles
538 537 */
539 538 static int
540 539 devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
541 540 struct seg *newseg)
542 541 {
543 542 devmap_handle_t *newdhp_save = NULL;
544 543 devmap_handle_t *newdhp = NULL;
545 544 struct devmap_callback_ctl *callbackops;
546 545
547 546 while (dhp != NULL) {
548 547 newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
549 548
550 549 /* Need to lock the original dhp while copying if REMAP */
551 550 HOLD_DHP_LOCK(dhp);
552 551 bcopy(dhp, newdhp, sizeof (devmap_handle_t));
553 552 RELE_DHP_LOCK(dhp);
554 553 newdhp->dh_seg = newseg;
555 554 newdhp->dh_next = NULL;
556 555 if (newdhp_save != NULL)
557 556 newdhp_save->dh_next = newdhp;
558 557 else
559 558 *new_dhp = newdhp;
560 559 newdhp_save = newdhp;
561 560
562 561 callbackops = &newdhp->dh_callbackops;
563 562
564 563 if (dhp->dh_softlock != NULL)
565 564 newdhp->dh_softlock = devmap_softlock_init(
566 565 newdhp->dh_dev,
567 566 (ulong_t)callbackops->devmap_access);
568 567 if (dhp->dh_ctx != NULL)
569 568 newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
570 569 (ulong_t)callbackops->devmap_access);
571 570
572 571 /*
573 572 * Initialize dh_lock if we want to do remap.
574 573 */
575 574 if (newdhp->dh_flags & DEVMAP_ALLOW_REMAP) {
576 575 mutex_init(&newdhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
577 576 newdhp->dh_flags |= DEVMAP_LOCK_INITED;
578 577 }
579 578
580 579 if (callbackops->devmap_dup != NULL) {
581 580 int ret;
582 581
583 582 /*
584 583 * Call the dup callback so that the driver can
585 584 * duplicate its private data.
586 585 */
587 586 ret = (*callbackops->devmap_dup)(dhp, dhp->dh_pvtp,
588 587 (devmap_cookie_t *)newdhp, &newdhp->dh_pvtp);
589 588
590 589 if (ret != 0) {
591 590 /*
592 591 * We want to free up this segment as the driver
593 592 * has indicated that we can't dup it. But we
594 593 * don't want to call the driver's devmap_unmap
595 594 * callback function, as the driver does not
596 595 * think this segment exists. The caller of
597 596 * devmap_dup will call seg_free on newseg
598 597 * as it was the caller that allocated the
599 598 * segment.
600 599 */
601 600 DEBUGF(1, (CE_CONT, "devmap_handle_dup ERROR: "
602 601 "newdhp %p dhp %p\n", (void *)newdhp,
603 602 (void *)dhp));
604 603 callbackops->devmap_unmap = NULL;
605 604 return (ret);
606 605 }
607 606 }
608 607
609 608 dhp = dhp->dh_next;
610 609 }
611 610
612 611 return (0);
613 612 }
614 613
615 614 /*
616 615 * Split a segment at addr for length len.
617 616 */
618 617 /*ARGSUSED*/
619 618 static int
620 619 segdev_unmap(struct seg *seg, caddr_t addr, size_t len)
621 620 {
622 621 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
623 622 register struct segdev_data *nsdp;
624 623 register struct seg *nseg;
625 624 register size_t opages; /* old segment size in pages */
626 625 register size_t npages; /* new segment size in pages */
627 626 register size_t dpages; /* pages being deleted (unmapped) */
628 627 register size_t nbytes;
629 628 devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
630 629 devmap_handle_t *dhpp;
631 630 devmap_handle_t *newdhp;
632 631 struct devmap_callback_ctl *callbackops;
633 632 caddr_t nbase;
634 633 offset_t off;
635 634 ulong_t nsize;
636 635 size_t mlen, sz;
637 636
638 637 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP,
639 638 "segdev_unmap:start dhp=%p, seg=%p addr=%p len=%lx",
640 639 (void *)dhp, (void *)seg, (void *)addr, len);
641 640
642 641 DEBUGF(3, (CE_CONT, "segdev_unmap: dhp %p seg %p addr %p len %lx\n",
643 642 (void *)dhp, (void *)seg, (void *)addr, len));
644 643
645 644 /*
646 645 * Since the address space is "write" locked, we
647 646 * don't need the segment lock to protect "segdev" data.
648 647 */
649 648 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
650 649
651 650 if ((sz = sdp->softlockcnt) > 0) {
652 651 /*
653 652 * Fail the unmap if pages are SOFTLOCKed through this mapping.
654 653 * softlockcnt is protected from change by the as write lock.
655 654 */
656 655 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK1,
657 656 "segdev_unmap:error softlockcnt = %ld", sz);
658 657 DEBUGF(1, (CE_CONT, "segdev_unmap: softlockcnt %ld\n", sz));
659 658 return (EAGAIN);
660 659 }
661 660
662 661 /*
663 662 * Check for bad sizes
664 663 */
665 664 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
666 665 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
667 666 panic("segdev_unmap");
668 667
669 668 if (dhp != NULL) {
670 669 devmap_handle_t *tdhp;
671 670 /*
672 671 * If large page size was used in hat_devload(),
673 672 * the same page size must be used in hat_unload().
674 673 */
675 674 dhpp = tdhp = devmap_find_handle(dhp, addr);
676 675 while (tdhp != NULL) {
677 676 if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
678 677 break;
679 678 }
680 679 tdhp = tdhp->dh_next;
681 680 }
682 681 if (tdhp != NULL) { /* found a dhp using large pages */
683 682 size_t slen = len;
684 683 size_t mlen;
685 684 size_t soff;
686 685
687 686 soff = (ulong_t)(addr - dhpp->dh_uvaddr);
688 687 while (slen != 0) {
689 688 mlen = MIN(slen, (dhpp->dh_len - soff));
690 689 hat_unload(seg->s_as->a_hat, dhpp->dh_uvaddr,
691 690 dhpp->dh_len, HAT_UNLOAD_UNMAP);
692 691 dhpp = dhpp->dh_next;
693 692 ASSERT(slen >= mlen);
694 693 slen -= mlen;
695 694 soff = 0;
696 695 }
697 696 } else
698 697 hat_unload(seg->s_as->a_hat, addr, len,
699 698 HAT_UNLOAD_UNMAP);
700 699 } else {
701 700 /*
702 701 * Unload any hardware translations in the range
703 702 * to be taken out.
704 703 */
705 704 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD_UNMAP);
706 705 }
707 706
708 707 /*
709 708 * get the user offset which will be used in the driver callbacks
710 709 */
711 710 off = sdp->offset + (offset_t)(addr - seg->s_base);
712 711
713 712 /*
714 713 * Inform the vnode of the unmapping.
715 714 */
716 715 ASSERT(sdp->vp != NULL);
717 716 (void) VOP_DELMAP(VTOCVP(sdp->vp), off, seg->s_as, addr, len,
718 717 sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
719 718
720 719 /*
721 720 * Check for entire segment
722 721 */
723 722 if (addr == seg->s_base && len == seg->s_size) {
724 723 seg_free(seg);
725 724 return (0);
726 725 }
727 726
728 727 opages = seg_pages(seg);
729 728 dpages = btop(len);
730 729 npages = opages - dpages;
731 730
732 731 /*
733 732 * Check for beginning of segment
734 733 */
735 734 if (addr == seg->s_base) {
736 735 if (sdp->vpage != NULL) {
737 736 register struct vpage *ovpage;
738 737
739 738 ovpage = sdp->vpage; /* keep pointer to vpage */
740 739
741 740 nbytes = vpgtob(npages);
742 741 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
743 742 bcopy(&ovpage[dpages], sdp->vpage, nbytes);
744 743
745 744 /* free up old vpage */
746 745 kmem_free(ovpage, vpgtob(opages));
747 746 }
748 747
749 748 /*
750 749 * free devmap handles from the beginning of the mapping.
751 750 */
752 751 if (dhp != NULL)
753 752 devmap_handle_unmap_head(dhp, len);
754 753
755 754 sdp->offset += (offset_t)len;
756 755
757 756 seg->s_base += len;
758 757 seg->s_size -= len;
759 758
760 759 return (0);
761 760 }
762 761
763 762 /*
764 763 * Check for end of segment
765 764 */
766 765 if (addr + len == seg->s_base + seg->s_size) {
767 766 if (sdp->vpage != NULL) {
768 767 register struct vpage *ovpage;
769 768
770 769 ovpage = sdp->vpage; /* keep pointer to vpage */
771 770
772 771 nbytes = vpgtob(npages);
773 772 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
774 773 bcopy(ovpage, sdp->vpage, nbytes);
775 774
776 775 /* free up old vpage */
777 776 kmem_free(ovpage, vpgtob(opages));
778 777 }
779 778 seg->s_size -= len;
780 779
781 780 /*
782 781 * free devmap handles from addr to the end of the mapping.
783 782 */
784 783 if (dhp != NULL)
785 784 devmap_handle_unmap_tail(dhp, addr);
786 785
787 786 return (0);
788 787 }
789 788
790 789 /*
791 790 * The section to go is in the middle of the segment,
792 791 * have to make it into two segments. nseg is made for
793 792 * the high end while seg is cut down at the low end.
794 793 */
795 794 nbase = addr + len; /* new seg base */
796 795 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
797 796 seg->s_size = addr - seg->s_base; /* shrink old seg */
798 797 nseg = seg_alloc(seg->s_as, nbase, nsize);
799 798 if (nseg == NULL)
800 799 panic("segdev_unmap seg_alloc");
801 800
802 801 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK2,
803 802 "segdev_unmap: seg=%p nseg=%p", (void *)seg, (void *)nseg);
804 803 DEBUGF(3, (CE_CONT, "segdev_unmap: segdev_dup seg %p nseg %p\n",
805 804 (void *)seg, (void *)nseg));
806 805 nsdp = sdp_alloc();
807 806
808 807 nseg->s_ops = seg->s_ops;
809 808 nseg->s_data = (void *)nsdp;
810 809
811 810 VN_HOLD(sdp->vp);
812 811 nsdp->mapfunc = sdp->mapfunc;
813 812 nsdp->offset = sdp->offset + (offset_t)(nseg->s_base - seg->s_base);
814 813 nsdp->vp = sdp->vp;
815 814 nsdp->pageprot = sdp->pageprot;
816 815 nsdp->prot = sdp->prot;
817 816 nsdp->maxprot = sdp->maxprot;
818 817 nsdp->type = sdp->type;
819 818 nsdp->hat_attr = sdp->hat_attr;
820 819 nsdp->hat_flags = sdp->hat_flags;
821 820 nsdp->softlockcnt = 0;
822 821
823 822 /*
824 823 * Initialize per page data if the segment we are
825 824 * dup'ing has per page information.
826 825 */
827 826 if (sdp->vpage != NULL) {
828 827 /* need to split vpage into two arrays */
829 828 register size_t nnbytes;
830 829 register size_t nnpages;
831 830 register struct vpage *ovpage;
832 831
833 832 ovpage = sdp->vpage; /* keep pointer to vpage */
834 833
835 834 npages = seg_pages(seg); /* seg has shrunk */
836 835 nbytes = vpgtob(npages);
837 836 nnpages = seg_pages(nseg);
838 837 nnbytes = vpgtob(nnpages);
839 838
840 839 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
841 840 bcopy(ovpage, sdp->vpage, nbytes);
842 841
843 842 nsdp->vpage = kmem_alloc(nnbytes, KM_SLEEP);
844 843 bcopy(&ovpage[npages + dpages], nsdp->vpage, nnbytes);
845 844
846 845 /* free up old vpage */
847 846 kmem_free(ovpage, vpgtob(opages));
848 847 } else
849 848 nsdp->vpage = NULL;
850 849
851 850 /*
852 851 * unmap dhps.
853 852 */
854 853 if (dhp == NULL) {
855 854 nsdp->devmap_data = NULL;
856 855 return (0);
857 856 }
858 857 while (dhp != NULL) {
859 858 callbackops = &dhp->dh_callbackops;
860 859 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK3,
861 860 "segdev_unmap: dhp=%p addr=%p", dhp, addr);
862 861 DEBUGF(3, (CE_CONT, "unmap: dhp %p addr %p uvaddr %p len %lx\n",
863 862 (void *)dhp, (void *)addr,
864 863 (void *)dhp->dh_uvaddr, dhp->dh_len));
865 864
866 865 if (addr == (dhp->dh_uvaddr + dhp->dh_len)) {
867 866 dhpp = dhp->dh_next;
868 867 dhp->dh_next = NULL;
869 868 dhp = dhpp;
870 869 } else if (addr > (dhp->dh_uvaddr + dhp->dh_len)) {
871 870 dhp = dhp->dh_next;
872 871 } else if (addr > dhp->dh_uvaddr &&
873 872 (addr + len) < (dhp->dh_uvaddr + dhp->dh_len)) {
874 873 /*
875 874 * <addr, addr+len> is enclosed by dhp.
876 875 * create a newdhp that begins at addr+len and
877 876 * ends at dhp->dh_uvaddr+dhp->dh_len.
878 877 */
879 878 newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
880 879 HOLD_DHP_LOCK(dhp);
881 880 bcopy(dhp, newdhp, sizeof (devmap_handle_t));
882 881 RELE_DHP_LOCK(dhp);
883 882 newdhp->dh_seg = nseg;
884 883 newdhp->dh_next = dhp->dh_next;
885 884 if (dhp->dh_softlock != NULL)
886 885 newdhp->dh_softlock = devmap_softlock_init(
887 886 newdhp->dh_dev,
888 887 (ulong_t)callbackops->devmap_access);
889 888 if (dhp->dh_ctx != NULL)
890 889 newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
891 890 (ulong_t)callbackops->devmap_access);
892 891 if (newdhp->dh_flags & DEVMAP_LOCK_INITED) {
893 892 mutex_init(&newdhp->dh_lock,
894 893 NULL, MUTEX_DEFAULT, NULL);
895 894 }
896 895 if (callbackops->devmap_unmap != NULL)
897 896 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
898 897 off, len, dhp, &dhp->dh_pvtp,
899 898 newdhp, &newdhp->dh_pvtp);
900 899 mlen = len + (addr - dhp->dh_uvaddr);
901 900 devmap_handle_reduce_len(newdhp, mlen);
902 901 nsdp->devmap_data = newdhp;
903 902 /* XX Changing len should recalculate LARGE flag */
904 903 dhp->dh_len = addr - dhp->dh_uvaddr;
905 904 dhpp = dhp->dh_next;
906 905 dhp->dh_next = NULL;
907 906 dhp = dhpp;
908 907 } else if ((addr > dhp->dh_uvaddr) &&
909 908 ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len))) {
910 909 mlen = dhp->dh_len + dhp->dh_uvaddr - addr;
911 910 /*
912 911 * <addr, addr+len> spans over dhps.
913 912 */
914 913 if (callbackops->devmap_unmap != NULL)
915 914 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
916 915 off, mlen, (devmap_cookie_t *)dhp,
917 916 &dhp->dh_pvtp, NULL, NULL);
918 917 /* XX Changing len should recalculate LARGE flag */
919 918 dhp->dh_len = addr - dhp->dh_uvaddr;
920 919 dhpp = dhp->dh_next;
921 920 dhp->dh_next = NULL;
922 921 dhp = dhpp;
923 922 nsdp->devmap_data = dhp;
924 923 } else if ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len)) {
925 924 /*
926 925 * dhp is enclosed by <addr, addr+len>.
927 926 */
928 927 dhp->dh_seg = nseg;
929 928 nsdp->devmap_data = dhp;
930 929 dhp = devmap_handle_unmap(dhp);
931 930 nsdp->devmap_data = dhp; /* XX redundant? */
932 931 } else if (((addr + len) > dhp->dh_uvaddr) &&
933 932 ((addr + len) < (dhp->dh_uvaddr + dhp->dh_len))) {
934 933 mlen = addr + len - dhp->dh_uvaddr;
935 934 if (callbackops->devmap_unmap != NULL)
936 935 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
937 936 dhp->dh_uoff, mlen, NULL,
938 937 NULL, dhp, &dhp->dh_pvtp);
939 938 devmap_handle_reduce_len(dhp, mlen);
940 939 nsdp->devmap_data = dhp;
941 940 dhp->dh_seg = nseg;
942 941 dhp = dhp->dh_next;
943 942 } else {
944 943 dhp->dh_seg = nseg;
945 944 dhp = dhp->dh_next;
946 945 }
947 946 }
948 947 return (0);
949 948 }
950 949
951 950 /*
952 951 * Utility function handles reducing the length of a devmap handle during unmap
953 952 * Note that this is only used for unmapping the front portion of the handle,
954 953 * i.e., we are bumping the offset/pfn etc. up by len.
955 954 * Do not use if reducing length at the tail.
956 955 */
957 956 static void
958 957 devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len)
959 958 {
960 959 struct ddi_umem_cookie *cp;
961 960 struct devmap_pmem_cookie *pcp;
962 961 /*
963 962 * adjust devmap handle fields
964 963 */
965 964 ASSERT(len < dhp->dh_len);
966 965
967 966 /* Make sure only page-aligned changes are done */
968 967 ASSERT((len & PAGEOFFSET) == 0);
969 968
970 969 dhp->dh_len -= len;
971 970 dhp->dh_uoff += (offset_t)len;
972 971 dhp->dh_roff += (offset_t)len;
973 972 dhp->dh_uvaddr += len;
974 973 /* Need to grab dhp lock if REMAP */
975 974 HOLD_DHP_LOCK(dhp);
976 975 cp = dhp->dh_cookie;
977 976 if (!(dhp->dh_flags & DEVMAP_MAPPING_INVALID)) {
978 977 if (cookie_is_devmem(cp)) {
979 978 dhp->dh_pfn += btop(len);
980 979 } else if (cookie_is_pmem(cp)) {
981 980 pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
982 981 ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
983 982 dhp->dh_roff < ptob(pcp->dp_npages));
984 983 } else {
985 984 ASSERT(dhp->dh_roff < cp->size);
986 985 ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
987 986 dhp->dh_cvaddr < (cp->cvaddr + cp->size));
988 987 ASSERT((dhp->dh_cvaddr + len) <=
989 988 (cp->cvaddr + cp->size));
990 989
991 990 dhp->dh_cvaddr += len;
992 991 }
993 992 }
994 993 /* XXX - Should recalculate the DEVMAP_FLAG_LARGE after changes */
995 994 RELE_DHP_LOCK(dhp);
996 995 }
997 996
998 997 /*
999 998 * Free devmap handle, dhp.
1000 999 * Return the next devmap handle on the linked list.
1001 1000 */
1002 1001 static devmap_handle_t *
1003 1002 devmap_handle_unmap(devmap_handle_t *dhp)
1004 1003 {
1005 1004 struct devmap_callback_ctl *callbackops = &dhp->dh_callbackops;
1006 1005 struct segdev_data *sdp = (struct segdev_data *)dhp->dh_seg->s_data;
1007 1006 devmap_handle_t *dhpp = (devmap_handle_t *)sdp->devmap_data;
1008 1007
1009 1008 ASSERT(dhp != NULL);
1010 1009
1011 1010 /*
1012 1011 * before we free up dhp, call the driver's devmap_unmap entry point
1013 1012 * to free resources allocated for this dhp.
1014 1013 */
1015 1014 if (callbackops->devmap_unmap != NULL) {
1016 1015 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp, dhp->dh_uoff,
1017 1016 dhp->dh_len, NULL, NULL, NULL, NULL);
1018 1017 }
1019 1018
1020 1019 if (dhpp == dhp) { /* releasing first dhp, change sdp data */
1021 1020 sdp->devmap_data = dhp->dh_next;
1022 1021 } else {
1023 1022 while (dhpp->dh_next != dhp) {
1024 1023 dhpp = dhpp->dh_next;
1025 1024 }
1026 1025 dhpp->dh_next = dhp->dh_next;
1027 1026 }
1028 1027 dhpp = dhp->dh_next; /* return value is next dhp in chain */
1029 1028
1030 1029 if (dhp->dh_softlock != NULL)
1031 1030 devmap_softlock_rele(dhp);
1032 1031
1033 1032 if (dhp->dh_ctx != NULL)
1034 1033 devmap_ctx_rele(dhp);
1035 1034
1036 1035 if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
1037 1036 mutex_destroy(&dhp->dh_lock);
1038 1037 }
1039 1038 kmem_free(dhp, sizeof (devmap_handle_t));
1040 1039
1041 1040 return (dhpp);
1042 1041 }
1043 1042
1044 1043 /*
1045 1044 * Free complete devmap handles from dhp for len bytes
1046 1045 * dhp can be either the first handle or a subsequent handle
1047 1046 */
1048 1047 static void
1049 1048 devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len)
1050 1049 {
1051 1050 struct devmap_callback_ctl *callbackops;
1052 1051
1053 1052 /*
1054 1053 * free the devmap handles covered by len.
1055 1054 */
1056 1055 while (len >= dhp->dh_len) {
1057 1056 len -= dhp->dh_len;
1058 1057 dhp = devmap_handle_unmap(dhp);
1059 1058 }
1060 1059 if (len != 0) { /* partial unmap at head of first remaining dhp */
1061 1060 callbackops = &dhp->dh_callbackops;
1062 1061
1063 1062 /*
1064 1063 * Call the unmap callback so the driver can make
1065 1064 * adjustments to its private data.
1066 1065 */
1067 1066 if (callbackops->devmap_unmap != NULL)
1068 1067 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
1069 1068 dhp->dh_uoff, len, NULL, NULL, dhp, &dhp->dh_pvtp);
1070 1069 devmap_handle_reduce_len(dhp, len);
1071 1070 }
1072 1071 }
1073 1072
1074 1073 /*
1075 1074 * Free devmap handles to truncate the mapping after addr
1076 1075 * RFE: Simpler to pass in dhp pointing at correct dhp (avoid find again)
1077 1076 * Also could then use the routine in middle unmap case too
1078 1077 */
1079 1078 static void
1080 1079 devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr)
1081 1080 {
1082 1081 register struct seg *seg = dhp->dh_seg;
1083 1082 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1084 1083 register devmap_handle_t *dhph = (devmap_handle_t *)sdp->devmap_data;
1085 1084 struct devmap_callback_ctl *callbackops;
1086 1085 register devmap_handle_t *dhpp;
1087 1086 size_t maplen;
1088 1087 ulong_t off;
1089 1088 size_t len;
1090 1089
1091 1090 maplen = (size_t)(addr - dhp->dh_uvaddr);
1092 1091 dhph = devmap_find_handle(dhph, addr);
1093 1092
1094 1093 while (dhph != NULL) {
1095 1094 if (maplen == 0) {
1096 1095 dhph = devmap_handle_unmap(dhph);
1097 1096 } else {
1098 1097 callbackops = &dhph->dh_callbackops;
1099 1098 len = dhph->dh_len - maplen;
1100 1099 off = (ulong_t)sdp->offset + (addr - seg->s_base);
1101 1100 /*
1102 1101 * Call the unmap callback so the driver
1103 1102 * can make adjustments to its private data.
1104 1103 */
1105 1104 if (callbackops->devmap_unmap != NULL)
1106 1105 (*callbackops->devmap_unmap)(dhph,
1107 1106 dhph->dh_pvtp, off, len,
1108 1107 (devmap_cookie_t *)dhph,
1109 1108 &dhph->dh_pvtp, NULL, NULL);
1110 1109 /* XXX Reducing len needs to recalculate LARGE flag */
1111 1110 dhph->dh_len = maplen;
1112 1111 maplen = 0;
1113 1112 dhpp = dhph->dh_next;
1114 1113 dhph->dh_next = NULL;
1115 1114 dhph = dhpp;
1116 1115 }
1117 1116 } /* end while */
1118 1117 }
1119 1118
1120 1119 /*
1121 1120 * Free a segment.
1122 1121 */
1123 1122 static void
1124 1123 segdev_free(struct seg *seg)
1125 1124 {
1126 1125 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1127 1126 devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
1128 1127
1129 1128 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FREE,
1130 1129 "segdev_free: dhp=%p seg=%p", (void *)dhp, (void *)seg);
1131 1130 DEBUGF(3, (CE_CONT, "segdev_free: dhp %p seg %p\n",
1132 1131 (void *)dhp, (void *)seg));
1133 1132
1134 1133 /*
1135 1134 * Since the address space is "write" locked, we
1136 1135 * don't need the segment lock to protect "segdev" data.
1137 1136 */
1138 1137 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1139 1138
1140 1139 while (dhp != NULL)
1141 1140 dhp = devmap_handle_unmap(dhp);
1142 1141
1143 1142 VN_RELE(sdp->vp);
1144 1143 if (sdp->vpage != NULL)
1145 1144 kmem_free(sdp->vpage, vpgtob(seg_pages(seg)));
1146 1145
1147 1146 rw_destroy(&sdp->lock);
1148 1147 kmem_free(sdp, sizeof (*sdp));
1149 1148 }
1150 1149
1151 1150 static void
1152 1151 free_devmap_handle(devmap_handle_t *dhp)
1153 1152 {
1154 1153 register devmap_handle_t *dhpp;
1155 1154
1156 1155 /*
1157 1156 * free up devmap handle
1158 1157 */
1159 1158 while (dhp != NULL) {
1160 1159 dhpp = dhp->dh_next;
1161 1160 if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
1162 1161 mutex_destroy(&dhp->dh_lock);
1163 1162 }
1164 1163
1165 1164 if (dhp->dh_softlock != NULL)
1166 1165 devmap_softlock_rele(dhp);
1167 1166
1168 1167 if (dhp->dh_ctx != NULL)
1169 1168 devmap_ctx_rele(dhp);
1170 1169
1171 1170 kmem_free(dhp, sizeof (devmap_handle_t));
1172 1171 dhp = dhpp;
1173 1172 }
1174 1173 }
1175 1174
1176 1175 /*
1177 1176 * routines to lock and unlock underlying segkp segment for
1178 1177 * KMEM_PAGEABLE type cookies.
1179 1178 * segkp only allows a single pending F_SOFTLOCK;
1180 1179 * we keep track of the number of locks in the cookie so we can
1181 1180 * have multiple pending faults and manage the calls to segkp.
1182 1181 * RFE: if segkp supports either pagelock or can support multiple
1183 1182 * calls to F_SOFTLOCK, then these routines can go away.
1184 1183 * If pagelock, segdev_faultpage can fault on a page by page basis
1185 1184 * and simplifies the code quite a bit.
1186 1185 * if multiple calls allowed but not partial ranges, then need for
1187 1186 * cookie->lock and locked count goes away, code can call as_fault directly
1188 1187 */
1189 1188 static faultcode_t
1190 1189 acquire_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
1191 1190 {
1192 1191 int err = 0;
1193 1192 ASSERT(cookie_is_kpmem(cookie));
1194 1193 /*
1195 1194 * Fault in pages in segkp with F_SOFTLOCK.
1196 1195 * We want to hold the lock until all pages have been loaded.
1197 1196 * segkp only allows a single caller to hold SOFTLOCK, so the cookie
1198 1197 * holds a count so we don't call into segkp multiple times
1199 1198 */
1200 1199 mutex_enter(&cookie->lock);
1201 1200
1202 1201 /*
1203 1202 * Check for overflow in locked field
1204 1203 */
1205 1204 if ((UINT32_MAX - cookie->locked) < npages) {
1206 1205 err = FC_MAKE_ERR(ENOMEM);
1207 1206 } else if (cookie->locked == 0) {
1208 1207 /* First time locking */
1209 1208 err = as_fault(kas.a_hat, &kas, cookie->cvaddr,
1210 1209 cookie->size, F_SOFTLOCK, PROT_READ|PROT_WRITE);
1211 1210 }
1212 1211 if (!err) {
1213 1212 cookie->locked += npages;
1214 1213 }
1215 1214 mutex_exit(&cookie->lock);
1216 1215 return (err);
1217 1216 }
1218 1217
1219 1218 static void
1220 1219 release_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
1221 1220 {
1222 1221 mutex_enter(&cookie->lock);
1223 1222 ASSERT(cookie_is_kpmem(cookie));
1224 1223 ASSERT(cookie->locked >= npages);
1225 1224 cookie->locked -= (uint_t)npages;
1226 1225 if (cookie->locked == 0) {
1227 1226 /* Last unlock */
1228 1227 if (as_fault(kas.a_hat, &kas, cookie->cvaddr,
1229 1228 cookie->size, F_SOFTUNLOCK, PROT_READ|PROT_WRITE))
1230 1229 panic("segdev releasing kpmem lock %p", (void *)cookie);
1231 1230 }
1232 1231 mutex_exit(&cookie->lock);
1233 1232 }
1234 1233
1235 1234 /*
1236 1235 * Routines to synchronize F_SOFTLOCK and F_INVAL faults for
1237 1236 * drivers with devmap_access callbacks
1238 1237 * slock->softlocked basically works like a rw lock
1239 1238 * -ve counts => F_SOFTLOCK in progress
1240 1239 * +ve counts => F_INVAL/F_PROT in progress
1241 1240 * We allow only one F_SOFTLOCK at a time
1242 1241 * but can have multiple pending F_INVAL/F_PROT calls
1243 1242 *
1244 1243 * This routine waits using cv_wait_sig so killing processes is more graceful
1245 1244 * Returns EINTR if coming out of this routine due to a signal, 0 otherwise
1246 1245 */
1247 1246 static int devmap_softlock_enter(
1248 1247 struct devmap_softlock *slock,
1249 1248 size_t npages,
1250 1249 enum fault_type type)
1251 1250 {
1252 1251 if (npages == 0)
1253 1252 return (0);
1254 1253 mutex_enter(&(slock->lock));
1255 1254 switch (type) {
1256 1255 case F_SOFTLOCK :
1257 1256 while (slock->softlocked) {
1258 1257 if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
1259 1258 /* signalled */
1260 1259 mutex_exit(&(slock->lock));
1261 1260 return (EINTR);
1262 1261 }
1263 1262 }
1264 1263 slock->softlocked -= npages; /* -ve count => locked */
1265 1264 break;
1266 1265 case F_INVAL :
1267 1266 case F_PROT :
1268 1267 while (slock->softlocked < 0)
1269 1268 if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
1270 1269 /* signalled */
1271 1270 mutex_exit(&(slock->lock));
1272 1271 return (EINTR);
1273 1272 }
1274 1273 slock->softlocked += npages; /* +ve count => f_invals */
1275 1274 break;
1276 1275 default:
1277 1276 ASSERT(0);
1278 1277 }
1279 1278 mutex_exit(&(slock->lock));
1280 1279 return (0);
1281 1280 }
1282 1281
1283 1282 static void devmap_softlock_exit(
1284 1283 struct devmap_softlock *slock,
1285 1284 size_t npages,
1286 1285 enum fault_type type)
1287 1286 {
1288 1287 if (slock == NULL)
1289 1288 return;
1290 1289 mutex_enter(&(slock->lock));
1291 1290 switch (type) {
1292 1291 case F_SOFTLOCK :
1293 1292 ASSERT(-slock->softlocked >= npages);
1294 1293 slock->softlocked += npages; /* -ve count is softlocked */
1295 1294 if (slock->softlocked == 0)
1296 1295 cv_signal(&slock->cv);
1297 1296 break;
1298 1297 case F_INVAL :
1299 1298 case F_PROT:
1300 1299 ASSERT(slock->softlocked >= npages);
1301 1300 slock->softlocked -= npages;
1302 1301 if (slock->softlocked == 0)
1303 1302 cv_signal(&slock->cv);
1304 1303 break;
1305 1304 default:
1306 1305 ASSERT(0);
1307 1306 }
1308 1307 mutex_exit(&(slock->lock));
1309 1308 }
1310 1309
1311 1310 /*
1312 1311 * Do a F_SOFTUNLOCK call over the range requested.
1313 1312 * The range must have already been F_SOFTLOCK'ed.
1314 1313 * The segment lock should be held, (but not the segment private lock?)
1315 1314 * The softunlock code below does not adjust for large page sizes; it
1316 1315 * assumes the caller already did any addr/len adjustments for
1317 1316 * pagesize mappings before calling.
1318 1317 */
1319 1318 /*ARGSUSED*/
1320 1319 static void
1321 1320 segdev_softunlock(
1322 1321 struct hat *hat, /* the hat */
1323 1322 struct seg *seg, /* seg_dev of interest */
1324 1323 caddr_t addr, /* base address of range */
1325 1324 size_t len, /* number of bytes */
1326 1325 enum seg_rw rw) /* type of access at fault */
1327 1326 {
1328 1327 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1329 1328 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
1330 1329
1331 1330 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SOFTUNLOCK,
1332 1331 "segdev_softunlock:dhp_head=%p sdp=%p addr=%p len=%lx",
1333 1332 dhp_head, sdp, addr, len);
1334 1333 DEBUGF(3, (CE_CONT, "segdev_softunlock: dhp %p lockcnt %lx "
1335 1334 "addr %p len %lx\n",
1336 1335 (void *)dhp_head, sdp->softlockcnt, (void *)addr, len));
1337 1336
1338 1337 hat_unlock(hat, addr, len);
1339 1338
1340 1339 if (dhp_head != NULL) {
1341 1340 devmap_handle_t *dhp;
1342 1341 size_t mlen;
1343 1342 size_t tlen = len;
1344 1343 ulong_t off;
1345 1344
1346 1345 dhp = devmap_find_handle(dhp_head, addr);
1347 1346 ASSERT(dhp != NULL);
1348 1347
1349 1348 off = (ulong_t)(addr - dhp->dh_uvaddr);
1350 1349 while (tlen != 0) {
1351 1350 mlen = MIN(tlen, (dhp->dh_len - off));
1352 1351
1353 1352 /*
1354 1353 * unlock segkp memory, locked during F_SOFTLOCK
1355 1354 */
1356 1355 if (dhp_is_kpmem(dhp)) {
1357 1356 release_kpmem_lock(
1358 1357 (struct ddi_umem_cookie *)dhp->dh_cookie,
1359 1358 btopr(mlen));
1360 1359 }
1361 1360
1362 1361 /*
1363 1362 * Do the softlock accounting for devmap_access
1364 1363 */
1365 1364 if (dhp->dh_callbackops.devmap_access != NULL) {
1366 1365 devmap_softlock_exit(dhp->dh_softlock,
1367 1366 btopr(mlen), F_SOFTLOCK);
1368 1367 }
1369 1368
1370 1369 tlen -= mlen;
1371 1370 dhp = dhp->dh_next;
1372 1371 off = 0;
1373 1372 }
1374 1373 }
1375 1374
1376 1375 mutex_enter(&freemem_lock);
1377 1376 ASSERT(sdp->softlockcnt >= btopr(len));
1378 1377 sdp->softlockcnt -= btopr(len);
1379 1378 mutex_exit(&freemem_lock);
1380 1379 if (sdp->softlockcnt == 0) {
1381 1380 /*
1382 1381 * All SOFTLOCKS are gone. Wake up any waiting
1383 1382 * unmappers so they can try again to unmap.
1384 1383 * Check for waiters first without the mutex
1385 1384 * held so we don't always grab the mutex on
1386 1385 * softunlocks.
1387 1386 */
1388 1387 if (AS_ISUNMAPWAIT(seg->s_as)) {
1389 1388 mutex_enter(&seg->s_as->a_contents);
1390 1389 if (AS_ISUNMAPWAIT(seg->s_as)) {
1391 1390 AS_CLRUNMAPWAIT(seg->s_as);
1392 1391 cv_broadcast(&seg->s_as->a_cv);
1393 1392 }
1394 1393 mutex_exit(&seg->s_as->a_contents);
1395 1394 }
1396 1395 }
1397 1396
1398 1397 }
1399 1398
1400 1399 /*
1401 1400 * Handle fault for a single page.
1402 1401 * Done in a separate routine so we can handle errors more easily.
1403 1402 * This routine is called only from segdev_faultpages()
1404 1403 * when looping over the range of addresses requested. The segment lock is held.
1405 1404 */
1406 1405 static faultcode_t
1407 1406 segdev_faultpage(
1408 1407 struct hat *hat, /* the hat */
1409 1408 struct seg *seg, /* seg_dev of interest */
1410 1409 caddr_t addr, /* address in as */
1411 1410 struct vpage *vpage, /* pointer to vpage for seg, addr */
1412 1411 enum fault_type type, /* type of fault */
1413 1412 enum seg_rw rw, /* type of access at fault */
1414 1413 devmap_handle_t *dhp) /* devmap handle if any for this page */
1415 1414 {
1416 1415 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1417 1416 uint_t prot;
1418 1417 pfn_t pfnum = PFN_INVALID;
1419 1418 u_offset_t offset;
1420 1419 uint_t hat_flags;
1421 1420 dev_info_t *dip;
1422 1421
1423 1422 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE,
1424 1423 "segdev_faultpage: dhp=%p seg=%p addr=%p", dhp, seg, addr);
1425 1424 DEBUGF(8, (CE_CONT, "segdev_faultpage: dhp %p seg %p addr %p \n",
1426 1425 (void *)dhp, (void *)seg, (void *)addr));
1427 1426
1428 1427 /*
1429 1428 * Initialize protection value for this page.
1430 1429 * If we have per page protection values check it now.
1431 1430 */
1432 1431 if (sdp->pageprot) {
1433 1432 uint_t protchk;
1434 1433
1435 1434 switch (rw) {
1436 1435 case S_READ:
1437 1436 protchk = PROT_READ;
1438 1437 break;
1439 1438 case S_WRITE:
1440 1439 protchk = PROT_WRITE;
1441 1440 break;
1442 1441 case S_EXEC:
1443 1442 protchk = PROT_EXEC;
1444 1443 break;
1445 1444 case S_OTHER:
1446 1445 default:
1447 1446 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
1448 1447 break;
1449 1448 }
1450 1449
1451 1450 prot = VPP_PROT(vpage);
1452 1451 if ((prot & protchk) == 0)
1453 1452 return (FC_PROT); /* illegal access type */
1454 1453 } else {
1455 1454 prot = sdp->prot;
1456 1455 /* caller has already done segment level protection check */
1457 1456 }
1458 1457
1459 1458 if (type == F_SOFTLOCK) {
1460 1459 mutex_enter(&freemem_lock);
1461 1460 sdp->softlockcnt++;
1462 1461 mutex_exit(&freemem_lock);
1463 1462 }
1464 1463
1465 1464 hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
1466 1465 offset = sdp->offset + (u_offset_t)(addr - seg->s_base);
1467 1466 /*
1468 1467 * In the devmap framework, sdp->mapfunc is set to NULL. We can get
1469 1468 * pfnum from dhp->dh_pfn (at beginning of segment) and offset from
1470 1469 * seg->s_base.
1471 1470 */
1472 1471 if (dhp == NULL) {
1473 1472 /* If segment has devmap_data, then dhp should be non-NULL */
1474 1473 ASSERT(sdp->devmap_data == NULL);
1475 1474 pfnum = (pfn_t)cdev_mmap(sdp->mapfunc, sdp->vp->v_rdev,
1476 1475 (off_t)offset, prot);
1477 1476 prot |= sdp->hat_attr;
1478 1477 } else {
1479 1478 ulong_t off;
1480 1479 struct ddi_umem_cookie *cp;
1481 1480 struct devmap_pmem_cookie *pcp;
1482 1481
1483 1482 /* ensure the dhp passed in contains addr. */
1484 1483 ASSERT(dhp == devmap_find_handle(
1485 1484 (devmap_handle_t *)sdp->devmap_data, addr));
1486 1485
1487 1486 off = addr - dhp->dh_uvaddr;
1488 1487
1489 1488 /*
1490 1489 * This routine assumes that the caller makes sure that the
1491 1490 * fields in dhp used below are unchanged due to remap during
1492 1491 * this call. Caller does HOLD_DHP_LOCK if needed.
1493 1492 */
1494 1493 cp = dhp->dh_cookie;
1495 1494 if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
1496 1495 pfnum = PFN_INVALID;
1497 1496 } else if (cookie_is_devmem(cp)) {
1498 1497 pfnum = dhp->dh_pfn + btop(off);
1499 1498 } else if (cookie_is_pmem(cp)) {
1500 1499 pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
1501 1500 ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
1502 1501 dhp->dh_roff < ptob(pcp->dp_npages));
1503 1502 pfnum = page_pptonum(
1504 1503 pcp->dp_pparray[btop(off + dhp->dh_roff)]);
1505 1504 } else {
1506 1505 ASSERT(dhp->dh_roff < cp->size);
1507 1506 ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
1508 1507 dhp->dh_cvaddr < (cp->cvaddr + cp->size));
1509 1508 ASSERT((dhp->dh_cvaddr + off) <=
1510 1509 (cp->cvaddr + cp->size));
1511 1510 ASSERT((dhp->dh_cvaddr + off + PAGESIZE) <=
1512 1511 (cp->cvaddr + cp->size));
1513 1512
1514 1513 switch (cp->type) {
1515 1514 case UMEM_LOCKED :
1516 1515 if (cp->pparray != NULL) {
1517 1516 ASSERT((dhp->dh_roff &
1518 1517 PAGEOFFSET) == 0);
1519 1518 pfnum = page_pptonum(
1520 1519 cp->pparray[btop(off +
1521 1520 dhp->dh_roff)]);
1522 1521 } else {
1523 1522 pfnum = hat_getpfnum(
1524 1523 ((proc_t *)cp->procp)->p_as->a_hat,
1525 1524 cp->cvaddr + off);
1526 1525 }
1527 1526 break;
1528 1527 case UMEM_TRASH :
1529 1528 pfnum = page_pptonum(trashpp);
1530 1529 /*
1531 1530 * We should set hat_flags to HAT_NOFAULT also
1532 1531 * However, not all hat layers implement this
1533 1532 */
1534 1533 break;
1535 1534 case KMEM_PAGEABLE:
1536 1535 case KMEM_NON_PAGEABLE:
1537 1536 pfnum = hat_getpfnum(kas.a_hat,
1538 1537 dhp->dh_cvaddr + off);
1539 1538 break;
1540 1539 default :
1541 1540 pfnum = PFN_INVALID;
1542 1541 break;
1543 1542 }
1544 1543 }
1545 1544 prot |= dhp->dh_hat_attr;
1546 1545 }
1547 1546 if (pfnum == PFN_INVALID) {
1548 1547 return (FC_MAKE_ERR(EFAULT));
1549 1548 }
1550 1549 /* prot should already be OR'ed in with hat_attributes if needed */
1551 1550
1552 1551 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE_CK1,
1553 1552 "segdev_faultpage: pfnum=%lx memory=%x prot=%x flags=%x",
1554 1553 pfnum, pf_is_memory(pfnum), prot, hat_flags);
1555 1554 DEBUGF(9, (CE_CONT, "segdev_faultpage: pfnum %lx memory %x "
1556 1555 "prot %x flags %x\n", pfnum, pf_is_memory(pfnum), prot, hat_flags));
1557 1556
1558 1557 if (pf_is_memory(pfnum) || (dhp != NULL)) {
1559 1558 /*
1560 1559 * It's not _really_ required here to pass sdp->hat_flags
1561 1560 * to hat_devload even though we do it.
1562 1561 * This is because the hat figures out that DEVMEM mappings
1563 1562 * are non-consistent anyway.
1564 1563 */
1565 1564 hat_devload(hat, addr, PAGESIZE, pfnum,
1566 1565 prot, hat_flags | sdp->hat_flags);
1567 1566 return (0);
1568 1567 }
1569 1568
1570 1569 /*
1571 1570 * Fall through to the case where devmap is not used and need to call
1572 1571 * up the device tree to set up the mapping
1573 1572 */
1574 1573
1575 1574 dip = VTOS(VTOCVP(sdp->vp))->s_dip;
1576 1575 ASSERT(dip);
1577 1576
1578 1577 /*
1579 1578 * When calling ddi_map_fault, we do not OR in sdp->hat_attr
1580 1579 * This is because this calls drivers which may not expect
1581 1580 * prot to have any values other than PROT_ALL
1582 1581 * The root nexus driver has a hack to peek into the segment
1583 1582 * structure and then OR in sdp->hat_attr.
1584 1583 * XX In case the bus_ops interfaces are ever revisited
1585 1584 * we need to fix this. prot should include other hat attributes
1586 1585 */
1587 1586 if (ddi_map_fault(dip, hat, seg, addr, NULL, pfnum, prot & PROT_ALL,
1588 1587 (uint_t)(type == F_SOFTLOCK)) != DDI_SUCCESS) {
1589 1588 return (FC_MAKE_ERR(EFAULT));
1590 1589 }
1591 1590 return (0);
1592 1591 }
1593 1592
1594 1593 static faultcode_t
1595 1594 segdev_fault(
1596 1595 struct hat *hat, /* the hat */
1597 1596 struct seg *seg, /* the seg_dev of interest */
1598 1597 caddr_t addr, /* the address of the fault */
1599 1598 size_t len, /* the length of the range */
1600 1599 enum fault_type type, /* type of fault */
1601 1600 enum seg_rw rw) /* type of access at fault */
1602 1601 {
1603 1602 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1604 1603 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
1605 1604 devmap_handle_t *dhp;
1606 1605 struct devmap_softlock *slock = NULL;
1607 1606 ulong_t slpage = 0;
1608 1607 ulong_t off;
1609 1608 caddr_t maddr = addr;
1610 1609 int err;
1611 1610 int err_is_faultcode = 0;
1612 1611
1613 1612 TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_FAULT,
1614 1613 "segdev_fault: dhp_head=%p seg=%p addr=%p len=%lx type=%x",
1615 1614 (void *)dhp_head, (void *)seg, (void *)addr, len, type);
1616 1615 DEBUGF(7, (CE_CONT, "segdev_fault: dhp_head %p seg %p "
1617 1616 "addr %p len %lx type %x\n",
1618 1617 (void *)dhp_head, (void *)seg, (void *)addr, len, type));
1619 1618
1620 1619 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1621 1620
1622 1621 /* Handle non-devmap case */
1623 1622 if (dhp_head == NULL)
1624 1623 return (segdev_faultpages(hat, seg, addr, len, type, rw, NULL));
1625 1624
1626 1625 /* Find devmap handle */
1627 1626 if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
1628 1627 return (FC_NOMAP);
1629 1628
1630 1629 /*
1631 1630 	 * The seg_dev driver does not implement copy-on-write,
1632 1631 	 * and always loads translations with the maximal allowed permissions,
1633 1632 	 * yet we got a fault trying to access the device.
1634 1633 	 * Servicing the fault will not produce a better outcome.
1635 1634 	 * RFE: If we want devmap_access callbacks to be involved in F_PROT
1636 1635 	 * faults, then the code below is written for that.
1637 1636 	 * Pending resolution of the following:
1638 1637 	 * - determine whether the F_INVAL/F_SOFTLOCK syncing
1639 1638 	 * is needed for F_PROT as well. The code below assumes it is.
1640 1639 	 * - if a driver sees F_PROT and calls devmap_load with the same
1641 1640 	 * type, then segdev_faultpages will fail with FC_PROT anyway; we
1642 1641 	 * need to change that so calls from devmap_load to
1643 1642 	 * segdev_faultpages for F_PROT type are retagged to F_INVAL.
1644 1643 	 * RFE: Today we don't have drivers that use devmap and want to handle
1645 1644 	 * F_PROT calls. The code in segdev_fault* is written to allow
1646 1645 	 * this case but is not tested. A driver that needs this capability
1647 1646 	 * should be able to remove the short-circuit below, resolve the
1648 1647 	 * above issues, and "should" work.
1649 1648 */
1650 1649 if (type == F_PROT) {
1651 1650 return (FC_PROT);
1652 1651 }
1653 1652
1654 1653 /*
1655 1654 * Loop through dhp list calling devmap_access or segdev_faultpages for
1656 1655 * each devmap handle.
1657 1656 * drivers which implement devmap_access can interpose on faults and do
1658 1657 * device-appropriate special actions before calling devmap_load.
1659 1658 */
1660 1659
1661 1660 /*
1662 1661 * Unfortunately, this simple loop has turned out to expose a variety
1663 1662 	 * of complex problems, which result in the following convoluted code.
1664 1663 *
1665 1664 * First, a desire to handle a serialization of F_SOFTLOCK calls
1666 1665 * to the driver within the framework.
1667 1666 * This results in a dh_softlock structure that is on a per device
1668 1667 * (or device instance) basis and serializes devmap_access calls.
1669 1668 	 * Ideally we would need to do this for the underlying
1670 1669 	 * memory/device regions that are being faulted on,
1671 1670 	 * but those are hard to identify, and with REMAP, harder still.
1672 1671 	 * Second, a desire to serialize F_INVAL (and F_PROT) calls
1673 1672 	 * w.r.t. F_SOFTLOCK calls to the driver.
1674 1673 * These serializations are to simplify the driver programmer model.
1675 1674 * To support these two features, the code first goes through the
1676 1675 * devmap handles and counts the pages (slpage) that are covered
1677 1676 * by devmap_access callbacks.
1678 1677 * This part ends with a devmap_softlock_enter call
1679 1678 * which allows only one F_SOFTLOCK active on a device instance,
1680 1679 	 * but multiple F_INVAL/F_PROTs can be active, except when an
1681 1680 	 * F_SOFTLOCK is active.
1682 1681 *
1683 1682 	 * Next, we don't short-circuit the fault code upfront to call
1684 1683 * segdev_softunlock for F_SOFTUNLOCK, because we must use
1685 1684 * the same length when we softlock and softunlock.
1686 1685 *
1687 1686 * -Hat layers may not support softunlocking lengths less than the
1688 1687 * original length when there is large page support.
1689 1688 	 * -kpmem locking is dependent on keeping the lengths the same.
1690 1689 	 * -if drivers handle F_SOFTLOCK, they probably also expect to
1691 1690 	 * see an F_SOFTUNLOCK of the same length.
1692 1691 	 * Hence, if extending lengths during softlock,
1693 1692 	 * softunlock has to make the same adjustments and go through
1694 1693 	 * the same loop calling segdev_faultpages/segdev_softunlock,
1695 1694 	 * but some of the synchronization and error handling differs.
1696 1695 */
1697 1696
1698 1697 if (type != F_SOFTUNLOCK) {
1699 1698 devmap_handle_t *dhpp = dhp;
1700 1699 size_t slen = len;
1701 1700
1702 1701 /*
1703 1702 		 * Calculate the count of pages that are:
1704 1703 		 * a) within the (potentially extended) fault region
1705 1704 		 * b) AND covered by a devmap handle with devmap_access
1706 1705 */
1707 1706 off = (ulong_t)(addr - dhpp->dh_uvaddr);
1708 1707 while (slen != 0) {
1709 1708 size_t mlen;
1710 1709
1711 1710 /*
1712 1711 * Softlocking on a region that allows remap is
1713 1712 			 * unsupported due to unresolved locking issues.
1714 1713 			 * XXX: unclear what these are?
1715 1714 			 * One possibility is that if there is a pending
1716 1715 			 * softlock, then a remap should not be allowed
1717 1716 			 * until the unlock is done. This is easily
1718 1717 			 * fixed by returning an error from devmap_*_remap
1719 1718 			 * after checking the dh->dh_softlock->softlocked value.
1720 1719 */
1721 1720 if ((type == F_SOFTLOCK) &&
1722 1721 (dhpp->dh_flags & DEVMAP_ALLOW_REMAP)) {
1723 1722 return (FC_NOSUPPORT);
1724 1723 }
1725 1724
1726 1725 mlen = MIN(slen, (dhpp->dh_len - off));
1727 1726 if (dhpp->dh_callbackops.devmap_access) {
1728 1727 size_t llen;
1729 1728 caddr_t laddr;
1730 1729 /*
1731 1730 * use extended length for large page mappings
1732 1731 */
1733 1732 HOLD_DHP_LOCK(dhpp);
1734 1733 if ((sdp->pageprot == 0) &&
1735 1734 (dhpp->dh_flags & DEVMAP_FLAG_LARGE)) {
1736 1735 devmap_get_large_pgsize(dhpp,
1737 1736 mlen, maddr, &llen, &laddr);
1738 1737 } else {
1739 1738 llen = mlen;
1740 1739 }
1741 1740 RELE_DHP_LOCK(dhpp);
1742 1741
1743 1742 slpage += btopr(llen);
1744 1743 slock = dhpp->dh_softlock;
1745 1744 }
1746 1745 maddr += mlen;
1747 1746 ASSERT(slen >= mlen);
1748 1747 slen -= mlen;
1749 1748 dhpp = dhpp->dh_next;
1750 1749 off = 0;
1751 1750 }
1752 1751 /*
1753 1752 		 * synchronize with other faulting threads and wait until safe;
1754 1753 		 * devmap_softlock_enter might return early due to a signal in cv_wait.
1755 1754 		 *
1756 1755 		 * devmap_softlock_enter has to be called outside of the while loop
1757 1756 		 * to prevent a deadlock if len spans multiple dhps.
1758 1757 		 * dh_softlock is based on device instance, and if multiple dhps
1759 1758 		 * use the same device instance, the second dhp's LOCK call
1760 1759 		 * will hang waiting on the first to complete.
1761 1760 		 * devmap_setup verifies that the slocks in a dhp chain are the same.
1762 1761 		 * RFE: this deadlock only holds true for F_SOFTLOCK. For
1763 1762 		 * F_INVAL/F_PROT, since we now allow multiple in parallel,
1764 1763 		 * we could have done the softlock_enter inside the loop
1765 1764 		 * and supported multi-dhp mappings with dissimilar devices.
1766 1765 */
1767 1766 if (err = devmap_softlock_enter(slock, slpage, type))
1768 1767 return (FC_MAKE_ERR(err));
1769 1768 }
1770 1769
1771 1770 	/* reset 'maddr' to the start address of the faulting range. */
1772 1771 maddr = addr;
1773 1772
1774 1773 	/* calculate the offset corresponding to 'addr' in the first dhp. */
1775 1774 off = (ulong_t)(addr - dhp->dh_uvaddr);
1776 1775
1777 1776 /*
1778 1777 * The fault length may span over multiple dhps.
1779 1778 * Loop until the total length is satisfied.
1780 1779 */
1781 1780 while (len != 0) {
1782 1781 size_t llen;
1783 1782 size_t mlen;
1784 1783 caddr_t laddr;
1785 1784
1786 1785 /*
1787 1786 * mlen is the smaller of 'len' and the length
1788 1787 * from addr to the end of mapping defined by dhp.
1789 1788 */
1790 1789 mlen = MIN(len, (dhp->dh_len - off));
1791 1790
1792 1791 HOLD_DHP_LOCK(dhp);
1793 1792 /*
1794 1793 * Pass the extended length and address to devmap_access
1795 1794 * if large pagesize is used for loading address translations.
1796 1795 */
1797 1796 if ((sdp->pageprot == 0) &&
1798 1797 (dhp->dh_flags & DEVMAP_FLAG_LARGE)) {
1799 1798 devmap_get_large_pgsize(dhp, mlen, maddr,
1800 1799 &llen, &laddr);
1801 1800 ASSERT(maddr == addr || laddr == maddr);
1802 1801 } else {
1803 1802 llen = mlen;
1804 1803 laddr = maddr;
1805 1804 }
1806 1805
1807 1806 if (dhp->dh_callbackops.devmap_access != NULL) {
1808 1807 offset_t aoff;
1809 1808
1810 1809 aoff = sdp->offset + (offset_t)(laddr - seg->s_base);
1811 1810
1812 1811 /*
1813 1812 			 * call the driver's devmap_access entry point, which will
1814 1813 			 * call devmap_load/contextmgmt to load the translations.
1815 1814 			 *
1816 1815 			 * We drop the dhp_lock before calling access so that
1817 1816 			 * drivers can call devmap_*_remap within access.
1818 1817 */
1819 1818 RELE_DHP_LOCK(dhp);
1820 1819
1821 1820 err = (*dhp->dh_callbackops.devmap_access)(
1822 1821 dhp, (void *)dhp->dh_pvtp, aoff, llen, type, rw);
1823 1822 } else {
1824 1823 /*
1825 1824 			 * If there is no devmap_access entry point, load the
1826 1825 			 * mappings; hold the dhp_lock across faultpages if REMAP.
1827 1826 */
1828 1827 err = segdev_faultpages(hat, seg, laddr, llen,
1829 1828 type, rw, dhp);
1830 1829 err_is_faultcode = 1;
1831 1830 RELE_DHP_LOCK(dhp);
1832 1831 }
1833 1832
1834 1833 if (err) {
1835 1834 if ((type == F_SOFTLOCK) && (maddr > addr)) {
1836 1835 /*
1837 1836 				 * If this is not the first dhp, use
1838 1837 				 * segdev_fault(F_SOFTUNLOCK) for the prior dhps.
1839 1838 				 * While this is recursion, it is incorrect to
1840 1839 				 * call just segdev_softunlock
1841 1840 				 * if we are using either large pages
1842 1841 				 * or devmap_access. It is more correct
1843 1842 				 * to go through the same loop as above
1844 1843 				 * rather than call segdev_softunlock directly.
1845 1844 				 * It will use the right lengths as well as
1846 1845 				 * call into the driver's devmap_access routines.
1847 1846 */
1848 1847 size_t done = (size_t)(maddr - addr);
1849 1848 (void) segdev_fault(hat, seg, addr, done,
1850 1849 F_SOFTUNLOCK, S_OTHER);
1851 1850 /*
1852 1851 * reduce slpage by number of pages
1853 1852 * released by segdev_softunlock
1854 1853 */
1855 1854 ASSERT(slpage >= btopr(done));
1856 1855 devmap_softlock_exit(slock,
1857 1856 slpage - btopr(done), type);
1858 1857 } else {
1859 1858 devmap_softlock_exit(slock, slpage, type);
1860 1859 }
1861 1860
1862 1861
1863 1862 /*
1864 1863 * Segdev_faultpages() already returns a faultcode,
1865 1864 			 * hence, the result from segdev_faultpages() should be
1866 1865 * returned directly.
1867 1866 */
1868 1867 if (err_is_faultcode)
1869 1868 return (err);
1870 1869 return (FC_MAKE_ERR(err));
1871 1870 }
1872 1871
1873 1872 maddr += mlen;
1874 1873 ASSERT(len >= mlen);
1875 1874 len -= mlen;
1876 1875 dhp = dhp->dh_next;
1877 1876 off = 0;
1878 1877
1879 1878 ASSERT(!dhp || len == 0 || maddr == dhp->dh_uvaddr);
1880 1879 }
1881 1880 /*
1882 1881 	 * release the softlock count at the end of the fault;
1883 1882 	 * for F_SOFTLOCK this is done in the later F_SOFTUNLOCK
1884 1883 */
1885 1884 if ((type == F_INVAL) || (type == F_PROT))
1886 1885 devmap_softlock_exit(slock, slpage, type);
1887 1886 return (0);
1888 1887 }
1889 1888
1890 1889 /*
1891 1890 * segdev_faultpages
1892 1891 *
1893 1892  * Used to fault in seg_dev segment pages. Called by segdev_fault or
1894 1893  * devmap_load. This routine assumes that the caller makes sure that the
1895 1894  * fields in dhp used below are not changed due to remap during this call.
1896 1895  * Caller does HOLD_DHP_LOCK if needed.
1897 1896 * This routine returns a faultcode_t as a return value for segdev_fault.
1898 1897 */
1899 1898 static faultcode_t
1900 1899 segdev_faultpages(
1901 1900 struct hat *hat, /* the hat */
1902 1901 struct seg *seg, /* the seg_dev of interest */
1903 1902 caddr_t addr, /* the address of the fault */
1904 1903 size_t len, /* the length of the range */
1905 1904 enum fault_type type, /* type of fault */
1906 1905 enum seg_rw rw, /* type of access at fault */
1907 1906 devmap_handle_t *dhp) /* devmap handle */
1908 1907 {
1909 1908 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1910 1909 register caddr_t a;
1911 1910 struct vpage *vpage;
1912 1911 struct ddi_umem_cookie *kpmem_cookie = NULL;
1913 1912 int err;
1914 1913
1915 1914 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGES,
1916 1915 "segdev_faultpages: dhp=%p seg=%p addr=%p len=%lx",
1917 1916 (void *)dhp, (void *)seg, (void *)addr, len);
1918 1917 DEBUGF(5, (CE_CONT, "segdev_faultpages: "
1919 1918 "dhp %p seg %p addr %p len %lx\n",
1920 1919 (void *)dhp, (void *)seg, (void *)addr, len));
1921 1920
1922 1921 /*
1923 1922 * The seg_dev driver does not implement copy-on-write,
1924 1923 	 * and always loads translations with the maximal allowed permissions,
1925 1924 	 * yet we got a fault trying to access the device.
1926 1925 	 * Servicing the fault will not produce a better outcome.
1927 1926 	 * XXX: If we want to allow devmap_access to handle F_PROT calls,
1928 1927 	 * this code should be removed and the normal fault handling left
1929 1928 	 * to detect the error.
1930 1929 */
1931 1930 if (type == F_PROT) {
1932 1931 return (FC_PROT);
1933 1932 }
1934 1933
1935 1934 if (type == F_SOFTUNLOCK) {
1936 1935 segdev_softunlock(hat, seg, addr, len, rw);
1937 1936 return (0);
1938 1937 }
1939 1938
1940 1939 /*
1941 1940 * For kernel pageable memory, fault/lock segkp pages
1942 1941 * We hold this until the completion of this
1943 1942 * fault (INVAL/PROT) or till unlock (SOFTLOCK).
1944 1943 */
1945 1944 if ((dhp != NULL) && dhp_is_kpmem(dhp)) {
1946 1945 kpmem_cookie = (struct ddi_umem_cookie *)dhp->dh_cookie;
1947 1946 if (err = acquire_kpmem_lock(kpmem_cookie, btopr(len)))
1948 1947 return (err);
1949 1948 }
1950 1949
1951 1950 /*
1952 1951 * If we have the same protections for the entire segment,
1953 1952 	 * ensure that the access being attempted is legitimate.
1954 1953 */
1955 1954 rw_enter(&sdp->lock, RW_READER);
1956 1955 if (sdp->pageprot == 0) {
1957 1956 uint_t protchk;
1958 1957
1959 1958 switch (rw) {
1960 1959 case S_READ:
1961 1960 protchk = PROT_READ;
1962 1961 break;
1963 1962 case S_WRITE:
1964 1963 protchk = PROT_WRITE;
1965 1964 break;
1966 1965 case S_EXEC:
1967 1966 protchk = PROT_EXEC;
1968 1967 break;
1969 1968 case S_OTHER:
1970 1969 default:
1971 1970 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
1972 1971 break;
1973 1972 }
1974 1973
1975 1974 if ((sdp->prot & protchk) == 0) {
1976 1975 rw_exit(&sdp->lock);
1977 1976 /* undo kpmem locking */
1978 1977 if (kpmem_cookie != NULL) {
1979 1978 release_kpmem_lock(kpmem_cookie, btopr(len));
1980 1979 }
1981 1980 return (FC_PROT); /* illegal access type */
1982 1981 }
1983 1982 }
1984 1983
1985 1984 /*
1986 1985 * we do a single hat_devload for the range if
1987 1986 	 * - the devmap framework is in use (dhp is not NULL),
1988 1987 	 * - pageprot == 0, i.e., no per-page protection is set, and
1989 1988 	 * - the pages are device pages, irrespective of large page use
1990 1989 */
1991 1990 if ((sdp->pageprot == 0) && (dhp != NULL) && dhp_is_devmem(dhp)) {
1992 1991 pfn_t pfnum;
1993 1992 uint_t hat_flags;
1994 1993
1995 1994 if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
1996 1995 rw_exit(&sdp->lock);
1997 1996 return (FC_NOMAP);
1998 1997 }
1999 1998
2000 1999 if (type == F_SOFTLOCK) {
2001 2000 mutex_enter(&freemem_lock);
2002 2001 sdp->softlockcnt += btopr(len);
2003 2002 mutex_exit(&freemem_lock);
2004 2003 }
2005 2004
2006 2005 hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
2007 2006 pfnum = dhp->dh_pfn + btop((uintptr_t)(addr - dhp->dh_uvaddr));
2008 2007 ASSERT(!pf_is_memory(pfnum));
2009 2008
2010 2009 hat_devload(hat, addr, len, pfnum, sdp->prot | dhp->dh_hat_attr,
2011 2010 hat_flags | sdp->hat_flags);
2012 2011 rw_exit(&sdp->lock);
2013 2012 return (0);
2014 2013 }
2015 2014
2016 2015 /* Handle cases where we have to loop through fault handling per-page */
2017 2016
2018 2017 if (sdp->vpage == NULL)
2019 2018 vpage = NULL;
2020 2019 else
2021 2020 vpage = &sdp->vpage[seg_page(seg, addr)];
2022 2021
2023 2022 /* loop over the address range handling each fault */
2024 2023 for (a = addr; a < addr + len; a += PAGESIZE) {
2025 2024 if (err = segdev_faultpage(hat, seg, a, vpage, type, rw, dhp)) {
2026 2025 break;
2027 2026 }
2028 2027 if (vpage != NULL)
2029 2028 vpage++;
2030 2029 }
2031 2030 rw_exit(&sdp->lock);
2032 2031 if (err && (type == F_SOFTLOCK)) { /* error handling for F_SOFTLOCK */
2033 2032 		size_t done = (size_t)(a - addr); /* bytes faulted in successfully */
2034 2033 if (done > 0) {
2035 2034 /* use softunlock for those pages */
2036 2035 segdev_softunlock(hat, seg, addr, done, S_OTHER);
2037 2036 }
2038 2037 if (kpmem_cookie != NULL) {
2039 2038 /* release kpmem lock for rest of pages */
2040 2039 ASSERT(len >= done);
2041 2040 release_kpmem_lock(kpmem_cookie, btopr(len - done));
2042 2041 }
2043 2042 } else if ((kpmem_cookie != NULL) && (type != F_SOFTLOCK)) {
2044 2043 /* for non-SOFTLOCK cases, release kpmem */
2045 2044 release_kpmem_lock(kpmem_cookie, btopr(len));
2046 2045 }
2047 2046 return (err);
2048 2047 }
2049 2048
2050 2049 /*
2051 2050 * Asynchronous page fault. We simply do nothing since this
2052 2051 * entry point is not supposed to load up the translation.
2053 2052 */
2054 2053 /*ARGSUSED*/
2055 2054 static faultcode_t
2056 2055 segdev_faulta(struct seg *seg, caddr_t addr)
2057 2056 {
2058 2057 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FAULTA,
2059 2058 "segdev_faulta: seg=%p addr=%p", (void *)seg, (void *)addr);
2060 2059 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2061 2060
2062 2061 return (0);
2063 2062 }
2064 2063
2065 2064 static int
2066 2065 segdev_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2067 2066 {
2068 2067 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2069 2068 register devmap_handle_t *dhp;
2070 2069 register struct vpage *vp, *evp;
2071 2070 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
2072 2071 ulong_t off;
2073 2072 size_t mlen, sz;
2074 2073
2075 2074 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT,
2076 2075 "segdev_setprot:start seg=%p addr=%p len=%lx prot=%x",
2077 2076 (void *)seg, (void *)addr, len, prot);
2078 2077 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2079 2078
2080 2079 if ((sz = sdp->softlockcnt) > 0 && dhp_head != NULL) {
2081 2080 /*
2082 2081 * Fail the setprot if pages are SOFTLOCKed through this
2083 2082 * mapping.
2084 2083 * Softlockcnt is protected from change by the as read lock.
2085 2084 */
2086 2085 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT_CK1,
2087 2086 "segdev_setprot:error softlockcnt=%lx", sz);
2088 2087 DEBUGF(1, (CE_CONT, "segdev_setprot: softlockcnt %ld\n", sz));
2089 2088 return (EAGAIN);
2090 2089 }
2091 2090
2092 2091 if (dhp_head != NULL) {
2093 2092 if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
2094 2093 return (EINVAL);
2095 2094
2096 2095 /*
2097 2096 		 * check whether the request violates maxprot.
2098 2097 */
2099 2098 off = (ulong_t)(addr - dhp->dh_uvaddr);
2100 2099 mlen = len;
2101 2100 while (dhp) {
2102 2101 if ((dhp->dh_maxprot & prot) != prot)
2103 2102 return (EACCES); /* violated maxprot */
2104 2103
2105 2104 if (mlen > (dhp->dh_len - off)) {
2106 2105 mlen -= dhp->dh_len - off;
2107 2106 dhp = dhp->dh_next;
2108 2107 off = 0;
2109 2108 } else
2110 2109 break;
2111 2110 }
2112 2111 } else {
2113 2112 if ((sdp->maxprot & prot) != prot)
2114 2113 return (EACCES);
2115 2114 }
2116 2115
2117 2116 rw_enter(&sdp->lock, RW_WRITER);
2118 2117 if (addr == seg->s_base && len == seg->s_size && sdp->pageprot == 0) {
2119 2118 if (sdp->prot == prot) {
2120 2119 rw_exit(&sdp->lock);
2121 2120 return (0); /* all done */
2122 2121 }
2123 2122 sdp->prot = (uchar_t)prot;
2124 2123 } else {
2125 2124 sdp->pageprot = 1;
2126 2125 if (sdp->vpage == NULL) {
2127 2126 /*
2128 2127 			 * First time through setting per-page permissions;
2129 2128 			 * initialize all the vpage structures to prot.
2130 2129 */
2131 2130 sdp->vpage = kmem_zalloc(vpgtob(seg_pages(seg)),
2132 2131 KM_SLEEP);
2133 2132 evp = &sdp->vpage[seg_pages(seg)];
2134 2133 for (vp = sdp->vpage; vp < evp; vp++)
2135 2134 VPP_SETPROT(vp, sdp->prot);
2136 2135 }
2137 2136 /*
2138 2137 * Now go change the needed vpages protections.
2139 2138 */
2140 2139 evp = &sdp->vpage[seg_page(seg, addr + len)];
2141 2140 for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++)
2142 2141 VPP_SETPROT(vp, prot);
2143 2142 }
2144 2143 rw_exit(&sdp->lock);
2145 2144
2146 2145 if (dhp_head != NULL) {
2147 2146 devmap_handle_t *tdhp;
2148 2147 /*
2149 2148 * If large page size was used in hat_devload(),
2150 2149 * the same page size must be used in hat_unload().
2151 2150 */
2152 2151 dhp = tdhp = devmap_find_handle(dhp_head, addr);
2153 2152 while (tdhp != NULL) {
2154 2153 if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
2155 2154 break;
2156 2155 }
2157 2156 tdhp = tdhp->dh_next;
2158 2157 }
2159 2158 if (tdhp) {
2160 2159 size_t slen = len;
2161 2160 size_t mlen;
2162 2161 size_t soff;
2163 2162
2164 2163 soff = (ulong_t)(addr - dhp->dh_uvaddr);
2165 2164 while (slen != 0) {
2166 2165 mlen = MIN(slen, (dhp->dh_len - soff));
2167 2166 hat_unload(seg->s_as->a_hat, dhp->dh_uvaddr,
2168 2167 dhp->dh_len, HAT_UNLOAD);
2169 2168 dhp = dhp->dh_next;
2170 2169 ASSERT(slen >= mlen);
2171 2170 slen -= mlen;
2172 2171 soff = 0;
2173 2172 }
2174 2173 return (0);
2175 2174 }
2176 2175 }
2177 2176
2178 2177 if ((prot & ~PROT_USER) == PROT_NONE) {
2179 2178 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
2180 2179 } else {
2181 2180 /*
2182 2181 * RFE: the segment should keep track of all attributes
2183 2182 * allowing us to remove the deprecated hat_chgprot
2184 2183 * and use hat_chgattr.
2185 2184 */
2186 2185 hat_chgprot(seg->s_as->a_hat, addr, len, prot);
2187 2186 }
2188 2187
2189 2188 return (0);
2190 2189 }
2191 2190
2192 2191 static int
2193 2192 segdev_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2194 2193 {
2195 2194 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2196 2195 struct vpage *vp, *evp;
2197 2196
2198 2197 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_CHECKPROT,
2199 2198 "segdev_checkprot:start seg=%p addr=%p len=%lx prot=%x",
2200 2199 (void *)seg, (void *)addr, len, prot);
2201 2200 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2202 2201
2203 2202 /*
2204 2203 	 * If the segment protections can be used, simply check against them.
2205 2204 */
2206 2205 rw_enter(&sdp->lock, RW_READER);
2207 2206 if (sdp->pageprot == 0) {
2208 2207 register int err;
2209 2208
2210 2209 err = ((sdp->prot & prot) != prot) ? EACCES : 0;
2211 2210 rw_exit(&sdp->lock);
2212 2211 return (err);
2213 2212 }
2214 2213
2215 2214 /*
2216 2215 * Have to check down to the vpage level
2217 2216 */
2218 2217 evp = &sdp->vpage[seg_page(seg, addr + len)];
2219 2218 for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
2220 2219 if ((VPP_PROT(vp) & prot) != prot) {
2221 2220 rw_exit(&sdp->lock);
2222 2221 return (EACCES);
2223 2222 }
2224 2223 }
2225 2224 rw_exit(&sdp->lock);
2226 2225 return (0);
2227 2226 }
2228 2227
2229 2228 static int
2230 2229 segdev_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2231 2230 {
2232 2231 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2233 2232 size_t pgno;
2234 2233
2235 2234 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_GETPROT,
2236 2235 "segdev_getprot:start seg=%p addr=%p len=%lx protv=%p",
2237 2236 (void *)seg, (void *)addr, len, (void *)protv);
2238 2237 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2239 2238
2240 2239 pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
2241 2240 if (pgno != 0) {
2242 2241 rw_enter(&sdp->lock, RW_READER);
2243 2242 if (sdp->pageprot == 0) {
2244 2243 do {
2245 2244 protv[--pgno] = sdp->prot;
2246 2245 } while (pgno != 0);
2247 2246 } else {
2248 2247 size_t pgoff = seg_page(seg, addr);
2249 2248
2250 2249 do {
2251 2250 pgno--;
2252 2251 protv[pgno] =
2253 2252 VPP_PROT(&sdp->vpage[pgno + pgoff]);
2254 2253 } while (pgno != 0);
2255 2254 }
2256 2255 rw_exit(&sdp->lock);
2257 2256 }
2258 2257 return (0);
2259 2258 }
2260 2259
2261 2260 static u_offset_t
2262 2261 segdev_getoffset(register struct seg *seg, caddr_t addr)
2263 2262 {
2264 2263 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2265 2264
2266 2265 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETOFFSET,
2267 2266 "segdev_getoffset:start seg=%p addr=%p", (void *)seg, (void *)addr);
2268 2267
2269 2268 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2270 2269
2271 2270 return ((u_offset_t)sdp->offset + (addr - seg->s_base));
2272 2271 }
2273 2272
2274 2273 /*ARGSUSED*/
2275 2274 static int
2276 2275 segdev_gettype(register struct seg *seg, caddr_t addr)
2277 2276 {
2278 2277 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2279 2278
2280 2279 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETTYPE,
2281 2280 "segdev_gettype:start seg=%p addr=%p", (void *)seg, (void *)addr);
2282 2281
2283 2282 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2284 2283
2285 2284 return (sdp->type);
2286 2285 }
2287 2286
2288 2287
2289 2288 /*ARGSUSED*/
2290 2289 static int
2291 2290 segdev_getvp(register struct seg *seg, caddr_t addr, struct vnode **vpp)
2292 2291 {
2293 2292 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2294 2293
2295 2294 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETVP,
2296 2295 "segdev_getvp:start seg=%p addr=%p", (void *)seg, (void *)addr);
2297 2296
2298 2297 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2299 2298
2300 2299 /*
2301 2300 * Note that this vp is the common_vp of the device, where the
2302 2301 	 * pages are hung.
2303 2302 */
2304 2303 *vpp = VTOCVP(sdp->vp);
2305 2304
2306 2305 return (0);
2307 2306 }
2308 2307
2309 2308 static void
2310 2309 segdev_badop(void)
2311 2310 {
2312 2311 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGDEV_BADOP,
2313 2312 "segdev_badop:start");
2314 2313 panic("segdev_badop");
2315 2314 /*NOTREACHED*/
2316 2315 }
2317 2316
2318 2317 /*
2319 2318 * segdev pages are not in the cache, and thus can't really be controlled.
2320 2319 * Hence, syncs are simply always successful.
2321 2320 */
2322 2321 /*ARGSUSED*/
2323 2322 static int
2324 2323 segdev_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
2325 2324 {
2326 2325 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SYNC, "segdev_sync:start");
2327 2326
2328 2327 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2329 2328
2330 2329 return (0);
2331 2330 }
2332 2331
2333 2332 /*
2334 2333 * segdev pages are always "in core".
2335 2334 */
2336 2335 /*ARGSUSED*/
2337 2336 static size_t
2338 2337 segdev_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
2339 2338 {
2340 2339 size_t v = 0;
2341 2340
2342 2341 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_INCORE, "segdev_incore:start");
2343 2342
2344 2343 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2345 2344
2346 2345 for (len = (len + PAGEOFFSET) & PAGEMASK; len; len -= PAGESIZE,
2347 2346 v += PAGESIZE)
2348 2347 *vec++ = 1;
2349 2348 return (v);
2350 2349 }
2351 2350
2352 2351 /*
2353 2352 * segdev pages are not in the cache, and thus can't really be controlled.
2354 2353 * Hence, locks are simply always successful.
2355 2354 */
2356 2355 /*ARGSUSED*/
2357 2356 static int
2358 2357 segdev_lockop(struct seg *seg, caddr_t addr,
2359 2358 size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
2360 2359 {
2361 2360 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_LOCKOP, "segdev_lockop:start");
2362 2361
2363 2362 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2364 2363
2365 2364 return (0);
2366 2365 }
2367 2366
2368 2367 /*
2369 2368 * segdev pages are not in the cache, and thus can't really be controlled.
2370 2369 * Hence, advise is simply always successful.
2371 2370 */
2372 2371 /*ARGSUSED*/
2373 2372 static int
2374 2373 segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2375 2374 {
2376 2375 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");
2377 2376
2378 2377 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2379 2378
2380 2379 return (0);
2381 2380 }
2382 2381
2383 2382 /*
2384 2383 * segdev pages are not dumped, so we just return
2385 2384 */
2386 2385 /*ARGSUSED*/
2387 2386 static void
2388 2387 segdev_dump(struct seg *seg)
2389 2388 {}
2390 2389
2391 2390 /*
2392 2391  * ddi_segmap_setup: Used by drivers that wish to specify mapping attributes
2393 2392  *		for a segment. Called from a driver's segmap(9E)
2394 2393 * routine.
2395 2394 */
2396 2395 /*ARGSUSED*/
2397 2396 int
2398 2397 ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
2399 2398 off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
2400 2399 ddi_device_acc_attr_t *accattrp, uint_t rnumber)
2401 2400 {
2402 2401 struct segdev_crargs dev_a;
2403 2402 int (*mapfunc)(dev_t dev, off_t off, int prot);
2404 2403 uint_t hat_attr;
2405 2404 pfn_t pfn;
2406 2405 int error, i;
2407 2406
2408 2407 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP_SETUP,
2409 2408 "ddi_segmap_setup:start");
2410 2409
2411 2410 if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
2412 2411 return (ENODEV);
2413 2412
2414 2413 /*
2415 2414 * Character devices that support the d_mmap
2416 2415 * interface can only be mmap'ed shared.
2417 2416 */
2418 2417 if ((flags & MAP_TYPE) != MAP_SHARED)
2419 2418 return (EINVAL);
2420 2419
2421 2420 /*
2422 2421 * Check that this region is indeed mappable on this platform.
2423 2422 * Use the mapping function.
2424 2423 */
2425 2424 if (ddi_device_mapping_check(dev, accattrp, rnumber, &hat_attr) == -1)
2426 2425 return (ENXIO);
2427 2426
2428 2427 /*
2429 2428 * Check to ensure that the entire range is
2430 2429 * legal and we are not trying to map in
2431 2430 * more than the device will let us.
2432 2431 */
2433 2432 for (i = 0; i < len; i += PAGESIZE) {
2434 2433 if (i == 0) {
2435 2434 /*
2436 2435 * Save the pfn at offset here. This pfn will be
2437 2436 * used later to get user address.
2438 2437 */
2439 2438 if ((pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset,
2440 2439 maxprot)) == PFN_INVALID)
2441 2440 return (ENXIO);
2442 2441 } else {
2443 2442 if (cdev_mmap(mapfunc, dev, offset + i, maxprot) ==
2444 2443 PFN_INVALID)
2445 2444 return (ENXIO);
2446 2445 }
2447 2446 }
2448 2447
2449 2448 as_rangelock(as);
2450 2449 /* Pick an address w/o worrying about any vac alignment constraints. */
2451 2450 error = choose_addr(as, addrp, len, ptob(pfn), ADDR_NOVACALIGN, flags);
2452 2451 if (error != 0) {
2453 2452 as_rangeunlock(as);
2454 2453 return (error);
2455 2454 }
2456 2455
2457 2456 dev_a.mapfunc = mapfunc;
2458 2457 dev_a.dev = dev;
2459 2458 dev_a.offset = (offset_t)offset;
2460 2459 dev_a.type = flags & MAP_TYPE;
2461 2460 dev_a.prot = (uchar_t)prot;
2462 2461 dev_a.maxprot = (uchar_t)maxprot;
2463 2462 dev_a.hat_attr = hat_attr;
2464 2463 dev_a.hat_flags = 0;
2465 2464 dev_a.devmap_data = NULL;
2466 2465
2467 2466 error = as_map(as, *addrp, len, segdev_create, &dev_a);
2468 2467 as_rangeunlock(as);
2469 2468 return (error);
2470 2469
2471 2470 }
2472 2471
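/*
 * Illustrative sketch, not part of this file: a character driver's
 * segmap(9E) entry point can call ddi_segmap_setup() above to map a
 * register set with explicit access attributes.  The xx-prefixed names
 * are hypothetical, and mapping register set 0 is an assumption.
 */
static int
xx_segmap(dev_t dev, off_t off, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	static ddi_device_acc_attr_t xx_acc_attr = {
		DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
		DDI_NEVERSWAP_ACC,	/* devacc_attr_endian_flags */
		DDI_STRICTORDER_ACC	/* devacc_attr_dataorder */
	};

	/* map register set 0 of this device with the attributes above */
	return (ddi_segmap_setup(dev, off, asp, addrp, len, prot, maxprot,
	    flags, credp, &xx_acc_attr, 0));
}
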
2473 2472 /*ARGSUSED*/
2474 2473 static int
2475 2474 segdev_pagelock(struct seg *seg, caddr_t addr, size_t len,
2476 2475 struct page ***ppp, enum lock_type type, enum seg_rw rw)
2477 2476 {
2478 2477 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_PAGELOCK,
2479 2478 "segdev_pagelock:start");
2480 2479 return (ENOTSUP);
2481 2480 }
2482 2481
2483 2482 /*ARGSUSED*/
2484 2483 static int
2485 2484 segdev_setpagesize(struct seg *seg, caddr_t addr, size_t len,
2486 2485 uint_t szc)
2487 2486 {
2488 2487 return (ENOTSUP);
2489 2488 }
2490 2489
2491 2490 /*
2492 2491  * devmap_device: Used by the devmap framework to establish a mapping,
2493 2492  *		called by devmap_setup(9F) during map setup time.
2494 2493 */
2495 2494 /*ARGSUSED*/
2496 2495 static int
2497 2496 devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
2498 2497 offset_t off, size_t len, uint_t flags)
2499 2498 {
2500 2499 devmap_handle_t *rdhp, *maxdhp;
2501 2500 struct segdev_crargs dev_a;
2502 2501 int err;
2503 2502 uint_t maxprot = PROT_ALL;
2504 2503 offset_t offset = 0;
2505 2504 pfn_t pfn;
2506 2505 struct devmap_pmem_cookie *pcp;
2507 2506
2508 2507 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVICE,
2509 2508 "devmap_device:start dhp=%p addr=%p off=%llx, len=%lx",
2510 2509 (void *)dhp, (void *)addr, off, len);
2511 2510
2512 2511 DEBUGF(2, (CE_CONT, "devmap_device: dhp %p addr %p off %llx len %lx\n",
2513 2512 (void *)dhp, (void *)addr, off, len));
2514 2513
2515 2514 as_rangelock(as);
2516 2515 if ((flags & MAP_FIXED) == 0) {
2517 2516 offset_t aligned_off;
2518 2517
2519 2518 rdhp = maxdhp = dhp;
2520 2519 while (rdhp != NULL) {
2521 2520 maxdhp = (maxdhp->dh_len > rdhp->dh_len) ?
2522 2521 maxdhp : rdhp;
2523 2522 rdhp = rdhp->dh_next;
2524 2523 maxprot |= dhp->dh_maxprot;
2525 2524 }
2526 2525 offset = maxdhp->dh_uoff - dhp->dh_uoff;
2527 2526
2528 2527 /*
2529 2528 * Use the dhp that has the
2530 2529 * largest len to get user address.
2531 2530 */
2532 2531 /*
2533 2532 * If MAPPING_INVALID, cannot use dh_pfn/dh_cvaddr,
2534 2533 * use 0 which is as good as any other.
2535 2534 */
2536 2535 if (maxdhp->dh_flags & DEVMAP_MAPPING_INVALID) {
2537 2536 aligned_off = (offset_t)0;
2538 2537 } else if (dhp_is_devmem(maxdhp)) {
2539 2538 aligned_off = (offset_t)ptob(maxdhp->dh_pfn) - offset;
2540 2539 } else if (dhp_is_pmem(maxdhp)) {
2541 2540 pcp = (struct devmap_pmem_cookie *)maxdhp->dh_pcookie;
2542 2541 pfn = page_pptonum(
2543 2542 pcp->dp_pparray[btop(maxdhp->dh_roff)]);
2544 2543 aligned_off = (offset_t)ptob(pfn) - offset;
2545 2544 } else {
2546 2545 aligned_off = (offset_t)(uintptr_t)maxdhp->dh_cvaddr -
2547 2546 offset;
2548 2547 }
2549 2548
2550 2549 /*
2551 2550 * Pick an address aligned to dh_cookie.
2552 2551 		 * for kernel memory/user memory, the cookie is cvaddr;
2553 2552 		 * for device memory, the cookie is the physical address.
2554 2553 */
2555 2554 map_addr(addr, len, aligned_off, 1, flags);
2556 2555 if (*addr == NULL) {
2557 2556 as_rangeunlock(as);
2558 2557 return (ENOMEM);
2559 2558 }
2560 2559 } else {
2561 2560 /*
2562 2561 * User-specified address; blow away any previous mappings.
2563 2562 */
2564 2563 (void) as_unmap(as, *addr, len);
2565 2564 }
2566 2565
2567 2566 dev_a.mapfunc = NULL;
2568 2567 dev_a.dev = dhp->dh_dev;
2569 2568 dev_a.type = flags & MAP_TYPE;
2570 2569 dev_a.offset = off;
2571 2570 /*
2572 2571 	 * sdp->maxprot has the least restrictive protection of all dhps.
2573 2572 */
2574 2573 dev_a.maxprot = maxprot;
2575 2574 dev_a.prot = dhp->dh_prot;
2576 2575 /*
2577 2576 * devmap uses dhp->dh_hat_attr for hat.
2578 2577 */
2579 2578 dev_a.hat_flags = 0;
2580 2579 dev_a.hat_attr = 0;
2581 2580 dev_a.devmap_data = (void *)dhp;
2582 2581
2583 2582 err = as_map(as, *addr, len, segdev_create, &dev_a);
2584 2583 as_rangeunlock(as);
2585 2584 return (err);
2586 2585 }
2587 2586
2588 2587 int
2589 2588 devmap_do_ctxmgt(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
2590 2589 uint_t type, uint_t rw, int (*ctxmgt)(devmap_cookie_t, void *, offset_t,
2591 2590 size_t, uint_t, uint_t))
2592 2591 {
2593 2592 register devmap_handle_t *dhp = (devmap_handle_t *)dhc;
2594 2593 struct devmap_ctx *devctx;
2595 2594 int do_timeout = 0;
2596 2595 int ret;
2597 2596
2598 2597 #ifdef lint
2599 2598 pvtp = pvtp;
2600 2599 #endif
2601 2600
2602 2601 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT,
2603 2602 "devmap_do_ctxmgt:start dhp=%p off=%llx, len=%lx",
2604 2603 (void *)dhp, off, len);
2605 2604 DEBUGF(7, (CE_CONT, "devmap_do_ctxmgt: dhp %p off %llx len %lx\n",
2606 2605 (void *)dhp, off, len));
2607 2606
2608 2607 if (ctxmgt == NULL)
2609 2608 return (FC_HWERR);
2610 2609
2611 2610 devctx = dhp->dh_ctx;
2612 2611
2613 2612 /*
2614 2613 * If we are on an MP system with more than one cpu running
2615 2614 * and if a thread on some CPU already has the context, wait
2616 2615 * for it to finish if there is a hysteresis timeout.
2617 2616 *
2618 2617 * We call cv_wait() instead of cv_wait_sig() because
2619 2618 * it does not matter much if it returned due to a signal
2620 2619 * or due to a cv_signal() or cv_broadcast(). In either event
2621 2620 	 * we need to complete the mapping, otherwise the process
2622 2621 * will die with a SEGV.
2623 2622 */
2624 2623 if ((dhp->dh_timeout_length > 0) && (ncpus > 1)) {
2625 2624 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK1,
2626 2625 "devmap_do_ctxmgt:doing hysteresis, devctl %p dhp %p",
2627 2626 devctx, dhp);
2628 2627 do_timeout = 1;
2629 2628 mutex_enter(&devctx->lock);
2630 2629 while (devctx->oncpu)
2631 2630 cv_wait(&devctx->cv, &devctx->lock);
2632 2631 devctx->oncpu = 1;
2633 2632 mutex_exit(&devctx->lock);
2634 2633 }
2635 2634
2636 2635 /*
2637 2636 * Call the contextmgt callback so that the driver can handle
2638 2637 * the fault.
2639 2638 */
2640 2639 ret = (*ctxmgt)(dhp, dhp->dh_pvtp, off, len, type, rw);
2641 2640
2642 2641 /*
2643 2642 * If devmap_access() returned -1, then there was a hardware
2644 2643 * error so we need to convert the return value to something
2645 2644 * that trap() will understand. Otherwise, the return value
2646 2645 * is already a fault code generated by devmap_unload()
2647 2646 * or devmap_load().
2648 2647 */
2649 2648 if (ret) {
2650 2649 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK2,
2651 2650 "devmap_do_ctxmgt: ret=%x dhp=%p devctx=%p",
2652 2651 ret, dhp, devctx);
2653 2652 DEBUGF(1, (CE_CONT, "devmap_do_ctxmgt: ret %x dhp %p\n",
2654 2653 ret, (void *)dhp));
2655 2654 if (devctx->oncpu) {
2656 2655 mutex_enter(&devctx->lock);
2657 2656 devctx->oncpu = 0;
2658 2657 cv_signal(&devctx->cv);
2659 2658 mutex_exit(&devctx->lock);
2660 2659 }
2661 2660 return (FC_HWERR);
2662 2661 }
2663 2662
2664 2663 /*
2665 2664 	 * Set up the timeout if we need to.
2666 2665 */
2667 2666 if (do_timeout) {
2668 2667 mutex_enter(&devctx->lock);
2669 2668 if (dhp->dh_timeout_length > 0) {
2670 2669 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK3,
2671 2670 "devmap_do_ctxmgt:timeout set");
2672 2671 devctx->timeout = timeout(devmap_ctxto,
2673 2672 devctx, dhp->dh_timeout_length);
2674 2673 } else {
2675 2674 /*
2676 2675 * We don't want to wait so set oncpu to
2677 2676 * 0 and wake up anyone waiting.
2678 2677 */
2679 2678 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK4,
2680 2679 "devmap_do_ctxmgt:timeout not set");
2681 2680 devctx->oncpu = 0;
2682 2681 cv_signal(&devctx->cv);
2683 2682 }
2684 2683 mutex_exit(&devctx->lock);
2685 2684 }
2686 2685
2687 2686 return (DDI_SUCCESS);
2688 2687 }
2689 2688
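/*
 * Illustrative sketch, not part of this file: a driver that uses the
 * context-management support above typically makes its devmap_access(9E)
 * callback a thin wrapper that hands the fault to devmap_do_ctxmgt()
 * together with its own context-management routine.  The xx-prefixed
 * names are hypothetical; xxdevmap_contextmgt is sketched after
 * devmap_load() below.
 */
static int xxdevmap_contextmgt(devmap_cookie_t, void *, offset_t, size_t,
    uint_t, uint_t);

static int
xxdevmap_access(devmap_cookie_t dhp, void *pvtp, offset_t off, size_t len,
    uint_t type, uint_t rw)
{
	/* serialize the fault and run xxdevmap_contextmgt under the ctx */
	return (devmap_do_ctxmgt(dhp, pvtp, off, len, type, rw,
	    xxdevmap_contextmgt));
}
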
2690 2689 /*
2691 2690 * end of mapping
2692 2691 * poff fault_offset |
2693 2692 * base | | |
2694 2693 * | | | |
2695 2694 * V V V V
2696 2695 * +-----------+---------------+-------+---------+-------+
2697 2696 * ^ ^ ^ ^
2698 2697 * |<--- offset--->|<-len->| |
2699 2698 * |<--- dh_len(size of mapping) --->|
2700 2699 * |<-- pg -->|
2701 2700 * -->|rlen|<--
2702 2701 */
2703 2702 static ulong_t
2704 2703 devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
2705 2704 ulong_t *opfn, ulong_t *pagesize)
2706 2705 {
2707 2706 register int level;
2708 2707 ulong_t pg;
2709 2708 ulong_t poff;
2710 2709 ulong_t base;
2711 2710 caddr_t uvaddr;
2712 2711 long rlen;
2713 2712
2714 2713 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP,
2715 2714 "devmap_roundup:start dhp=%p off=%lx len=%lx",
2716 2715 (void *)dhp, offset, len);
2717 2716 DEBUGF(2, (CE_CONT, "devmap_roundup: dhp %p off %lx len %lx\n",
2718 2717 (void *)dhp, offset, len));
2719 2718
2720 2719 /*
2721 2720 * get the max. pagesize that is aligned within the range
2722 2721 * <dh_pfn, dh_pfn+offset>.
2723 2722 *
2724 2723 	 * The calculations below use the physical address to determine
2725 2724 	 * the page size to use. The same calculations could use the
2726 2725 * virtual address to determine the page size.
2727 2726 */
2728 2727 base = (ulong_t)ptob(dhp->dh_pfn);
2729 2728 for (level = dhp->dh_mmulevel; level >= 0; level--) {
2730 2729 pg = page_get_pagesize(level);
2731 2730 poff = ((base + offset) & ~(pg - 1));
2732 2731 uvaddr = dhp->dh_uvaddr + (poff - base);
2733 2732 if ((poff >= base) &&
2734 2733 ((poff + pg) <= (base + dhp->dh_len)) &&
2735 2734 VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg))
2736 2735 break;
2737 2736 }
2738 2737
2739 2738 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK1,
2740 2739 "devmap_roundup: base=%lx poff=%lx dhp=%p",
2741 2740 base, poff, dhp);
2742 2741 DEBUGF(2, (CE_CONT, "devmap_roundup: base %lx poff %lx pfn %lx\n",
2743 2742 base, poff, dhp->dh_pfn));
2744 2743
2745 2744 ASSERT(VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg));
2746 2745 ASSERT(level >= 0);
2747 2746
2748 2747 *pagesize = pg;
2749 2748 *opfn = dhp->dh_pfn + btop(poff - base);
2750 2749
2751 2750 rlen = len + offset - (poff - base + pg);
2752 2751
2753 2752 ASSERT(rlen < (long)len);
2754 2753
2755 2754 TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK2,
2756 2755 "devmap_roundup:ret dhp=%p level=%x rlen=%lx psiz=%p opfn=%p",
2757 2756 (void *)dhp, level, rlen, pagesize, opfn);
2758 2757 DEBUGF(1, (CE_CONT, "devmap_roundup: dhp %p "
2759 2758 "level %x rlen %lx psize %lx opfn %lx\n",
2760 2759 (void *)dhp, level, rlen, *pagesize, *opfn));
2761 2760
2762 2761 return ((ulong_t)((rlen > 0) ? rlen : 0));
2763 2762 }
2764 2763
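/*
 * Worked example for devmap_roundup(), with hypothetical numbers and
 * assuming a 2M large page is available at dh_mmulevel:
 * base = ptob(dh_pfn) = 0x80000000, dh_len = 4M, offset = 0x1000,
 * len = 0x300000.  At the 2M level, poff = (base + offset) & ~(2M - 1)
 * = 0x80000000, which satisfies poff >= base and poff + 2M <= base +
 * dh_len, so (assuming the VA/PA alignment check holds) *pagesize = 2M,
 * *opfn = dh_pfn, and the residual returned is len + offset -
 * (poff - base + 2M) = 0x300000 + 0x1000 - 0x200000 = 0x101000 bytes,
 * which devmap_get_large_pgsize() below feeds into the next round
 * with off = 0x200000.
 */
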
2765 2764 /*
2766 2765 * find the dhp that contains addr.
2767 2766 */
2768 2767 static devmap_handle_t *
2769 2768 devmap_find_handle(devmap_handle_t *dhp_head, caddr_t addr)
2770 2769 {
2771 2770 devmap_handle_t *dhp;
2772 2771
2773 2772 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_FIND_HANDLE,
2774 2773 "devmap_find_handle:start");
2775 2774
2776 2775 dhp = dhp_head;
2777 2776 while (dhp) {
2778 2777 if (addr >= dhp->dh_uvaddr &&
2779 2778 addr < (dhp->dh_uvaddr + dhp->dh_len))
2780 2779 return (dhp);
2781 2780 dhp = dhp->dh_next;
2782 2781 }
2783 2782
2784 2783 return ((devmap_handle_t *)NULL);
2785 2784 }
2786 2785
2787 2786 /*
2788 2787 * devmap_unload:
2789 2788  *	Marks a segdev segment, or just its pages when offset->offset+len
2790 2789  *	is not the entire segment, as intercept, and unloads the
2791 2790  *	pages in the range offset -> offset+len.
2792 2791 */
2793 2792 int
2794 2793 devmap_unload(devmap_cookie_t dhc, offset_t offset, size_t len)
2795 2794 {
2796 2795 register devmap_handle_t *dhp = (devmap_handle_t *)dhc;
2797 2796 caddr_t addr;
2798 2797 ulong_t size;
2799 2798 ssize_t soff;
2800 2799
2801 2800 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_UNLOAD,
2802 2801 "devmap_unload:start dhp=%p offset=%llx len=%lx",
2803 2802 (void *)dhp, offset, len);
2804 2803 DEBUGF(7, (CE_CONT, "devmap_unload: dhp %p offset %llx len %lx\n",
2805 2804 (void *)dhp, offset, len));
2806 2805
2807 2806 soff = (ssize_t)(offset - dhp->dh_uoff);
2808 2807 soff = round_down_p2(soff, PAGESIZE);
2809 2808 if (soff < 0 || soff >= dhp->dh_len)
2810 2809 return (FC_MAKE_ERR(EINVAL));
2811 2810
2812 2811 /*
2813 2812 	 * Address and size must be page aligned. Len is rounded up to
2814 2813 	 * a whole number of pages (the bytes in the pages required to
2815 2814 	 * cover len), and offset is rounded down to the first byte of
2816 2815 	 * the page that contains it.
2817 2816 */
2818 2817 len = round_up_p2(len, PAGESIZE);
2819 2818
2820 2819 /*
2821 2820 	 * If len == 0, then calculate the size by getting
2822 2821 * the number of bytes from offset to the end of the segment.
2823 2822 */
2824 2823 if (len == 0)
2825 2824 size = dhp->dh_len - soff;
2826 2825 else {
2827 2826 size = len;
2828 2827 if ((soff + size) > dhp->dh_len)
2829 2828 return (FC_MAKE_ERR(EINVAL));
2830 2829 }
2831 2830
2832 2831 /*
2833 2832 * The address is offset bytes from the base address of
2834 2833 * the dhp.
2835 2834 */
2836 2835 addr = (caddr_t)(soff + dhp->dh_uvaddr);
2837 2836
2838 2837 /*
2839 2838 * If large page size was used in hat_devload(),
2840 2839 * the same page size must be used in hat_unload().
2841 2840 */
2842 2841 if (dhp->dh_flags & DEVMAP_FLAG_LARGE) {
2843 2842 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
2844 2843 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
2845 2844 } else {
2846 2845 hat_unload(dhp->dh_seg->s_as->a_hat, addr, size,
2847 2846 HAT_UNLOAD|HAT_UNLOAD_OTHER);
2848 2847 }
2849 2848
2850 2849 return (0);
2851 2850 }
2852 2851
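/*
 * A typical caller pairs devmap_unload() with a subsequent devmap_load();
 * see the hypothetical context-management sketch after devmap_load() below.
 */
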
2853 2852 /*
2854 2853 * calculates the optimal page size that will be used for hat_devload().
2855 2854 */
2856 2855 static void
2857 2856 devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len, caddr_t addr,
2858 2857 size_t *llen, caddr_t *laddr)
2859 2858 {
2860 2859 ulong_t off;
2861 2860 ulong_t pfn;
2862 2861 ulong_t pgsize;
2863 2862 uint_t first = 1;
2864 2863
2865 2864 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GET_LARGE_PGSIZE,
2866 2865 "devmap_get_large_pgsize:start");
2867 2866
2868 2867 /*
2869 2868 	 * RFE - This code only supports large page mappings for devmem.
2870 2869 	 * It could be changed in the future if we want to support
2871 2870 	 * large page mappings for kernel-exported memory.
2872 2871 */
2873 2872 ASSERT(dhp_is_devmem(dhp));
2874 2873 ASSERT(!(dhp->dh_flags & DEVMAP_MAPPING_INVALID));
2875 2874
2876 2875 *llen = 0;
2877 2876 off = (ulong_t)(addr - dhp->dh_uvaddr);
2878 2877 while ((long)len > 0) {
2879 2878 /*
2880 2879 * get the optimal pfn to minimize address translations.
2881 2880 		 * devmap_roundup() returns the residual byte count for
2882 2881 		 * the next round of calculations.
2883 2882 */
2884 2883 len = devmap_roundup(dhp, off, len, &pfn, &pgsize);
2885 2884
2886 2885 if (first) {
2887 2886 *laddr = dhp->dh_uvaddr + ptob(pfn - dhp->dh_pfn);
2888 2887 first = 0;
2889 2888 }
2890 2889
2891 2890 *llen += pgsize;
2892 2891 off = ptob(pfn - dhp->dh_pfn) + pgsize;
2893 2892 }
2894 2893 	/* Large page mapping len/addr cover more range than the original fault */
2895 2894 ASSERT(*llen >= len && *laddr <= addr);
2896 2895 ASSERT((*laddr + *llen) >= (addr + len));
2897 2896 }
2898 2897
2899 2898 /*
2900 2899 * Initialize the devmap_softlock structure.
2901 2900 */
2902 2901 static struct devmap_softlock *
2903 2902 devmap_softlock_init(dev_t dev, ulong_t id)
2904 2903 {
2905 2904 struct devmap_softlock *slock;
2906 2905 struct devmap_softlock *tmp;
2907 2906
2908 2907 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_INIT,
2909 2908 "devmap_softlock_init:start");
2910 2909
2911 2910 tmp = kmem_zalloc(sizeof (struct devmap_softlock), KM_SLEEP);
2912 2911 mutex_enter(&devmap_slock);
2913 2912
2914 2913 for (slock = devmap_slist; slock != NULL; slock = slock->next)
2915 2914 if ((slock->dev == dev) && (slock->id == id))
2916 2915 break;
2917 2916
2918 2917 if (slock == NULL) {
2919 2918 slock = tmp;
2920 2919 slock->dev = dev;
2921 2920 slock->id = id;
2922 2921 mutex_init(&slock->lock, NULL, MUTEX_DEFAULT, NULL);
2923 2922 cv_init(&slock->cv, NULL, CV_DEFAULT, NULL);
2924 2923 slock->next = devmap_slist;
2925 2924 devmap_slist = slock;
2926 2925 } else
2927 2926 kmem_free(tmp, sizeof (struct devmap_softlock));
2928 2927
2929 2928 mutex_enter(&slock->lock);
2930 2929 slock->refcnt++;
2931 2930 mutex_exit(&slock->lock);
2932 2931 mutex_exit(&devmap_slock);
2933 2932
2934 2933 return (slock);
2935 2934 }
2936 2935
2937 2936 /*
2938 2937 * Wake up processes that sleep on softlocked.
2939 2938 * Free dh_softlock if refcnt is 0.
2940 2939 */
2941 2940 static void
2942 2941 devmap_softlock_rele(devmap_handle_t *dhp)
2943 2942 {
2944 2943 struct devmap_softlock *slock = dhp->dh_softlock;
2945 2944 struct devmap_softlock *tmp;
2946 2945 struct devmap_softlock *parent;
2947 2946
2948 2947 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_RELE,
2949 2948 "devmap_softlock_rele:start");
2950 2949
2951 2950 mutex_enter(&devmap_slock);
2952 2951 mutex_enter(&slock->lock);
2953 2952
2954 2953 ASSERT(slock->refcnt > 0);
2955 2954
2956 2955 slock->refcnt--;
2957 2956
2958 2957 /*
2959 2958 * If no one is using the device, free up the slock data.
2960 2959 */
2961 2960 if (slock->refcnt == 0) {
2962 2961 slock->softlocked = 0;
2963 2962 cv_signal(&slock->cv);
2964 2963
2965 2964 if (devmap_slist == slock)
2966 2965 devmap_slist = slock->next;
2967 2966 else {
2968 2967 parent = devmap_slist;
2969 2968 for (tmp = devmap_slist->next; tmp != NULL;
2970 2969 tmp = tmp->next) {
2971 2970 if (tmp == slock) {
2972 2971 parent->next = tmp->next;
2973 2972 break;
2974 2973 }
2975 2974 parent = tmp;
2976 2975 }
2977 2976 }
2978 2977 mutex_exit(&slock->lock);
2979 2978 mutex_destroy(&slock->lock);
2980 2979 cv_destroy(&slock->cv);
2981 2980 kmem_free(slock, sizeof (struct devmap_softlock));
2982 2981 } else
2983 2982 mutex_exit(&slock->lock);
2984 2983
2985 2984 mutex_exit(&devmap_slock);
2986 2985 }
2987 2986
2988 2987 /*
2989 2988 * Wake up processes that sleep on dh_ctx->locked.
2990 2989 * Free dh_ctx if refcnt is 0.
2991 2990 */
2992 2991 static void
2993 2992 devmap_ctx_rele(devmap_handle_t *dhp)
2994 2993 {
2995 2994 struct devmap_ctx *devctx = dhp->dh_ctx;
2996 2995 struct devmap_ctx *tmp;
2997 2996 struct devmap_ctx *parent;
2998 2997 timeout_id_t tid;
2999 2998
3000 2999 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE,
3001 3000 "devmap_ctx_rele:start");
3002 3001
3003 3002 mutex_enter(&devmapctx_lock);
3004 3003 mutex_enter(&devctx->lock);
3005 3004
3006 3005 ASSERT(devctx->refcnt > 0);
3007 3006
3008 3007 devctx->refcnt--;
3009 3008
3010 3009 /*
3011 3010 * If no one is using the device, free up the devctx data.
3012 3011 */
3013 3012 if (devctx->refcnt == 0) {
3014 3013 /*
3015 3014 * Untimeout any threads using this mapping as they are about
3016 3015 * to go away.
3017 3016 */
3018 3017 if (devctx->timeout != 0) {
3019 3018 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE_CK1,
3020 3019 "devmap_ctx_rele:untimeout ctx->timeout");
3021 3020
3022 3021 tid = devctx->timeout;
3023 3022 mutex_exit(&devctx->lock);
3024 3023 (void) untimeout(tid);
3025 3024 mutex_enter(&devctx->lock);
3026 3025 }
3027 3026
3028 3027 devctx->oncpu = 0;
3029 3028 cv_signal(&devctx->cv);
3030 3029
3031 3030 if (devmapctx_list == devctx)
3032 3031 devmapctx_list = devctx->next;
3033 3032 else {
3034 3033 parent = devmapctx_list;
3035 3034 for (tmp = devmapctx_list->next; tmp != NULL;
3036 3035 tmp = tmp->next) {
3037 3036 if (tmp == devctx) {
3038 3037 parent->next = tmp->next;
3039 3038 break;
3040 3039 }
3041 3040 parent = tmp;
3042 3041 }
3043 3042 }
3044 3043 mutex_exit(&devctx->lock);
3045 3044 mutex_destroy(&devctx->lock);
3046 3045 cv_destroy(&devctx->cv);
3047 3046 kmem_free(devctx, sizeof (struct devmap_ctx));
3048 3047 } else
3049 3048 mutex_exit(&devctx->lock);
3050 3049
3051 3050 mutex_exit(&devmapctx_lock);
3052 3051 }
3053 3052
3054 3053 /*
3055 3054 * devmap_load:
3056 3055  *	Marks a segdev segment, or just its pages when offset->offset+len
3057 3056  *	is not the entire segment, as nointercept, and faults in
3058 3057  *	the pages in the range offset -> offset+len.
3059 3058 */
3060 3059 int
3061 3060 devmap_load(devmap_cookie_t dhc, offset_t offset, size_t len, uint_t type,
3062 3061 uint_t rw)
3063 3062 {
3064 3063 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3065 3064 struct as *asp = dhp->dh_seg->s_as;
3066 3065 caddr_t addr;
3067 3066 ulong_t size;
3068 3067 ssize_t soff; /* offset from the beginning of the segment */
3069 3068 int rc;
3070 3069
3071 3070 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_LOAD,
3072 3071 "devmap_load:start dhp=%p offset=%llx len=%lx",
3073 3072 (void *)dhp, offset, len);
3074 3073
3075 3074 DEBUGF(7, (CE_CONT, "devmap_load: dhp %p offset %llx len %lx\n",
3076 3075 (void *)dhp, offset, len));
3077 3076
3078 3077 /*
3079 3078 	 * The hat layer only supports devload into a process context for
3080 3079 	 * which the as lock is held. Verify that here and return an error
3081 3080 	 * if a driver inadvertently calls devmap_load on the wrong handle.
3082 3081 */
3083 3082 if ((asp != &kas) && !AS_LOCK_HELD(asp, &asp->a_lock))
3084 3083 return (FC_MAKE_ERR(EINVAL));
3085 3084
3086 3085 soff = (ssize_t)(offset - dhp->dh_uoff);
3087 3086 soff = round_down_p2(soff, PAGESIZE);
3088 3087 if (soff < 0 || soff >= dhp->dh_len)
3089 3088 return (FC_MAKE_ERR(EINVAL));
3090 3089
3091 3090 /*
3092 3091 	 * Address and size must be page aligned. Len is rounded up to
3093 3092 	 * a whole number of pages (the bytes in the pages required to
3094 3093 	 * cover len), and offset is rounded down to the first byte of
3095 3094 	 * the page that contains it.
3096 3095 */
3097 3096 len = round_up_p2(len, PAGESIZE);
3098 3097
3099 3098 /*
3100 3099 * If len == 0, then calculate the size by getting
3101 3100 * the number of bytes from offset to the end of the segment.
3102 3101 */
3103 3102 if (len == 0)
3104 3103 size = dhp->dh_len - soff;
3105 3104 else {
3106 3105 size = len;
3107 3106 if ((soff + size) > dhp->dh_len)
3108 3107 return (FC_MAKE_ERR(EINVAL));
3109 3108 }
3110 3109
3111 3110 /*
3112 3111 * The address is offset bytes from the base address of
3113 3112 * the segment.
3114 3113 */
3115 3114 addr = (caddr_t)(soff + dhp->dh_uvaddr);
3116 3115
3117 3116 HOLD_DHP_LOCK(dhp);
3118 3117 rc = segdev_faultpages(asp->a_hat,
3119 3118 dhp->dh_seg, addr, size, type, rw, dhp);
3120 3119 RELE_DHP_LOCK(dhp);
3121 3120 return (rc);
3122 3121 }
3123 3122
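/*
 * Illustrative sketch, not part of this file: a context-management
 * routine (invoked through devmap_do_ctxmgt() above) typically calls
 * devmap_unload() on the handle that currently owns the device context
 * and then devmap_load() on the faulting handle, so that only one
 * mapping at a time has valid translations.  The xx names and the
 * soft-state layout are hypothetical assumptions, not a definitive
 * implementation.
 */
struct xxstate {
	kmutex_t ctx_lock;		/* protects cur_dhp */
	devmap_cookie_t cur_dhp;	/* handle that owns the context */
};

struct xxpvtdata {
	struct xxstate *softc;		/* per-device soft state */
};

static int
xxdevmap_contextmgt(devmap_cookie_t dhp, void *pvtp, offset_t off,
    size_t len, uint_t type, uint_t rw)
{
	struct xxpvtdata *pvp = pvtp;
	struct xxstate *sp = pvp->softc;
	int err;

	mutex_enter(&sp->ctx_lock);

	/* invalidate the translations of the current context owner */
	if (sp->cur_dhp != NULL && sp->cur_dhp != dhp) {
		if ((err = devmap_unload(sp->cur_dhp, off, len)) != 0) {
			mutex_exit(&sp->ctx_lock);
			return (err);
		}
	}

	/* fault in translations for the new owner and record it */
	if ((err = devmap_load(dhp, off, len, type, rw)) == 0)
		sp->cur_dhp = dhp;
	mutex_exit(&sp->ctx_lock);
	return (err);
}
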
3124 3123 int
3125 3124 devmap_setup(dev_t dev, offset_t off, struct as *as, caddr_t *addrp,
3126 3125 size_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
3127 3126 {
3128 3127 register devmap_handle_t *dhp;
3129 3128 int (*devmap)(dev_t, devmap_cookie_t, offset_t, size_t,
3130 3129 size_t *, uint_t);
3131 3130 int (*mmap)(dev_t, off_t, int);
3132 3131 struct devmap_callback_ctl *callbackops;
3133 3132 devmap_handle_t *dhp_head = NULL;
3134 3133 devmap_handle_t *dhp_prev = NULL;
3135 3134 devmap_handle_t *dhp_curr;
3136 3135 caddr_t addr;
3137 3136 int map_flag;
3138 3137 int ret;
3139 3138 ulong_t total_len;
3140 3139 size_t map_len;
3141 3140 size_t resid_len = len;
3142 3141 offset_t map_off = off;
3143 3142 struct devmap_softlock *slock = NULL;
3144 3143
3145 3144 #ifdef lint
3146 3145 cred = cred;
3147 3146 #endif
3148 3147
3149 3148 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SETUP,
3150 3149 "devmap_setup:start off=%llx len=%lx", off, len);
3151 3150 DEBUGF(3, (CE_CONT, "devmap_setup: off %llx len %lx\n",
3152 3151 off, len));
3153 3152
3154 3153 devmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_devmap;
3155 3154 mmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap;
3156 3155
3157 3156 /*
3158 3157 	 * The driver must provide a devmap(9E) entry point in cb_ops to use the
3159 3158 * devmap framework.
3160 3159 */
3161 3160 if (devmap == NULL || devmap == nulldev || devmap == nodev)
3162 3161 return (EINVAL);
3163 3162
3164 3163 /*
3165 3164 	 * To protect against an inadvertent entry merely because the devmap
3166 3165 	 * entry point is non-NULL, return an error if the D_DEVMAP bit is
3167 3166 	 * not set in cb_flag and mmap is NULL.
3168 3167 */
3169 3168 map_flag = devopsp[getmajor(dev)]->devo_cb_ops->cb_flag;
3170 3169 if ((map_flag & D_DEVMAP) == 0 && (mmap == NULL || mmap == nulldev))
3171 3170 return (EINVAL);
3172 3171
3173 3172 /*
3174 3173 * devmap allows mmap(2) to map multiple registers.
3175 3174 	 * One devmap_handle is created for each register mapped.
3176 3175 */
3177 3176 for (total_len = 0; total_len < len; total_len += map_len) {
3178 3177 dhp = kmem_zalloc(sizeof (devmap_handle_t), KM_SLEEP);
3179 3178
3180 3179 if (dhp_prev != NULL)
3181 3180 dhp_prev->dh_next = dhp;
3182 3181 else
3183 3182 dhp_head = dhp;
3184 3183 dhp_prev = dhp;
3185 3184
3186 3185 dhp->dh_prot = prot;
3187 3186 dhp->dh_orig_maxprot = dhp->dh_maxprot = maxprot;
3188 3187 dhp->dh_dev = dev;
3189 3188 dhp->dh_timeout_length = CTX_TIMEOUT_VALUE;
3190 3189 dhp->dh_uoff = map_off;
3191 3190
3192 3191 /*
3193 3192 		 * Get mapping-specific info from
3194 3193 * the driver, such as rnumber, roff, len, callbackops,
3195 3194 * accattrp and, if the mapping is for kernel memory,
3196 3195 * ddi_umem_cookie.
3197 3196 */
3198 3197 if ((ret = cdev_devmap(dev, dhp, map_off,
3199 3198 resid_len, &map_len, get_udatamodel())) != 0) {
3200 3199 free_devmap_handle(dhp_head);
3201 3200 return (ENXIO);
3202 3201 }
3203 3202
3204 3203 if (map_len & PAGEOFFSET) {
3205 3204 free_devmap_handle(dhp_head);
3206 3205 return (EINVAL);
3207 3206 }
3208 3207
3209 3208 callbackops = &dhp->dh_callbackops;
3210 3209
3211 3210 if ((callbackops->devmap_access == NULL) ||
3212 3211 (callbackops->devmap_access == nulldev) ||
3213 3212 (callbackops->devmap_access == nodev)) {
3214 3213 /*
3215 3214 * Normally devmap does not support MAP_PRIVATE unless
3216 3215 * the drivers provide a valid devmap_access routine.
3217 3216 */
3218 3217 if ((flags & MAP_PRIVATE) != 0) {
3219 3218 free_devmap_handle(dhp_head);
3220 3219 return (EINVAL);
3221 3220 }
3222 3221 } else {
3223 3222 /*
3224 3223 * Initialize dhp_softlock and dh_ctx if the drivers
3225 3224 * provide devmap_access.
3226 3225 */
3227 3226 dhp->dh_softlock = devmap_softlock_init(dev,
3228 3227 (ulong_t)callbackops->devmap_access);
3229 3228 dhp->dh_ctx = devmap_ctxinit(dev,
3230 3229 (ulong_t)callbackops->devmap_access);
3231 3230
3232 3231 /*
3233 3232 			 * segdev_fault can only work when all
3234 3233 			 * dh_softlock pointers in a multi-dhp
3235 3234 			 * mapping are the same; see the comments
3236 3235 			 * in segdev_fault. This code records the
3237 3236 			 * first dh_softlock allocated in slock,
3238 3237 			 * compares each later allocation against
3239 3238 			 * it, and returns an error on a mismatch.
3240 3239 */
3241 3240 if (slock == NULL)
3242 3241 slock = dhp->dh_softlock;
3243 3242 if (slock != dhp->dh_softlock) {
3244 3243 free_devmap_handle(dhp_head);
3245 3244 return (ENOTSUP);
3246 3245 }
3247 3246 }
3248 3247
3249 3248 map_off += map_len;
3250 3249 resid_len -= map_len;
3251 3250 }
3252 3251
3253 3252 /*
3254 3253 * get the user virtual address and establish the mapping between
3255 3254 * uvaddr and device physical address.
3256 3255 */
3257 3256 if ((ret = devmap_device(dhp_head, as, addrp, off, len, flags))
3258 3257 != 0) {
3259 3258 /*
3260 3259 		 * Free the devmap handles if an error occurred during the mapping.
3261 3260 */
3262 3261 free_devmap_handle(dhp_head);
3263 3262
3264 3263 return (ret);
3265 3264 }
3266 3265
3267 3266 /*
3268 3267 	 * Call the driver's devmap_map callback for post-mapping work, such
3269 3268 	 * as allocating driver-private data for context management.
3270 3269 */
3271 3270 dhp = dhp_head;
3272 3271 map_off = off;
3273 3272 addr = *addrp;
3274 3273 while (dhp != NULL) {
3275 3274 callbackops = &dhp->dh_callbackops;
3276 3275 dhp->dh_uvaddr = addr;
3277 3276 dhp_curr = dhp;
3278 3277 if (callbackops->devmap_map != NULL) {
3279 3278 ret = (*callbackops->devmap_map)((devmap_cookie_t)dhp,
3280 3279 dev, flags, map_off,
3281 3280 dhp->dh_len, &dhp->dh_pvtp);
3282 3281 if (ret != 0) {
3283 3282 struct segdev_data *sdp;
3284 3283
3285 3284 /*
3286 3285 			 * Call the driver's devmap_unmap entry
3287 3286 			 * point to free driver resources.
3288 3287 */
3289 3288 dhp = dhp_head;
3290 3289 map_off = off;
3291 3290 while (dhp != dhp_curr) {
3292 3291 callbackops = &dhp->dh_callbackops;
3293 3292 if (callbackops->devmap_unmap != NULL) {
3294 3293 (*callbackops->devmap_unmap)(
3295 3294 dhp, dhp->dh_pvtp,
3296 3295 map_off, dhp->dh_len,
3297 3296 NULL, NULL, NULL, NULL);
3298 3297 }
3299 3298 map_off += dhp->dh_len;
3300 3299 dhp = dhp->dh_next;
3301 3300 }
3302 3301 sdp = dhp_head->dh_seg->s_data;
3303 3302 sdp->devmap_data = NULL;
3304 3303 free_devmap_handle(dhp_head);
3305 3304 return (ENXIO);
3306 3305 }
3307 3306 }
3308 3307 map_off += dhp->dh_len;
3309 3308 addr += dhp->dh_len;
3310 3309 dhp = dhp->dh_next;
3311 3310 }
3312 3311
3313 3312 return (0);
3314 3313 }
3315 3314
3316 3315 int
3317 3316 ddi_devmap_segmap(dev_t dev, off_t off, ddi_as_handle_t as, caddr_t *addrp,
3318 3317 off_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
3319 3318 {
3320 3319 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP,
3321 3320 "devmap_segmap:start");
3322 3321 return (devmap_setup(dev, (offset_t)off, (struct as *)as, addrp,
3323 3322 (size_t)len, prot, maxprot, flags, cred));
3324 3323 }
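
A driver that uses the devmap framework can route its segmap(9E) entry straight through ddi_devmap_segmap(). The sketch below is illustrative, not from this file; the xx_ names are placeholders, and, as the checks in devmap_setup() require, cb_devmap must be a real entry point and either D_DEVMAP must be set in cb_flag or an mmap(9E) routine supplied.

static int
xx_segmap(dev_t dev, off_t off, struct as *asp, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	/* Defer entirely to the devmap framework. */
	return (ddi_devmap_segmap(dev, off, (ddi_as_handle_t)asp, addrp,
	    len, prot, maxprot, flags, credp));
}
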
3325 3324
3326 3325 /*
3327 3326  * Called from devmap_devmem_setup/remap to see if large pages can be used
3328 3327  * for this device mapping.
3329 3328  * Also calculate the maximum page size for this mapping;
3330 3329  * this page size will be used in the fault routine for
3331 3330  * optimal page size calculations.
3332 3331 */
3333 3332 static void
3334 3333 devmap_devmem_large_page_setup(devmap_handle_t *dhp)
3335 3334 {
3336 3335 ASSERT(dhp_is_devmem(dhp));
3337 3336 dhp->dh_mmulevel = 0;
3338 3337
3339 3338 /*
3340 3339 	 * Use a large page size only if:
3341 3340 	 * 1. this is device memory,
3342 3341 	 * 2. the mmu supports multiple page sizes,
3343 3342 	 * 3. the driver did not disallow it,
3344 3343 	 * 4. the dhp length is at least as big as the large pagesize, and
3345 3344 	 * 5. the uvaddr and pfn are large-pagesize aligned.
3346 3345 */
3347 3346 if (page_num_pagesizes() > 1 &&
3348 3347 !(dhp->dh_flags & (DEVMAP_USE_PAGESIZE | DEVMAP_MAPPING_INVALID))) {
3349 3348 ulong_t base;
3350 3349 int level;
3351 3350
3352 3351 base = (ulong_t)ptob(dhp->dh_pfn);
3353 3352 for (level = 1; level < page_num_pagesizes(); level++) {
3354 3353 size_t pgsize = page_get_pagesize(level);
3355 3354 if ((dhp->dh_len < pgsize) ||
3356 3355 (!VA_PA_PGSIZE_ALIGNED((uintptr_t)dhp->dh_uvaddr,
3357 3356 base, pgsize))) {
3358 3357 break;
3359 3358 }
3360 3359 }
3361 3360 dhp->dh_mmulevel = level - 1;
3362 3361 }
3363 3362 if (dhp->dh_mmulevel > 0) {
3364 3363 dhp->dh_flags |= DEVMAP_FLAG_LARGE;
3365 3364 } else {
3366 3365 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3367 3366 }
3368 3367 }
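
To make the level walk above concrete, here is a small standalone sketch of the same loop; the page sizes are hypothetical, and VA_PA_PGSIZE_ALIGNED is assumed to mean that VA and PA are congruent modulo the page size, so one large page can cover both.

#include <stdio.h>
#include <stdint.h>

/* Assumed semantics: VA and PA share the same offset within a pgsize block. */
#define VA_PA_PGSIZE_ALIGNED(va, pa, pgsize) \
	(((va) & ((pgsize) - 1)) == ((pa) & ((pgsize) - 1)))

int
main(void)
{
	uintptr_t va = 0x400000;	/* user VA, 4 MB aligned */
	uintptr_t pa = 0xc0400000;	/* PA congruent to VA mod 4 MB */
	size_t pgsize[] = { 0x1000, 0x200000, 0x400000 };	/* hypothetical */
	size_t len = 0x800000;		/* 8 MB mapping */
	int level;

	/* Walk from the smallest large page up; stop at the first misfit. */
	for (level = 1; level < 3; level++) {
		if (len < pgsize[level] ||
		    !VA_PA_PGSIZE_ALIGNED(va, pa, pgsize[level]))
			break;
	}
	printf("usable mmu level: %d\n", level - 1);	/* prints 2 */
	return (0);
}
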
3369 3368
3370 3369 /*
3371 3370  * Called by the driver's devmap routine to pass device-specific info to
3372 3371  * the framework. Used for device memory mapping only.
3373 3372 */
3374 3373 int
3375 3374 devmap_devmem_setup(devmap_cookie_t dhc, dev_info_t *dip,
3376 3375 struct devmap_callback_ctl *callbackops, uint_t rnumber, offset_t roff,
3377 3376 size_t len, uint_t maxprot, uint_t flags, ddi_device_acc_attr_t *accattrp)
3378 3377 {
3379 3378 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3380 3379 ddi_acc_handle_t handle;
3381 3380 ddi_map_req_t mr;
3382 3381 ddi_acc_hdl_t *hp;
3383 3382 int err;
3384 3383
3385 3384 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_SETUP,
3386 3385 "devmap_devmem_setup:start dhp=%p offset=%llx rnum=%d len=%lx",
3387 3386 (void *)dhp, roff, rnumber, (uint_t)len);
3388 3387 DEBUGF(2, (CE_CONT, "devmap_devmem_setup: dhp %p offset %llx "
3389 3388 "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len));
3390 3389
3391 3390 /*
3392 3391 	 * First, check whether this function has already been called for this dhp.
3393 3392 */
3394 3393 if (dhp->dh_flags & DEVMAP_SETUP_DONE)
3395 3394 return (DDI_FAILURE);
3396 3395
3397 3396 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3398 3397 return (DDI_FAILURE);
3399 3398
3400 3399 if (flags & DEVMAP_MAPPING_INVALID) {
3401 3400 /*
3402 3401 * Don't go up the tree to get pfn if the driver specifies
3403 3402 * DEVMAP_MAPPING_INVALID in flags.
3404 3403 *
3405 3404 * If DEVMAP_MAPPING_INVALID is specified, we have to grant
3406 3405 * remap permission.
3407 3406 */
3408 3407 if (!(flags & DEVMAP_ALLOW_REMAP)) {
3409 3408 return (DDI_FAILURE);
3410 3409 }
3411 3410 dhp->dh_pfn = PFN_INVALID;
3412 3411 } else {
3413 3412 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
3414 3413 if (handle == NULL)
3415 3414 return (DDI_FAILURE);
3416 3415
3417 3416 hp = impl_acc_hdl_get(handle);
3418 3417 hp->ah_vers = VERS_ACCHDL;
3419 3418 hp->ah_dip = dip;
3420 3419 hp->ah_rnumber = rnumber;
3421 3420 hp->ah_offset = roff;
3422 3421 hp->ah_len = len;
3423 3422 if (accattrp != NULL)
3424 3423 hp->ah_acc = *accattrp;
3425 3424
3426 3425 mr.map_op = DDI_MO_MAP_LOCKED;
3427 3426 mr.map_type = DDI_MT_RNUMBER;
3428 3427 mr.map_obj.rnumber = rnumber;
3429 3428 mr.map_prot = maxprot & dhp->dh_orig_maxprot;
3430 3429 mr.map_flags = DDI_MF_DEVICE_MAPPING;
3431 3430 mr.map_handlep = hp;
3432 3431 mr.map_vers = DDI_MAP_VERSION;
3433 3432
3434 3433 /*
3435 3434 		 * Go up the device tree to get the pfn.
3436 3435 * The rootnex_map_regspec() routine in nexus drivers has been
3437 3436 * modified to return pfn if map_flags is DDI_MF_DEVICE_MAPPING.
3438 3437 */
3439 3438 err = ddi_map(dip, &mr, roff, len, (caddr_t *)&dhp->dh_pfn);
3440 3439 dhp->dh_hat_attr = hp->ah_hat_flags;
3441 3440 impl_acc_hdl_free(handle);
3442 3441
3443 3442 if (err)
3444 3443 return (DDI_FAILURE);
3445 3444 }
3446 3445 /* Should not be using devmem setup for memory pages */
3447 3446 ASSERT(!pf_is_memory(dhp->dh_pfn));
3448 3447
3449 3448 /* Only some of the flags bits are settable by the driver */
3450 3449 dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS);
3451 3450 dhp->dh_len = ptob(btopr(len));
3452 3451
3453 3452 dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
3454 3453 dhp->dh_roff = ptob(btop(roff));
3455 3454
3456 3455 /* setup the dh_mmulevel and DEVMAP_FLAG_LARGE */
3457 3456 devmap_devmem_large_page_setup(dhp);
3458 3457 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3459 3458 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3460 3459
3461 3460
3462 3461 if (callbackops != NULL) {
3463 3462 bcopy(callbackops, &dhp->dh_callbackops,
3464 3463 sizeof (struct devmap_callback_ctl));
3465 3464 }
3466 3465
3467 3466 /*
3468 3467 * Initialize dh_lock if we want to do remap.
3469 3468 */
3470 3469 if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
3471 3470 mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
3472 3471 dhp->dh_flags |= DEVMAP_LOCK_INITED;
3473 3472 }
3474 3473
3475 3474 dhp->dh_flags |= DEVMAP_SETUP_DONE;
3476 3475
3477 3476 return (DDI_SUCCESS);
3478 3477 }
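
devmap_devmem_setup() is normally invoked from the driver's devmap(9E) entry point, which the loop in devmap_setup() above calls once per register through cdev_devmap(). A minimal sketch, with placeholder xx_ names and a device whose registers live in register set 1:

static dev_info_t *xx_dip;			/* saved at attach(9E) */
static ddi_device_acc_attr_t xx_acc_attr;	/* device access attributes */

static int
xx_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model)
{
	size_t length = ptob(btopr(len));	/* round up to whole pages */

	/* Export register set 1, starting at 'off', to the user mapping. */
	if (devmap_devmem_setup(dhp, xx_dip, NULL /* no callbacks */,
	    1 /* rnumber */, off, length, PROT_ALL, 0, &xx_acc_attr) !=
	    DDI_SUCCESS)
		return (ENXIO);

	*maplen = length;
	return (0);
}
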
3479 3478
3480 3479 int
3481 3480 devmap_devmem_remap(devmap_cookie_t dhc, dev_info_t *dip,
3482 3481 uint_t rnumber, offset_t roff, size_t len, uint_t maxprot,
3483 3482 uint_t flags, ddi_device_acc_attr_t *accattrp)
3484 3483 {
3485 3484 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3486 3485 ddi_acc_handle_t handle;
3487 3486 ddi_map_req_t mr;
3488 3487 ddi_acc_hdl_t *hp;
3489 3488 pfn_t pfn;
3490 3489 uint_t hat_flags;
3491 3490 int err;
3492 3491
3493 3492 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_REMAP,
3494 3493 	    "devmap_devmem_remap:start dhp=%p offset=%llx rnum=%d len=%lx",
3495 3494 (void *)dhp, roff, rnumber, (uint_t)len);
3496 3495 DEBUGF(2, (CE_CONT, "devmap_devmem_remap: dhp %p offset %llx "
3497 3496 "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len));
3498 3497
3499 3498 /*
3500 3499 * Return failure if setup has not been done or no remap permission
3501 3500 * has been granted during the setup.
3502 3501 */
3503 3502 if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
3504 3503 (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
3505 3504 return (DDI_FAILURE);
3506 3505
3507 3506 /* Only DEVMAP_MAPPING_INVALID flag supported for remap */
3508 3507 if ((flags != 0) && (flags != DEVMAP_MAPPING_INVALID))
3509 3508 return (DDI_FAILURE);
3510 3509
3511 3510 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3512 3511 return (DDI_FAILURE);
3513 3512
3514 3513 if (!(flags & DEVMAP_MAPPING_INVALID)) {
3515 3514 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
3516 3515 if (handle == NULL)
3517 3516 return (DDI_FAILURE);
3518 3517 }
3519 3518
3520 3519 HOLD_DHP_LOCK(dhp);
3521 3520
3522 3521 /*
3523 3522 	 * Unload the old mapping so the next fault will set up the new
3524 3523 	 * mappings. Do this while holding the dhp lock so other faults
3525 3524 	 * don't reestablish the mappings.
3526 3525 */
3527 3526 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
3528 3527 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
3529 3528
3530 3529 if (flags & DEVMAP_MAPPING_INVALID) {
3531 3530 dhp->dh_flags |= DEVMAP_MAPPING_INVALID;
3532 3531 dhp->dh_pfn = PFN_INVALID;
3533 3532 } else {
3534 3533 /* clear any prior DEVMAP_MAPPING_INVALID flag */
3535 3534 dhp->dh_flags &= ~DEVMAP_MAPPING_INVALID;
3536 3535 hp = impl_acc_hdl_get(handle);
3537 3536 hp->ah_vers = VERS_ACCHDL;
3538 3537 hp->ah_dip = dip;
3539 3538 hp->ah_rnumber = rnumber;
3540 3539 hp->ah_offset = roff;
3541 3540 hp->ah_len = len;
3542 3541 if (accattrp != NULL)
3543 3542 hp->ah_acc = *accattrp;
3544 3543
3545 3544 mr.map_op = DDI_MO_MAP_LOCKED;
3546 3545 mr.map_type = DDI_MT_RNUMBER;
3547 3546 mr.map_obj.rnumber = rnumber;
3548 3547 mr.map_prot = maxprot & dhp->dh_orig_maxprot;
3549 3548 mr.map_flags = DDI_MF_DEVICE_MAPPING;
3550 3549 mr.map_handlep = hp;
3551 3550 mr.map_vers = DDI_MAP_VERSION;
3552 3551
3553 3552 /*
3554 3553 		 * Go up the device tree to get the pfn.
3555 3554 * The rootnex_map_regspec() routine in nexus drivers has been
3556 3555 * modified to return pfn if map_flags is DDI_MF_DEVICE_MAPPING.
3557 3556 */
3558 3557 err = ddi_map(dip, &mr, roff, len, (caddr_t *)&pfn);
3559 3558 hat_flags = hp->ah_hat_flags;
3560 3559 impl_acc_hdl_free(handle);
3561 3560 if (err) {
3562 3561 RELE_DHP_LOCK(dhp);
3563 3562 return (DDI_FAILURE);
3564 3563 }
3565 3564 /*
3566 3565 		 * The result of ddi_map was stored in local variables first,
3567 3566 		 * so that we do not overwrite the existing dhp with bad data.
3568 3567 */
3569 3568 dhp->dh_pfn = pfn;
3570 3569 dhp->dh_hat_attr = hat_flags;
3571 3570 }
3572 3571
3573 3572 /* clear the large page size flag */
3574 3573 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3575 3574
3576 3575 dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
3577 3576 dhp->dh_roff = ptob(btop(roff));
3578 3577
3579 3578 /* setup the dh_mmulevel and DEVMAP_FLAG_LARGE */
3580 3579 devmap_devmem_large_page_setup(dhp);
3581 3580 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3582 3581 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3583 3582
3584 3583 RELE_DHP_LOCK(dhp);
3585 3584 return (DDI_SUCCESS);
3586 3585 }
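
Remap is what enables devmap context management: a driver that multiplexes one set of registers among mappings can pull the registers out of a mapping and let the next fault, via devmap_access, decide who gets them. A hedged sketch of the invalidate step, assuming the handle was set up with DEVMAP_ALLOW_REMAP:

static int
xx_take_away_mapping(devmap_cookie_t dhp, dev_info_t *dip, size_t len)
{
	/*
	 * Mark the mapping invalid; the accattrp argument is unused in
	 * this case, so NULL is passed.
	 */
	return (devmap_devmem_remap(dhp, dip, 1 /* rnumber */, 0, len,
	    PROT_ALL, DEVMAP_MAPPING_INVALID, NULL));
}
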
3587 3586
3588 3587 /*
3589 3588  * Called by the driver's devmap routine to pass kernel virtual address
3590 3589  * mapping info to the framework. Used only for kernel memory
3591 3590 * allocated from ddi_umem_alloc().
3592 3591 */
3593 3592 int
3594 3593 devmap_umem_setup(devmap_cookie_t dhc, dev_info_t *dip,
3595 3594 struct devmap_callback_ctl *callbackops, ddi_umem_cookie_t cookie,
3596 3595 offset_t off, size_t len, uint_t maxprot, uint_t flags,
3597 3596 ddi_device_acc_attr_t *accattrp)
3598 3597 {
3599 3598 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3600 3599 struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;
3601 3600
3602 3601 #ifdef lint
3603 3602 dip = dip;
3604 3603 #endif
3605 3604
3606 3605 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_SETUP,
3607 3606 "devmap_umem_setup:start dhp=%p offset=%llx cookie=%p len=%lx",
3608 3607 (void *)dhp, off, cookie, len);
3609 3608 DEBUGF(2, (CE_CONT, "devmap_umem_setup: dhp %p offset %llx "
3610 3609 "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));
3611 3610
3612 3611 if (cookie == NULL)
3613 3612 return (DDI_FAILURE);
3614 3613
3615 3614 /* For UMEM_TRASH, this restriction is not needed */
3616 3615 if ((off + len) > cp->size)
3617 3616 return (DDI_FAILURE);
3618 3617
3619 3618 /* check if the cache attributes are supported */
3620 3619 if (i_ddi_check_cache_attr(flags) == B_FALSE)
3621 3620 return (DDI_FAILURE);
3622 3621
3623 3622 /*
3624 3623 	 * First, check whether this function has already been called for this dhp.
3625 3624 */
3626 3625 if (dhp->dh_flags & DEVMAP_SETUP_DONE)
3627 3626 return (DDI_FAILURE);
3628 3627
3629 3628 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3630 3629 return (DDI_FAILURE);
3631 3630
3632 3631 if (flags & DEVMAP_MAPPING_INVALID) {
3633 3632 /*
3634 3633 * If DEVMAP_MAPPING_INVALID is specified, we have to grant
3635 3634 * remap permission.
3636 3635 */
3637 3636 if (!(flags & DEVMAP_ALLOW_REMAP)) {
3638 3637 return (DDI_FAILURE);
3639 3638 }
3640 3639 } else {
3641 3640 dhp->dh_cookie = cookie;
3642 3641 dhp->dh_roff = ptob(btop(off));
3643 3642 dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;
3644 3643 /* set HAT cache attributes */
3645 3644 i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
3646 3645 		/* set HAT endianness attributes */
3647 3646 i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);
3648 3647 }
3649 3648
3650 3649 /*
3651 3650 * The default is _not_ to pass HAT_LOAD_NOCONSIST to hat_devload();
3652 3651 * we pass HAT_LOAD_NOCONSIST _only_ in cases where hat tries to
3653 3652 * create consistent mappings but our intention was to create
3654 3653 * non-consistent mappings.
3655 3654 *
3656 3655 	 * DEVMEM: the hat figures out it's DEVMEM and creates non-consistent
3657 3656 	 * mappings.
3658 3657 	 *
3659 3658 	 * kernel exported memory: the hat figures out it's memory and always
3660 3659 	 * creates consistent mappings.
3661 3660 *
3662 3661 * /dev/mem: non-consistent mappings. See comments in common/io/mem.c
3663 3662 *
3664 3663 * /dev/kmem: consistent mappings are created unless they are
3665 3664 * MAP_FIXED. We _explicitly_ tell hat to create non-consistent
3666 3665 * mappings by passing HAT_LOAD_NOCONSIST in case of MAP_FIXED
3667 3666 * mappings of /dev/kmem. See common/io/mem.c
3668 3667 */
3669 3668
3670 3669 /* Only some of the flags bits are settable by the driver */
3671 3670 dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS);
3672 3671
3673 3672 dhp->dh_len = ptob(btopr(len));
3674 3673 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3675 3674 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3676 3675
3677 3676 if (callbackops != NULL) {
3678 3677 bcopy(callbackops, &dhp->dh_callbackops,
3679 3678 sizeof (struct devmap_callback_ctl));
3680 3679 }
3681 3680 /*
3682 3681 * Initialize dh_lock if we want to do remap.
3683 3682 */
3684 3683 if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
3685 3684 mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
3686 3685 dhp->dh_flags |= DEVMAP_LOCK_INITED;
3687 3686 }
3688 3687
3689 3688 dhp->dh_flags |= DEVMAP_SETUP_DONE;
3690 3689
3691 3690 return (DDI_SUCCESS);
3692 3691 }
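
For kernel-memory exports, the driver allocates with ddi_umem_alloc() (defined later in this file) and passes the resulting cookie here from its devmap(9E) entry. A minimal sketch with placeholder xx_ names; cleanup on detach is omitted:

static dev_info_t *xx_dip;		/* saved at attach(9E) */
static ddi_umem_cookie_t xx_cookie;
static void *xx_kva;

static int
xx_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model)
{
	size_t length = ptob(btopr(len));

	/* One-time allocation of page-aligned, exportable kernel memory. */
	if (xx_kva == NULL)
		xx_kva = ddi_umem_alloc(length, DDI_UMEM_SLEEP, &xx_cookie);

	if (devmap_umem_setup(dhp, xx_dip, NULL /* no callbacks */,
	    xx_cookie, off, length, PROT_ALL, 0, NULL) != DDI_SUCCESS)
		return (ENXIO);

	*maplen = length;
	return (0);
}
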
3693 3692
3694 3693 int
3695 3694 devmap_umem_remap(devmap_cookie_t dhc, dev_info_t *dip,
3696 3695 ddi_umem_cookie_t cookie, offset_t off, size_t len, uint_t maxprot,
3697 3696 uint_t flags, ddi_device_acc_attr_t *accattrp)
3698 3697 {
3699 3698 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3700 3699 struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;
3701 3700
3702 3701 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_REMAP,
3703 3702 "devmap_umem_remap:start dhp=%p offset=%llx cookie=%p len=%lx",
3704 3703 (void *)dhp, off, cookie, len);
3705 3704 DEBUGF(2, (CE_CONT, "devmap_umem_remap: dhp %p offset %llx "
3706 3705 "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));
3707 3706
3708 3707 #ifdef lint
3709 3708 dip = dip;
3710 3709 accattrp = accattrp;
3711 3710 #endif
3712 3711 /*
3713 3712 	 * Return failure if setup has not been done or no remap permission
3714 3713 * has been granted during the setup.
3715 3714 */
3716 3715 if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
3717 3716 (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
3718 3717 return (DDI_FAILURE);
3719 3718
3720 3719 /* No flags supported for remap yet */
3721 3720 if (flags != 0)
3722 3721 return (DDI_FAILURE);
3723 3722
3724 3723 /* check if the cache attributes are supported */
3725 3724 if (i_ddi_check_cache_attr(flags) == B_FALSE)
3726 3725 return (DDI_FAILURE);
3727 3726
3728 3727 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3729 3728 return (DDI_FAILURE);
3730 3729
3731 3730 /* For UMEM_TRASH, this restriction is not needed */
3732 3731 if ((off + len) > cp->size)
3733 3732 return (DDI_FAILURE);
3734 3733
3735 3734 HOLD_DHP_LOCK(dhp);
3736 3735 /*
3737 3736 	 * Unload the old mapping so the next fault will set up the new
3738 3737 	 * mappings. Do this while holding the dhp lock so other faults
3739 3738 	 * don't reestablish the mappings.
3740 3739 */
3741 3740 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
3742 3741 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
3743 3742
3744 3743 dhp->dh_cookie = cookie;
3745 3744 dhp->dh_roff = ptob(btop(off));
3746 3745 dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;
3747 3746 /* set HAT cache attributes */
3748 3747 i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
3749 3748 	/* set HAT endianness attributes */
3750 3749 i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);
3751 3750
3752 3751 /* clear the large page size flag */
3753 3752 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3754 3753
3755 3754 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3756 3755 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3757 3756 RELE_DHP_LOCK(dhp);
3758 3757 return (DDI_SUCCESS);
3759 3758 }
3760 3759
3761 3760 /*
3762 3761  * Set the timeout value for the driver's context management callback, e.g.
3763 3762 * devmap_access().
3764 3763 */
3765 3764 void
3766 3765 devmap_set_ctx_timeout(devmap_cookie_t dhc, clock_t ticks)
3767 3766 {
3768 3767 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3769 3768
3770 3769 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SET_CTX_TIMEOUT,
3771 3770 "devmap_set_ctx_timeout:start dhp=%p ticks=%x",
3772 3771 (void *)dhp, ticks);
3773 3772 dhp->dh_timeout_length = ticks;
3774 3773 }
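
For instance, a driver doing context management might let each mapping keep the device for roughly 10 ms before the framework calls back into devmap_access; drv_usectohz() converts microseconds to ticks. A one-line sketch:

	/* Allow roughly 10 ms of device context per mapping. */
	devmap_set_ctx_timeout(dhp, drv_usectohz(10000));
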
3775 3774
3776 3775 int
3777 3776 devmap_default_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
3778 3777 size_t len, uint_t type, uint_t rw)
3779 3778 {
3780 3779 #ifdef lint
3781 3780 pvtp = pvtp;
3782 3781 #endif
3783 3782
3784 3783 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DEFAULT_ACCESS,
3785 3784 "devmap_default_access:start");
3786 3785 return (devmap_load(dhp, off, len, type, rw));
3787 3786 }
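
A driver with no special context handling can still provide a devmap_access callback that simply delegates here; as devmap_setup() above shows, supplying devmap_access also gets dh_softlock and dh_ctx initialized. A sketch of the callback table, written with C99 designated initializers and the field names from devmap_callback_ctl(9S):

static int
xx_devmap_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
    size_t len, uint_t type, uint_t rw)
{
	/* No private context juggling: just load the mapping. */
	return (devmap_default_access(dhp, pvtp, off, len, type, rw));
}

static struct devmap_callback_ctl xx_callbackops = {
	.devmap_rev = DEVMAP_OPS_REV,
	.devmap_access = xx_devmap_access,
};
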
3788 3787
3789 3788 /*
3790 3789 * segkmem_alloc() wrapper to allocate memory which is both
3791 3790 * non-relocatable (for DR) and sharelocked, since the rest
3792 3791 * of this segment driver requires it.
3793 3792 */
3794 3793 static void *
3795 3794 devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag)
3796 3795 {
3797 3796 ASSERT(vmp != NULL);
3798 3797 ASSERT(kvseg.s_base != NULL);
3799 3798 vmflag |= (VM_NORELOC | SEGKMEM_SHARELOCKED);
3800 3799 return (segkmem_alloc(vmp, size, vmflag));
3801 3800 }
3802 3801
3803 3802 /*
3804 3803 * This is where things are a bit incestuous with seg_kmem: unlike
3805 3804 * seg_kp, seg_kmem does not keep its pages long-term sharelocked, so
3806 3805 * we need to do a bit of a dance around that to prevent duplication of
3807 3806 * code until we decide to bite the bullet and implement a new kernel
3808 3807 * segment for driver-allocated memory that is exported to user space.
3809 3808 */
3810 3809 static void
3811 3810 devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size)
3812 3811 {
3813 3812 page_t *pp;
3814 3813 caddr_t addr = inaddr;
3815 3814 caddr_t eaddr;
3816 3815 pgcnt_t npages = btopr(size);
3817 3816
3818 3817 ASSERT(vmp != NULL);
3819 3818 ASSERT(kvseg.s_base != NULL);
3820 3819 ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
3821 3820
3822 3821 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
3823 3822
3824 3823 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
3825 3824 /*
3826 3825 * Use page_find() instead of page_lookup() to find the page
3827 3826 * since we know that it is hashed and has a shared lock.
3828 3827 */
3829 3828 pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);
3830 3829
3831 3830 if (pp == NULL)
3832 3831 panic("devmap_free_pages: page not found");
3833 3832 if (!page_tryupgrade(pp)) {
3834 3833 page_unlock(pp);
3835 3834 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr,
3836 3835 SE_EXCL);
3837 3836 if (pp == NULL)
3838 3837 panic("devmap_free_pages: page already freed");
3839 3838 }
3840 3839 /* Clear p_lckcnt so page_destroy() doesn't update availrmem */
3841 3840 pp->p_lckcnt = 0;
3842 3841 page_destroy(pp, 0);
3843 3842 }
3844 3843 page_unresv(npages);
3845 3844
3846 3845 if (vmp != NULL)
3847 3846 vmem_free(vmp, inaddr, size);
3848 3847 }
3849 3848
3850 3849 /*
3851 3850 * devmap_umem_alloc_np() replaces kmem_zalloc() as the method for
3852 3851 * allocating non-pageable kmem in response to a ddi_umem_alloc()
3853 3852 * default request. For now we allocate our own pages and we keep
3854 3853 * them long-term sharelocked, since: A) the fault routines expect the
3855 3854 * memory to already be locked; B) pageable umem is already long-term
3856 3855 * locked; C) it's a lot of work to make it otherwise, particularly
3857 3856 * since the nexus layer expects the pages to never fault. An RFE is to
3858 3857 * not keep the pages long-term locked, but instead to be able to
3859 3858 * take faults on them and simply look them up in kvp in case we
3860 3859 * fault on them. Even then, we must take care not to let pageout
3861 3860 * steal them from us since the data must remain resident; if we
3862 3861 * do this we must come up with some way to pin the pages to prevent
3863 3862 * faults while a driver is doing DMA to/from them.
3864 3863 */
3865 3864 static void *
3866 3865 devmap_umem_alloc_np(size_t size, size_t flags)
3867 3866 {
3868 3867 void *buf;
3869 3868 int vmflags = (flags & DDI_UMEM_NOSLEEP)? VM_NOSLEEP : VM_SLEEP;
3870 3869
3871 3870 buf = vmem_alloc(umem_np_arena, size, vmflags);
3872 3871 if (buf != NULL)
3873 3872 bzero(buf, size);
3874 3873 return (buf);
3875 3874 }
3876 3875
3877 3876 static void
3878 3877 devmap_umem_free_np(void *addr, size_t size)
3879 3878 {
3880 3879 vmem_free(umem_np_arena, addr, size);
3881 3880 }
3882 3881
3883 3882 /*
3884 3883  * Allocate page-aligned kernel memory for export to user land.
3885 3884  * The devmap framework will use the cookie allocated by ddi_umem_alloc()
3886 3885  * to find a user virtual address that is of the same color as the address
3887 3886 * allocated here.
3888 3887 */
3889 3888 void *
3890 3889 ddi_umem_alloc(size_t size, int flags, ddi_umem_cookie_t *cookie)
3891 3890 {
3892 3891 register size_t len = ptob(btopr(size));
3893 3892 void *buf = NULL;
3894 3893 struct ddi_umem_cookie *cp;
3895 3894 int iflags = 0;
3896 3895
3897 3896 *cookie = NULL;
3898 3897
3899 3898 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_ALLOC,
3900 3899 "devmap_umem_alloc:start");
3901 3900 if (len == 0)
3902 3901 return ((void *)NULL);
3903 3902
3904 3903 /*
3905 3904 * allocate cookie
3906 3905 */
3907 3906 if ((cp = kmem_zalloc(sizeof (struct ddi_umem_cookie),
3908 3907 flags & DDI_UMEM_NOSLEEP ? KM_NOSLEEP : KM_SLEEP)) == NULL) {
3909 3908 ASSERT(flags & DDI_UMEM_NOSLEEP);
3910 3909 return ((void *)NULL);
3911 3910 }
3912 3911
3913 3912 if (flags & DDI_UMEM_PAGEABLE) {
3914 3913 /* Only one of the flags is allowed */
3915 3914 ASSERT(!(flags & DDI_UMEM_TRASH));
3916 3915 /* initialize resource with 0 */
3917 3916 iflags = KPD_ZERO;
3918 3917
3919 3918 /*
3920 3919 		 * To allocate unlocked pageable memory, use segkp_get() to
3921 3920 		 * create a segkp segment. Since segkp can only service kas,
3922 3921 		 * other segment drivers such as segdev have to do
3923 3922 		 * as_fault(segkp, SOFTLOCK) in their fault routines.
3924 3923 */
3925 3924 if (flags & DDI_UMEM_NOSLEEP)
3926 3925 iflags |= KPD_NOWAIT;
3927 3926
3928 3927 if ((buf = segkp_get(segkp, len, iflags)) == NULL) {
3929 3928 kmem_free(cp, sizeof (struct ddi_umem_cookie));
3930 3929 return ((void *)NULL);
3931 3930 }
3932 3931 cp->type = KMEM_PAGEABLE;
3933 3932 mutex_init(&cp->lock, NULL, MUTEX_DEFAULT, NULL);
3934 3933 cp->locked = 0;
3935 3934 } else if (flags & DDI_UMEM_TRASH) {
3936 3935 /* Only one of the flags is allowed */
3937 3936 ASSERT(!(flags & DDI_UMEM_PAGEABLE));
3938 3937 cp->type = UMEM_TRASH;
3939 3938 buf = NULL;
3940 3939 } else {
3941 3940 if ((buf = devmap_umem_alloc_np(len, flags)) == NULL) {
3942 3941 kmem_free(cp, sizeof (struct ddi_umem_cookie));
3943 3942 return ((void *)NULL);
3944 3943 }
3945 3944
3946 3945 cp->type = KMEM_NON_PAGEABLE;
3947 3946 }
3948 3947
3949 3948 /*
3950 3949 	 * We need to save the size here; it will be used when
3951 3950 	 * we do the kmem_free.
3952 3951 */
3953 3952 cp->size = len;
3954 3953 cp->cvaddr = (caddr_t)buf;
3955 3954
3956 3955 *cookie = (void *)cp;
3957 3956 return (buf);
3958 3957 }
3959 3958
3960 3959 void
3961 3960 ddi_umem_free(ddi_umem_cookie_t cookie)
3962 3961 {
3963 3962 struct ddi_umem_cookie *cp;
3964 3963
3965 3964 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_FREE,
3966 3965 "devmap_umem_free:start");
3967 3966
3968 3967 /*
3969 3968 	 * If the cookie is NULL, this call has no effect.
3970 3969 */
3971 3970 if (cookie == NULL)
3972 3971 return;
3973 3972
3974 3973 cp = (struct ddi_umem_cookie *)cookie;
3975 3974
3976 3975 switch (cp->type) {
3977 3976 case KMEM_PAGEABLE :
3978 3977 ASSERT(cp->cvaddr != NULL && cp->size != 0);
3979 3978 /*
3980 3979 * Check if there are still any pending faults on the cookie
3981 3980 		 * while the driver is deleting it.
3982 3981 		 * XXX - could change to an ASSERT but won't catch errant drivers
3983 3982 */
3984 3983 mutex_enter(&cp->lock);
3985 3984 if (cp->locked) {
3986 3985 mutex_exit(&cp->lock);
3987 3986 panic("ddi_umem_free for cookie with pending faults %p",
3988 3987 (void *)cp);
3989 3988 return;
3990 3989 }
3991 3990
3992 3991 segkp_release(segkp, cp->cvaddr);
3993 3992
3994 3993 /*
3995 3994 * release mutex associated with this cookie.
3996 3995 */
3997 3996 mutex_destroy(&cp->lock);
3998 3997 break;
3999 3998 case KMEM_NON_PAGEABLE :
4000 3999 ASSERT(cp->cvaddr != NULL && cp->size != 0);
4001 4000 devmap_umem_free_np(cp->cvaddr, cp->size);
4002 4001 break;
4003 4002 case UMEM_TRASH :
4004 4003 break;
4005 4004 case UMEM_LOCKED :
4006 4005 /* Callers should use ddi_umem_unlock for this type */
4007 4006 ddi_umem_unlock(cookie);
4008 4007 /* Frees the cookie too */
4009 4008 return;
4010 4009 default:
4011 4010 /* panic so we can diagnose the underlying cause */
4012 4011 panic("ddi_umem_free: illegal cookie type 0x%x\n",
4013 4012 cp->type);
4014 4013 }
4015 4014
4016 4015 kmem_free(cookie, sizeof (struct ddi_umem_cookie));
4017 4016 }
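
A matched allocate/release pair might look like the sketch below; a DDI_UMEM_NOSLEEP allocation can fail (returning NULL with *cookiep left NULL), and, as the code above shows, freeing a NULL cookie is harmless.

static void *
xx_alloc_export_buf(size_t size, ddi_umem_cookie_t *cookiep)
{
	/* Returns zeroed, page-aligned memory, or NULL if NOSLEEP fails. */
	return (ddi_umem_alloc(size, DDI_UMEM_NOSLEEP, cookiep));
}

static void
xx_free_export_buf(ddi_umem_cookie_t cookie)
{
	ddi_umem_free(cookie);	/* NULL cookie: no effect, per above */
}
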
4018 4017
4019 4018
4020 4019 static int
4021 4020 segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
4022 4021 {
4023 4022 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4024 4023
4025 4024 /*
4026 4025 * It looks as if it is always mapped shared
4027 4026 */
4028 4027 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4029 4028 "segdev_getmemid:start");
4030 4029 memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
4031 4030 memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4032 4031 return (0);
4033 4032 }
4034 4033
4035 4034 /*ARGSUSED*/
4036 4035 static lgrp_mem_policy_info_t *
4037 4036 segdev_getpolicy(struct seg *seg, caddr_t addr)
4038 4037 {
4039 4038 return (NULL);
4040 4039 }
4041 4040
4042 4041 /*ARGSUSED*/
4043 4042 static int
4044 4043 segdev_capable(struct seg *seg, segcapability_t capability)
4045 4044 {
4046 4045 return (0);
4047 4046 }
4048 4047
4049 4048 /*
4050 4049 * ddi_umem_alloc() non-pageable quantum cache max size.
4051 4050 * This is just a SWAG.
4052 4051 */
4053 4052 #define DEVMAP_UMEM_QUANTUM (8*PAGESIZE)
4054 4053
4055 4054 /*
4056 4055 * Initialize seg_dev from boot. This routine sets up the trash page
4057 4056 * and creates the umem_np_arena used to back non-pageable memory
4058 4057 * requests.
4059 4058 */
4060 4059 void
4061 4060 segdev_init(void)
4062 4061 {
4063 4062 struct seg kseg;
4064 4063
4065 4064 umem_np_arena = vmem_create("umem_np", NULL, 0, PAGESIZE,
4066 4065 devmap_alloc_pages, devmap_free_pages, heap_arena,
4067 4066 DEVMAP_UMEM_QUANTUM, VM_SLEEP);
4068 4067
4069 4068 kseg.s_as = &kas;
4070 4069 trashpp = page_create_va(&trashvp, 0, PAGESIZE,
4071 4070 PG_NORELOC | PG_EXCL | PG_WAIT, &kseg, NULL);
4072 4071 if (trashpp == NULL)
4073 4072 panic("segdev_init: failed to create trash page");
4074 4073 pagezero(trashpp, 0, PAGESIZE);
4075 4074 page_downgrade(trashpp);
4076 4075 }
4077 4076
4078 4077 /*
4079 4078 * Invoke platform-dependent support routines so that /proc can have
4080 4079 * the platform code deal with curious hardware.
4081 4080 */
4082 4081 int
4083 4082 segdev_copyfrom(struct seg *seg,
4084 4083 caddr_t uaddr, const void *devaddr, void *kaddr, size_t len)
4085 4084 {
4086 4085 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4087 4086 struct snode *sp = VTOS(VTOCVP(sdp->vp));
4088 4087
4089 4088 return (e_ddi_copyfromdev(sp->s_dip,
4090 4089 (off_t)(uaddr - seg->s_base), devaddr, kaddr, len));
4091 4090 }
4092 4091
4093 4092 int
4094 4093 segdev_copyto(struct seg *seg,
4095 4094 caddr_t uaddr, const void *kaddr, void *devaddr, size_t len)
4096 4095 {
4097 4096 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4098 4097 struct snode *sp = VTOS(VTOCVP(sdp->vp));
4099 4098
4100 4099 return (e_ddi_copytodev(sp->s_dip,
4101 4100 (off_t)(uaddr - seg->s_base), kaddr, devaddr, len));
4102 4101 }