1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27 /*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T     */
  28 /*        All Rights Reserved   */
  29 
  30 /*
  31  * University Copyright- Copyright (c) 1982, 1986, 1988
  32  * The Regents of the University of California
  33  * All Rights Reserved
  34  *
  35  * University Acknowledgment- Portions of this document are derived from
  36  * software developed by the University of California, Berkeley, and its
  37  * contributors.
  38  */
  39 
  40 /*
  41  * VM - segment of a mapped device.
  42  *
  43  * This segment driver is used when mapping character special devices.
  44  */
  45 
  46 #include <sys/types.h>
  47 #include <sys/t_lock.h>
  48 #include <sys/sysmacros.h>
  49 #include <sys/vtrace.h>
  50 #include <sys/systm.h>
  51 #include <sys/vmsystm.h>
  52 #include <sys/mman.h>
  53 #include <sys/errno.h>
  54 #include <sys/kmem.h>
  55 #include <sys/cmn_err.h>
  56 #include <sys/vnode.h>
  57 #include <sys/proc.h>
  58 #include <sys/conf.h>
  59 #include <sys/debug.h>
  60 #include <sys/ddidevmap.h>
  61 #include <sys/ddi_implfuncs.h>
  62 #include <sys/lgrp.h>
  63 
  64 #include <vm/page.h>
  65 #include <vm/hat.h>
  66 #include <vm/as.h>
  67 #include <vm/seg.h>
  68 #include <vm/seg_dev.h>
  69 #include <vm/seg_kp.h>
  70 #include <vm/seg_kmem.h>
  71 #include <vm/vpage.h>
  72 
  73 #include <sys/sunddi.h>
  74 #include <sys/esunddi.h>
  75 #include <sys/fs/snode.h>
  76 
  77 
  78 #if DEBUG
  79 int segdev_debug;
  80 #define DEBUGF(level, args) { if (segdev_debug >= (level)) cmn_err args; }
  81 #else
  82 #define DEBUGF(level, args)
  83 #endif
  84 
  85 /* Default timeout for devmap context management */
  86 #define CTX_TIMEOUT_VALUE 0
  87 
  88 #define HOLD_DHP_LOCK(dhp)  if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) \
  89                         { mutex_enter(&dhp->dh_lock); }
  90 
  91 #define RELE_DHP_LOCK(dhp) if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) \
  92                         { mutex_exit(&dhp->dh_lock); }
  93 
  94 #define round_down_p2(a, s)     ((a) & ~((s) - 1))
  95 #define round_up_p2(a, s)       (((a) + (s) - 1) & ~((s) - 1))
  96 
  97 /*
  98  * VA_PA_ALIGNED checks to see if both VA and PA are on pgsize boundary
 * VA_PA_PGSIZE_ALIGNED checks to see if VA is aligned with PA w.r.t. pgsize
 100  */
 101 #define VA_PA_ALIGNED(uvaddr, paddr, pgsize)            \
 102         (((uvaddr | paddr) & (pgsize - 1)) == 0)
 103 #define VA_PA_PGSIZE_ALIGNED(uvaddr, paddr, pgsize)     \
 104         (((uvaddr ^ paddr) & (pgsize - 1)) == 0)
 105 
 106 #define vpgtob(n)       ((n) * sizeof (struct vpage))   /* For brevity */
 107 
 108 #define VTOCVP(vp)      (VTOS(vp)->s_commonvp)       /* we "know" it's an snode */
 109 
 110 static struct devmap_ctx *devmapctx_list = NULL;
 111 static struct devmap_softlock *devmap_slist = NULL;
 112 
 113 /*
 114  * mutex, vnode and page for the page of zeros we use for the trash mappings.
 115  * One trash page is allocated on the first ddi_umem_setup call that uses it
 116  * XXX Eventually, we may want to combine this with what segnf does when all
 117  * hat layers implement HAT_NOFAULT.
 118  *
 119  * The trash page is used when the backing store for a userland mapping is
 120  * removed but the application semantics do not take kindly to a SIGBUS.
 121  * In that scenario, the applications pages are mapped to some dummy page
 122  * which returns garbage on read and writes go into a common place.
 123  * (Perfect for NO_FAULT semantics)
 * The device driver is responsible for communicating to the app with some
 125  * other mechanism that such remapping has happened and the app should take
 126  * corrective action.
 127  * We can also use an anonymous memory page as there is no requirement to
 128  * keep the page locked, however this complicates the fault code. RFE.
 129  */
 130 static struct vnode trashvp;
 131 static struct page *trashpp;
 132 
 133 /* Non-pageable kernel memory is allocated from the umem_np_arena. */
 134 static vmem_t *umem_np_arena;
 135 
 136 /* Set the cookie to a value we know will never be a valid umem_cookie */
 137 #define DEVMAP_DEVMEM_COOKIE    ((ddi_umem_cookie_t)0x1)
 138 
 139 /*
 * Macros to check the type of a devmap handle
 141  */
 142 #define cookie_is_devmem(c)     \
 143         ((c) == (struct ddi_umem_cookie *)DEVMAP_DEVMEM_COOKIE)
 144 
 145 #define cookie_is_pmem(c)       \
 146         ((c) == (struct ddi_umem_cookie *)DEVMAP_PMEM_COOKIE)
 147 
 148 #define cookie_is_kpmem(c)      (!cookie_is_devmem(c) && !cookie_is_pmem(c) &&\
 149         ((c)->type == KMEM_PAGEABLE))
 150 
 151 #define dhp_is_devmem(dhp)      \
 152         (cookie_is_devmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
 153 
 154 #define dhp_is_pmem(dhp)        \
 155         (cookie_is_pmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
 156 
 157 #define dhp_is_kpmem(dhp)       \
 158         (cookie_is_kpmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
 159 
 160 /*
 161  * Private seg op routines.
 162  */
 163 static int      segdev_dup(struct seg *, struct seg *);
 164 static int      segdev_unmap(struct seg *, caddr_t, size_t);
 165 static void     segdev_free(struct seg *);
 166 static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
 167                     enum fault_type, enum seg_rw);
 168 static faultcode_t segdev_faulta(struct seg *, caddr_t);
 169 static int      segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
 170 static int      segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
 171 static void     segdev_badop(void);
 172 static int      segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
 173 static size_t   segdev_incore(struct seg *, caddr_t, size_t, char *);
 174 static int      segdev_lockop(struct seg *, caddr_t, size_t, int, int,
 175                     ulong_t *, size_t);
 176 static int      segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
 177 static u_offset_t       segdev_getoffset(struct seg *, caddr_t);
 178 static int      segdev_gettype(struct seg *, caddr_t);
 179 static int      segdev_getvp(struct seg *, caddr_t, struct vnode **);
 180 static int      segdev_advise(struct seg *, caddr_t, size_t, uint_t);
 181 static void     segdev_dump(struct seg *);
 182 static int      segdev_pagelock(struct seg *, caddr_t, size_t,
 183                     struct page ***, enum lock_type, enum seg_rw);
 184 static int      segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
 185 static int      segdev_getmemid(struct seg *, caddr_t, memid_t *);
 186 static lgrp_mem_policy_info_t   *segdev_getpolicy(struct seg *, caddr_t);
 187 static int      segdev_capable(struct seg *, segcapability_t);
 188 
 189 /*
 190  * XXX  this struct is used by rootnex_map_fault to identify
 191  *      the segment it has been passed. So if you make it
 192  *      "static" you'll need to fix rootnex_map_fault.
 193  */
 194 struct seg_ops segdev_ops = {
 195         segdev_dup,
 196         segdev_unmap,
 197         segdev_free,
 198         segdev_fault,
 199         segdev_faulta,
 200         segdev_setprot,
 201         segdev_checkprot,
 202         (int (*)())segdev_badop,        /* kluster */
 203         segdev_sync,                    /* sync */
 204         segdev_incore,
 205         segdev_lockop,                  /* lockop */
 206         segdev_getprot,
 207         segdev_getoffset,
 208         segdev_gettype,
 209         segdev_getvp,
 210         segdev_advise,
 211         segdev_dump,
 212         segdev_pagelock,
 213         segdev_setpagesize,
 214         segdev_getmemid,
 215         segdev_getpolicy,
 216         segdev_capable,
 217         seg_inherit_notsup
 218 };
 219 
 220 /*
 221  * Private segdev support routines
 222  */
 223 static struct segdev_data *sdp_alloc(void);
 224 
 225 static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
 226     size_t, enum seg_rw);
 227 
 228 static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
 229     struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);
 230 
 231 static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
 232     size_t, enum fault_type, enum seg_rw, devmap_handle_t *);
 233 
 234 static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
 235 static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
 236 static void devmap_softlock_rele(devmap_handle_t *);
 237 static void devmap_ctx_rele(devmap_handle_t *);
 238 
 239 static void devmap_ctxto(void *);
 240 
 241 static devmap_handle_t *devmap_find_handle(devmap_handle_t *dhp_head,
 242     caddr_t addr);
 243 
 244 static ulong_t devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
 245     ulong_t *opfn, ulong_t *pagesize);
 246 
 247 static void free_devmap_handle(devmap_handle_t *dhp);
 248 
 249 static int devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
 250     struct seg *newseg);
 251 
 252 static devmap_handle_t *devmap_handle_unmap(devmap_handle_t *dhp);
 253 
 254 static void devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len);
 255 
 256 static void devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr);
 257 
 258 static int devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
 259     offset_t off, size_t len, uint_t flags);
 260 
 261 static void devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len,
 262     caddr_t addr, size_t *llen, caddr_t *laddr);
 263 
 264 static void devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len);
 265 
 266 static void *devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag);
 267 static void devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size);
 268 
 269 static void *devmap_umem_alloc_np(size_t size, size_t flags);
 270 static void devmap_umem_free_np(void *addr, size_t size);
 271 
 272 /*
 273  * routines to lock and unlock underlying segkp segment for
 274  * KMEM_PAGEABLE type cookies.
 275  */
 276 static faultcode_t  acquire_kpmem_lock(struct ddi_umem_cookie *, size_t);
 277 static void release_kpmem_lock(struct ddi_umem_cookie *, size_t);
 278 
 279 /*
 280  * Routines to synchronize F_SOFTLOCK and F_INVAL faults for
 281  * drivers with devmap_access callbacks
 282  */
 283 static int devmap_softlock_enter(struct devmap_softlock *, size_t,
 284         enum fault_type);
 285 static void devmap_softlock_exit(struct devmap_softlock *, size_t,
 286         enum fault_type);
 287 
 288 static kmutex_t devmapctx_lock;
 289 
 290 static kmutex_t devmap_slock;
 291 
 292 /*
 293  * Initialize the thread callbacks and thread private data.
 294  */
 295 static struct devmap_ctx *
 296 devmap_ctxinit(dev_t dev, ulong_t id)
 297 {
 298         struct devmap_ctx       *devctx;
 299         struct devmap_ctx       *tmp;
 300         dev_info_t              *dip;
 301 
 302         tmp =  kmem_zalloc(sizeof (struct devmap_ctx), KM_SLEEP);
 303 
 304         mutex_enter(&devmapctx_lock);
 305 
 306         dip = e_ddi_hold_devi_by_dev(dev, 0);
 307         ASSERT(dip != NULL);
 308         ddi_release_devi(dip);
 309 
 310         for (devctx = devmapctx_list; devctx != NULL; devctx = devctx->next)
 311                 if ((devctx->dip == dip) && (devctx->id == id))
 312                         break;
 313 
 314         if (devctx == NULL) {
 315                 devctx = tmp;
 316                 devctx->dip = dip;
 317                 devctx->id = id;
 318                 mutex_init(&devctx->lock, NULL, MUTEX_DEFAULT, NULL);
 319                 cv_init(&devctx->cv, NULL, CV_DEFAULT, NULL);
 320                 devctx->next = devmapctx_list;
 321                 devmapctx_list = devctx;
 322         } else
 323                 kmem_free(tmp, sizeof (struct devmap_ctx));
 324 
 325         mutex_enter(&devctx->lock);
 326         devctx->refcnt++;
 327         mutex_exit(&devctx->lock);
 328         mutex_exit(&devmapctx_lock);
 329 
 330         return (devctx);
 331 }
 332 
 333 /*
 334  * Timeout callback called if a CPU has not given up the device context
 335  * within dhp->dh_timeout_length ticks
 336  */
 337 static void
 338 devmap_ctxto(void *data)
 339 {
 340         struct devmap_ctx *devctx = data;
 341 
 342         TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_CTXTO,
 343             "devmap_ctxto:timeout expired, devctx=%p", (void *)devctx);
 344         mutex_enter(&devctx->lock);
 345         /*
 346          * Set oncpu = 0 so the next mapping trying to get the device context
 347          * can.
 348          */
 349         devctx->oncpu = 0;
 350         devctx->timeout = 0;
 351         cv_signal(&devctx->cv);
 352         mutex_exit(&devctx->lock);
 353 }
 354 
 355 /*
 356  * Create a device segment.
 357  */
 358 int
 359 segdev_create(struct seg *seg, void *argsp)
 360 {
 361         struct segdev_data *sdp;
 362         struct segdev_crargs *a = (struct segdev_crargs *)argsp;
 363         devmap_handle_t *dhp = (devmap_handle_t *)a->devmap_data;
 364         int error;
 365 
 366         /*
 367          * Since the address space is "write" locked, we
 368          * don't need the segment lock to protect "segdev" data.
 369          */
 370         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 371 
 372         hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
 373 
 374         sdp = sdp_alloc();
 375 
 376         sdp->mapfunc = a->mapfunc;
 377         sdp->offset = a->offset;
 378         sdp->prot = a->prot;
 379         sdp->maxprot = a->maxprot;
 380         sdp->type = a->type;
 381         sdp->pageprot = 0;
 382         sdp->softlockcnt = 0;
 383         sdp->vpage = NULL;
 384 
 385         if (sdp->mapfunc == NULL)
 386                 sdp->devmap_data = dhp;
 387         else
 388                 sdp->devmap_data = dhp = NULL;
 389 
 390         sdp->hat_flags = a->hat_flags;
 391         sdp->hat_attr = a->hat_attr;
 392 
 393         /*
 394          * Currently, hat_flags supports only HAT_LOAD_NOCONSIST
 395          */
 396         ASSERT(!(sdp->hat_flags & ~HAT_LOAD_NOCONSIST));
 397 
 398         /*
 399          * Hold shadow vnode -- segdev only deals with
 400          * character (VCHR) devices. We use the common
 401          * vp to hang pages on.
 402          */
 403         sdp->vp = specfind(a->dev, VCHR);
 404         ASSERT(sdp->vp != NULL);
 405 
 406         seg->s_ops = &segdev_ops;
 407         seg->s_data = sdp;
 408 
 409         while (dhp != NULL) {
 410                 dhp->dh_seg = seg;
 411                 dhp = dhp->dh_next;
 412         }
 413 
 414         /*
 415          * Inform the vnode of the new mapping.
 416          */
 417         /*
 418          * It is ok to use pass sdp->maxprot to ADDMAP rather than to use
 419          * dhp specific maxprot because spec_addmap does not use maxprot.
 420          */
 421         error = VOP_ADDMAP(VTOCVP(sdp->vp), sdp->offset,
 422             seg->s_as, seg->s_base, seg->s_size,
 423             sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
 424 
 425         if (error != 0) {
 426                 sdp->devmap_data = NULL;
 427                 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
 428                     HAT_UNLOAD_UNMAP);
 429         } else {
 430                 /*
 431                  * Mappings of /dev/null don't count towards the VSZ of a
 432                  * process.  Mappings of /dev/null have no mapping type.
 433                  */
 434                 if ((SEGOP_GETTYPE(seg, (seg)->s_base) & (MAP_SHARED |
 435                     MAP_PRIVATE)) == 0) {
 436                         seg->s_as->a_resvsize -= seg->s_size;
 437                 }
 438         }
 439 
 440         return (error);
 441 }
 442 
 443 static struct segdev_data *
 444 sdp_alloc(void)
 445 {
 446         struct segdev_data *sdp;
 447 
 448         sdp = kmem_zalloc(sizeof (struct segdev_data), KM_SLEEP);
 449         rw_init(&sdp->lock, NULL, RW_DEFAULT, NULL);
 450 
 451         return (sdp);
 452 }
 453 
 454 /*
 455  * Duplicate seg and return new segment in newseg.
 456  */
 457 static int
 458 segdev_dup(struct seg *seg, struct seg *newseg)
 459 {
 460         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
 461         struct segdev_data *newsdp;
 462         devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
 463         size_t npages;
 464         int ret;
 465 
 466         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DUP,
 467             "segdev_dup:start dhp=%p, seg=%p", (void *)dhp, (void *)seg);
 468 
 469         DEBUGF(3, (CE_CONT, "segdev_dup: dhp %p seg %p\n",
 470             (void *)dhp, (void *)seg));
 471 
 472         /*
 473          * Since the address space is "write" locked, we
 474          * don't need the segment lock to protect "segdev" data.
 475          */
 476         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 477 
 478         newsdp = sdp_alloc();
 479 
 480         newseg->s_ops = seg->s_ops;
 481         newseg->s_data = (void *)newsdp;
 482 
 483         VN_HOLD(sdp->vp);
 484         newsdp->vp   = sdp->vp;
 485         newsdp->mapfunc = sdp->mapfunc;
 486         newsdp->offset       = sdp->offset;
 487         newsdp->pageprot = sdp->pageprot;
 488         newsdp->prot = sdp->prot;
 489         newsdp->maxprot = sdp->maxprot;
 490         newsdp->type = sdp->type;
 491         newsdp->hat_attr = sdp->hat_attr;
 492         newsdp->hat_flags = sdp->hat_flags;
 493         newsdp->softlockcnt = 0;
 494 
 495         /*
 496          * Initialize per page data if the segment we are
 497          * dup'ing has per page information.
 498          */
 499         npages = seg_pages(newseg);
 500 
 501         if (sdp->vpage != NULL) {
 502                 size_t nbytes = vpgtob(npages);
 503 
 504                 newsdp->vpage = kmem_zalloc(nbytes, KM_SLEEP);
 505                 bcopy(sdp->vpage, newsdp->vpage, nbytes);
 506         } else
 507                 newsdp->vpage = NULL;
 508 
 509         /*
 510          * duplicate devmap handles
 511          */
 512         if (dhp != NULL) {
 513                 ret = devmap_handle_dup(dhp,
 514                     (devmap_handle_t **)&newsdp->devmap_data, newseg);
 515                 if (ret != 0) {
 516                         TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DUP_CK1,
 517                             "segdev_dup:ret1 ret=%x, dhp=%p seg=%p",
 518                             ret, (void *)dhp, (void *)seg);
 519                         DEBUGF(1, (CE_CONT,
 520                             "segdev_dup: ret %x dhp %p seg %p\n",
 521                             ret, (void *)dhp, (void *)seg));
 522                         return (ret);
 523                 }
 524         }
 525 
 526         /*
 527          * Inform the common vnode of the new mapping.
 528          */
 529         return (VOP_ADDMAP(VTOCVP(newsdp->vp),
 530             newsdp->offset, newseg->s_as,
 531             newseg->s_base, newseg->s_size, newsdp->prot,
 532             newsdp->maxprot, sdp->type, CRED(), NULL));
 533 }
 534 
 535 /*
 536  * duplicate devmap handles
 537  */
 538 static int
 539 devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
 540     struct seg *newseg)
 541 {
 542         devmap_handle_t *newdhp_save = NULL;
 543         devmap_handle_t *newdhp = NULL;
 544         struct devmap_callback_ctl *callbackops;
 545 
 546         while (dhp != NULL) {
 547                 newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
 548 
 549                 /* Need to lock the original dhp while copying if REMAP */
 550                 HOLD_DHP_LOCK(dhp);
 551                 bcopy(dhp, newdhp, sizeof (devmap_handle_t));
 552                 RELE_DHP_LOCK(dhp);
 553                 newdhp->dh_seg = newseg;
 554                 newdhp->dh_next = NULL;
 555                 if (newdhp_save != NULL)
 556                         newdhp_save->dh_next = newdhp;
 557                 else
 558                         *new_dhp = newdhp;
 559                 newdhp_save = newdhp;
 560 
 561                 callbackops = &newdhp->dh_callbackops;
 562 
 563                 if (dhp->dh_softlock != NULL)
 564                         newdhp->dh_softlock = devmap_softlock_init(
 565                             newdhp->dh_dev,
 566                             (ulong_t)callbackops->devmap_access);
 567                 if (dhp->dh_ctx != NULL)
 568                         newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
 569                             (ulong_t)callbackops->devmap_access);
 570 
 571                 /*
 572                  * Initialize dh_lock if we want to do remap.
 573                  */
 574                 if (newdhp->dh_flags & DEVMAP_ALLOW_REMAP) {
 575                         mutex_init(&newdhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
 576                         newdhp->dh_flags |= DEVMAP_LOCK_INITED;
 577                 }
 578 
 579                 if (callbackops->devmap_dup != NULL) {
 580                         int ret;
 581 
 582                         /*
 583                          * Call the dup callback so that the driver can
 584                          * duplicate its private data.
 585                          */
 586                         ret = (*callbackops->devmap_dup)(dhp, dhp->dh_pvtp,
 587                             (devmap_cookie_t *)newdhp, &newdhp->dh_pvtp);
 588 
 589                         if (ret != 0) {
 590                                 /*
 591                                  * We want to free up this segment as the driver
 592                                  * has indicated that we can't dup it.  But we
 593                                  * don't want to call the drivers, devmap_unmap,
 594                                  * callback function as the driver does not
 595                                  * think this segment exists. The caller of
 596                                  * devmap_dup will call seg_free on newseg
 597                                  * as it was the caller that allocated the
 598                                  * segment.
 599                                  */
 600                                 DEBUGF(1, (CE_CONT, "devmap_handle_dup ERROR: "
 601                                     "newdhp %p dhp %p\n", (void *)newdhp,
 602                                     (void *)dhp));
 603                                 callbackops->devmap_unmap = NULL;
 604                                 return (ret);
 605                         }
 606                 }
 607 
 608                 dhp = dhp->dh_next;
 609         }
 610 
 611         return (0);
 612 }
 613 
 614 /*
 615  * Split a segment at addr for length len.
 616  */
 617 /*ARGSUSED*/
 618 static int
 619 segdev_unmap(struct seg *seg, caddr_t addr, size_t len)
 620 {
 621         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
 622         register struct segdev_data *nsdp;
 623         register struct seg *nseg;
 624         register size_t opages;         /* old segment size in pages */
 625         register size_t npages;         /* new segment size in pages */
 626         register size_t dpages;         /* pages being deleted (unmapped) */
 627         register size_t nbytes;
 628         devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
 629         devmap_handle_t *dhpp;
 630         devmap_handle_t *newdhp;
 631         struct devmap_callback_ctl *callbackops;
 632         caddr_t nbase;
 633         offset_t off;
 634         ulong_t nsize;
 635         size_t mlen, sz;
 636 
 637         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP,
 638             "segdev_unmap:start dhp=%p, seg=%p addr=%p len=%lx",
 639             (void *)dhp, (void *)seg, (void *)addr, len);
 640 
 641         DEBUGF(3, (CE_CONT, "segdev_unmap: dhp %p seg %p addr %p len %lx\n",
 642             (void *)dhp, (void *)seg, (void *)addr, len));
 643 
 644         /*
 645          * Since the address space is "write" locked, we
 646          * don't need the segment lock to protect "segdev" data.
 647          */
 648         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 649 
 650         if ((sz = sdp->softlockcnt) > 0) {
 651                 /*
 652                  * Fail the unmap if pages are SOFTLOCKed through this mapping.
 653                  * softlockcnt is protected from change by the as write lock.
 654                  */
 655                 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK1,
 656                     "segdev_unmap:error softlockcnt = %ld", sz);
 657                 DEBUGF(1, (CE_CONT, "segdev_unmap: softlockcnt %ld\n", sz));
 658                 return (EAGAIN);
 659         }
 660 
 661         /*
 662          * Check for bad sizes
 663          */
 664         if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
 665             (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
 666                 panic("segdev_unmap");
 667 
 668         if (dhp != NULL) {
 669                 devmap_handle_t *tdhp;
 670                 /*
 671                  * If large page size was used in hat_devload(),
 672                  * the same page size must be used in hat_unload().
 673                  */
 674                 dhpp = tdhp = devmap_find_handle(dhp, addr);
 675                 while (tdhp != NULL) {
 676                         if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
 677                                 break;
 678                         }
 679                         tdhp = tdhp->dh_next;
 680                 }
 681                 if (tdhp != NULL) {     /* found a dhp using large pages */
 682                         size_t slen = len;
 683                         size_t mlen;
 684                         size_t soff;
 685 
 686                         soff = (ulong_t)(addr - dhpp->dh_uvaddr);
 687                         while (slen != 0) {
 688                                 mlen = MIN(slen, (dhpp->dh_len - soff));
 689                                 hat_unload(seg->s_as->a_hat, dhpp->dh_uvaddr,
 690                                     dhpp->dh_len, HAT_UNLOAD_UNMAP);
 691                                 dhpp = dhpp->dh_next;
 692                                 ASSERT(slen >= mlen);
 693                                 slen -= mlen;
 694                                 soff = 0;
 695                         }
 696                 } else
 697                         hat_unload(seg->s_as->a_hat, addr, len,
 698                             HAT_UNLOAD_UNMAP);
 699         } else {
 700                 /*
 701                  * Unload any hardware translations in the range
 702                  * to be taken out.
 703                  */
 704                 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD_UNMAP);
 705         }
 706 
 707         /*
         * get the user offset which will be used in the driver callbacks
 709          */
 710         off = sdp->offset + (offset_t)(addr - seg->s_base);
 711 
 712         /*
 713          * Inform the vnode of the unmapping.
 714          */
 715         ASSERT(sdp->vp != NULL);
 716         (void) VOP_DELMAP(VTOCVP(sdp->vp), off, seg->s_as, addr, len,
 717             sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
 718 
 719         /*
 720          * Check for entire segment
 721          */
 722         if (addr == seg->s_base && len == seg->s_size) {
 723                 seg_free(seg);
 724                 return (0);
 725         }
 726 
 727         opages = seg_pages(seg);
 728         dpages = btop(len);
 729         npages = opages - dpages;
 730 
 731         /*
 732          * Check for beginning of segment
 733          */
 734         if (addr == seg->s_base) {
 735                 if (sdp->vpage != NULL) {
 736                         register struct vpage *ovpage;
 737 
 738                         ovpage = sdp->vpage; /* keep pointer to vpage */
 739 
 740                         nbytes = vpgtob(npages);
 741                         sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
 742                         bcopy(&ovpage[dpages], sdp->vpage, nbytes);
 743 
 744                         /* free up old vpage */
 745                         kmem_free(ovpage, vpgtob(opages));
 746                 }
 747 
 748                 /*
 749                  * free devmap handles from the beginning of the mapping.
 750                  */
 751                 if (dhp != NULL)
 752                         devmap_handle_unmap_head(dhp, len);
 753 
 754                 sdp->offset += (offset_t)len;
 755 
 756                 seg->s_base += len;
 757                 seg->s_size -= len;
 758 
 759                 return (0);
 760         }
 761 
 762         /*
 763          * Check for end of segment
 764          */
 765         if (addr + len == seg->s_base + seg->s_size) {
 766                 if (sdp->vpage != NULL) {
 767                         register struct vpage *ovpage;
 768 
 769                         ovpage = sdp->vpage; /* keep pointer to vpage */
 770 
 771                         nbytes = vpgtob(npages);
 772                         sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
 773                         bcopy(ovpage, sdp->vpage, nbytes);
 774 
 775                         /* free up old vpage */
 776                         kmem_free(ovpage, vpgtob(opages));
 777                 }
 778                 seg->s_size -= len;
 779 
 780                 /*
 781                  * free devmap handles from addr to the end of the mapping.
 782                  */
 783                 if (dhp != NULL)
 784                         devmap_handle_unmap_tail(dhp, addr);
 785 
 786                 return (0);
 787         }
 788 
 789         /*
 790          * The section to go is in the middle of the segment,
 791          * have to make it into two segments.  nseg is made for
 792          * the high end while seg is cut down at the low end.
 793          */
 794         nbase = addr + len;                             /* new seg base */
 795         nsize = (seg->s_base + seg->s_size) - nbase;      /* new seg size */
 796         seg->s_size = addr - seg->s_base;         /* shrink old seg */
 797         nseg = seg_alloc(seg->s_as, nbase, nsize);
 798         if (nseg == NULL)
 799                 panic("segdev_unmap seg_alloc");
 800 
 801         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK2,
 802             "segdev_unmap: seg=%p nseg=%p", (void *)seg, (void *)nseg);
 803         DEBUGF(3, (CE_CONT, "segdev_unmap: segdev_dup seg %p nseg %p\n",
 804             (void *)seg, (void *)nseg));
 805         nsdp = sdp_alloc();
 806 
 807         nseg->s_ops = seg->s_ops;
 808         nseg->s_data = (void *)nsdp;
 809 
 810         VN_HOLD(sdp->vp);
 811         nsdp->mapfunc = sdp->mapfunc;
 812         nsdp->offset = sdp->offset + (offset_t)(nseg->s_base - seg->s_base);
 813         nsdp->vp     = sdp->vp;
 814         nsdp->pageprot = sdp->pageprot;
 815         nsdp->prot   = sdp->prot;
 816         nsdp->maxprot = sdp->maxprot;
 817         nsdp->type = sdp->type;
 818         nsdp->hat_attr = sdp->hat_attr;
 819         nsdp->hat_flags = sdp->hat_flags;
 820         nsdp->softlockcnt = 0;
 821 
 822         /*
 823          * Initialize per page data if the segment we are
 824          * dup'ing has per page information.
 825          */
 826         if (sdp->vpage != NULL) {
 827                 /* need to split vpage into two arrays */
 828                 register size_t nnbytes;
 829                 register size_t nnpages;
 830                 register struct vpage *ovpage;
 831 
 832                 ovpage = sdp->vpage;         /* keep pointer to vpage */
 833 
 834                 npages = seg_pages(seg);        /* seg has shrunk */
 835                 nbytes = vpgtob(npages);
 836                 nnpages = seg_pages(nseg);
 837                 nnbytes = vpgtob(nnpages);
 838 
 839                 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
 840                 bcopy(ovpage, sdp->vpage, nbytes);
 841 
 842                 nsdp->vpage = kmem_alloc(nnbytes, KM_SLEEP);
 843                 bcopy(&ovpage[npages + dpages], nsdp->vpage, nnbytes);
 844 
 845                 /* free up old vpage */
 846                 kmem_free(ovpage, vpgtob(opages));
 847         } else
 848                 nsdp->vpage = NULL;
 849 
 850         /*
 851          * unmap dhps.
 852          */
 853         if (dhp == NULL) {
 854                 nsdp->devmap_data = NULL;
 855                 return (0);
 856         }
 857         while (dhp != NULL) {
 858                 callbackops = &dhp->dh_callbackops;
 859                 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK3,
 860                     "segdev_unmap: dhp=%p addr=%p", dhp, addr);
 861                 DEBUGF(3, (CE_CONT, "unmap: dhp %p addr %p uvaddr %p len %lx\n",
 862                     (void *)dhp, (void *)addr,
 863                     (void *)dhp->dh_uvaddr, dhp->dh_len));
 864 
 865                 if (addr == (dhp->dh_uvaddr + dhp->dh_len)) {
 866                         dhpp = dhp->dh_next;
 867                         dhp->dh_next = NULL;
 868                         dhp = dhpp;
 869                 } else if (addr > (dhp->dh_uvaddr + dhp->dh_len)) {
 870                         dhp = dhp->dh_next;
 871                 } else if (addr > dhp->dh_uvaddr &&
 872                     (addr + len) < (dhp->dh_uvaddr + dhp->dh_len)) {
 873                         /*
 874                          * <addr, addr+len> is enclosed by dhp.
 875                          * create a newdhp that begins at addr+len and
 876                          * ends at dhp->dh_uvaddr+dhp->dh_len.
 877                          */
 878                         newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
 879                         HOLD_DHP_LOCK(dhp);
 880                         bcopy(dhp, newdhp, sizeof (devmap_handle_t));
 881                         RELE_DHP_LOCK(dhp);
 882                         newdhp->dh_seg = nseg;
 883                         newdhp->dh_next = dhp->dh_next;
 884                         if (dhp->dh_softlock != NULL)
 885                                 newdhp->dh_softlock = devmap_softlock_init(
 886                                     newdhp->dh_dev,
 887                                     (ulong_t)callbackops->devmap_access);
 888                         if (dhp->dh_ctx != NULL)
 889                                 newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
 890                                     (ulong_t)callbackops->devmap_access);
 891                         if (newdhp->dh_flags & DEVMAP_LOCK_INITED) {
 892                                 mutex_init(&newdhp->dh_lock,
 893                                     NULL, MUTEX_DEFAULT, NULL);
 894                         }
 895                         if (callbackops->devmap_unmap != NULL)
 896                                 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
 897                                     off, len, dhp, &dhp->dh_pvtp,
 898                                     newdhp, &newdhp->dh_pvtp);
 899                         mlen = len + (addr - dhp->dh_uvaddr);
 900                         devmap_handle_reduce_len(newdhp, mlen);
 901                         nsdp->devmap_data = newdhp;
 902                         /* XX Changing len should recalculate LARGE flag */
 903                         dhp->dh_len = addr - dhp->dh_uvaddr;
 904                         dhpp = dhp->dh_next;
 905                         dhp->dh_next = NULL;
 906                         dhp = dhpp;
 907                 } else if ((addr > dhp->dh_uvaddr) &&
 908                     ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len))) {
 909                         mlen = dhp->dh_len + dhp->dh_uvaddr - addr;
 910                         /*
 911                          * <addr, addr+len> spans over dhps.
 912                          */
 913                         if (callbackops->devmap_unmap != NULL)
 914                                 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
 915                                     off, mlen, (devmap_cookie_t *)dhp,
 916                                     &dhp->dh_pvtp, NULL, NULL);
 917                         /* XX Changing len should recalculate LARGE flag */
 918                         dhp->dh_len = addr - dhp->dh_uvaddr;
 919                         dhpp = dhp->dh_next;
 920                         dhp->dh_next = NULL;
 921                         dhp = dhpp;
 922                         nsdp->devmap_data = dhp;
 923                 } else if ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len)) {
 924                         /*
 925                          * dhp is enclosed by <addr, addr+len>.
 926                          */
 927                         dhp->dh_seg = nseg;
 928                         nsdp->devmap_data = dhp;
 929                         dhp = devmap_handle_unmap(dhp);
 930                         nsdp->devmap_data = dhp; /* XX redundant? */
 931                 } else if (((addr + len) > dhp->dh_uvaddr) &&
 932                     ((addr + len) < (dhp->dh_uvaddr + dhp->dh_len))) {
 933                         mlen = addr + len - dhp->dh_uvaddr;
 934                         if (callbackops->devmap_unmap != NULL)
 935                                 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
 936                                     dhp->dh_uoff, mlen, NULL,
 937                                     NULL, dhp, &dhp->dh_pvtp);
 938                         devmap_handle_reduce_len(dhp, mlen);
 939                         nsdp->devmap_data = dhp;
 940                         dhp->dh_seg = nseg;
 941                         dhp = dhp->dh_next;
 942                 } else {
 943                         dhp->dh_seg = nseg;
 944                         dhp = dhp->dh_next;
 945                 }
 946         }
 947         return (0);
 948 }
 949 
 950 /*
 951  * Utility function handles reducing the length of a devmap handle during unmap
 952  * Note that is only used for unmapping the front portion of the handler,
 953  * i.e., we are bumping up the offset/pfn etc up by len
 954  * Do not use if reducing length at the tail.
 955  */
static void
devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len)
{
	struct ddi_umem_cookie *cp;
	struct devmap_pmem_cookie *pcp;
	/*
	 * adjust devmap handle fields
	 */
	ASSERT(len < dhp->dh_len);	/* must leave a non-empty handle */

	/* Make sure only page-aligned changes are done */
	ASSERT((len & PAGEOFFSET) == 0);

	/* shift the user offset/address and resource offset past the head */
	dhp->dh_len -= len;
	dhp->dh_uoff += (offset_t)len;
	dhp->dh_roff += (offset_t)len;
	dhp->dh_uvaddr += len;
	/* Need to grab dhp lock if REMAP */
	HOLD_DHP_LOCK(dhp);
	cp = dhp->dh_cookie;
	if (!(dhp->dh_flags & DEVMAP_MAPPING_INVALID)) {
		if (cookie_is_devmem(cp)) {
			/* device memory: advance the base page frame number */
			dhp->dh_pfn += btop(len);
		} else if (cookie_is_pmem(cp)) {
			/* pmem cookie: dh_roff adjustment above suffices */
			pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
			ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
			    dhp->dh_roff < ptob(pcp->dp_npages));
		} else {
			/* umem cookie: advance the cookie virtual address */
			ASSERT(dhp->dh_roff < cp->size);
			ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
			    dhp->dh_cvaddr < (cp->cvaddr + cp->size));
			ASSERT((dhp->dh_cvaddr + len) <=
			    (cp->cvaddr + cp->size));

			dhp->dh_cvaddr += len;
		}
	}
	/* XXX - Should recalculate the DEVMAP_FLAG_LARGE after changes */
	RELE_DHP_LOCK(dhp);
}
 996 
 997 /*
 998  * Free devmap handle, dhp.
 999  * Return the next devmap handle on the linked list.
1000  */
static devmap_handle_t *
devmap_handle_unmap(devmap_handle_t *dhp)
{
	struct devmap_callback_ctl *callbackops = &dhp->dh_callbackops;
	struct segdev_data *sdp = (struct segdev_data *)dhp->dh_seg->s_data;
	devmap_handle_t *dhpp = (devmap_handle_t *)sdp->devmap_data;

	ASSERT(dhp != NULL);

	/*
	 * before we free up dhp, call the driver's devmap_unmap entry point
	 * to free resources allocated for this dhp.
	 */
	if (callbackops->devmap_unmap != NULL) {
		(*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp, dhp->dh_uoff,
		    dhp->dh_len, NULL, NULL, NULL, NULL);
	}

	/* Unlink dhp from the segment's singly-linked handle list. */
	if (dhpp == dhp) {	/* releasing first dhp, change sdp data */
		sdp->devmap_data = dhp->dh_next;
	} else {
		/* walk forward to dhp's predecessor */
		while (dhpp->dh_next != dhp) {
			dhpp = dhpp->dh_next;
		}
		dhpp->dh_next = dhp->dh_next;
	}
	dhpp = dhp->dh_next; /* return value is next dhp in chain */

	/* release per-handle softlock/context state, if any */
	if (dhp->dh_softlock != NULL)
		devmap_softlock_rele(dhp);

	if (dhp->dh_ctx != NULL)
		devmap_ctx_rele(dhp);

	if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
		mutex_destroy(&dhp->dh_lock);
	}
	kmem_free(dhp, sizeof (devmap_handle_t));

	return (dhpp);
}
1042 
1043 /*
1044  * Free complete devmap handles from dhp for len bytes
1045  * dhp can be either the first handle or a subsequent handle
1046  */
1047 static void
1048 devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len)
1049 {
1050         struct devmap_callback_ctl *callbackops;
1051 
1052         /*
1053          * free the devmap handles covered by len.
1054          */
1055         while (len >= dhp->dh_len) {
1056                 len -= dhp->dh_len;
1057                 dhp = devmap_handle_unmap(dhp);
1058         }
1059         if (len != 0) { /* partial unmap at head of first remaining dhp */
1060                 callbackops = &dhp->dh_callbackops;
1061 
1062                 /*
1063                  * Call the unmap callback so the drivers can make
1064                  * adjustment on its private data.
1065                  */
1066                 if (callbackops->devmap_unmap != NULL)
1067                         (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
1068                             dhp->dh_uoff, len, NULL, NULL, dhp, &dhp->dh_pvtp);
1069                 devmap_handle_reduce_len(dhp, len);
1070         }
1071 }
1072 
1073 /*
1074  * Free devmap handles to truncate  the mapping after addr
1075  * RFE: Simpler to pass in dhp pointing at correct dhp (avoid find again)
1076  *      Also could then use the routine in middle unmap case too
1077  */
static void
devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr)
{
	register struct seg *seg = dhp->dh_seg;
	register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
	register devmap_handle_t *dhph = (devmap_handle_t *)sdp->devmap_data;
	struct devmap_callback_ctl *callbackops;
	register devmap_handle_t *dhpp;
	size_t maplen;
	ulong_t off;
	size_t len;

	/* bytes of the handle containing addr that stay mapped below addr */
	maplen = (size_t)(addr - dhp->dh_uvaddr);

	/* start the walk at the handle that covers addr */
	dhph = devmap_find_handle(dhph, addr);

	while (dhph != NULL) {
		if (maplen == 0) {
			/* handle lies entirely above addr: free it */
			dhph =  devmap_handle_unmap(dhph);
		} else {
			/*
			 * addr falls inside this handle: truncate it to
			 * maplen bytes and sever the chain here; later
			 * iterations (maplen == 0) free the remainder.
			 */
			callbackops = &dhph->dh_callbackops;
			len = dhph->dh_len - maplen;
			off = (ulong_t)sdp->offset + (addr - seg->s_base);
			/*
			 * Call the unmap callback so the driver
			 * can make adjustments on its private data.
			 */
			if (callbackops->devmap_unmap != NULL)
				(*callbackops->devmap_unmap)(dhph,
				    dhph->dh_pvtp, off, len,
				    (devmap_cookie_t *)dhph,
				    &dhph->dh_pvtp, NULL, NULL);
			/* XXX Reducing len needs to recalculate LARGE flag */
			dhph->dh_len = maplen;
			maplen = 0;
			dhpp = dhph->dh_next;
			dhph->dh_next = NULL;
			dhph = dhpp;
		}
	} /* end while */
}
1118 
1119 /*
1120  * Free a segment.
1121  */
1122 static void
1123 segdev_free(struct seg *seg)
1124 {
1125         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1126         devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
1127 
1128         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FREE,
1129             "segdev_free: dhp=%p seg=%p", (void *)dhp, (void *)seg);
1130         DEBUGF(3, (CE_CONT, "segdev_free: dhp %p seg %p\n",
1131             (void *)dhp, (void *)seg));
1132 
1133         /*
1134          * Since the address space is "write" locked, we
1135          * don't need the segment lock to protect "segdev" data.
1136          */
1137         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1138 
1139         while (dhp != NULL)
1140                 dhp = devmap_handle_unmap(dhp);
1141 
1142         VN_RELE(sdp->vp);
1143         if (sdp->vpage != NULL)
1144                 kmem_free(sdp->vpage, vpgtob(seg_pages(seg)));
1145 
1146         rw_destroy(&sdp->lock);
1147         kmem_free(sdp, sizeof (*sdp));
1148 }
1149 
1150 static void
1151 free_devmap_handle(devmap_handle_t *dhp)
1152 {
1153         register devmap_handle_t *dhpp;
1154 
1155         /*
1156          * free up devmap handle
1157          */
1158         while (dhp != NULL) {
1159                 dhpp = dhp->dh_next;
1160                 if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
1161                         mutex_destroy(&dhp->dh_lock);
1162                 }
1163 
1164                 if (dhp->dh_softlock != NULL)
1165                         devmap_softlock_rele(dhp);
1166 
1167                 if (dhp->dh_ctx != NULL)
1168                         devmap_ctx_rele(dhp);
1169 
1170                 kmem_free(dhp, sizeof (devmap_handle_t));
1171                 dhp = dhpp;
1172         }
1173 }
1174 
1175 /*
1176  * routines to lock and unlock underlying segkp segment for
1177  * KMEM_PAGEABLE type cookies.
1178  * segkp only allows a single pending F_SOFTLOCK
1179  * we keep track of number of locks in the cookie so we can
1180  * have multiple pending faults and manage the calls to segkp.
1181  * RFE: if segkp supports either pagelock or can support multiple
1182  * calls to F_SOFTLOCK, then these routines can go away.
1183  *      If pagelock, segdev_faultpage can fault on a page by page basis
1184  *              and simplifies the code quite a bit.
1185  *      if multiple calls allowed but not partial ranges, then need for
1186  *      cookie->lock and locked count goes away, code can call as_fault directly
1187  */
1188 static faultcode_t
1189 acquire_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
1190 {
1191         int err = 0;
1192         ASSERT(cookie_is_kpmem(cookie));
1193         /*
1194          * Fault in pages in segkp with F_SOFTLOCK.
1195          * We want to hold the lock until all pages have been loaded.
1196          * segkp only allows single caller to hold SOFTLOCK, so cookie
1197          * holds a count so we dont call into segkp multiple times
1198          */
1199         mutex_enter(&cookie->lock);
1200 
1201         /*
1202          * Check for overflow in locked field
1203          */
1204         if ((UINT32_MAX - cookie->locked) < npages) {
1205                 err = FC_MAKE_ERR(ENOMEM);
1206         } else if (cookie->locked == 0) {
1207                 /* First time locking */
1208                 err = as_fault(kas.a_hat, &kas, cookie->cvaddr,
1209                     cookie->size, F_SOFTLOCK, PROT_READ|PROT_WRITE);
1210         }
1211         if (!err) {
1212                 cookie->locked += npages;
1213         }
1214         mutex_exit(&cookie->lock);
1215         return (err);
1216 }
1217 
1218 static void
1219 release_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
1220 {
1221         mutex_enter(&cookie->lock);
1222         ASSERT(cookie_is_kpmem(cookie));
1223         ASSERT(cookie->locked >= npages);
1224         cookie->locked -= (uint_t)npages;
1225         if (cookie->locked == 0) {
1226                 /* Last unlock */
1227                 if (as_fault(kas.a_hat, &kas, cookie->cvaddr,
1228                     cookie->size, F_SOFTUNLOCK, PROT_READ|PROT_WRITE))
1229                         panic("segdev releasing kpmem lock %p", (void *)cookie);
1230         }
1231         mutex_exit(&cookie->lock);
1232 }
1233 
1234 /*
1235  * Routines to synchronize F_SOFTLOCK and F_INVAL faults for
1236  * drivers with devmap_access callbacks
1237  * slock->softlocked basically works like a rw lock
1238  *      -ve counts => F_SOFTLOCK in progress
1239  *      +ve counts => F_INVAL/F_PROT in progress
1240  * We allow only one F_SOFTLOCK at a time
1241  * but can have multiple pending F_INVAL/F_PROT calls
1242  *
1243  * This routine waits using cv_wait_sig so killing processes is more graceful
1244  * Returns EINTR if coming out of this routine due to a signal, 0 otherwise
1245  */
1246 static int devmap_softlock_enter(
1247         struct devmap_softlock *slock,
1248         size_t npages,
1249         enum fault_type type)
1250 {
1251         if (npages == 0)
1252                 return (0);
1253         mutex_enter(&(slock->lock));
1254         switch (type) {
1255         case F_SOFTLOCK :
1256                 while (slock->softlocked) {
1257                         if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
1258                                 /* signalled */
1259                                 mutex_exit(&(slock->lock));
1260                                 return (EINTR);
1261                         }
1262                 }
1263                 slock->softlocked -= npages; /* -ve count => locked */
1264                 break;
1265         case F_INVAL :
1266         case F_PROT :
1267                 while (slock->softlocked < 0)
1268                         if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
1269                                 /* signalled */
1270                                 mutex_exit(&(slock->lock));
1271                                 return (EINTR);
1272                         }
1273                 slock->softlocked += npages; /* +ve count => f_invals */
1274                 break;
1275         default:
1276                 ASSERT(0);
1277         }
1278         mutex_exit(&(slock->lock));
1279         return (0);
1280 }
1281 
1282 static void devmap_softlock_exit(
1283         struct devmap_softlock *slock,
1284         size_t npages,
1285         enum fault_type type)
1286 {
1287         if (slock == NULL)
1288                 return;
1289         mutex_enter(&(slock->lock));
1290         switch (type) {
1291         case F_SOFTLOCK :
1292                 ASSERT(-slock->softlocked >= npages);
1293                 slock->softlocked += npages; /* -ve count is softlocked */
1294                 if (slock->softlocked == 0)
1295                         cv_signal(&slock->cv);
1296                 break;
1297         case F_INVAL :
1298         case F_PROT:
1299                 ASSERT(slock->softlocked >= npages);
1300                 slock->softlocked -= npages;
1301                 if (slock->softlocked == 0)
1302                         cv_signal(&slock->cv);
1303                 break;
1304         default:
1305                 ASSERT(0);
1306         }
1307         mutex_exit(&(slock->lock));
1308 }
1309 
1310 /*
1311  * Do a F_SOFTUNLOCK call over the range requested.
1312  * The range must have already been F_SOFTLOCK'ed.
1313  * The segment lock should be held, (but not the segment private lock?)
1314  *  The softunlock code below does not adjust for large page sizes
1315  *      assumes the caller already did any addr/len adjustments for
1316  *      pagesize mappings before calling.
1317  */
/*ARGSUSED*/
static void
segdev_softunlock(
	struct hat *hat,		/* the hat */
	struct seg *seg,		/* seg_dev of interest */
	caddr_t addr,			/* base address of range */
	size_t len,			/* number of bytes */
	enum seg_rw rw)			/* type of access at fault */
{
	struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
	devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SOFTUNLOCK,
	    "segdev_softunlock:dhp_head=%p sdp=%p addr=%p len=%lx",
	    dhp_head, sdp, addr, len);
	DEBUGF(3, (CE_CONT, "segdev_softunlock: dhp %p lockcnt %lx "
	    "addr %p len %lx\n",
	    (void *)dhp_head, sdp->softlockcnt, (void *)addr, len));

	/* drop the hat-layer lock taken at F_SOFTLOCK time */
	hat_unlock(hat, addr, len);

	if (dhp_head != NULL) {
		devmap_handle_t *dhp;
		size_t mlen;
		size_t tlen = len;	/* bytes still to be unlocked */
		ulong_t off;

		dhp = devmap_find_handle(dhp_head, addr);
		ASSERT(dhp != NULL);

		/* walk the handles covering [addr, addr + len) */
		off = (ulong_t)(addr - dhp->dh_uvaddr);
		while (tlen != 0) {
			/* portion of the range within this handle */
			mlen = MIN(tlen, (dhp->dh_len - off));

			/*
			 * unlock segkp memory, locked during F_SOFTLOCK
			 */
			if (dhp_is_kpmem(dhp)) {
				release_kpmem_lock(
				    (struct ddi_umem_cookie *)dhp->dh_cookie,
				    btopr(mlen));
			}

			/*
			 * Do the softlock accounting for devmap_access
			 */
			if (dhp->dh_callbackops.devmap_access != NULL) {
				devmap_softlock_exit(dhp->dh_softlock,
				    btopr(mlen), F_SOFTLOCK);
			}

			tlen -= mlen;
			dhp = dhp->dh_next;
			off = 0;
		}
	}

	/* update the segment-wide count of softlocked pages */
	mutex_enter(&freemem_lock);
	ASSERT(sdp->softlockcnt >= btopr(len));
	sdp->softlockcnt -= btopr(len);
	mutex_exit(&freemem_lock);
	if (sdp->softlockcnt == 0) {
		/*
		 * All SOFTLOCKS are gone. Wakeup any waiting
		 * unmappers so they can try again to unmap.
		 * Check for waiters first without the mutex
		 * held so we don't always grab the mutex on
		 * softunlocks.
		 */
		if (AS_ISUNMAPWAIT(seg->s_as)) {
			mutex_enter(&seg->s_as->a_contents);
			if (AS_ISUNMAPWAIT(seg->s_as)) {
				AS_CLRUNMAPWAIT(seg->s_as);
				cv_broadcast(&seg->s_as->a_cv);
			}
			mutex_exit(&seg->s_as->a_contents);
		}
	}

}
1398 
1399 /*
1400  * Handle fault for a single page.
1401  * Done in a separate routine so we can handle errors more easily.
1402  * This routine is called only from segdev_faultpages()
1403  * when looping over the range of addresses requested. The segment lock is held.
1404  */
1405 static faultcode_t
1406 segdev_faultpage(
1407         struct hat *hat,                /* the hat */
1408         struct seg *seg,                /* seg_dev of interest */
1409         caddr_t addr,                   /* address in as */
1410         struct vpage *vpage,            /* pointer to vpage for seg, addr */
1411         enum fault_type type,           /* type of fault */
1412         enum seg_rw rw,                 /* type of access at fault */
1413         devmap_handle_t *dhp)           /* devmap handle if any for this page */
1414 {
1415         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1416         uint_t prot;
1417         pfn_t pfnum = PFN_INVALID;
1418         u_offset_t offset;
1419         uint_t hat_flags;
1420         dev_info_t *dip;
1421 
1422         TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE,
1423             "segdev_faultpage: dhp=%p seg=%p addr=%p", dhp, seg, addr);
1424         DEBUGF(8, (CE_CONT, "segdev_faultpage: dhp %p seg %p addr %p \n",
1425             (void *)dhp, (void *)seg, (void *)addr));
1426 
1427         /*
1428          * Initialize protection value for this page.
1429          * If we have per page protection values check it now.
1430          */
1431         if (sdp->pageprot) {
1432                 uint_t protchk;
1433 
1434                 switch (rw) {
1435                 case S_READ:
1436                         protchk = PROT_READ;
1437                         break;
1438                 case S_WRITE:
1439                         protchk = PROT_WRITE;
1440                         break;
1441                 case S_EXEC:
1442                         protchk = PROT_EXEC;
1443                         break;
1444                 case S_OTHER:
1445                 default:
1446                         protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
1447                         break;
1448                 }
1449 
1450                 prot = VPP_PROT(vpage);
1451                 if ((prot & protchk) == 0)
1452                         return (FC_PROT);       /* illegal access type */
1453         } else {
1454                 prot = sdp->prot;
1455                 /* caller has already done segment level protection check */
1456         }
1457 
1458         if (type == F_SOFTLOCK) {
1459                 mutex_enter(&freemem_lock);
1460                 sdp->softlockcnt++;
1461                 mutex_exit(&freemem_lock);
1462         }
1463 
1464         hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
1465         offset = sdp->offset + (u_offset_t)(addr - seg->s_base);
1466         /*
1467          * In the devmap framework, sdp->mapfunc is set to NULL.  we can get
1468          * pfnum from dhp->dh_pfn (at beginning of segment) and offset from
1469          * seg->s_base.
1470          */
1471         if (dhp == NULL) {
1472                 /* If segment has devmap_data, then dhp should be non-NULL */
1473                 ASSERT(sdp->devmap_data == NULL);
1474                 pfnum = (pfn_t)cdev_mmap(sdp->mapfunc, sdp->vp->v_rdev,
1475                     (off_t)offset, prot);
1476                 prot |= sdp->hat_attr;
1477         } else {
1478                 ulong_t off;
1479                 struct ddi_umem_cookie *cp;
1480                 struct devmap_pmem_cookie *pcp;
1481 
1482                 /* ensure the dhp passed in contains addr. */
1483                 ASSERT(dhp == devmap_find_handle(
1484                     (devmap_handle_t *)sdp->devmap_data, addr));
1485 
1486                 off = addr - dhp->dh_uvaddr;
1487 
1488                 /*
1489                  * This routine assumes that the caller makes sure that the
1490                  * fields in dhp used below are unchanged due to remap during
1491                  * this call. Caller does HOLD_DHP_LOCK if neeed
1492                  */
1493                 cp = dhp->dh_cookie;
1494                 if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
1495                         pfnum = PFN_INVALID;
1496                 } else if (cookie_is_devmem(cp)) {
1497                         pfnum = dhp->dh_pfn + btop(off);
1498                 } else if (cookie_is_pmem(cp)) {
1499                         pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
1500                         ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
1501                             dhp->dh_roff < ptob(pcp->dp_npages));
1502                         pfnum = page_pptonum(
1503                             pcp->dp_pparray[btop(off + dhp->dh_roff)]);
1504                 } else {
1505                         ASSERT(dhp->dh_roff < cp->size);
1506                         ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
1507                             dhp->dh_cvaddr < (cp->cvaddr + cp->size));
1508                         ASSERT((dhp->dh_cvaddr + off) <=
1509                             (cp->cvaddr + cp->size));
1510                         ASSERT((dhp->dh_cvaddr + off + PAGESIZE) <=
1511                             (cp->cvaddr + cp->size));
1512 
1513                         switch (cp->type) {
1514                         case UMEM_LOCKED :
1515                                 if (cp->pparray != NULL) {
1516                                         ASSERT((dhp->dh_roff &
1517                                             PAGEOFFSET) == 0);
1518                                         pfnum = page_pptonum(
1519                                             cp->pparray[btop(off +
1520                                             dhp->dh_roff)]);
1521                                 } else {
1522                                         pfnum = hat_getpfnum(
1523                                             ((proc_t *)cp->procp)->p_as->a_hat,
1524                                             cp->cvaddr + off);
1525                                 }
1526                         break;
1527                         case UMEM_TRASH :
1528                                 pfnum = page_pptonum(trashpp);
1529                                 /*
1530                                  * We should set hat_flags to HAT_NOFAULT also
1531                                  * However, not all hat layers implement this
1532                                  */
1533                                 break;
1534                         case KMEM_PAGEABLE:
1535                         case KMEM_NON_PAGEABLE:
1536                                 pfnum = hat_getpfnum(kas.a_hat,
1537                                     dhp->dh_cvaddr + off);
1538                                 break;
1539                         default :
1540                                 pfnum = PFN_INVALID;
1541                                 break;
1542                         }
1543                 }
1544                 prot |= dhp->dh_hat_attr;
1545         }
1546         if (pfnum == PFN_INVALID) {
1547                 return (FC_MAKE_ERR(EFAULT));
1548         }
1549         /* prot should already be OR'ed in with hat_attributes if needed */
1550 
1551         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE_CK1,
1552             "segdev_faultpage: pfnum=%lx memory=%x prot=%x flags=%x",
1553             pfnum, pf_is_memory(pfnum), prot, hat_flags);
1554         DEBUGF(9, (CE_CONT, "segdev_faultpage: pfnum %lx memory %x "
1555             "prot %x flags %x\n", pfnum, pf_is_memory(pfnum), prot, hat_flags));
1556 
1557         if (pf_is_memory(pfnum) || (dhp != NULL)) {
1558                 /*
1559                  * It's not _really_ required here to pass sdp->hat_flags
1560                  * to hat_devload even though we do it.
1561                  * This is because hat figures it out DEVMEM mappings
1562                  * are non-consistent, anyway.
1563                  */
1564                 hat_devload(hat, addr, PAGESIZE, pfnum,
1565                     prot, hat_flags | sdp->hat_flags);
1566                 return (0);
1567         }
1568 
1569         /*
1570          * Fall through to the case where devmap is not used and need to call
1571          * up the device tree to set up the mapping
1572          */
1573 
1574         dip = VTOS(VTOCVP(sdp->vp))->s_dip;
1575         ASSERT(dip);
1576 
1577         /*
1578          * When calling ddi_map_fault, we do not OR in sdp->hat_attr
1579          * This is because this calls drivers which may not expect
1580          * prot to have any other values than PROT_ALL
1581          * The root nexus driver has a hack to peek into the segment
1582          * structure and then OR in sdp->hat_attr.
1583          * XX In case the bus_ops interfaces are ever revisited
1584          * we need to fix this. prot should include other hat attributes
1585          */
1586         if (ddi_map_fault(dip, hat, seg, addr, NULL, pfnum, prot & PROT_ALL,
1587             (uint_t)(type == F_SOFTLOCK)) != DDI_SUCCESS) {
1588                 return (FC_MAKE_ERR(EFAULT));
1589         }
1590         return (0);
1591 }
1592 
/*
 * Fault handling entry point (seg_ops fault) for seg_dev segments.
 *
 * Resolves a fault over [addr, addr + len) either by calling
 * segdev_faultpages() directly (non-devmap segments), or by walking the
 * devmap handle chain and dispatching each covered piece to the driver's
 * devmap_access(9E) callback when one is registered (falling back to
 * segdev_faultpages() when it is not).
 *
 * Returns 0 on success or a faultcode_t (FC_*) on failure.
 */
static faultcode_t
segdev_fault(
	struct hat *hat,		/* the hat */
	struct seg *seg,		/* the seg_dev of interest */
	caddr_t addr,			/* the address of the fault */
	size_t len,			/* the length of the range */
	enum fault_type type,		/* type of fault */
	enum seg_rw rw)			/* type of access at fault */
{
	struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
	devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
	devmap_handle_t *dhp;
	struct devmap_softlock *slock = NULL;
	ulong_t slpage = 0;
	ulong_t off;
	caddr_t maddr = addr;
	int err;
	int err_is_faultcode = 0;

	TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_FAULT,
	    "segdev_fault: dhp_head=%p seg=%p addr=%p len=%lx type=%x",
	    (void *)dhp_head, (void *)seg, (void *)addr, len, type);
	DEBUGF(7, (CE_CONT, "segdev_fault: dhp_head %p seg %p "
	    "addr %p len %lx type %x\n",
	    (void *)dhp_head, (void *)seg, (void *)addr, len, type));

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/* Handle non-devmap case */
	if (dhp_head == NULL)
		return (segdev_faultpages(hat, seg, addr, len, type, rw, NULL));

	/* Find devmap handle covering the faulting address */
	if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
		return (FC_NOMAP);

	/*
	 * The seg_dev driver does not implement copy-on-write,
	 * and always loads translations with maximal allowed permissions
	 * but we got a fault trying to access the device.
	 * Servicing the fault is not going to result in any better result
	 * RFE: If we want devmap_access callbacks to be involved in F_PROT
	 *	faults, then the code below is written for that
	 *	Pending resolution of the following:
	 *	- determine if the F_INVAL/F_SOFTLOCK syncing
	 *	is needed for F_PROT also or not. The code below assumes it does
	 *	- If driver sees F_PROT and calls devmap_load with same type,
	 *	then segdev_faultpages will fail with FC_PROT anyway, need to
	 *	change that so calls from devmap_load to segdev_faultpages for
	 *	F_PROT type are retagged to F_INVAL.
	 * RFE: Today we don't have drivers that use devmap and want to handle
	 *	F_PROT calls. The code in segdev_fault* is written to allow
	 *	this case but is not tested. A driver that needs this capability
	 *	should be able to remove the short-circuit case; resolve the
	 *	above issues and "should" work.
	 */
	if (type == F_PROT) {
		return (FC_PROT);
	}

	/*
	 * Loop through dhp list calling devmap_access or segdev_faultpages for
	 * each devmap handle.
	 * drivers which implement devmap_access can interpose on faults and do
	 * device-appropriate special actions before calling devmap_load.
	 */

	/*
	 * Unfortunately, this simple loop has turned out to expose a variety
	 * of complex problems which results in the following convoluted code.
	 *
	 * First, a desire to handle a serialization of F_SOFTLOCK calls
	 * to the driver within the framework.
	 *	This results in a dh_softlock structure that is on a per device
	 *	(or device instance) basis and serializes devmap_access calls.
	 *	Ideally we would need to do this for underlying
	 *	memory/device regions that are being faulted on
	 *	but that is hard to identify and with REMAP, harder
	 * Second, a desire to serialize F_INVAL(and F_PROT) calls w.r.t.
	 *	to F_SOFTLOCK calls to the driver.
	 * These serializations are to simplify the driver programmer model.
	 * To support these two features, the code first goes through the
	 *	devmap handles and counts the pages (slpage) that are covered
	 *	by devmap_access callbacks.
	 * This part ends with a devmap_softlock_enter call
	 *	which allows only one F_SOFTLOCK active on a device instance,
	 *	but multiple F_INVAL/F_PROTs can be active except when a
	 *	F_SOFTLOCK is active
	 *
	 * Next, we don't short-circuit the fault code upfront to call
	 *	segdev_softunlock for F_SOFTUNLOCK, because we must use
	 *	the same length when we softlock and softunlock.
	 *
	 *	-Hat layers may not support softunlocking lengths less than the
	 *	original length when there is large page support.
	 *	-kpmem locking is dependent on keeping the lengths same.
	 *	-if drivers handled F_SOFTLOCK, they probably also expect to
	 *		see an F_SOFTUNLOCK of the same length
	 *	Hence, if extending lengths during softlock,
	 *	softunlock has to make the same adjustments and goes through
	 *	the same loop calling segdev_faultpages/segdev_softunlock
	 *	But some of the synchronization and error handling is different
	 */

	if (type != F_SOFTUNLOCK) {
		devmap_handle_t *dhpp = dhp;
		size_t slen = len;

		/*
		 * Calculate count of pages that are :
		 * a) within the (potentially extended) fault region
		 * b) AND covered by devmap handle with devmap_access
		 */
		off = (ulong_t)(addr - dhpp->dh_uvaddr);
		while (slen != 0) {
			size_t mlen;

			/*
			 * Softlocking on a region that allows remap is
			 * unsupported due to unresolved locking issues
			 * XXX: unclear what these are?
			 *	One potential is that if there is a pending
			 *	softlock, then a remap should not be allowed
			 *	until the unlock is done. This is easily
			 *	fixed by returning error in devmap*remap on
			 *	checking the dh->dh_softlock->softlocked value
			 */
			if ((type == F_SOFTLOCK) &&
			    (dhpp->dh_flags & DEVMAP_ALLOW_REMAP)) {
				return (FC_NOSUPPORT);
			}

			mlen = MIN(slen, (dhpp->dh_len - off));
			if (dhpp->dh_callbackops.devmap_access) {
				size_t llen;
				caddr_t laddr;
				/*
				 * use extended length for large page mappings
				 */
				HOLD_DHP_LOCK(dhpp);
				if ((sdp->pageprot == 0) &&
				    (dhpp->dh_flags & DEVMAP_FLAG_LARGE)) {
					devmap_get_large_pgsize(dhpp,
					    mlen, maddr, &llen, &laddr);
				} else {
					llen = mlen;
				}
				RELE_DHP_LOCK(dhpp);

				slpage += btopr(llen);
				slock = dhpp->dh_softlock;
			}
			maddr += mlen;
			ASSERT(slen >= mlen);
			slen -= mlen;
			dhpp = dhpp->dh_next;
			off = 0;
		}
		/*
		 * synchronize with other faulting threads and wait till safe
		 * devmap_softlock_enter might return due to signal in cv_wait
		 *
		 * devmap_softlock_enter has to be called outside of while loop
		 * to prevent a deadlock if len spans over multiple dhps.
		 * dh_softlock is based on device instance and if multiple dhps
		 * use the same device instance, the second dhp's LOCK call
		 * will hang waiting on the first to complete.
		 * devmap_setup verifies that slocks in a dhp_chain are same.
		 * RFE: this deadlock only hold true for F_SOFTLOCK. For
		 *	F_INVAL/F_PROT, since we now allow multiple in parallel,
		 *	we could have done the softlock_enter inside the loop
		 *	and supported multi-dhp mappings with dissimilar devices
		 */
		if (err = devmap_softlock_enter(slock, slpage, type))
			return (FC_MAKE_ERR(err));
	}

	/* reset 'maddr' to the start addr of the range of fault. */
	maddr = addr;

	/* calculate the offset corresponding to 'addr' in the first dhp. */
	off = (ulong_t)(addr - dhp->dh_uvaddr);

	/*
	 * The fault length may span over multiple dhps.
	 * Loop until the total length is satisfied.
	 */
	while (len != 0) {
		size_t llen;
		size_t mlen;
		caddr_t laddr;

		/*
		 * mlen is the smaller of 'len' and the length
		 * from addr to the end of mapping defined by dhp.
		 */
		mlen = MIN(len, (dhp->dh_len - off));

		HOLD_DHP_LOCK(dhp);
		/*
		 * Pass the extended length and address to devmap_access
		 * if large pagesize is used for loading address translations.
		 */
		if ((sdp->pageprot == 0) &&
		    (dhp->dh_flags & DEVMAP_FLAG_LARGE)) {
			devmap_get_large_pgsize(dhp, mlen, maddr,
			    &llen, &laddr);
			ASSERT(maddr == addr || laddr == maddr);
		} else {
			llen = mlen;
			laddr = maddr;
		}

		if (dhp->dh_callbackops.devmap_access != NULL) {
			offset_t aoff;

			/* offset of the fault within the mapped object */
			aoff = sdp->offset + (offset_t)(laddr - seg->s_base);

			/*
			 * call driver's devmap_access entry point which will
			 * call devmap_load/contextmgmt to load the translations
			 *
			 * We drop the dhp_lock before calling access so
			 * drivers can call devmap_*_remap within access
			 */
			RELE_DHP_LOCK(dhp);

			err = (*dhp->dh_callbackops.devmap_access)(
			    dhp, (void *)dhp->dh_pvtp, aoff, llen, type, rw);
		} else {
			/*
			 * If no devmap_access entry point, then load mappings
			 * hold dhp_lock across faultpages if REMAP
			 */
			err = segdev_faultpages(hat, seg, laddr, llen,
			    type, rw, dhp);
			err_is_faultcode = 1;
			RELE_DHP_LOCK(dhp);
		}

		if (err) {
			if ((type == F_SOFTLOCK) && (maddr > addr)) {
				/*
				 * If not first dhp, use
				 * segdev_fault(F_SOFTUNLOCK) for prior dhps
				 * While this is recursion, it is incorrect to
				 * call just segdev_softunlock
				 * if we are using either large pages
				 * or devmap_access. It will be more right
				 * to go through the same loop as above
				 * rather than call segdev_softunlock directly
				 * It will use the right lengths as well as
				 * call into the driver devmap_access routines.
				 */
				size_t done = (size_t)(maddr - addr);
				(void) segdev_fault(hat, seg, addr, done,
				    F_SOFTUNLOCK, S_OTHER);
				/*
				 * reduce slpage by number of pages
				 * released by segdev_softunlock
				 */
				ASSERT(slpage >= btopr(done));
				devmap_softlock_exit(slock,
				    slpage - btopr(done), type);
			} else {
				devmap_softlock_exit(slock, slpage, type);
			}


			/*
			 * Segdev_faultpages() already returns a faultcode,
			 * hence, result from segdev_faultpages() should be
			 * returned directly.
			 */
			if (err_is_faultcode)
				return (err);
			return (FC_MAKE_ERR(err));
		}

		maddr += mlen;
		ASSERT(len >= mlen);
		len -= mlen;
		dhp = dhp->dh_next;
		off = 0;

		ASSERT(!dhp || len == 0 || maddr == dhp->dh_uvaddr);
	}
	/*
	 * release the softlock count at end of fault
	 * For F_SOFTLOCK this is done in the later F_SOFTUNLOCK
	 */
	if ((type == F_INVAL) || (type == F_PROT))
		devmap_softlock_exit(slock, slpage, type);
	return (0);
}
1888 
1889 /*
1890  * segdev_faultpages
1891  *
1892  * Used to fault in seg_dev segment pages. Called by segdev_fault or devmap_load
1893  * This routine assumes that the callers makes sure that the fields
1894  * in dhp used below are not changed due to remap during this call.
1895  * Caller does HOLD_DHP_LOCK if neeed
1896  * This routine returns a faultcode_t as a return value for segdev_fault.
1897  */
1898 static faultcode_t
1899 segdev_faultpages(
1900         struct hat *hat,                /* the hat */
1901         struct seg *seg,                /* the seg_dev of interest */
1902         caddr_t addr,                   /* the address of the fault */
1903         size_t len,                     /* the length of the range */
1904         enum fault_type type,           /* type of fault */
1905         enum seg_rw rw,                 /* type of access at fault */
1906         devmap_handle_t *dhp)           /* devmap handle */
1907 {
1908         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1909         register caddr_t a;
1910         struct vpage *vpage;
1911         struct ddi_umem_cookie *kpmem_cookie = NULL;
1912         int err;
1913 
1914         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGES,
1915             "segdev_faultpages: dhp=%p seg=%p addr=%p len=%lx",
1916             (void *)dhp, (void *)seg, (void *)addr, len);
1917         DEBUGF(5, (CE_CONT, "segdev_faultpages: "
1918             "dhp %p seg %p addr %p len %lx\n",
1919             (void *)dhp, (void *)seg, (void *)addr, len));
1920 
1921         /*
1922          * The seg_dev driver does not implement copy-on-write,
1923          * and always loads translations with maximal allowed permissions
1924          * but we got an fault trying to access the device.
1925          * Servicing the fault is not going to result in any better result
1926          * XXX: If we want to allow devmap_access to handle F_PROT calls,
1927          * This code should be removed and let the normal fault handling
1928          * take care of finding the error
1929          */
1930         if (type == F_PROT) {
1931                 return (FC_PROT);
1932         }
1933 
1934         if (type == F_SOFTUNLOCK) {
1935                 segdev_softunlock(hat, seg, addr, len, rw);
1936                 return (0);
1937         }
1938 
1939         /*
1940          * For kernel pageable memory, fault/lock segkp pages
1941          * We hold this until the completion of this
1942          * fault (INVAL/PROT) or till unlock (SOFTLOCK).
1943          */
1944         if ((dhp != NULL) && dhp_is_kpmem(dhp)) {
1945                 kpmem_cookie = (struct ddi_umem_cookie *)dhp->dh_cookie;
1946                 if (err = acquire_kpmem_lock(kpmem_cookie, btopr(len)))
1947                         return (err);
1948         }
1949 
1950         /*
1951          * If we have the same protections for the entire segment,
1952          * insure that the access being attempted is legitimate.
1953          */
1954         rw_enter(&sdp->lock, RW_READER);
1955         if (sdp->pageprot == 0) {
1956                 uint_t protchk;
1957 
1958                 switch (rw) {
1959                 case S_READ:
1960                         protchk = PROT_READ;
1961                         break;
1962                 case S_WRITE:
1963                         protchk = PROT_WRITE;
1964                         break;
1965                 case S_EXEC:
1966                         protchk = PROT_EXEC;
1967                         break;
1968                 case S_OTHER:
1969                 default:
1970                         protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
1971                         break;
1972                 }
1973 
1974                 if ((sdp->prot & protchk) == 0) {
1975                         rw_exit(&sdp->lock);
1976                         /* undo kpmem locking */
1977                         if (kpmem_cookie != NULL) {
1978                                 release_kpmem_lock(kpmem_cookie, btopr(len));
1979                         }
1980                         return (FC_PROT);       /* illegal access type */
1981                 }
1982         }
1983 
1984         /*
1985          * we do a single hat_devload for the range if
1986          *   - devmap framework (dhp is not NULL),
1987          *   - pageprot == 0, i.e., no per-page protection set and
1988          *   - is device pages, irrespective of whether we are using large pages
1989          */
1990         if ((sdp->pageprot == 0) && (dhp != NULL) && dhp_is_devmem(dhp)) {
1991                 pfn_t pfnum;
1992                 uint_t hat_flags;
1993 
1994                 if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
1995                         rw_exit(&sdp->lock);
1996                         return (FC_NOMAP);
1997                 }
1998 
1999                 if (type == F_SOFTLOCK) {
2000                         mutex_enter(&freemem_lock);
2001                         sdp->softlockcnt += btopr(len);
2002                         mutex_exit(&freemem_lock);
2003                 }
2004 
2005                 hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
2006                 pfnum = dhp->dh_pfn + btop((uintptr_t)(addr - dhp->dh_uvaddr));
2007                 ASSERT(!pf_is_memory(pfnum));
2008 
2009                 hat_devload(hat, addr, len, pfnum, sdp->prot | dhp->dh_hat_attr,
2010                     hat_flags | sdp->hat_flags);
2011                 rw_exit(&sdp->lock);
2012                 return (0);
2013         }
2014 
2015         /* Handle cases where we have to loop through fault handling per-page */
2016 
2017         if (sdp->vpage == NULL)
2018                 vpage = NULL;
2019         else
2020                 vpage = &sdp->vpage[seg_page(seg, addr)];
2021 
2022         /* loop over the address range handling each fault */
2023         for (a = addr; a < addr + len; a += PAGESIZE) {
2024                 if (err = segdev_faultpage(hat, seg, a, vpage, type, rw, dhp)) {
2025                         break;
2026                 }
2027                 if (vpage != NULL)
2028                         vpage++;
2029         }
2030         rw_exit(&sdp->lock);
2031         if (err && (type == F_SOFTLOCK)) { /* error handling for F_SOFTLOCK */
2032                 size_t done = (size_t)(a - addr); /* pages fault successfully */
2033                 if (done > 0) {
2034                         /* use softunlock for those pages */
2035                         segdev_softunlock(hat, seg, addr, done, S_OTHER);
2036                 }
2037                 if (kpmem_cookie != NULL) {
2038                         /* release kpmem lock for rest of pages */
2039                         ASSERT(len >= done);
2040                         release_kpmem_lock(kpmem_cookie, btopr(len - done));
2041                 }
2042         } else if ((kpmem_cookie != NULL) && (type != F_SOFTLOCK)) {
2043                 /* for non-SOFTLOCK cases, release kpmem */
2044                 release_kpmem_lock(kpmem_cookie, btopr(len));
2045         }
2046         return (err);
2047 }
2048 
2049 /*
2050  * Asynchronous page fault.  We simply do nothing since this
2051  * entry point is not supposed to load up the translation.
2052  */
2053 /*ARGSUSED*/
2054 static faultcode_t
2055 segdev_faulta(struct seg *seg, caddr_t addr)
2056 {
2057         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FAULTA,
2058             "segdev_faulta: seg=%p addr=%p", (void *)seg, (void *)addr);
2059         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2060 
2061         return (0);
2062 }
2063 
/*
 * Change the protections of the range [addr, addr + len) to 'prot'
 * (seg_ops setprot).
 *
 * Fails with EAGAIN if pages are currently SOFTLOCKed through a devmap
 * mapping, EINVAL if no devmap handle covers 'addr', and EACCES if
 * 'prot' exceeds the maximum protections of the segment or of any
 * devmap handle covering the range.  On success, existing translations
 * are unloaded or have their protections changed to match.
 */
static int
segdev_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
	register devmap_handle_t *dhp;
	register struct vpage *vp, *evp;
	devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
	ulong_t off;
	size_t mlen, sz;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT,
	    "segdev_setprot:start seg=%p addr=%p len=%lx prot=%x",
	    (void *)seg, (void *)addr, len, prot);
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	if ((sz = sdp->softlockcnt) > 0 && dhp_head != NULL) {
		/*
		 * Fail the setprot if pages are SOFTLOCKed through this
		 * mapping.
		 * Softlockcnt is protected from change by the as read lock.
		 */
		TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT_CK1,
		    "segdev_setprot:error softlockcnt=%lx", sz);
		DEBUGF(1, (CE_CONT, "segdev_setprot: softlockcnt %ld\n", sz));
		return (EAGAIN);
	}

	if (dhp_head != NULL) {
		if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
			return (EINVAL);

		/*
		 * check if the request violates maxprot of any devmap
		 * handle covering the range.
		 */
		off = (ulong_t)(addr - dhp->dh_uvaddr);
		mlen  = len;
		while (dhp) {
			if ((dhp->dh_maxprot & prot) != prot)
				return (EACCES);	/* violated maxprot */

			if (mlen > (dhp->dh_len - off)) {
				/* range extends past this dhp; keep walking */
				mlen -= dhp->dh_len - off;
				dhp = dhp->dh_next;
				off = 0;
			} else
				break;
		}
	} else {
		if ((sdp->maxprot & prot) != prot)
			return (EACCES);
	}

	rw_enter(&sdp->lock, RW_WRITER);
	if (addr == seg->s_base && len == seg->s_size && sdp->pageprot == 0) {
		/* whole segment, uniform protections: just update sdp->prot */
		if (sdp->prot == prot) {
			rw_exit(&sdp->lock);
			return (0);			/* all done */
		}
		sdp->prot = (uchar_t)prot;
	} else {
		/* sub-range: switch to (or update) per-page protections */
		sdp->pageprot = 1;
		if (sdp->vpage == NULL) {
			/*
			 * First time through setting per page permissions,
			 * initialize all the vpage structures to prot
			 */
			sdp->vpage = kmem_zalloc(vpgtob(seg_pages(seg)),
			    KM_SLEEP);
			evp = &sdp->vpage[seg_pages(seg)];
			for (vp = sdp->vpage; vp < evp; vp++)
				VPP_SETPROT(vp, sdp->prot);
		}
		/*
		 * Now go change the needed vpages protections.
		 */
		evp = &sdp->vpage[seg_page(seg, addr + len)];
		for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++)
			VPP_SETPROT(vp, prot);
	}
	rw_exit(&sdp->lock);

	if (dhp_head != NULL) {
		devmap_handle_t *tdhp;
		/*
		 * If large page size was used in hat_devload(),
		 * the same page size must be used in hat_unload().
		 */
		dhp = tdhp = devmap_find_handle(dhp_head, addr);
		while (tdhp != NULL) {
			if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
				break;
			}
			tdhp = tdhp->dh_next;
		}
		if (tdhp) {
			size_t slen = len;
			size_t mlen;
			size_t soff;

			soff = (ulong_t)(addr - dhp->dh_uvaddr);
			while (slen != 0) {
				mlen = MIN(slen, (dhp->dh_len - soff));
				/*
				 * unload each dhp's entire mapping (not just
				 * mlen) so translations are removed with the
				 * same page size they were loaded with
				 */
				hat_unload(seg->s_as->a_hat, dhp->dh_uvaddr,
				    dhp->dh_len, HAT_UNLOAD);
				dhp = dhp->dh_next;
				ASSERT(slen >= mlen);
				slen -= mlen;
				soff = 0;
			}
			return (0);
		}
	}

	if ((prot & ~PROT_USER) == PROT_NONE) {
		hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
	} else {
		/*
		 * RFE: the segment should keep track of all attributes
		 * allowing us to remove the deprecated hat_chgprot
		 * and use hat_chgattr.
		 */
		hat_chgprot(seg->s_as->a_hat, addr, len, prot);
	}

	return (0);
}
2190 
2191 static int
2192 segdev_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2193 {
2194         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2195         struct vpage *vp, *evp;
2196 
2197         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_CHECKPROT,
2198             "segdev_checkprot:start seg=%p addr=%p len=%lx prot=%x",
2199             (void *)seg, (void *)addr, len, prot);
2200         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2201 
2202         /*
2203          * If segment protection can be used, simply check against them
2204          */
2205         rw_enter(&sdp->lock, RW_READER);
2206         if (sdp->pageprot == 0) {
2207                 register int err;
2208 
2209                 err = ((sdp->prot & prot) != prot) ? EACCES : 0;
2210                 rw_exit(&sdp->lock);
2211                 return (err);
2212         }
2213 
2214         /*
2215          * Have to check down to the vpage level
2216          */
2217         evp = &sdp->vpage[seg_page(seg, addr + len)];
2218         for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
2219                 if ((VPP_PROT(vp) & prot) != prot) {
2220                         rw_exit(&sdp->lock);
2221                         return (EACCES);
2222                 }
2223         }
2224         rw_exit(&sdp->lock);
2225         return (0);
2226 }
2227 
2228 static int
2229 segdev_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2230 {
2231         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2232         size_t pgno;
2233 
2234         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_GETPROT,
2235             "segdev_getprot:start seg=%p addr=%p len=%lx protv=%p",
2236             (void *)seg, (void *)addr, len, (void *)protv);
2237         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2238 
2239         pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
2240         if (pgno != 0) {
2241                 rw_enter(&sdp->lock, RW_READER);
2242                 if (sdp->pageprot == 0) {
2243                         do {
2244                                 protv[--pgno] = sdp->prot;
2245                         } while (pgno != 0);
2246                 } else {
2247                         size_t pgoff = seg_page(seg, addr);
2248 
2249                         do {
2250                                 pgno--;
2251                                 protv[pgno] =
2252                                     VPP_PROT(&sdp->vpage[pgno + pgoff]);
2253                         } while (pgno != 0);
2254                 }
2255                 rw_exit(&sdp->lock);
2256         }
2257         return (0);
2258 }
2259 
2260 static u_offset_t
2261 segdev_getoffset(register struct seg *seg, caddr_t addr)
2262 {
2263         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2264 
2265         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETOFFSET,
2266             "segdev_getoffset:start seg=%p addr=%p", (void *)seg, (void *)addr);
2267 
2268         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2269 
2270         return ((u_offset_t)sdp->offset + (addr - seg->s_base));
2271 }
2272 
/*
 * Return the sharing type recorded for this segment (sdp->type, set
 * from the MAP_TYPE bits at map-creation time).  addr is unused
 * because the type is uniform across the whole segment.
 */
/*ARGSUSED*/
static int
segdev_gettype(register struct seg *seg, caddr_t addr)
{
	register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;

	TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETTYPE,
	    "segdev_gettype:start seg=%p addr=%p", (void *)seg, (void *)addr);

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	return (sdp->type);
}
2286 
2287 
/*
 * Return the vnode backing this segment.  addr is unused; the same
 * vnode covers the entire segment.
 */
/*ARGSUSED*/
static int
segdev_getvp(register struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;

	TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETVP,
	    "segdev_getvp:start seg=%p addr=%p", (void *)seg, (void *)addr);

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * Note that this vp is the common_vp of the device, where the
	 * pages are hung ..
	 */
	*vpp = VTOCVP(sdp->vp);

	return (0);
}
2307 
/*
 * Catch-all entry for segment operations that must never be invoked
 * on a segdev segment.  Reaching this function indicates a kernel
 * bug, so panic rather than continue.
 */
static void
segdev_badop(void)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGDEV_BADOP,
	    "segdev_badop:start");
	panic("segdev_badop");
	/*NOTREACHED*/
}
2316 
/*
 * segdev pages are not in the cache, and thus can't really be controlled.
 * Hence, syncs are simply always successful.  All parameters beyond seg
 * are therefore ignored.
 */
/*ARGSUSED*/
static int
segdev_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SYNC, "segdev_sync:start");

	/* Caller must hold the address-space lock. */
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	return (0);
}
2331 
2332 /*
2333  * segdev pages are always "in core".
2334  */
2335 /*ARGSUSED*/
2336 static size_t
2337 segdev_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
2338 {
2339         size_t v = 0;
2340 
2341         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_INCORE, "segdev_incore:start");
2342 
2343         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2344 
2345         for (len = (len + PAGEOFFSET) & PAGEMASK; len; len -= PAGESIZE,
2346             v += PAGESIZE)
2347                 *vec++ = 1;
2348         return (v);
2349 }
2350 
/*
 * segdev pages are not in the cache, and thus can't really be controlled.
 * Hence, locks are simply always successful.  The attr/op/lockmap/pos
 * arguments are accepted for interface compatibility and ignored.
 */
/*ARGSUSED*/
static int
segdev_lockop(struct seg *seg, caddr_t addr,
    size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_LOCKOP, "segdev_lockop:start");

	/* Caller must hold the address-space lock. */
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	return (0);
}
2366 
/*
 * segdev pages are not in the cache, and thus can't really be controlled.
 * Hence, advise is simply always successful; the behav hint is ignored.
 */
/*ARGSUSED*/
static int
segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");

	/* Caller must hold the address-space lock. */
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	return (0);
}
2381 
/*
 * Crash-dump callback: segdev pages are not dumped, so we just return
 * without doing anything.
 */
/*ARGSUSED*/
static void
segdev_dump(struct seg *seg)
{}
2389 
2390 /*
2391  * ddi_segmap_setup:    Used by drivers who wish specify mapping attributes
2392  *                      for a segment.  Called from a drivers segmap(9E)
2393  *                      routine.
2394  */
2395 /*ARGSUSED*/
2396 int
2397 ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
2398     off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
2399     ddi_device_acc_attr_t *accattrp, uint_t rnumber)
2400 {
2401         struct segdev_crargs dev_a;
2402         int (*mapfunc)(dev_t dev, off_t off, int prot);
2403         uint_t hat_attr;
2404         pfn_t pfn;
2405         int     error, i;
2406 
2407         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP_SETUP,
2408             "ddi_segmap_setup:start");
2409 
2410         if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
2411                 return (ENODEV);
2412 
2413         /*
2414          * Character devices that support the d_mmap
2415          * interface can only be mmap'ed shared.
2416          */
2417         if ((flags & MAP_TYPE) != MAP_SHARED)
2418                 return (EINVAL);
2419 
2420         /*
2421          * Check that this region is indeed mappable on this platform.
2422          * Use the mapping function.
2423          */
2424         if (ddi_device_mapping_check(dev, accattrp, rnumber, &hat_attr) == -1)
2425                 return (ENXIO);
2426 
2427         /*
2428          * Check to ensure that the entire range is
2429          * legal and we are not trying to map in
2430          * more than the device will let us.
2431          */
2432         for (i = 0; i < len; i += PAGESIZE) {
2433                 if (i == 0) {
2434                         /*
2435                          * Save the pfn at offset here. This pfn will be
2436                          * used later to get user address.
2437                          */
2438                         if ((pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset,
2439                             maxprot)) == PFN_INVALID)
2440                                 return (ENXIO);
2441                 } else {
2442                         if (cdev_mmap(mapfunc, dev, offset + i, maxprot) ==
2443                             PFN_INVALID)
2444                                 return (ENXIO);
2445                 }
2446         }
2447 
2448         as_rangelock(as);
2449         /* Pick an address w/o worrying about any vac alignment constraints. */
2450         error = choose_addr(as, addrp, len, ptob(pfn), ADDR_NOVACALIGN, flags);
2451         if (error != 0) {
2452                 as_rangeunlock(as);
2453                 return (error);
2454         }
2455 
2456         dev_a.mapfunc = mapfunc;
2457         dev_a.dev = dev;
2458         dev_a.offset = (offset_t)offset;
2459         dev_a.type = flags & MAP_TYPE;
2460         dev_a.prot = (uchar_t)prot;
2461         dev_a.maxprot = (uchar_t)maxprot;
2462         dev_a.hat_attr = hat_attr;
2463         dev_a.hat_flags = 0;
2464         dev_a.devmap_data = NULL;
2465 
2466         error = as_map(as, *addrp, len, segdev_create, &dev_a);
2467         as_rangeunlock(as);
2468         return (error);
2469 
2470 }
2471 
/*
 * Page locking is not supported for device segments; always ENOTSUP.
 */
/*ARGSUSED*/
static int
segdev_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_PAGELOCK,
	    "segdev_pagelock:start");
	return (ENOTSUP);
}
2481 
/*
 * Changing the page size of an established device mapping is not
 * supported; always ENOTSUP.
 */
/*ARGSUSED*/
static int
segdev_setpagesize(struct seg *seg, caddr_t addr, size_t len,
    uint_t szc)
{
	return (ENOTSUP);
}
2489 
/*
 * devmap_device: Used by devmap framework to establish mapping
 *                called by devmap_setup(9F) during map setup time.
 *
 * Chooses a user address for the mapping (unless MAP_FIXED) and then
 * creates the segdev segment via as_map().  Returns 0 or an errno.
 */
/*ARGSUSED*/
static int
devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
    offset_t off, size_t len, uint_t flags)
{
	devmap_handle_t *rdhp, *maxdhp;
	struct segdev_crargs dev_a;
	int	err;
	uint_t maxprot = PROT_ALL;
	offset_t offset = 0;
	pfn_t pfn;
	struct devmap_pmem_cookie *pcp;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVICE,
	    "devmap_device:start dhp=%p addr=%p off=%llx, len=%lx",
	    (void *)dhp, (void *)addr, off, len);

	DEBUGF(2, (CE_CONT, "devmap_device: dhp %p addr %p off %llx len %lx\n",
	    (void *)dhp, (void *)addr, off, len));

	as_rangelock(as);
	if ((flags & MAP_FIXED) == 0) {
		offset_t aligned_off;

		/*
		 * Walk the handle chain to find the handle with the
		 * largest length; its backing cookie drives the
		 * alignment of the address we pick below.
		 *
		 * NOTE(review): maxprot starts as PROT_ALL, so the |=
		 * in this loop cannot add bits, and it reads dhp (the
		 * list head) rather than rdhp — confirm intent.
		 */
		rdhp = maxdhp = dhp;
		while (rdhp != NULL) {
			maxdhp = (maxdhp->dh_len > rdhp->dh_len) ?
			    maxdhp : rdhp;
			rdhp = rdhp->dh_next;
			maxprot |= dhp->dh_maxprot;
		}
		/* Byte distance from the start of the map to maxdhp. */
		offset = maxdhp->dh_uoff - dhp->dh_uoff;

		/*
		 * Use the dhp that has the
		 * largest len to get user address.
		 */
		/*
		 * If MAPPING_INVALID, cannot use dh_pfn/dh_cvaddr,
		 * use 0 which is as good as any other.
		 */
		if (maxdhp->dh_flags & DEVMAP_MAPPING_INVALID) {
			aligned_off = (offset_t)0;
		} else if (dhp_is_devmem(maxdhp)) {
			/* Device memory: align to its physical address. */
			aligned_off = (offset_t)ptob(maxdhp->dh_pfn) - offset;
		} else if (dhp_is_pmem(maxdhp)) {
			/* Pmem: align to the first backing page's paddr. */
			pcp = (struct devmap_pmem_cookie *)maxdhp->dh_pcookie;
			pfn = page_pptonum(
			    pcp->dp_pparray[btop(maxdhp->dh_roff)]);
			aligned_off = (offset_t)ptob(pfn) - offset;
		} else {
			/* Kernel/user exported memory: align to cvaddr. */
			aligned_off = (offset_t)(uintptr_t)maxdhp->dh_cvaddr -
			    offset;
		}

		/*
		 * Pick an address aligned to dh_cookie.
		 * for kernel memory/user memory, cookie is cvaddr.
		 * for device memory, cookie is physical address.
		 */
		map_addr(addr, len, aligned_off, 1, flags);
		if (*addr == NULL) {
			as_rangeunlock(as);
			return (ENOMEM);
		}
	} else {
		/*
		 * User-specified address; blow away any previous mappings.
		 */
		(void) as_unmap(as, *addr, len);
	}

	dev_a.mapfunc = NULL;
	dev_a.dev = dhp->dh_dev;
	dev_a.type = flags & MAP_TYPE;
	dev_a.offset = off;
	/*
	 * sdp->maxprot has the least restrict protection of all dhps.
	 */
	dev_a.maxprot = maxprot;
	dev_a.prot = dhp->dh_prot;
	/*
	 * devmap uses dhp->dh_hat_attr for hat.
	 */
	dev_a.hat_flags = 0;
	dev_a.hat_attr = 0;
	dev_a.devmap_data = (void *)dhp;

	err = as_map(as, *addr, len, segdev_create, &dev_a);
	as_rangeunlock(as);
	return (err);
}
2586 
/*
 * devmap_do_ctxmgt: serialize access to the device context and invoke
 * the driver's context-management callback (ctxmgt) to resolve a fault
 * on [off, off + len).  On an MP system with a hysteresis timeout
 * configured, only one thread at a time owns the context (devctx->oncpu);
 * ownership is either released immediately or handed to a timeout that
 * fires after dh_timeout_length ticks.
 *
 * Returns DDI_SUCCESS, or FC_HWERR when no callback was supplied or the
 * callback reported failure.
 */
int
devmap_do_ctxmgt(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
    uint_t type, uint_t rw, int (*ctxmgt)(devmap_cookie_t, void *, offset_t,
    size_t, uint_t, uint_t))
{
	register devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	struct devmap_ctx *devctx;
	int do_timeout = 0;
	int ret;

#ifdef lint
	pvtp = pvtp;
#endif

	TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT,
	    "devmap_do_ctxmgt:start dhp=%p off=%llx, len=%lx",
	    (void *)dhp, off, len);
	DEBUGF(7, (CE_CONT, "devmap_do_ctxmgt: dhp %p off %llx len %lx\n",
	    (void *)dhp, off, len));

	/* A missing callback is a driver bug; report a hardware fault. */
	if (ctxmgt == NULL)
		return (FC_HWERR);

	devctx = dhp->dh_ctx;

	/*
	 * If we are on an MP system with more than one cpu running
	 * and if a thread on some CPU already has the context, wait
	 * for it to finish if there is a hysteresis timeout.
	 *
	 * We call cv_wait() instead of cv_wait_sig() because
	 * it does not matter much if it returned due to a signal
	 * or due to a cv_signal() or cv_broadcast().  In either event
	 * we need to complete the mapping otherwise the processes
	 * will die with a SEGV.
	 */
	if ((dhp->dh_timeout_length > 0) && (ncpus > 1)) {
		TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK1,
		    "devmap_do_ctxmgt:doing hysteresis, devctl %p dhp %p",
		    devctx, dhp);
		do_timeout = 1;
		mutex_enter(&devctx->lock);
		while (devctx->oncpu)
			cv_wait(&devctx->cv, &devctx->lock);
		devctx->oncpu = 1;
		mutex_exit(&devctx->lock);
	}

	/*
	 * Call the contextmgt callback so that the driver can handle
	 * the fault.
	 */
	ret = (*ctxmgt)(dhp, dhp->dh_pvtp, off, len, type, rw);

	/*
	 * If devmap_access() returned -1, then there was a hardware
	 * error so we need to convert the return value to something
	 * that trap() will understand.  Otherwise, the return value
	 * is already a fault code generated by devmap_unload()
	 * or devmap_load().
	 */
	if (ret) {
		TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK2,
		    "devmap_do_ctxmgt: ret=%x dhp=%p devctx=%p",
		    ret, dhp, devctx);
		DEBUGF(1, (CE_CONT, "devmap_do_ctxmgt: ret %x dhp %p\n",
		    ret, (void *)dhp));
		/*
		 * Release context ownership on the error path.
		 * NOTE(review): oncpu is tested here without holding
		 * devctx->lock before re-acquiring it — confirm this
		 * unlocked read is benign on all platforms.
		 */
		if (devctx->oncpu) {
			mutex_enter(&devctx->lock);
			devctx->oncpu = 0;
			cv_signal(&devctx->cv);
			mutex_exit(&devctx->lock);
		}
		return (FC_HWERR);
	}

	/*
	 * Setup the timeout if we need to
	 */
	if (do_timeout) {
		mutex_enter(&devctx->lock);
		if (dhp->dh_timeout_length > 0) {
			TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK3,
			    "devmap_do_ctxmgt:timeout set");
			/* devmap_ctxto will release oncpu when it fires. */
			devctx->timeout = timeout(devmap_ctxto,
			    devctx, dhp->dh_timeout_length);
		} else {
			/*
			 * We don't want to wait so set oncpu to
			 * 0 and wake up anyone waiting.
			 */
			TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK4,
			    "devmap_do_ctxmgt:timeout not set");
			devctx->oncpu = 0;
			cv_signal(&devctx->cv);
		}
		mutex_exit(&devctx->lock);
	}

	return (DDI_SUCCESS);
}
2688 
/*
 *                                       end of mapping
 *                    poff   fault_offset         |
 *            base     |        |                 |
 *              |      |        |                 |
 *              V      V        V                 V
 *  +-----------+---------------+-------+---------+-------+
 *              ^               ^       ^         ^
 *              |<--- offset--->|<-len->|         |
 *              |<--- dh_len(size of mapping) --->|
 *                     |<--  pg -->|
 *                              -->|rlen|<--
 */
/*
 * Find the largest hardware page size (searching down from
 * dh_mmulevel) whose aligned page both fits entirely inside the
 * mapping and satisfies the VA/PA alignment requirement at the fault
 * offset.  On return *opfn is the pfn of the chosen page, *pagesize
 * its size, and the return value is the residual byte count beyond
 * that page (0 if the page covers the whole request).
 */
static ulong_t
devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
    ulong_t *opfn, ulong_t *pagesize)
{
	register int level;
	ulong_t pg;
	ulong_t poff;
	ulong_t base;
	caddr_t uvaddr;
	long rlen;

	TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP,
	    "devmap_roundup:start dhp=%p off=%lx len=%lx",
	    (void *)dhp, offset, len);
	DEBUGF(2, (CE_CONT, "devmap_roundup: dhp %p off %lx len %lx\n",
	    (void *)dhp, offset, len));

	/*
	 * get the max. pagesize that is aligned within the range
	 * <dh_pfn, dh_pfn+offset>.
	 *
	 * The calculations below use physical address to ddetermine
	 * the page size to use. The same calculations can use the
	 * virtual address to determine the page size.
	 */
	base = (ulong_t)ptob(dhp->dh_pfn);
	for (level = dhp->dh_mmulevel; level >= 0; level--) {
		/* Candidate page: the level-sized page containing offset. */
		pg = page_get_pagesize(level);
		poff = ((base + offset) & ~(pg - 1));
		uvaddr = dhp->dh_uvaddr + (poff - base);
		if ((poff >= base) &&
		    ((poff + pg) <= (base + dhp->dh_len)) &&
		    VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg))
			break;
	}

	TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK1,
	    "devmap_roundup: base=%lx poff=%lx dhp=%p",
	    base, poff, dhp);
	DEBUGF(2, (CE_CONT, "devmap_roundup: base %lx poff %lx pfn %lx\n",
	    base, poff, dhp->dh_pfn));

	/* Level 0 (PAGESIZE) must always have satisfied the loop. */
	ASSERT(VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg));
	ASSERT(level >= 0);

	*pagesize = pg;
	*opfn = dhp->dh_pfn + btop(poff - base);

	/* Bytes of the request not covered by the chosen page. */
	rlen = len + offset - (poff - base + pg);

	ASSERT(rlen < (long)len);

	TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK2,
	    "devmap_roundup:ret dhp=%p level=%x rlen=%lx psiz=%p opfn=%p",
	    (void *)dhp, level, rlen, pagesize, opfn);
	DEBUGF(1, (CE_CONT, "devmap_roundup: dhp %p "
	    "level %x rlen %lx psize %lx opfn %lx\n",
	    (void *)dhp, level, rlen, *pagesize, *opfn));

	return ((ulong_t)((rlen > 0) ? rlen : 0));
}
2763 
2764 /*
2765  * find the dhp that contains addr.
2766  */
2767 static devmap_handle_t *
2768 devmap_find_handle(devmap_handle_t *dhp_head, caddr_t addr)
2769 {
2770         devmap_handle_t *dhp;
2771 
2772         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_FIND_HANDLE,
2773             "devmap_find_handle:start");
2774 
2775         dhp = dhp_head;
2776         while (dhp) {
2777                 if (addr >= dhp->dh_uvaddr &&
2778                     addr < (dhp->dh_uvaddr + dhp->dh_len))
2779                         return (dhp);
2780                 dhp = dhp->dh_next;
2781         }
2782 
2783         return ((devmap_handle_t *)NULL);
2784 }
2785 
/*
 * devmap_unload:
 *                      Marks a segdev segment or pages if offset->offset+len
 *                      is not the entire segment as intercept and unloads the
 *                      pages in the range offset -> offset+len.
 *
 * Returns 0 on success or FC_MAKE_ERR(EINVAL) when the range does not
 * lie within the handle's mapping.
 */
int
devmap_unload(devmap_cookie_t dhc, offset_t offset, size_t len)
{
	register devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	caddr_t addr;
	ulong_t size;
	ssize_t soff;

	TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_UNLOAD,
	    "devmap_unload:start dhp=%p offset=%llx len=%lx",
	    (void *)dhp, offset, len);
	DEBUGF(7, (CE_CONT, "devmap_unload: dhp %p offset %llx len %lx\n",
	    (void *)dhp, offset, len));

	/* Page-aligned offset of the range within this handle's mapping. */
	soff = (ssize_t)(offset - dhp->dh_uoff);
	soff = round_down_p2(soff, PAGESIZE);
	if (soff < 0 || soff >= dhp->dh_len)
		return (FC_MAKE_ERR(EINVAL));

	/*
	 * Address and size must be page aligned.  Len is set to the
	 * number of bytes in the number of pages that are required to
	 * support len.  Offset is set to the byte offset of the first byte
	 * of the page that contains offset.
	 */
	len = round_up_p2(len, PAGESIZE);

	/*
	 * If len is == 0, then calculate the size by getting
	 * the number of bytes from offset to the end of the segment.
	 */
	if (len == 0)
		size = dhp->dh_len - soff;
	else {
		size = len;
		if ((soff + size) > dhp->dh_len)
			return (FC_MAKE_ERR(EINVAL));
	}

	/*
	 * The address is offset bytes from the base address of
	 * the dhp.
	 */
	addr = (caddr_t)(soff + dhp->dh_uvaddr);

	/*
	 * If large page size was used in hat_devload(),
	 * the same page size must be used in hat_unload().
	 * For that reason a large-page handle unloads its entire
	 * range, not just [addr, addr + size).
	 */
	if (dhp->dh_flags & DEVMAP_FLAG_LARGE) {
		hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
		    dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
	} else {
		hat_unload(dhp->dh_seg->s_as->a_hat,  addr, size,
		    HAT_UNLOAD|HAT_UNLOAD_OTHER);
	}

	return (0);
}
2851 
/*
 * calculates the optimal page size that will be used for hat_devload().
 *
 * On return *laddr/*llen describe a (possibly larger) range, built from
 * optimally-sized hardware pages, that covers the original [addr,
 * addr + len) fault range.
 */
static void
devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len, caddr_t addr,
    size_t *llen, caddr_t *laddr)
{
	ulong_t off;
	ulong_t pfn;
	ulong_t pgsize;
	uint_t first = 1;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GET_LARGE_PGSIZE,
	    "devmap_get_large_pgsize:start");

	/*
	 * RFE - Code only supports large page mappings for devmem
	 * This code could be changed in future if we want to support
	 * large page mappings for kernel exported memory.
	 */
	ASSERT(dhp_is_devmem(dhp));
	ASSERT(!(dhp->dh_flags & DEVMAP_MAPPING_INVALID));

	*llen = 0;
	off = (ulong_t)(addr - dhp->dh_uvaddr);
	while ((long)len > 0) {
		/*
		 * get the optimal pfn to minimize address translations.
		 * devmap_roundup() returns residue bytes for next round
		 * calculations.
		 */
		len = devmap_roundup(dhp, off, len, &pfn, &pgsize);

		/* Remember the start address of the first chosen page. */
		if (first) {
			*laddr = dhp->dh_uvaddr + ptob(pfn - dhp->dh_pfn);
			first = 0;
		}

		*llen += pgsize;
		off = ptob(pfn - dhp->dh_pfn) + pgsize;
	}
	/*
	 * Large page mapping len/addr cover more range than original fault.
	 * NOTE(review): len here is the loop's residual value (<= 0 cast
	 * back to size_t), not the caller's original length — confirm the
	 * first ASSERT is checking what was intended.
	 */
	ASSERT(*llen >= len && *laddr <= addr);
	ASSERT((*laddr + *llen) >= (addr + len));
}
2897 
/*
 * Initialize the devmap_softlock structure.
 *
 * Looks up (or creates) the softlock entry shared by all handles for
 * the <dev, id> pair and takes a reference on it.  A zeroed entry is
 * allocated up front, before devmap_slock is held, because
 * kmem_zalloc(KM_SLEEP) may block; if the lookup then finds an
 * existing entry the preallocated one is freed.
 */
static struct devmap_softlock *
devmap_softlock_init(dev_t dev, ulong_t id)
{
	struct devmap_softlock *slock;
	struct devmap_softlock *tmp;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_INIT,
	    "devmap_softlock_init:start");

	tmp = kmem_zalloc(sizeof (struct devmap_softlock), KM_SLEEP);
	mutex_enter(&devmap_slock);

	/* Search the global list for an existing <dev, id> entry. */
	for (slock = devmap_slist; slock != NULL; slock = slock->next)
		if ((slock->dev == dev) && (slock->id == id))
			break;

	if (slock == NULL) {
		/* No match: initialize the preallocated entry and link it. */
		slock = tmp;
		slock->dev = dev;
		slock->id = id;
		mutex_init(&slock->lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&slock->cv, NULL, CV_DEFAULT, NULL);
		slock->next = devmap_slist;
		devmap_slist = slock;
	} else
		kmem_free(tmp, sizeof (struct devmap_softlock));

	/* Take a reference under the entry's own lock. */
	mutex_enter(&slock->lock);
	slock->refcnt++;
	mutex_exit(&slock->lock);
	mutex_exit(&devmap_slock);

	return (slock);
}
2935 
/*
 * Wake up processes that sleep on softlocked.
 * Free dh_softlock if refcnt is 0.
 *
 * Drops one reference on the handle's softlock entry; when the count
 * reaches zero the entry is unlinked from devmap_slist and destroyed.
 */
static void
devmap_softlock_rele(devmap_handle_t *dhp)
{
	struct devmap_softlock *slock = dhp->dh_softlock;
	struct devmap_softlock *tmp;
	struct devmap_softlock *parent;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_RELE,
	    "devmap_softlock_rele:start");

	/* List lock first, then the entry lock (same order as init). */
	mutex_enter(&devmap_slock);
	mutex_enter(&slock->lock);

	ASSERT(slock->refcnt > 0);

	slock->refcnt--;

	/*
	 * If no one is using the device, free up the slock data.
	 */
	if (slock->refcnt == 0) {
		slock->softlocked = 0;
		cv_signal(&slock->cv);

		/* Unlink the entry from the singly-linked global list. */
		if (devmap_slist == slock)
			devmap_slist = slock->next;
		else {
			parent = devmap_slist;
			for (tmp = devmap_slist->next; tmp != NULL;
			    tmp = tmp->next) {
				if (tmp == slock) {
					parent->next = tmp->next;
					break;
				}
				parent = tmp;
			}
		}
		/* Safe to destroy: no other references remain. */
		mutex_exit(&slock->lock);
		mutex_destroy(&slock->lock);
		cv_destroy(&slock->cv);
		kmem_free(slock, sizeof (struct devmap_softlock));
	} else
		mutex_exit(&slock->lock);

	mutex_exit(&devmap_slock);
}
2986 
/*
 * Wake up processes that sleep on dh_ctx->locked.
 * Free dh_ctx if refcnt is 0.
 *
 * Drops one reference on the handle's devmap_ctx; when the count
 * reaches zero any pending hysteresis timeout is cancelled and the
 * context is unlinked from devmapctx_list and destroyed.
 */
static void
devmap_ctx_rele(devmap_handle_t *dhp)
{
	struct devmap_ctx *devctx = dhp->dh_ctx;
	struct devmap_ctx *tmp;
	struct devmap_ctx *parent;
	timeout_id_t tid;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE,
	    "devmap_ctx_rele:start");

	/* List lock first, then the context lock. */
	mutex_enter(&devmapctx_lock);
	mutex_enter(&devctx->lock);

	ASSERT(devctx->refcnt > 0);

	devctx->refcnt--;

	/*
	 * If no one is using the device, free up the devctx data.
	 */
	if (devctx->refcnt == 0) {
		/*
		 * Untimeout any threads using this mapping as they are about
		 * to go away.  devctx->lock must be dropped around
		 * untimeout() since the timeout handler may need it.
		 */
		if (devctx->timeout != 0) {
			TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE_CK1,
			    "devmap_ctx_rele:untimeout ctx->timeout");

			tid = devctx->timeout;
			mutex_exit(&devctx->lock);
			(void) untimeout(tid);
			mutex_enter(&devctx->lock);
		}

		devctx->oncpu = 0;
		cv_signal(&devctx->cv);

		/* Unlink the context from the singly-linked global list. */
		if (devmapctx_list == devctx)
			devmapctx_list = devctx->next;
		else {
			parent = devmapctx_list;
			for (tmp = devmapctx_list->next; tmp != NULL;
			    tmp = tmp->next) {
				if (tmp == devctx) {
					parent->next = tmp->next;
					break;
				}
				parent = tmp;
			}
		}
		/* Safe to destroy: no other references remain. */
		mutex_exit(&devctx->lock);
		mutex_destroy(&devctx->lock);
		cv_destroy(&devctx->cv);
		kmem_free(devctx, sizeof (struct devmap_ctx));
	} else
		mutex_exit(&devctx->lock);

	mutex_exit(&devmapctx_lock);
}
3052 
3053 /*
3054  * devmap_load:
3055  *                      Marks a segdev segment or pages if offset->offset+len
3056  *                      is not the entire segment as nointercept and faults in
3057  *                      the pages in the range offset -> offset+len.
3058  */
int
devmap_load(devmap_cookie_t dhc, offset_t offset, size_t len, uint_t type,
    uint_t rw)
{
	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	struct as *asp = dhp->dh_seg->s_as;
	caddr_t addr;
	ulong_t size;
	ssize_t soff;	/* offset from the beginning of the segment */
	int rc;

	TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_LOAD,
	    "devmap_load:start dhp=%p offset=%llx len=%lx",
	    (void *)dhp, offset, len);

	DEBUGF(7, (CE_CONT, "devmap_load: dhp %p offset %llx len %lx\n",
	    (void *)dhp, offset, len));

	/*
	 *	Hat layer only supports devload to process' context for which
	 *	the as lock is held. Verify here and return error if drivers
	 *	inadvertently call devmap_load on a wrong devmap handle.
	 */
	if ((asp != &kas) && !AS_LOCK_HELD(asp))
		return (FC_MAKE_ERR(EINVAL));

	/*
	 * Convert the mapping offset to a page-aligned offset relative to
	 * the start of this handle, and reject offsets outside the handle.
	 */
	soff = (ssize_t)(offset - dhp->dh_uoff);
	soff = round_down_p2(soff, PAGESIZE);
	if (soff < 0 || soff >= dhp->dh_len)
		return (FC_MAKE_ERR(EINVAL));

	/*
	 * Address and size must be page aligned.  Len is set to the
	 * number of bytes in the number of pages that are required to
	 * support len.  Offset is set to the byte offset of the first byte
	 * of the page that contains offset.
	 */
	len = round_up_p2(len, PAGESIZE);

	/*
	 * If len == 0, then calculate the size by getting
	 * the number of bytes from offset to the end of the segment.
	 */
	if (len == 0)
		size = dhp->dh_len - soff;
	else {
		size = len;
		/* range must not extend past the end of this handle */
		if ((soff + size) > dhp->dh_len)
			return (FC_MAKE_ERR(EINVAL));
	}

	/*
	 * The address is offset bytes from the base address of
	 * the segment.
	 */
	addr = (caddr_t)(soff + dhp->dh_uvaddr);

	/*
	 * Fault the pages in while holding the per-handle lock so a
	 * concurrent remap cannot change the mapping underneath us.
	 */
	HOLD_DHP_LOCK(dhp);
	rc = segdev_faultpages(asp->a_hat,
	    dhp->dh_seg, addr, size, type, rw, dhp);
	RELE_DHP_LOCK(dhp);
	return (rc);
}
3122 
/*
 * devmap_setup: build the chain of devmap handles for an mmap(2) request
 * on a character device, establish the user mapping via devmap_device(),
 * and run each handle's devmap_map callback.  One devmap_handle_t is
 * created per register (per driver devmap(9E) reply) until `len' bytes
 * are covered.  On any failure the entire handle chain is freed and an
 * error returned; on success *addrp holds the chosen user address.
 */
int
devmap_setup(dev_t dev, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
{
	register devmap_handle_t *dhp;
	int (*devmap)(dev_t, devmap_cookie_t, offset_t, size_t,
	    size_t *, uint_t);
	int (*mmap)(dev_t, off_t, int);
	struct devmap_callback_ctl *callbackops;
	devmap_handle_t *dhp_head = NULL;	/* first handle in the chain */
	devmap_handle_t *dhp_prev = NULL;	/* tail, for list appends */
	devmap_handle_t *dhp_curr;
	caddr_t addr;
	int map_flag;
	int ret;
	ulong_t total_len;	/* bytes covered so far by the handle chain */
	size_t map_len;		/* length returned by each devmap(9E) call */
	size_t resid_len = len;
	offset_t map_off = off;
	struct devmap_softlock *slock = NULL;	/* first softlock, for check */

#ifdef lint
	cred = cred;
#endif

	TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SETUP,
	    "devmap_setup:start off=%llx len=%lx", off, len);
	DEBUGF(3, (CE_CONT, "devmap_setup: off %llx len %lx\n",
	    off, len));

	devmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_devmap;
	mmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap;

	/*
	 * driver must provide devmap(9E) entry point in cb_ops to use the
	 * devmap framework.
	 */
	if (devmap == NULL || devmap == nulldev || devmap == nodev)
		return (EINVAL);

	/*
	 * To protect from an inadvertent entry because the devmap entry point
	 * is not NULL, return error if D_DEVMAP bit is not set in cb_flag and
	 * mmap is NULL.
	 */
	map_flag = devopsp[getmajor(dev)]->devo_cb_ops->cb_flag;
	if ((map_flag & D_DEVMAP) == 0 && (mmap == NULL || mmap == nulldev))
		return (EINVAL);

	/*
	 * devmap allows mmap(2) to map multiple registers.
	 * one devmap_handle is created for each register mapped.
	 */
	for (total_len = 0; total_len < len; total_len += map_len) {
		dhp = kmem_zalloc(sizeof (devmap_handle_t), KM_SLEEP);

		/* append the new handle to the singly-linked chain */
		if (dhp_prev != NULL)
			dhp_prev->dh_next = dhp;
		else
			dhp_head = dhp;
		dhp_prev = dhp;

		dhp->dh_prot = prot;
		dhp->dh_orig_maxprot = dhp->dh_maxprot = maxprot;
		dhp->dh_dev = dev;
		dhp->dh_timeout_length = CTX_TIMEOUT_VALUE;
		dhp->dh_uoff = map_off;

		/*
		 * Get mapping specific info from
		 * the driver, such as rnumber, roff, len, callbackops,
		 * accattrp and, if the mapping is for kernel memory,
		 * ddi_umem_cookie.
		 */
		if ((ret = cdev_devmap(dev, dhp, map_off,
		    resid_len, &map_len, get_udatamodel())) != 0) {
			/* NOTE: driver's ret is discarded; ENXIO returned */
			free_devmap_handle(dhp_head);
			return (ENXIO);
		}

		/* the driver must describe whole pages */
		if (map_len & PAGEOFFSET) {
			free_devmap_handle(dhp_head);
			return (EINVAL);
		}

		callbackops = &dhp->dh_callbackops;

		if ((callbackops->devmap_access == NULL) ||
		    (callbackops->devmap_access == nulldev) ||
		    (callbackops->devmap_access == nodev)) {
			/*
			 * Normally devmap does not support MAP_PRIVATE unless
			 * the drivers provide a valid devmap_access routine.
			 */
			if ((flags & MAP_PRIVATE) != 0) {
				free_devmap_handle(dhp_head);
				return (EINVAL);
			}
		} else {
			/*
			 * Initialize dhp_softlock and dh_ctx if the drivers
			 * provide devmap_access.
			 */
			dhp->dh_softlock = devmap_softlock_init(dev,
			    (ulong_t)callbackops->devmap_access);
			dhp->dh_ctx = devmap_ctxinit(dev,
			    (ulong_t)callbackops->devmap_access);

			/*
			 * segdev_fault can only work when all
			 * dh_softlock in a multi-dhp mapping
			 * are same. see comments in segdev_fault
			 * This code keeps track of the first
			 * dh_softlock allocated in slock and
			 * compares all later allocations and if
			 * not similar, returns an error.
			 */
			if (slock == NULL)
				slock = dhp->dh_softlock;
			if (slock != dhp->dh_softlock) {
				free_devmap_handle(dhp_head);
				return (ENOTSUP);
			}
		}

		/* advance to the next register described by the driver */
		map_off += map_len;
		resid_len -= map_len;
	}

	/*
	 * get the user virtual address and establish the mapping between
	 * uvaddr and device physical address.
	 */
	if ((ret = devmap_device(dhp_head, as, addrp, off, len, flags))
	    != 0) {
		/*
		 * free devmap handles if error during the mapping.
		 */
		free_devmap_handle(dhp_head);

		return (ret);
	}

	/*
	 * call the driver's devmap_map callback to do more after the mapping,
	 * such as to allocate driver private data for context management.
	 */
	dhp = dhp_head;
	map_off = off;
	addr = *addrp;
	while (dhp != NULL) {
		callbackops = &dhp->dh_callbackops;
		dhp->dh_uvaddr = addr;
		dhp_curr = dhp;
		if (callbackops->devmap_map != NULL) {
			ret = (*callbackops->devmap_map)((devmap_cookie_t)dhp,
			    dev, flags, map_off,
			    dhp->dh_len, &dhp->dh_pvtp);
			if (ret != 0) {
				struct segdev_data *sdp;

				/*
				 * call driver's devmap_unmap entry point
				 * to free driver resources.
				 */
				dhp = dhp_head;
				map_off = off;
				/* unwind only handles already mapped */
				while (dhp != dhp_curr) {
					callbackops = &dhp->dh_callbackops;
					if (callbackops->devmap_unmap != NULL) {
						(*callbackops->devmap_unmap)(
						    dhp, dhp->dh_pvtp,
						    map_off, dhp->dh_len,
						    NULL, NULL, NULL, NULL);
					}
					map_off += dhp->dh_len;
					dhp = dhp->dh_next;
				}
				sdp = dhp_head->dh_seg->s_data;
				sdp->devmap_data = NULL;
				free_devmap_handle(dhp_head);
				return (ENXIO);
			}
		}
		map_off += dhp->dh_len;
		addr += dhp->dh_len;
		dhp = dhp->dh_next;
	}

	return (0);
}
3314 
3315 int
3316 ddi_devmap_segmap(dev_t dev, off_t off, ddi_as_handle_t as, caddr_t *addrp,
3317     off_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
3318 {
3319         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP,
3320             "devmap_segmap:start");
3321         return (devmap_setup(dev, (offset_t)off, (struct as *)as, addrp,
3322             (size_t)len, prot, maxprot, flags, cred));
3323 }
3324 
3325 /*
3326  * Called from devmap_devmem_setup/remap to see if can use large pages for
3327  * this device mapping.
3328  * Also calculate the max. page size for this mapping.
3329  * this page size will be used in fault routine for
3330  * optimal page size calculations.
3331  */
3332 static void
3333 devmap_devmem_large_page_setup(devmap_handle_t *dhp)
3334 {
3335         ASSERT(dhp_is_devmem(dhp));
3336         dhp->dh_mmulevel = 0;
3337 
3338         /*
3339          * use large page size only if:
3340          *  1. device memory.
3341          *  2. mmu supports multiple page sizes,
3342          *  3. Driver did not disallow it
3343          *  4. dhp length is at least as big as the large pagesize
3344          *  5. the uvaddr and pfn are large pagesize aligned
3345          */
3346         if (page_num_pagesizes() > 1 &&
3347             !(dhp->dh_flags & (DEVMAP_USE_PAGESIZE | DEVMAP_MAPPING_INVALID))) {
3348                 ulong_t base;
3349                 int level;
3350 
3351                 base = (ulong_t)ptob(dhp->dh_pfn);
3352                 for (level = 1; level < page_num_pagesizes(); level++) {
3353                         size_t pgsize = page_get_pagesize(level);
3354                         if ((dhp->dh_len < pgsize) ||
3355                             (!VA_PA_PGSIZE_ALIGNED((uintptr_t)dhp->dh_uvaddr,
3356                             base, pgsize))) {
3357                                 break;
3358                         }
3359                 }
3360                 dhp->dh_mmulevel = level - 1;
3361         }
3362         if (dhp->dh_mmulevel > 0) {
3363                 dhp->dh_flags |= DEVMAP_FLAG_LARGE;
3364         } else {
3365                 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3366         }
3367 }
3368 
3369 /*
3370  * Called by driver devmap routine to pass device specific info to
3371  * the framework.    used for device memory mapping only.
3372  */
/*
 * devmap_devmem_setup: called from a driver's devmap(9E) routine to
 * describe one device-memory register mapping on handle `dhc'.  Walks
 * up the device tree (via ddi_map with DDI_MF_DEVICE_MAPPING) to obtain
 * the pfn for register `rnumber' at offset `roff', records length,
 * protections, callbacks and large-page eligibility in the handle, and
 * marks the handle DEVMAP_SETUP_DONE.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
int
devmap_devmem_setup(devmap_cookie_t dhc, dev_info_t *dip,
    struct devmap_callback_ctl *callbackops, uint_t rnumber, offset_t roff,
    size_t len, uint_t maxprot, uint_t flags, ddi_device_acc_attr_t *accattrp)
{
	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int err;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_SETUP,
	    "devmap_devmem_setup:start dhp=%p offset=%llx rnum=%d len=%lx",
	    (void *)dhp, roff, rnumber, (uint_t)len);
	DEBUGF(2, (CE_CONT, "devmap_devmem_setup: dhp %p offset %llx "
	    "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len));

	/*
	 * First to check if this function has been called for this dhp.
	 */
	if (dhp->dh_flags & DEVMAP_SETUP_DONE)
		return (DDI_FAILURE);

	/* requested maxprot must still allow the mapping's protections */
	if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
		return (DDI_FAILURE);

	if (flags & DEVMAP_MAPPING_INVALID) {
		/*
		 * Don't go up the tree to get pfn if the driver specifies
		 * DEVMAP_MAPPING_INVALID in flags.
		 *
		 * If DEVMAP_MAPPING_INVALID is specified, we have to grant
		 * remap permission.
		 */
		if (!(flags & DEVMAP_ALLOW_REMAP)) {
			return (DDI_FAILURE);
		}
		dhp->dh_pfn = PFN_INVALID;
	} else {
		handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
		if (handle == NULL)
			return (DDI_FAILURE);

		/* fill in a temporary access handle for the ddi_map call */
		hp = impl_acc_hdl_get(handle);
		hp->ah_vers = VERS_ACCHDL;
		hp->ah_dip = dip;
		hp->ah_rnumber = rnumber;
		hp->ah_offset = roff;
		hp->ah_len = len;
		if (accattrp != NULL)
			hp->ah_acc = *accattrp;

		mr.map_op = DDI_MO_MAP_LOCKED;
		mr.map_type = DDI_MT_RNUMBER;
		mr.map_obj.rnumber = rnumber;
		mr.map_prot = maxprot & dhp->dh_orig_maxprot;
		mr.map_flags = DDI_MF_DEVICE_MAPPING;
		mr.map_handlep = hp;
		mr.map_vers = DDI_MAP_VERSION;

		/*
		 * up the device tree to get pfn.
		 * The rootnex_map_regspec() routine in nexus drivers has been
		 * modified to return pfn if map_flags is DDI_MF_DEVICE_MAPPING.
		 */
		err = ddi_map(dip, &mr, roff, len, (caddr_t *)&dhp->dh_pfn);
		/* keep the hat attributes the nexus chose for this mapping */
		dhp->dh_hat_attr = hp->ah_hat_flags;
		impl_acc_hdl_free(handle);

		if (err)
			return (DDI_FAILURE);
	}
	/* Should not be using devmem setup for memory pages */
	ASSERT(!pf_is_memory(dhp->dh_pfn));

	/* Only some of the flags bits are settable by the driver */
	dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS);
	/* round the length up to whole pages */
	dhp->dh_len = ptob(btopr(len));

	dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
	dhp->dh_roff = ptob(btop(roff));

	/* setup the dh_mmulevel and DEVMAP_FLAG_LARGE */
	devmap_devmem_large_page_setup(dhp);
	dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
	ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);


	if (callbackops != NULL) {
		bcopy(callbackops, &dhp->dh_callbackops,
		    sizeof (struct devmap_callback_ctl));
	}

	/*
	 * Initialize dh_lock if we want to do remap.
	 */
	if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
		mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
		dhp->dh_flags |= DEVMAP_LOCK_INITED;
	}

	dhp->dh_flags |= DEVMAP_SETUP_DONE;

	return (DDI_SUCCESS);
}
3478 
3479 int
3480 devmap_devmem_remap(devmap_cookie_t dhc, dev_info_t *dip,
3481     uint_t rnumber, offset_t roff, size_t len, uint_t maxprot,
3482     uint_t flags, ddi_device_acc_attr_t *accattrp)
3483 {
3484         devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3485         ddi_acc_handle_t handle;
3486         ddi_map_req_t mr;
3487         ddi_acc_hdl_t *hp;
3488         pfn_t   pfn;
3489         uint_t  hat_flags;
3490         int     err;
3491 
3492         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_REMAP,
3493             "devmap_devmem_setup:start dhp=%p offset=%llx rnum=%d len=%lx",
3494             (void *)dhp, roff, rnumber, (uint_t)len);
3495         DEBUGF(2, (CE_CONT, "devmap_devmem_remap: dhp %p offset %llx "
3496             "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len));
3497 
3498         /*
3499          * Return failure if setup has not been done or no remap permission
3500          * has been granted during the setup.
3501          */
3502         if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
3503             (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
3504                 return (DDI_FAILURE);
3505 
3506         /* Only DEVMAP_MAPPING_INVALID flag supported for remap */
3507         if ((flags != 0) && (flags != DEVMAP_MAPPING_INVALID))
3508                 return (DDI_FAILURE);
3509 
3510         if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3511                 return (DDI_FAILURE);
3512 
3513         if (!(flags & DEVMAP_MAPPING_INVALID)) {
3514                 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
3515                 if (handle == NULL)
3516                         return (DDI_FAILURE);
3517         }
3518 
3519         HOLD_DHP_LOCK(dhp);
3520 
3521         /*
3522          * Unload the old mapping, so next fault will setup the new mappings
3523          * Do this while holding the dhp lock so other faults dont reestablish
3524          * the mappings
3525          */
3526         hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
3527             dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
3528 
3529         if (flags & DEVMAP_MAPPING_INVALID) {
3530                 dhp->dh_flags |= DEVMAP_MAPPING_INVALID;
3531                 dhp->dh_pfn = PFN_INVALID;
3532         } else {
3533                 /* clear any prior DEVMAP_MAPPING_INVALID flag */
3534                 dhp->dh_flags &= ~DEVMAP_MAPPING_INVALID;
3535                 hp = impl_acc_hdl_get(handle);
3536                 hp->ah_vers = VERS_ACCHDL;
3537                 hp->ah_dip = dip;
3538                 hp->ah_rnumber = rnumber;
3539                 hp->ah_offset = roff;
3540                 hp->ah_len = len;
3541                 if (accattrp != NULL)
3542                         hp->ah_acc = *accattrp;
3543 
3544                 mr.map_op = DDI_MO_MAP_LOCKED;
3545                 mr.map_type = DDI_MT_RNUMBER;
3546                 mr.map_obj.rnumber = rnumber;
3547                 mr.map_prot = maxprot & dhp->dh_orig_maxprot;
3548                 mr.map_flags = DDI_MF_DEVICE_MAPPING;
3549                 mr.map_handlep = hp;
3550                 mr.map_vers = DDI_MAP_VERSION;
3551 
3552                 /*
3553                  * up the device tree to get pfn.
3554                  * The rootnex_map_regspec() routine in nexus drivers has been
3555                  * modified to return pfn if map_flags is DDI_MF_DEVICE_MAPPING.
3556                  */
3557                 err = ddi_map(dip, &mr, roff, len, (caddr_t *)&pfn);
3558                 hat_flags = hp->ah_hat_flags;
3559                 impl_acc_hdl_free(handle);
3560                 if (err) {
3561                         RELE_DHP_LOCK(dhp);
3562                         return (DDI_FAILURE);
3563                 }
3564                 /*
3565                  * Store result of ddi_map first in local variables, as we do
3566                  * not want to overwrite the existing dhp with wrong data.
3567                  */
3568                 dhp->dh_pfn = pfn;
3569                 dhp->dh_hat_attr = hat_flags;
3570         }
3571 
3572         /* clear the large page size flag */
3573         dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3574 
3575         dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
3576         dhp->dh_roff = ptob(btop(roff));
3577 
3578         /* setup the dh_mmulevel and DEVMAP_FLAG_LARGE */
3579         devmap_devmem_large_page_setup(dhp);
3580         dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3581         ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3582 
3583         RELE_DHP_LOCK(dhp);
3584         return (DDI_SUCCESS);
3585 }
3586 
3587 /*
3588  * called by driver devmap routine to pass kernel virtual address  mapping
3589  * info to the framework.    used only for kernel memory
3590  * allocated from ddi_umem_alloc().
3591  */
/*
 * devmap_umem_setup: called from a driver's devmap(9E) routine to export
 * kernel memory (a ddi_umem_alloc() cookie) through handle `dhc'.  The
 * [off, off+len) window must fit within the cookie; cache attributes in
 * `flags' and access attributes in `accattrp' are folded into the hat
 * attributes.  Marks the handle DEVMAP_SETUP_DONE on success.
 * Returns DDI_SUCCESS/DDI_FAILURE.
 */
int
devmap_umem_setup(devmap_cookie_t dhc, dev_info_t *dip,
    struct devmap_callback_ctl *callbackops, ddi_umem_cookie_t cookie,
    offset_t off, size_t len, uint_t maxprot, uint_t flags,
    ddi_device_acc_attr_t *accattrp)
{
	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;

#ifdef lint
	dip = dip;
#endif

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_SETUP,
	    "devmap_umem_setup:start dhp=%p offset=%llx cookie=%p len=%lx",
	    (void *)dhp, off, cookie, len);
	DEBUGF(2, (CE_CONT, "devmap_umem_setup: dhp %p offset %llx "
	    "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));

	if (cookie == NULL)
		return (DDI_FAILURE);

	/* For UMEM_TRASH, this restriction is not needed */
	if ((off + len) > cp->size)
		return (DDI_FAILURE);

	/* check if the cache attributes are supported */
	if (i_ddi_check_cache_attr(flags) == B_FALSE)
		return (DDI_FAILURE);

	/*
	 * First to check if this function has been called for this dhp.
	 */
	if (dhp->dh_flags & DEVMAP_SETUP_DONE)
		return (DDI_FAILURE);

	/* requested maxprot must still allow the mapping's protections */
	if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
		return (DDI_FAILURE);

	if (flags & DEVMAP_MAPPING_INVALID) {
		/*
		 * If DEVMAP_MAPPING_INVALID is specified, we have to grant
		 * remap permission.
		 */
		if (!(flags & DEVMAP_ALLOW_REMAP)) {
			return (DDI_FAILURE);
		}
	} else {
		/* bind the handle to the cookie's kernel virtual address */
		dhp->dh_cookie = cookie;
		dhp->dh_roff = ptob(btop(off));
		dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;
		/* set HAT cache attributes */
		i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
		/* set HAT endianess attributes */
		i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);
	}

	/*
	 * The default is _not_ to pass HAT_LOAD_NOCONSIST to hat_devload();
	 * we pass HAT_LOAD_NOCONSIST _only_ in cases where hat tries to
	 * create consistent mappings but our intention was to create
	 * non-consistent mappings.
	 *
	 * DEVMEM: hat figures it out it's DEVMEM and creates non-consistent
	 * mappings.
	 *
	 * kernel exported memory: hat figures it out it's memory and always
	 * creates consistent mappings.
	 *
	 * /dev/mem: non-consistent mappings. See comments in common/io/mem.c
	 *
	 * /dev/kmem: consistent mappings are created unless they are
	 * MAP_FIXED. We _explicitly_ tell hat to create non-consistent
	 * mappings by passing HAT_LOAD_NOCONSIST in case of MAP_FIXED
	 * mappings of /dev/kmem. See common/io/mem.c
	 */

	/* Only some of the flags bits are settable by the driver */
	dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS);

	/* round the length up to whole pages */
	dhp->dh_len = ptob(btopr(len));
	dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
	ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);

	if (callbackops != NULL) {
		bcopy(callbackops, &dhp->dh_callbackops,
		    sizeof (struct devmap_callback_ctl));
	}
	/*
	 * Initialize dh_lock if we want to do remap.
	 */
	if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
		mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
		dhp->dh_flags |= DEVMAP_LOCK_INITED;
	}

	dhp->dh_flags |= DEVMAP_SETUP_DONE;

	return (DDI_SUCCESS);
}
3692 
/*
 * devmap_umem_remap: rebind an already-set-up kernel-memory handle to a
 * new umem cookie/offset.  Requires DEVMAP_SETUP_DONE and
 * DEVMAP_ALLOW_REMAP on the handle; no remap flags are supported yet.
 * The old translations are unloaded under the dhp lock; the next fault
 * establishes the new ones.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
int
devmap_umem_remap(devmap_cookie_t dhc, dev_info_t *dip,
    ddi_umem_cookie_t cookie, offset_t off, size_t len, uint_t maxprot,
    uint_t flags, ddi_device_acc_attr_t *accattrp)
{
	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_REMAP,
	    "devmap_umem_remap:start dhp=%p offset=%llx cookie=%p len=%lx",
	    (void *)dhp, off, cookie, len);
	DEBUGF(2, (CE_CONT, "devmap_umem_remap: dhp %p offset %llx "
	    "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));

#ifdef lint
	dip = dip;
	accattrp = accattrp;
#endif
	/*
	 * Return failure if setup has not been done or no remap permission
	 * has been granted during the setup.
	 */
	if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
	    (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
		return (DDI_FAILURE);

	/* No flags supported for remap yet */
	if (flags != 0)
		return (DDI_FAILURE);

	/*
	 * check if the cache attributes are supported
	 * (flags is necessarily 0 here, so this check is effectively a
	 * placeholder until remap flags are supported)
	 */
	if (i_ddi_check_cache_attr(flags) == B_FALSE)
		return (DDI_FAILURE);

	/* requested maxprot must still allow the mapping's protections */
	if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
		return (DDI_FAILURE);

	/* For UMEM_TRASH, this restriction is not needed */
	if ((off + len) > cp->size)
		return (DDI_FAILURE);

	HOLD_DHP_LOCK(dhp);
	/*
	 * Unload the old mapping, so next fault will setup the new mappings
	 * Do this while holding the dhp lock so other faults dont reestablish
	 * the mappings
	 */
	hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
	    dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);

	/* bind the handle to the new cookie's kernel virtual address */
	dhp->dh_cookie = cookie;
	dhp->dh_roff = ptob(btop(off));
	dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;
	/* set HAT cache attributes */
	i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
	/* set HAT endianess attributes */
	i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);

	/* clear the large page size flag */
	dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;

	dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
	ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
	RELE_DHP_LOCK(dhp);
	return (DDI_SUCCESS);
}
3759 
3760 /*
3761  * to set timeout value for the driver's context management callback, e.g.
3762  * devmap_access().
3763  */
3764 void
3765 devmap_set_ctx_timeout(devmap_cookie_t dhc, clock_t ticks)
3766 {
3767         devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3768 
3769         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SET_CTX_TIMEOUT,
3770             "devmap_set_ctx_timeout:start dhp=%p ticks=%x",
3771             (void *)dhp, ticks);
3772         dhp->dh_timeout_length = ticks;
3773 }
3774 
3775 int
3776 devmap_default_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
3777     size_t len, uint_t type, uint_t rw)
3778 {
3779 #ifdef lint
3780         pvtp = pvtp;
3781 #endif
3782 
3783         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DEFAULT_ACCESS,
3784             "devmap_default_access:start");
3785         return (devmap_load(dhp, off, len, type, rw));
3786 }
3787 
3788 /*
3789  * segkmem_alloc() wrapper to allocate memory which is both
3790  * non-relocatable (for DR) and sharelocked, since the rest
3791  * of this segment driver requires it.
3792  */
3793 static void *
3794 devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag)
3795 {
3796         ASSERT(vmp != NULL);
3797         ASSERT(kvseg.s_base != NULL);
3798         vmflag |= (VM_NORELOC | SEGKMEM_SHARELOCKED);
3799         return (segkmem_alloc(vmp, size, vmflag));
3800 }
3801 
3802 /*
3803  * This is where things are a bit incestuous with seg_kmem: unlike
3804  * seg_kp, seg_kmem does not keep its pages long-term sharelocked, so
3805  * we need to do a bit of a dance around that to prevent duplication of
3806  * code until we decide to bite the bullet and implement a new kernel
3807  * segment for driver-allocated memory that is exported to user space.
3808  */
static void
devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size)
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(vmp != NULL);
	ASSERT(kvseg.s_base != NULL);
	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	/* drop the kernel translations before tearing down the pages */
	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		/*
		 * Use page_find() instead of page_lookup() to find the page
		 * since we know that it is hashed and has a shared lock.
		 */
		pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);

		if (pp == NULL)
			panic("devmap_free_pages: page not found");
		/*
		 * page_destroy() needs the exclusive lock; try to upgrade
		 * our shared hold in place, else drop it and reacquire
		 * exclusively via page_lookup().
		 */
		if (!page_tryupgrade(pp)) {
			page_unlock(pp);
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
			if (pp == NULL)
				panic("devmap_free_pages: page already freed");
		}
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		page_destroy(pp, 0);
	}
	page_unresv(npages);

	/*
	 * NOTE(review): this check is redundant with the ASSERT above
	 * in DEBUG kernels; kept as a defensive guard in non-DEBUG.
	 */
	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}
3848 
3849 /*
3850  * devmap_umem_alloc_np() replaces kmem_zalloc() as the method for
3851  * allocating non-pageable kmem in response to a ddi_umem_alloc()
3852  * default request. For now we allocate our own pages and we keep
3853  * them long-term sharelocked, since: A) the fault routines expect the
3854  * memory to already be locked; B) pageable umem is already long-term
3855  * locked; C) it's a lot of work to make it otherwise, particularly
3856  * since the nexus layer expects the pages to never fault. An RFE is to
3857  * not keep the pages long-term locked, but instead to be able to
3858  * take faults on them and simply look them up in kvp in case we
3859  * fault on them. Even then, we must take care not to let pageout
3860  * steal them from us since the data must remain resident; if we
3861  * do this we must come up with some way to pin the pages to prevent
3862  * faults while a driver is doing DMA to/from them.
3863  */
3864 static void *
3865 devmap_umem_alloc_np(size_t size, size_t flags)
3866 {
3867         void *buf;
3868         int vmflags = (flags & DDI_UMEM_NOSLEEP)? VM_NOSLEEP : VM_SLEEP;
3869 
3870         buf = vmem_alloc(umem_np_arena, size, vmflags);
3871         if (buf != NULL)
3872                 bzero(buf, size);
3873         return (buf);
3874 }
3875 
/*
 * Return non-pageable umem obtained from devmap_umem_alloc_np() to the
 * umem_np arena; the arena's free function tears down the pages.
 */
static void
devmap_umem_free_np(void *addr, size_t size)
{
	vmem_free(umem_np_arena, addr, size);
}
3881 
3882 /*
3883  * allocate page aligned kernel memory for exporting to user land.
3884  * The devmap framework will use the cookie allocated by ddi_umem_alloc()
3885  * to find a user virtual address that is in same color as the address
3886  * allocated here.
3887  */
3888 void *
3889 ddi_umem_alloc(size_t size, int flags, ddi_umem_cookie_t *cookie)
3890 {
3891         register size_t len = ptob(btopr(size));
3892         void *buf = NULL;
3893         struct ddi_umem_cookie *cp;
3894         int iflags = 0;
3895 
3896         *cookie = NULL;
3897 
3898         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_ALLOC,
3899             "devmap_umem_alloc:start");
3900         if (len == 0)
3901                 return ((void *)NULL);
3902 
3903         /*
3904          * allocate cookie
3905          */
3906         if ((cp = kmem_zalloc(sizeof (struct ddi_umem_cookie),
3907             flags & DDI_UMEM_NOSLEEP ? KM_NOSLEEP : KM_SLEEP)) == NULL) {
3908                 ASSERT(flags & DDI_UMEM_NOSLEEP);
3909                 return ((void *)NULL);
3910         }
3911 
3912         if (flags & DDI_UMEM_PAGEABLE) {
3913                 /* Only one of the flags is allowed */
3914                 ASSERT(!(flags & DDI_UMEM_TRASH));
3915                 /* initialize resource with 0 */
3916                 iflags = KPD_ZERO;
3917 
3918                 /*
3919                  * to allocate unlocked pageable memory, use segkp_get() to
3920                  * create a segkp segment.  Since segkp can only service kas,
3921                  * other segment drivers such as segdev have to do
3922                  * as_fault(segkp, SOFTLOCK) in its fault routine,
3923                  */
3924                 if (flags & DDI_UMEM_NOSLEEP)
3925                         iflags |= KPD_NOWAIT;
3926 
3927                 if ((buf = segkp_get(segkp, len, iflags)) == NULL) {
3928                         kmem_free(cp, sizeof (struct ddi_umem_cookie));
3929                         return ((void *)NULL);
3930                 }
3931                 cp->type = KMEM_PAGEABLE;
3932                 mutex_init(&cp->lock, NULL, MUTEX_DEFAULT, NULL);
3933                 cp->locked = 0;
3934         } else if (flags & DDI_UMEM_TRASH) {
3935                 /* Only one of the flags is allowed */
3936                 ASSERT(!(flags & DDI_UMEM_PAGEABLE));
3937                 cp->type = UMEM_TRASH;
3938                 buf = NULL;
3939         } else {
3940                 if ((buf = devmap_umem_alloc_np(len, flags)) == NULL) {
3941                         kmem_free(cp, sizeof (struct ddi_umem_cookie));
3942                         return ((void *)NULL);
3943                 }
3944 
3945                 cp->type = KMEM_NON_PAGEABLE;
3946         }
3947 
3948         /*
3949          * need to save size here.  size will be used when
3950          * we do kmem_free.
3951          */
3952         cp->size = len;
3953         cp->cvaddr = (caddr_t)buf;
3954 
3955         *cookie =  (void *)cp;
3956         return (buf);
3957 }
3958 
/*
 * Free memory (and its cookie) obtained from ddi_umem_alloc().
 * A NULL cookie is a no-op.  UMEM_LOCKED cookies are redirected to
 * ddi_umem_unlock(), which also frees the cookie itself.
 */
void
ddi_umem_free(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie *cp;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_FREE,
	    "devmap_umem_free:start");

	/*
	 * if cookie is NULL, no effects on the system
	 */
	if (cookie == NULL)
		return;

	cp = (struct ddi_umem_cookie *)cookie;

	switch (cp->type) {
	case KMEM_PAGEABLE :
		ASSERT(cp->cvaddr != NULL && cp->size != 0);
		/*
		 * Check if there are still any pending faults on the cookie
		 * while the driver is deleting it,
		 * XXX - could change to an ASSERT but wont catch errant drivers
		 */
		mutex_enter(&cp->lock);
		if (cp->locked) {
			mutex_exit(&cp->lock);
			panic("ddi_umem_free for cookie with pending faults %p",
			    (void *)cp);
			/* unreachable after panic; defensive return */
			return;
		}

		/* No pending faults; return the segkp resource. */
		segkp_release(segkp, cp->cvaddr);

		/*
		 * release mutex associated with this cookie.
		 * NOTE(review): the mutex is still held here — confirm
		 * mutex_destroy() of a held lock is intended/safe in this
		 * context.
		 */
		mutex_destroy(&cp->lock);
		break;
	case KMEM_NON_PAGEABLE :
		ASSERT(cp->cvaddr != NULL && cp->size != 0);
		/* Hand the pages back to the umem_np arena. */
		devmap_umem_free_np(cp->cvaddr, cp->size);
		break;
	case UMEM_TRASH :
		/* Trash cookies have no backing memory; free only the cookie */
		break;
	case UMEM_LOCKED :
		/* Callers should use ddi_umem_unlock for this type */
		ddi_umem_unlock(cookie);
		/* Frees the cookie too */
		return;
	default:
		/* panic so we can diagnose the underlying cause */
		panic("ddi_umem_free: illegal cookie type 0x%x\n",
		    cp->type);
	}

	kmem_free(cookie, sizeof (struct ddi_umem_cookie));
}
4017 
4018 
4019 static int
4020 segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
4021 {
4022         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4023 
4024         /*
4025          * It looks as if it is always mapped shared
4026          */
4027         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4028             "segdev_getmemid:start");
4029         memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
4030         memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4031         return (0);
4032 }
4033 
4034 /*ARGSUSED*/
4035 static lgrp_mem_policy_info_t *
4036 segdev_getpolicy(struct seg *seg, caddr_t addr)
4037 {
4038         return (NULL);
4039 }
4040 
4041 /*ARGSUSED*/
4042 static int
4043 segdev_capable(struct seg *seg, segcapability_t capability)
4044 {
4045         return (0);
4046 }
4047 
4048 /*
4049  * ddi_umem_alloc() non-pageable quantum cache max size.
4050  * This is just a SWAG.
4051  */
4052 #define DEVMAP_UMEM_QUANTUM     (8*PAGESIZE)
4053 
4054 /*
4055  * Initialize seg_dev from boot. This routine sets up the trash page
4056  * and creates the umem_np_arena used to back non-pageable memory
4057  * requests.
4058  */
4059 void
4060 segdev_init(void)
4061 {
4062         struct seg kseg;
4063 
4064         umem_np_arena = vmem_create("umem_np", NULL, 0, PAGESIZE,
4065             devmap_alloc_pages, devmap_free_pages, heap_arena,
4066             DEVMAP_UMEM_QUANTUM, VM_SLEEP);
4067 
4068         kseg.s_as = &kas;
4069         trashpp = page_create_va(&trashvp, 0, PAGESIZE,
4070             PG_NORELOC | PG_EXCL | PG_WAIT, &kseg, NULL);
4071         if (trashpp == NULL)
4072                 panic("segdev_init: failed to create trash page");
4073         pagezero(trashpp, 0, PAGESIZE);
4074         page_downgrade(trashpp);
4075 }
4076 
4077 /*
4078  * Invoke platform-dependent support routines so that /proc can have
4079  * the platform code deal with curious hardware.
4080  */
4081 int
4082 segdev_copyfrom(struct seg *seg,
4083     caddr_t uaddr, const void *devaddr, void *kaddr, size_t len)
4084 {
4085         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4086         struct snode *sp = VTOS(VTOCVP(sdp->vp));
4087 
4088         return (e_ddi_copyfromdev(sp->s_dip,
4089             (off_t)(uaddr - seg->s_base), devaddr, kaddr, len));
4090 }
4091 
4092 int
4093 segdev_copyto(struct seg *seg,
4094     caddr_t uaddr, const void *kaddr, void *devaddr, size_t len)
4095 {
4096         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4097         struct snode *sp = VTOS(VTOCVP(sdp->vp));
4098 
4099         return (e_ddi_copytodev(sp->s_dip,
4100             (off_t)(uaddr - seg->s_base), kaddr, devaddr, len));
4101 }