Print this page
patch as-lock-macro-simplification: before/after listing showing the address-space lock assertion macros simplified from the two-argument form AS_LOCK_HELD(as, &as->a_lock) / AS_WRITE_HELD(as, &as->a_lock) to the one-argument form AS_LOCK_HELD(as) / AS_WRITE_HELD(as); the code is otherwise unchanged.


 351         devctx->timeout = 0;
 352         cv_signal(&devctx->cv);
 353         mutex_exit(&devctx->lock);
 354 }
 355 
 356 /*
 357  * Create a device segment.
 358  */
 359 int
 360 segdev_create(struct seg *seg, void *argsp)
 361 {
 362         struct segdev_data *sdp;
 363         struct segdev_crargs *a = (struct segdev_crargs *)argsp;
 364         devmap_handle_t *dhp = (devmap_handle_t *)a->devmap_data;
 365         int error;
 366 
 367         /*
 368          * Since the address space is "write" locked, we
 369          * don't need the segment lock to protect "segdev" data.
 370          */
 371         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 372 
 373         hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
 374 
 375         sdp = sdp_alloc();
 376 
 377         sdp->mapfunc = a->mapfunc;
 378         sdp->offset = a->offset;
 379         sdp->prot = a->prot;
 380         sdp->maxprot = a->maxprot;
 381         sdp->type = a->type;
 382         sdp->pageprot = 0;
 383         sdp->softlockcnt = 0;
 384         sdp->vpage = NULL;
 385 
 386         if (sdp->mapfunc == NULL)
 387                 sdp->devmap_data = dhp;
 388         else
 389                 sdp->devmap_data = dhp = NULL;
 390 
 391         sdp->hat_flags = a->hat_flags;


 457  */
 458 static int
 459 segdev_dup(struct seg *seg, struct seg *newseg)
 460 {
 461         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
 462         struct segdev_data *newsdp;
 463         devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
 464         size_t npages;
 465         int ret;
 466 
 467         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DUP,
 468             "segdev_dup:start dhp=%p, seg=%p", (void *)dhp, (void *)seg);
 469 
 470         DEBUGF(3, (CE_CONT, "segdev_dup: dhp %p seg %p\n",
 471             (void *)dhp, (void *)seg));
 472 
 473         /*
 474          * Since the address space is "write" locked, we
 475          * don't need the segment lock to protect "segdev" data.
 476          */
 477         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 478 
 479         newsdp = sdp_alloc();
 480 
 481         newseg->s_ops = seg->s_ops;
 482         newseg->s_data = (void *)newsdp;
 483 
 484         VN_HOLD(sdp->vp);
 485         newsdp->vp   = sdp->vp;
 486         newsdp->mapfunc = sdp->mapfunc;
 487         newsdp->offset       = sdp->offset;
 488         newsdp->pageprot = sdp->pageprot;
 489         newsdp->prot = sdp->prot;
 490         newsdp->maxprot = sdp->maxprot;
 491         newsdp->type = sdp->type;
 492         newsdp->hat_attr = sdp->hat_attr;
 493         newsdp->hat_flags = sdp->hat_flags;
 494         newsdp->softlockcnt = 0;
 495 
 496         /*
 497          * Initialize per page data if the segment we are


 629         devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
 630         devmap_handle_t *dhpp;
 631         devmap_handle_t *newdhp;
 632         struct devmap_callback_ctl *callbackops;
 633         caddr_t nbase;
 634         offset_t off;
 635         ulong_t nsize;
 636         size_t mlen, sz;
 637 
 638         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP,
 639             "segdev_unmap:start dhp=%p, seg=%p addr=%p len=%lx",
 640             (void *)dhp, (void *)seg, (void *)addr, len);
 641 
 642         DEBUGF(3, (CE_CONT, "segdev_unmap: dhp %p seg %p addr %p len %lx\n",
 643             (void *)dhp, (void *)seg, (void *)addr, len));
 644 
 645         /*
 646          * Since the address space is "write" locked, we
 647          * don't need the segment lock to protect "segdev" data.
 648          */
 649         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 650 
 651         if ((sz = sdp->softlockcnt) > 0) {
 652                 /*
 653                  * Fail the unmap if pages are SOFTLOCKed through this mapping.
 654                  * softlockcnt is protected from change by the as write lock.
 655                  */
 656                 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK1,
 657                     "segdev_unmap:error softlockcnt = %ld", sz);
 658                 DEBUGF(1, (CE_CONT, "segdev_unmap: softlockcnt %ld\n", sz));
 659                 return (EAGAIN);
 660         }
 661 
 662         /*
 663          * Check for bad sizes
 664          */
 665         if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
 666             (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
 667                 panic("segdev_unmap");
 668 
 669         if (dhp != NULL) {


1118 }
1119 
1120 /*
1121  * Free a segment.
1122  */
1123 static void
1124 segdev_free(struct seg *seg)
1125 {
1126         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1127         devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
1128 
1129         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FREE,
1130             "segdev_free: dhp=%p seg=%p", (void *)dhp, (void *)seg);
1131         DEBUGF(3, (CE_CONT, "segdev_free: dhp %p seg %p\n",
1132             (void *)dhp, (void *)seg));
1133 
1134         /*
1135          * Since the address space is "write" locked, we
1136          * don't need the segment lock to protect "segdev" data.
1137          */
1138         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1139 
1140         while (dhp != NULL)
1141                 dhp = devmap_handle_unmap(dhp);
1142 
1143         VN_RELE(sdp->vp);
1144         if (sdp->vpage != NULL)
1145                 kmem_free(sdp->vpage, vpgtob(seg_pages(seg)));
1146 
1147         rw_destroy(&sdp->lock);
1148         kmem_free(sdp, sizeof (*sdp));
1149 }
1150 
1151 static void
1152 free_devmap_handle(devmap_handle_t *dhp)
1153 {
1154         register devmap_handle_t *dhpp;
1155 
1156         /*
1157          * free up devmap handle
1158          */


1600         enum fault_type type,           /* type of fault */
1601         enum seg_rw rw)                 /* type of access at fault */
1602 {
1603         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1604         devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
1605         devmap_handle_t *dhp;
1606         struct devmap_softlock *slock = NULL;
1607         ulong_t slpage = 0;
1608         ulong_t off;
1609         caddr_t maddr = addr;
1610         int err;
1611         int err_is_faultcode = 0;
1612 
1613         TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_FAULT,
1614             "segdev_fault: dhp_head=%p seg=%p addr=%p len=%lx type=%x",
1615             (void *)dhp_head, (void *)seg, (void *)addr, len, type);
1616         DEBUGF(7, (CE_CONT, "segdev_fault: dhp_head %p seg %p "
1617             "addr %p len %lx type %x\n",
1618             (void *)dhp_head, (void *)seg, (void *)addr, len, type));
1619 
1620         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1621 
1622         /* Handle non-devmap case */
1623         if (dhp_head == NULL)
1624                 return (segdev_faultpages(hat, seg, addr, len, type, rw, NULL));
1625 
1626         /* Find devmap handle */
1627         if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
1628                 return (FC_NOMAP);
1629 
1630         /*
1631          * The seg_dev driver does not implement copy-on-write,
1632          * and always loads translations with maximal allowed permissions
1633          * but we got an fault trying to access the device.
1634          * Servicing the fault is not going to result in any better result
1635          * RFE: If we want devmap_access callbacks to be involved in F_PROT
1636          *      faults, then the code below is written for that
1637          *      Pending resolution of the following:
1638          *      - determine if the F_INVAL/F_SOFTLOCK syncing
1639          *      is needed for F_PROT also or not. The code below assumes it does
1640          *      - If driver sees F_PROT and calls devmap_load with same type,


2040                         ASSERT(len >= done);
2041                         release_kpmem_lock(kpmem_cookie, btopr(len - done));
2042                 }
2043         } else if ((kpmem_cookie != NULL) && (type != F_SOFTLOCK)) {
2044                 /* for non-SOFTLOCK cases, release kpmem */
2045                 release_kpmem_lock(kpmem_cookie, btopr(len));
2046         }
2047         return (err);
2048 }
2049 
2050 /*
2051  * Asynchronous page fault.  We simply do nothing since this
2052  * entry point is not supposed to load up the translation.
2053  */
2054 /*ARGSUSED*/
2055 static faultcode_t
2056 segdev_faulta(struct seg *seg, caddr_t addr)
2057 {
2058         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FAULTA,
2059             "segdev_faulta: seg=%p addr=%p", (void *)seg, (void *)addr);
2060         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2061 
             /* Nothing to preload for device mappings; report success. */
2062         return (0);
2063 }
2064 
2065 static int
2066 segdev_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2067 {
2068         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2069         register devmap_handle_t *dhp;
2070         register struct vpage *vp, *evp;
2071         devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
2072         ulong_t off;
2073         size_t mlen, sz;
2074 
2075         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT,
2076             "segdev_setprot:start seg=%p addr=%p len=%lx prot=%x",
2077             (void *)seg, (void *)addr, len, prot);
2078         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2079 
2080         if ((sz = sdp->softlockcnt) > 0 && dhp_head != NULL) {
2081                 /*
2082                  * Fail the setprot if pages are SOFTLOCKed through this
2083                  * mapping.
2084                  * Softlockcnt is protected from change by the as read lock.
2085                  */
2086                 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT_CK1,
2087                     "segdev_setprot:error softlockcnt=%lx", sz);
2088                 DEBUGF(1, (CE_CONT, "segdev_setprot: softlockcnt %ld\n", sz));
2089                 return (EAGAIN);
2090         }
2091 
2092         if (dhp_head != NULL) {
2093                 if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
2094                         return (EINVAL);
2095 
2096                 /*
2097                  * check if violate maxprot.
2098                  */


2181                 /*
2182                  * RFE: the segment should keep track of all attributes
2183                  * allowing us to remove the deprecated hat_chgprot
2184                  * and use hat_chgattr.
2185                  */
2186                 hat_chgprot(seg->s_as->a_hat, addr, len, prot);
2187         }
2188 
2189         return (0);
2190 }
2191 
/*
 * Verify that the requested protections 'prot' are permitted over
 * [addr, addr + len) in this segment.  When pageprot == 0 the
 * segment-wide prot is checked; otherwise each vpage entry is
 * consulted.  Returns 0 if allowed, EACCES otherwise.
 */
2192 static int
2193 segdev_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2194 {
2195         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2196         struct vpage *vp, *evp;
2197 
2198         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_CHECKPROT,
2199             "segdev_checkprot:start seg=%p addr=%p len=%lx prot=%x",
2200             (void *)seg, (void *)addr, len, prot);
2201         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2202 
2203         /*
2204          * If segment protection can be used, simply check against them
2205          */
2206         rw_enter(&sdp->lock, RW_READER);
2207         if (sdp->pageprot == 0) {
2208                 register int err;
2209 
2210                 err = ((sdp->prot & prot) != prot) ? EACCES : 0;
2211                 rw_exit(&sdp->lock);
2212                 return (err);
2213         }
2214 
2215         /*
2216          * Have to check down to the vpage level
2217          */
2218         evp = &sdp->vpage[seg_page(seg, addr + len)];
2219         for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
2220                 if ((VPP_PROT(vp) & prot) != prot) {
2221                         rw_exit(&sdp->lock);
2222                         return (EACCES);
2223                 }
2224         }
2225         rw_exit(&sdp->lock);
2226         return (0);
2227 }
2228 
/*
 * Fill protv[] with the protections for each page of [addr, addr + len).
 * Uses the segment-wide prot unless per-page protections (pageprot)
 * are in effect, in which case each vpage entry is consulted.
 * Always returns 0.
 */
2229 static int
2230 segdev_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2231 {
2232         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2233         size_t pgno;
2234 
2235         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_GETPROT,
2236             "segdev_getprot:start seg=%p addr=%p len=%lx protv=%p",
2237             (void *)seg, (void *)addr, len, (void *)protv);
2238         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2239 
2240         pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
2241         if (pgno != 0) {
2242                 rw_enter(&sdp->lock, RW_READER);
2243                 if (sdp->pageprot == 0) {
2244                         do {
2245                                 protv[--pgno] = sdp->prot;
2246                         } while (pgno != 0);
2247                 } else {
2248                         size_t pgoff = seg_page(seg, addr);
2249 
2250                         do {
2251                                 pgno--;
2252                                 protv[pgno] =
2253                                     VPP_PROT(&sdp->vpage[pgno + pgoff]);
2254                         } while (pgno != 0);
2255                 }
2256                 rw_exit(&sdp->lock);
2257         }
2258         return (0);
2259 }
2260 
/*
 * Return the device offset corresponding to virtual address 'addr':
 * the segment's base offset plus addr's displacement from s_base.
 */
2261 static u_offset_t
2262 segdev_getoffset(register struct seg *seg, caddr_t addr)
2263 {
2264         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2265 
2266         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETOFFSET,
2267             "segdev_getoffset:start seg=%p addr=%p", (void *)seg, (void *)addr);
2268 
2269         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2270 
2271         return ((u_offset_t)sdp->offset + (addr - seg->s_base));
2272 }
2273 
/*
 * Return the segment's mapping type (sdp->type, set at create time);
 * 'addr' is ignored.
 */
2274 /*ARGSUSED*/
2275 static int
2276 segdev_gettype(register struct seg *seg, caddr_t addr)
2277 {
2278         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2279 
2280         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETTYPE,
2281             "segdev_gettype:start seg=%p addr=%p", (void *)seg, (void *)addr);
2282 
2283         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2284 
2285         return (sdp->type);
2286 }
2287 
2288 
/*
 * Return, via *vpp, the vnode on which this segment's pages are hung;
 * 'addr' is ignored.  Always returns 0.
 */
2289 /*ARGSUSED*/
2290 static int
2291 segdev_getvp(register struct seg *seg, caddr_t addr, struct vnode **vpp)
2292 {
2293         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2294 
2295         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETVP,
2296             "segdev_getvp:start seg=%p addr=%p", (void *)seg, (void *)addr);
2297 
2298         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2299 
2300         /*
2301          * Note that this vp is the common_vp of the device, where the
2302          * pages are hung ..
2303          */
2304         *vpp = VTOCVP(sdp->vp);
2305 
2306         return (0);
2307 }
2308 
/*
 * Placeholder for segment operations this driver does not support:
 * unconditionally panics if ever invoked.
 */
2309 static void
2310 segdev_badop(void)
2311 {
2312         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGDEV_BADOP,
2313             "segdev_badop:start");
2314         panic("segdev_badop");
2315         /*NOTREACHED*/
2316 }
2317 
2318 /*
2319  * segdev pages are not in the cache, and thus can't really be controlled.
2320  * Hence, syncs are simply always successful.
2321  */
2322 /*ARGSUSED*/
2323 static int
2324 segdev_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
2325 {
2326         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SYNC, "segdev_sync:start");
2327 
2328         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2329 
             /* Nothing to flush for device pages; report success. */
2330         return (0);
2331 }
2332 
2333 /*
2334  * segdev pages are always "in core".
2335  */
2336 /*ARGSUSED*/
2337 static size_t
2338 segdev_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
2339 {
2340         size_t v = 0;
2341 
2342         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_INCORE, "segdev_incore:start");
2343 
2344         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2345 
             /*
              * Round len up to whole pages, mark each page resident in
              * vec[], and return the number of bytes covered.
              */
2346         for (len = (len + PAGEOFFSET) & PAGEMASK; len; len -= PAGESIZE,
2347             v += PAGESIZE)
2348                 *vec++ = 1;
2349         return (v);
2350 }
2351 
2352 /*
2353  * segdev pages are not in the cache, and thus can't really be controlled.
2354  * Hence, locks are simply always successful.
2355  */
2356 /*ARGSUSED*/
2357 static int
2358 segdev_lockop(struct seg *seg, caddr_t addr,
2359     size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
2360 {
2361         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_LOCKOP, "segdev_lockop:start");
2362 
2363         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2364 
             /* No page-cache state to lock or unlock; report success. */
2365         return (0);
2366 }
2367 
2368 /*
2369  * segdev pages are not in the cache, and thus can't really be controlled.
2370  * Hence, advise is simply always successful.
2371  */
2372 /*ARGSUSED*/
2373 static int
2374 segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2375 {
2376         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");
2377 
2378         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2379 
             /* Advice cannot influence device mappings; report success. */
2380         return (0);
2381 }
2382 
2383 /*
2384  * segdev pages are not dumped, so we just return
2385  */
2386 /*ARGSUSED*/
2387 static void
2388 segdev_dump(struct seg *seg)
2389 {}      /* intentionally empty: nothing to dump for device segments */
2390 
2391 /*
2392  * ddi_segmap_setup:    Used by drivers who wish specify mapping attributes
2393  *                      for a segment.  Called from a drivers segmap(9E)
2394  *                      routine.
2395  */
2396 /*ARGSUSED*/
2397 int
2398 ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,


3063 {
3064         devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3065         struct as *asp = dhp->dh_seg->s_as;
3066         caddr_t addr;
3067         ulong_t size;
3068         ssize_t soff;   /* offset from the beginning of the segment */
3069         int rc;
3070 
3071         TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_LOAD,
3072             "devmap_load:start dhp=%p offset=%llx len=%lx",
3073             (void *)dhp, offset, len);
3074 
3075         DEBUGF(7, (CE_CONT, "devmap_load: dhp %p offset %llx len %lx\n",
3076             (void *)dhp, offset, len));
3077 
3078         /*
3079          *      Hat layer only supports devload to process' context for which
3080          *      the as lock is held. Verify here and return error if drivers
3081          *      inadvertently call devmap_load on a wrong devmap handle.
3082          */
3083         if ((asp != &kas) && !AS_LOCK_HELD(asp, &asp->a_lock))
3084                 return (FC_MAKE_ERR(EINVAL));
3085 
3086         soff = (ssize_t)(offset - dhp->dh_uoff);
3087         soff = round_down_p2(soff, PAGESIZE);
3088         if (soff < 0 || soff >= dhp->dh_len)
3089                 return (FC_MAKE_ERR(EINVAL));
3090 
3091         /*
3092          * Address and size must be page aligned.  Len is set to the
3093          * number of bytes in the number of pages that are required to
3094          * support len.  Offset is set to the byte offset of the first byte
3095          * of the page that contains offset.
3096          */
3097         len = round_up_p2(len, PAGESIZE);
3098 
3099         /*
3100          * If len == 0, then calculate the size by getting
3101          * the number of bytes from offset to the end of the segment.
3102          */
3103         if (len == 0)




 351         devctx->timeout = 0;
 352         cv_signal(&devctx->cv);
 353         mutex_exit(&devctx->lock);
 354 }
 355 
 356 /*
 357  * Create a device segment.
 358  */
 359 int
 360 segdev_create(struct seg *seg, void *argsp)
 361 {
 362         struct segdev_data *sdp;
 363         struct segdev_crargs *a = (struct segdev_crargs *)argsp;
 364         devmap_handle_t *dhp = (devmap_handle_t *)a->devmap_data;
 365         int error;
 366 
 367         /*
 368          * Since the address space is "write" locked, we
 369          * don't need the segment lock to protect "segdev" data.
 370          */
 371         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 372 
 373         hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
 374 
 375         sdp = sdp_alloc();
 376 
 377         sdp->mapfunc = a->mapfunc;
 378         sdp->offset = a->offset;
 379         sdp->prot = a->prot;
 380         sdp->maxprot = a->maxprot;
 381         sdp->type = a->type;
 382         sdp->pageprot = 0;
 383         sdp->softlockcnt = 0;
 384         sdp->vpage = NULL;
 385 
 386         if (sdp->mapfunc == NULL)
 387                 sdp->devmap_data = dhp;
 388         else
 389                 sdp->devmap_data = dhp = NULL;
 390 
 391         sdp->hat_flags = a->hat_flags;


 457  */
 458 static int
 459 segdev_dup(struct seg *seg, struct seg *newseg)
 460 {
 461         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
 462         struct segdev_data *newsdp;
 463         devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
 464         size_t npages;
 465         int ret;
 466 
 467         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DUP,
 468             "segdev_dup:start dhp=%p, seg=%p", (void *)dhp, (void *)seg);
 469 
 470         DEBUGF(3, (CE_CONT, "segdev_dup: dhp %p seg %p\n",
 471             (void *)dhp, (void *)seg));
 472 
 473         /*
 474          * Since the address space is "write" locked, we
 475          * don't need the segment lock to protect "segdev" data.
 476          */
 477         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 478 
 479         newsdp = sdp_alloc();
 480 
 481         newseg->s_ops = seg->s_ops;
 482         newseg->s_data = (void *)newsdp;
 483 
 484         VN_HOLD(sdp->vp);
 485         newsdp->vp   = sdp->vp;
 486         newsdp->mapfunc = sdp->mapfunc;
 487         newsdp->offset       = sdp->offset;
 488         newsdp->pageprot = sdp->pageprot;
 489         newsdp->prot = sdp->prot;
 490         newsdp->maxprot = sdp->maxprot;
 491         newsdp->type = sdp->type;
 492         newsdp->hat_attr = sdp->hat_attr;
 493         newsdp->hat_flags = sdp->hat_flags;
 494         newsdp->softlockcnt = 0;
 495 
 496         /*
 497          * Initialize per page data if the segment we are


 629         devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
 630         devmap_handle_t *dhpp;
 631         devmap_handle_t *newdhp;
 632         struct devmap_callback_ctl *callbackops;
 633         caddr_t nbase;
 634         offset_t off;
 635         ulong_t nsize;
 636         size_t mlen, sz;
 637 
 638         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP,
 639             "segdev_unmap:start dhp=%p, seg=%p addr=%p len=%lx",
 640             (void *)dhp, (void *)seg, (void *)addr, len);
 641 
 642         DEBUGF(3, (CE_CONT, "segdev_unmap: dhp %p seg %p addr %p len %lx\n",
 643             (void *)dhp, (void *)seg, (void *)addr, len));
 644 
 645         /*
 646          * Since the address space is "write" locked, we
 647          * don't need the segment lock to protect "segdev" data.
 648          */
 649         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 650 
 651         if ((sz = sdp->softlockcnt) > 0) {
 652                 /*
 653                  * Fail the unmap if pages are SOFTLOCKed through this mapping.
 654                  * softlockcnt is protected from change by the as write lock.
 655                  */
 656                 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK1,
 657                     "segdev_unmap:error softlockcnt = %ld", sz);
 658                 DEBUGF(1, (CE_CONT, "segdev_unmap: softlockcnt %ld\n", sz));
 659                 return (EAGAIN);
 660         }
 661 
 662         /*
 663          * Check for bad sizes
 664          */
 665         if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
 666             (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
 667                 panic("segdev_unmap");
 668 
 669         if (dhp != NULL) {


1118 }
1119 
1120 /*
1121  * Free a segment.
1122  */
1123 static void
1124 segdev_free(struct seg *seg)
1125 {
1126         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1127         devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
1128 
1129         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FREE,
1130             "segdev_free: dhp=%p seg=%p", (void *)dhp, (void *)seg);
1131         DEBUGF(3, (CE_CONT, "segdev_free: dhp %p seg %p\n",
1132             (void *)dhp, (void *)seg));
1133 
1134         /*
1135          * Since the address space is "write" locked, we
1136          * don't need the segment lock to protect "segdev" data.
1137          */
1138         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1139 
             /* Tear down the devmap handle chain one handle at a time. */
1140         while (dhp != NULL)
1141                 dhp = devmap_handle_unmap(dhp);
1142 
             /* Release the vnode hold and per-page data before freeing sdp. */
1143         VN_RELE(sdp->vp);
1144         if (sdp->vpage != NULL)
1145                 kmem_free(sdp->vpage, vpgtob(seg_pages(seg)));
1146 
1147         rw_destroy(&sdp->lock);
1148         kmem_free(sdp, sizeof (*sdp));
1149 }
1150 
1151 static void
1152 free_devmap_handle(devmap_handle_t *dhp)
1153 {
1154         register devmap_handle_t *dhpp;
1155 
1156         /*
1157          * free up devmap handle
1158          */


1600         enum fault_type type,           /* type of fault */
1601         enum seg_rw rw)                 /* type of access at fault */
1602 {
1603         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1604         devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
1605         devmap_handle_t *dhp;
1606         struct devmap_softlock *slock = NULL;
1607         ulong_t slpage = 0;
1608         ulong_t off;
1609         caddr_t maddr = addr;
1610         int err;
1611         int err_is_faultcode = 0;
1612 
1613         TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_FAULT,
1614             "segdev_fault: dhp_head=%p seg=%p addr=%p len=%lx type=%x",
1615             (void *)dhp_head, (void *)seg, (void *)addr, len, type);
1616         DEBUGF(7, (CE_CONT, "segdev_fault: dhp_head %p seg %p "
1617             "addr %p len %lx type %x\n",
1618             (void *)dhp_head, (void *)seg, (void *)addr, len, type));
1619 
1620         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1621 
1622         /* Handle non-devmap case */
1623         if (dhp_head == NULL)
1624                 return (segdev_faultpages(hat, seg, addr, len, type, rw, NULL));
1625 
1626         /* Find devmap handle */
1627         if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
1628                 return (FC_NOMAP);
1629 
1630         /*
1631          * The seg_dev driver does not implement copy-on-write,
1632          * and always loads translations with maximal allowed permissions
1633          * but we got an fault trying to access the device.
1634          * Servicing the fault is not going to result in any better result
1635          * RFE: If we want devmap_access callbacks to be involved in F_PROT
1636          *      faults, then the code below is written for that
1637          *      Pending resolution of the following:
1638          *      - determine if the F_INVAL/F_SOFTLOCK syncing
1639          *      is needed for F_PROT also or not. The code below assumes it does
1640          *      - If driver sees F_PROT and calls devmap_load with same type,


2040                         ASSERT(len >= done);
2041                         release_kpmem_lock(kpmem_cookie, btopr(len - done));
2042                 }
2043         } else if ((kpmem_cookie != NULL) && (type != F_SOFTLOCK)) {
2044                 /* for non-SOFTLOCK cases, release kpmem */
2045                 release_kpmem_lock(kpmem_cookie, btopr(len));
2046         }
2047         return (err);
2048 }
2049 
2050 /*
2051  * Asynchronous page fault.  We simply do nothing since this
2052  * entry point is not supposed to load up the translation.
2053  */
2054 /*ARGSUSED*/
2055 static faultcode_t
2056 segdev_faulta(struct seg *seg, caddr_t addr)
2057 {
2058         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FAULTA,
2059             "segdev_faulta: seg=%p addr=%p", (void *)seg, (void *)addr);
2060         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2061 
             /* Nothing to preload for device mappings; report success. */
2062         return (0);
2063 }
2064 
2065 static int
2066 segdev_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2067 {
2068         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2069         register devmap_handle_t *dhp;
2070         register struct vpage *vp, *evp;
2071         devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
2072         ulong_t off;
2073         size_t mlen, sz;
2074 
2075         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT,
2076             "segdev_setprot:start seg=%p addr=%p len=%lx prot=%x",
2077             (void *)seg, (void *)addr, len, prot);
2078         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2079 
2080         if ((sz = sdp->softlockcnt) > 0 && dhp_head != NULL) {
2081                 /*
2082                  * Fail the setprot if pages are SOFTLOCKed through this
2083                  * mapping.
2084                  * Softlockcnt is protected from change by the as read lock.
2085                  */
2086                 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT_CK1,
2087                     "segdev_setprot:error softlockcnt=%lx", sz);
2088                 DEBUGF(1, (CE_CONT, "segdev_setprot: softlockcnt %ld\n", sz));
2089                 return (EAGAIN);
2090         }
2091 
2092         if (dhp_head != NULL) {
2093                 if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
2094                         return (EINVAL);
2095 
2096                 /*
2097                  * check if violate maxprot.
2098                  */


2181                 /*
2182                  * RFE: the segment should keep track of all attributes
2183                  * allowing us to remove the deprecated hat_chgprot
2184                  * and use hat_chgattr.
2185                  */
2186                 hat_chgprot(seg->s_as->a_hat, addr, len, prot);
2187         }
2188 
2189         return (0);
2190 }
2191 
/*
 * Verify that the requested protections 'prot' are permitted over
 * [addr, addr + len) in this segment.  When pageprot == 0 the
 * segment-wide prot is checked; otherwise each vpage entry is
 * consulted.  Returns 0 if allowed, EACCES otherwise.
 */
2192 static int
2193 segdev_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2194 {
2195         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2196         struct vpage *vp, *evp;
2197 
2198         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_CHECKPROT,
2199             "segdev_checkprot:start seg=%p addr=%p len=%lx prot=%x",
2200             (void *)seg, (void *)addr, len, prot);
2201         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2202 
2203         /*
2204          * If segment protection can be used, simply check against them
2205          */
2206         rw_enter(&sdp->lock, RW_READER);
2207         if (sdp->pageprot == 0) {
2208                 register int err;
2209 
2210                 err = ((sdp->prot & prot) != prot) ? EACCES : 0;
2211                 rw_exit(&sdp->lock);
2212                 return (err);
2213         }
2214 
2215         /*
2216          * Have to check down to the vpage level
2217          */
2218         evp = &sdp->vpage[seg_page(seg, addr + len)];
2219         for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
2220                 if ((VPP_PROT(vp) & prot) != prot) {
2221                         rw_exit(&sdp->lock);
2222                         return (EACCES);
2223                 }
2224         }
2225         rw_exit(&sdp->lock);
2226         return (0);
2227 }
2228 
/*
 * Fill protv[] with the protections for each page of [addr, addr + len).
 * Uses the segment-wide prot unless per-page protections (pageprot)
 * are in effect, in which case each vpage entry is consulted.
 * Always returns 0.
 */
2229 static int
2230 segdev_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2231 {
2232         struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2233         size_t pgno;
2234 
2235         TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_GETPROT,
2236             "segdev_getprot:start seg=%p addr=%p len=%lx protv=%p",
2237             (void *)seg, (void *)addr, len, (void *)protv);
2238         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2239 
2240         pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
2241         if (pgno != 0) {
2242                 rw_enter(&sdp->lock, RW_READER);
2243                 if (sdp->pageprot == 0) {
2244                         do {
2245                                 protv[--pgno] = sdp->prot;
2246                         } while (pgno != 0);
2247                 } else {
2248                         size_t pgoff = seg_page(seg, addr);
2249 
2250                         do {
2251                                 pgno--;
2252                                 protv[pgno] =
2253                                     VPP_PROT(&sdp->vpage[pgno + pgoff]);
2254                         } while (pgno != 0);
2255                 }
2256                 rw_exit(&sdp->lock);
2257         }
2258         return (0);
2259 }
2260 
2261 static u_offset_t
2262 segdev_getoffset(register struct seg *seg, caddr_t addr)
2263 {
2264         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2265 
2266         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETOFFSET,
2267             "segdev_getoffset:start seg=%p addr=%p", (void *)seg, (void *)addr);
2268 
2269         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2270 
2271         return ((u_offset_t)sdp->offset + (addr - seg->s_base));
2272 }
2273 
2274 /*ARGSUSED*/
2275 static int
2276 segdev_gettype(register struct seg *seg, caddr_t addr)
2277 {
2278         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2279 
2280         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETTYPE,
2281             "segdev_gettype:start seg=%p addr=%p", (void *)seg, (void *)addr);
2282 
2283         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2284 
2285         return (sdp->type);
2286 }
2287 
2288 
2289 /*ARGSUSED*/
2290 static int
2291 segdev_getvp(register struct seg *seg, caddr_t addr, struct vnode **vpp)
2292 {
2293         register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2294 
2295         TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETVP,
2296             "segdev_getvp:start seg=%p addr=%p", (void *)seg, (void *)addr);
2297 
2298         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2299 
2300         /*
2301          * Note that this vp is the common_vp of the device, where the
2302          * pages are hung ..
2303          */
2304         *vpp = VTOCVP(sdp->vp);
2305 
2306         return (0);
2307 }
2308 
/*
 * Catch-all entry for segment operations that are invalid on a segdev
 * segment; reaching it indicates a kernel bug, so it panics.
 */
static void
segdev_badop(void)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGDEV_BADOP,
	    "segdev_badop:start");
	panic("segdev_badop");
	/*NOTREACHED*/
}
2317 
/*
 * segdev pages are not in the cache, and thus can't really be controlled.
 * Hence, syncs are simply always successful.
 */
/*ARGSUSED*/
static int
segdev_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SYNC, "segdev_sync:start");

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/* Nothing to flush for device mappings; report success. */
	return (0);
}
2332 
2333 /*
2334  * segdev pages are always "in core".
2335  */
2336 /*ARGSUSED*/
2337 static size_t
2338 segdev_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
2339 {
2340         size_t v = 0;
2341 
2342         TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_INCORE, "segdev_incore:start");
2343 
2344         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2345 
2346         for (len = (len + PAGEOFFSET) & PAGEMASK; len; len -= PAGESIZE,
2347             v += PAGESIZE)
2348                 *vec++ = 1;
2349         return (v);
2350 }
2351 
/*
 * segdev pages are not in the cache, and thus can't really be controlled.
 * Hence, locks are simply always successful.
 */
/*ARGSUSED*/
static int
segdev_lockop(struct seg *seg, caddr_t addr,
    size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_LOCKOP, "segdev_lockop:start");

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/* Device pages are never paged, so lock/unlock is a no-op. */
	return (0);
}
2367 
/*
 * segdev pages are not in the cache, and thus can't really be controlled.
 * Hence, advise is simply always successful.
 */
/*ARGSUSED*/
static int
segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/* No paging policy applies to device pages; accept any advice. */
	return (0);
}
2382 
/*
 * segdev pages are not dumped, so we just return
 */
/*ARGSUSED*/
static void
segdev_dump(struct seg *seg)
{}
2390 
2391 /*
2392  * ddi_segmap_setup:    Used by drivers who wish specify mapping attributes
2393  *                      for a segment.  Called from a drivers segmap(9E)
2394  *                      routine.
2395  */
2396 /*ARGSUSED*/
2397 int
2398 ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,


3063 {
3064         devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3065         struct as *asp = dhp->dh_seg->s_as;
3066         caddr_t addr;
3067         ulong_t size;
3068         ssize_t soff;   /* offset from the beginning of the segment */
3069         int rc;
3070 
3071         TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_LOAD,
3072             "devmap_load:start dhp=%p offset=%llx len=%lx",
3073             (void *)dhp, offset, len);
3074 
3075         DEBUGF(7, (CE_CONT, "devmap_load: dhp %p offset %llx len %lx\n",
3076             (void *)dhp, offset, len));
3077 
3078         /*
3079          *      Hat layer only supports devload to process' context for which
3080          *      the as lock is held. Verify here and return error if drivers
3081          *      inadvertently call devmap_load on a wrong devmap handle.
3082          */
3083         if ((asp != &kas) && !AS_LOCK_HELD(asp))
3084                 return (FC_MAKE_ERR(EINVAL));
3085 
3086         soff = (ssize_t)(offset - dhp->dh_uoff);
3087         soff = round_down_p2(soff, PAGESIZE);
3088         if (soff < 0 || soff >= dhp->dh_len)
3089                 return (FC_MAKE_ERR(EINVAL));
3090 
3091         /*
3092          * Address and size must be page aligned.  Len is set to the
3093          * number of bytes in the number of pages that are required to
3094          * support len.  Offset is set to the byte offset of the first byte
3095          * of the page that contains offset.
3096          */
3097         len = round_up_p2(len, PAGESIZE);
3098 
3099         /*
3100          * If len == 0, then calculate the size by getting
3101          * the number of bytes from offset to the end of the segment.
3102          */
3103         if (len == 0)