patch as-lock-macro-simplification
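
The hunks below show one affected call site. The call site passes &as->a_lock as the lock argument of AS_LOCK_ENTER/AS_LOCK_EXIT; the simplification drops that argument and lets the macros derive the lock from the as pointer themselves. A minimal sketch of the before/after macro shapes, assuming the usual rw_enter()/rw_exit()-based definitions in vm/as.h (the exact definitions in the patch may differ):

    /*
     * Sketch only: assumed shape of the AS_LOCK macros; the actual
     * definitions in the patch may differ.
     */

    /* before: the caller names the lock explicitly */
    #define	AS_LOCK_ENTER(as, lock, type)	rw_enter((lock), (type))
    #define	AS_LOCK_EXIT(as, lock)		rw_exit((lock))

    /* after: the lock is always &(as)->a_lock, so derive it */
    #define	AS_LOCK_ENTER(as, type)		rw_enter(&(as)->a_lock, (type))
    #define	AS_LOCK_EXIT(as)		rw_exit(&(as)->a_lock)

With the lock derived inside the macro, the call site shrinks from AS_LOCK_ENTER(as, &as->a_lock, RW_READER) to AS_LOCK_ENTER(as, RW_READER), and from AS_LOCK_EXIT(as, &as->a_lock) to AS_LOCK_EXIT(as); the surrounding page-locking logic is unchanged.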

old (before the AS_LOCK macro simplification):

3003         ASSERT(len <= PAGESIZE);
3004         off = (uintptr_t)dest & PAGEOFFSET; /* offset within the page */
3005         rdest = (caddr_t)((uintptr_t)dest &
3006             (uintptr_t)PAGEMASK);       /* Page boundary */
3007         ASSERT(off + len <= PAGESIZE);
3008 
3009         /*
3010          * Lock down destination page.
3011          */
3012         if (as_pagelock(as, &pplist, rdest, PAGESIZE, S_WRITE))
3013                 return (E2BIG);
3014         /*
3015          * Check if we have a shadow page list from as_pagelock. If not,
3016          * we took the slow path and have to find our page struct the hard
3017          * way.
3018          */
3019         if (pplist == NULL) {
3020                 pfn_t   pfnum;
3021 
3022                 /* MMU mapping is already locked down */
3023                 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
3024                 pfnum = hat_getpfnum(as->a_hat, rdest);
3025                 AS_LOCK_EXIT(as, &as->a_lock);
3026 
3027                 /*
3028                  * TODO: The pfn step should not be necessary - need
3029                  * a hat_getpp() function.
3030                  */
3031                 if (pf_is_memory(pfnum)) {
3032                         pp = page_numtopp_nolock(pfnum);
3033                         ASSERT(pp == NULL || PAGE_LOCKED(pp));
3034                 } else
3035                         pp = NULL;
3036                 if (pp == NULL) {
3037                         as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
3038                         return (E2BIG);
3039                 }
3040         } else {
3041                 pp = *pplist;
3042         }
3043         /*
3044          * Map destination page into kernel address
3045          */

new (after the AS_LOCK macro simplification):

3003         ASSERT(len <= PAGESIZE);
3004         off = (uintptr_t)dest & PAGEOFFSET; /* offset within the page */
3005         rdest = (caddr_t)((uintptr_t)dest &
3006             (uintptr_t)PAGEMASK);       /* Page boundary */
3007         ASSERT(off + len <= PAGESIZE);
3008 
3009         /*
3010          * Lock down destination page.
3011          */
3012         if (as_pagelock(as, &pplist, rdest, PAGESIZE, S_WRITE))
3013                 return (E2BIG);
3014         /*
3015          * Check if we have a shadow page list from as_pagelock. If not,
3016          * we took the slow path and have to find our page struct the hard
3017          * way.
3018          */
3019         if (pplist == NULL) {
3020                 pfn_t   pfnum;
3021 
3022                 /* MMU mapping is already locked down */
3023                 AS_LOCK_ENTER(as, RW_READER);
3024                 pfnum = hat_getpfnum(as->a_hat, rdest);
3025                 AS_LOCK_EXIT(as);
3026 
3027                 /*
3028                  * TODO: The pfn step should not be necessary - need
3029                  * a hat_getpp() function.
3030                  */
3031                 if (pf_is_memory(pfnum)) {
3032                         pp = page_numtopp_nolock(pfnum);
3033                         ASSERT(pp == NULL || PAGE_LOCKED(pp));
3034                 } else
3035                         pp = NULL;
3036                 if (pp == NULL) {
3037                         as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
3038                         return (E2BIG);
3039                 }
3040         } else {
3041                 pp = *pplist;
3042         }
3043         /*
3044          * Map destination page into kernel address
3045          */