patch as-lock-macro-simplification
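
The two hunks below show the same cow_mapin() hunk before and after the change. The only
difference between them is that AS_LOCK_ENTER() and AS_LOCK_EXIT() no longer take the
redundant &as->a_lock argument; the macros can derive the rwlock from the as pointer
themselves. As a rough sketch of what the simplification looks like (the actual macro
definitions live in the as header, which is not part of this hunk, so treat the exact
text as an assumption):

	/* Old form: the caller names both the as and its lock. */
	#define	AS_LOCK_ENTER(as, lock, type)	rw_enter((lock), (type))
	#define	AS_LOCK_EXIT(as, lock)		rw_exit((lock))

	/* Simplified form: the macro finds the lock inside the as itself. */
	#define	AS_LOCK_ENTER(as, type)		rw_enter(&(as)->a_lock, (type))
	#define	AS_LOCK_EXIT(as)		rw_exit(&(as)->a_lock)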


 347  * Error return:
 348  * ENOTSUP - an operation like this is not supported on this segment
 349  * type or on this platform.
 350  */
 351 int
 352 cow_mapin(struct as *as, caddr_t uaddr, caddr_t kaddr, struct page **cached_ppp,
 353     struct anon **app, size_t *lenp, int cow)
 354 {
 355         struct          hat *hat;
 356         struct seg      *seg;
 357         caddr_t         base;
 358         page_t          *pp, *ppp[MAX_MAPIN_PAGES];
 359         long            i;
 360         int             flags;
 361         size_t          size, total = *lenp;
 362         char            first = 1;
 363         faultcode_t     res;
 364 
 365         *lenp = 0;
 366         if (cow) {
 367                 AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 368                 seg = as_findseg(as, uaddr, 0);
 369                 if ((seg == NULL) || ((base = seg->s_base) > uaddr) ||
 370                     (uaddr + total) > base + seg->s_size) {
 371                         AS_LOCK_EXIT(as, &as->a_lock);
 372                         return (EINVAL);
 373                 }
 374                 /*
 375                  * The COW scheme should work for all segment types.
 376                  * But to be safe, we check against segvn.
 377                  */
 378                 if (seg->s_ops != &segvn_ops) {
 379                         AS_LOCK_EXIT(as, &as->a_lock);
 380                         return (ENOTSUP);
 381                 } else if ((SEGOP_GETTYPE(seg, uaddr) & MAP_PRIVATE) == 0) {
 382                         AS_LOCK_EXIT(as, &as->a_lock);
 383                         return (ENOTSUP);
 384                 }
 385         }
 386         hat = as->a_hat;
 387         size = total;
 388 tryagain:
 389         /*
 390          * If (cow), hat_softlock will also change the usr protection to RO.
 391          * This is the first step toward setting up cow. Before we
 392          * bump up an_refcnt, we can't allow any cow-fault on this
 393          * address. Otherwise segvn_fault will change the protection back
 394          * to RW upon seeing an_refcnt == 1.
 395          * The solution is to hold the writer lock on "as".
 396          */
 397         res = hat_softlock(hat, uaddr, &size, &ppp[0], cow ? HAT_COW : 0);
 398         size = total - size;
 399         *lenp += size;
 400         size = size >> PAGESHIFT;
 401         i = 0;
 402         while (i < size) {


 467                                  * mapping on VAC. hat_softlock will flush
 468                                  * a VAC_WRITEBACK cache. Therefore the kaddr
 469                                  * doesn't have to be of the same vcolor as
 470                                  * uaddr.
 471                                  * The alternative is to change hat_devload
 472                                  * to get a cached mapping. Allocate a kaddr
 473                                  * with the same vcolor as uaddr. Then
 474                                  * hat_softlock won't need to flush the VAC.
 475                                  */
 476                                 hat_devload(kas.a_hat, kaddr, PAGESIZE,
 477                                     page_pptonum(pp), PROT_READ, flags);
 478                                 *cached_ppp = pp;
 479                         }
 480                         kaddr += PAGESIZE;
 481                 }
 482                 cached_ppp++;
 483                 app++;
 484                 ++i;
 485         }
 486         if (cow) {
 487                 AS_LOCK_EXIT(as, &as->a_lock);
 488         }
 489         if (first && res == FC_NOMAP) {
 490                 /*
 491                  * If the address is not mapped yet, we call as_fault to
 492                  * fault the pages in. We could've fallen back to copy and
 493                  * let it fault in the pages. But for a mapped file, we
 494                  * normally reference each page only once. For zero-copy to
 495                  * be of any use, we'd better fault in the page now and try
 496                  * again.
 497                  */
 498                 first = 0;
 499                 size = size << PAGESHIFT;
 500                 uaddr += size;
 501                 total -= size;
 502                 size = total;
 503                 res = as_fault(as->a_hat, as, uaddr, size, F_INVAL, S_READ);
 504                 if (cow)
 505                         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 506                 goto tryagain;
 507         }
 508         switch (res) {
 509         case FC_NOSUPPORT:
 510                 return (ENOTSUP);
 511         case FC_PROT:   /* Pretend we don't know about it. This will be */
 512                         /* caught by the caller when uiomove fails. */
 513         case FC_NOMAP:
 514         case FC_OBJERR:
 515         default:
 516                 return (0);
 517         }
 518 }
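
For quick reference, the substantive change in this hunk reduces to the following
call-site edits (all surrounding context is unchanged); the second copy of the hunk
below shows the result in full:

	-		AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
	+		AS_LOCK_ENTER(as, RW_WRITER);

	-		AS_LOCK_EXIT(as, &as->a_lock);
	+		AS_LOCK_EXIT(as);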


 347  * Error return:
 348  * ENOTSUP - an operation like this is not supported on this segment
 349  * type or on this platform.
 350  */
 351 int
 352 cow_mapin(struct as *as, caddr_t uaddr, caddr_t kaddr, struct page **cached_ppp,
 353     struct anon **app, size_t *lenp, int cow)
 354 {
 355         struct          hat *hat;
 356         struct seg      *seg;
 357         caddr_t         base;
 358         page_t          *pp, *ppp[MAX_MAPIN_PAGES];
 359         long            i;
 360         int             flags;
 361         size_t          size, total = *lenp;
 362         char            first = 1;
 363         faultcode_t     res;
 364 
 365         *lenp = 0;
 366         if (cow) {
 367                 AS_LOCK_ENTER(as, RW_WRITER);
 368                 seg = as_findseg(as, uaddr, 0);
 369                 if ((seg == NULL) || ((base = seg->s_base) > uaddr) ||
 370                     (uaddr + total) > base + seg->s_size) {
 371                         AS_LOCK_EXIT(as);
 372                         return (EINVAL);
 373                 }
 374                 /*
 375                  * The COW scheme should work for all segment types.
 376                  * But to be safe, we check against segvn.
 377                  */
 378                 if (seg->s_ops != &segvn_ops) {
 379                         AS_LOCK_EXIT(as);
 380                         return (ENOTSUP);
 381                 } else if ((SEGOP_GETTYPE(seg, uaddr) & MAP_PRIVATE) == 0) {
 382                         AS_LOCK_EXIT(as);
 383                         return (ENOTSUP);
 384                 }
 385         }
 386         hat = as->a_hat;
 387         size = total;
 388 tryagain:
 389         /*
 390          * If (cow), hat_softlock will also change the usr protection to RO.
 391          * This is the first step toward setting up cow. Before we
 392          * bump up an_refcnt, we can't allow any cow-fault on this
 393          * address. Otherwise segvn_fault will change the protection back
 394          * to RW upon seeing an_refcnt == 1.
 395          * The solution is to hold the writer lock on "as".
 396          */
 397         res = hat_softlock(hat, uaddr, &size, &ppp[0], cow ? HAT_COW : 0);
 398         size = total - size;
 399         *lenp += size;
 400         size = size >> PAGESHIFT;
 401         i = 0;
 402         while (i < size) {


 467                                  * mapping on VAC. hat_softlock will flush
 468                                  * a VAC_WRITEBACK cache. Therefore the kaddr
 469                                  * doesn't have to be of the same vcolor as
 470                                  * uaddr.
 471                                  * The alternative is to change hat_devload
 472                                  * to get a cached mapping. Allocate a kaddr
 473                                  * with the same vcolor as uaddr. Then
 474                                  * hat_softlock won't need to flush the VAC.
 475                                  */
 476                                 hat_devload(kas.a_hat, kaddr, PAGESIZE,
 477                                     page_pptonum(pp), PROT_READ, flags);
 478                                 *cached_ppp = pp;
 479                         }
 480                         kaddr += PAGESIZE;
 481                 }
 482                 cached_ppp++;
 483                 app++;
 484                 ++i;
 485         }
 486         if (cow) {
 487                 AS_LOCK_EXIT(as);
 488         }
 489         if (first && res == FC_NOMAP) {
 490                 /*
 491                  * If the address is not mapped yet, we call as_fault to
 492                  * fault the pages in. We could've fallen back to copy and
 493                  * let it fault in the pages. But for a mapped file, we
 494                  * normally reference each page only once. For zero-copy to
 495                  * be of any use, we'd better fault in the page now and try
 496                  * again.
 497                  */
 498                 first = 0;
 499                 size = size << PAGESHIFT;
 500                 uaddr += size;
 501                 total -= size;
 502                 size = total;
 503                 res = as_fault(as->a_hat, as, uaddr, size, F_INVAL, S_READ);
 504                 if (cow)
 505                         AS_LOCK_ENTER(as, RW_WRITER);
 506                 goto tryagain;
 507         }
 508         switch (res) {
 509         case FC_NOSUPPORT:
 510                 return (ENOTSUP);
 511         case FC_PROT:   /* Pretend we don't know about it. This will be */
 512                         /* caught by the caller when uiomove fails. */
 513         case FC_NOMAP:
 514         case FC_OBJERR:
 515         default:
 516                 return (0);
 517         }
 518 }
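
For context on how this interface is used, here is a caller-side sketch based only on
the signature and behaviour visible in the hunk above. The caller itself is hypothetical
(the real consumers are not part of this patch), the names want, fallback_copy, as,
uaddr, and kaddr are placeholders, and releasing the softlocked pages afterwards is
outside this function.

	/*
	 * Hypothetical caller sketch: try to map 'want' bytes of a private
	 * user mapping at 'uaddr' into the kernel at 'kaddr' with COW set up.
	 * Only cow_mapin()'s signature and return convention are taken from
	 * the hunk above; everything else is illustrative.
	 */
	size_t		len = want;
	struct page	*pps[MAX_MAPIN_PAGES];
	struct anon	*aps[MAX_MAPIN_PAGES];

	if (cow_mapin(as, uaddr, kaddr, pps, aps, &len, 1) != 0 || len == 0) {
		/* Zero-copy not possible here; fall back to a plain copy. */
		return (fallback_copy(uaddr, want));
	}
	/*
	 * 'len' bytes of the user range are now softlocked, marked read-only
	 * in the user address space, and visible through 'kaddr'.
	 */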