6583 remove whole-process swapping


  50  * resulting from a fork before one process or the other changes their
  51  * contents).  This pseudo-sharing is present only as an optimization
  52  * and is not to be confused with true sharing in which multiple
  53  * address spaces deliberately contain references to the same object;
  54  * such sharing is managed at a higher level.
  55  *
  56  * The key data structure here is the anon struct, which contains a
  57  * reference count for its associated physical page and a hint about
  58  * the identity of that page.  Anon structs typically live in arrays,
  59  * with an instance's position in its array determining where the
  60  * corresponding backing storage is allocated; however, the swap_xlate()
  61  * routine abstracts away this representation information so that the
  62  * rest of the anon layer need not know it.  (See the swap layer for
  63  * more details on anon struct layout.)
  64  *
  65  * In future versions of the system, the association between an
  66  * anon struct and its position on backing store will change so that
  67  * we don't require backing store for all anonymous pages in the system.
  68  * This is an important consideration for large memory systems.
  69  * We can also use this technique to delay binding physical locations
  70  * to anonymous pages until pageout/swapout time, when we can make
  71  * smarter allocation decisions to improve anonymous klustering.
  72  *
  73  * Many of the routines defined here take a (struct anon **) argument,
  74  * which allows the code at this level to manage anon pages directly,
  75  * so that callers can regard anon structs as opaque objects and not be
  76  * concerned with assigning or inspecting their contents.
  77  *
  78  * Clients of this layer refer to anon pages indirectly.  That is, they
  79  * maintain arrays of pointers to anon structs rather than maintaining
  80  * anon structs themselves.  The (struct anon **) arguments mentioned
  81  * above are pointers to entries in these arrays.  It is these arrays
  82  * that capture the mapping between offsets within a given segment and
  83  * the corresponding anonymous backing storage address.
  84  */
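
The indirection described above can be pictured with a short, purely
illustrative C sketch.  None of the names below (demo_anon, demo_seg,
demo_xlate) are real illumos identifiers, and the fields are a
simplification of the actual struct anon and swap_xlate() machinery:

#include <stddef.h>
#include <stdint.h>

struct vnode;				/* opaque stand-in for the real vnode */

/* Simplified stand-in for the anon struct described above. */
struct demo_anon {
	struct vnode	*an_vp;		/* identity of the backing store */
	uint64_t	an_off;		/* offset within that backing store */
	int		an_refcnt;	/* address spaces sharing the page */
};

/* Clients hold arrays of pointers, never anon structs themselves. */
struct demo_seg {
	struct demo_anon **s_anon;	/* one slot per page in the segment */
};

/* swap_xlate()-style lookup: the slot's contents stay opaque to callers. */
static void
demo_xlate(struct demo_seg *seg, size_t pgidx,
    struct vnode **vpp, uint64_t *offp)
{
	struct demo_anon *ap = seg->s_anon[pgidx];

	*vpp = ap->an_vp;
	*offp = ap->an_off;
}
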
  85 
  86 #ifdef DEBUG
  87 #define ANON_DEBUG
  88 #endif
  89 
  90 #include <sys/types.h>
  91 #include <sys/t_lock.h>


3559         /*
3560          * Use szc to determine which anon slot(s) must appear atomic.
3561          * If szc = 0, then lock the anon slot and mark it busy.
3562          * If szc > 0, then lock the range of slots by getting the
3563          * anon_array_lock for the first anon slot, and mark only the
3564          * first anon slot busy to represent the whole range being busy.
3565          */
3566 
3567         ASSERT(RW_READ_HELD(&amp->a_rwlock));
3568         an_idx = P2ALIGN(an_idx, page_get_pagecnt(amp->a_szc));
3569         hash = ANON_ARRAY_HASH(amp, an_idx);
3570         sobj->sync_mutex = mtx = &anon_array_lock[hash].pad_mutex;
3571         sobj->sync_cv = cv = &anon_array_cv[hash];
3572         mutex_enter(mtx);
3573         ap_slot = anon_get_slot(amp->ahp, an_idx);
3574         while (ANON_ISBUSY(ap_slot))
3575                 cv_wait(cv, mtx);
3576         ANON_SETBUSY(ap_slot);
3577         sobj->sync_data = ap_slot;
3578         mutex_exit(mtx);
3579 }
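
Because anon_array_enter() aligns an_idx down with P2ALIGN() before hashing,
every index inside a single large-page range resolves to the same
representative slot, and therefore to the same anon_array_lock bucket.  A
small sketch of that alignment, using a local stand-in for the P2ALIGN()
macro and an assumed page_get_pagecnt() value of 512 (e.g. a 2MB large page
built from 4KB base pages):

#include <stdio.h>

/* Local stand-in for the P2ALIGN() macro from <sys/sysmacros.h>. */
#define	DEMO_P2ALIGN(x, align)	((x) & -(align))

int
main(void)
{
	unsigned long pagecnt = 512;	/* assumed page_get_pagecnt(szc) */
	unsigned long idx[] = { 0, 7, 511, 512, 1000 };
	size_t i;

	for (i = 0; i < sizeof (idx) / sizeof (idx[0]); i++) {
		/* indices 0..511 share slot 0; 512..1023 share slot 512 */
		printf("an_idx %4lu -> representative slot %4lu\n",
		    idx[i], DEMO_P2ALIGN(idx[i], pagecnt));
	}
	return (0);
}
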
3580 
3581 int
3582 anon_array_try_enter(struct anon_map *amp, ulong_t an_idx,
3583                         anon_sync_obj_t *sobj)
3584 {
3585         ulong_t         *ap_slot;
3586         kmutex_t        *mtx;
3587         int             hash;
3588 
3589         /*
3590          * Try to lock a range of anon slots without blocking.
3591          * Use szc to determine which anon slot(s) must appear atomic.
3592          * If szc = 0, then lock the anon slot and mark it busy.
3593          * If szc > 0, then lock the range of slots by getting the
3594          * anon_array_lock for the first anon slot, and mark only the
3595          * first anon slot busy to represent the whole range being busy.
3596          * Fail if the anon_array_lock mutex or the anon slot is busy.
3597          */
3598 
3599         ASSERT(RW_READ_HELD(&amp->a_rwlock));
3600         an_idx = P2ALIGN(an_idx, page_get_pagecnt(amp->a_szc));
3601         hash = ANON_ARRAY_HASH(amp, an_idx);
3602         sobj->sync_mutex = mtx = &anon_array_lock[hash].pad_mutex;
3603         sobj->sync_cv = &anon_array_cv[hash];
3604         if (!mutex_tryenter(mtx)) {
3605                 return (EWOULDBLOCK);
3606         }
3607         ap_slot = anon_get_slot(amp->ahp, an_idx);
3608         if (ANON_ISBUSY(ap_slot)) {
3609                 mutex_exit(mtx);
3610                 return (EWOULDBLOCK);
3611         }
3612         ANON_SETBUSY(ap_slot);
3613         sobj->sync_data = ap_slot;
3614         mutex_exit(mtx);
3615         return (0);
3616 }
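
The non-blocking variant above, anon_array_try_enter() (removed on the new
side of this change), fails with EWOULDBLOCK instead of sleeping.  A
hypothetical caller shape, assuming the kernel environment and the
declarations in this file; the function name demo_try_slot is made up:

/*
 * Hypothetical caller: try to work on one anon slot without sleeping;
 * hand EWOULDBLOCK back so the caller can defer this index.
 */
static int
demo_try_slot(struct anon_map *amp, ulong_t an_idx)
{
	anon_sync_obj_t cookie;

	ASSERT(RW_READ_HELD(&amp->a_rwlock));

	if (anon_array_try_enter(amp, an_idx, &cookie) == EWOULDBLOCK)
		return (EWOULDBLOCK);

	/* ... examine or update the slot's anon pointer here ... */

	anon_array_exit(&cookie);
	return (0);
}
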
3617 
3618 void
3619 anon_array_exit(anon_sync_obj_t *sobj)
3620 {
3621         mutex_enter(sobj->sync_mutex);
3622         ASSERT(ANON_ISBUSY(sobj->sync_data));
3623         ANON_CLRBUSY(sobj->sync_data);
3624         if (CV_HAS_WAITERS(sobj->sync_cv))
3625                 cv_broadcast(sobj->sync_cv);
3626         mutex_exit(sobj->sync_mutex);
3627 }
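
anon_array_enter() and anon_array_exit() bracket any window in which a slot
must appear stable; exit clears the busy bit and wakes waiters.  A
hypothetical blocking caller, again assuming the kernel environment and the
declarations in this file (demo_touch_slot is made up):

/*
 * Hypothetical caller: sleep until the slot is available, use it,
 * then drop the busy bit so any waiters are woken.
 */
static void
demo_touch_slot(struct anon_map *amp, ulong_t an_idx)
{
	anon_sync_obj_t cookie;

	rw_enter(&amp->a_rwlock, RW_READER);	/* enter asserts this is held */
	anon_array_enter(amp, an_idx, &cookie);

	/* ... the slot's anon pointer is stable in this window ... */

	anon_array_exit(&cookie);
	rw_exit(&amp->a_rwlock);
}
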


  50  * resulting from a fork before one process or the other changes their
  51  * contents).  This pseudo-sharing is present only as an optimization
  52  * and is not to be confused with true sharing in which multiple
  53  * address spaces deliberately contain references to the same object;
  54  * such sharing is managed at a higher level.
  55  *
  56  * The key data structure here is the anon struct, which contains a
  57  * reference count for its associated physical page and a hint about
  58  * the identity of that page.  Anon structs typically live in arrays,
  59  * with an instance's position in its array determining where the
  60  * corresponding backing storage is allocated; however, the swap_xlate()
  61  * routine abstracts away this representation information so that the
  62  * rest of the anon layer need not know it.  (See the swap layer for
  63  * more details on anon struct layout.)
  64  *
  65  * In future versions of the system, the association between an
  66  * anon struct and its position on backing store will change so that
  67  * we don't require backing store for all anonymous pages in the system.
  68  * This is an important consideration for large memory systems.
  69  * We can also use this technique to delay binding physical locations
  70  * to anonymous pages until pageout time, when we can make smarter
  71  * allocation decisions to improve anonymous klustering.
  72  *
  73  * Many of the routines defined here take a (struct anon **) argument,
  74  * which allows the code at this level to manage anon pages directly,
  75  * so that callers can regard anon structs as opaque objects and not be
  76  * concerned with assigning or inspecting their contents.
  77  *
  78  * Clients of this layer refer to anon pages indirectly.  That is, they
  79  * maintain arrays of pointers to anon structs rather than maintaining
  80  * anon structs themselves.  The (struct anon **) arguments mentioned
  81  * above are pointers to entries in these arrays.  It is these arrays
  82  * that capture the mapping between offsets within a given segment and
  83  * the corresponding anonymous backing storage address.
  84  */
  85 
  86 #ifdef DEBUG
  87 #define ANON_DEBUG
  88 #endif
  89 
  90 #include <sys/types.h>
  91 #include <sys/t_lock.h>


3559         /*
3560          * Use szc to determine which anon slot(s) must appear atomic.
3561          * If szc = 0, then lock the anon slot and mark it busy.
3562          * If szc > 0, then lock the range of slots by getting the
3563          * anon_array_lock for the first anon slot, and mark only the
3564          * first anon slot busy to represent the whole range being busy.
3565          */
3566 
3567         ASSERT(RW_READ_HELD(&amp->a_rwlock));
3568         an_idx = P2ALIGN(an_idx, page_get_pagecnt(amp->a_szc));
3569         hash = ANON_ARRAY_HASH(amp, an_idx);
3570         sobj->sync_mutex = mtx = &anon_array_lock[hash].pad_mutex;
3571         sobj->sync_cv = cv = &anon_array_cv[hash];
3572         mutex_enter(mtx);
3573         ap_slot = anon_get_slot(amp->ahp, an_idx);
3574         while (ANON_ISBUSY(ap_slot))
3575                 cv_wait(cv, mtx);
3576         ANON_SETBUSY(ap_slot);
3577         sobj->sync_data = ap_slot;
3578         mutex_exit(mtx);
3579 }
3580 
3581 void
3582 anon_array_exit(anon_sync_obj_t *sobj)
3583 {
3584         mutex_enter(sobj->sync_mutex);
3585         ASSERT(ANON_ISBUSY(sobj->sync_data));
3586         ANON_CLRBUSY(sobj->sync_data);
3587         if (CV_HAS_WAITERS(sobj->sync_cv))
3588                 cv_broadcast(sobj->sync_cv);
3589         mutex_exit(sobj->sync_mutex);
3590 }