6583 remove whole-process swapping

@@ -152,11 +152,10 @@
         segkp_fault,
         SEGKP_BADOP(faultcode_t),       /* faulta */
         SEGKP_BADOP(int),               /* setprot */
         segkp_checkprot,
         segkp_kluster,
-        SEGKP_BADOP(size_t),            /* swapout */
         SEGKP_BADOP(int),               /* sync */
         SEGKP_BADOP(size_t),            /* incore */
         SEGKP_BADOP(int),               /* lockop */
         SEGKP_BADOP(int),               /* getprot */
         SEGKP_BADOP(u_offset_t),                /* getoffset */

@@ -758,19 +757,15 @@
 /*
  * segkp_map_red() will check the current frame pointer against the
  * stack base.  If the amount of stack remaining is questionable
  * (less than red_minavail), then segkp_map_red() will map in the redzone
  * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
- * _only_ be called when:
- *
- *   - it is safe to sleep on page_create_va().
- *   - the caller is non-swappable.
+ * _only_ be called when it is safe to sleep on page_create_va().
  *
  * It is up to the caller to remember whether segkp_map_red() successfully
  * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
- * time.  Note that the caller must _remain_ non-swappable until after
- * calling segkp_unmap_red().
+ * time.
  *
  * Currently, this routine is only called from pagefault() (which necessarily
  * satisfies the above conditions).
  */
 #if defined(STACK_GROWTH_DOWN)

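The contract in the comment above boils down to a simple caller pattern; a hedged sketch of it follows. This is not the pagefault() code itself, and do_deep_stack_work() is a hypothetical placeholder:

extern int segkp_map_red(void);
extern void segkp_unmap_red(void);
extern void do_deep_stack_work(void);           /* hypothetical */

static void
deep_stack_caller(void)
{
        int mapped_red;

        /* May sleep in page_create_va(), so only call from sleepable context. */
        mapped_red = segkp_map_red();

        do_deep_stack_work();

        /* Undo the mapping only if segkp_map_red() actually created it. */
        if (mapped_red)
                segkp_unmap_red();
}
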
@@ -780,12 +775,10 @@
         uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
 #ifndef _LP64
         caddr_t stkbase;
 #endif
 
-        ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
-
         /*
          * Optimize for the common case where we simply return.
          */
         if ((curthread->t_red_pp == NULL) &&
             (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))

@@ -882,11 +875,10 @@
         page_t *pp;
         caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
             (uintptr_t)PAGEMASK) - PAGESIZE);
 
         ASSERT(curthread->t_red_pp != NULL);
-        ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
 
         /*
          * Because we locked the mapping down, we can't simply rely
          * on page_destroy() to clean everything up;  we need to call
          * hat_unload() to explicitly unlock the mapping resources.

@@ -1345,24 +1337,10 @@
         mutex_exit(&segkp_lock);
         return (NULL);          /* Not found */
 }
 
 /*
- * returns size of swappable area.
- */
-size_t
-swapsize(caddr_t v)
-{
-        struct segkp_data *kpd;
-
-        if ((kpd = segkp_find(segkp, v)) != NULL)
-                return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
-        else
-                return (NULL);
-}
-
-/*
  * Dump out all the active segkp pages
  */
 static void
 segkp_dump(struct seg *seg)
 {