Print this page
patch remove-dont-swap-flag

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/vm/seg_kp.c
          +++ new/usr/src/uts/common/vm/seg_kp.c
↓ open down ↓ 752 lines elided ↑ open up ↑
 753  753  
 754  754          vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
 755  755          kmem_free(kpd, sizeof (struct segkp_data));
 756  756  }
 757  757  
 758  758  /*
 759  759   * segkp_map_red() will check the current frame pointer against the
 760  760   * stack base.  If the amount of stack remaining is questionable
 761  761   * (less than red_minavail), then segkp_map_red() will map in the redzone
 762  762   * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 763      - * _only_ be called when:
 764      - *
 765      - *   - it is safe to sleep on page_create_va().
 766      - *   - the caller is non-swappable.
      763 + * _only_ be called when it is safe to sleep on page_create_va().
 767  764   *
 768  765   * It is up to the caller to remember whether segkp_map_red() successfully
 769  766   * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 770      - * time.  Note that the caller must _remain_ non-swappable until after
 771      - * calling segkp_unmap_red().
      767 + * time.
 772  768   *
 773  769   * Currently, this routine is only called from pagefault() (which necessarily
 774  770   * satisfies the above condition).
 775  771   */
 776  772  #if defined(STACK_GROWTH_DOWN)
 777  773  int
 778  774  segkp_map_red(void)
 779  775  {
 780  776          uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
 781  777  #ifndef _LP64
 782  778          caddr_t stkbase;
 783  779  #endif
 784  780  
 785      -        ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
 786      -
 787  781          /*
 788  782           * Optimize for the common case where we simply return.
 789  783           */
 790  784          if ((curthread->t_red_pp == NULL) &&
 791  785              (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
 792  786                  return (0);
 793  787  
 794  788  #if defined(_LP64)
 795  789          /*
 796  790           * XXX  We probably need something better than this.
↓ open down ↓ 80 lines elided ↑ open up ↑
 877  871  }
 878  872  
 879  873  void
 880  874  segkp_unmap_red(void)
 881  875  {
 882  876          page_t *pp;
 883  877          caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
 884  878              (uintptr_t)PAGEMASK) - PAGESIZE);
 885  879  
 886  880          ASSERT(curthread->t_red_pp != NULL);
 887      -        ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
 888  881  
 889  882          /*
 890  883           * Because we locked the mapping down, we can't simply rely
 891  884           * on page_destroy() to clean everything up;  we need to call
 892  885           * hat_unload() to explicitly unlock the mapping resources.
 893  886           */
 894  887          hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);
 895  888  
 896  889          pp = curthread->t_red_pp;
 897  890  
↓ open down ↓ 569 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX