		}
	}

	/* If locked, release physical memory reservation */
	if (kpd->kp_flags & KPD_LOCKED) {
		pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}

	vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
	kmem_free(kpd, sizeof (struct segkp_data));
}

/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when:
 *
 *	- it is safe to sleep on page_create_va().
 *	- the caller is non-swappable.
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time.  Note that the caller must _remain_ non-swappable until after
 * calling segkp_unmap_red().
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
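
/*
 * Illustrative caller sketch, added for this excerpt (not part of the
 * original source): how a caller such as pagefault() would pair
 * segkp_map_red() with segkp_unmap_red() per the contract above.  The
 * function name, the SEGKP_RED_EXAMPLE guard, and the elided fault
 * handling are hypothetical.
 */
#ifdef SEGKP_RED_EXAMPLE
static void
example_fault_path(void)
{
	int mapped_red;

	/* The caller must already be non-swappable ... */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/* ... and it must be safe to sleep on page_create_va(). */
	mapped_red = segkp_map_red();

	/* (handle the fault, with the redzone mapped if mapped_red != 0) */

	/*
	 * Only the caller remembers whether the redzone was mapped; it
	 * must remain non-swappable until segkp_unmap_red() returns.
	 */
	if (mapped_red)
		segkp_unmap_red();
}
#endif	/* SEGKP_RED_EXAMPLE */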
#if defined(STACK_GROWTH_DOWN)
int
segkp_map_red(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef _LP64
	caddr_t stkbase;
#endif

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Optimize for the common case where we simply return.
	 */
	if ((curthread->t_red_pp == NULL) &&
	    (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
		return (0);

#if defined(_LP64)
	/*
	 * XXX We probably need something better than this.
	 */
	panic("kernel stack overflow");
	/*NOTREACHED*/
#else	/* _LP64 */
	if (curthread->t_red_pp == NULL) {
		page_t *red_pp;
		struct seg kseg;

		caddr_t red_va = (caddr_t)
		    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
		    PAGESIZE);

		/* [roughly sixty lines elided from this excerpt] */

		red_deep_hires = hrestime.tv_nsec;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */
	ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
	return (0);
#endif	/* _LP64 */
}

void
segkp_unmap_red(void)
{
	page_t *pp;
	caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	ASSERT(curthread->t_red_pp != NULL);
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Because we locked the mapping down, we can't simply rely
	 * on page_destroy() to clean everything up; we need to call
	 * hat_unload() to explicitly unlock the mapping resources.
	 */
	hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

	pp = curthread->t_red_pp;

	ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

	/*
	 * Need to upgrade the SE_SHARED lock to SE_EXCL.
	 */
	if (!page_tryupgrade(pp)) {
		/*
		 * As there is no wait for upgrade, release the
		 * SE_SHARED lock and wait for SE_EXCL.
		 */
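		/*
		 * [The excerpt ends at the comment above, mid-function.
		 * A minimal sketch of how the fallback it describes would
		 * continue, inferred from that comment rather than copied
		 * from the original source:]
		 */
		page_unlock(pp);	/* drop the SE_SHARED lock */
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va,
		    SE_EXCL);		/* waits for SE_EXCL; NULL if freed */
	}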