137
138 hrtime_t red_deep_hires;
139 kthread_t *red_deep_thread;
140
141 uint32_t red_nmapped;
142 uint32_t red_closest = UINT_MAX;
143 uint32_t red_ndoubles;
144
145 pgcnt_t anon_segkp_pages_locked; /* See vm/anon.h */
146 pgcnt_t anon_segkp_pages_resv; /* anon reserved by seg_kp */
147
148 static struct seg_ops segkp_ops = {
149 SEGKP_BADOP(int), /* dup */
150 SEGKP_BADOP(int), /* unmap */
151 SEGKP_BADOP(void), /* free */
152 segkp_fault,
153 SEGKP_BADOP(faultcode_t), /* faulta */
154 SEGKP_BADOP(int), /* setprot */
155 segkp_checkprot,
156 segkp_kluster,
157 SEGKP_BADOP(size_t), /* swapout */
158 SEGKP_BADOP(int), /* sync */
159 SEGKP_BADOP(size_t), /* incore */
160 SEGKP_BADOP(int), /* lockop */
161 SEGKP_BADOP(int), /* getprot */
162 SEGKP_BADOP(u_offset_t), /* getoffset */
163 SEGKP_BADOP(int), /* gettype */
164 SEGKP_BADOP(int), /* getvp */
165 SEGKP_BADOP(int), /* advise */
166 segkp_dump, /* dump */
167 segkp_pagelock, /* pagelock */
168 SEGKP_BADOP(int), /* setpgsz */
169 segkp_getmemid, /* getmemid */
170 segkp_getpolicy, /* getpolicy */
171 segkp_capable, /* capable */
172 };
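/*
 * Note on the SEGKP_BADOP() entries above: each one is segkp_badop(),
 * cast to the function-pointer type of the unimplemented operation.  The
 * macro is defined earlier in seg_kp.c, outside this excerpt; its usual
 * form (shown for reference, not verbatim from this revision) is
 *
 *	#define	SEGKP_BADOP(t)	(t (*)())segkp_badop
 *
 * so that any segment operation segkp does not support lands in
 * segkp_badop(), which panics, rather than silently misbehaving.
 */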
173
174
175 static void
176 segkp_badop(void)
177 {
743 }
744 }
745
746 /* If locked, release physical memory reservation */
747 if (kpd->kp_flags & KPD_LOCKED) {
748 pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
749 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
750 atomic_add_long(&anon_segkp_pages_locked, -pages);
751 page_unresv(pages);
752 }
753
754 vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
755 kmem_free(kpd, sizeof (struct segkp_data));
756 }
757
758 /*
759 * segkp_map_red() will check the current frame pointer against the
760 * stack base. If the amount of stack remaining is questionable
761 * (less than red_minavail), then segkp_map_red() will map in the redzone
762 * and return 1. Otherwise, it will return 0. segkp_map_red() can
763 * _only_ be called when:
764 *
765 * - it is safe to sleep on page_create_va().
766 * - the caller is non-swappable.
767 *
768 * It is up to the caller to remember whether segkp_map_red() successfully
769 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
770 * time. Note that the caller must _remain_ non-swappable until after
771 * calling segkp_unmap_red().
772 *
773 * Currently, this routine is only called from pagefault() (which necessarily
774 * satisfies the above conditions).
775 */
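/*
 * Illustrative caller sketch (not part of seg_kp.c): how the protocol
 * described above is used.  deep_fault_handler() and resolve_fault() are
 * hypothetical names standing in for pagefault() and the deep work it does.
 */
static void
deep_fault_handler(void)
{
	int red_mapped;

	/* The caller is already non-swappable (TS_DONT_SWAP) at this point. */
	red_mapped = segkp_map_red();	/* 1 if the redzone was mapped in */

	resolve_fault();		/* the deep call path that needed the extra stack */

	if (red_mapped)
		segkp_unmap_red();	/* the caller must undo its own mapping */
	/* Only after segkp_unmap_red() may the caller become swappable again. */
}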
776 #if defined(STACK_GROWTH_DOWN)
777 int
778 segkp_map_red(void)
779 {
780 uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
781 #ifndef _LP64
782 caddr_t stkbase;
783 #endif
784
785 ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
786
787 /*
788 * Optimize for the common case where we simply return.
789 */
790 if ((curthread->t_red_pp == NULL) &&
791 (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
792 return (0);
793
794 #if defined(_LP64)
795 /*
796 * XXX We probably need something better than this.
797 */
798 panic("kernel stack overflow");
799 /*NOTREACHED*/
800 #else /* _LP64 */
801 if (curthread->t_red_pp == NULL) {
802 page_t *red_pp;
803 struct seg kseg;
804
805 caddr_t red_va = (caddr_t)
806 (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
867 red_deep_hires = hrestime.tv_nsec;
868 red_deep_thread = curthread;
869 }
870
871 /*
872 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
873 */
874 ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
875 return (0);
876 #endif /* _LP64 */
877 }
878
879 void
880 segkp_unmap_red(void)
881 {
882 page_t *pp;
883 caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
884 (uintptr_t)PAGEMASK) - PAGESIZE);
885
886 ASSERT(curthread->t_red_pp != NULL);
887 ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
888
889 /*
890 * Because we locked the mapping down, we can't simply rely
891 * on page_destroy() to clean everything up; we need to call
892 * hat_unload() to explicitly unlock the mapping resources.
893 */
894 hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);
895
896 pp = curthread->t_red_pp;
897
898 ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));
899
900 /*
901 * Need to upgrade the SE_SHARED lock to SE_EXCL.
902 */
903 if (!page_tryupgrade(pp)) {
904 /*
905 * As there is no wait for upgrade, release the
906 * SE_SHARED lock and wait for SE_EXCL.
907 */
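/*
 * The excerpt stops inside the if-block above.  A sketch (assumed, not
 * this file's verbatim code) of how the SE_SHARED-to-SE_EXCL upgrade
 * typically continues: drop the shared lock and re-acquire the page
 * exclusively, e.g.
 *
 *	page_unlock(pp);
 *	pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
 *
 * after which pp may be NULL if the page went away, so the caller must
 * re-check before destroying the redzone page and clearing t_red_pp.
 */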