5042 stop using deprecated atomic functions
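
For context: illumos change 5042 retires the old short-named atomic routines in favor of their atomic_-prefixed equivalents from <sys/atomic.h>, so the only functional change in this hunk is cas32() becoming atomic_cas_32(). As a minimal sketch, the two declarations have the same shape (the exact qualifiers in <sys/atomic.h> may differ slightly):

	/* deprecated spelling: */
	extern uint32_t cas32(volatile uint32_t *target, uint32_t cmp,
	    uint32_t newval);

	/*
	 * current spelling; same semantics: if *target == cmp, store
	 * newval, and in all cases return the prior value of *target
	 */
	extern uint32_t atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
	    uint32_t newval);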

old version:

 823                     PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
 824                 ASSERT(red_pp != NULL);
 825 
 826                 /*
 827                  * So we now have a page to jam into the redzone...
 828                  */
 829                 page_io_unlock(red_pp);
 830 
 831                 hat_memload(kas.a_hat, red_va, red_pp,
 832                     (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
 833                 page_downgrade(red_pp);
 834 
 835                 /*
 836                  * The page is left SE_SHARED locked so we can hold on to
 837                  * the page_t pointer.
 838                  */
 839                 curthread->t_red_pp = red_pp;
 840 
 841                 atomic_add_32(&red_nmapped, 1);
 842                 while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
 843                         (void) cas32(&red_closest, red_closest,
 844                             (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
 845                 }
 846                 return (1);
 847         }
 848 
 849         stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
 850             (uintptr_t)PAGEMASK) - PAGESIZE);
 851 
 852         atomic_add_32(&red_ndoubles, 1);
 853 
 854         if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
 855                 /*
 856                  * Oh boy.  We're already deep within the mapped-in
 857                  * redzone page, and the caller is trying to prepare
 858                  * for a deep stack run.  We're running without a
 859                  * redzone right now:  if the caller plows off the
 860                  * end of the stack, it'll plow another thread or
 861                  * LWP structure.  That situation could result in
 862                  * a very hard-to-debug panic, so, in the spirit of
 863                  * recording the name of one's killer in one's own

new version:

 823                     PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
 824                 ASSERT(red_pp != NULL);
 825 
 826                 /*
 827                  * So we now have a page to jam into the redzone...
 828                  */
 829                 page_io_unlock(red_pp);
 830 
 831                 hat_memload(kas.a_hat, red_va, red_pp,
 832                     (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
 833                 page_downgrade(red_pp);
 834 
 835                 /*
 836                  * The page is left SE_SHARED locked so we can hold on to
 837                  * the page_t pointer.
 838                  */
 839                 curthread->t_red_pp = red_pp;
 840 
 841                 atomic_add_32(&red_nmapped, 1);
 842                 while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
 843                         (void) atomic_cas_32(&red_closest, red_closest,
 844                             (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
 845                 }
 846                 return (1);
 847         }
 848 
 849         stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
 850             (uintptr_t)PAGEMASK) - PAGESIZE);
 851 
 852         atomic_add_32(&red_ndoubles, 1);
 853 
 854         if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
 855                 /*
 856                  * Oh boy.  We're already deep within the mapped-in
 857                  * redzone page, and the caller is trying to prepare
 858                  * for a deep stack run.  We're running without a
 859                  * redzone right now:  if the caller plows off the
 860                  * end of the stack, it'll plow another thread or
 861                  * LWP structure.  That situation could result in
 862                  * a very hard-to-debug panic, so, in the spirit of
 863                  * recording the name of one's killer in one's own
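
The while loop at lines 842-845 above is the standard lock-free pattern for maintaining a running minimum: re-read the shared value and compare-and-swap a smaller one in, retrying until either the swap succeeds or another thread has already published something smaller. Below is a rough user-space analogue of that pattern, using C11 <stdatomic.h> in place of the kernel's atomic_cas_32(); all names here are illustrative, not taken from the source above.

	#include <inttypes.h>
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	static _Atomic uint32_t closest = UINT32_MAX;

	/*
	 * Record dist as the closest approach seen so far, lock-free.
	 * atomic_compare_exchange_weak() reloads seen with the current
	 * value whenever the swap loses a race (or fails spuriously), so
	 * each pass around the loop re-tests against fresh data.
	 */
	static void
	record_closest(uint32_t dist)
	{
		uint32_t seen = atomic_load(&closest);

		while (dist < seen) {
			if (atomic_compare_exchange_weak(&closest, &seen,
			    dist))
				break;
		}
	}

	int
	main(void)
	{
		record_closest(4096);
		record_closest(512);
		record_closest(2048);	/* no improvement; 512 stays */
		(void) printf("closest: %" PRIu32 "\n",
		    atomic_load(&closest));
		return (0);
	}

One small difference: the kernel loop reads red_closest twice per iteration (once in the while test, once as the cmp argument) and simply retries on failure, while the C11 form folds the re-read into the failed exchange. Both converge to the same final value.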