			if (kpd->kp_flags & KPD_LOCKED) {
				ap = anon_get_ptr(kpd->kp_anon,
				    kpd->kp_anon_idx + i);
				swap_xlate(ap, &vp, &off);
				/* Find the shared-locked page. */
				pp = page_find(vp, (u_offset_t)off);
				if (pp == NULL) {
					panic("segkp_release: "
					    "kp_anon: no page to unlock");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);

				page_unlock(pp);
			}
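			/*
			 * Free the anon slot and return our page
			 * reservation, unless the anon resources came
			 * from an externally supplied anon map
			 * (KPD_HASAMP), in which case their owner
			 * frees them.
			 */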
			if ((kpd->kp_flags & KPD_HASAMP) == 0) {
				anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
				    PAGESIZE);
				anon_unresv_zone(PAGESIZE, NULL);
				atomic_dec_ulong(&anon_segkp_pages_resv);
			}
			TRACE_5(TR_FAC_VM,
			    TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
			    kpd, va, PAGESIZE, 0, 0);
		} else {
			if (kpd->kp_flags & KPD_LOCKED) {
				pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
				if (pp == NULL) {
					panic("segkp_release: "
					    "no page to unlock");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);
				/*
				 * We should just upgrade the lock here
				 * but there is no upgrade that waits.
				 */
				page_unlock(pp);
			}

	/* ... (intervening lines elided) ... */

		kseg.s_as = &kas;
		red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
		    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
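		/* With PG_WAIT, page_create_va() sleeps rather than fail. */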
		ASSERT(red_pp != NULL);

		/*
		 * So we now have a page to jam into the redzone...
		 */
		page_io_unlock(red_pp);

		hat_memload(kas.a_hat, red_va, red_pp,
		    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
		page_downgrade(red_pp);

		/*
		 * The page is left SE_SHARED locked so we can hold on to
		 * the page_t pointer.
		 */
		curthread->t_red_pp = red_pp;

		atomic_inc_32(&red_nmapped);
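		/*
		 * Track the closest any stack pointer has come to its
		 * stack base.  The compare-and-swap can lose a race, in
		 * which case the loop re-reads red_closest and retries
		 * until our value is published or a smaller one already
		 * has been.
		 */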
		while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
			(void) atomic_cas_32(&red_closest, red_closest,
			    (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
		}
		return (1);
	}

	stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	atomic_inc_32(&red_ndoubles);

	if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
		/*
		 * Oh boy.  We're already deep within the mapped-in
		 * redzone page, and the caller is trying to prepare
		 * for a deep stack run.  We're running without a
		 * redzone right now:  if the caller plows off the
		 * end of the stack, it'll plow another thread or
		 * LWP structure.  That situation could result in
		 * a very hard-to-debug panic, so, in the spirit of
		 * recording the name of one's killer in one's own
		 * blood, we're going to record hrestime and the calling
		 * thread.
		 */
		red_deep_hires = hrestime.tv_nsec;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */

	/* ... (intervening lines elided) ... */

	return (0);
}

#include <sys/mem_config.h>

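/*
 * Nothing needs to happen when memory is added; segkp allocates its
 * backing pages on demand.
 */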
/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_inc_32(&segkp_indel);
	segkp_cache_free();
	return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	atomic_dec_32(&segkp_indel);
}

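/*
 * Callback vector registered with the memory DR framework: the
 * pre-delete hook bumps segkp_indel and drains the segkp cache so
 * that cached resources don't pin pages in deletable memory.
 */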
static kphysm_setup_vector_t segkp_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	segkp_mem_config_post_add,
	segkp_mem_config_pre_del,
	segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
	int ret;

	ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
	ASSERT(ret == 0);
}
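
/*
 * Sketch (an assumption, not code from seg_kp.c): segkp is never torn
 * down, but if it were, the callbacks above would be removed with the
 * matching unregister call from <sys/mem_config.h>:
 *
 *	kphysm_setup_func_unregister(&segkp_mem_config_vec, (void *)seg);
 */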