927 PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
928 HAT_LOAD | HAT_LOAD_NOCONSIST);
929 }
930 hat_vlp_setup(CPU);
931
932 /*
933 * Create kmap (cached mappings of kernel PTEs).
934 * For 32-bit we map from segmap_start .. ekernelheap;
935 * for 64-bit we map from segmap_start .. segmap_start + segmapsize.
936 */
937 #if defined(__i386)
938 size = (uintptr_t)ekernelheap - segmap_start;
939 #elif defined(__amd64)
940 size = segmapsize;
941 #endif
942 hat_kmap_init((uintptr_t)segmap_start, size);
943 }
944
945 /*
946 * In 32-bit PAE mode, PTEs are 64 bits but ordinary atomic memory references
947 * are only 32 bits wide, so for safety we must use atomic_cas_64() to install them.
948 */
949 #ifdef __i386
950 static void
951 reload_pae32(hat_t *hat, cpu_t *cpu)
952 {
953 x86pte_t *src;
954 x86pte_t *dest;
955 x86pte_t pte;
956 int i;
957
958 /*
959 * Load the 4 entries of the level 2 page table into this
960 * cpu's range of the vlp_page and point cr3 at them.
961 */
962 ASSERT(mmu.pae_hat);
963 src = hat->hat_vlp_ptes;
964 dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
965 for (i = 0; i < VLP_NUM_PTES; ++i) {
966 for (;;) {
967 pte = dest[i];
968 if (pte == src[i])
969 break;
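			/*
			 * atomic_cas_64() returns the prior contents of
			 * dest[i]. On a successful swap that is pte, which
			 * differs from src[i] here, so we stop retrying;
			 * a return of src[i] means another CPU just
			 * installed the value, and the re-read above will
			 * end the loop.
			 */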
970 if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
971 break;
972 }
973 }
974 }
975 #endif
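
/*
 * A minimal userland analogue of the retry loop above (an editor's sketch,
 * not part of hat_i86.c): on 32-bit x86 a plain 64-bit store is typically
 * performed as two 32-bit stores, so a concurrent reader could observe a
 * torn PTE. An atomic_cas_64() loop installs all 64 bits in one atomic
 * operation. install_pte64() is a hypothetical name used only here.
 */
#include <stdint.h>
#include <atomic.h>

static void
install_pte64(volatile uint64_t *slot, uint64_t newval)
{
	uint64_t old;

	for (;;) {
		old = *slot;
		if (old == newval)
			break;			/* already installed */
		if (atomic_cas_64(slot, old, newval) == old)
			break;			/* our swap took effect */
		/* lost a race; re-read and retry */
	}
}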
976
977 /*
978 * Switch to a new active hat, maintaining bit masks to track active CPUs.
979 *
980 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value; on bare metal it
981 * remains a 32-bit value.
982 */
983 void
984 hat_switch(hat_t *hat)
985 {
986 uint64_t newcr3;
987 cpu_t *cpu = CPU;
988 hat_t *old = cpu->cpu_current_hat;
989
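	/*
	 * Editor's sketch (not the elided hat_switch() body): one way to
	 * maintain the per-hat mask of active CPUs named in the comment
	 * above. hat_cpus_mask is a hypothetical ulong_t field standing in
	 * for the hat's CPU set; the old hat drops this CPU's bit and the
	 * new hat gains it, using lock-free atomics.
	 */
	atomic_and_ulong(&old->hat_cpus_mask, ~(1ul << cpu->cpu_id));
	atomic_or_ulong(&hat->hat_cpus_mask, 1ul << cpu->cpu_id);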
990 /*
1971 {
1972 ulong_t cr4 = getcr4();
1973
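	/*
	 * TLB entries for global pages survive a %cr3 reload, so when
	 * CR4.PGE is enabled we toggle it off and on, which invalidates
	 * the entire TLB including global entries.
	 */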
1974 if (cr4 & CR4_PGE) {
1975 setcr4(cr4 & ~(ulong_t)CR4_PGE);
1976 setcr4(cr4);
1977
1978 /*
1979 * 32-bit PAE also needs to always reload_cr3(), since the cached PDPTEs are only refreshed by a %cr3 load
1980 */
1981 if (mmu.max_level == 2)
1982 reload_cr3();
1983 } else {
1984 reload_cr3();
1985 }
1986 }
1987
1988 #define TLB_CPU_HALTED (01ul)
1989 #define TLB_INVAL_ALL (02ul)
1990 #define CAS_TLB_INFO(cpu, old, new) \
1991 atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
1992
1993 /*
1994 * Record that a CPU is going idle
1995 */
1996 void
1997 tlb_going_idle(void)
1998 {
1999 atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
2000 }
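
/*
 * Editor's sketch of the other side of this protocol, assuming only the
 * macros above: a CPU performing a TLB shootdown can tag an idle CPU with
 * TLB_INVAL_ALL via CAS_TLB_INFO() instead of cross calling it, leaving
 * tlb_service() to do the flush at wakeup. defer_tlb_flush() is a
 * hypothetical helper, not part of this file.
 */
static int
defer_tlb_flush(cpu_t *cpup)
{
	ulong_t tlb_info;

	for (;;) {
		tlb_info = cpup->cpu_m.mcpu_tlb_info;
		if ((tlb_info & TLB_CPU_HALTED) == 0)
			return (0);	/* CPU is awake; cross call it */
		if (tlb_info & TLB_INVAL_ALL)
			return (1);	/* a full flush is already queued */
		if (CAS_TLB_INFO(cpup, tlb_info,
		    tlb_info | TLB_INVAL_ALL) == tlb_info)
			return (1);	/* flush deferred until wakeup */
	}
}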
2001
2002 /*
2003 * Service a delayed TLB flush when coming out of idle.
2004 * Called from the CPU idle notification, with interrupts disabled.
2005 */
2006 void
2007 tlb_service(void)
2008 {
2009 ulong_t tlb_info;
2010 ulong_t found;
2011
2012 /*
2013 * We only have to do something if coming out of being idle.
2014 */
2015 tlb_info = CPU->cpu_m.mcpu_tlb_info;
2016 if (tlb_info & TLB_CPU_HALTED) {
2017 ASSERT(CPU->cpu_current_hat == kas.a_hat);
2018
2019 /*