1953 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1954 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1955 kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1956 SFMMU_L2_HMERLINKS_SIZE);
1957 sfmmup->sfmmu_hmeregion_links[i] = NULL;
1958 }
1959 }
1960 }
1961 sfmmu_free_sfmmu(sfmmup);
1962
1963 #ifdef DEBUG
1964 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1965 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1966 }
1967 #endif
1968
1969 kmem_cache_free(sfmmuid_cache, sfmmup);
1970 }
1971
/*
 * Set up any translation structures, for the specified address space,
 * that are needed or preferred when the process is being swapped in.
 */
/* ARGSUSED */
void
hat_swapin(struct hat *hat)
{
	/*
	 * No-op for sfmmu: nothing is rebuilt eagerly at swap-in time.
	 * (Translation state is presumably re-established on demand by
	 * later faults -- see hat_swapout for what was torn down.)
	 */
}
1981
/*
 * Free all of the translation resources, for the specified address space,
 * that can be freed while the process is swapped out. Called from as_swapout.
 * Also, free up the ctx that this process was using.
 */
void
hat_swapout(struct hat *sfmmup)
{
	struct hmehash_bucket *hmebp;
	struct hme_blk *hmeblkp;
	struct hme_blk *pr_hblk = NULL;	/* predecessor in the hash chain */
	struct hme_blk *nx_hblk;
	int i;
	struct hme_blk *list = NULL;	/* hme_blks unlinked from the hash */
	hatlock_t *hatlockp;
	struct tsb_info *tsbinfop;
	/*
	 * The TSB pages themselves are reused to hold the free-list links
	 * (we cast tsb_va to struct free_tsb below), so building the list
	 * requires no allocation while the HAT lock is held.
	 */
	struct free_tsb {
		struct free_tsb *next;
		struct tsb_info *tsbinfop;
	};			/* free list of TSBs */
	struct free_tsb *freelist, *last, *next;

	SFMMU_STAT(sf_swapout);

	/*
	 * There is no way to go from an as to all its translations in sfmmu.
	 * Here is one of the times when we take the big hit and traverse
	 * the hash looking for hme_blks to free up. Not only do we free up
	 * this as hme_blks but all those that are free. We are obviously
	 * swapping because we need memory so let's free up as much
	 * as we can.
	 *
	 * Note that we don't flush TLB/TSB here -- it's not necessary
	 * because:
	 * 1) we free the ctx we're using and throw away the TSB(s);
	 * 2) processes aren't runnable while being swapped out.
	 */
	ASSERT(sfmmup != KHATID);
	for (i = 0; i <= UHMEHASH_SZ; i++) {
		hmebp = &uhme_hash[i];
		/* one bucket at a time; lock is dropped before moving on */
		SFMMU_HASH_LOCK(hmebp);
		hmeblkp = hmebp->hmeblkp;
		pr_hblk = NULL;
		while (hmeblkp) {

			/*
			 * Unload only this as's mappings, and only from
			 * hme_blks that are neither shadow blocks nor
			 * holding locked mappings.
			 */
			if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
			    !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
				ASSERT(!hmeblkp->hblk_shared);
				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
				    (caddr_t)get_hblk_base(hmeblkp),
				    get_hblk_endaddr(hmeblkp),
				    NULL, HAT_UNLOAD);
			}
			/* save successor before the block may be unlinked */
			nx_hblk = hmeblkp->hblk_next;
			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
				/*
				 * Completely empty (whoever owned it):
				 * unlink from the bucket and queue on
				 * 'list' for freeing after we unlock.
				 */
				ASSERT(!hmeblkp->hblk_lckcnt);
				sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
				    &list, 0);
			} else {
				pr_hblk = hmeblkp;
			}
			hmeblkp = nx_hblk;
		}
		SFMMU_HASH_UNLOCK(hmebp);
	}

	/* Free the unlinked hme_blks now that no hash lock is held. */
	sfmmu_hblks_list_purge(&list, 0);

	/*
	 * Now free up the ctx so that others can reuse it.
	 */
	hatlockp = sfmmu_hat_enter(sfmmup);

	sfmmu_invalidate_ctx(sfmmup);

	/*
	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
	 * If TSBs were never swapped in, just return.
	 * This implies that we don't support partial swapping
	 * of TSBs -- either all are swapped out, or none are.
	 *
	 * We must hold the HAT lock here to prevent racing with another
	 * thread trying to unmap TTEs from the TSB or running the post-
	 * relocator after relocating the TSB's memory. Unfortunately, we
	 * can't free memory while holding the HAT lock or we could
	 * deadlock, so we build a list of TSBs to be freed after marking
	 * the tsbinfos as swapped out and free them after dropping the
	 * lock.
	 */
	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
		/* already swapped out: nothing more to do */
		sfmmu_hat_exit(hatlockp);
		return;
	}

	SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
	last = freelist = NULL;
	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
	    tsbinfop = tsbinfop->tsb_next) {
		ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);

		/*
		 * Cast the TSB into a struct free_tsb and put it on the free
		 * list.
		 */
		if (freelist == NULL) {
			last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
		} else {
			last->next = (struct free_tsb *)tsbinfop->tsb_va;
			last = last->next;
		}
		last->next = NULL;
		last->tsbinfop = tsbinfop;
		tsbinfop->tsb_flags |= TSB_SWAPPED;
		/*
		 * Zero out the TTE to clear the valid bit.
		 * Note we can't use a value like 0xbad because we want to
		 * ensure diagnostic bits are NEVER set on TTEs that might
		 * be loaded. The intent is to catch any invalid access
		 * to the swapped TSB, such as a thread running with a valid
		 * context without first calling sfmmu_tsb_swapin() to
		 * allocate TSB memory.
		 */
		tsbinfop->tsb_tte.ll = 0;
	}

	/* Now we can drop the lock and free the TSB memory. */
	sfmmu_hat_exit(hatlockp);
	for (; freelist != NULL; freelist = next) {
		next = freelist->next;
		sfmmu_tsb_free(freelist->tsbinfop);
	}
}
2114
2115 /*
2116 * Duplicate the translations of an as into another newas
2117 */
2118 /* ARGSUSED */
2119 int
2120 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2121 uint_t flag)
2122 {
2123 sf_srd_t *srdp;
2124 sf_scd_t *scdp;
2125 int i;
2126 extern uint_t get_color_start(struct as *);
2127
2128 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2129 (flag == HAT_DUP_SRD));
2130 ASSERT(hat != ksfmmup);
2131 ASSERT(newhat != ksfmmup);
2132 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2133
2134 if (flag == HAT_DUP_COW) {
2135 panic("hat_dup: HAT_DUP_COW not supported");
9793 curcnum = sfmmu_getctx_sec();
9794 if (curcnum == cnum)
9795 sfmmu_load_mmustate(sfmmup);
9796 sfmmu_enable_intrs(pstate_save);
9797 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9798 }
9799 } else {
9800 /*
9801 * multi-thread
9802 * or when sfmmup is not the same as the curproc.
9803 */
9804 sfmmu_invalidate_ctx(sfmmup);
9805 }
9806
9807 kpreempt_enable();
9808 }
9809
9810
9811 /*
9812 * Replace the specified TSB with a new TSB. This function gets called when
9813 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the
9814 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
9815 * (8K).
9816 *
9817 * Caller must hold the HAT lock, but should assume any tsb_info
9818 * pointers it has are no longer valid after calling this function.
9819 *
9820 * Return values:
9821 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints
9822 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing
9823 * something to this tsbinfo/TSB
9824 * TSB_SUCCESS Operation succeeded
9825 */
9826 static tsb_replace_rc_t
9827 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
9828 hatlock_t *hatlockp, uint_t flags)
9829 {
9830 struct tsb_info *new_tsbinfo = NULL;
9831 struct tsb_info *curtsb, *prevtsb;
9832 uint_t tte_sz_mask;
9833 int i;
|
1953 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1954 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1955 kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1956 SFMMU_L2_HMERLINKS_SIZE);
1957 sfmmup->sfmmu_hmeregion_links[i] = NULL;
1958 }
1959 }
1960 }
1961 sfmmu_free_sfmmu(sfmmup);
1962
1963 #ifdef DEBUG
1964 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1965 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1966 }
1967 #endif
1968
1969 kmem_cache_free(sfmmuid_cache, sfmmup);
1970 }
1971
1972 /*
1973 * Duplicate the translations of an as into another newas
1974 */
1975 /* ARGSUSED */
1976 int
1977 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
1978 uint_t flag)
1979 {
1980 sf_srd_t *srdp;
1981 sf_scd_t *scdp;
1982 int i;
1983 extern uint_t get_color_start(struct as *);
1984
1985 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
1986 (flag == HAT_DUP_SRD));
1987 ASSERT(hat != ksfmmup);
1988 ASSERT(newhat != ksfmmup);
1989 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
1990
1991 if (flag == HAT_DUP_COW) {
1992 panic("hat_dup: HAT_DUP_COW not supported");
9650 curcnum = sfmmu_getctx_sec();
9651 if (curcnum == cnum)
9652 sfmmu_load_mmustate(sfmmup);
9653 sfmmu_enable_intrs(pstate_save);
9654 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9655 }
9656 } else {
9657 /*
9658 * multi-thread
9659 * or when sfmmup is not the same as the curproc.
9660 */
9661 sfmmu_invalidate_ctx(sfmmup);
9662 }
9663
9664 kpreempt_enable();
9665 }
9666
9667
9668 /*
9669 * Replace the specified TSB with a new TSB. This function gets called when
9670 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the
9671 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
9672 * (8K).
9673 *
9674 * Caller must hold the HAT lock, but should assume any tsb_info
9675 * pointers it has are no longer valid after calling this function.
9676 *
9677 * Return values:
9678 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints
9679 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing
9680 * something to this tsbinfo/TSB
9681 * TSB_SUCCESS Operation succeeded
9682 */
9683 static tsb_replace_rc_t
9684 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
9685 hatlock_t *hatlockp, uint_t flags)
9686 {
9687 struct tsb_info *new_tsbinfo = NULL;
9688 struct tsb_info *curtsb, *prevtsb;
9689 uint_t tte_sz_mask;
9690 int i;
|