patch vm-cleanup

*** 1974,2130 ****
  	kmem_cache_free(sfmmuid_cache, sfmmup);
  }
  
  /*
-  * Set up any translation structures, for the specified address space,
-  * that are needed or preferred when the process is being swapped in.
-  */
- /* ARGSUSED */
- void
- hat_swapin(struct hat *hat)
- {
- 	ASSERT(hat->sfmmu_xhat_provider == NULL);
- }
- 
- /*
-  * Free all of the translation resources, for the specified address space,
-  * that can be freed while the process is swapped out. Called from as_swapout.
-  * Also, free up the ctx that this process was using.
-  */
- void
- hat_swapout(struct hat *sfmmup)
- {
- 	struct hmehash_bucket *hmebp;
- 	struct hme_blk *hmeblkp;
- 	struct hme_blk *pr_hblk = NULL;
- 	struct hme_blk *nx_hblk;
- 	int i;
- 	struct hme_blk *list = NULL;
- 	hatlock_t *hatlockp;
- 	struct tsb_info *tsbinfop;
- 	struct free_tsb {
- 		struct free_tsb *next;
- 		struct tsb_info *tsbinfop;
- 	};	/* free list of TSBs */
- 	struct free_tsb *freelist, *last, *next;
- 
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
- 	SFMMU_STAT(sf_swapout);
- 
- 	/*
- 	 * There is no way to go from an as to all its translations in sfmmu.
- 	 * Here is one of the times when we take the big hit and traverse
- 	 * the hash looking for hme_blks to free up. Not only do we free up
- 	 * this as hme_blks but all those that are free. We are obviously
- 	 * swapping because we need memory so let's free up as much
- 	 * as we can.
- 	 *
- 	 * Note that we don't flush TLB/TSB here -- it's not necessary
- 	 * because:
- 	 *  1) we free the ctx we're using and throw away the TSB(s);
- 	 *  2) processes aren't runnable while being swapped out.
- 	 */
- 	ASSERT(sfmmup != KHATID);
- 	for (i = 0; i <= UHMEHASH_SZ; i++) {
- 		hmebp = &uhme_hash[i];
- 		SFMMU_HASH_LOCK(hmebp);
- 		hmeblkp = hmebp->hmeblkp;
- 		pr_hblk = NULL;
- 		while (hmeblkp) {
- 
- 			ASSERT(!hmeblkp->hblk_xhat_bit);
- 
- 			if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
- 			    !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
- 				ASSERT(!hmeblkp->hblk_shared);
- 				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
- 				    (caddr_t)get_hblk_base(hmeblkp),
- 				    get_hblk_endaddr(hmeblkp),
- 				    NULL, HAT_UNLOAD);
- 			}
- 			nx_hblk = hmeblkp->hblk_next;
- 			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
- 				ASSERT(!hmeblkp->hblk_lckcnt);
- 				sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
- 				    &list, 0);
- 			} else {
- 				pr_hblk = hmeblkp;
- 			}
- 			hmeblkp = nx_hblk;
- 		}
- 		SFMMU_HASH_UNLOCK(hmebp);
- 	}
- 
- 	sfmmu_hblks_list_purge(&list, 0);
- 
- 	/*
- 	 * Now free up the ctx so that others can reuse it.
- 	 */
- 	hatlockp = sfmmu_hat_enter(sfmmup);
- 
- 	sfmmu_invalidate_ctx(sfmmup);
- 
- 	/*
- 	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
- 	 * If TSBs were never swapped in, just return.
- 	 * This implies that we don't support partial swapping
- 	 * of TSBs -- either all are swapped out, or none are.
- 	 *
- 	 * We must hold the HAT lock here to prevent racing with another
- 	 * thread trying to unmap TTEs from the TSB or running the post-
- 	 * relocator after relocating the TSB's memory. Unfortunately, we
- 	 * can't free memory while holding the HAT lock or we could
- 	 * deadlock, so we build a list of TSBs to be freed after marking
- 	 * the tsbinfos as swapped out and free them after dropping the
- 	 * lock.
- 	 */
- 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
- 		sfmmu_hat_exit(hatlockp);
- 		return;
- 	}
- 
- 	SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
- 	last = freelist = NULL;
- 	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
- 	    tsbinfop = tsbinfop->tsb_next) {
- 		ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
- 
- 		/*
- 		 * Cast the TSB into a struct free_tsb and put it on the free
- 		 * list.
- 		 */
- 		if (freelist == NULL) {
- 			last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
- 		} else {
- 			last->next = (struct free_tsb *)tsbinfop->tsb_va;
- 			last = last->next;
- 		}
- 		last->next = NULL;
- 		last->tsbinfop = tsbinfop;
- 		tsbinfop->tsb_flags |= TSB_SWAPPED;
- 		/*
- 		 * Zero out the TTE to clear the valid bit.
- 		 * Note we can't use a value like 0xbad because we want to
- 		 * ensure diagnostic bits are NEVER set on TTEs that might
- 		 * be loaded. The intent is to catch any invalid access
- 		 * to the swapped TSB, such as a thread running with a valid
- 		 * context without first calling sfmmu_tsb_swapin() to
- 		 * allocate TSB memory.
- 		 */
- 		tsbinfop->tsb_tte.ll = 0;
- 	}
- 
- 	/* Now we can drop the lock and free the TSB memory. */
- 	sfmmu_hat_exit(hatlockp);
- 	for (; freelist != NULL; freelist = next) {
- 		next = freelist->next;
- 		sfmmu_tsb_free(freelist->tsbinfop);
- 	}
- }
- 
- /*
   * Duplicate the translations of an as into another newas
   */
  /* ARGSUSED */
  int
  hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
--- 1974,1983 ----
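The TSB teardown being removed here relies on a deferred-free pattern that is easy to miss when reading the diff: memory cannot be freed while the HAT lock is held (that could deadlock against the allocator), so hat_swapout() overlays a struct free_tsb node on each TSB's own backing memory, chains the nodes into a list under the lock, and only calls sfmmu_tsb_free() after dropping it. The following is a minimal user-space sketch of that idea, not sfmmu code; the struct buf, state_lock, release_buffer(), and discard_all() names are invented for illustration, and it assumes each region is at least sizeof (struct free_node) bytes.

/*
 * Sketch of the deferred-free pattern: the regions being discarded are
 * reused as the free-list nodes themselves, so nothing is allocated while
 * the lock is held and the actual frees happen only after it is dropped.
 * All names below are stand-ins, not sfmmu interfaces.
 */
#include <pthread.h>
#include <stdlib.h>

struct buf {			/* stand-in for a tsb_info and its memory */
	struct buf *next;
	void *mem;		/* region to be released; >= sizeof (struct free_node) */
};

struct free_node {		/* overlaid on the memory being released */
	struct free_node *next;
	struct buf *owner;
};

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void
release_buffer(struct buf *bp)	/* stand-in for sfmmu_tsb_free() */
{
	free(bp->mem);
	bp->mem = NULL;
}

void
discard_all(struct buf *list)
{
	struct free_node *freelist = NULL, *last = NULL, *next;
	struct buf *bp;

	pthread_mutex_lock(&state_lock);
	for (bp = list; bp != NULL; bp = bp->next) {
		/* Overlay a free-list node on the region itself. */
		struct free_node *fn = (struct free_node *)bp->mem;

		if (freelist == NULL)
			freelist = fn;
		else
			last->next = fn;
		last = fn;
		last->next = NULL;
		last->owner = bp;
	}
	pthread_mutex_unlock(&state_lock);

	/* Lock dropped; now it is safe to call into the allocator. */
	for (; freelist != NULL; freelist = next) {
		next = freelist->next;
		release_buffer(freelist->owner);
	}
}

Overlaying the list nodes on the memory that is about to be released means no allocation is needed under the lock at all; for TSBs this is always safe because the smallest TSB is 8K.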
*** 9994,10004 ****
  }
  
  /*
   * Replace the specified TSB with a new TSB. This function gets called when
!  * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the
   * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
   * (8K).
   *
   * Caller must hold the HAT lock, but should assume any tsb_info
   * pointers it has are no longer valid after calling this function.
--- 9847,9857 ----
  }
  
  /*
   * Replace the specified TSB with a new TSB. This function gets called when
!  * we grow or shrink a TSB. When swapping in a TSB (TSB_SWAPIN), the
   * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
   * (8K).
   *
   * Caller must hold the HAT lock, but should assume any tsb_info
   * pointers it has are no longer valid after calling this function.