	/*
	 * This code makes htable_steal() easier to test. By setting
	 * force_steal we force pagetable allocations to fall
	 * into the stealing code. Roughly 1 in every "force_steal"
	 * page table allocations will fail.
	 */
	if (proc_pageout != NULL && force_steal > 1 &&
	    ++ptable_cnt > force_steal) {
		ptable_cnt = 0;
		return (PFN_INVALID);
	}
#endif /* DEBUG */

	pp = page_get_physical(seed);
	if (pp == NULL)
		return (PFN_INVALID);
	ASSERT(PAGE_SHARED(pp));
	pfn = pp->p_pagenum;
	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	atomic_add_32(&active_ptables, 1);
	HATSTAT_INC(hs_ptable_allocs);
	return (pfn);
}
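
/*
 * Note: a PFN_INVALID return from ptable_alloc() is what sends callers
 * into the htable stealing path, which the force_steal logic above
 * exploits for testing.
 */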

/*
 * Free an htable's associated page table page. See the comments
 * for ptable_alloc().
 */
static void
ptable_free(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_add_32(&active_ptables, -1);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);
	ASSERT(!IN_XPV_PANIC());

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		u_offset_t off = pp->p_offset;
		page_unlock(pp);
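		/*
		 * The shared lock was just dropped, so the upgrade could
		 * not happen in place; look the page up again in kvp at
		 * the same offset, this time taking it SE_EXCL.
		 */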
		pp = page_lookup(&kvp, off, SE_EXCL);
		if (pp == NULL)
			panic("page not found");
	}
#ifdef __xpv
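	/*
	 * Under Xen, a page acting as a pagetable must be mapped
	 * read-only; since this one is being freed, restore a writable
	 * kpm mapping before the page is released.
	 */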
	if (kpm_vbase && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0)
		panic("failure making kpm r/w pfn=0x%lx", pfn);
#endif

/* ... (rest of ptable_free() elided; the excerpt resumes inside htable_steal()) ... */

	static uint_t h_seed = 0;
	uint_t e;
	uintptr_t va;
	x86pte_t pte;
	uint_t stolen = 0;
	uint_t pass;
	uint_t threshold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;

	/*
	 * Loop through all user hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
	atomic_add_32(&htable_dont_cache, 1);
	for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
		threshold = pass * mmu.ptes_per_table / htable_steal_passes;
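		/*
		 * threshold rises from 0 to mmu.ptes_per_table over the
		 * passes, so early passes only victimize sparsely
		 * populated pagetables; the check against threshold is
		 * in the elided portion of the loop below.
		 */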
		hat = kas.a_hat;
		for (;;) {

			/*
			 * Clear the victim flag and move to next hat
			 */
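			/*
			 * Clearing HAT_VICTIM and broadcasting on
			 * hat_list_cv lets any thread that blocked on this
			 * hat (e.g. hat_free_start()) continue.
			 */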
			mutex_enter(&hat_list_lock);
			if (hat != kas.a_hat) {
				hat->hat_flags &= ~HAT_VICTIM;
				cv_broadcast(&hat_list_cv);
			}
			hat = hat->hat_next;

			/*
			 * Skip any hat that is already being stolen from.
			 *
			 * We skip SHARED hats, as these are dummy
			 * hats that host ISM shared page tables.
			 */

			/* ... (hat filtering and the actual stealing logic elided) ... */

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
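				/*
				 * Advance circularly through the hash
				 * buckets; the do/while ends when the walk
				 * returns to its starting bucket or enough
				 * htables have been stolen.
				 */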
				if (++h == hat->hat_num_hash)
					h = 0;
			} while (stolen < cnt && h != h_start);
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}

/*
 * This is invoked from kmem when the system is low on memory. We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t reap_cnt;
	htable_t *list;
	htable_t *ht;

	HATSTAT_INC(hs_reap_attempts);
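	/*
	 * can_steal_post_boot is presumably not set until boot is far
	 * enough along that stealing pagetables is safe, so early reap
	 * attempts simply bail out here.
	 */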
	if (!can_steal_post_boot)
		return;

/* ... (remainder of htable_reap() and the functions that follow elided) ... */

/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all left over
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs, nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t *ht;
	int h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
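		/*
		 * With htable_dont_cache raised, htable_free() below
		 * really frees each htable rather than returning it to
		 * the hat's cache list.
		 */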
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk through the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				/* ... (excerpt ends here) ... */