Print this page
patch as-lock-macro-simplification

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/i86pc/vm/hat_i86.c
          +++ new/usr/src/uts/i86pc/vm/hat_i86.c
↓ open down ↓ 252 lines elided ↑ open up ↑
 253  253          uint_t                  cnt;
 254  254          htable_t                *src;
 255  255  
 256  256          /*
 257  257           * Once we start creating user process HATs we can enable
 258  258           * the htable_steal() code.
 259  259           */
 260  260          if (can_steal_post_boot == 0)
 261  261                  can_steal_post_boot = 1;
 262  262  
 263      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
      263 +        ASSERT(AS_WRITE_HELD(as));
 264  264          hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
 265  265          hat->hat_as = as;
 266  266          mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 267  267          ASSERT(hat->hat_flags == 0);
 268  268  
 269  269  #if defined(__xpv)
 270  270          /*
 271  271           * No VLP stuff on the hypervisor due to the 64-bit split top level
 272  272           * page tables.  On 32-bit it's not needed as the hypervisor takes
 273  273           * care of copying the top level PTEs to a below 4Gig page.
↓ open down ↓ 112 lines elided ↑ open up ↑
 386  386          return (hat);
 387  387  }
 388  388  
 389  389  /*
 390  390   * process has finished executing but as has not been cleaned up yet.
 391  391   */
 392  392  /*ARGSUSED*/
 393  393  void
 394  394  hat_free_start(hat_t *hat)
 395  395  {
 396      -        ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
      396 +        ASSERT(AS_WRITE_HELD(hat->hat_as));
 397  397  
 398  398          /*
 399  399           * If the hat is currently a stealing victim, wait for the stealing
 400  400           * to finish.  Once we mark it as HAT_FREEING, htable_steal()
 401  401           * won't look at its pagetables anymore.
 402  402           */
 403  403          mutex_enter(&hat_list_lock);
 404  404          while (hat->hat_flags & HAT_VICTIM)
 405  405                  cv_wait(&hat_list_cv, &hat_list_lock);
 406  406          hat->hat_flags |= HAT_FREEING;
↓ open down ↓ 312 lines elided ↑ open up ↑
 719  719                  vlp_hash_cache = hat_hash_cache;
 720  720          } else {
 721  721                  vlp_hash_cache = kmem_cache_create("HatVlpHash",
 722  722                      mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
 723  723                      NULL, 0, 0);
 724  724          }
 725  725  
 726  726          /*
 727  727           * Set up the kernel's hat
 728  728           */
 729      -        AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
      729 +        AS_LOCK_ENTER(&kas, RW_WRITER);
 730  730          kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
 731  731          mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 732  732          kas.a_hat->hat_as = &kas;
 733  733          kas.a_hat->hat_flags = 0;
 734      -        AS_LOCK_EXIT(&kas, &kas.a_lock);
      734 +        AS_LOCK_EXIT(&kas);
 735  735  
 736  736          CPUSET_ZERO(khat_cpuset);
 737  737          CPUSET_ADD(khat_cpuset, CPU->cpu_id);
 738  738  
 739  739          /*
 740  740           * The kernel hat's next pointer serves as the head of the hat list.
 741  741           * The kernel hat's prev pointer tracks the last hat on the list for
 742  742           * htable_steal() to use.
 743  743           */
 744  744          kas.a_hat->hat_next = NULL;
↓ open down ↓ 405 lines elided ↑ open up ↑
1150 1150          /*
1151 1151           * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
1152 1152           * seg_spt and shared pagetables can't be swapped out.
1153 1153           * Take a look at segspt_shmswapout() - it's a big no-op.
1154 1154           *
1155 1155           * Instead we'll walk through all the address space and unload
1156 1156           * any mappings which we are sure are not shared, not locked.
1157 1157           */
1158 1158          ASSERT(IS_PAGEALIGNED(vaddr));
1159 1159          ASSERT(IS_PAGEALIGNED(eaddr));
1160      -        ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
     1160 +        ASSERT(AS_LOCK_HELD(hat->hat_as));
1161 1161          if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1162 1162                  eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1163 1163  
1164 1164          while (vaddr < eaddr) {
1165 1165                  (void) htable_walk(hat, &ht, &vaddr, eaddr);
1166 1166                  if (ht == NULL)
1167 1167                          break;
1168 1168  
1169 1169                  ASSERT(!IN_VA_HOLE(vaddr));
1170 1170  
↓ open down ↓ 260 lines elided ↑ open up ↑
1431 1431          x86pte_t        pte;
1432 1432          int             rv = 0;
1433 1433  
1434 1434          /*
1435 1435           * The number 16 is arbitrary and here to catch a recursion problem
1436 1436           * early before we blow out the kernel stack.
1437 1437           */
1438 1438          ++curthread->t_hatdepth;
1439 1439          ASSERT(curthread->t_hatdepth < 16);
1440 1440  
1441      -        ASSERT(hat == kas.a_hat ||
1442      -            AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
     1441 +        ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1443 1442  
1444 1443          if (flags & HAT_LOAD_SHARE)
1445 1444                  hat->hat_flags |= HAT_SHARED;
1446 1445  
1447 1446          /*
1448 1447           * Find the page table that maps this page if it already exists.
1449 1448           */
1450 1449          ht = htable_lookup(hat, va, level);
1451 1450  
1452 1451          /*
↓ open down ↓ 127 lines elided ↑ open up ↑
1580 1579          uint_t          attr,
1581 1580          uint_t          flags)
1582 1581  {
1583 1582          uintptr_t       va = (uintptr_t)addr;
1584 1583          level_t         level = 0;
1585 1584          pfn_t           pfn = page_pptonum(pp);
1586 1585  
1587 1586          XPV_DISALLOW_MIGRATE();
1588 1587          ASSERT(IS_PAGEALIGNED(va));
1589 1588          ASSERT(hat == kas.a_hat || va < _userlimit);
1590      -        ASSERT(hat == kas.a_hat ||
1591      -            AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
     1589 +        ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1592 1590          ASSERT((flags & supported_memload_flags) == flags);
1593 1591  
1594 1592          ASSERT(!IN_VA_HOLE(va));
1595 1593          ASSERT(!PP_ISFREE(pp));
1596 1594  
1597 1595          /*
1598 1596           * kernel address special case for performance.
1599 1597           */
1600 1598          if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1601 1599                  ASSERT(hat == kas.a_hat);
↓ open down ↓ 36 lines elided ↑ open up ↑
1638 1636          uintptr_t       eaddr = va + len;
1639 1637          level_t         level;
1640 1638          size_t          pgsize;
1641 1639          pgcnt_t         pgindx = 0;
1642 1640          pfn_t           pfn;
1643 1641          pgcnt_t         i;
1644 1642  
1645 1643          XPV_DISALLOW_MIGRATE();
1646 1644          ASSERT(IS_PAGEALIGNED(va));
1647 1645          ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1648      -        ASSERT(hat == kas.a_hat ||
1649      -            AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
     1646 +        ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1650 1647          ASSERT((flags & supported_memload_flags) == flags);
1651 1648  
1652 1649          /*
1653 1650           * memload is used for memory with full caching enabled, so
1654 1651           * set HAT_STORECACHING_OK.
1655 1652           */
1656 1653          attr |= HAT_STORECACHING_OK;
1657 1654  
1658 1655          /*
1659 1656           * handle all pages using largest possible pagesize
↓ open down ↓ 114 lines elided ↑ open up ↑
1774 1771          uintptr_t       eva = va + len;
1775 1772          level_t         level;
1776 1773          size_t          pgsize;
1777 1774          page_t          *pp;
1778 1775          int             f;      /* per PTE copy of flags  - maybe modified */
1779 1776          uint_t          a;      /* per PTE copy of attr */
1780 1777  
1781 1778          XPV_DISALLOW_MIGRATE();
1782 1779          ASSERT(IS_PAGEALIGNED(va));
1783 1780          ASSERT(hat == kas.a_hat || eva <= _userlimit);
1784      -        ASSERT(hat == kas.a_hat ||
1785      -            AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
     1781 +        ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1786 1782          ASSERT((flags & supported_devload_flags) == flags);
1787 1783  
1788 1784          /*
1789 1785           * handle all pages
1790 1786           */
1791 1787          while (va < eva) {
1792 1788  
1793 1789                  /*
1794 1790                   * decide what level mapping to use (ie. pagesize)
1795 1791                   */
↓ open down ↓ 87 lines elided ↑ open up ↑
1883 1879           */
1884 1880          ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1885 1881          ASSERT(IS_PAGEALIGNED(vaddr));
1886 1882          ASSERT(IS_PAGEALIGNED(eaddr));
1887 1883          if (hat == kas.a_hat)
1888 1884                  return;
1889 1885          if (eaddr > _userlimit)
1890 1886                  panic("hat_unlock() address out of range - above _userlimit");
1891 1887  
1892 1888          XPV_DISALLOW_MIGRATE();
1893      -        ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
     1889 +        ASSERT(AS_LOCK_HELD(hat->hat_as));
1894 1890          while (vaddr < eaddr) {
1895 1891                  (void) htable_walk(hat, &ht, &vaddr, eaddr);
1896 1892                  if (ht == NULL)
1897 1893                          break;
1898 1894  
1899 1895                  ASSERT(!IN_VA_HOLE(vaddr));
1900 1896  
1901 1897                  if (ht->ht_lock_cnt < 1)
1902 1898                          panic("hat_unlock(): lock_cnt < 1, "
1903 1899                              "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
↓ open down ↓ 734 lines elided ↑ open up ↑
2638 2634          uintptr_t       vaddr = (uintptr_t)addr;
2639 2635          uintptr_t       eaddr = (uintptr_t)addr + len;
2640 2636          htable_t        *ht = NULL;
2641 2637          uint_t          entry;
2642 2638          x86pte_t        oldpte, newpte;
2643 2639          page_t          *pp;
2644 2640  
2645 2641          XPV_DISALLOW_MIGRATE();
2646 2642          ASSERT(IS_PAGEALIGNED(vaddr));
2647 2643          ASSERT(IS_PAGEALIGNED(eaddr));
2648      -        ASSERT(hat == kas.a_hat ||
2649      -            AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
     2644 +        ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2650 2645          for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2651 2646  try_again:
2652 2647                  oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2653 2648                  if (ht == NULL)
2654 2649                          break;
2655 2650                  if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
2656 2651                          continue;
2657 2652  
2658 2653                  pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2659 2654                  if (pp == NULL)
↓ open down ↓ 189 lines elided ↑ open up ↑
2849 2844   */
2850 2845  int
2851 2846  hat_probe(hat_t *hat, caddr_t addr)
2852 2847  {
2853 2848          uintptr_t       vaddr = ALIGN2PAGE(addr);
2854 2849          uint_t          entry;
2855 2850          htable_t        *ht;
2856 2851          pgcnt_t         pg_off;
2857 2852  
2858 2853          ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2859      -        ASSERT(hat == kas.a_hat ||
2860      -            AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
     2854 +        ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2861 2855          if (IN_VA_HOLE(vaddr))
2862 2856                  return (0);
2863 2857  
2864 2858          /*
2865 2859           * Most common use of hat_probe is from segmap. We special case it
2866 2860           * for performance.
2867 2861           */
2868 2862          if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2869 2863                  pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2870 2864                  if (mmu.pae_hat)
↓ open down ↓ 1628 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX