patch as-lock-macro-simplification
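For reference, the simplified AS_* macros take only the address space pointer and derive the lock from it internally, so callers no longer pass the redundant &as->a_lock argument; that is what every hunk below removes. A minimal sketch of what the post-change definitions could look like, assuming the address space lock is the krwlock_t embedded at as->a_lock (the actual vm/as.h declarations may differ in detail):

        /*
         * Sketch of the simplified macros. Assumes the lock protecting
         * the address space is a_lock inside struct as, as the removed
         * &as->a_lock arguments in this patch suggest.
         */
        #define AS_LOCK_ENTER(as, type) rw_enter(&(as)->a_lock, (type))
        #define AS_LOCK_EXIT(as)        rw_exit(&(as)->a_lock)
        #define AS_LOCK_HELD(as)        RW_LOCK_HELD(&(as)->a_lock)
        #define AS_READ_HELD(as)        RW_READ_HELD(&(as)->a_lock)
        #define AS_WRITE_HELD(as)       RW_WRITE_HELD(&(as)->a_lock)

Because the second argument was always derivable from the first, dropping it shortens the call sites (several of the two-line ASSERTs below collapse to one line) without changing locking behavior.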

@@ -258,11 +258,11 @@
          * the htable_steal() code.
          */
         if (can_steal_post_boot == 0)
                 can_steal_post_boot = 1;
 
-        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
+        ASSERT(AS_WRITE_HELD(as));
         hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
         hat->hat_as = as;
         mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
         ASSERT(hat->hat_flags == 0);
 

@@ -391,11 +391,11 @@
  */
 /*ARGSUSED*/
 void
 hat_free_start(hat_t *hat)
 {
-        ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
+        ASSERT(AS_WRITE_HELD(hat->hat_as));
 
         /*
          * If the hat is currently a stealing victim, wait for the stealing
          * to finish.  Once we mark it as HAT_FREEING, htable_steal()
          * won't look at its pagetables anymore.

@@ -724,16 +724,16 @@
         }
 
         /*
          * Set up the kernel's hat
          */
-        AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
+        AS_LOCK_ENTER(&kas, RW_WRITER);
         kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
         mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
         kas.a_hat->hat_as = &kas;
         kas.a_hat->hat_flags = 0;
-        AS_LOCK_EXIT(&kas, &kas.a_lock);
+        AS_LOCK_EXIT(&kas);
 
         CPUSET_ZERO(khat_cpuset);
         CPUSET_ADD(khat_cpuset, CPU->cpu_id);
 
         /*

@@ -1155,11 +1155,11 @@
          * Instead we'll walk through all the address space and unload
          * any mappings which we are sure are not shared, not locked.
          */
         ASSERT(IS_PAGEALIGNED(vaddr));
         ASSERT(IS_PAGEALIGNED(eaddr));
-        ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
+        ASSERT(AS_LOCK_HELD(hat->hat_as));
         if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
                 eaddr = (uintptr_t)hat->hat_as->a_userlimit;
 
         while (vaddr < eaddr) {
                 (void) htable_walk(hat, &ht, &vaddr, eaddr);

@@ -1436,12 +1436,11 @@
          * early before we blow out the kernel stack.
          */
         ++curthread->t_hatdepth;
         ASSERT(curthread->t_hatdepth < 16);
 
-        ASSERT(hat == kas.a_hat ||
-            AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
+        ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
 
         if (flags & HAT_LOAD_SHARE)
                 hat->hat_flags |= HAT_SHARED;
 
         /*

@@ -1585,12 +1584,11 @@
         pfn_t           pfn = page_pptonum(pp);
 
         XPV_DISALLOW_MIGRATE();
         ASSERT(IS_PAGEALIGNED(va));
         ASSERT(hat == kas.a_hat || va < _userlimit);
-        ASSERT(hat == kas.a_hat ||
-            AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
+        ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
         ASSERT((flags & supported_memload_flags) == flags);
 
         ASSERT(!IN_VA_HOLE(va));
         ASSERT(!PP_ISFREE(pp));
 

@@ -1643,12 +1641,11 @@
         pgcnt_t         i;
 
         XPV_DISALLOW_MIGRATE();
         ASSERT(IS_PAGEALIGNED(va));
         ASSERT(hat == kas.a_hat || va + len <= _userlimit);
-        ASSERT(hat == kas.a_hat ||
-            AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
+        ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
         ASSERT((flags & supported_memload_flags) == flags);
 
         /*
          * memload is used for memory with full caching enabled, so
          * set HAT_STORECACHING_OK.

@@ -1779,12 +1776,11 @@
         uint_t          a;      /* per PTE copy of attr */
 
         XPV_DISALLOW_MIGRATE();
         ASSERT(IS_PAGEALIGNED(va));
         ASSERT(hat == kas.a_hat || eva <= _userlimit);
-        ASSERT(hat == kas.a_hat ||
-            AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
+        ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
         ASSERT((flags & supported_devload_flags) == flags);
 
         /*
          * handle all pages
          */

@@ -1888,11 +1884,11 @@
                 return;
         if (eaddr > _userlimit)
                 panic("hat_unlock() address out of range - above _userlimit");
 
         XPV_DISALLOW_MIGRATE();
-        ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
+        ASSERT(AS_LOCK_HELD(hat->hat_as));
         while (vaddr < eaddr) {
                 (void) htable_walk(hat, &ht, &vaddr, eaddr);
                 if (ht == NULL)
                         break;
 

@@ -2643,12 +2639,11 @@
         page_t          *pp;
 
         XPV_DISALLOW_MIGRATE();
         ASSERT(IS_PAGEALIGNED(vaddr));
         ASSERT(IS_PAGEALIGNED(eaddr));
-        ASSERT(hat == kas.a_hat ||
-            AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
+        ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
         for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
 try_again:
                 oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
                 if (ht == NULL)
                         break;

@@ -2854,12 +2849,11 @@
         uint_t          entry;
         htable_t        *ht;
         pgcnt_t         pg_off;
 
         ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
-        ASSERT(hat == kas.a_hat ||
-            AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
+        ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
         if (IN_VA_HOLE(vaddr))
                 return (0);
 
         /*
          * Most common use of hat_probe is from segmap. We special case it