patch as-lock-macro-simplification

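Summary of the change, as a minimal before/after sketch assembled from the first hunk below: every AS_LOCK_ENTER/AS_LOCK_EXIT/AS_WRITE_HELD/AS_LOCK_HELD caller drops the explicit &as->a_lock (rwlock) argument. The macro definitions themselves are not part of this file; the sketch assumes the simplified macros derive the rwlock from the struct as internally.

        /* Before: callers passed both the address space and its rwlock. */
        AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
        kas.a_hat = hat_alloc(&kas);
        AS_LOCK_EXIT(&kas, &kas.a_lock);

        /* After: only the address space is passed. */
        AS_LOCK_ENTER(&kas, RW_WRITER);
        kas.a_hat = hat_alloc(&kas);
        AS_LOCK_EXIT(&kas);

        /* Lock-held assertions simplify the same way. */
        ASSERT(AS_WRITE_HELD(as));              /* was AS_WRITE_HELD(as, &as->a_lock) */
        ASSERT(AS_LOCK_HELD(hat->sfmmu_as));    /* was AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock) */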
          --- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
          +++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
(1310 lines elided)
1311 1311              sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1312 1312              NULL, NULL, static_arena, KMC_NOHASH);
1313 1313  
1314 1314          ism_ment_cache = kmem_cache_create("ism_ment_cache",
1315 1315              sizeof (ism_ment_t), 0, NULL, NULL,
1316 1316              NULL, NULL, NULL, 0);
1317 1317  
1318 1318          /*
1319 1319           * We grab the first hat for the kernel,
1320 1320           */
1321      -        AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
     1321 +        AS_LOCK_ENTER(&kas, RW_WRITER);
1322 1322          kas.a_hat = hat_alloc(&kas);
1323      -        AS_LOCK_EXIT(&kas, &kas.a_lock);
     1323 +        AS_LOCK_EXIT(&kas);
1324 1324  
1325 1325          /*
1326 1326           * Initialize hblk_reserve.
1327 1327           */
1328 1328          ((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1329 1329              va_to_pa((caddr_t)hblk_reserve);
1330 1330  
1331 1331  #ifndef UTSB_PHYS
1332 1332          /*
1333 1333           * Reserve some kernel virtual address space for the locked TTEs
(122 lines elided)
1456 1456   * Called when an address space first uses a hat.
1457 1457   */
1458 1458  struct hat *
1459 1459  hat_alloc(struct as *as)
1460 1460  {
1461 1461          sfmmu_t *sfmmup;
1462 1462          int i;
1463 1463          uint64_t cnum;
1464 1464          extern uint_t get_color_start(struct as *);
1465 1465  
1466      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     1466 +        ASSERT(AS_WRITE_HELD(as));
1467 1467          sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1468 1468          sfmmup->sfmmu_as = as;
1469 1469          sfmmup->sfmmu_flags = 0;
1470 1470          sfmmup->sfmmu_tteflags = 0;
1471 1471          sfmmup->sfmmu_rtteflags = 0;
1472 1472          LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
1473 1473  
1474 1474          if (as == &kas) {
1475 1475                  ksfmmup = sfmmup;
1476 1476                  sfmmup->sfmmu_cext = 0;
(433 lines elided)
1910 1910          }
1911 1911  }
1912 1912  
1913 1913  /*
1914 1914   * Free all the translation resources for the specified address space.
1915 1915   * Called from as_free when an address space is being destroyed.
1916 1916   */
1917 1917  void
1918 1918  hat_free_start(struct hat *sfmmup)
1919 1919  {
1920      -        ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
     1920 +        ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
1921 1921          ASSERT(sfmmup != ksfmmup);
1922 1922          ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1923 1923  
1924 1924          sfmmup->sfmmu_free = 1;
1925 1925          if (sfmmup->sfmmu_scdp != NULL) {
1926 1926                  sfmmu_leave_scd(sfmmup, 0);
1927 1927          }
1928 1928  
1929 1929          ASSERT(sfmmup->sfmmu_scdp == NULL);
1930 1930  }
(309 lines elided)
2240 2240                      (void *)pp);
2241 2241          }
2242 2242  
2243 2243          if (hat->sfmmu_xhat_provider) {
2244 2244                  /* no regions for xhats */
2245 2245                  ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2246 2246                  XHAT_MEMLOAD(hat, addr, pp, attr, flags);
2247 2247                  return;
2248 2248          }
2249 2249  
2250      -        ASSERT((hat == ksfmmup) ||
2251      -            AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
     2250 +        ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2252 2251  
2253 2252          if (flags & ~SFMMU_LOAD_ALLFLAG)
2254 2253                  cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2255 2254                      flags & ~SFMMU_LOAD_ALLFLAG);
2256 2255  
2257 2256          if (hat->sfmmu_rmstat)
2258 2257                  hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2259 2258  
2260 2259  #if defined(SF_ERRATA_57)
2261 2260          if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
(34 lines elided)
2296 2295  
2297 2296          ASSERT(hat != NULL);
2298 2297  
2299 2298          if (hat->sfmmu_xhat_provider) {
2300 2299                  XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
2301 2300                  return;
2302 2301          }
2303 2302  
2304 2303          ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2305 2304          ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2306      -        ASSERT((hat == ksfmmup) ||
2307      -            AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
     2305 +        ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2308 2306          if (len == 0)
2309 2307                  panic("hat_devload: zero len");
2310 2308          if (flags & ~SFMMU_LOAD_ALLFLAG)
2311 2309                  cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2312 2310                      flags & ~SFMMU_LOAD_ALLFLAG);
2313 2311  
2314 2312  #if defined(SF_ERRATA_57)
2315 2313          if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2316 2314              (addr < errata57_limit) && (attr & PROT_EXEC) &&
2317 2315              !(flags & HAT_LOAD_SHARE)) {
(1648 lines elided)
3966 3964  {
3967 3965          struct hmehash_bucket *hmebp;
3968 3966          hmeblk_tag hblktag;
3969 3967          int hmeshift, hashno = 1;
3970 3968          struct hme_blk *hmeblkp, *list = NULL;
3971 3969          caddr_t endaddr;
3972 3970  
3973 3971          ASSERT(sfmmup != NULL);
3974 3972          ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3975 3973  
3976      -        ASSERT((sfmmup == ksfmmup) ||
3977      -            AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
     3974 +        ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
3978 3975          ASSERT((len & MMU_PAGEOFFSET) == 0);
3979 3976          endaddr = addr + len;
3980 3977          hblktag.htag_id = sfmmup;
3981 3978          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3982 3979  
3983 3980          /*
3984 3981           * Spitfire supports 4 page sizes.
3985 3982           * Most pages are expected to be of the smallest page size (8K) and
3986 3983           * these will not need to be rehashed. 64K pages also don't need to be
3987 3984           * rehashed because an hmeblk spans 64K of address space. 512K pages
(775 lines elided)
4763 4760   */
4764 4761  int
4765 4762  hat_probe(struct hat *sfmmup, caddr_t addr)
4766 4763  {
4767 4764          pfn_t pfn;
4768 4765          tte_t tte;
4769 4766  
4770 4767          ASSERT(sfmmup != NULL);
4771 4768          ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4772 4769  
4773      -        ASSERT((sfmmup == ksfmmup) ||
4774      -            AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
     4770 +        ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4775 4771  
4776 4772          if (sfmmup == ksfmmup) {
4777 4773                  while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4778 4774                      == PFN_SUSPENDED) {
4779 4775                          sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4780 4776                  }
4781 4777          } else {
4782 4778                  pfn = sfmmu_uvatopfn(addr, sfmmup, NULL);
4783 4779          }
4784 4780  
(128 lines elided)
4913 4909          struct hmehash_bucket *hmebp;
4914 4910          hmeblk_tag hblktag;
4915 4911          int hmeshift, hashno = 1;
4916 4912          struct hme_blk *hmeblkp, *list = NULL;
4917 4913          caddr_t endaddr;
4918 4914          cpuset_t cpuset;
4919 4915          demap_range_t dmr;
4920 4916  
4921 4917          CPUSET_ZERO(cpuset);
4922 4918  
4923      -        ASSERT((sfmmup == ksfmmup) ||
4924      -            AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
     4919 +        ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4925 4920          ASSERT((len & MMU_PAGEOFFSET) == 0);
4926 4921          ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4927 4922  
4928 4923          if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4929 4924              ((addr + len) > (caddr_t)USERLIMIT)) {
4930 4925                  panic("user addr %p in kernel space",
4931 4926                      (void *)addr);
4932 4927          }
4933 4928  
4934 4929          endaddr = addr + len;
(772 lines elided)
5707 5702                   * XHATs attached, unload the mappings for all of them,
5708 5703                   * just in case
5709 5704                   */
5710 5705                  ASSERT(sfmmup->sfmmu_as != NULL);
5711 5706                  if (sfmmup->sfmmu_as->a_xhat != NULL)
5712 5707                          xhat_unload_callback_all(sfmmup->sfmmu_as, addr,
5713 5708                              len, flags, callback);
5714 5709          }
5715 5710  
5716 5711          ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5717      -            AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
     5712 +            AS_LOCK_HELD(sfmmup->sfmmu_as));
5718 5713  
5719 5714          ASSERT(sfmmup != NULL);
5720 5715          ASSERT((len & MMU_PAGEOFFSET) == 0);
5721 5716          ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5722 5717  
5723 5718          /*
5724 5719           * Probing through a large VA range (say 63 bits) will be slow, even
5725 5720           * at 4 Meg steps between the probes. So, when the virtual address range
5726 5721           * is very large, search the HME entries for what to unload.
5727 5722           *
(597 lines elided)
6325 6320  hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6326 6321  {
6327 6322          struct hmehash_bucket *hmebp;
6328 6323          hmeblk_tag hblktag;
6329 6324          int hmeshift, hashno = 1;
6330 6325          struct hme_blk *hmeblkp, *list = NULL;
6331 6326          caddr_t endaddr;
6332 6327          cpuset_t cpuset;
6333 6328  
6334 6329          ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
6335      -        ASSERT((sfmmup == ksfmmup) ||
6336      -            AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
     6330 +        ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
6337 6331          ASSERT((len & MMU_PAGEOFFSET) == 0);
6338 6332          ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6339 6333              (clearflag == HAT_SYNC_ZERORM));
6340 6334  
6341 6335          CPUSET_ZERO(cpuset);
6342 6336  
6343 6337          endaddr = addr + len;
6344 6338          hblktag.htag_id = sfmmup;
6345 6339          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
6346 6340  
(1622 lines elided)
7969 7963   * Returns PFN_INVALID to indicate an invalid mapping
7970 7964   */
7971 7965  pfn_t
7972 7966  hat_getpfnum(struct hat *hat, caddr_t addr)
7973 7967  {
7974 7968          pfn_t pfn;
7975 7969          tte_t tte;
7976 7970  
7977 7971          /*
7978 7972           * We would like to
7979      -         * ASSERT(AS_LOCK_HELD(as, &as->a_lock));
     7973 +         * ASSERT(AS_LOCK_HELD(as));
7980 7974           * but we can't because the iommu driver will call this
7981 7975           * routine at interrupt time and it can't grab the as lock
7982 7976           * or it will deadlock: A thread could have the as lock
7983 7977           * and be waiting for io.  The io can't complete
7984 7978           * because the interrupt thread is blocked trying to grab
7985 7979           * the as lock.
7986 7980           */
7987 7981  
7988 7982          ASSERT(hat->sfmmu_xhat_provider == NULL);
7989 7983  
(6034 lines elided)
14024 14018          uchar_t tteflag;
14025 14019          uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14026 14020          int text = (r_type == HAT_REGION_TEXT);
14027 14021  
14028 14022          if (srdp == NULL || r_size == 0) {
14029 14023                  return (HAT_INVALID_REGION_COOKIE);
14030 14024          }
14031 14025  
14032 14026          ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14033 14027          ASSERT(sfmmup != ksfmmup);
14034      -        ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
     14028 +        ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
14035 14029          ASSERT(srdp->srd_refcnt > 0);
14036 14030          ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14037 14031          ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14038 14032          ASSERT(r_pgszc < mmu_page_sizes);
14039 14033          if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
14040 14034              !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
14041 14035                  panic("hat_join_region: region addr or size is not aligned\n");
14042 14036          }
14043 14037  
14044 14038  
(282 lines elided)
14327 14321                  rgnp = srdp->srd_ismrgnp[rid];
14328 14322          } else {
14329 14323                  ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14330 14324                  ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14331 14325                  rgnp = srdp->srd_hmergnp[rid];
14332 14326          }
14333 14327          ASSERT(rgnp != NULL);
14334 14328          ASSERT(rgnp->rgn_id == rid);
14335 14329          ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14336 14330          ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14337      -        ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
     14331 +        ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
14338 14332  
14339 14333          ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14340 14334          if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) {
14341 14335                  xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr,
14342 14336                      rgnp->rgn_size, 0, NULL);
14343 14337          }
14344 14338  
14345 14339          if (sfmmup->sfmmu_free) {
14346 14340                  ulong_t rttecnt;
14347 14341                  r_pgszc = rgnp->rgn_pgszc;
(775 lines elided)
15123 15117  sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
15124 15118  {
15125 15119          hatlock_t *hatlockp;
15126 15120          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15127 15121          int i;
15128 15122          sf_scd_t *old_scdp;
15129 15123  
15130 15124          ASSERT(srdp != NULL);
15131 15125          ASSERT(scdp != NULL);
15132 15126          ASSERT(scdp->scd_refcnt > 0);
15133      -        ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
     15127 +        ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
15134 15128  
15135 15129          if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) {
15136 15130                  ASSERT(old_scdp != scdp);
15137 15131  
15138 15132                  mutex_enter(&old_scdp->scd_mutex);
15139 15133                  sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup);
15140 15134                  mutex_exit(&old_scdp->scd_mutex);
15141 15135                  /*
15142 15136                   * sfmmup leaves the old scd. Update sfmmu_ttecnt to
15143 15137                   * include the shme rgn ttecnt for rgns that
(91 lines elided)
15235 15229   * the process's region map if not then a new SCD may be created.
15236 15230   */
15237 15231  static void
15238 15232  sfmmu_find_scd(sfmmu_t *sfmmup)
15239 15233  {
15240 15234          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15241 15235          sf_scd_t *scdp, *new_scdp;
15242 15236          int ret;
15243 15237  
15244 15238          ASSERT(srdp != NULL);
15245      -        ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
     15239 +        ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
15246 15240  
15247 15241          mutex_enter(&srdp->srd_scd_mutex);
15248 15242          for (scdp = srdp->srd_scdp; scdp != NULL;
15249 15243              scdp = scdp->scd_next) {
15250 15244                  SF_RGNMAP_EQUAL(&scdp->scd_region_map,
15251 15245                      &sfmmup->sfmmu_region_map, ret);
15252 15246                  if (ret == 1) {
15253 15247                          SF_SCD_INCR_REF(scdp);
15254 15248                          mutex_exit(&srdp->srd_scd_mutex);
15255 15249                          sfmmu_join_scd(scdp, sfmmup);
(85 lines elided)
15341 15335  
15342 15336                  SF_SCD_DECR_REF(srdp, scdp);
15343 15337                  return;
15344 15338          }
15345 15339  
15346 15340          ASSERT(r_type != SFMMU_REGION_ISM ||
15347 15341              SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15348 15342          ASSERT(scdp->scd_refcnt);
15349 15343          ASSERT(!sfmmup->sfmmu_free);
15350 15344          ASSERT(sfmmu_hat_lock_held(sfmmup));
15351      -        ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
     15345 +        ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
15352 15346  
15353 15347          /*
15354 15348           * Wait for ISM maps to be updated.
15355 15349           */
15356 15350          if (r_type != SFMMU_REGION_ISM) {
15357 15351                  while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) &&
15358 15352                      sfmmup->sfmmu_scdp != NULL) {
15359 15353                          cv_wait(&sfmmup->sfmmu_tsb_cv,
15360 15354                              HATLOCK_MUTEXP(hatlockp));
15361 15355                  }
(492 lines elided)