6065 page hash: use a static inline instead of a macro
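
This change replaces the statement-style PAGE_HASH_SEARCH macro, which wrote its
result into a caller-supplied variable, with a static inline page_hash_search()
that returns the page and keeps the VM_STATS bookkeeping in a single #ifdef
block.  Below is a minimal, self-contained sketch of the same conversion on a
hypothetical hash chain (node_t, hash_table, and the helper names are
illustrative only; the real code is in the diff that follows):

        #include <stddef.h>

        typedef struct node {
                struct node     *n_next;
                int             n_key;
        } node_t;

        #define NHASH   64
        static node_t *hash_table[NHASH];

        /* Old style: a statement macro assigning into an output parameter. */
        #define HASH_SEARCH(idx, np, key) {                                 \
                for ((np) = hash_table[(idx)]; (np); (np) = (np)->n_next)   \
                        if ((np)->n_key == (key))                           \
                                break;                                      \
        }

        /* New style: a static inline that returns the result. */
        static inline node_t *
        hash_search(size_t idx, int key)
        {
                node_t *np;

                for (np = hash_table[idx]; np != NULL; np = np->n_next)
                        if (np->n_key == key)
                                break;

                return (np);
        }

Call sites change from PAGE_HASH_SEARCH(index, pp, vp, off), which mutates pp in
place, to plain assignments of the form pp = page_hash_search(index, vp, off);
the inline additionally gives argument type checking and a body the compiler and
debugger can see.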

          --- old/usr/src/uts/common/vm/vm_page.c
          +++ new/usr/src/uts/common/vm/vm_page.c
... 12 lines elided ...
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
       23 + * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
  23   24   */
  24   25  
  25   26  /*      Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989  AT&T    */
  26   27  /*        All Rights Reserved   */
  27   28  
  28   29  /*
  29   30   * University Copyright- Copyright (c) 1982, 1986, 1988
  30   31   * The Regents of the University of California
  31   32   * All Rights Reserved
  32   33   *
... 227 lines elided ...
 260  261  uint_t  page_find_cnt;
 261  262  uint_t  page_exists_cnt;
 262  263  uint_t  page_exists_forreal_cnt;
 263  264  uint_t  page_lookup_dev_cnt;
 264  265  uint_t  get_cachelist_cnt;
 265  266  uint_t  page_create_cnt[10];
 266  267  uint_t  alloc_pages[9];
 267  268  uint_t  page_exphcontg[19];
 268  269  uint_t  page_create_large_cnt[10];
 269  270  
 270      -/*
 271      - * Collects statistics.
 272      - */
 273      -#define PAGE_HASH_SEARCH(index, pp, vp, off) { \
 274      -        uint_t  mylen = 0; \
 275      -                        \
 276      -        for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \
 277      -                if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
 278      -                        break; \
 279      -        } \
 280      -        if ((pp) != NULL) \
 281      -                pagecnt.pc_find_hit++; \
 282      -        else \
 283      -                pagecnt.pc_find_miss++; \
 284      -        if (mylen > PC_HASH_CNT) \
 285      -                mylen = PC_HASH_CNT; \
 286      -        pagecnt.pc_find_hashlen[mylen]++; \
 287      -}
 288      -
 289      -#else   /* VM_STATS */
 290      -
 291      -/*
 292      - * Don't collect statistics
 293      - */
 294      -#define PAGE_HASH_SEARCH(index, pp, vp, off) { \
 295      -        for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
 296      -                if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
 297      -                        break; \
 298      -        } \
 299      -}
      271 +#endif
 300  272  
 301      -#endif  /* VM_STATS */
      273 +static inline page_t *
      274 +page_hash_search(ulong_t index, vnode_t *vnode, u_offset_t off)
      275 +{
      276 +        uint_t mylen = 0;
      277 +        page_t *page;
 302  278  
      279 +        for (page = page_hash[index]; page; page = page->p_hash, mylen++)
      280 +                if (page->p_vnode == vnode && page->p_offset == off)
      281 +                        break;
      282 +
      283 +#ifdef  VM_STATS
      284 +        if (page != NULL)
      285 +                pagecnt.pc_find_hit++;
      286 +        else
      287 +                pagecnt.pc_find_miss++;
      288 +
      289 +        pagecnt.pc_find_hashlen[MIN(mylen, PC_HASH_CNT)]++;
      290 +#endif
      291 +
      292 +        return (page);
      293 +}
 303  294  
 304  295  
 305  296  #ifdef DEBUG
 306  297  #define MEMSEG_SEARCH_STATS
 307  298  #endif
 308  299  
 309  300  #ifdef MEMSEG_SEARCH_STATS
 310  301  struct memseg_stats {
 311  302      uint_t nsearch;
 312  303      uint_t nlastwon;
... 430 lines elided ...
 743  734          /*
 744  735           * Acquire the appropriate page hash lock since
 745  736           * we have to search the hash list.  Pages that
 746  737           * hash to this list can't change identity while
 747  738           * this lock is held.
 748  739           */
 749  740          hash_locked = 0;
 750  741          index = PAGE_HASH_FUNC(vp, off);
 751  742          phm = NULL;
 752  743  top:
 753      -        PAGE_HASH_SEARCH(index, pp, vp, off);
      744 +        pp = page_hash_search(index, vp, off);
 754  745          if (pp != NULL) {
 755  746                  VM_STAT_ADD(page_lookup_cnt[1]);
 756  747                  es = (newpp != NULL) ? 1 : 0;
 757  748                  es |= flags;
 758  749                  if (!hash_locked) {
 759  750                          VM_STAT_ADD(page_lookup_cnt[2]);
 760  751                          if (!page_try_reclaim_lock(pp, se, es)) {
 761  752                                  /*
 762  753                                   * On a miss, acquire the phm.  Then
 763  754                                   * next time, page_lock() will be called,
... 13 lines elided ...
 777  768                                  VM_STAT_ADD(page_lookup_cnt[5]);
 778  769                                  goto top;
 779  770                          }
 780  771                  }
 781  772  
 782  773                  /*
 783  774                   * Since `pp' is locked it can not change identity now.
 784  775                   * Reconfirm we locked the correct page.
 785  776                   *
 786  777                   * Both the p_vnode and p_offset *must* be cast volatile
 787      -                 * to force a reload of their values: The PAGE_HASH_SEARCH
 788      -                 * macro will have stuffed p_vnode and p_offset into
      778 +                 * to force a reload of their values: The page_hash_search
      779 +                 * function will have stuffed p_vnode and p_offset into
 789  780                   * registers before calling page_trylock(); another thread,
 790  781                   * actually holding the hash lock, could have changed the
 791  782                   * page's identity in memory, but our registers would not
 792  783                   * be changed, fooling the reconfirmation.  If the hash
 793  784                   * lock was held during the search, the casting would
 794  785                   * not be needed.
 795  786                   */
 796  787                  VM_STAT_ADD(page_lookup_cnt[6]);
 797  788                  if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
 798  789                      ((volatile u_offset_t)(pp->p_offset) != off)) {
... 142 lines elided ...
 941  932  {
 942  933          page_t          *pp;
 943  934          kmutex_t        *phm;
 944  935          ulong_t         index;
 945  936          uint_t          locked;
 946  937  
 947  938          ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
 948  939          VM_STAT_ADD(page_lookup_nowait_cnt[0]);
 949  940  
 950  941          index = PAGE_HASH_FUNC(vp, off);
 951      -        PAGE_HASH_SEARCH(index, pp, vp, off);
      942 +        pp = page_hash_search(index, vp, off);
 952  943          locked = 0;
 953  944          if (pp == NULL) {
 954  945  top:
 955  946                  VM_STAT_ADD(page_lookup_nowait_cnt[1]);
 956  947                  locked = 1;
 957  948                  phm = PAGE_HASH_MUTEX(index);
 958  949                  mutex_enter(phm);
 959      -                PAGE_HASH_SEARCH(index, pp, vp, off);
      950 +                pp = page_hash_search(index, vp, off);
 960  951          }
 961  952  
 962  953          if (pp == NULL || PP_ISFREE(pp)) {
 963  954                  VM_STAT_ADD(page_lookup_nowait_cnt[2]);
 964  955                  pp = NULL;
 965  956          } else {
 966  957                  if (!page_trylock(pp, se)) {
 967  958                          VM_STAT_ADD(page_lookup_nowait_cnt[3]);
 968  959                          pp = NULL;
 969  960                  } else {
... 41 lines elided ...
1011 1002          kmutex_t        *phm;
1012 1003          ulong_t         index;
1013 1004  
1014 1005          ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
1015 1006          VM_STAT_ADD(page_find_cnt);
1016 1007  
1017 1008          index = PAGE_HASH_FUNC(vp, off);
1018 1009          phm = PAGE_HASH_MUTEX(index);
1019 1010  
1020 1011          mutex_enter(phm);
1021      -        PAGE_HASH_SEARCH(index, pp, vp, off);
     1012 +        pp = page_hash_search(index, vp, off);
1022 1013          mutex_exit(phm);
1023 1014  
1024 1015          ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
1025 1016          return (pp);
1026 1017  }
1027 1018  
1028 1019  /*
1029 1020   * Determine whether a page with the specified [vp, off]
1030 1021   * currently exists in the system.  Obviously this should
1031 1022   * only be considered as a hint since nothing prevents the
1032 1023   * page from disappearing or appearing immediately after
1033 1024   * the return from this routine. Subsequently, we don't
1034 1025   * even bother to lock the list.
1035 1026   */
1036 1027  page_t *
1037 1028  page_exists(vnode_t *vp, u_offset_t off)
1038 1029  {
1039      -        page_t  *pp;
1040 1030          ulong_t         index;
1041 1031  
1042 1032          ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
1043 1033          VM_STAT_ADD(page_exists_cnt);
1044 1034  
1045 1035          index = PAGE_HASH_FUNC(vp, off);
1046      -        PAGE_HASH_SEARCH(index, pp, vp, off);
1047 1036  
1048      -        return (pp);
     1037 +        return (page_hash_search(index, vp, off));
1049 1038  }
1050 1039  
1051 1040  /*
1052 1041   * Determine if physically contiguous pages exist for [vp, off] - [vp, off +
1053 1042   * page_size(szc)) range.  if they exist and ppa is not NULL fill ppa array
1054 1043   * with these pages locked SHARED. If necessary reclaim pages from
1055 1044   * freelist. Return 1 if contiguous pages exist and 0 otherwise.
1056 1045   *
1057 1046   * If we fail to lock pages still return 1 if pages exist and contiguous.
1058 1047   * But in this case return value is just a hint. ppa array won't be filled.
... 26 lines elided ...
1085 1074  again:
1086 1075          if (++loopcnt > 3) {
1087 1076                  VM_STAT_ADD(page_exphcontg[0]);
1088 1077                  return (0);
1089 1078          }
1090 1079  
1091 1080          index = PAGE_HASH_FUNC(vp, off);
1092 1081          phm = PAGE_HASH_MUTEX(index);
1093 1082  
1094 1083          mutex_enter(phm);
1095      -        PAGE_HASH_SEARCH(index, pp, vp, off);
     1084 +        pp = page_hash_search(index, vp, off);
1096 1085          mutex_exit(phm);
1097 1086  
1098 1087          VM_STAT_ADD(page_exphcontg[1]);
1099 1088  
1100 1089          if (pp == NULL) {
1101 1090                  VM_STAT_ADD(page_exphcontg[2]);
1102 1091                  return (0);
1103 1092          }
1104 1093  
1105 1094          pages = page_get_pagecnt(szc);
... 206 lines elided ...
1312 1301          int             rc = 0;
1313 1302  
1314 1303          ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
1315 1304          ASSERT(szc != NULL);
1316 1305          VM_STAT_ADD(page_exists_forreal_cnt);
1317 1306  
1318 1307          index = PAGE_HASH_FUNC(vp, off);
1319 1308          phm = PAGE_HASH_MUTEX(index);
1320 1309  
1321 1310          mutex_enter(phm);
1322      -        PAGE_HASH_SEARCH(index, pp, vp, off);
     1311 +        pp = page_hash_search(index, vp, off);
1323 1312          if (pp != NULL) {
1324 1313                  *szc = pp->p_szc;
1325 1314                  rc = 1;
1326 1315          }
1327 1316          mutex_exit(phm);
1328 1317          return (rc);
1329 1318  }
1330 1319  
1331 1320  /* wakeup threads waiting for pages in page_create_get_something() */
1332 1321  void
... 1107 lines elided ...
2440 2429                  PP_CLRAGED(npp);
2441 2430  
2442 2431                  /*
2443 2432                   * Here we have a page in our hot little mits and are
2444 2433                   * just waiting to stuff it on the appropriate lists.
2445 2434                   * Get the mutex and check to see if it really does
2446 2435                   * not exist.
2447 2436                   */
2448 2437                  phm = PAGE_HASH_MUTEX(index);
2449 2438                  mutex_enter(phm);
2450      -                PAGE_HASH_SEARCH(index, pp, vp, off);
     2439 +                pp = page_hash_search(index, vp, off);
2451 2440                  if (pp == NULL) {
2452 2441                          VM_STAT_ADD(page_create_new);
2453 2442                          pp = npp;
2454 2443                          npp = NULL;
2455 2444                          if (!page_hashin(pp, vp, off, phm)) {
2456 2445                                  /*
2457 2446                                   * Since we hold the page hash mutex and
2458 2447                                   * just searched for this page, page_hashin
2459 2448                                   * had better not fail.  If it does, that
2460 2449                                   * means somethread did not follow the
... 809 lines elided ...
3270 3259  top:
3271 3260          /*
3272 3261           * Look for an existing page with this name and destroy it if found.
3273 3262           * By holding the page hash lock all the way to the page_hashin()
3274 3263           * call, we are assured that no page can be created with this
3275 3264           * identity.  In the case when the phm lock is dropped to undo any
3276 3265           * hat layer mappings, the existing page is held with an "exclusive"
3277 3266           * lock, again preventing another page from being created with
3278 3267           * this identity.
3279 3268           */
3280      -        PAGE_HASH_SEARCH(index, pp, vp, off);
     3269 +        pp = page_hash_search(index, vp, off);
3281 3270          if (pp != NULL) {
3282 3271                  VM_STAT_ADD(page_rename_exists);
3283 3272  
3284 3273                  /*
3285 3274                   * As it turns out, this is one of only two places where
3286 3275                   * page_lock() needs to hold the passed in lock in the
3287 3276                   * successful case.  In all of the others, the lock could
3288 3277                   * be dropped as soon as the attempt is made to lock
3289 3278                   * the page.  It is tempting to add yet another arguement,
3290 3279                   * PL_KEEP or PL_DROP, to let page_lock know what to do.
... 4200 lines elided ...
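
A related effect of the conversion, visible in the first hunk: the two parallel
macro definitions (one collecting statistics, one not) collapse into a single
function body whose bookkeeping sits behind one #ifdef VM_STATS block, with the
old explicit clamp replaced by MIN(mylen, PC_HASH_CNT).  A short sketch of that
pattern, reusing the hypothetical node_t/hash_search names from the sketch near
the top (lookup_stats is likewise hypothetical; VM_STATS is the real build flag
from the diff):

        #ifdef VM_STATS
        static struct {
                unsigned int    hits;
                unsigned int    misses;
        } lookup_stats;
        #endif

        static inline node_t *
        hash_search_stats(size_t idx, int key)
        {
                node_t *np = hash_search(idx, key);

        #ifdef VM_STATS
                /* Optional bookkeeping lives in one place, not in a
                   second copy of the whole search macro. */
                if (np != NULL)
                        lookup_stats.hits++;
                else
                        lookup_stats.misses++;
        #endif

                return (np);
        }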