5045 use atomic_{inc,dec}_* instead of atomic_add_*
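
The hunks below come from the DISM shared-memory segment driver and are shown old code first, patched code second. At each site an explicit +1 or -1 delta passed to atomic_add_long() becomes a call to the dedicated increment or decrement routine. Here is a minimal sketch of the substitution using the atomic_ops(3C) interfaces; the counter is hypothetical, and because it is declared ulong_t the cast seen in the diff is not needed:

    #include <sys/types.h>
    #include <atomic.h>    /* atomic_ops(3C); <sys/atomic.h> in the kernel */

    static volatile ulong_t cnt;    /* hypothetical counter */

    void
    adjust(void)
    {
            /* Old style: increment and decrement as explicit deltas. */
            atomic_add_long(&cnt, 1);
            atomic_add_long(&cnt, -1);

            /* New style: same semantics, with the intent in the name. */
            atomic_inc_ulong(&cnt);
            atomic_dec_ulong(&cnt);
    }

The inc/dec forms state the intent directly and remove the chance of a mistyped delta.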


1071                 /*
1072                  * seg_pinsert failed. We return
1073                  * ENOTSUP, so that the as_pagelock() code will
1074                  * then try the slower F_SOFTLOCK path.
1075                  */
1076                 if (pl_built) {
1077                         /*
1078                          * No one else has referenced the ppa[].
1079                          * We created it and we need to destroy it.
1080                          */
1081                         sptd->spt_ppa = NULL;
1082                 }
1083                 ret = ENOTSUP;
1084                 goto insert_fail;
1085         }
1086 
1087         /*
1088          * In either case, we increment softlockcnt on the 'real' segment.
1089          */
1090         sptd->spt_pcachecnt++;
1091         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
1092 
1093         ppa = sptd->spt_ppa;
1094         for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1095                 if (ppa[an_idx] == NULL) {
1096                         mutex_exit(&sptd->spt_lock);
1097                         seg_pinactive(seg, NULL, seg->s_base,
1098                             sptd->spt_amp->size,
1099                             pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1100                         *ppp = NULL;
1101                         return (ENOTSUP);
1102                 }
1103                 if ((szc = ppa[an_idx]->p_szc) != 0) {
1104                         npgs = page_get_pagecnt(szc);
1105                         an_idx = P2ROUNDUP(an_idx + 1, npgs);
1106                 } else {
1107                         an_idx++;
1108                 }
1109         }
1110         /*
1111          * We can now drop the sptd->spt_lock since the ppa[]
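
In the loop above, a slot backed by a large page (nonzero p_szc) advances the index to the next large-page boundary in one step instead of slot by slot. A standalone sketch of that stride arithmetic follows; P2ROUNDUP is copied from its illumos <sys/sysmacros.h> definition, and the index and page-count values are made up:

    #include <stdio.h>

    /*
     * Copied from illumos <sys/sysmacros.h>: round x up to the next
     * multiple of align, where align is a power of two.
     */
    #define P2ROUNDUP(x, align)     (-(-(x) & -(align)))

    int
    main(void)
    {
            unsigned long an_idx = 5;       /* somewhere inside a large page */
            unsigned long npgs = 8;         /* cf. page_get_pagecnt(szc) */

            /* Mirrors "an_idx = P2ROUNDUP(an_idx + 1, npgs)" above. */
            an_idx = P2ROUNDUP(an_idx + 1, npgs);
            printf("%lu\n", an_idx);        /* prints 8 */
            return (0);
    }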


1350                 /*
1351                  * seg_pinsert failed. We return
1352                  * ENOTSUP, so that the as_pagelock() code will
1353                  * then try the slower F_SOFTLOCK path.
1354                  */
1355                 if (pl_built) {
1356                         /*
1357                          * No one else has referenced the ppa[].
1358                          * We created it and we need to destroy it.
1359                          */
1360                         sptd->spt_ppa = NULL;
1361                 }
1362                 ret = ENOTSUP;
1363                 goto insert_fail;
1364         }
1365 
1366         /*
1367          * In either case, we increment softlockcnt on the 'real' segment.
1368          */
1369         sptd->spt_pcachecnt++;
1370         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
1371 
1372         /*
1373          * We can now drop the sptd->spt_lock since the ppa[]
1374          * exists and we have incremented pcachecnt.
1375          */
1376         mutex_exit(&sptd->spt_lock);
1377 
1378         /*
1379          * Since we cache the entire segment, we want to
1380          * set ppp to point to the first slot that corresponds
1381          * to the requested addr, i.e. page_index.
1382          */
1383         *ppp = &(sptd->spt_ppa[page_index]);
1384         return (0);
1385 
1386 insert_fail:
1387         /*
1388          * We will only reach this code if we tried and failed.
1389          *
1390          * And we can drop the lock on the dummy seg, once we've failed
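
Both failure paths in these hunks end the same way: the cached fast path reports ENOTSUP so that as_pagelock() falls back to the slower F_SOFTLOCK route. A trivial sketch of that convention, with both functions as hypothetical stand-ins:

    #include <errno.h>

    static int
    try_fast_pagelock(void)         /* hypothetical cached fast path */
    {
            return (ENOTSUP);       /* cache unusable; caller should fall back */
    }

    static int
    slow_softlock(void)             /* hypothetical F_SOFTLOCK-style slow path */
    {
            return (0);
    }

    int
    pagelock(void)
    {
            int ret = try_fast_pagelock();

            if (ret == ENOTSUP)     /* ENOTSUP means "retry slowly", not "fail" */
                    ret = slow_softlock();
            return (ret);
    }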


1502          * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1503          * this mutex as a barrier to make sure this routine completes before
1504          * the segment is freed.
1505          *
1506          * The second complication we have to deal with in the async case
1507          * is the possibility of a missed wakeup of an unmap wait thread.
1508          * Since we don't hold the as lock here, we may take the a_contents
1509          * lock before an unmap wait thread that had already seen a nonzero
1510          * softlockcnt, and so fail to wake it up. To avoid this race, when
1511          * async is not 0 and we drop softlockcnt to 0, we set the
1512          * nounmapwait flag in the as structure; an unmapwait thread will
1513          * not block while this flag is set.
1514          */
1515         if (async)
1516                 mutex_enter(&shmd->shm_segfree_syncmtx);
1517 
1518         /*
1519          * Now decrement softlockcnt.
1520          */
1521         ASSERT(shmd->shm_softlockcnt > 0);
1522         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
1523 
1524         if (shmd->shm_softlockcnt <= 0) {
1525                 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1526                         mutex_enter(&seg->s_as->a_contents);
1527                         if (async)
1528                                 AS_SETNOUNMAPWAIT(seg->s_as);
1529                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1530                                 AS_CLRUNMAPWAIT(seg->s_as);
1531                                 cv_broadcast(&seg->s_as->a_cv);
1532                         }
1533                         mutex_exit(&seg->s_as->a_contents);
1534                 }
1535         }
1536 
1537         if (async)
1538                 mutex_exit(&shmd->shm_segfree_syncmtx);
1539 
1540         return (done);
1541 }
1542 
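
The decrement path above defends against a lost wakeup: because the releasing thread does not hold the as lock, it can take a_contents before a waiter that has already sampled a nonzero softlockcnt, and its broadcast would then go unheard. Setting nounmapwait tells such a waiter not to block. A loose user-level sketch of that flag protocol, with pthreads standing in for the kernel mutex and condvar and every name hypothetical:

    #include <sys/types.h>
    #include <atomic.h>
    #include <pthread.h>

    static pthread_mutex_t contents = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
    static volatile ulong_t cnt;    /* outstanding holds */
    static int waiting;             /* a waiter has announced itself */
    static int no_wait;             /* releaser: do not block, re-check instead */

    void
    release_hold(void)
    {
            atomic_dec_ulong(&cnt);
            if (cnt == 0) {
                    pthread_mutex_lock(&contents);
                    no_wait = 1;    /* defeat a waiter racing toward sleep */
                    if (waiting) {
                            waiting = 0;
                            pthread_cond_broadcast(&cv);
                    }
                    pthread_mutex_unlock(&contents);
            }
    }

    void
    wait_for_drain(void)
    {
            while (cnt != 0) {      /* sampled without holding the lock */
                    pthread_mutex_lock(&contents);
                    if (!no_wait) { /* releaser got here first: just re-check */
                            waiting = 1;
                            pthread_cond_wait(&cv, &contents);
                    }
                    pthread_mutex_unlock(&contents);
            }
    }

Without no_wait, the releaser can find waiting still 0, skip the broadcast, and leave the waiter asleep forever; with the flag set, the waiter re-checks the counter instead of sleeping. This simplified model re-checks in a loop; the kernel's unmapwait handling is more involved.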


2894                         return (0);
2895                 }
2896 
2897                 sptd->spt_flags |= DISM_PPA_CHANGED;
2898                 gen = sptd->spt_gen;
2899 
2900                 mutex_exit(&sptd->spt_lock);
2901 
2902                 /*
2903                  * Purge all DISM cached pages
2904                  */
2905                 seg_ppurge_wiredpp(ppa);
2906 
2907                 /*
2908                  * Drop the AS_LOCK so that other threads can grab it
2909                  * in the as_pageunlock path and hopefully get the segment
2910                  * kicked out of the seg_pcache.  We bump the shm_softlockcnt
2911                  * to keep this segment resident.
2912                  */
2913                 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2914                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
2915                 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2916 
2917                 mutex_enter(&sptd->spt_lock);
2918 
2919                 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2920 
2921                 /*
2922                  * Try to wait for pages to get kicked out of the seg_pcache.
2923                  */
2924                 while (sptd->spt_gen == gen &&
2925                     (sptd->spt_flags & DISM_PPA_CHANGED) &&
2926                     ddi_get_lbolt() < end_lbolt) {
2927                         if (!cv_timedwait_sig(&sptd->spt_cv,
2928                             &sptd->spt_lock, end_lbolt)) {
2929                                 break;
2930                         }
2931                 }
2932 
2933                 mutex_exit(&sptd->spt_lock);
2934 
2935                 /* Regrab the AS_LOCK and release our hold on the segment */
2936                 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2937                     writer ? RW_WRITER : RW_READER);
2938                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
2939                 if (shmd->shm_softlockcnt <= 0) {
2940                         if (AS_ISUNMAPWAIT(seg->s_as)) {
2941                                 mutex_enter(&seg->s_as->a_contents);
2942                                 if (AS_ISUNMAPWAIT(seg->s_as)) {
2943                                         AS_CLRUNMAPWAIT(seg->s_as);
2944                                         cv_broadcast(&seg->s_as->a_cv);
2945                                 }
2946                                 mutex_exit(&seg->s_as->a_contents);
2947                         }
2948                 }
2949 
2950                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2951                 anon_disclaim(amp, pg_idx, len);
2952                 ANON_LOCK_EXIT(&amp->a_rwlock);
2953         } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2954             behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2955                 int                     already_set;
2956                 ulong_t                 anon_index;
2957                 lgrp_mem_policy_t       policy;
2958                 caddr_t                 shm_addr;
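
The wait loop above is bounded three ways: it stops when the generation number moves, when DISM_PPA_CHANGED clears, or when end_lbolt passes (cv_timedwait_sig(9F) also returns early on a signal, which this sketch does not model). A loose user-level analogue of that deadline-bounded, predicate-re-checking wait, with all names and the one-second bound hypothetical:

    #include <pthread.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
    static unsigned long gen;       /* bumped when the cached ppa[] changes */
    static int changed;             /* analogue of DISM_PPA_CHANGED */

    void
    wait_for_purge(void)
    {
            struct timespec deadline;
            unsigned long start_gen;

            /* Absolute deadline, cf. end_lbolt = lbolt + hz * spt_pcache_wait. */
            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += 1;

            pthread_mutex_lock(&lock);
            start_gen = gen;
            while (gen == start_gen && changed) {
                    /*
                     * Re-check the predicate after every wakeup; a nonzero
                     * return (ETIMEDOUT) means the deadline passed.
                     */
                    if (pthread_cond_timedwait(&cv, &lock, &deadline) != 0)
                            break;
            }
            pthread_mutex_unlock(&lock);
    }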




1071                 /*
1072                  * seg_pinsert failed. We return
1073                  * ENOTSUP, so that the as_pagelock() code will
1074                  * then try the slower F_SOFTLOCK path.
1075                  */
1076                 if (pl_built) {
1077                         /*
1078                          * No one else has referenced the ppa[].
1079                          * We created it and we need to destroy it.
1080                          */
1081                         sptd->spt_ppa = NULL;
1082                 }
1083                 ret = ENOTSUP;
1084                 goto insert_fail;
1085         }
1086 
1087         /*
1088          * In either case, we increment softlockcnt on the 'real' segment.
1089          */
1090         sptd->spt_pcachecnt++;
1091         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1092 
1093         ppa = sptd->spt_ppa;
1094         for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1095                 if (ppa[an_idx] == NULL) {
1096                         mutex_exit(&sptd->spt_lock);
1097                         seg_pinactive(seg, NULL, seg->s_base,
1098                             sptd->spt_amp->size,
1099                             pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1100                         *ppp = NULL;
1101                         return (ENOTSUP);
1102                 }
1103                 if ((szc = ppa[an_idx]->p_szc) != 0) {
1104                         npgs = page_get_pagecnt(szc);
1105                         an_idx = P2ROUNDUP(an_idx + 1, npgs);
1106                 } else {
1107                         an_idx++;
1108                 }
1109         }
1110         /*
1111          * We can now drop the sptd->spt_lock since the ppa[]


1350                 /*
1351                  * seg_pinsert failed. We return
1352                  * ENOTSUP, so that the as_pagelock() code will
1353                  * then try the slower F_SOFTLOCK path.
1354                  */
1355                 if (pl_built) {
1356                         /*
1357                          * No one else has referenced the ppa[].
1358                          * We created it and we need to destroy it.
1359                          */
1360                         sptd->spt_ppa = NULL;
1361                 }
1362                 ret = ENOTSUP;
1363                 goto insert_fail;
1364         }
1365 
1366         /*
1367          * In either case, we increment softlockcnt on the 'real' segment.
1368          */
1369         sptd->spt_pcachecnt++;
1370         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1371 
1372         /*
1373          * We can now drop the sptd->spt_lock since the ppa[]
1374          * exists and we have incremented pcachecnt.
1375          */
1376         mutex_exit(&sptd->spt_lock);
1377 
1378         /*
1379          * Since we cache the entire segment, we want to
1380          * set ppp to point to the first slot that corresponds
1381          * to the requested addr, i.e. page_index.
1382          */
1383         *ppp = &(sptd->spt_ppa[page_index]);
1384         return (0);
1385 
1386 insert_fail:
1387         /*
1388          * We will only reach this code if we tried and failed.
1389          *
1390          * And we can drop the lock on the dummy seg, once we've failed


1502          * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1503          * this mutex as a barrier to make sure this routine completes before
1504          * the segment is freed.
1505          *
1506          * The second complication we have to deal with in the async case
1507          * is the possibility of a missed wakeup of an unmap wait thread.
1508          * Since we don't hold the as lock here, we may take the a_contents
1509          * lock before an unmap wait thread that had already seen a nonzero
1510          * softlockcnt, and so fail to wake it up. To avoid this race, when
1511          * async is not 0 and we drop softlockcnt to 0, we set the
1512          * nounmapwait flag in the as structure; an unmapwait thread will
1513          * not block while this flag is set.
1514          */
1515         if (async)
1516                 mutex_enter(&shmd->shm_segfree_syncmtx);
1517 
1518         /*
1519          * Now decrement softlockcnt.
1520          */
1521         ASSERT(shmd->shm_softlockcnt > 0);
1522         atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1523 
1524         if (shmd->shm_softlockcnt <= 0) {
1525                 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1526                         mutex_enter(&seg->s_as->a_contents);
1527                         if (async)
1528                                 AS_SETNOUNMAPWAIT(seg->s_as);
1529                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1530                                 AS_CLRUNMAPWAIT(seg->s_as);
1531                                 cv_broadcast(&seg->s_as->a_cv);
1532                         }
1533                         mutex_exit(&seg->s_as->a_contents);
1534                 }
1535         }
1536 
1537         if (async)
1538                 mutex_exit(&shmd->shm_segfree_syncmtx);
1539 
1540         return (done);
1541 }
1542 


2894                         return (0);
2895                 }
2896 
2897                 sptd->spt_flags |= DISM_PPA_CHANGED;
2898                 gen = sptd->spt_gen;
2899 
2900                 mutex_exit(&sptd->spt_lock);
2901 
2902                 /*
2903                  * Purge all DISM cached pages
2904                  */
2905                 seg_ppurge_wiredpp(ppa);
2906 
2907                 /*
2908                  * Drop the AS_LOCK so that other threads can grab it
2909                  * in the as_pageunlock path and hopefully get the segment
2910                  * kicked out of the seg_pcache.  We bump the shm_softlockcnt
2911                  * to keep this segment resident.
2912                  */
2913                 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2914                 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2915                 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2916 
2917                 mutex_enter(&sptd->spt_lock);
2918 
2919                 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2920 
2921                 /*
2922                  * Try to wait for pages to get kicked out of the seg_pcache.
2923                  */
2924                 while (sptd->spt_gen == gen &&
2925                     (sptd->spt_flags & DISM_PPA_CHANGED) &&
2926                     ddi_get_lbolt() < end_lbolt) {
2927                         if (!cv_timedwait_sig(&sptd->spt_cv,
2928                             &sptd->spt_lock, end_lbolt)) {
2929                                 break;
2930                         }
2931                 }
2932 
2933                 mutex_exit(&sptd->spt_lock);
2934 
2935                 /* Regrab the AS_LOCK and release our hold on the segment */
2936                 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2937                     writer ? RW_WRITER : RW_READER);
2938                 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2939                 if (shmd->shm_softlockcnt <= 0) {
2940                         if (AS_ISUNMAPWAIT(seg->s_as)) {
2941                                 mutex_enter(&seg->s_as->a_contents);
2942                                 if (AS_ISUNMAPWAIT(seg->s_as)) {
2943                                         AS_CLRUNMAPWAIT(seg->s_as);
2944                                         cv_broadcast(&seg->s_as->a_cv);
2945                                 }
2946                                 mutex_exit(&seg->s_as->a_contents);
2947                         }
2948                 }
2949 
2950                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2951                 anon_disclaim(amp, pg_idx, len);
2952                 ANON_LOCK_EXIT(&amp->a_rwlock);
2953         } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2954             behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2955                 int                     already_set;
2956                 ulong_t                 anon_index;
2957                 lgrp_mem_policy_t       policy;
2958                 caddr_t                 shm_addr;