5045 use atomic_{inc,dec}_* instead of atomic_add_*
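The change below is mechanical: every counter that was bumped or dropped by a literal +1/-1 through atomic_add_32() now uses the dedicated increment/decrement primitive, which states the intent directly and avoids the signed-delta form. A minimal sketch of the pattern, not code from this webrev (ptable_counter_demo is a hypothetical helper; the counter name mirrors active_ptables in the first hunk):

	#include <sys/types.h>
	#include <sys/atomic.h>

	static volatile uint32_t active_ptables;

	void
	ptable_counter_demo(void)
	{
		/* was: atomic_add_32(&active_ptables, 1); */
		atomic_inc_32(&active_ptables);

		/* was: atomic_add_32(&active_ptables, -1); */
		atomic_dec_32(&active_ptables);
	}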
@@ -301,11 +301,11 @@
return (PFN_INVALID);
ASSERT(PAGE_SHARED(pp));
pfn = pp->p_pagenum;
if (pfn == PFN_INVALID)
panic("ptable_alloc(): Invalid PFN!!");
- atomic_add_32(&active_ptables, 1);
+ atomic_inc_32(&active_ptables);
HATSTAT_INC(hs_ptable_allocs);
return (pfn);
}
/*
@@ -320,11 +320,11 @@
/*
* need to destroy the page used for the pagetable
*/
ASSERT(pfn != PFN_INVALID);
HATSTAT_INC(hs_ptable_frees);
- atomic_add_32(&active_ptables, -1);
+ atomic_dec_32(&active_ptables);
if (pp == NULL)
panic("ptable_free(): no page for pfn!");
ASSERT(PAGE_SHARED(pp));
ASSERT(pfn == pp->p_pagenum);
ASSERT(!IN_XPV_PANIC());
@@ -458,11 +458,11 @@
/*
* Loop through all user hats. The 1st pass takes cached htables that
* aren't in use. The later passes steal by removing mappings, too.
*/
- atomic_add_32(&htable_dont_cache, 1);
+ atomic_inc_32(&htable_dont_cache);
for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
threshold = pass * mmu.ptes_per_table / htable_steal_passes;
hat = kas.a_hat;
for (;;) {
@@ -667,11 +667,11 @@
if (++h == hat->hat_num_hash)
h = 0;
} while (stolen < cnt && h != h_start);
}
}
- atomic_add_32(&htable_dont_cache, -1);
+ atomic_dec_32(&htable_dont_cache);
return (list);
}
/*
* This is invoked from kmem when the system is low on memory. We try
@@ -983,11 +983,11 @@
/*
* Purge the htable cache if just reaping.
*/
if (!(hat->hat_flags & HAT_FREEING)) {
- atomic_add_32(&htable_dont_cache, 1);
+ atomic_inc_32(&htable_dont_cache);
for (;;) {
hat_enter(hat);
ht = hat->hat_ht_cached;
if (ht == NULL) {
hat_exit(hat);
@@ -995,11 +995,11 @@
}
hat->hat_ht_cached = ht->ht_next;
hat_exit(hat);
htable_free(ht);
}
- atomic_add_32(&htable_dont_cache, -1);
+ atomic_dec_32(&htable_dont_cache);
return;
}
/*
* if freeing, no locking is needed