XXXX kmem: fix comment typo

--- old
3154          * We use taskq_dispatch() to schedule a timeout to clear
3155          * the flag so that kmem_reap() becomes self-throttling:
3156          * we won't reap again until the current reap completes *and*
3157          * at least kmem_reap_interval ticks have elapsed.
3158          */
3159         if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
3160                 kmem_reap_done(flag);
3161 }
3162 
3163 static void
3164 kmem_reap_common(void *flag_arg)
3165 {
3166         uint32_t *flag = (uint32_t *)flag_arg;
3167 
3168         if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
3169             atomic_cas_32(flag, 0, 1) != 0)
3170                 return;
3171 
3172         /*
3173          * It may not be kosher to do memory allocation when a reap is called
3174          * is called (for example, if vmem_populate() is in the call chain).
3175          * So we start the reap going with a TQ_NOALLOC dispatch.  If the
3176          * dispatch fails, we reset the flag, and the next reap will try again.
3177          */
3178         if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
3179                 *flag = 0;
3180 }
3181 
3182 /*
3183  * Reclaim all unused memory from all caches.  Called from the VM system
3184  * when memory gets tight.
3185  */
3186 void
3187 kmem_reap(void)
3188 {
3189         kmem_reap_common(&kmem_reaping);
3190 }
3191 
3192 /*
3193  * Reclaim all unused memory from identifier arenas, called when a vmem
3194  * arena not backed by memory is exhausted.  Since reaping memory-backed caches
3195  * cannot help with identifier exhaustion, we avoid both a large amount of
3196  * work and unwanted side-effects from reclaim callbacks.

+++ new

3154          * We use taskq_dispatch() to schedule a timeout to clear
3155          * the flag so that kmem_reap() becomes self-throttling:
3156          * we won't reap again until the current reap completes *and*
3157          * at least kmem_reap_interval ticks have elapsed.
3158          */
3159         if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
3160                 kmem_reap_done(flag);
3161 }
3162 
3163 static void
3164 kmem_reap_common(void *flag_arg)
3165 {
3166         uint32_t *flag = (uint32_t *)flag_arg;
3167 
3168         if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
3169             atomic_cas_32(flag, 0, 1) != 0)
3170                 return;
3171 
3172         /*
3173          * It may not be kosher to do memory allocation when a reap is called
3174          * (for example, if vmem_populate() is in the call chain).  So we
3175          * start the reap going with a TQ_NOALLOC dispatch.  If the dispatch
3176          * fails, we reset the flag, and the next reap will try again.
3177          */
3178         if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
3179                 *flag = 0;
3180 }
3181 
3182 /*
3183  * Reclaim all unused memory from all caches.  Called from the VM system
3184  * when memory gets tight.
3185  */
3186 void
3187 kmem_reap(void)
3188 {
3189         kmem_reap_common(&kmem_reaping);
3190 }
3191 
3192 /*
3193  * Reclaim all unused memory from identifier arenas, called when a vmem
3194  * arena not backed by memory is exhausted.  Since reaping memory-backed caches
3195  * cannot help with identifier exhaustion, we avoid both a large amount of
3196  * work and unwanted side-effects from reclaim callbacks.
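

For readers unfamiliar with the pattern the comments above describe, here is a
minimal user-space sketch of the same idea: one reap is kept in flight at a
time by an atomic compare-and-swap on a flag, the work is handed off to
another context, and the flag is cleared only after the work (plus a
cool-down) completes, so back-to-back callers are throttled.  This is an
illustrative analogue only, assuming C11 atomics and POSIX threads;
reap_flag, reap_start(), and reap_common() below are hypothetical stand-ins,
not the kernel taskq interfaces used in kmem.c.

/*
 * Illustrative sketch only: serialize "reaps" with an atomic flag that is
 * cleared after the work and a cool-down complete, mirroring the comment
 * in kmem_reap_common() above.  All names here are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_uint reap_flag = 0;	/* 0 = idle, 1 = reap in progress */

static void *
reap_start(void *arg)
{
	(void) arg;
	printf("reaping caches...\n");	/* stand-in for the actual reap work */
	sleep(1);			/* the reap itself takes a while */
	sleep(1);			/* stand-in for kmem_reap_interval */
	atomic_store(&reap_flag, 0);	/* only now may another reap begin */
	return (NULL);
}

static void
reap_common(void)
{
	unsigned int idle = 0;
	pthread_t tid;

	/* Only the caller that swings the flag 0 -> 1 gets to dispatch. */
	if (!atomic_compare_exchange_strong(&reap_flag, &idle, 1))
		return;

	/* Stand-in for taskq_dispatch(); on failure, reset the flag. */
	if (pthread_create(&tid, NULL, reap_start, NULL) != 0)
		atomic_store(&reap_flag, 0);
	else
		(void) pthread_detach(tid);
}

int
main(void)
{
	reap_common();		/* starts a reap */
	reap_common();		/* no-op: a reap is already in flight */
	sleep(3);		/* let the reap finish and the flag clear */
	reap_common();		/* a new reap may start now */
	sleep(3);
	return (0);
}

The kernel code above differs in the details: the hand-off goes through
kmem_taskq with TQ_NOALLOC (since the caller may not be able to allocate
memory), and the flag is cleared from a timeout scheduled via
taskq_dispatch() of kmem_reap_done(), which is what makes kmem_reap()
self-throttling.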