8075 */
8076 void
8077 umem_lock_undo(struct as *as, void *arg, uint_t event)
8078 {
8079 _NOTE(ARGUNUSED(as, event))
8080 struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;
8081
8082 /*
8083 * Call the cleanup function. Decrement the cookie reference
8084 * count, if it goes to zero, return the memory for the cookie.
8085 * The i_ddi_umem_unlock for this cookie may or may not have been
8086 * called already. It is the responsibility of the caller of
8087 * umem_lockmemory to handle the case of the cleanup routine
8088 * being called after a ddi_umem_unlock for the cookie
8089 * was called.
8090 */
8091
8092 (*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
8093
8094 /* remove the cookie if reference goes to zero */
8095 if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
8096 kmem_free(cp, sizeof (struct ddi_umem_cookie));
8097 }
8098 }
8099
8100 /*
8101 * The following two Consolidation Private routines provide generic
8102 * interfaces to increase/decrease the amount of device-locked memory.
8103 *
8104 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8105 * must be called every time i_ddi_incr_locked_memory() is called.
8106 */
8107 int
8108 /* ARGSUSED */
8109 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8110 {
8111 ASSERT(procp != NULL);
8112 mutex_enter(&procp->p_lock);
8113 if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8114 mutex_exit(&procp->p_lock);
8115 return (ENOMEM);
8470 /*
8471 * Now that we have unlocked the memory decrement the
8472 * *.max-locked-memory rctl
8473 */
8474 umem_decr_devlockmem(p);
8475
8476 if (rc == AS_CALLBACK_DELETED) {
8477 /* umem_lock_undo will not happen, return the cookie memory */
8478 ASSERT(p->cook_refcnt == 2);
8479 kmem_free(p, sizeof (struct ddi_umem_cookie));
8480 } else {
8481 /*
8482 * umem_lock_undo may happen if as_delete_callback returned
8483 * AS_CALLBACK_DELETE_DEFERRED. In that case, decrement the
8484 * reference count, atomically, and return the cookie
8485 * memory if the reference count goes to zero. The only
8486 * other value for rc is AS_CALLBACK_NOTFOUND. In that
8487 * case, just return the cookie memory.
8488 */
8489 if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
8490 (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
8491 == 0)) {
8492 kmem_free(p, sizeof (struct ddi_umem_cookie));
8493 }
8494 }
8495 }
8496
8497 /*
8498 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8499 *
8500 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8501 * until it is empty. Then, wait for more to be added. This thread is awoken
8502 * via calls to ddi_umem_unlock.
8503 */
8504
8505 static void
8506 i_ddi_umem_unlock_thread(void)
8507 {
8508 struct ddi_umem_cookie *ret_cookie;
8509 callb_cpr_t cprinfo;
8510
|
8075 */
8076 void
8077 umem_lock_undo(struct as *as, void *arg, uint_t event)
8078 {
8079 _NOTE(ARGUNUSED(as, event))
8080 struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;
8081
8082 /*
8083 * Call the cleanup function. Decrement the cookie reference
8084 * count, if it goes to zero, return the memory for the cookie.
8085 * The i_ddi_umem_unlock for this cookie may or may not have been
8086 * called already. It is the responsibility of the caller of
8087 * umem_lockmemory to handle the case of the cleanup routine
8088 * being called after a ddi_umem_unlock for the cookie
8089 * was called.
8090 */
8091
8092 (*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
8093
8094 /* remove the cookie if reference goes to zero */
8095 if (atomic_dec_ulong_nv((ulong_t *)(&(cp->cook_refcnt))) == 0) {
8096 kmem_free(cp, sizeof (struct ddi_umem_cookie));
8097 }
8098 }
8099
8100 /*
8101 * The following two Consolidation Private routines provide generic
8102 * interfaces to increase/decrease the amount of device-locked memory.
8103 *
8104 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8105 * must be called every time i_ddi_incr_locked_memory() is called.
8106 */
8107 int
8108 /* ARGSUSED */
8109 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8110 {
8111 ASSERT(procp != NULL);
8112 mutex_enter(&procp->p_lock);
8113 if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8114 mutex_exit(&procp->p_lock);
8115 return (ENOMEM);
8470 /*
8471 * Now that we have unlocked the memory decrement the
8472 * *.max-locked-memory rctl
8473 */
8474 umem_decr_devlockmem(p);
8475
8476 if (rc == AS_CALLBACK_DELETED) {
8477 /* umem_lock_undo will not happen, return the cookie memory */
8478 ASSERT(p->cook_refcnt == 2);
8479 kmem_free(p, sizeof (struct ddi_umem_cookie));
8480 } else {
8481 /*
8482 * umem_lock_undo may happen if as_delete_callback returned
8483 * AS_CALLBACK_DELETE_DEFERRED. In that case, decrement the
8484 * reference count, atomically, and return the cookie
8485 * memory if the reference count goes to zero. The only
8486 * other value for rc is AS_CALLBACK_NOTFOUND. In that
8487 * case, just return the cookie memory.
8488 */
8489 if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
8490 (atomic_dec_ulong_nv((ulong_t *)(&(p->cook_refcnt)))
8491 == 0)) {
8492 kmem_free(p, sizeof (struct ddi_umem_cookie));
8493 }
8494 }
8495 }
8496
8497 /*
8498 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8499 *
8500 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8501 * until it is empty. Then, wait for more to be added. This thread is awoken
8502 * via calls to ddi_umem_unlock.
8503 */
8504
8505 static void
8506 i_ddi_umem_unlock_thread(void)
8507 {
8508 struct ddi_umem_cookie *ret_cookie;
8509 callb_cpr_t cprinfo;
8510
|