        while ((ent = *prev) != NULL) {
            addr = ent->lwpchan_addr;
            if (start <= addr && addr < end) {
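                /* unlink the entry from the hash chain */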
                *prev = ent->lwpchan_next;
                /*
                 * We do this only for the obsolete type
                 * USYNC_PROCESS_ROBUST.  Otherwise robust
                 * locks do not draw ELOCKUNMAPPED or
                 * EOWNERDEAD due to being unmapped.
                 */
                if (ent->lwpchan_pool == LWPCHAN_MPPOOL &&
                    (ent->lwpchan_type & USYNC_PROCESS_ROBUST))
                    lwp_mutex_cleanup(ent, LOCK_UNMAPPED);
                /*
                 * If there is a user-level robust lock
                 * registration, mark it as invalid.
                 */
                if ((addr = ent->lwpchan_uaddr) != NULL)
                    lwp_mutex_unregister(addr);
                kmem_free(ent, sizeof (*ent));
                atomic_dec_32(&lcp->lwpchan_entries);
            } else {
                prev = &ent->lwpchan_next;
            }
        }
        mutex_exit(&hashbucket->lwpchan_lock);
    }
    mutex_exit(&p->p_lcp_lock);
}

/*
 * Given an lwpchan cache pointer and a process virtual address,
 * return a pointer to the corresponding lwpchan hash bucket.
 */
static lwpchan_hashbucket_t *
lwpchan_bucket(lwpchan_data_t *lcp, uintptr_t addr)
{
    uint_t i;

    /*
     * All user-level sync object addresses are 8-byte aligned.
     * Ignore the lowest 3 bits of the address and use the
     * higher-order 2*lwpchan_bits bits for the hash index.
     */
    addr >>= 3;
    i = (addr ^ (addr >> lcp->lwpchan_bits)) & lcp->lwpchan_mask;
    return (lcp->lwpchan_cache + i);
}

        /* someone else added this entry to the cache */
        mutex_exit(&hashbucket->lwpchan_lock);
        kmem_free(ent, sizeof (*ent));
        return (1);
    }
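    /*
     * Resize heuristic: a chain may grow a bit longer as the table
     * itself grows; rebuild the cache with twice as many buckets
     * only when this chain exceeds lwpchan_bits + 2 entries and
     * the table has not yet reached LWPCHAN_MAX_BITS.
     */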
    if (count > lcp->lwpchan_bits + 2 && /* larger table, longer chains */
        (bits = lcp->lwpchan_bits) < LWPCHAN_MAX_BITS) {
        /* hash chain too long; reallocate the hash table */
        mutex_exit(&hashbucket->lwpchan_lock);
        kmem_free(ent, sizeof (*ent));
        lwpchan_alloc_cache(p, bits + 1);
        goto top;
    }
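    /* fill in the new entry and link it at the head of the hash chain */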
    ent->lwpchan_addr = addr;
    ent->lwpchan_uaddr = uaddr;
    ent->lwpchan_type = (uint16_t)type;
    ent->lwpchan_pool = (uint16_t)pool;
    ent->lwpchan_lwpchan = *lwpchan;
    ent->lwpchan_next = hashbucket->lwpchan_chain;
    hashbucket->lwpchan_chain = ent;
    atomic_inc_32(&lcp->lwpchan_entries);
    mutex_exit(&hashbucket->lwpchan_lock);
    return (1);
}

/*
 * Return a unique pair of identifiers that corresponds to a
 * synchronization object's virtual address.  Process-shared
 * sync objects usually get vnode/offset from as_getmemid().
 */
static int
get_lwpchan(struct as *as, caddr_t addr, int type, lwpchan_t *lwpchan, int pool)
{
    /*
     * If the lwp synch object is defined to be process-private,
     * we just make the first field of the lwpchan be 'as' and
     * the second field be the synch object's virtual address.
     * (segvn_getmemid() does the same for MAP_PRIVATE mappings.)
     * The lwpchan cache is used only for process-shared objects.
     */
    if (!(type & USYNC_PROCESS)) {