Patch: as-lock-macro-simplification

Simplifies the address-space lock macros throughout seg_spt.c: the redundant
second argument (&as->a_lock) is dropped from AS_LOCK_HELD, AS_WRITE_HELD,
AS_LOCK_ENTER, and AS_LOCK_EXIT, since each macro can derive the rwlock from
the struct as pointer itself. No functional change intended.

*** 239,249 **** void segspt_free(struct seg *seg) { struct spt_data *sptd = (struct spt_data *)seg->s_data; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); if (sptd != NULL) { if (sptd->spt_realsize) segspt_free_pages(seg, seg->s_base, sptd->spt_realsize); --- 239,249 ---- void segspt_free(struct seg *seg) { struct spt_data *sptd = (struct spt_data *)seg->s_data; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); if (sptd != NULL) { if (sptd->spt_realsize) segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
*** 261,271 **** /*ARGSUSED*/ static int segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) { ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); return (0); } /*ARGSUSED*/ --- 261,271 ---- /*ARGSUSED*/ static int segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) { ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return (0); } /*ARGSUSED*/
*** 276,286 **** pgcnt_t npages; struct shm_data *shmd = (struct shm_data *)seg->s_data; struct seg *sptseg; struct spt_data *sptd; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); #ifdef lint seg = seg; #endif sptseg = shmd->shm_sptseg; sptd = sptseg->s_data; --- 276,286 ---- pgcnt_t npages; struct shm_data *shmd = (struct shm_data *)seg->s_data; struct seg *sptseg; struct spt_data *sptd; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); #ifdef lint seg = seg; #endif sptseg = shmd->shm_sptseg; sptd = sptseg->s_data;
*** 340,350 **** static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize) { size_t share_size; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); /* * seg.s_size may have been rounded up to the largest page size * in shmat(). * XXX This should be cleanedup. sptdestroy should take a length --- 340,350 ---- static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize) { size_t share_size; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); /* * seg.s_size may have been rounded up to the largest page size * in shmat(). * XXX This should be cleanedup. sptdestroy should take a length
*** 391,401 **** /* * We are holding the a_lock on the underlying dummy as, * so we can make calls to the HAT layer. */ ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); ASSERT(sp != NULL); #ifdef DEBUG TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */, tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size); --- 391,401 ---- /* * We are holding the a_lock on the underlying dummy as, * so we can make calls to the HAT layer. */ ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); ASSERT(sp != NULL); #ifdef DEBUG TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */, tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
*** 627,637 **** page_t *rootpp; rctl_qty_t unlocked_bytes = 0; kproject_t *proj; kshmid_t *sp; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); len = P2ROUNDUP(len, PAGESIZE); npages = btop(len); --- 627,637 ---- page_t *rootpp; rctl_qty_t unlocked_bytes = 0; kproject_t *proj; kshmid_t *sp; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); len = P2ROUNDUP(len, PAGESIZE); npages = btop(len);
*** 836,846 **** struct vnode *vp; u_offset_t off; pgcnt_t claim_availrmem = 0; uint_t szc; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); /* * We want to lock/unlock the entire ISM segment. Therefore, * we will be using the underlying sptseg and it's base address --- 836,846 ---- struct vnode *vp; u_offset_t off; pgcnt_t claim_availrmem = 0; uint_t szc; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); /* * We want to lock/unlock the entire ISM segment. Therefore, * we will be using the underlying sptseg and it's base address
*** 1191,1201 **** uint_t pl_built = 0; struct anon *ap; struct vnode *vp; u_offset_t off; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); /* * We want to lock/unlock the entire ISM segment. Therefore, --- 1191,1201 ---- uint_t pl_built = 0; struct anon *ap; struct vnode *vp; u_offset_t off; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); /* * We want to lock/unlock the entire ISM segment. Therefore,
*** 1449,1459 **** npages = (len >> PAGESHIFT); ASSERT(npages); ASSERT(sptd->spt_pcachecnt != 0); ASSERT(sptd->spt_ppa == pplist); ASSERT(npages == btopr(sptd->spt_amp->size)); ! ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); /* * Acquire the lock on the dummy seg and destroy the * ppa array IF this is the last pcachecnt. */ --- 1449,1459 ---- npages = (len >> PAGESHIFT); ASSERT(npages); ASSERT(sptd->spt_pcachecnt != 0); ASSERT(sptd->spt_ppa == pplist); ASSERT(npages == btopr(sptd->spt_amp->size)); ! ASSERT(async || AS_LOCK_HELD(seg->s_as)); /* * Acquire the lock on the dummy seg and destroy the * ppa array IF this is the last pcachecnt. */
*** 1583,1593 **** ulong_t anon_index; struct anon_map *amp; /* XXX - for locknest */ struct anon *ap = NULL; pgcnt_t npages; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); sptseg = shmd->shm_sptseg; sptd = sptseg->s_data; /* --- 1583,1593 ---- ulong_t anon_index; struct anon_map *amp; /* XXX - for locknest */ struct anon *ap = NULL; pgcnt_t npages; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); sptseg = shmd->shm_sptseg; sptd = sptseg->s_data; /*
*** 1607,1619 **** * We are already holding the as->a_lock on the user's * real segment, but we need to hold the a_lock on the * underlying dummy as. This is mostly to satisfy the * underlying HAT layer. */ ! AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len); ! AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); amp = sptd->spt_amp; ASSERT(amp != NULL); anon_index = seg_page(sptseg, sptseg_addr); --- 1607,1619 ---- * We are already holding the as->a_lock on the user's * real segment, but we need to hold the a_lock on the * underlying dummy as. This is mostly to satisfy the * underlying HAT layer. */ ! AS_LOCK_ENTER(sptseg->s_as, RW_READER); hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len); ! AS_LOCK_EXIT(sptseg->s_as); amp = sptd->spt_amp; ASSERT(amp != NULL); anon_index = seg_page(sptseg, sptseg_addr);
*** 1674,1684 **** struct shm_data *shmd; struct anon_map *shm_amp = shmd_arg->shm_amp; struct spt_data *sptd; int error = 0; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); if (shmd == NULL) return (ENOMEM); --- 1674,1684 ---- struct shm_data *shmd; struct anon_map *shm_amp = shmd_arg->shm_amp; struct spt_data *sptd; int error = 0; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); if (shmd == NULL) return (ENOMEM);
*** 1733,1743 **** segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) { struct shm_data *shmd = (struct shm_data *)seg->s_data; int reclaim = 1; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); retry: if (shmd->shm_softlockcnt > 0) { if (reclaim == 1) { segspt_purge(seg); reclaim = 0; --- 1733,1743 ---- segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) { struct shm_data *shmd = (struct shm_data *)seg->s_data; int reclaim = 1; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); retry: if (shmd->shm_softlockcnt > 0) { if (reclaim == 1) { segspt_purge(seg); reclaim = 0;
*** 1767,1777 **** segspt_shmfree(struct seg *seg) { struct shm_data *shmd = (struct shm_data *)seg->s_data; struct anon_map *shm_amp = shmd->shm_amp; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0, MC_UNLOCK, NULL, 0); /* --- 1767,1777 ---- segspt_shmfree(struct seg *seg) { struct shm_data *shmd = (struct shm_data *)seg->s_data; struct anon_map *shm_amp = shmd->shm_amp; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0, MC_UNLOCK, NULL, 0); /*
*** 1800,1810 **** /*ARGSUSED*/ int segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) { ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); /* * Shared page table is more than shared mapping. * Individual process sharing page tables can't change prot * because there is only one set of page tables. --- 1800,1810 ---- /*ARGSUSED*/ int segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) { ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * Shared page table is more than shared mapping. * Individual process sharing page tables can't change prot * because there is only one set of page tables.
*** 1838,1848 **** pgcnt_t pidx; #ifdef lint hat = hat; #endif ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); /* * Because of the way spt is implemented * the realsize of the segment does not have to be * equal to the segment size itself. The segment size is --- 1838,1848 ---- pgcnt_t pidx; #ifdef lint hat = hat; #endif ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * Because of the way spt is implemented * the realsize of the segment does not have to be * equal to the segment size itself. The segment size is
*** 1907,1917 **** atomic_add_long((ulong_t *)( &(shmd->shm_softlockcnt)), -npages); } goto dism_err; } ! AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); a = segspt_addr; pidx = 0; if (type == F_SOFTLOCK) { /* --- 1907,1917 ---- atomic_add_long((ulong_t *)( &(shmd->shm_softlockcnt)), -npages); } goto dism_err; } ! AS_LOCK_ENTER(sptseg->s_as, RW_READER); a = segspt_addr; pidx = 0; if (type == F_SOFTLOCK) { /*
*** 1968,1978 **** for (i = 0; i < npages; i++) { page_unlock(ppa[i]); } } } ! AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); dism_err: kmem_free(ppa, npages * sizeof (page_t *)); return (err); case F_SOFTUNLOCK: --- 1968,1978 ---- for (i = 0; i < npages; i++) { page_unlock(ppa[i]); } } } ! AS_LOCK_EXIT(sptseg->s_as); dism_err: kmem_free(ppa, npages * sizeof (page_t *)); return (err); case F_SOFTUNLOCK:
*** 2035,2045 **** #ifdef lint hat = hat; #endif ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); if (sptd->spt_flags & SHM_PAGEABLE) { return (segspt_dismfault(hat, seg, addr, len, type, rw)); } --- 2035,2045 ---- #ifdef lint hat = hat; #endif ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); if (sptd->spt_flags & SHM_PAGEABLE) { return (segspt_dismfault(hat, seg, addr, len, type, rw)); }
*** 2167,2177 **** * We are already holding the as->a_lock on the user's * real segment, but we need to hold the a_lock on the * underlying dummy as. This is mostly to satisfy the * underlying HAT layer. */ ! AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); a = sptseg_addr; pidx = 0; if (type == F_SOFTLOCK) { /* * Load up the translation keeping it --- 2167,2177 ---- * We are already holding the as->a_lock on the user's * real segment, but we need to hold the a_lock on the * underlying dummy as. This is mostly to satisfy the * underlying HAT layer. */ ! AS_LOCK_ENTER(sptseg->s_as, RW_READER); a = sptseg_addr; pidx = 0; if (type == F_SOFTLOCK) { /* * Load up the translation keeping it
*** 2212,2222 **** * And now drop the SE_SHARED lock(s). */ for (i = 0; i < npages; i++) page_unlock(ppa[i]); } ! AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); kmem_free(ppa, sizeof (page_t *) * npages); return (0); case F_SOFTUNLOCK: --- 2212,2222 ---- * And now drop the SE_SHARED lock(s). */ for (i = 0; i < npages; i++) page_unlock(ppa[i]); } ! AS_LOCK_EXIT(sptseg->s_as); kmem_free(ppa, sizeof (page_t *) * npages); return (0); case F_SOFTUNLOCK:
*** 2282,2292 **** struct shm_data *shmd_new; struct seg *spt_seg = shmd->shm_sptseg; struct spt_data *sptd = spt_seg->s_data; int error = 0; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP); newseg->s_data = (void *)shmd_new; shmd_new->shm_sptas = shmd->shm_sptas; shmd_new->shm_amp = amp; --- 2282,2292 ---- struct shm_data *shmd_new; struct seg *spt_seg = shmd->shm_sptseg; struct spt_data *sptd = spt_seg->s_data; int error = 0; ! ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP); newseg->s_data = (void *)shmd_new; shmd_new->shm_sptas = shmd->shm_sptas; shmd_new->shm_amp = amp;
*** 2324,2334 **** segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot) { struct shm_data *shmd = (struct shm_data *)seg->s_data; struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); /* * ISM segment is always rw. */ return (((sptd->spt_prot & prot) != prot) ? EACCES : 0); --- 2324,2334 ---- segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot) { struct shm_data *shmd = (struct shm_data *)seg->s_data; struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * ISM segment is always rw. */ return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
*** 2675,2685 **** rctl_qty_t unlocked = 0; rctl_qty_t locked = 0; struct proc *p = curproc; kproject_t *proj; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); ASSERT(sp != NULL); if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { return (0); } --- 2675,2685 ---- rctl_qty_t unlocked = 0; rctl_qty_t locked = 0; struct proc *p = curproc; kproject_t *proj; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); ASSERT(sp != NULL); if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { return (0); }
*** 2805,2815 **** { struct shm_data *shmd = (struct shm_data *)seg->s_data; struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); /* * ISM segment is always rw. */ while (--pgno >= 0) --- 2805,2815 ---- { struct shm_data *shmd = (struct shm_data *)seg->s_data; struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * ISM segment is always rw. */ while (--pgno >= 0)
*** 2819,2829 **** /*ARGSUSED*/ u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr) { ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); /* Offset does not matter in ISM memory */ return ((u_offset_t)0); } --- 2819,2829 ---- /*ARGSUSED*/ u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr) { ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* Offset does not matter in ISM memory */ return ((u_offset_t)0); }
*** 2833,2843 **** segspt_shmgettype(struct seg *seg, caddr_t addr) { struct shm_data *shmd = (struct shm_data *)seg->s_data; struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); /* * The shared memory mapping is always MAP_SHARED, SWAP is only * reserved for DISM */ --- 2833,2843 ---- segspt_shmgettype(struct seg *seg, caddr_t addr) { struct shm_data *shmd = (struct shm_data *)seg->s_data; struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * The shared memory mapping is always MAP_SHARED, SWAP is only * reserved for DISM */
*** 2850,2860 **** segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp) { struct shm_data *shmd = (struct shm_data *)seg->s_data; struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); *vpp = sptd->spt_vp; return (0); } --- 2850,2860 ---- segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp) { struct shm_data *shmd = (struct shm_data *)seg->s_data; struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); *vpp = sptd->spt_vp; return (0); }
*** 2876,2886 **** ushort_t gen; clock_t end_lbolt; int writer; page_t **ppa; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); if (behav == MADV_FREE) { if ((sptd->spt_flags & SHM_PAGEABLE) == 0) return (0); --- 2876,2886 ---- ushort_t gen; clock_t end_lbolt; int writer; page_t **ppa; ! ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); if (behav == MADV_FREE) { if ((sptd->spt_flags & SHM_PAGEABLE) == 0) return (0);
*** 2910,2922 **** * Drop the AS_LOCK so that other threads can grab it * in the as_pageunlock path and hopefully get the segment * kicked out of the seg_pcache. We bump the shm_softlockcnt * to keep this segment resident. */ ! writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock); atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt))); ! AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock); mutex_enter(&sptd->spt_lock); end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait); --- 2910,2922 ---- * Drop the AS_LOCK so that other threads can grab it * in the as_pageunlock path and hopefully get the segment * kicked out of the seg_pcache. We bump the shm_softlockcnt * to keep this segment resident. */ ! writer = AS_WRITE_HELD(seg->s_as); atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt))); ! AS_LOCK_EXIT(seg->s_as); mutex_enter(&sptd->spt_lock); end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
*** 2933,2944 **** } mutex_exit(&sptd->spt_lock); /* Regrab the AS_LOCK and release our hold on the segment */ ! AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock, ! writer ? RW_WRITER : RW_READER); atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt))); if (shmd->shm_softlockcnt <= 0) { if (AS_ISUNMAPWAIT(seg->s_as)) { mutex_enter(&seg->s_as->a_contents); if (AS_ISUNMAPWAIT(seg->s_as)) { --- 2933,2943 ---- } mutex_exit(&sptd->spt_lock); /* Regrab the AS_LOCK and release our hold on the segment */ ! AS_LOCK_ENTER(seg->s_as, writer ? RW_WRITER : RW_READER); atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt))); if (shmd->shm_softlockcnt <= 0) { if (AS_ISUNMAPWAIT(seg->s_as)) { mutex_enter(&seg->s_as->a_contents); if (AS_ISUNMAPWAIT(seg->s_as)) {