	 * In this case the map request must be refused. We use
	 * rp->r_lkserlock to avoid a race with concurrent lock requests.
	 */
	rp = VTOR(vp);

	/*
	 * Atomically increment r_inmap after acquiring r_rwlock. The
	 * idea here is that r_rwlock is taken to block read/write, not
	 * to protect r_inmap; r_inmap informs nfs_read/write() that we
	 * are in nfs_map(). Because r_rwlock is acquired in the proper
	 * lock order here, we avoid the deadlock that would occur if
	 * nfs_addmap() acquired it out of order.
	 *
	 * Since no lock protects r_inmap, we hold no lock when we
	 * decrement it; we atomically decrement r_inmap after we
	 * release r_lkserlock.
	 */

	if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR(vp)))
		return (EINTR);
	atomic_inc_uint(&rp->r_inmap);
	nfs_rw_exit(&rp->r_rwlock);

	if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp))) {
		atomic_dec_uint(&rp->r_inmap);
		return (EINTR);
	}
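	/* The vnode is marked not cachable, so refuse the map request. */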
	if (vp->v_flag & VNOCACHE) {
		error = EAGAIN;
		goto done;
	}

	/*
	 * Don't allow concurrent locks and mapping if mandatory locking is
	 * enabled.
	 */
	if ((flk_has_remote_locks(vp) || lm_has_sleep(vp)) &&
	    MANDLOCK(vp, va.va_mode)) {
		error = EAGAIN;
		goto done;
	}

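	/*
	 * Hold the address-space range lock across address selection
	 * and segment creation so that the chosen range cannot be
	 * claimed by another thread in between.
	 */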
	as_rangelock(as);
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		goto done;
	}

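	/*
	 * Set up the segvn_create() arguments: a straight mapping of
	 * the vnode with no anonymous map, the default (zero) page
	 * size code, and the default lgroup memory policy.
	 */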
	vn_a.vp = vp;
	vn_a.offset = off;
	vn_a.type = (flags & MAP_TYPE);
	vn_a.prot = (uchar_t)prot;
	vn_a.maxprot = (uchar_t)maxprot;
	vn_a.flags = (flags & ~MAP_TYPE);
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map(as, *addrp, len, segvn_create, &vn_a);
	as_rangeunlock(as);

done:
	nfs_rw_exit(&rp->r_lkserlock);
	atomic_dec_uint(&rp->r_inmap);
	return (error);
}

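/*
 * nfs_addmap() is called whenever a new mapping over the vnode is
 * added; all it needs to do is account for the newly mapped pages.
 */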
/* ARGSUSED */
static int
nfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
	rnode_t *rp;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);
	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	rp = VTOR(vp);
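	/*
	 * btopr() rounds len up to a whole number of pages; r_mapcnt
	 * tracks how many pages of this file are currently mapped.
	 */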
	atomic_add_long((ulong_t *)&rp->r_mapcnt, btopr(len));

	return (0);
}