1527 * If a translation already exists, the data can be written
1528 * through since the old data has already been saved off.
1529 */
1530 if (isset(cmap->cmap_hastrans, cowchunk)) {
1531 continue;
1532 }
1533
1534
1535 /*
1536 * Throttle translations if there are too many outstanding
1537 * chunks in memory. The semaphore is sema_v'd by the taskq.
1538 *
1539 * You can't keep the sid_rwlock if you would go to sleep.
1540 * This will result in deadlock when someone tries to delete
1541 * the snapshot (wants the sid_rwlock as a writer, but can't
1542 * get it).
1543 */
1544 if (throttle_write) {
1545 if (sema_tryp(&cmap->cmap_throttle_sem) == 0) {
1546 rw_exit(&sidp->sid_rwlock);
1547 atomic_add_32(&cmap->cmap_waiters, 1);
1548 sema_p(&cmap->cmap_throttle_sem);
1549 atomic_add_32(&cmap->cmap_waiters, -1);
1550 rw_enter(&sidp->sid_rwlock, RW_READER);
1551
1552 /*
1553 * Now since we released the sid_rwlock the state may
1554 * have transitioned underneath us, so check that again.
1555 */
1556 if (sidp != *sidpp || SID_INACTIVE(sidp)) {
1557 sema_v(&cmap->cmap_throttle_sem);
1558 return (ENXIO);
1559 }
1560 }
1561 }
1562
1563 /*
1564 * Acquire the lock as a writer and check to see if a
1565 * translation has been added in the meantime.
1566 */
1567 rw_enter(&cmap->cmap_rwlock, RW_WRITER);
1568 if (isset(cmap->cmap_hastrans, cowchunk)) {
1569 if (throttle_write)
1663 int bf_index;
1664 int release_sem = cmn->release_sem;
1665
1666 /*
1667 * The sid_rwlock does not need to be held here because the taskqs
1668 * are destroyed explicitly by fssnap_delete (with the sid_rwlock
1669 * held as a writer). taskq_destroy() will flush all of the tasks
1670 * out before fssnap_delete frees up all of the structures.
1671 */
1672
1673 /* if the snapshot was disabled from under us, drop the request. */
1674 rw_enter(&sidp->sid_rwlock, RW_READER);
1675 if (SID_INACTIVE(sidp)) {
1676 rw_exit(&sidp->sid_rwlock);
1677 if (release_sem)
1678 sema_v(&cmap->cmap_throttle_sem);
1679 return;
1680 }
1681 rw_exit(&sidp->sid_rwlock);
1682
1683 atomic_add_64((uint64_t *)&cmap->cmap_nchunks, 1);
1684
1685 if ((cmap->cmap_maxsize != 0) &&
1686 ((cmap->cmap_nchunks * cmap->cmap_chunksz) > cmap->cmap_maxsize)) {
1687 cmn_err(CE_WARN, "fssnap_write_taskq: snapshot %d (%s) has "
1688 "reached the maximum backing file size specified (%llu "
1689 "bytes) and will be deleted.", sidp->sid_snapnumber,
1690 (char *)cowp->cow_kstat_mntpt->ks_data,
1691 cmap->cmap_maxsize);
1692 if (release_sem)
1693 sema_v(&cmap->cmap_throttle_sem);
1694 atomic_or_uint(&sidp->sid_flags, SID_DELETE);
1695 return;
1696 }
1697
1698 /* perform the write */
1699 bf_index = cmn->cmn_chunk / cmap->cmap_chunksperbf;
1700
1701 if (error = vn_rdwr(UIO_WRITE, (cowp->cow_backfile_array)[bf_index],
1702 cmn->cmn_buf, cmap->cmap_chunksz,
1703 (cmn->cmn_chunk % cmap->cmap_chunksperbf) * cmap->cmap_chunksz,
|
1527 * If a translation already exists, the data can be written
1528 * through since the old data has already been saved off.
1529 */
1530 if (isset(cmap->cmap_hastrans, cowchunk)) {
1531 continue;
1532 }
1533
1534
1535 /*
1536 * Throttle translations if there are too many outstanding
1537 * chunks in memory. The semaphore is sema_v'd by the taskq.
1538 *
1539 * You can't keep the sid_rwlock if you would go to sleep.
1540 * This will result in deadlock when someone tries to delete
1541 * the snapshot (wants the sid_rwlock as a writer, but can't
1542 * get it).
1543 */
1544 if (throttle_write) {
1545 if (sema_tryp(&cmap->cmap_throttle_sem) == 0) {
1546 rw_exit(&sidp->sid_rwlock);
1547 atomic_inc_32(&cmap->cmap_waiters);
1548 sema_p(&cmap->cmap_throttle_sem);
1549 atomic_dec_32(&cmap->cmap_waiters);
1550 rw_enter(&sidp->sid_rwlock, RW_READER);
1551
1552 /*
1553 * Now since we released the sid_rwlock the state may
1554 * have transitioned underneath us, so check that again.
1555 */
1556 if (sidp != *sidpp || SID_INACTIVE(sidp)) {
1557 sema_v(&cmap->cmap_throttle_sem);
1558 return (ENXIO);
1559 }
1560 }
1561 }
1562
1563 /*
1564 * Acquire the lock as a writer and check to see if a
1565 * translation has been added in the meantime.
1566 */
1567 rw_enter(&cmap->cmap_rwlock, RW_WRITER);
1568 if (isset(cmap->cmap_hastrans, cowchunk)) {
1569 if (throttle_write)
1663 int bf_index;
1664 int release_sem = cmn->release_sem;
1665
1666 /*
1667 * The sid_rwlock does not need to be held here because the taskqs
1668 * are destroyed explicitly by fssnap_delete (with the sid_rwlock
1669 * held as a writer). taskq_destroy() will flush all of the tasks
1670 * out before fssnap_delete frees up all of the structures.
1671 */
1672
1673 /* if the snapshot was disabled from under us, drop the request. */
1674 rw_enter(&sidp->sid_rwlock, RW_READER);
1675 if (SID_INACTIVE(sidp)) {
1676 rw_exit(&sidp->sid_rwlock);
1677 if (release_sem)
1678 sema_v(&cmap->cmap_throttle_sem);
1679 return;
1680 }
1681 rw_exit(&sidp->sid_rwlock);
1682
1683 atomic_inc_64((uint64_t *)&cmap->cmap_nchunks);
1684
1685 if ((cmap->cmap_maxsize != 0) &&
1686 ((cmap->cmap_nchunks * cmap->cmap_chunksz) > cmap->cmap_maxsize)) {
1687 cmn_err(CE_WARN, "fssnap_write_taskq: snapshot %d (%s) has "
1688 "reached the maximum backing file size specified (%llu "
1689 "bytes) and will be deleted.", sidp->sid_snapnumber,
1690 (char *)cowp->cow_kstat_mntpt->ks_data,
1691 cmap->cmap_maxsize);
1692 if (release_sem)
1693 sema_v(&cmap->cmap_throttle_sem);
1694 atomic_or_uint(&sidp->sid_flags, SID_DELETE);
1695 return;
1696 }
1697
1698 /* perform the write */
1699 bf_index = cmn->cmn_chunk / cmap->cmap_chunksperbf;
1700
1701 if (error = vn_rdwr(UIO_WRITE, (cowp->cow_backfile_array)[bf_index],
1702 cmn->cmn_buf, cmap->cmap_chunksz,
1703 (cmn->cmn_chunk % cmap->cmap_chunksperbf) * cmap->cmap_chunksz,
|