{
        /* Reject empty ranges, address wrap-around, and lengths over UINT_MAX. */
        if ((vec->addr + vec->bytes <= vec->addr) ||
            (vec->bytes > (uint64_t)UINT_MAX)) {
                return (0);
        }

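        /*
         * Example: with 4K pages (PAGESHIFT == 12), addr = 0x1000 and
         * bytes = 0x2001 cover pages 1 through 3:
         * ((0x3001 + 0xfff) >> 12) - (0x1000 >> 12) = 4 - 1 = 3.
         */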
        return (((vec->addr + vec->bytes + PAGESIZE - 1) >>
            PAGESHIFT) - (vec->addr >> PAGESHIFT));
}

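/*
 * Look up the MR keyed by "key" in the per-socket AVL tree. If nothing
 * is found and "insert" is non-NULL, insert it at the position the
 * lookup discovered and take a reference on behalf of the tree.
 * Returns the existing MR, or NULL when nothing was found (or when
 * "insert" was added). Callers hold rs_rdma_lock.
 */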
static struct rdsv3_mr *
rdsv3_mr_tree_walk(struct avl_tree *root, uint32_t key,
    struct rdsv3_mr *insert)
{
        struct rdsv3_mr *mr;
        avl_index_t where;

        mr = avl_find(root, &key, &where);
        if ((mr == NULL) && (insert != NULL)) {
                avl_insert(root, (void *)insert, where);
                atomic_inc_32(&insert->r_refcount);
                return (NULL);
        }

        return (mr);
}

/*
 * Destroy the transport-specific part of an MR.
 */
static void
rdsv3_destroy_mr(struct rdsv3_mr *mr)
{
        struct rdsv3_sock *rs = mr->r_sock;
        void *trans_private = NULL;
        avl_node_t *np;

        RDSV3_DPRINTF5("rdsv3_destroy_mr",
            "RDS: destroy mr key is %x refcnt %u",
            mr->r_key, atomic_get(&mr->r_refcount));

/* ... elided (rest of rdsv3_destroy_mr; the lines below resume inside __rdsv3_rdma_map) ... */

                if (ret != 0) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        RDSV3_DPRINTF5("__rdsv3_rdma_map",
            "RDS: get_mr mr 0x%p addr 0x%llx key 0x%x",
            mr, args->vec.addr, mr->r_key);
        /*
         * Inserting the new MR into the AVL tree bumps its
         * reference count.
         */
        mutex_enter(&rs->rs_rdma_lock);
        found = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
        mutex_exit(&rs->rs_rdma_lock);

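        /*
         * The walk either inserted mr (and returned NULL) or found this
         * same mr already in the tree; finding a different MR under
         * this key would mean the R_Key is already in use.
         */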
        ASSERT(!(found && found != mr));

        if (mr_ret) {
                atomic_inc_32(&mr->r_refcount);
                *mr_ret = mr;
        }

        ret = 0;
out:
        if (mr)
                rdsv3_mr_put(mr);
        return (ret);
}

int
rdsv3_get_mr(struct rdsv3_sock *rs, const void *optval, int optlen)
{
        struct rds_get_mr_args args;

        if (optlen != sizeof (struct rds_get_mr_args))
                return (-EINVAL);

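        /*
         * optval is assumed to have been copied into kernel space by
         * the socket layer before we are called, so a plain bcopy
         * (rather than a copyin) is taken to be sufficient here.
         */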
#if 1
        bcopy((struct rds_get_mr_args *)optval, &args,
            sizeof (struct rds_get_mr_args));

/* ... elided (rest of rdsv3_get_mr; the lines below resume at the top of rdsv3_rdma_unuse) ... */

{
        struct rdsv3_mr *mr;
        int zot_me = 0;

        RDSV3_DPRINTF4("rdsv3_rdma_unuse", "Enter rkey: 0x%x", r_key);

        mutex_enter(&rs->rs_rdma_lock);
        mr = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
        if (!mr) {
                RDSV3_DPRINTF4("rdsv3_rdma_unuse",
                    "rdsv3: trying to unuse MR with unknown r_key %u!", r_key);
                mutex_exit(&rs->rs_rdma_lock);
                return;
        }

        if (mr->r_use_once || force) {
                avl_remove(&rs->rs_rdma_keys, &mr->r_rb_node);
                RB_CLEAR_NODE(&mr->r_rb_node);
                zot_me = 1;
        } else {
                atomic_inc_32(&mr->r_refcount);
        }
        mutex_exit(&rs->rs_rdma_lock);
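        /*
         * If we zotted the MR out of the tree we took no new reference,
         * so the rdsv3_mr_put() below drops the reference the tree
         * held; otherwise we hold our own reference across the sync
         * and drop it below.
         */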

        /*
         * May have to issue a dma_sync on this memory region.
         * Note we could avoid this if the operation was an RDMA READ,
         * but at this point we can't tell.
         */
        if (mr->r_trans->sync_mr)
                mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

        /*
         * If the MR was marked as invalidate, this will
         * trigger an async flush.
         */
        if (zot_me)
                rdsv3_destroy_mr(mr);
        rdsv3_mr_put(mr);
        RDSV3_DPRINTF4("rdsv3_rdma_unuse", "Return");
}

/* ... elided (the lines below resume inside the RDMA-dest cmsg handler) ... */

        if (cmsg->cmsg_len != CMSG_LEN(sizeof (rds_rdma_cookie_t)) ||
            rm->m_rdma_cookie != 0)
                return (-EINVAL);

        (void) memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg),
            sizeof (rm->m_rdma_cookie));

        /*
         * We are reusing a previously mapped MR here. Most likely, the
         * application has written to the buffer, so we need to explicitly
         * flush those writes to RAM. Otherwise the HCA may not see them
         * when doing a DMA from that buffer.
         */
        r_key = rdsv3_rdma_cookie_key(rm->m_rdma_cookie);

        mutex_enter(&rs->rs_rdma_lock);
        mr = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
        if (!mr)
                err = -EINVAL;  /* invalid r_key */
        else
                atomic_inc_32(&mr->r_refcount);
        mutex_exit(&rs->rs_rdma_lock);

        if (mr) {
                mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
                rm->m_rdma_mr = mr;
        }
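        /*
         * rm->m_rdma_mr now holds the reference taken under
         * rs_rdma_lock above (presumably dropped when the message is
         * torn down).
         */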
        return (err);
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int
rdsv3_cmsg_rdma_map(struct rdsv3_sock *rs, struct rdsv3_message *rm,
    struct cmsghdr *cmsg)
{
        /* uint64_t alignment on the buffer */
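        /*
         * The <R_Key,offset> pair described above is packed into
         * rm->m_rdma_cookie; rdsv3_rdma_cookie_key() (used earlier)
         * recovers the R_Key half.
         */

/* ... elided (rest of rdsv3_cmsg_rdma_map) ... */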