5045 use atomic_{inc,dec}_* instead of atomic_add_*
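The change below is mechanical: each reference-count bump written as atomic_add_32(&x, 1) becomes atomic_inc_32(&x). Both routines belong to the illumos atomic operations family (atomic_ops(9F) in the kernel), so behavior is identical; the dedicated increment form simply states the intent and drops the magic literal. A minimal sketch of the idiom, using a hypothetical counter rather than anything from this file:

	#include <sys/atomic.h>

	volatile uint32_t refcount;	/* hypothetical counter, not from this file */

	void
	take_ref_old(void)
	{
		/* Before: the increment is spelled as an add of the literal 1. */
		atomic_add_32(&refcount, 1);
	}

	void
	take_ref_new(void)
	{
		/* After: the dedicated form states the intent directly. */
		atomic_inc_32(&refcount);
	}
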
--- old/usr/src/uts/common/io/ib/clients/rdsv3/rdma.c
+++ new/usr/src/uts/common/io/ib/clients/rdsv3/rdma.c
1 1 /*
2 2 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
3 3 */
4 4
5 5 /*
6 6 * This file contains code imported from the OFED rds source file rdma.c
7 7 * Oracle elects to have and use the contents of rdma.c under and governed
8 8 * by the OpenIB.org BSD license (see below for full license text). However,
9 9 * the following notice accompanied the original version of this file:
10 10 */
11 11
12 12 /*
13 13 * Copyright (c) 2007 Oracle. All rights reserved.
14 14 *
15 15 * This software is available to you under a choice of one of two
16 16 * licenses. You may choose to be licensed under the terms of the GNU
17 17 * General Public License (GPL) Version 2, available from the file
18 18 * COPYING in the main directory of this source tree, or the
19 19 * OpenIB.org BSD license below:
20 20 *
21 21 * Redistribution and use in source and binary forms, with or
22 22 * without modification, are permitted provided that the following
23 23 * conditions are met:
24 24 *
25 25 * - Redistributions of source code must retain the above
26 26 * copyright notice, this list of conditions and the following
27 27 * disclaimer.
28 28 *
29 29 * - Redistributions in binary form must reproduce the above
30 30 * copyright notice, this list of conditions and the following
31 31 * disclaimer in the documentation and/or other materials
32 32 * provided with the distribution.
33 33 *
34 34 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
35 35 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
36 36 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
37 37 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
38 38 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
39 39 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
40 40 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
41 41 * SOFTWARE.
42 42 *
43 43 */
44 44 #include <sys/ib/clients/of/rdma/ib_verbs.h>
45 45 #include <sys/ib/clients/of/rdma/ib_addr.h>
46 46 #include <sys/ib/clients/of/rdma/rdma_cm.h>
47 47
48 48 #include <sys/ib/clients/rdsv3/ib.h>
49 49 #include <sys/ib/clients/rdsv3/rdma.h>
50 50 #include <sys/ib/clients/rdsv3/rdsv3_debug.h>
51 51
52 52 #define DMA_TO_DEVICE 0
53 53 #define DMA_FROM_DEVICE 1
54 54 #define RB_CLEAR_NODE(nodep) AVL_SETPARENT(nodep, nodep);
55 55
56 56 /*
57 57 * XXX
58 58 * - build with sparse
59 59 * - should we limit the size of a mr region? let transport return failure?
60 60 * - should we detect duplicate keys on a socket? hmm.
61 61 * - an rdma is an mlock, apply rlimit?
62 62 */
63 63
64 64 /*
65 65 * get the number of pages by looking at the page indices that the start and
66 66 * end addresses fall in.
67 67 *
68 68 * Returns 0 if the vec is invalid. It is invalid if the number of bytes
69 69 * causes the address to wrap or overflows an unsigned int. This comes
70 70 * from being stored in the 'length' member of 'struct rdsv3_scatterlist'.
71 71 */
72 72 static unsigned int
73 73 rdsv3_pages_in_vec(struct rds_iovec *vec)
74 74 {
75 75 if ((vec->addr + vec->bytes <= vec->addr) ||
76 76 (vec->bytes > (uint64_t)UINT_MAX)) {
77 77 return (0);
78 78 }
79 79
80 80 return (((vec->addr + vec->bytes + PAGESIZE - 1) >>
81 81 PAGESHIFT) - (vec->addr >> PAGESHIFT));
82 82 }
83 83
84 84 static struct rdsv3_mr *
85 85 rdsv3_mr_tree_walk(struct avl_tree *root, uint32_t key,
86 86 struct rdsv3_mr *insert)
87 87 {
88 88 struct rdsv3_mr *mr;
89 89 avl_index_t where;
90 90
91 91 mr = avl_find(root, &key, &where);
92 92 if ((mr == NULL) && (insert != NULL)) {
93 93 avl_insert(root, (void *)insert, where);
94 - atomic_add_32(&insert->r_refcount, 1);
94 + atomic_inc_32(&insert->r_refcount);
95 95 return (NULL);
96 96 }
97 97
98 98 return (mr);
99 99 }
100 100
101 101 /*
102 102 * Destroy the transport-specific part of a MR.
103 103 */
104 104 static void
105 105 rdsv3_destroy_mr(struct rdsv3_mr *mr)
106 106 {
107 107 struct rdsv3_sock *rs = mr->r_sock;
108 108 void *trans_private = NULL;
109 109 avl_node_t *np;
110 110
111 111 RDSV3_DPRINTF5("rdsv3_destroy_mr",
112 112 "RDS: destroy mr key is %x refcnt %u",
113 113 mr->r_key, atomic_get(&mr->r_refcount));
114 114
115 115 if (test_and_set_bit(RDSV3_MR_DEAD, &mr->r_state))
116 116 return;
117 117
118 118 mutex_enter(&rs->rs_rdma_lock);
119 119 np = &mr->r_rb_node;
120 120 if (AVL_XPARENT(np) != np)
121 121 avl_remove(&rs->rs_rdma_keys, mr);
122 122 trans_private = mr->r_trans_private;
123 123 mr->r_trans_private = NULL;
124 124 mutex_exit(&rs->rs_rdma_lock);
125 125
126 126 if (trans_private)
127 127 mr->r_trans->free_mr(trans_private, mr->r_invalidate);
128 128 }
129 129
130 130 void
131 131 __rdsv3_put_mr_final(struct rdsv3_mr *mr)
132 132 {
133 133 rdsv3_destroy_mr(mr);
134 134 kmem_free(mr, sizeof (*mr));
135 135 }
136 136
137 137 /*
138 138 * By the time this is called we can't have any more ioctls called on
139 139 * the socket so we don't need to worry about racing with others.
140 140 */
141 141 void
142 142 rdsv3_rdma_drop_keys(struct rdsv3_sock *rs)
143 143 {
144 144 struct rdsv3_mr *mr;
145 145 struct avl_node *node;
146 146
147 147 /* Release any MRs associated with this socket */
148 148 mutex_enter(&rs->rs_rdma_lock);
149 149 while ((node = avl_first(&rs->rs_rdma_keys))) {
150 150 mr = container_of(node, struct rdsv3_mr, r_rb_node);
151 151 if (mr->r_trans == rs->rs_transport)
152 152 mr->r_invalidate = 0;
153 153 avl_remove(&rs->rs_rdma_keys, &mr->r_rb_node);
154 154 RB_CLEAR_NODE(&mr->r_rb_node)
155 155 mutex_exit(&rs->rs_rdma_lock);
156 156 rdsv3_destroy_mr(mr);
157 157 rdsv3_mr_put(mr);
158 158 mutex_enter(&rs->rs_rdma_lock);
159 159 }
160 160 mutex_exit(&rs->rs_rdma_lock);
161 161
162 162 if (rs->rs_transport && rs->rs_transport->flush_mrs)
163 163 rs->rs_transport->flush_mrs();
164 164 }
165 165
166 166 static int
167 167 __rdsv3_rdma_map(struct rdsv3_sock *rs, struct rds_get_mr_args *args,
168 168 uint64_t *cookie_ret, struct rdsv3_mr **mr_ret)
169 169 {
170 170 struct rdsv3_mr *mr = NULL, *found;
171 171 void *trans_private;
172 172 rds_rdma_cookie_t cookie;
173 173 unsigned int nents = 0;
174 174 int ret;
175 175
176 176 if (rs->rs_bound_addr == 0) {
177 177 ret = -ENOTCONN; /* XXX not a great errno */
178 178 goto out;
179 179 }
180 180
181 181 if (!rs->rs_transport->get_mr) {
182 182 ret = -EOPNOTSUPP;
183 183 goto out;
184 184 }
185 185
186 186 mr = kmem_zalloc(sizeof (struct rdsv3_mr), KM_NOSLEEP);
187 187 if (!mr) {
188 188 ret = -ENOMEM;
189 189 goto out;
190 190 }
191 191
192 192 mr->r_refcount = 1;
193 193 RB_CLEAR_NODE(&mr->r_rb_node);
194 194 mr->r_trans = rs->rs_transport;
195 195 mr->r_sock = rs;
196 196
197 197 if (args->flags & RDS_RDMA_USE_ONCE)
198 198 mr->r_use_once = 1;
199 199 if (args->flags & RDS_RDMA_INVALIDATE)
200 200 mr->r_invalidate = 1;
201 201 if (args->flags & RDS_RDMA_READWRITE)
202 202 mr->r_write = 1;
203 203
204 204 /*
205 205 * Obtain a transport specific MR. If this succeeds, the
206 206 * s/g list is now owned by the MR.
207 207 * Note that dma_map() implies that pending writes are
208 208 * flushed to RAM, so no dma_sync is needed here.
209 209 */
210 210 trans_private = rs->rs_transport->get_mr(&args->vec, nents, rs,
211 211 &mr->r_key);
212 212
213 213 if (IS_ERR(trans_private)) {
214 214 ret = PTR_ERR(trans_private);
215 215 goto out;
216 216 }
217 217
218 218 mr->r_trans_private = trans_private;
219 219
220 220 /*
221 221 * The user may pass us an unaligned address, but we can only
222 222 * map page aligned regions. So we keep the offset, and build
223 223 * a 64bit cookie containing <R_Key, offset> and pass that
224 224 * around.
225 225 */
226 226 cookie = rdsv3_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGEMASK);
227 227 if (cookie_ret)
228 228 *cookie_ret = cookie;
229 229
230 230 /*
231 231 * copy value of cookie to user address at args->cookie_addr
232 232 */
233 233 if (args->cookie_addr) {
234 234 ret = ddi_copyout((void *)&cookie,
235 235 (void *)((intptr_t)args->cookie_addr),
236 236 sizeof (rds_rdma_cookie_t), 0);
237 237 if (ret != 0) {
238 238 ret = -EFAULT;
239 239 goto out;
240 240 }
241 241 }
242 242
243 243 RDSV3_DPRINTF5("__rdsv3_rdma_map",
244 244 "RDS: get_mr mr 0x%p addr 0x%llx key 0x%x",
245 245 mr, args->vec.addr, mr->r_key);
246 246 /*
247 247 * Inserting the new MR into the rbtree bumps its
248 248 * reference count.
249 249 */
250 250 mutex_enter(&rs->rs_rdma_lock);
251 251 found = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
252 252 mutex_exit(&rs->rs_rdma_lock);
253 253
254 254 ASSERT(!(found && found != mr));
255 255
256 256 if (mr_ret) {
257 - atomic_add_32(&mr->r_refcount, 1);
257 + atomic_inc_32(&mr->r_refcount);
258 258 *mr_ret = mr;
259 259 }
260 260
261 261 ret = 0;
262 262 out:
263 263 if (mr)
264 264 rdsv3_mr_put(mr);
265 265 return (ret);
266 266 }
267 267
268 268 int
269 269 rdsv3_get_mr(struct rdsv3_sock *rs, const void *optval, int optlen)
270 270 {
271 271 struct rds_get_mr_args args;
272 272
273 273 if (optlen != sizeof (struct rds_get_mr_args))
274 274 return (-EINVAL);
275 275
276 276 #if 1
277 277 bcopy((struct rds_get_mr_args *)optval, &args,
278 278 sizeof (struct rds_get_mr_args));
279 279 #else
280 280 if (ddi_copyin(optval, &args, optlen, 0))
281 281 return (-EFAULT);
282 282 #endif
283 283
284 284 return (__rdsv3_rdma_map(rs, &args, NULL, NULL));
285 285 }
286 286
287 287 int
288 288 rdsv3_get_mr_for_dest(struct rdsv3_sock *rs, const void *optval,
289 289 int optlen)
290 290 {
291 291 struct rds_get_mr_for_dest_args args;
292 292 struct rds_get_mr_args new_args;
293 293
294 294 if (optlen != sizeof (struct rds_get_mr_for_dest_args))
295 295 return (-EINVAL);
296 296
297 297 #if 1
298 298 bcopy((struct rds_get_mr_for_dest_args *)optval, &args,
299 299 sizeof (struct rds_get_mr_for_dest_args));
300 300 #else
301 301 if (ddi_copyin(optval, &args, optlen, 0))
302 302 return (-EFAULT);
303 303 #endif
304 304
305 305 /*
306 306 * Initially, just behave like get_mr().
307 307 * TODO: Implement get_mr as wrapper around this
308 308 * and deprecate it.
309 309 */
310 310 new_args.vec = args.vec;
311 311 new_args.cookie_addr = args.cookie_addr;
312 312 new_args.flags = args.flags;
313 313
314 314 return (__rdsv3_rdma_map(rs, &new_args, NULL, NULL));
315 315 }
316 316
317 317 /*
318 318 * Free the MR indicated by the given R_Key
319 319 */
320 320 int
321 321 rdsv3_free_mr(struct rdsv3_sock *rs, const void *optval, int optlen)
322 322 {
323 323 struct rds_free_mr_args args;
324 324 struct rdsv3_mr *mr;
325 325
326 326 if (optlen != sizeof (struct rds_free_mr_args))
327 327 return (-EINVAL);
328 328
329 329 #if 1
330 330 bcopy((struct rds_free_mr_args *)optval, &args,
331 331 sizeof (struct rds_free_mr_args));
332 332 #else
333 333 if (ddi_copyin((struct rds_free_mr_args *)optval, &args,
334 334 sizeof (struct rds_free_mr_args), 0))
335 335 return (-EFAULT);
336 336 #endif
337 337
338 338 /* Special case - a null cookie means flush all unused MRs */
339 339 if (args.cookie == 0) {
340 340 if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
341 341 return (-EINVAL);
342 342 rs->rs_transport->flush_mrs();
343 343 return (0);
344 344 }
345 345
346 346 /*
347 347 * Look up the MR given its R_key and remove it from the rbtree
348 348 * so nobody else finds it.
349 349 * This should also prevent races with rdsv3_rdma_unuse.
350 350 */
351 351 mutex_enter(&rs->rs_rdma_lock);
352 352 mr = rdsv3_mr_tree_walk(&rs->rs_rdma_keys,
353 353 rdsv3_rdma_cookie_key(args.cookie), NULL);
354 354 if (mr) {
355 355 avl_remove(&rs->rs_rdma_keys, &mr->r_rb_node);
356 356 RB_CLEAR_NODE(&mr->r_rb_node);
357 357 if (args.flags & RDS_RDMA_INVALIDATE)
358 358 mr->r_invalidate = 1;
359 359 }
360 360 mutex_exit(&rs->rs_rdma_lock);
361 361
362 362 if (!mr)
363 363 return (-EINVAL);
364 364
365 365 /*
366 366 * call rdsv3_destroy_mr() ourselves so that we're sure it's done
367 367 * by time we return. If we let rdsv3_mr_put() do it it might not
368 368 * happen until someone else drops their ref.
369 369 */
370 370 rdsv3_destroy_mr(mr);
371 371 rdsv3_mr_put(mr);
372 372 return (0);
373 373 }
374 374
375 375 /*
376 376 * This is called when we receive an extension header that
377 377 * tells us this MR was used. It allows us to implement
378 378 * use_once semantics
379 379 */
380 380 void
381 381 rdsv3_rdma_unuse(struct rdsv3_sock *rs, uint32_t r_key, int force)
382 382 {
383 383 struct rdsv3_mr *mr;
384 384 int zot_me = 0;
385 385
386 386 RDSV3_DPRINTF4("rdsv3_rdma_unuse", "Enter rkey: 0x%x", r_key);
387 387
388 388 mutex_enter(&rs->rs_rdma_lock);
389 389 mr = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
390 390 if (!mr) {
391 391 RDSV3_DPRINTF4("rdsv3_rdma_unuse",
392 392 "rdsv3: trying to unuse MR with unknown r_key %u!", r_key);
393 393 mutex_exit(&rs->rs_rdma_lock);
394 394 return;
395 395 }
396 396
397 397 if (mr->r_use_once || force) {
398 398 avl_remove(&rs->rs_rdma_keys, &mr->r_rb_node);
399 399 RB_CLEAR_NODE(&mr->r_rb_node);
400 400 zot_me = 1;
401 401 } else {
402 - atomic_add_32(&mr->r_refcount, 1);
402 + atomic_inc_32(&mr->r_refcount);
403 403 }
404 404 mutex_exit(&rs->rs_rdma_lock);
405 405
406 406 /*
407 407 * May have to issue a dma_sync on this memory region.
408 408 * Note we could avoid this if the operation was a RDMA READ,
409 409 * but at this point we can't tell.
410 410 */
411 411 if (mr->r_trans->sync_mr)
412 412 mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
413 413
414 414 /*
415 415 * If the MR was marked as invalidate, this will
416 416 * trigger an async flush.
417 417 */
418 418 if (zot_me)
419 419 rdsv3_destroy_mr(mr);
420 420 rdsv3_mr_put(mr);
421 421 RDSV3_DPRINTF4("rdsv3_rdma_unuse", "Return");
422 422 }
423 423
424 424 void
425 425 rdsv3_rdma_free_op(struct rdsv3_rdma_op *ro)
426 426 {
427 427 unsigned int i;
428 428
429 429 /* deallocate RDMA resources on rdsv3_message */
430 430 for (i = 0; i < ro->r_nents; i++) {
431 431 ddi_umem_unlock(ro->r_rdma_sg[i].umem_cookie);
432 432 }
433 433
434 434 if (ro->r_notifier)
435 435 kmem_free(ro->r_notifier, sizeof (*ro->r_notifier));
436 436 kmem_free(ro, sizeof (*ro));
437 437 }
438 438
439 439 /*
440 440 * args is a pointer to an in-kernel copy in the sendmsg cmsg.
441 441 */
442 442 static struct rdsv3_rdma_op *
443 443 rdsv3_rdma_prepare(struct rdsv3_sock *rs, struct rds_rdma_args *args)
444 444 {
445 445 struct rds_iovec vec;
446 446 struct rdsv3_rdma_op *op = NULL;
447 447 unsigned int nr_bytes;
448 448 struct rds_iovec *local_vec;
449 449 unsigned int nr;
450 450 unsigned int i;
451 451 ddi_umem_cookie_t umem_cookie;
452 452 size_t umem_len;
453 453 caddr_t umem_addr;
454 454 int ret;
455 455
456 456 if (rs->rs_bound_addr == 0) {
457 457 ret = -ENOTCONN; /* XXX not a great errno */
458 458 goto out;
459 459 }
460 460
461 461 if (args->nr_local > (uint64_t)UINT_MAX) {
462 462 ret = -EMSGSIZE;
463 463 goto out;
464 464 }
465 465
466 466 op = kmem_zalloc(offsetof(struct rdsv3_rdma_op,
467 467 r_rdma_sg[args->nr_local]), KM_NOSLEEP);
468 468 if (op == NULL) {
469 469 ret = -ENOMEM;
470 470 goto out;
471 471 }
472 472
473 473 op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
474 474 op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
475 475 op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
476 476 op->r_recverr = rs->rs_recverr;
477 477
478 478 if (op->r_notify || op->r_recverr) {
479 479 /*
480 480 * We allocate an uninitialized notifier here, because
481 481 * we don't want to do that in the completion handler. We
482 482 * would have to use GFP_ATOMIC there, and don't want to deal
483 483 * with failed allocations.
484 484 */
485 485 op->r_notifier = kmem_alloc(sizeof (struct rdsv3_notifier),
486 486 KM_NOSLEEP);
487 487 if (!op->r_notifier) {
488 488 ret = -ENOMEM;
489 489 goto out;
490 490 }
491 491 op->r_notifier->n_user_token = args->user_token;
492 492 op->r_notifier->n_status = RDS_RDMA_SUCCESS;
493 493 }
494 494
495 495 /*
496 496 * The cookie contains the R_Key of the remote memory region, and
497 497 * optionally an offset into it. This is how we implement RDMA into
498 498 * unaligned memory.
499 499 * When setting up the RDMA, we need to add that offset to the
500 500 * destination address (which is really an offset into the MR)
501 501 * FIXME: We may want to move this into ib_rdma.c
502 502 */
503 503 op->r_key = rdsv3_rdma_cookie_key(args->cookie);
504 504 op->r_remote_addr = args->remote_vec.addr +
505 505 rdsv3_rdma_cookie_offset(args->cookie);
506 506
507 507 nr_bytes = 0;
508 508
509 509 RDSV3_DPRINTF5("rdsv3_rdma_prepare",
510 510 "RDS: rdma prepare nr_local %llu rva %llx rkey %x",
511 511 (unsigned long long)args->nr_local,
512 512 (unsigned long long)args->remote_vec.addr,
513 513 op->r_key);
514 514
515 515 local_vec = (struct rds_iovec *)(unsigned long) args->local_vec_addr;
516 516
517 517 /* pin the scatter list of user buffers */
518 518 for (i = 0; i < args->nr_local; i++) {
519 519 if (ddi_copyin(&local_vec[i], &vec,
520 520 sizeof (struct rds_iovec), 0)) {
521 521 ret = -EFAULT;
522 522 goto out;
523 523 }
524 524
525 525 nr = rdsv3_pages_in_vec(&vec);
526 526 if (nr == 0) {
527 527 RDSV3_DPRINTF2("rdsv3_rdma_prepare",
528 528 "rdsv3_pages_in_vec returned 0");
529 529 ret = -EINVAL;
530 530 goto out;
531 531 }
532 532
533 533 rs->rs_user_addr = vec.addr;
534 534 rs->rs_user_bytes = vec.bytes;
535 535
536 536 /* pin user memory pages */
537 537 umem_len = ptob(btopr(vec.bytes +
538 538 ((uintptr_t)vec.addr & PAGEOFFSET)));
539 539 umem_addr = (caddr_t)((uintptr_t)vec.addr & ~PAGEOFFSET);
540 540 ret = umem_lockmemory(umem_addr, umem_len,
541 541 DDI_UMEMLOCK_WRITE | DDI_UMEMLOCK_READ,
542 542 &umem_cookie, NULL, NULL);
543 543 if (ret != 0) {
544 544 RDSV3_DPRINTF2("rdsv3_rdma_prepare",
545 545 "umem_lockmemory() returned %d", ret);
546 546 ret = -EFAULT;
547 547 goto out;
548 548 }
549 549 op->r_rdma_sg[i].umem_cookie = umem_cookie;
550 550 op->r_rdma_sg[i].iovec = vec;
551 551 nr_bytes += vec.bytes;
552 552
553 553 RDSV3_DPRINTF5("rdsv3_rdma_prepare",
554 554 "RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx",
555 555 nr_bytes, nr, vec.bytes, vec.addr);
556 556 }
557 557 op->r_nents = i;
558 558
559 559 if (nr_bytes > args->remote_vec.bytes) {
560 560 RDSV3_DPRINTF2("rdsv3_rdma_prepare",
561 561 "RDS nr_bytes %u remote_bytes %u do not match",
562 562 nr_bytes, (unsigned int) args->remote_vec.bytes);
563 563 ret = -EINVAL;
564 564 goto out;
565 565 }
566 566 op->r_bytes = nr_bytes;
567 567
568 568 ret = 0;
569 569 out:
570 570 if (ret) {
571 571 if (op)
572 572 rdsv3_rdma_free_op(op);
573 573 op = ERR_PTR(ret);
574 574 }
575 575 return (op);
576 576 }
577 577
578 578 #define CEIL(x, y) (((x) + (y) - 1) / (y))
579 579
580 580 /*
581 581 * The application asks for a RDMA transfer.
582 582 * Extract all arguments and set up the rdma_op
583 583 */
584 584 int
585 585 rdsv3_cmsg_rdma_args(struct rdsv3_sock *rs, struct rdsv3_message *rm,
586 586 struct cmsghdr *cmsg)
587 587 {
588 588 struct rdsv3_rdma_op *op;
589 589 /* uint64_t alignment on the buffer */
590 590 uint64_t buf[CEIL(CMSG_LEN(sizeof (struct rds_rdma_args)),
591 591 sizeof (uint64_t))];
592 592
593 593 if (cmsg->cmsg_len != CMSG_LEN(sizeof (struct rds_rdma_args)) ||
594 594 rm->m_rdma_op != NULL)
595 595 return (-EINVAL);
596 596
597 597 ASSERT(sizeof (buf) >= cmsg->cmsg_len && ((uintptr_t)buf & 0x7) == 0);
598 598
599 599 bcopy(CMSG_DATA(cmsg), (char *)buf, cmsg->cmsg_len);
600 600 op = rdsv3_rdma_prepare(rs, (struct rds_rdma_args *)buf);
601 601
602 602 if (IS_ERR(op))
603 603 return (PTR_ERR(op));
604 604 rdsv3_stats_inc(s_send_rdma);
605 605 rm->m_rdma_op = op;
606 606 return (0);
607 607 }
608 608
609 609 /*
610 610 * The application wants us to pass an RDMA destination (aka MR)
611 611 * to the remote
612 612 */
613 613 int
614 614 rdsv3_cmsg_rdma_dest(struct rdsv3_sock *rs, struct rdsv3_message *rm,
615 615 struct cmsghdr *cmsg)
616 616 {
617 617 struct rdsv3_mr *mr;
618 618 uint32_t r_key;
619 619 int err = 0;
620 620
621 621 if (cmsg->cmsg_len != CMSG_LEN(sizeof (rds_rdma_cookie_t)) ||
622 622 rm->m_rdma_cookie != 0)
623 623 return (-EINVAL);
624 624
625 625 (void) memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg),
626 626 sizeof (rm->m_rdma_cookie));
627 627
628 628 /*
629 629 * We are reusing a previously mapped MR here. Most likely, the
630 630 * application has written to the buffer, so we need to explicitly
631 631 * flush those writes to RAM. Otherwise the HCA may not see them
632 632 * when doing a DMA from that buffer.
633 633 */
634 634 r_key = rdsv3_rdma_cookie_key(rm->m_rdma_cookie);
635 635
636 636 mutex_enter(&rs->rs_rdma_lock);
637 637 mr = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
638 638 if (!mr)
639 639 err = -EINVAL; /* invalid r_key */
640 640 else
641 - atomic_add_32(&mr->r_refcount, 1);
641 + atomic_inc_32(&mr->r_refcount);
642 642 mutex_exit(&rs->rs_rdma_lock);
643 643
644 644 if (mr) {
645 645 mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
646 646 rm->m_rdma_mr = mr;
647 647 }
648 648 return (err);
649 649 }
650 650
651 651 /*
652 652 * The application passes us an address range it wants to enable RDMA
653 653 * to/from. We map the area, and save the <R_Key,offset> pair
654 654 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
655 655 * in an extension header.
656 656 */
657 657 int
658 658 rdsv3_cmsg_rdma_map(struct rdsv3_sock *rs, struct rdsv3_message *rm,
659 659 struct cmsghdr *cmsg)
660 660 {
661 661 /* uint64_t alignment on the buffer */
662 662 uint64_t buf[CEIL(CMSG_LEN(sizeof (struct rds_get_mr_args)),
663 663 sizeof (uint64_t))];
664 664 int status;
665 665
666 666 if (cmsg->cmsg_len != CMSG_LEN(sizeof (struct rds_get_mr_args)) ||
667 667 rm->m_rdma_cookie != 0)
668 668 return (-EINVAL);
669 669
670 670 ASSERT(sizeof (buf) >= cmsg->cmsg_len && ((uintptr_t)buf & 0x7) == 0);
671 671
672 672 bcopy(CMSG_DATA(cmsg), (char *)buf, cmsg->cmsg_len);
673 673 status = __rdsv3_rdma_map(rs, (struct rds_get_mr_args *)buf,
674 674 &rm->m_rdma_cookie, &rm->m_rdma_mr);
675 675
676 676 return (status);
677 677 }
(26 lines elided; remainder of rdma.c not shown)
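
Two notes on patterns touched by this diff follow; both are illustrative sketches, not code from the webrev.

First, the drop side of the r_refcount idiom. rdsv3_mr_put() is declared elsewhere (rdma.h) and does not appear above; below is a decrement-side sketch on a hypothetical object, under the assumption that the final reference triggers teardown, using the real atomic_dec_32_nv() primitive:

	#include <sys/atomic.h>

	typedef struct obj {
		volatile uint32_t o_refcount;	/* illustrative counter */
	} obj_t;

	extern void obj_destroy(obj_t *);	/* hypothetical teardown routine */

	/*
	 * atomic_dec_32_nv() returns the post-decrement value, so the
	 * thread that drops the last reference (new value 0) is the one
	 * that tears the object down.
	 */
	static void
	obj_rele(obj_t *op)
	{
		if (atomic_dec_32_nv(&op->o_refcount) == 0)
			obj_destroy(op);
	}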
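Second, the rdma cookie. The comments above describe building a 64-bit cookie from an <R_Key, offset> pair (see __rdsv3_rdma_map() and rdsv3_rdma_prepare()), consumed through rdsv3_rdma_make_cookie(), rdsv3_rdma_cookie_key() and rdsv3_rdma_cookie_offset(), all defined in rdma.h rather than here. The sketch below shows a packing scheme consistent with those call sites; the exact bit layout (key in the low 32 bits, offset in the high 32) is an assumption:

	#include <sys/types.h>

	typedef uint64_t my_rdma_cookie_t;	/* stand-in for rds_rdma_cookie_t */

	/* Pack the R_Key into the low 32 bits, the byte offset into the high 32. */
	static my_rdma_cookie_t
	sketch_make_cookie(uint32_t r_key, uint32_t offset)
	{
		return (((uint64_t)offset << 32) | r_key);
	}

	static uint32_t
	sketch_cookie_key(my_rdma_cookie_t cookie)
	{
		return ((uint32_t)cookie);
	}

	static uint32_t
	sketch_cookie_offset(my_rdma_cookie_t cookie)
	{
		return ((uint32_t)(cookie >> 32));
	}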