static int
rdsv3_ib_recv_refill_one(struct rdsv3_connection *conn,
    struct rdsv3_ib_recv_work *recv)
{
	struct rdsv3_ib_connection *ic = conn->c_transport_data;
	ibt_mi_hdl_t mi_hdl;
	ibt_iov_attr_t iov_attr;
	ibt_iov_t iov_arr[1];

	RDSV3_DPRINTF5("rdsv3_ib_recv_refill_one", "conn: %p, recv: %p",
	    conn, recv);

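	/*
	 * Allocate the rdsv3_ib_incoming that will own the fragments for
	 * this message, charging it against the connection's receive
	 * allocation cap first.
	 */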
	if (!recv->r_ibinc) {
		if (!atomic_add_unless(&rdsv3_ib_allocation, 1,
		    ic->i_max_recv_alloc)) {
			rdsv3_ib_stats_inc(s_ib_rx_alloc_limit);
			goto out;
		}
		recv->r_ibinc = kmem_cache_alloc(rdsv3_ib_incoming_slab,
		    KM_NOSLEEP);
		if (recv->r_ibinc == NULL) {
			atomic_dec_32(&rdsv3_ib_allocation);
			goto out;
		}
		rdsv3_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
		recv->r_ibinc->ii_ibdev = ic->rds_ibdev;
		recv->r_ibinc->ii_pool = ic->rds_ibdev->inc_pool;
	}

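	/* Allocate the data fragment this work request will receive into. */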
	if (!recv->r_frag) {
		recv->r_frag = kmem_cache_alloc(ic->rds_ibdev->ib_frag_slab,
		    KM_NOSLEEP);
		if (!recv->r_frag)
			goto out;
	}

	/* Data sge, structure copy */
	recv->r_sge[1] = recv->r_frag->f_sge;

	RDSV3_DPRINTF5("rdsv3_ib_recv_refill_one", "Return: conn: %p, recv: %p",
	    conn, recv);

	return (0);
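	/*
	 * Unwind on failure: release the incoming struct, if one was
	 * allocated, and return the slot it reserved in rdsv3_ib_allocation.
	 */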
out:
	if (recv->r_ibinc) {
		kmem_cache_free(rdsv3_ib_incoming_slab, recv->r_ibinc);
		atomic_dec_32(&rdsv3_ib_allocation);
		recv->r_ibinc = NULL;
	}
	return (-ENOMEM);
}

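/*
 * For reference: a minimal sketch of the atomic_add_unless() semantics the
 * refill path above relies on (an assumption about the helper's contract,
 * not the driver's actual definition). It atomically adds "a" to *v unless
 * *v already equals the bound "u", and returns non-zero only when the add
 * happened. Built on the standard illumos atomic_cas_32() primitive from
 * <sys/atomic.h>.
 */
static int
atomic_add_unless_sketch(volatile uint32_t *v, uint32_t a, uint32_t u)
{
	uint32_t old;

	do {
		old = *v;
		if (old == u)
			return (0);	/* bound reached; nothing added */
	} while (atomic_cas_32(v, old, old + a) != old);

	return (1);	/* *v was below the bound and has been bumped */
}
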
/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets. The i_recv_mutex is held here so that ring_alloc and _unalloc
 * pairs don't go unmatched.
 *
 * -1 is returned if posting fails due to temporary resource exhaustion.
 */
int
rdsv3_ib_recv_refill(struct rdsv3_connection *conn, int prefill)
{
	struct rdsv3_ib_connection *ic = conn->c_transport_data;
	struct rdsv3_ib_recv_work *recv;
	unsigned int posted = 0;
	int ret = 0, avail;