--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ ... @@
 static int
 fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 {
-	struct ib_send_wr *bad_wr;
-
-	return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, &bad_wr);
+	return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, NULL);
 }
 
 /* Invalidate all memory regions that were registered for "req".
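Passing NULL here is safe because the ib_post_send()/ib_post_recv() wrappers
in ib_verbs.h now substitute a dummy pointer before calling into the
provider, so drivers may keep writing *bad_wr unconditionally. A sketch of
the send wrapper as it looks around v4.19 (the exact path to the provider
op varies by kernel version):

static inline int ib_post_send(struct ib_qp *qp,
			       const struct ib_send_wr *send_wr,
			       const struct ib_send_wr **bad_send_wr)
{
	const struct ib_send_wr *dummy;

	/* Callers that don't care which WR failed may pass NULL;
	 * the provider still gets a valid out-pointer to write to.
	 */
	return qp->device->post_send(qp, send_wr, bad_send_wr ? : &dummy);
}

For a one-element WR chain like fmr_op_send()'s, the only WR that can fail
is the one just passed in, so the out-argument carried no information.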
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ ... @@
 static int
 frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 {
-	struct ib_send_wr *post_wr, *bad_wr;
+	struct ib_send_wr *post_wr;
 	struct rpcrdma_mr *mr;
 
 	post_wr = &req->rl_sendctx->sc_wr;
@@ ... @@
 	/* If ib_post_send fails, the next ->send_request for
 	 * @req will queue these MWs for recovery.
 	 */
-	return ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
+	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
 }
 
 /* Handle a remotely invalidated mr on the @mrs list
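Unlike the FMR path, frwr_op_send() posts a chain: the loop elided between
the two hunks above links one FastReg WR per registered MR in front of the
Send via ->next, then posts the head. bad_wr's job is to report where
posting stopped in such a chain; frwr can drop it because, as the comment
says, any MWs left unregistered are queued for recovery on the next
->send_request. A caller that does want that information keeps the
out-argument (post_chain_verbose() is a hypothetical helper, not kernel
code):

static int post_chain_verbose(struct ib_qp *qp, struct ib_send_wr *first)
{
	const struct ib_send_wr *bad_wr;
	int ret;

	ret = ib_post_send(qp, first, &bad_wr);
	if (ret)
		/* Everything from *bad_wr to the end of the chain was
		 * not posted and is still owned by the caller.
		 */
		pr_err("post_send failed at wr_id %llu\n", bad_wr->wr_id);
	return ret;
}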
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ ... @@
 static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
				 struct svc_rdma_recv_ctxt *ctxt)
 {
-	struct ib_recv_wr *bad_recv_wr;
 	int ret;
 
 	svc_xprt_get(&rdma->sc_xprt);
-	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, &bad_recv_wr);
+	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
 	trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
 	if (ret)
 		goto err_post;
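The receive side is symmetrical: each svc_rdma_recv_ctxt carries exactly
one pre-built Recv WR, and WRs are posted one at a time, so there is again
nothing for bad_recv_wr to say. A sketch of the resulting replenish pattern
(post_recv_ring() and its parameters are illustrative, not kernel code):

static int post_recv_ring(struct ib_qp *qp, struct ib_recv_wr *wrs, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		wrs[i].next = NULL;	/* one WR per verb call */
		ret = ib_post_recv(qp, &wrs[i], NULL);
		if (ret)
			return ret;	/* the failed WR is wrs[i] itself */
	}
	return 0;
}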
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ ... @@
 	do {
 		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
-			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
+			ret = ib_post_send(rdma->sc_qp, first_wr, NULL);
 			trace_svcrdma_post_rw(&cc->cc_cqe,
					      cc->cc_sqecount, ret);
 			if (ret)
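This hunk sits inside the send-queue accounting loop of svc_rdma_rw.c:
cc_sqecount SQ entries are reserved with atomic_sub_return() before the R/W
chain is posted, and are handed back if the queue is oversubscribed or the
post fails. A condensed sketch of that pattern (reserve_and_post() is an
illustrative helper; the real loop also sleeps on sc_send_wait and
retries):

static int reserve_and_post(struct ib_qp *qp, struct ib_send_wr *chain,
			    atomic_t *sq_avail, int sqecount)
{
	int ret;

	if (atomic_sub_return(sqecount, sq_avail) <= 0) {
		atomic_add(sqecount, sq_avail);	/* oversubscribed: undo */
		return -EAGAIN;			/* caller waits, then retries */
	}

	ret = ib_post_send(qp, chain, NULL);
	if (ret)
		atomic_add(sqecount, sq_avail);	/* post failed: undo */
	return ret;
}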
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ ... @@
  */
 int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
 {
-	struct ib_send_wr *bad_wr;
 	int ret;
 
 	might_sleep();
@@ ... @@
 		}
 
 		svc_xprt_get(&rdma->sc_xprt);
-		ret = ib_post_send(rdma->sc_qp, wr, &bad_wr);
+		ret = ib_post_send(rdma->sc_qp, wr, NULL);
 		trace_svcrdma_post_send(wr, ret);
 		if (ret) {
 			set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
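Taken together, the post-v4.19 calling convention looks like this minimal
sender, modeled on svc_rdma_send() above (xprt_post_send() is illustrative;
the real function also handles SQ-full waits and reference counting):

static int xprt_post_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
{
	int ret;

	ret = ib_post_send(rdma->sc_qp, wr, NULL);
	if (ret)
		/* svcrdma treats a failed post as fatal: force the
		 * transport closed so no further work is submitted.
		 */
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	return ret;
}

The net effect of the patch is mechanical: every call site that declared a
bad_wr variable only to satisfy the old ib_post_send()/ib_post_recv()
prototypes drops the variable and passes NULL instead.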