/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <net/addrconf.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}
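
/* rxe has no PHY of its own: port state and physical link state are
 * derived from the underlying Ethernet device in rxe_query_port()
 * below (port ACTIVE -> LINK_UP, netdev IFF_UP but not active ->
 * POLLING, otherwise DISABLED).
 */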
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	/* *attr being zeroed by the caller, avoid zeroing it here */

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = RDMA_LINK_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = RDMA_LINK_PHYS_STATE_POLLING;
	else
		attr->phys_state = RDMA_LINK_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}
static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);
static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}
static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}
static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}
static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}
static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}
static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
	struct rxe_ucontext *uc = to_ruc(ibuc);
static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}
static int rxe_dealloc_pd(struct ib_pd *ibpd)
	struct rxe_pd *pd = to_rpd(ibpd);
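
/* Address handle helpers: rxe_init_av() converts the generic
 * rdma_ah_attr into rxe's own address vector (struct rxe_av),
 * including the IP addressing information derived from the GIDs.
 */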
static void rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
			struct rxe_av *av)
{
	rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
	rxe_av_fill_ip_info(av, attr);
}
static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   u32 flags,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return ERR_PTR(err);

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	rxe_add_ref(pd);
	ah->pd = pd;

	rxe_init_av(rxe, attr, &ah->av);
	return &ah->ibah;
}
static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(rxe, attr, &ah->av);
	return 0;
}
static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);

	return 0;
}
static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);

	return 0;
}
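
/* Copy one ib_recv_wr into the next free slot of a receive queue
 * (ordinary RQ or SRQ) as a rxe_recv_wqe and publish it to the
 * responder by advancing the producer index.
 */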
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);

	return 0;

err1:
	return err;
}
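
/* The SRQ/QP/CQ create and resize paths share a pattern: when the call
 * comes from user space (udata != NULL), a response struct at
 * udata->outbuf is filled with what user space needs to map and use
 * the queue directly.
 */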
static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);

	err = rxe_srq_from_init(rxe, srq, init, context, uresp);
static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;

	return 0;
}
static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}
static int rxe_destroy_qp(struct ib_qp *ibqp)
	struct rxe_qp *qp = to_rqp(ibqp);
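
/* Send path helpers: validate_send_wr() checks a work request against
 * the send queue limits, init_send_wr()/init_send_wqe() translate the
 * ib_send_wr into a rxe_send_wqe, and post_one_send() places it on the
 * send queue under the SQ lock.
 */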
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}
static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;

	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}
static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);
		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}
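
/* For user QPs the WQE has already been written into the shared queue
 * by user space, so the verbs call below only kicks the requester
 * task; kernel QPs go through rxe_post_send_kernel() above.
 */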
static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}
static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}
static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
static int rxe_destroy_cq(struct ib_cq *ibcq)
	struct rxe_cq *cq = to_rcq(ibcq);
static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}
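
/* rxe_peek_cq() reports how many completions are queued without
 * consuming them; rxe_req_notify_cq() arms (or re-arms) completion
 * notification for the CQ.
 */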
static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);

	err = rxe_mem_init_dma(pd, access, mr);
static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);

	err = rxe_mem_init_user(pd, start, length, iova,
				udata, access, mr);
	if (err)
		goto err1;

	return &mr->ibmr;

err1:
	return ERR_PTR(err);
}
static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_ref(mr);

	return 0;
}
static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err1;

	return &mr->ibmr;

err1:
	return ERR_PTR(err);
}
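
/* rxe_set_page() is the per-page callback passed to ib_sg_to_pages()
 * in rxe_map_mr_sg() below; it records each page address in the MR's
 * physical buffer map.
 */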
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}
static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}
static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}
static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);
static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};
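
/* All ib_device callbacks implemented by this driver; installed on the
 * ib_device via ib_set_device_ops() in rxe_register_device().
 */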
static const struct ib_device_ops rxe_dev_ops = {
	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_netdev = rxe_get_netdev,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,
};
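
/* Fill in the ib_device, wire up the verbs ops and the uverbs command
 * mask, allocate the crc32 shash used for ICRC computation, then
 * register the device with the RDMA core.
 */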
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	ib_set_device_ops(dev, &rxe_dev_ops);

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, "rxe%d");
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	return 0;

err1:
	crypto_free_shash(rxe->tfm);

	return err;
}
void rxe_unregister_device(struct rxe_dev *rxe)
{
	struct ib_device *dev = &rxe->ib_dev;

	ib_unregister_device(dev);
}