IB/mlx5: Add ODP WQE handlers for kernel QPs
author    Moni Shoua <monis@mellanox.com>
          Wed, 15 Jan 2020 12:43:34 +0000 (14:43 +0200)
committer Leon Romanovsky <leonro@mellanox.com>
          Thu, 16 Jan 2020 14:14:47 +0000 (16:14 +0200)
One of the steps in the ODP page fault handler for WQEs is to read a
WQE from a QP send queue or receive queue buffer at a specific index.
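
For reference, the index-to-offset mapping used by the common read
helper (visible in mlx5_ib_read_user_wqe_common() in the qp.c hunk
below) boils down to the following; the helper name here is purely
illustrative and not part of the patch:

	/*
	 * Illustrative sketch: map a WQE index to a byte offset inside the
	 * (circular) queue buffer; each slot is (1 << wq_wqe_shift) bytes.
	 */
	static size_t wqe_index_to_offset(int wqe_index, int wq_offset,
					  int wq_wqe_cnt, int wq_wqe_shift)
	{
		return wq_offset +
		       (((size_t)wqe_index % wq_wqe_cnt) << wq_wqe_shift);
	}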

Since the implementation of this buffer differs between kernel and
user QPs, the handler needs to be aware of the difference and handle
each case accordingly.
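
A minimal sketch of the two cases, assuming the user-QP helper copies
from the pinned user buffer with ib_umem_copy_from() (the kernel-QP
side uses mlx5_frag_buf_get_wqe() exactly as in the qp.c hunk below);
the read_*_stride() helper names here are hypothetical:

	/* user QP: the queue buffer is user memory pinned in an ib_umem */
	static int read_user_stride(struct ib_umem *umem, void *dst,
				    size_t offset, size_t len)
	{
		/* ib_umem_copy_from() returns 0 on success or -EINVAL */
		return ib_umem_copy_from(dst, umem, offset, len);
	}

	/*
	 * kernel QP: the send queue lives in a fragmented kernel buffer,
	 * so a stride is located with mlx5_frag_buf_get_wqe() and memcpy'd
	 */
	static void read_kernel_stride(struct mlx5_ib_qp *qp, int wqe_index,
				       void *dst, size_t len)
	{
		memcpy(dst, mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index), len);
	}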

ODP for kernel MRs is currently supported only for RDMA_READ and
RDMA_WRITE operations, so change the handler to
- read a WQE from a kernel QP send queue
- fail if access to a receive queue or shared receive queue is
  required for a kernel QP
The resulting dispatch is sketched below.
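
A minimal sketch of that dispatch, simplified from the new
mlx5_ib_read_wqe_sq()/mlx5_ib_read_wqe_rq() wrappers in the qp.c hunk
below (the buflen sanity checks are omitted here); a NULL umem
identifies a kernel QP:

	/* in mlx5_ib_read_wqe_sq(): kernel QPs use the new frag-buf reader */
	if (!umem)
		return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
						  buflen, bc);
	return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);

	/*
	 * in mlx5_ib_read_wqe_rq() and mlx5_ib_read_wqe_srq(): kernel QPs
	 * are rejected, since only the send-queue (RDMA_READ/RDMA_WRITE)
	 * path is supported
	 */
	if (!umem)
		return -EOPNOTSUPP;
	return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);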

Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c

diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index b06f32ff5748f51b64fc24640816621719d2fba4..77d495b2032dbd57dfd913200261150f53099499 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1153,12 +1153,12 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                      const struct ib_send_wr **bad_wr);
 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                      const struct ib_recv_wr **bad_wr);
-int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
-                            int buflen, size_t *bc);
-int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
-                            int buflen, size_t *bc);
-int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
-                             void *buffer, int buflen, size_t *bc);
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+                       size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+                       size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+                        size_t buflen, size_t *bc);
 int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                      struct ib_udata *udata);
 void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3b3ceb5acdd3a81abaa10885ac1eb2f88d62e066..3642c6a491c266b76ee81724d3826399506b8634 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1237,15 +1237,15 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
        wqe = wqe_start;
        qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
        if (qp && sq) {
-               ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
-                                              &bytes_copied);
+               ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
+                                         &bytes_copied);
                if (ret)
                        goto read_user;
                ret = mlx5_ib_mr_initiator_pfault_handler(
                        dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
        } else if (qp && !sq) {
-               ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
-                                              &bytes_copied);
+               ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
+                                         &bytes_copied);
                if (ret)
                        goto read_user;
                ret = mlx5_ib_mr_responder_pfault_handler_rq(
@@ -1253,8 +1253,8 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
        } else if (!qp) {
                struct mlx5_ib_srq *srq = res_to_srq(res);
 
-               ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
-                                               &bytes_copied);
+               ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
+                                          &bytes_copied);
                if (ret)
                        goto read_user;
                ret = mlx5_ib_mr_responder_pfault_handler_srq(
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7f0bde3135605ab4647187a4dcd8884cfbc2b9c2..ae7cbd9c9bca577d935de918898b86a83d1cdbb2 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -129,14 +129,10 @@ static int is_sqp(enum ib_qp_type qp_type)
  *
  * Return: zero on success, or an error code.
  */
-static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
-                                       void *buffer,
-                                       u32 buflen,
-                                       int wqe_index,
-                                       int wq_offset,
-                                       int wq_wqe_cnt,
-                                       int wq_wqe_shift,
-                                       int bcnt,
+static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
+                                       size_t buflen, int wqe_index,
+                                       int wq_offset, int wq_wqe_cnt,
+                                       int wq_wqe_shift, int bcnt,
                                        size_t *bytes_copied)
 {
        size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
@@ -160,11 +156,43 @@ static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
        return 0;
 }
 
-int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
-                            int wqe_index,
-                            void *buffer,
-                            int buflen,
-                            size_t *bc)
+static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+                                     void *buffer, size_t buflen, size_t *bc)
+{
+       struct mlx5_wqe_ctrl_seg *ctrl;
+       size_t bytes_copied = 0;
+       size_t wqe_length;
+       void *p;
+       int ds;
+
+       wqe_index = wqe_index & qp->sq.fbc.sz_m1;
+
+       /* read the control segment first */
+       p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+       ctrl = p;
+       ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+       wqe_length = ds * MLX5_WQE_DS_UNITS;
+
+       /* read rest of WQE if it spreads over more than one stride */
+       while (bytes_copied < wqe_length) {
+               size_t copy_length =
+                       min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);
+
+               if (!copy_length)
+                       break;
+
+               memcpy(buffer + bytes_copied, p, copy_length);
+               bytes_copied += copy_length;
+
+               wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
+               p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+       }
+       *bc = bytes_copied;
+       return 0;
+}
+
+static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+                                   void *buffer, size_t buflen, size_t *bc)
 {
        struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
        struct ib_umem *umem = base->ubuffer.umem;
@@ -176,18 +204,10 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
        int ret;
        int ds;
 
-       if (buflen < sizeof(*ctrl))
-               return -EINVAL;
-
        /* at first read as much as possible */
-       ret = mlx5_ib_read_user_wqe_common(umem,
-                                          buffer,
-                                          buflen,
-                                          wqe_index,
-                                          wq->offset,
-                                          wq->wqe_cnt,
-                                          wq->wqe_shift,
-                                          buflen,
+       ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+                                          wq->offset, wq->wqe_cnt,
+                                          wq->wqe_shift, buflen,
                                           &bytes_copied);
        if (ret)
                return ret;
@@ -210,13 +230,9 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
         * so read the remaining bytes starting
         * from  wqe_index 0
         */
-       ret = mlx5_ib_read_user_wqe_common(umem,
-                                          buffer + bytes_copied,
-                                          buflen - bytes_copied,
-                                          0,
-                                          wq->offset,
-                                          wq->wqe_cnt,
-                                          wq->wqe_shift,
+       ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
+                                          buflen - bytes_copied, 0, wq->offset,
+                                          wq->wqe_cnt, wq->wqe_shift,
                                           wqe_length - bytes_copied,
                                           &bytes_copied2);
 
@@ -226,11 +242,24 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
        return 0;
 }
 
-int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
-                            int wqe_index,
-                            void *buffer,
-                            int buflen,
-                            size_t *bc)
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+                       size_t buflen, size_t *bc)
+{
+       struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+       struct ib_umem *umem = base->ubuffer.umem;
+
+       if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
+               return -EINVAL;
+
+       if (!umem)
+               return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
+                                                 buflen, bc);
+
+       return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
+                                   void *buffer, size_t buflen, size_t *bc)
 {
        struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
        struct ib_umem *umem = base->ubuffer.umem;
@@ -238,14 +267,9 @@ int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
        size_t bytes_copied;
        int ret;
 
-       ret = mlx5_ib_read_user_wqe_common(umem,
-                                          buffer,
-                                          buflen,
-                                          wqe_index,
-                                          wq->offset,
-                                          wq->wqe_cnt,
-                                          wq->wqe_shift,
-                                          buflen,
+       ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+                                          wq->offset, wq->wqe_cnt,
+                                          wq->wqe_shift, buflen,
                                           &bytes_copied);
 
        if (ret)
@@ -254,25 +278,33 @@ int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
        return 0;
 }
 
-int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq,
-                             int wqe_index,
-                             void *buffer,
-                             int buflen,
-                             size_t *bc)
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+                       size_t buflen, size_t *bc)
+{
+       struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+       struct ib_umem *umem = base->ubuffer.umem;
+       struct mlx5_ib_wq *wq = &qp->rq;
+       size_t wqe_size = 1 << wq->wqe_shift;
+
+       if (buflen < wqe_size)
+               return -EINVAL;
+
+       if (!umem)
+               return -EOPNOTSUPP;
+
+       return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
+                                    void *buffer, size_t buflen, size_t *bc)
 {
        struct ib_umem *umem = srq->umem;
        size_t bytes_copied;
        int ret;
 
-       ret = mlx5_ib_read_user_wqe_common(umem,
-                                          buffer,
-                                          buflen,
-                                          wqe_index,
-                                          0,
-                                          srq->msrq.max,
-                                          srq->msrq.wqe_shift,
-                                          buflen,
-                                          &bytes_copied);
+       ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
+                                          srq->msrq.max, srq->msrq.wqe_shift,
+                                          buflen, &bytes_copied);
 
        if (ret)
                return ret;
@@ -280,6 +312,21 @@ int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq,
        return 0;
 }
 
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+                        size_t buflen, size_t *bc)
+{
+       struct ib_umem *umem = srq->umem;
+       size_t wqe_size = 1 << srq->msrq.wqe_shift;
+
+       if (buflen < wqe_size)
+               return -EINVAL;
+
+       if (!umem)
+               return -EOPNOTSUPP;
+
+       return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
+}
+
 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 {
        struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;