asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/infiniband/hw/mlx5/main.c
Merge branch 'mlx5-packet-credit-fc' into rdma.git
[linux.git] / drivers / infiniband / hw / mlx5 / main.c
index f985d0d9b883743ddd47d34d26a1fd1130aad019..1b2e5465b882960b43a2b0c7c3a2c5fe77eb41a6 100644 (file)
@@ -1766,7 +1766,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 #endif
 
        if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
-               err = mlx5_ib_devx_create(dev);
+               err = mlx5_ib_devx_create(dev, true);
                if (err < 0)
                        goto out_uars;
                context->devx_uid = err;
@@ -3720,7 +3720,8 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
                      struct mlx5_flow_destination *dst,
                      struct mlx5_ib_flow_matcher  *fs_matcher,
                      struct mlx5_flow_act *flow_act,
-                     void *cmd_in, int inlen)
+                     void *cmd_in, int inlen,
+                     int dst_num)
 {
        struct mlx5_ib_flow_handler *handler;
        struct mlx5_flow_spec *spec;
@@ -3742,7 +3743,7 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
        spec->match_criteria_enable = fs_matcher->match_criteria_enable;
 
        handler->rule = mlx5_add_flow_rules(ft, spec,
-                                           flow_act, dst, 1);
+                                           flow_act, dst, dst_num);
 
        if (IS_ERR(handler->rule)) {
                err = PTR_ERR(handler->rule);
@@ -3805,12 +3806,14 @@ struct mlx5_ib_flow_handler *
 mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
                        struct mlx5_ib_flow_matcher *fs_matcher,
                        struct mlx5_flow_act *flow_act,
+                       u32 counter_id,
                        void *cmd_in, int inlen, int dest_id,
                        int dest_type)
 {
        struct mlx5_flow_destination *dst;
        struct mlx5_ib_flow_prio *ft_prio;
        struct mlx5_ib_flow_handler *handler;
+       int dst_num = 0;
        bool mcast;
        int err;
 
@@ -3820,7 +3823,7 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
        if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
                return ERR_PTR(-ENOMEM);
 
-       dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+       dst = kzalloc(sizeof(*dst) * 2, GFP_KERNEL);
        if (!dst)
                return ERR_PTR(-ENOMEM);
 
@@ -3834,20 +3837,28 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
        }
 
        if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
-               dst->type = dest_type;
-               dst->tir_num = dest_id;
+               dst[dst_num].type = dest_type;
+               dst[dst_num].tir_num = dest_id;
                flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
-               dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
-               dst->ft_num = dest_id;
+               dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
+               dst[dst_num].ft_num = dest_id;
                flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        } else {
-               dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+               dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
                flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
        }
 
+       dst_num++;
+
+       if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+               dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+               dst[dst_num].counter_id = counter_id;
+               dst_num++;
+       }
+
        handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
-                                       cmd_in, inlen);
+                                       cmd_in, inlen, dst_num);
 
        if (IS_ERR(handler)) {
                err = PTR_ERR(handler);
@@ -5389,14 +5400,6 @@ static void init_delay_drop(struct mlx5_ib_dev *dev)
                mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
 }
 
-static const struct cpumask *
-mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
-{
-       struct mlx5_ib_dev *dev = to_mdev(ibdev);
-
-       return mlx5_comp_irq_get_affinity_mask(dev->mdev, comp_vector);
-}
-
 /* The mlx5_ib_multiport_mutex should be held when calling this function */
 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
                                      struct mlx5_ib_multiport_info *mpi)
@@ -5624,30 +5627,17 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE(
        UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
                             enum mlx5_ib_uapi_flow_action_flags));
 
-static int populate_specs_root(struct mlx5_ib_dev *dev)
-{
-       const struct uverbs_object_tree_def **trees = dev->driver_trees;
-       size_t num_trees = 0;
-
-       if (mlx5_accel_ipsec_device_caps(dev->mdev) &
-           MLX5_ACCEL_IPSEC_CAP_DEVICE)
-               trees[num_trees++] = &mlx5_ib_flow_action;
-
-       if (MLX5_CAP_DEV_MEM(dev->mdev, memic))
-               trees[num_trees++] = &mlx5_ib_dm;
-
-       if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
-           MLX5_GENERAL_OBJ_TYPES_CAP_UCTX)
-               trees[num_trees++] = mlx5_ib_get_devx_tree();
-
-       num_trees += mlx5_ib_get_flow_trees(trees + num_trees);
-
-       WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees));
-       trees[num_trees] = NULL;
-       dev->ib_dev.driver_specs = trees;
+static const struct uapi_definition mlx5_ib_defs[] = {
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+       UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
+       UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
+#endif
 
-       return 0;
-}
+       UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
+                               &mlx5_ib_flow_action),
+       UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
+       {}
+};
 
 static int mlx5_ib_read_counters(struct ib_counters *counters,
                                 struct ib_counters_read_attr *read_attr,
@@ -5910,7 +5900,6 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
        dev->ib_dev.map_mr_sg           = mlx5_ib_map_mr_sg;
        dev->ib_dev.check_mr_status     = mlx5_ib_check_mr_status;
        dev->ib_dev.get_dev_fw_str      = get_dev_fw_str;
-       dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
        if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
            IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
                dev->ib_dev.rdma_netdev_get_params = mlx5_ib_rn_get_params;
@@ -5953,14 +5942,22 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
        dev->ib_dev.uverbs_ex_cmd_mask |=
                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
                        (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
-       dev->ib_dev.create_flow_action_esp = mlx5_ib_create_flow_action_esp;
+       if (mlx5_accel_ipsec_device_caps(dev->mdev) &
+           MLX5_ACCEL_IPSEC_CAP_DEVICE) {
+               dev->ib_dev.create_flow_action_esp =
+                       mlx5_ib_create_flow_action_esp;
+               dev->ib_dev.modify_flow_action_esp =
+                       mlx5_ib_modify_flow_action_esp;
+       }
        dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action;
-       dev->ib_dev.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp;
        dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
        dev->ib_dev.create_counters = mlx5_ib_create_counters;
        dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters;
        dev->ib_dev.read_counters = mlx5_ib_read_counters;
 
+       if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
+               dev->ib_dev.driver_def = mlx5_ib_defs;
+
        err = init_node_data(dev);
        if (err)
                return err;
@@ -6173,11 +6170,6 @@ void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
        mlx5_free_bfreg(dev->mdev, &dev->bfreg);
 }
 
-static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev)
-{
-       return populate_specs_root(dev);
-}
-
 int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
 {
        const char *name;
@@ -6245,7 +6237,7 @@ static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
 {
        int uid;
 
-       uid = mlx5_ib_devx_create(dev);
+       uid = mlx5_ib_devx_create(dev, false);
        if (uid > 0)
                dev->devx_whitelist_uid = uid;
 
@@ -6339,9 +6331,6 @@ static const struct mlx5_ib_profile pf_profile = {
        STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
                     NULL,
                     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
-       STAGE_CREATE(MLX5_IB_STAGE_SPECS,
-                    mlx5_ib_stage_populate_specs,
-                    NULL),
        STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
                     mlx5_ib_stage_devx_init,
                     mlx5_ib_stage_devx_cleanup),
@@ -6393,9 +6382,6 @@ static const struct mlx5_ib_profile nic_rep_profile = {
        STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
                     NULL,
                     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
-       STAGE_CREATE(MLX5_IB_STAGE_SPECS,
-                    mlx5_ib_stage_populate_specs,
-                    NULL),
        STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
                     mlx5_ib_stage_ib_reg_init,
                     mlx5_ib_stage_ib_reg_cleanup),