// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/mlx5/eswitch.h>
#include "dr_types.h"

static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
{
	/* Per vport cached FW FT for checksum recalculation, this
	 * recalculation is needed due to a HW bug.
	 */
	dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
					  sizeof(dmn->cache.recalc_cs_ft[0]),
					  GFP_KERNEL);
	if (!dmn->cache.recalc_cs_ft)
		return -ENOMEM;

	return 0;
}

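/* Destroy any checksum-recalculation FW tables that were created on demand
 * and free the per-vport cache array.
 */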
static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
{
	int i;

	for (i = 0; i < dmn->info.caps.num_vports; i++) {
		if (!dmn->cache.recalc_cs_ft[i])
			continue;

		mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
	}

	kfree(dmn->cache.recalc_cs_ft);
}

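/* Return the RX ICM address of the checksum-recalculation FW table for this
 * vport, creating and caching the table on first use.
 */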
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					      u32 vport_num,
					      u64 *rx_icm_addr)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;

	recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
	if (!recalc_cs_ft) {
		/* Table not in cache, need to allocate a new one */
		recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
		if (!recalc_cs_ft)
			return -EINVAL;

		dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
	}

	*rx_icm_addr = recalc_cs_ft->rx_icm_addr;

	return 0;
}

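/* Allocate the per-domain HW resources used by SW steering: a PD, a UAR
 * page, ICM pools for STEs and modify-header actions, and the send ring
 * used to write steering entries into ICM.
 */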
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
	if (ret) {
		mlx5dr_dbg(dmn, "Couldn't allocate PD\n");
		return ret;
	}

	dmn->uar = mlx5_get_uars_page(dmn->mdev);
	if (!dmn->uar) {
		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
		ret = -ENOMEM;
		goto clean_pd;
	}

	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!dmn->ste_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get icm memory for %s\n",
			   dev_name(dmn->mdev->device));
		ret = -ENOMEM;
		goto clean_uar;
	}

	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
	if (!dmn->action_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get action icm memory for %s\n",
			   dev_name(dmn->mdev->device));
		ret = -ENOMEM;
		goto free_ste_icm_pool;
	}

	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring for %s\n",
			   dev_name(dmn->mdev->device));
		goto free_action_icm_pool;
	}

	return 0;

free_action_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
clean_uar:
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

	return ret;
}

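/* Release domain resources in reverse order of allocation. */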
static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}

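/* Query a single vport: its RX/TX ICM steering addresses and its GVMI. */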
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
				 bool other_vport, u16 vport_number)
{
	struct mlx5dr_cmd_vport_cap *vport_caps;
	int ret;

	vport_caps = &dmn->info.caps.vports_caps[vport_number];

	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev, other_vport,
						 vport_number,
						 &vport_caps->icm_address_rx,
						 &vport_caps->icm_address_tx);
	if (ret)
		return ret;

	ret = mlx5dr_cmd_query_gvmi(dmn->mdev, other_vport, vport_number,
				    &vport_caps->vport_gvmi);
	if (ret)
		return ret;

	vport_caps->num = vport_number;
	vport_caps->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

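/* Query steering caps for all e-switch vports; the last entry describes the
 * wire (uplink) port.
 */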
static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
	struct mlx5dr_cmd_vport_cap *wire_vport;
	int vport;
	int ret;

	/* Query vports (except wire vport) */
	for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
		ret = dr_domain_query_vport(dmn, !!vport, vport);
		if (ret)
			return ret;
	}

	/* Last vport is the wire port */
	wire_vport = &dmn->info.caps.vports_caps[vport];
	wire_vport->num = WIRE_PORT;
	wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
	wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
	wire_vport->vport_gvmi = 0;
	wire_vport->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

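/* FDB caps are only available to the e-switch manager: query the e-switch
 * caps and fill the per-vport caps array used by FDB domains.
 */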
static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
				    struct mlx5dr_domain *dmn)
{
	int ret;

	if (!dmn->info.caps.eswitch_manager)
		return -EOPNOTSUPP;

	ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
	if (ret)
		return ret;

	dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

	dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
					     sizeof(dmn->info.caps.vports_caps[0]),
					     GFP_KERNEL);
	if (!dmn->info.caps.vports_caps)
		return -ENOMEM;

	ret = dr_domain_query_vports(dmn);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed to query vports caps\n");
		goto free_vports_caps;
	}

	dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;

	return 0;

free_vports_caps:
	kfree(dmn->info.caps.vports_caps);
	dmn->info.caps.vports_caps = NULL;
	return ret;
}

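/* Query device and e-switch caps and derive the per-domain-type defaults:
 * STE types, default and drop ICM addresses, and SW steering support.
 */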
static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
			       struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_cap;
	int ret;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
		mlx5dr_dbg(dmn, "Failed to allocate domain, bad link type\n");
		return -EOPNOTSUPP;
	}

	dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);

	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
	if (ret)
		return ret;

	ret = dr_domain_query_fdb_caps(mdev, dmn);
	if (ret)
		return ret;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		if (!dmn->info.caps.rx_sw_owner)
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
		dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		if (!dmn->info.caps.tx_sw_owner)
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
		dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		if (!dmn->info.caps.eswitch_manager)
			return -EOPNOTSUPP;

		if (!dmn->info.caps.fdb_sw_owner)
			return -EOPNOTSUPP;

		dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
		dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
		vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
		if (!vport_cap) {
			mlx5dr_dbg(dmn, "Failed to get esw manager vport\n");
			return -ENOENT;
		}

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
		dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
		break;
	default:
		mlx5dr_dbg(dmn, "Invalid domain\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
	kfree(dmn->info.caps.vports_caps);
}

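/* Create a SW steering domain of the given type (NIC RX, NIC TX or FDB)
 * on the given mlx5 device. Returns NULL on failure.
 */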
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
{
	struct mlx5dr_domain *dmn;
	int ret;

	if (type > MLX5DR_DOMAIN_TYPE_FDB)
		return NULL;

	dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
	if (!dmn)
		return NULL;

	dmn->mdev = mdev;
	dmn->type = type;
	refcount_set(&dmn->refcount, 1);
	mutex_init(&dmn->mutex);

	if (dr_domain_caps_init(mdev, dmn)) {
		mlx5dr_dbg(dmn, "Failed init domain, no caps\n");
		goto free_domain;
	}

	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
					    dmn->info.caps.log_icm_size);

	if (!dmn->info.supp_sw_steering) {
		mlx5dr_err(dmn, "SW steering not supported for %s\n",
			   dev_name(mdev->device));
		goto uninit_caps;
	}

	/* Allocate resources */
	ret = dr_domain_init_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed init domain resources for %s\n",
			   dev_name(mdev->device));
		goto uninit_caps;
	}

	ret = dr_domain_init_cache(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to initialize domain cache\n");
		goto uninit_resources;
	}

	/* Init CRC table for htbl CRC calculation */
	mlx5dr_crc32_init_table();

	return dmn;

uninit_resources:
	dr_domain_uninit_resources(dmn);
uninit_caps:
	dr_domain_caps_uninit(dmn);
free_domain:
	kfree(dmn);
	return NULL;
}

/* Ensure synchronization of the device steering tables with updates made by
 * SW insertion.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
	int ret = 0;

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
		mutex_lock(&dmn->mutex);
		ret = mlx5dr_send_ring_force_drain(dmn);
		mutex_unlock(&dmn->mutex);
		if (ret)
			return ret;
	}

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
		ret = mlx5dr_cmd_sync_steering(dmn->mdev);

	return ret;
}

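/* Destroy a domain; fails with -EBUSY while other objects still hold a
 * reference to it.
 */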
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (refcount_read(&dmn->refcount) > 1)
		return -EBUSY;

	/* make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	dr_domain_uninit_cache(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	mutex_destroy(&dmn->mutex);
	kfree(dmn);

	return 0;
}

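/* Set (or clear, when peer_dmn is NULL) this domain's peer domain and keep
 * the peer's reference count in sync.
 */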
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
			    struct mlx5dr_domain *peer_dmn)
{
	mutex_lock(&dmn->mutex);

	if (dmn->peer_dmn)
		refcount_dec(&dmn->peer_dmn->refcount);

	dmn->peer_dmn = peer_dmn;

	if (dmn->peer_dmn)
		refcount_inc(&dmn->peer_dmn->refcount);

	mutex_unlock(&dmn->mutex);
}