/*
 * drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c (linux.git)
 * Relevant fix: "net/mlx5: DR, Fix error return code in dr_domain_init_resources()"
 */
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include <linux/mlx5/eswitch.h>
5 #include "dr_types.h"
6
7 static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
8 {
9         /* Per vport cached FW FT for checksum recalculation, this
10          * recalculation is needed due to a HW bug.
11          */
12         dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
13                                           sizeof(dmn->cache.recalc_cs_ft[0]),
14                                           GFP_KERNEL);
15         if (!dmn->cache.recalc_cs_ft)
16                 return -ENOMEM;
17
18         return 0;
19 }
20
21 static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
22 {
23         int i;
24
25         for (i = 0; i < dmn->info.caps.num_vports; i++) {
26                 if (!dmn->cache.recalc_cs_ft[i])
27                         continue;
28
29                 mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
30         }
31
32         kfree(dmn->cache.recalc_cs_ft);
33 }
34
35 int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
36                                               u32 vport_num,
37                                               u64 *rx_icm_addr)
38 {
39         struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
40
41         recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
42         if (!recalc_cs_ft) {
43                 /* Table not in cache, need to allocate a new one */
44                 recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
45                 if (!recalc_cs_ft)
46                         return -EINVAL;
47
48                 dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
49         }
50
51         *rx_icm_addr = recalc_cs_ft->rx_icm_addr;
52
53         return 0;
54 }
55
56 static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
57 {
58         int ret;
59
60         ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
61         if (ret) {
62                 mlx5dr_dbg(dmn, "Couldn't allocate PD\n");
63                 return ret;
64         }
65
66         dmn->uar = mlx5_get_uars_page(dmn->mdev);
67         if (!dmn->uar) {
68                 mlx5dr_err(dmn, "Couldn't allocate UAR\n");
69                 ret = -ENOMEM;
70                 goto clean_pd;
71         }
72
73         dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
74         if (!dmn->ste_icm_pool) {
75                 mlx5dr_err(dmn, "Couldn't get icm memory for %s\n",
76                            dev_name(dmn->mdev->device));
77                 ret = -ENOMEM;
78                 goto clean_uar;
79         }
80
81         dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
82         if (!dmn->action_icm_pool) {
83                 mlx5dr_err(dmn, "Couldn't get action icm memory for %s\n",
84                            dev_name(dmn->mdev->device));
85                 ret = -ENOMEM;
86                 goto free_ste_icm_pool;
87         }
88
89         ret = mlx5dr_send_ring_alloc(dmn);
90         if (ret) {
91                 mlx5dr_err(dmn, "Couldn't create send-ring for %s\n",
92                            dev_name(dmn->mdev->device));
93                 goto free_action_icm_pool;
94         }
95
96         return 0;
97
98 free_action_icm_pool:
99         mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
100 free_ste_icm_pool:
101         mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
102 clean_uar:
103         mlx5_put_uars_page(dmn->mdev, dmn->uar);
104 clean_pd:
105         mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
106
107         return ret;
108 }
109
/* Release domain HW resources in strict reverse order of
 * dr_domain_init_resources(): send ring, ICM pools, UAR page, PD.
 */
static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}
118
119 static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
120                                  bool other_vport,
121                                  u16 vport_number)
122 {
123         struct mlx5dr_cmd_vport_cap *vport_caps;
124         int ret;
125
126         vport_caps = &dmn->info.caps.vports_caps[vport_number];
127
128         ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
129                                                  other_vport,
130                                                  vport_number,
131                                                  &vport_caps->icm_address_rx,
132                                                  &vport_caps->icm_address_tx);
133         if (ret)
134                 return ret;
135
136         ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
137                                     other_vport,
138                                     vport_number,
139                                     &vport_caps->vport_gvmi);
140         if (ret)
141                 return ret;
142
143         vport_caps->num = vport_number;
144         vport_caps->vhca_gvmi = dmn->info.caps.gvmi;
145
146         return 0;
147 }
148
149 static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
150 {
151         struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
152         struct mlx5dr_cmd_vport_cap *wire_vport;
153         int vport;
154         int ret;
155
156         /* Query vports (except wire vport) */
157         for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
158                 ret = dr_domain_query_vport(dmn, !!vport, vport);
159                 if (ret)
160                         return ret;
161         }
162
163         /* Last vport is the wire port */
164         wire_vport = &dmn->info.caps.vports_caps[vport];
165         wire_vport->num = WIRE_PORT;
166         wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
167         wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
168         wire_vport->vport_gvmi = 0;
169         wire_vport->vhca_gvmi = dmn->info.caps.gvmi;
170
171         return 0;
172 }
173
174 static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
175                                     struct mlx5dr_domain *dmn)
176 {
177         int ret;
178
179         if (!dmn->info.caps.eswitch_manager)
180                 return -EOPNOTSUPP;
181
182         ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
183         if (ret)
184                 return ret;
185
186         dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
187         dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
188         dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
189
190         dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
191                                              sizeof(dmn->info.caps.vports_caps[0]),
192                                              GFP_KERNEL);
193         if (!dmn->info.caps.vports_caps)
194                 return -ENOMEM;
195
196         ret = dr_domain_query_vports(dmn);
197         if (ret) {
198                 mlx5dr_dbg(dmn, "Failed to query vports caps\n");
199                 goto free_vports_caps;
200         }
201
202         dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;
203
204         return 0;
205
206 free_vports_caps:
207         kfree(dmn->info.caps.vports_caps);
208         dmn->info.caps.vports_caps = NULL;
209         return ret;
210 }
211
212 static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
213                                struct mlx5dr_domain *dmn)
214 {
215         struct mlx5dr_cmd_vport_cap *vport_cap;
216         int ret;
217
218         if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
219                 mlx5dr_dbg(dmn, "Failed to allocate domain, bad link type\n");
220                 return -EOPNOTSUPP;
221         }
222
223         dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);
224
225         ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
226         if (ret)
227                 return ret;
228
229         ret = dr_domain_query_fdb_caps(mdev, dmn);
230         if (ret)
231                 return ret;
232
233         switch (dmn->type) {
234         case MLX5DR_DOMAIN_TYPE_NIC_RX:
235                 if (!dmn->info.caps.rx_sw_owner)
236                         return -ENOTSUPP;
237
238                 dmn->info.supp_sw_steering = true;
239                 dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
240                 dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
241                 dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
242                 break;
243         case MLX5DR_DOMAIN_TYPE_NIC_TX:
244                 if (!dmn->info.caps.tx_sw_owner)
245                         return -ENOTSUPP;
246
247                 dmn->info.supp_sw_steering = true;
248                 dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
249                 dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
250                 dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
251                 break;
252         case MLX5DR_DOMAIN_TYPE_FDB:
253                 if (!dmn->info.caps.eswitch_manager)
254                         return -ENOTSUPP;
255
256                 if (!dmn->info.caps.fdb_sw_owner)
257                         return -ENOTSUPP;
258
259                 dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
260                 dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
261                 vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
262                 if (!vport_cap) {
263                         mlx5dr_dbg(dmn, "Failed to get esw manager vport\n");
264                         return -ENOENT;
265                 }
266
267                 dmn->info.supp_sw_steering = true;
268                 dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
269                 dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
270                 dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
271                 dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
272                 break;
273         default:
274                 mlx5dr_dbg(dmn, "Invalid domain\n");
275                 ret = -EINVAL;
276                 break;
277         }
278
279         return ret;
280 }
281
282 static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
283 {
284         kfree(dmn->info.caps.vports_caps);
285 }
286
287 struct mlx5dr_domain *
288 mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
289 {
290         struct mlx5dr_domain *dmn;
291         int ret;
292
293         if (type > MLX5DR_DOMAIN_TYPE_FDB)
294                 return NULL;
295
296         dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
297         if (!dmn)
298                 return NULL;
299
300         dmn->mdev = mdev;
301         dmn->type = type;
302         refcount_set(&dmn->refcount, 1);
303         mutex_init(&dmn->mutex);
304
305         if (dr_domain_caps_init(mdev, dmn)) {
306                 mlx5dr_dbg(dmn, "Failed init domain, no caps\n");
307                 goto free_domain;
308         }
309
310         dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
311         dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
312                                             dmn->info.caps.log_icm_size);
313
314         if (!dmn->info.supp_sw_steering) {
315                 mlx5dr_err(dmn, "SW steering not supported for %s\n",
316                            dev_name(mdev->device));
317                 goto uninit_caps;
318         }
319
320         /* Allocate resources */
321         ret = dr_domain_init_resources(dmn);
322         if (ret) {
323                 mlx5dr_err(dmn, "Failed init domain resources for %s\n",
324                            dev_name(mdev->device));
325                 goto uninit_caps;
326         }
327
328         ret = dr_domain_init_cache(dmn);
329         if (ret) {
330                 mlx5dr_err(dmn, "Failed initialize domain cache\n");
331                 goto uninit_resourses;
332         }
333
334         /* Init CRC table for htbl CRC calculation */
335         mlx5dr_crc32_init_table();
336
337         return dmn;
338
339 uninit_resourses:
340         dr_domain_uninit_resources(dmn);
341 uninit_caps:
342         dr_domain_caps_uninit(dmn);
343 free_domain:
344         kfree(dmn);
345         return NULL;
346 }
347
/* Assure synchronization of the device steering tables with updates made by SW
 * insertion.
 *
 * MLX5DR_DOMAIN_SYNC_FLAGS_SW drains the domain's send ring (under the
 * domain mutex); MLX5DR_DOMAIN_SYNC_FLAGS_HW asks FW to flush its
 * steering caches. Flags may be combined; HW sync runs only if SW sync
 * succeeded. Returns 0 or a negative errno.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
	int ret = 0;

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
		mutex_lock(&dmn->mutex);
		ret = mlx5dr_send_ring_force_drain(dmn);
		mutex_unlock(&dmn->mutex);
		if (ret)
			return ret;
	}

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
		ret = mlx5dr_cmd_sync_steering(dmn->mdev);

	return ret;
}
368
/* Destroy a steering domain.
 *
 * Fails with -EBUSY while other objects (tables, actions, a peer
 * domain) still hold references on it. On success, the HW is synced
 * and the cache, HW resources, caps, and the domain itself are freed
 * in reverse order of creation.
 */
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (refcount_read(&dmn->refcount) > 1)
		return -EBUSY;

	/* make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	dr_domain_uninit_cache(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	mutex_destroy(&dmn->mutex);
	kfree(dmn);
	return 0;
}
383
384 void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
385                             struct mlx5dr_domain *peer_dmn)
386 {
387         mutex_lock(&dmn->mutex);
388
389         if (dmn->peer_dmn)
390                 refcount_dec(&dmn->peer_dmn->refcount);
391
392         dmn->peer_dmn = peer_dmn;
393
394         if (dmn->peer_dmn)
395                 refcount_inc(&dmn->peer_dmn->refcount);
396
397         mutex_unlock(&dmn->mutex);
398 }