/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"
#ifdef CONFIG_BNXT_SRIOV
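/* Forward an async event completion to a single VF (identified by vf) or,
 * when vf is NULL, broadcast it to all VFs using the 0xffff target id.
 */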
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
		goto fwd_async_event_cmpl_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
			   resp->error_code);
		rc = -1;
	}
fwd_async_event_cmpl_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
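
/* Common validation for all VF ndo calls: the PF must be up, SR-IOV must be
 * enabled, and the VF id must be within the range of active VFs.
 */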
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called while PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called while sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}
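
/* Backs the .ndo_set_vf_spoofchk operation: toggles source MAC address
 * checking for the VF via HWRM_FUNC_CFG.
 */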
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	func_flags = vf->func_flags;
	if (setting)
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->func_flags = func_flags;
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;

	if (bnxt_vf_ndo_prep(bp, vf_id))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	if (trusted)
		vf->flags |= BNXT_VF_TRUST;
	else
		vf->flags &= ~BNXT_VF_TRUST;
	return 0;
}
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	if (vf->flags & BNXT_VF_QOS)
		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	else
		ivi->qos = 0;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	ivi->trusted = !!(vf->flags & BNXT_VF_TRUST);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject bc or mc mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
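
/* Backs the .ndo_set_vf_vlan operation: programs a default VLAN for the VF;
 * only 802.1Q tags without a user priority are accepted.
 */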
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: needed to implement proper handling of user priority,
	 * currently fail the command if there is valid priority
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}
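
/* Backs the .ndo_set_vf_rate operation: bounds-checks the requested rates
 * against the PF link speed before programming min/max bandwidth.
 */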
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}
static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};
	int i, rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}
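
/* Allocate per-VF state and the DMA-coherent pages that receive HWRM
 * commands forwarded from the VFs, carving each page into
 * BNXT_HWRM_REQS_PER_PAGE fixed-size request buffers.
 */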
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);
		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VF's */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}
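
/* Register the forwarded-command pages with the firmware so it knows where
 * to place HWRM requests coming from the VFs.
 */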
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_vf_resource_cfg_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0, min = 1;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

	vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
	vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
	req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		min = 0;
		req.min_rsscos_ctx = cpu_to_le16(min);
	}
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		req.min_cmpl_rings = cpu_to_le16(min);
		req.min_tx_rings = cpu_to_le16(min);
		req.min_rx_rings = cpu_to_le16(min);
		req.min_l2_ctxs = cpu_to_le16(min);
		req.min_vnics = cpu_to_le16(min);
		req.min_stat_ctx = cpu_to_le16(min);
		req.min_hw_ring_grps = cpu_to_le16(min);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		vf_vnics /= num_vfs;
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;

		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
		req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
		req.min_vnics = cpu_to_le16(vf_vnics);
		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	}
	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
	req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req.max_vnics = cpu_to_le16(vf_vnics);
	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = pf->first_vf_id + i;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		u16 n = pf->active_vfs;

		hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
		hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
		hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
					     n;
		hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
		hw_resc->max_rsscos_ctxs -= pf->active_vfs;
		hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
		hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;

		rc = pf->active_vfs;
	}
	return rc;
}
/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_ring_grps, max_stat_ctxs;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int total_vf_tx_rings = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	max_stat_ctxs = hw_resc->max_stat_ctxs;

	/* Remaining rings are distributed equally among VF's for now */
	vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
		       bp->cp_nr_rings) / num_vfs;
	vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		rc = pf->active_vfs;
	}
	return rc;
}
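
/* Dispatch to the resource-manager style reservation when the firmware
 * supports it, else fall back to the legacy per-VF function config.
 */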
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
{
	if (BNXT_NEW_RM(bp))
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}
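
/* Walk down from the requested VF count until the remaining PF resources
 * can give every VF at least one RX ring, one TX ring, and one RSS context,
 * then allocate buffers, reserve resources, and enable SR-IOV in PCI.
 */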
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable the requested num of vf's. At a minimum
	 * we require 1 RX and 1 TX ring for each VF. In this minimum conf
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
	avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			goto err_out2;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
			    rc);
		*num_vfs = rc;
	}

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	bnxt_ulp_sriov_cfg(bp, *num_vfs);

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VF's */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}
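
/* Tear down SR-IOV: destroy VF representors, release firmware and DMA
 * resources, and let the PF reclaim the rings reserved for VFs. VFs still
 * assigned to VMs cannot be freed; they are only notified of the unload.
 */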
void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	mutex_lock(&bp->sriov_lock);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VF's */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	mutex_unlock(&bp->sriov_lock);

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;
	/* Reclaim all resources for the PF. */
	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();

	bnxt_ulp_sriov_cfg(bp, 0);
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "SRIOV is not allowed when the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Rejecting SRIOV config request since interface is down\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if enabled VFs is same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previous existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}
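
/* The next three helpers service HWRM commands that VFs send through the
 * PF: bnxt_hwrm_fwd_resp() returns a PF-crafted response to the VF,
 * bnxt_hwrm_fwd_err_resp() rejects the command, and
 * bnxt_hwrm_exec_fwd_resp() asks the firmware to execute it as-is.
 */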
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}
fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}
fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}
exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Allow VF to set a valid MAC address, if trust is set to on or
	 * if the PF assigned MAC address is zero
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    ((vf->flags & BNXT_VF_TRUST) ||
		     !is_valid_ether_addr(vf->mac_addr) ||
		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow VF to set a valid MAC address, if trust is set to on.
	 * Or VF MAC address must first match MAC address in PF's context.
	 * Otherwise, it must match the VF MAC address if firmware spec >=
	 * 1.2.2
	 */
	if (vf->flags & BNXT_VF_TRUST) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* There are two cases:
		 * 1. If firmware spec < 0x10202, the VF MAC address is not
		 *    forwarded to the PF and so it doesn't have to match.
		 * 2. Allow VF to modify its own MAC when the PF has not
		 *    assigned a valid MAC address and firmware spec >=
		 *    0x10202.
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}
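
/* Validate a command forwarded from a VF and either execute it, rewrite the
 * response (link state), or reject it, based on the HWRM request type.
 */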
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VF's and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}
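
/* Runs on the VF side: query the firmware via HWRM_FUNC_QCAPS and refresh
 * the PF-assigned MAC address.
 */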
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware. There are 2 cases:
	 * 1. MAC address is valid. It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero. The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to
	 *    change the random MAC address using ndo_set_mac_address() if
	 *    desired.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}
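
/* Ask the PF (via HWRM_FUNC_VF_CFG) to approve a MAC address the VF user
 * wants to set; with older firmware the address cannot be validated and a
 * failure is only returned in strict mode.
 */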
int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc && strict) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
		return rc;
	}
	return 0;
}
#else

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	return 0;
}
#endif