1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
4 #include <linux/etherdevice.h>
5 #include <linux/of_net.h>
11 #include "i40e_diag.h"
13 #include <net/udp_tunnel.h>
14 #include <net/xdp_sock.h>
15 /* All i40e tracepoints are defined by the include below, which
16 * must be included exactly once across the whole kernel with
17 * CREATE_TRACE_POINTS defined
19 #define CREATE_TRACE_POINTS
20 #include "i40e_trace.h"
22 const char i40e_driver_name[] = "i40e";
23 static const char i40e_driver_string[] =
24 "Intel(R) Ethernet Connection XL710 Network Driver";
28 #define DRV_VERSION_MAJOR 2
29 #define DRV_VERSION_MINOR 8
30 #define DRV_VERSION_BUILD 20
31 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
32 __stringify(DRV_VERSION_MINOR) "." \
33 __stringify(DRV_VERSION_BUILD) DRV_KERN
34 const char i40e_driver_version_str[] = DRV_VERSION;
35 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
37 /* a few forward declarations */
38 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
39 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
40 static int i40e_add_vsi(struct i40e_vsi *vsi);
41 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
42 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
43 static int i40e_setup_misc_vector(struct i40e_pf *pf);
44 static void i40e_determine_queue_usage(struct i40e_pf *pf);
45 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
46 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
47 static int i40e_reset(struct i40e_pf *pf);
48 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
49 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
50 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
51 static bool i40e_check_recovery_mode(struct i40e_pf *pf);
52 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
53 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
54 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
55 static int i40e_get_capabilities(struct i40e_pf *pf,
56 enum i40e_admin_queue_opc list_type);
59 /* i40e_pci_tbl - PCI Device ID Table
61 * Last entry must be all 0s
63 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
64 * Class, Class Mask, private data (not used) }
66 static const struct pci_device_id i40e_pci_tbl[] = {
67 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
68 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
79 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
80 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
81 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
82 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
83 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
84 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
85 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
86 {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
87 {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
88 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
89 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
90 /* required last entry */
93 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
95 #define I40E_MAX_VF_COUNT 128
96 static int debug = -1;
97 module_param(debug, uint, 0);
98 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
100 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
101 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
102 MODULE_LICENSE("GPL v2");
103 MODULE_VERSION(DRV_VERSION);
105 static struct workqueue_struct *i40e_wq;
108 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
109 * @hw: pointer to the HW structure
110 * @mem: ptr to mem struct to fill out
111 * @size: size of memory requested
112 * @alignment: what to align the allocation to
114 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
115 u64 size, u32 alignment)
117 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
119 mem->size = ALIGN(size, alignment);
120 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
129 * i40e_free_dma_mem_d - OS specific memory free for shared code
130 * @hw: pointer to the HW structure
131 * @mem: ptr to mem struct to free
133 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
135 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
137 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
146 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
147 * @hw: pointer to the HW structure
148 * @mem: ptr to mem struct to fill out
149 * @size: size of memory requested
151 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
155 mem->va = kzalloc(size, GFP_KERNEL);
164 * i40e_free_virt_mem_d - OS specific memory free for shared code
165 * @hw: pointer to the HW structure
166 * @mem: ptr to mem struct to free
168 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
170 /* it's ok to kfree a NULL pointer */
179 * i40e_get_lump - find a lump of free generic resource
180 * @pf: board private structure
181 * @pile: the pile of resource to search
182 * @needed: the number of items needed
183 * @id: an owner id to stick on the items assigned
185 * Returns the base item index of the lump, or negative for error
187 * The search_hint trick and lack of advanced fit-finding only work
188 * because we're highly likely to have all the same size lump requests.
189 * Linear search time and any fragmentation should be minimal.
191 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
197 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
198 dev_info(&pf->pdev->dev,
199 "param err: pile=%s needed=%d id=0x%04x\n",
200 pile ? "<valid>" : "<null>", needed, id);
204 /* start the linear search with an imperfect hint */
205 i = pile->search_hint;
206 while (i < pile->num_entries) {
207 /* skip already allocated entries */
208 if (pile->list[i] & I40E_PILE_VALID_BIT) {
213 /* do we have enough in this lump? */
214 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
215 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
220 /* there was enough, so assign it to the requestor */
221 for (j = 0; j < needed; j++)
222 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
224 pile->search_hint = i + j;
228 /* not enough, so skip over it and continue looking */
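/* Illustrative sketch, not driver logic: how the pile allocator above
 * behaves, assuming a small 8-entry pile where V marks an in-use slot
 * (I40E_PILE_VALID_BIT set).
 *
 *   list: [V|V|.|.|.|V|.|.]   search_hint = 2
 *   i40e_get_lump(pf, pile, 3, id)
 *     - starts at index 2 and finds entries 2..4 free
 *     - tags them with (id | I40E_PILE_VALID_BIT), search_hint becomes 5
 *     - returns base index 2
 *   i40e_put_lump(pile, 2, id)
 *     - clears the consecutive entries tagged with this id from index 2
 *     - pulls search_hint back to 2 so the freed lump is found again
 */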
236 * i40e_put_lump - return a lump of generic resource
237 * @pile: the pile of resource to search
238 * @index: the base item index
239 * @id: the owner id of the items assigned
241 * Returns the count of items in the lump
243 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
245 int valid_id = (id | I40E_PILE_VALID_BIT);
249 if (!pile || index >= pile->num_entries)
253 i < pile->num_entries && pile->list[i] == valid_id;
259 if (count && index < pile->search_hint)
260 pile->search_hint = index;
266 * i40e_find_vsi_from_id - searches for the vsi with the given id
267 * @pf: the pf structure to search for the vsi
268 * @id: id of the vsi it is searching for
270 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
274 for (i = 0; i < pf->num_alloc_vsi; i++)
275 if (pf->vsi[i] && (pf->vsi[i]->id == id))
282 * i40e_service_event_schedule - Schedule the service task to wake up
283 * @pf: board private structure
285 * If not already scheduled, this puts the task into the work queue
287 void i40e_service_event_schedule(struct i40e_pf *pf)
289 if ((!test_bit(__I40E_DOWN, pf->state) &&
290 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
291 test_bit(__I40E_RECOVERY_MODE, pf->state))
292 queue_work(i40e_wq, &pf->service_task);
296 * i40e_tx_timeout - Respond to a Tx Hang
297 * @netdev: network interface device structure
299 * If any port has noticed a Tx timeout, it is likely that the whole
300 * device is munged, not just the one netdev port, so go for the full
303 static void i40e_tx_timeout(struct net_device *netdev)
305 struct i40e_netdev_priv *np = netdev_priv(netdev);
306 struct i40e_vsi *vsi = np->vsi;
307 struct i40e_pf *pf = vsi->back;
308 struct i40e_ring *tx_ring = NULL;
309 unsigned int i, hung_queue = 0;
312 pf->tx_timeout_count++;
314 /* find the stopped queue the same way the stack does */
315 for (i = 0; i < netdev->num_tx_queues; i++) {
316 struct netdev_queue *q;
317 unsigned long trans_start;
319 q = netdev_get_tx_queue(netdev, i);
320 trans_start = q->trans_start;
321 if (netif_xmit_stopped(q) &&
322 time_after(jiffies,
323 (trans_start + netdev->watchdog_timeo))) {
329 if (i == netdev->num_tx_queues) {
330 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
332 /* now that we have an index, find the tx_ring struct */
333 for (i = 0; i < vsi->num_queue_pairs; i++) {
334 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
335 if (hung_queue ==
336 vsi->tx_rings[i]->queue_index) {
337 tx_ring = vsi->tx_rings[i];
344 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
345 pf->tx_timeout_recovery_level = 1; /* reset after some time */
346 else if (time_before(jiffies,
347 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
348 return; /* don't do any new action before the next timeout */
350 /* don't kick off another recovery if one is already pending */
351 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
355 head = i40e_get_head(tx_ring);
356 /* Read interrupt register */
357 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
358 val = rd32(&pf->hw,
359 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
360 tx_ring->vsi->base_vector - 1));
361 else
362 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
364 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
365 vsi->seid, hung_queue, tx_ring->next_to_clean,
366 head, tx_ring->next_to_use,
367 readl(tx_ring->tail), val);
370 pf->tx_timeout_last_recovery = jiffies;
371 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
372 pf->tx_timeout_recovery_level, hung_queue);
374 switch (pf->tx_timeout_recovery_level) {
376 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
379 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
382 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
385 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
389 i40e_service_event_schedule(pf);
390 pf->tx_timeout_recovery_level++;
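/* Illustrative sketch, not driver logic: how the escalation above plays
 * out when one port keeps timing out.
 *
 *   1st timeout -> level 1: set __I40E_PF_RESET_REQUESTED
 *   2nd timeout -> level 2: set __I40E_CORE_RESET_REQUESTED
 *   3rd timeout -> level 3: set __I40E_GLOBAL_RESET_REQUESTED
 *   after that  -> log "recovery unsuccessful" and give up
 *
 * A timeout arriving within watchdog_timeo of the last recovery is
 * ignored, and one arriving more than 20 seconds after it restarts the
 * escalation at level 1. The reset itself is carried out by the service
 * task scheduled at the end of this handler.
 */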
394 * i40e_get_vsi_stats_struct - Get System Network Statistics
395 * @vsi: the VSI we care about
397 * Returns the address of the device statistics structure.
398 * The statistics are actually updated from the service task.
400 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
402 return &vsi->net_stats;
406 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
407 * @ring: Tx ring to get statistics from
408 * @stats: statistics entry to be updated
410 static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
411 struct rtnl_link_stats64 *stats)
417 start = u64_stats_fetch_begin_irq(&ring->syncp);
418 packets = ring->stats.packets;
419 bytes = ring->stats.bytes;
420 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
422 stats->tx_packets += packets;
423 stats->tx_bytes += bytes;
427 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
428 * @netdev: network interface device structure
429 * @stats: data structure to store statistics
431 * Returns the address of the device statistics structure.
432 * The statistics are actually updated from the service task.
434 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
435 struct rtnl_link_stats64 *stats)
437 struct i40e_netdev_priv *np = netdev_priv(netdev);
438 struct i40e_vsi *vsi = np->vsi;
439 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
440 struct i40e_ring *ring;
443 if (test_bit(__I40E_VSI_DOWN, vsi->state))
450 for (i = 0; i < vsi->num_queue_pairs; i++) {
454 ring = READ_ONCE(vsi->tx_rings[i]);
457 i40e_get_netdev_stats_struct_tx(ring, stats);
459 if (i40e_enabled_xdp_vsi(vsi)) {
461 i40e_get_netdev_stats_struct_tx(ring, stats);
466 start = u64_stats_fetch_begin_irq(&ring->syncp);
467 packets = ring->stats.packets;
468 bytes = ring->stats.bytes;
469 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
471 stats->rx_packets += packets;
472 stats->rx_bytes += bytes;
477 /* following stats updated by i40e_watchdog_subtask() */
478 stats->multicast = vsi_stats->multicast;
479 stats->tx_errors = vsi_stats->tx_errors;
480 stats->tx_dropped = vsi_stats->tx_dropped;
481 stats->rx_errors = vsi_stats->rx_errors;
482 stats->rx_dropped = vsi_stats->rx_dropped;
483 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
484 stats->rx_length_errors = vsi_stats->rx_length_errors;
488 * i40e_vsi_reset_stats - Resets all stats of the given vsi
489 * @vsi: the VSI to have its stats reset
491 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
493 struct rtnl_link_stats64 *ns;
499 ns = i40e_get_vsi_stats_struct(vsi);
500 memset(ns, 0, sizeof(*ns));
501 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
502 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
503 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
504 if (vsi->rx_rings && vsi->rx_rings[0]) {
505 for (i = 0; i < vsi->num_queue_pairs; i++) {
506 memset(&vsi->rx_rings[i]->stats, 0,
507 sizeof(vsi->rx_rings[i]->stats));
508 memset(&vsi->rx_rings[i]->rx_stats, 0,
509 sizeof(vsi->rx_rings[i]->rx_stats));
510 memset(&vsi->tx_rings[i]->stats, 0,
511 sizeof(vsi->tx_rings[i]->stats));
512 memset(&vsi->tx_rings[i]->tx_stats, 0,
513 sizeof(vsi->tx_rings[i]->tx_stats));
516 vsi->stat_offsets_loaded = false;
520 * i40e_pf_reset_stats - Reset all of the stats for the given PF
521 * @pf: the PF to be reset
523 void i40e_pf_reset_stats(struct i40e_pf *pf)
527 memset(&pf->stats, 0, sizeof(pf->stats));
528 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
529 pf->stat_offsets_loaded = false;
531 for (i = 0; i < I40E_MAX_VEB; i++) {
533 memset(&pf->veb[i]->stats, 0,
534 sizeof(pf->veb[i]->stats));
535 memset(&pf->veb[i]->stats_offsets, 0,
536 sizeof(pf->veb[i]->stats_offsets));
537 pf->veb[i]->stat_offsets_loaded = false;
540 pf->hw_csum_rx_error = 0;
544 * i40e_stat_update48 - read and update a 48 bit stat from the chip
545 * @hw: ptr to the hardware info
546 * @hireg: the high 32 bit reg to read
547 * @loreg: the low 32 bit reg to read
548 * @offset_loaded: has the initial offset been loaded yet
549 * @offset: ptr to current offset value
550 * @stat: ptr to the stat
552 * Since the device stats are not reset at PFReset, they likely will not
553 * be zeroed when the driver starts. We'll save the first values read
554 * and use them as offsets to be subtracted from the raw values in order
555 * to report stats that count from zero. In the process, we also manage
556 * the potential roll-over.
558 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
559 bool offset_loaded, u64 *offset, u64 *stat)
563 if (hw->device_id == I40E_DEV_ID_QEMU) {
564 new_data = rd32(hw, loreg);
565 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
566 } else {
567 new_data = rd64(hw, loreg);
571 if (likely(new_data >= *offset))
572 *stat = new_data - *offset;
573 else
574 *stat = (new_data + BIT_ULL(48)) - *offset;
575 *stat &= 0xFFFFFFFFFFFFULL;
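/* Illustrative sketch, not driver logic: the offset handling and 48-bit
 * rollover correction above, using made-up register values.
 *
 *   first read:  new_data = 0xFFFFFFFFF000
 *                -> saved as *offset, so the stat reports from zero
 *   later read:  new_data = 0xA000 (the counter wrapped past 2^48)
 *                -> new_data < *offset, so
 *                   *stat  = (0xA000 + BIT_ULL(48)) - 0xFFFFFFFFF000
 *                          = 0xB000
 *                   *stat &= 0xFFFFFFFFFFFF
 *                i.e. 0xB000 units were counted since the offset was
 *                taken, despite the hardware counter rolling over.
 */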
579 * i40e_stat_update32 - read and update a 32 bit stat from the chip
580 * @hw: ptr to the hardware info
581 * @reg: the hw reg to read
582 * @offset_loaded: has the initial offset been loaded yet
583 * @offset: ptr to current offset value
584 * @stat: ptr to the stat
586 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
587 bool offset_loaded, u64 *offset, u64 *stat)
591 new_data = rd32(hw, reg);
594 if (likely(new_data >= *offset))
595 *stat = (u32)(new_data - *offset);
596 else
597 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
601 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
602 * @hw: ptr to the hardware info
603 * @reg: the hw reg to read and clear
604 * @stat: ptr to the stat
606 static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
608 u32 new_data = rd32(hw, reg);
610 wr32(hw, reg, 1); /* must write a nonzero value to clear register */
615 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
616 * @vsi: the VSI to be updated
618 void i40e_update_eth_stats(struct i40e_vsi *vsi)
620 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
621 struct i40e_pf *pf = vsi->back;
622 struct i40e_hw *hw = &pf->hw;
623 struct i40e_eth_stats *oes;
624 struct i40e_eth_stats *es; /* device's eth stats */
626 es = &vsi->eth_stats;
627 oes = &vsi->eth_stats_offsets;
629 /* Gather up the stats that the hw collects */
630 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
631 vsi->stat_offsets_loaded,
632 &oes->tx_errors, &es->tx_errors);
633 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
634 vsi->stat_offsets_loaded,
635 &oes->rx_discards, &es->rx_discards);
636 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
637 vsi->stat_offsets_loaded,
638 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
643 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
644 I40E_GLV_GORCL(stat_idx),
645 vsi->stat_offsets_loaded,
646 &oes->rx_bytes, &es->rx_bytes);
647 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
648 I40E_GLV_UPRCL(stat_idx),
649 vsi->stat_offsets_loaded,
650 &oes->rx_unicast, &es->rx_unicast);
651 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
652 I40E_GLV_MPRCL(stat_idx),
653 vsi->stat_offsets_loaded,
654 &oes->rx_multicast, &es->rx_multicast);
655 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
656 I40E_GLV_BPRCL(stat_idx),
657 vsi->stat_offsets_loaded,
658 &oes->rx_broadcast, &es->rx_broadcast);
660 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
661 I40E_GLV_GOTCL(stat_idx),
662 vsi->stat_offsets_loaded,
663 &oes->tx_bytes, &es->tx_bytes);
664 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
665 I40E_GLV_UPTCL(stat_idx),
666 vsi->stat_offsets_loaded,
667 &oes->tx_unicast, &es->tx_unicast);
668 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
669 I40E_GLV_MPTCL(stat_idx),
670 vsi->stat_offsets_loaded,
671 &oes->tx_multicast, &es->tx_multicast);
672 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
673 I40E_GLV_BPTCL(stat_idx),
674 vsi->stat_offsets_loaded,
675 &oes->tx_broadcast, &es->tx_broadcast);
676 vsi->stat_offsets_loaded = true;
680 * i40e_update_veb_stats - Update Switch component statistics
681 * @veb: the VEB being updated
683 static void i40e_update_veb_stats(struct i40e_veb *veb)
685 struct i40e_pf *pf = veb->pf;
686 struct i40e_hw *hw = &pf->hw;
687 struct i40e_eth_stats *oes;
688 struct i40e_eth_stats *es; /* device's eth stats */
689 struct i40e_veb_tc_stats *veb_oes;
690 struct i40e_veb_tc_stats *veb_es;
693 idx = veb->stats_idx;
695 oes = &veb->stats_offsets;
696 veb_es = &veb->tc_stats;
697 veb_oes = &veb->tc_stats_offsets;
699 /* Gather up the stats that the hw collects */
700 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
701 veb->stat_offsets_loaded,
702 &oes->tx_discards, &es->tx_discards);
703 if (hw->revision_id > 0)
704 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
705 veb->stat_offsets_loaded,
706 &oes->rx_unknown_protocol,
707 &es->rx_unknown_protocol);
708 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
709 veb->stat_offsets_loaded,
710 &oes->rx_bytes, &es->rx_bytes);
711 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
712 veb->stat_offsets_loaded,
713 &oes->rx_unicast, &es->rx_unicast);
714 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
715 veb->stat_offsets_loaded,
716 &oes->rx_multicast, &es->rx_multicast);
717 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
718 veb->stat_offsets_loaded,
719 &oes->rx_broadcast, &es->rx_broadcast);
721 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
722 veb->stat_offsets_loaded,
723 &oes->tx_bytes, &es->tx_bytes);
724 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
725 veb->stat_offsets_loaded,
726 &oes->tx_unicast, &es->tx_unicast);
727 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
728 veb->stat_offsets_loaded,
729 &oes->tx_multicast, &es->tx_multicast);
730 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
731 veb->stat_offsets_loaded,
732 &oes->tx_broadcast, &es->tx_broadcast);
733 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
734 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
735 I40E_GLVEBTC_RPCL(i, idx),
736 veb->stat_offsets_loaded,
737 &veb_oes->tc_rx_packets[i],
738 &veb_es->tc_rx_packets[i]);
739 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
740 I40E_GLVEBTC_RBCL(i, idx),
741 veb->stat_offsets_loaded,
742 &veb_oes->tc_rx_bytes[i],
743 &veb_es->tc_rx_bytes[i]);
744 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
745 I40E_GLVEBTC_TPCL(i, idx),
746 veb->stat_offsets_loaded,
747 &veb_oes->tc_tx_packets[i],
748 &veb_es->tc_tx_packets[i]);
749 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
750 I40E_GLVEBTC_TBCL(i, idx),
751 veb->stat_offsets_loaded,
752 &veb_oes->tc_tx_bytes[i],
753 &veb_es->tc_tx_bytes[i]);
755 veb->stat_offsets_loaded = true;
759 * i40e_update_vsi_stats - Update the vsi statistics counters.
760 * @vsi: the VSI to be updated
762 * There are a few instances where we store the same stat in a
763 * couple of different structs. This is partly because we have
764 * the netdev stats that need to be filled out, which is slightly
765 * different from the "eth_stats" defined by the chip and used in
766 * VF communications. We sort it out here.
768 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
770 struct i40e_pf *pf = vsi->back;
771 struct rtnl_link_stats64 *ons;
772 struct rtnl_link_stats64 *ns; /* netdev stats */
773 struct i40e_eth_stats *oes;
774 struct i40e_eth_stats *es; /* device's eth stats */
775 u32 tx_restart, tx_busy;
786 if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
787 test_bit(__I40E_CONFIG_BUSY, pf->state))
790 ns = i40e_get_vsi_stats_struct(vsi);
791 ons = &vsi->net_stats_offsets;
792 es = &vsi->eth_stats;
793 oes = &vsi->eth_stats_offsets;
795 /* Gather up the netdev and vsi stats that the driver collects
796 * on the fly during packet processing
800 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
804 for (q = 0; q < vsi->num_queue_pairs; q++) {
806 p = READ_ONCE(vsi->tx_rings[q]);
809 start = u64_stats_fetch_begin_irq(&p->syncp);
810 packets = p->stats.packets;
811 bytes = p->stats.bytes;
812 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
815 tx_restart += p->tx_stats.restart_queue;
816 tx_busy += p->tx_stats.tx_busy;
817 tx_linearize += p->tx_stats.tx_linearize;
818 tx_force_wb += p->tx_stats.tx_force_wb;
820 /* Rx queue is part of the same block as Tx queue */
823 start = u64_stats_fetch_begin_irq(&p->syncp);
824 packets = p->stats.packets;
825 bytes = p->stats.bytes;
826 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
829 rx_buf += p->rx_stats.alloc_buff_failed;
830 rx_page += p->rx_stats.alloc_page_failed;
833 vsi->tx_restart = tx_restart;
834 vsi->tx_busy = tx_busy;
835 vsi->tx_linearize = tx_linearize;
836 vsi->tx_force_wb = tx_force_wb;
837 vsi->rx_page_failed = rx_page;
838 vsi->rx_buf_failed = rx_buf;
840 ns->rx_packets = rx_p;
842 ns->tx_packets = tx_p;
845 /* update netdev stats from eth stats */
846 i40e_update_eth_stats(vsi);
847 ons->tx_errors = oes->tx_errors;
848 ns->tx_errors = es->tx_errors;
849 ons->multicast = oes->rx_multicast;
850 ns->multicast = es->rx_multicast;
851 ons->rx_dropped = oes->rx_discards;
852 ns->rx_dropped = es->rx_discards;
853 ons->tx_dropped = oes->tx_discards;
854 ns->tx_dropped = es->tx_discards;
856 /* pull in a couple PF stats if this is the main vsi */
857 if (vsi == pf->vsi[pf->lan_vsi]) {
858 ns->rx_crc_errors = pf->stats.crc_errors;
859 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
860 ns->rx_length_errors = pf->stats.rx_length_errors;
865 * i40e_update_pf_stats - Update the PF statistics counters.
866 * @pf: the PF to be updated
868 static void i40e_update_pf_stats(struct i40e_pf *pf)
870 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
871 struct i40e_hw_port_stats *nsd = &pf->stats;
872 struct i40e_hw *hw = &pf->hw;
876 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
877 I40E_GLPRT_GORCL(hw->port),
878 pf->stat_offsets_loaded,
879 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
880 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
881 I40E_GLPRT_GOTCL(hw->port),
882 pf->stat_offsets_loaded,
883 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
884 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
885 pf->stat_offsets_loaded,
886 &osd->eth.rx_discards,
887 &nsd->eth.rx_discards);
888 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
889 I40E_GLPRT_UPRCL(hw->port),
890 pf->stat_offsets_loaded,
891 &osd->eth.rx_unicast,
892 &nsd->eth.rx_unicast);
893 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
894 I40E_GLPRT_MPRCL(hw->port),
895 pf->stat_offsets_loaded,
896 &osd->eth.rx_multicast,
897 &nsd->eth.rx_multicast);
898 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
899 I40E_GLPRT_BPRCL(hw->port),
900 pf->stat_offsets_loaded,
901 &osd->eth.rx_broadcast,
902 &nsd->eth.rx_broadcast);
903 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
904 I40E_GLPRT_UPTCL(hw->port),
905 pf->stat_offsets_loaded,
906 &osd->eth.tx_unicast,
907 &nsd->eth.tx_unicast);
908 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
909 I40E_GLPRT_MPTCL(hw->port),
910 pf->stat_offsets_loaded,
911 &osd->eth.tx_multicast,
912 &nsd->eth.tx_multicast);
913 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
914 I40E_GLPRT_BPTCL(hw->port),
915 pf->stat_offsets_loaded,
916 &osd->eth.tx_broadcast,
917 &nsd->eth.tx_broadcast);
919 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
920 pf->stat_offsets_loaded,
921 &osd->tx_dropped_link_down,
922 &nsd->tx_dropped_link_down);
924 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
925 pf->stat_offsets_loaded,
926 &osd->crc_errors, &nsd->crc_errors);
928 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
929 pf->stat_offsets_loaded,
930 &osd->illegal_bytes, &nsd->illegal_bytes);
932 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
933 pf->stat_offsets_loaded,
934 &osd->mac_local_faults,
935 &nsd->mac_local_faults);
936 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
937 pf->stat_offsets_loaded,
938 &osd->mac_remote_faults,
939 &nsd->mac_remote_faults);
941 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
942 pf->stat_offsets_loaded,
943 &osd->rx_length_errors,
944 &nsd->rx_length_errors);
946 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
947 pf->stat_offsets_loaded,
948 &osd->link_xon_rx, &nsd->link_xon_rx);
949 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
950 pf->stat_offsets_loaded,
951 &osd->link_xon_tx, &nsd->link_xon_tx);
952 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
953 pf->stat_offsets_loaded,
954 &osd->link_xoff_rx, &nsd->link_xoff_rx);
955 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
956 pf->stat_offsets_loaded,
957 &osd->link_xoff_tx, &nsd->link_xoff_tx);
959 for (i = 0; i < 8; i++) {
960 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
961 pf->stat_offsets_loaded,
962 &osd->priority_xoff_rx[i],
963 &nsd->priority_xoff_rx[i]);
964 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
965 pf->stat_offsets_loaded,
966 &osd->priority_xon_rx[i],
967 &nsd->priority_xon_rx[i]);
968 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
969 pf->stat_offsets_loaded,
970 &osd->priority_xon_tx[i],
971 &nsd->priority_xon_tx[i]);
972 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
973 pf->stat_offsets_loaded,
974 &osd->priority_xoff_tx[i],
975 &nsd->priority_xoff_tx[i]);
976 i40e_stat_update32(hw,
977 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
978 pf->stat_offsets_loaded,
979 &osd->priority_xon_2_xoff[i],
980 &nsd->priority_xon_2_xoff[i]);
983 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
984 I40E_GLPRT_PRC64L(hw->port),
985 pf->stat_offsets_loaded,
986 &osd->rx_size_64, &nsd->rx_size_64);
987 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
988 I40E_GLPRT_PRC127L(hw->port),
989 pf->stat_offsets_loaded,
990 &osd->rx_size_127, &nsd->rx_size_127);
991 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
992 I40E_GLPRT_PRC255L(hw->port),
993 pf->stat_offsets_loaded,
994 &osd->rx_size_255, &nsd->rx_size_255);
995 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
996 I40E_GLPRT_PRC511L(hw->port),
997 pf->stat_offsets_loaded,
998 &osd->rx_size_511, &nsd->rx_size_511);
999 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1000 I40E_GLPRT_PRC1023L(hw->port),
1001 pf->stat_offsets_loaded,
1002 &osd->rx_size_1023, &nsd->rx_size_1023);
1003 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1004 I40E_GLPRT_PRC1522L(hw->port),
1005 pf->stat_offsets_loaded,
1006 &osd->rx_size_1522, &nsd->rx_size_1522);
1007 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1008 I40E_GLPRT_PRC9522L(hw->port),
1009 pf->stat_offsets_loaded,
1010 &osd->rx_size_big, &nsd->rx_size_big);
1012 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1013 I40E_GLPRT_PTC64L(hw->port),
1014 pf->stat_offsets_loaded,
1015 &osd->tx_size_64, &nsd->tx_size_64);
1016 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1017 I40E_GLPRT_PTC127L(hw->port),
1018 pf->stat_offsets_loaded,
1019 &osd->tx_size_127, &nsd->tx_size_127);
1020 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1021 I40E_GLPRT_PTC255L(hw->port),
1022 pf->stat_offsets_loaded,
1023 &osd->tx_size_255, &nsd->tx_size_255);
1024 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1025 I40E_GLPRT_PTC511L(hw->port),
1026 pf->stat_offsets_loaded,
1027 &osd->tx_size_511, &nsd->tx_size_511);
1028 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1029 I40E_GLPRT_PTC1023L(hw->port),
1030 pf->stat_offsets_loaded,
1031 &osd->tx_size_1023, &nsd->tx_size_1023);
1032 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1033 I40E_GLPRT_PTC1522L(hw->port),
1034 pf->stat_offsets_loaded,
1035 &osd->tx_size_1522, &nsd->tx_size_1522);
1036 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1037 I40E_GLPRT_PTC9522L(hw->port),
1038 pf->stat_offsets_loaded,
1039 &osd->tx_size_big, &nsd->tx_size_big);
1041 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1042 pf->stat_offsets_loaded,
1043 &osd->rx_undersize, &nsd->rx_undersize);
1044 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1045 pf->stat_offsets_loaded,
1046 &osd->rx_fragments, &nsd->rx_fragments);
1047 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1048 pf->stat_offsets_loaded,
1049 &osd->rx_oversize, &nsd->rx_oversize);
1050 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1051 pf->stat_offsets_loaded,
1052 &osd->rx_jabber, &nsd->rx_jabber);
1055 i40e_stat_update_and_clear32(hw,
1056 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1057 &nsd->fd_atr_match);
1058 i40e_stat_update_and_clear32(hw,
1059 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1061 i40e_stat_update_and_clear32(hw,
1062 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1063 &nsd->fd_atr_tunnel_match);
1065 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1066 nsd->tx_lpi_status =
1067 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1068 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1069 nsd->rx_lpi_status =
1070 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1071 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1072 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1073 pf->stat_offsets_loaded,
1074 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1075 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1076 pf->stat_offsets_loaded,
1077 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1079 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1080 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1081 nsd->fd_sb_status = true;
1082 else
1083 nsd->fd_sb_status = false;
1085 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1086 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1087 nsd->fd_atr_status = true;
1088 else
1089 nsd->fd_atr_status = false;
1091 pf->stat_offsets_loaded = true;
1095 * i40e_update_stats - Update the various statistics counters.
1096 * @vsi: the VSI to be updated
1098 * Update the various stats for this VSI and its related entities.
1100 void i40e_update_stats(struct i40e_vsi *vsi)
1102 struct i40e_pf *pf = vsi->back;
1104 if (vsi == pf->vsi[pf->lan_vsi])
1105 i40e_update_pf_stats(pf);
1107 i40e_update_vsi_stats(vsi);
1111 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1112 * @vsi: the VSI to be searched
1113 * @macaddr: the MAC address
1116 * Returns ptr to the filter object or NULL
1118 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1119 const u8 *macaddr, s16 vlan)
1121 struct i40e_mac_filter *f;
1124 if (!vsi || !macaddr)
1127 key = i40e_addr_to_hkey(macaddr);
1128 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1129 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1137 * i40e_find_mac - Find a mac addr in the macvlan filters list
1138 * @vsi: the VSI to be searched
1139 * @macaddr: the MAC address we are searching for
1141 * Returns the first filter with the provided MAC address or NULL if
1142 * MAC address was not found
1144 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1146 struct i40e_mac_filter *f;
1149 if (!vsi || !macaddr)
1152 key = i40e_addr_to_hkey(macaddr);
1153 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1154 if ((ether_addr_equal(macaddr, f->macaddr)))
1161 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1162 * @vsi: the VSI to be searched
1164 * Returns true if VSI is in vlan mode or false otherwise
1166 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1168 /* If we have a PVID, always operate in VLAN mode */
1172 /* We need to operate in VLAN mode whenever we have any filters with
1173 * a VLAN other than I40E_VLAN_ALL. We could check the table each
1174 * time, incurring search cost repeatedly. However, we can notice two
1177 * 1) the only place where we can gain a VLAN filter is in
1180 * 2) the only place where filters are actually removed is in
1181 * i40e_sync_filters_subtask.
1183 * Thus, we can simply use a boolean value, has_vlan_filters which we
1184 * will set to true when we add a VLAN filter in i40e_add_filter. Then
1185 * we have to perform the full search after deleting filters in
1186 * i40e_sync_filters_subtask, but we already have to search
1187 * filters here and can perform the check at the same time. This
1188 * results in avoiding embedding a loop for VLAN mode inside another
1189 * loop over all the filters, and should maintain correctness as noted
1192 return vsi->has_vlan_filter;
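/* Illustrative sketch, not driver logic: the O(1) shortcut above for a
 * VSI without a PVID.
 *
 *   i40e_add_filter(vsi, mac, 100)  -> has_vlan_filter = true,
 *                                      i40e_is_vsi_in_vlan() == true
 *   last VLAN filter removed and the full re-check done from the
 *   filter sync path                -> has_vlan_filter cleared,
 *                                      i40e_is_vsi_in_vlan() == false
 *
 * so callers get an immediate answer without walking the filter hash.
 */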
1196 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1197 * @vsi: the VSI to configure
1198 * @tmp_add_list: list of filters ready to be added
1199 * @tmp_del_list: list of filters ready to be deleted
1200 * @vlan_filters: the number of active VLAN filters
1202 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1203 * behave as expected. If we have any active VLAN filters remaining or about
1204 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1205 * so that they only match against untagged traffic. If we no longer have any
1206 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1207 * so that they match against both tagged and untagged traffic. In this way,
1208 * we ensure that we correctly receive the desired traffic. This ensures that
1209 * when we have an active VLAN we will receive only untagged traffic and
1210 * traffic matching active VLANs. If we have no active VLANs then we will
1211 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1213 * Finally, in a similar fashion, this function also corrects filters when
1214 * there is an active PVID assigned to this VSI.
1216 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1218 * This function is only expected to be called from within
1219 * i40e_sync_vsi_filters.
1221 * NOTE: This function expects to be called while under the
1222 * mac_filter_hash_lock
1224 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1225 struct hlist_head *tmp_add_list,
1226 struct hlist_head *tmp_del_list,
1229 s16 pvid = le16_to_cpu(vsi->info.pvid);
1230 struct i40e_mac_filter *f, *add_head;
1231 struct i40e_new_mac_filter *new;
1232 struct hlist_node *h;
1235 /* To determine if a particular filter needs to be replaced we
1236 * have the three following conditions:
1238 * a) if we have a PVID assigned, then all filters which are
1239 * not marked as VLAN=PVID must be replaced with filters that
1241 * b) otherwise, if we have any active VLANS, all filters
1242 * which are marked as VLAN=-1 must be replaced with
1243 * filters marked as VLAN=0
1244 * c) finally, if we do not have any active VLANS, all filters
1245 * which are marked as VLAN=0 must be replaced with filters
1249 /* Update the filters about to be added in place */
1250 hlist_for_each_entry(new, tmp_add_list, hlist) {
1251 if (pvid && new->f->vlan != pvid)
1252 new->f->vlan = pvid;
1253 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1254 new->f->vlan = 0;
1255 else if (!vlan_filters && new->f->vlan == 0)
1256 new->f->vlan = I40E_VLAN_ANY;
1259 /* Update the remaining active filters */
1260 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1261 /* Combine the checks for whether a filter needs to be changed
1262 * and then determine the new VLAN inside the if block, in
1263 * order to avoid duplicating code for adding the new filter
1264 * then deleting the old filter.
1266 if ((pvid && f->vlan != pvid) ||
1267 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1268 (!vlan_filters && f->vlan == 0)) {
1269 /* Determine the new vlan we will be adding */
1270 if (pvid)
1271 new_vlan = pvid;
1272 else if (vlan_filters)
1273 new_vlan = 0;
1274 else
1275 new_vlan = I40E_VLAN_ANY;
1277 /* Create the new filter */
1278 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1282 /* Create a temporary i40e_new_mac_filter */
1283 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1288 new->state = add_head->state;
1290 /* Add the new filter to the tmp list */
1291 hlist_add_head(&new->hlist, tmp_add_list);
1293 /* Put the original filter into the delete list */
1294 f->state = I40E_FILTER_REMOVE;
1295 hash_del(&f->hlist);
1296 hlist_add_head(&f->hlist, tmp_del_list);
1300 vsi->has_vlan_filter = !!vlan_filters;
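/* Illustrative sketch, not driver logic: the three correction cases
 * (a), (b), (c) described above, applied to one MAC filter f.
 *
 *   PVID = 5,  any vlan_filters:   f->vlan != 5  -> re-add as VLAN=5
 *   no PVID,   vlan_filters > 0:   f->vlan == -1 -> re-add as VLAN=0
 *                                                   (untagged only)
 *   no PVID,   vlan_filters == 0:  f->vlan == 0  -> re-add as VLAN=-1
 *                                                   (tagged + untagged)
 *
 * "Re-add" means the replacement goes onto tmp_add_list while the old
 * filter is marked I40E_FILTER_REMOVE and moved to tmp_del_list.
 */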
1306 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1307 * @vsi: the PF Main VSI - inappropriate for any other VSI
1308 * @macaddr: the MAC address
1310 * Remove whatever filter the firmware set up so the driver can manage
1311 * its own filtering intelligently.
1313 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1315 struct i40e_aqc_remove_macvlan_element_data element;
1316 struct i40e_pf *pf = vsi->back;
1318 /* Only appropriate for the PF main VSI */
1319 if (vsi->type != I40E_VSI_MAIN)
1322 memset(&element, 0, sizeof(element));
1323 ether_addr_copy(element.mac_addr, macaddr);
1324 element.vlan_tag = 0;
1325 /* Ignore error returns, some firmware does it this way... */
1326 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1327 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1329 memset(&element, 0, sizeof(element));
1330 ether_addr_copy(element.mac_addr, macaddr);
1331 element.vlan_tag = 0;
1332 /* ...and some firmware does it this way. */
1333 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1334 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1335 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1339 * i40e_add_filter - Add a mac/vlan filter to the VSI
1340 * @vsi: the VSI to be searched
1341 * @macaddr: the MAC address
1344 * Returns ptr to the filter object or NULL when no memory available.
1346 * NOTE: This function is expected to be called with mac_filter_hash_lock
1349 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1350 const u8 *macaddr, s16 vlan)
1352 struct i40e_mac_filter *f;
1355 if (!vsi || !macaddr)
1358 f = i40e_find_filter(vsi, macaddr, vlan);
1360 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1364 /* Update the boolean indicating if we need to function in
1368 vsi->has_vlan_filter = true;
1370 ether_addr_copy(f->macaddr, macaddr);
1372 f->state = I40E_FILTER_NEW;
1373 INIT_HLIST_NODE(&f->hlist);
1375 key = i40e_addr_to_hkey(macaddr);
1376 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1378 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1379 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1382 /* If we're asked to add a filter that has been marked for removal, it
1383 * is safe to simply restore it to active state. __i40e_del_filter
1384 * will have simply deleted any filters which were previously marked
1385 * NEW or FAILED, so if it is currently marked REMOVE it must have
1386 * previously been ACTIVE. Since we haven't yet run the sync filters
1387 * task, just restore this filter to the ACTIVE state so that the
1388 * sync task leaves it in place
1390 if (f->state == I40E_FILTER_REMOVE)
1391 f->state = I40E_FILTER_ACTIVE;
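/* Illustrative sketch, not driver logic: the state handling above when
 * a filter is deleted and re-added before the sync task runs.
 *
 *   __i40e_del_filter(f)            f->state: ACTIVE -> REMOVE
 *   i40e_add_filter(same mac/vlan)  finds the existing entry,
 *                                   f->state: REMOVE -> ACTIVE
 *
 * The sync task then sees an ACTIVE filter and leaves it alone, so no
 * admin queue commands are issued for this delete/re-add sequence.
 */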
1397 * __i40e_del_filter - Remove a specific filter from the VSI
1398 * @vsi: VSI to remove from
1399 * @f: the filter to remove from the list
1401 * This function should be called instead of i40e_del_filter only if you know
1402 * the exact filter you will remove already, such as via i40e_find_filter or
1405 * NOTE: This function is expected to be called with mac_filter_hash_lock
1407 * ANOTHER NOTE: This function MUST be called from within the context of
1408 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1409 * instead of list_for_each_entry().
1411 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1416 /* If the filter was never added to firmware then we can just delete it
1417 * directly and we don't want to set the status to remove or else an
1418 * admin queue command will unnecessarily fire.
1420 if ((f->state == I40E_FILTER_FAILED) ||
1421 (f->state == I40E_FILTER_NEW)) {
1422 hash_del(&f->hlist);
1425 f->state = I40E_FILTER_REMOVE;
1428 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1429 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1433 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1434 * @vsi: the VSI to be searched
1435 * @macaddr: the MAC address
1438 * NOTE: This function is expected to be called with mac_filter_hash_lock
1440 * ANOTHER NOTE: This function MUST be called from within the context of
1441 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1442 * instead of list_for_each_entry().
1444 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1446 struct i40e_mac_filter *f;
1448 if (!vsi || !macaddr)
1451 f = i40e_find_filter(vsi, macaddr, vlan);
1452 __i40e_del_filter(vsi, f);
1456 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1457 * @vsi: the VSI to be searched
1458 * @macaddr: the mac address to be filtered
1460 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
1461 * go through all the macvlan filters and add a macvlan filter for each
1462 * unique vlan that already exists. If a PVID has been assigned, instead only
1463 * add the macaddr to that VLAN.
1465 * Returns last filter added on success, else NULL
1467 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1470 struct i40e_mac_filter *f, *add = NULL;
1471 struct hlist_node *h;
1475 return i40e_add_filter(vsi, macaddr,
1476 le16_to_cpu(vsi->info.pvid));
1478 if (!i40e_is_vsi_in_vlan(vsi))
1479 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1481 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1482 if (f->state == I40E_FILTER_REMOVE)
1484 add = i40e_add_filter(vsi, macaddr, f->vlan);
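/* Illustrative sketch, not driver logic: how the helper above expands a
 * single MAC address across the VSI's VLAN state.
 *
 *   PVID = 10             -> one filter:  (mac, VLAN 10)
 *   no PVID, no VLANs     -> one filter:  (mac, I40E_VLAN_ANY)
 *   no PVID, VLANs 0, 20  -> one filter per existing VLAN:
 *                            (mac, 0) and (mac, 20)
 *
 * Filters already marked I40E_FILTER_REMOVE are skipped while cloning,
 * and the last filter added (or NULL on failure) is returned.
 */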
1493 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1494 * @vsi: the VSI to be searched
1495 * @macaddr: the mac address to be removed
1497 * Removes a given MAC address from a VSI regardless of what VLAN it has been
1500 * Returns 0 for success, or error
1502 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1504 struct i40e_mac_filter *f;
1505 struct hlist_node *h;
1509 lockdep_assert_held(&vsi->mac_filter_hash_lock);
1510 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1511 if (ether_addr_equal(macaddr, f->macaddr)) {
1512 __i40e_del_filter(vsi, f);
1524 * i40e_set_mac - NDO callback to set mac address
1525 * @netdev: network interface device structure
1526 * @p: pointer to an address structure
1528 * Returns 0 on success, negative on failure
1530 static int i40e_set_mac(struct net_device *netdev, void *p)
1532 struct i40e_netdev_priv *np = netdev_priv(netdev);
1533 struct i40e_vsi *vsi = np->vsi;
1534 struct i40e_pf *pf = vsi->back;
1535 struct i40e_hw *hw = &pf->hw;
1536 struct sockaddr *addr = p;
1538 if (!is_valid_ether_addr(addr->sa_data))
1539 return -EADDRNOTAVAIL;
1541 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1542 netdev_info(netdev, "already using mac address %pM\n",
1547 if (test_bit(__I40E_DOWN, pf->state) ||
1548 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1549 return -EADDRNOTAVAIL;
1551 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1552 netdev_info(netdev, "returning to hw mac address %pM\n",
1555 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1557 /* Copy the address first, so that we avoid a possible race with
1559 * - Remove old address from MAC filter
1560 * - Copy new address
1561 * - Add new address to MAC filter
1563 spin_lock_bh(&vsi->mac_filter_hash_lock);
1564 i40e_del_mac_filter(vsi, netdev->dev_addr);
1565 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1566 i40e_add_mac_filter(vsi, netdev->dev_addr);
1567 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1569 if (vsi->type == I40E_VSI_MAIN) {
1572 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
1573 addr->sa_data, NULL);
1575 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1576 i40e_stat_str(hw, ret),
1577 i40e_aq_str(hw, hw->aq.asq_last_status));
1580 /* schedule our worker thread which will take care of
1581 * applying the new filter changes
1583 i40e_service_event_schedule(pf);
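/* Illustrative sketch, not driver logic: the ordered update above, all
 * under mac_filter_hash_lock so .set_rx_mode() never sees the filter
 * list and netdev->dev_addr disagree:
 *
 *   i40e_del_mac_filter(vsi, <old dev_addr>);
 *   ether_addr_copy(netdev->dev_addr, <new address>);
 *   i40e_add_mac_filter(vsi, <new address>);
 *
 * Only the software list changes here; the hardware is reprogrammed
 * later by the service task scheduled at the end of this function.
 */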
1588 * i40e_config_rss_aq - Prepare for RSS using AQ commands
1589 * @vsi: vsi structure
1590 * @seed: RSS hash seed
1592 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1593 u8 *lut, u16 lut_size)
1595 struct i40e_pf *pf = vsi->back;
1596 struct i40e_hw *hw = &pf->hw;
1600 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1601 (struct i40e_aqc_get_set_rss_key_data *)seed;
1602 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1604 dev_info(&pf->pdev->dev,
1605 "Cannot set RSS key, err %s aq_err %s\n",
1606 i40e_stat_str(hw, ret),
1607 i40e_aq_str(hw, hw->aq.asq_last_status));
1612 bool pf_lut = vsi->type == I40E_VSI_MAIN;
1614 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1616 dev_info(&pf->pdev->dev,
1617 "Cannot set RSS lut, err %s aq_err %s\n",
1618 i40e_stat_str(hw, ret),
1619 i40e_aq_str(hw, hw->aq.asq_last_status));
1627 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
1628 * @vsi: VSI structure
1630 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1632 struct i40e_pf *pf = vsi->back;
1633 u8 seed[I40E_HKEY_ARRAY_SIZE];
1637 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1640 vsi->rss_size = min_t(int, pf->alloc_rss_size,
1641 vsi->num_queue_pairs);
1644 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1648 /* Use the user configured hash keys and lookup table if there is one,
1649 * otherwise use default
1651 if (vsi->rss_lut_user)
1652 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1654 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1655 if (vsi->rss_hkey_user)
1656 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1658 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1659 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1665 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
1666 * @vsi: the VSI being configured,
1667 * @ctxt: VSI context structure
1668 * @enabled_tc: number of traffic classes to enable
1670 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
1672 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1673 struct i40e_vsi_context *ctxt,
1676 u16 qcount = 0, max_qcount, qmap, sections = 0;
1677 int i, override_q, pow, num_qps, ret;
1678 u8 netdev_tc = 0, offset = 0;
1680 if (vsi->type != I40E_VSI_MAIN)
1682 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1683 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1684 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1685 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1686 num_qps = vsi->mqprio_qopt.qopt.count[0];
1688 /* find the next higher power-of-2 of num queue pairs */
1689 pow = ilog2(num_qps);
1690 if (!is_power_of_2(num_qps))
1691 pow++;
1692 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1693 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1695 /* Setup queue offset/count for all TCs for given VSI */
1696 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1697 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1698 /* See if the given TC is enabled for the given VSI */
1699 if (vsi->tc_config.enabled_tc & BIT(i)) {
1700 offset = vsi->mqprio_qopt.qopt.offset[i];
1701 qcount = vsi->mqprio_qopt.qopt.count[i];
1702 if (qcount > max_qcount)
1703 max_qcount = qcount;
1704 vsi->tc_config.tc_info[i].qoffset = offset;
1705 vsi->tc_config.tc_info[i].qcount = qcount;
1706 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1708 /* TC is not enabled so set the offset to
1709 * default queue and allocate one queue
1712 vsi->tc_config.tc_info[i].qoffset = 0;
1713 vsi->tc_config.tc_info[i].qcount = 1;
1714 vsi->tc_config.tc_info[i].netdev_tc = 0;
1718 /* Set actual Tx/Rx queue pairs */
1719 vsi->num_queue_pairs = offset + qcount;
1721 /* Setup queue TC[0].qmap for given VSI context */
1722 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1723 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1724 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1725 ctxt->info.valid_sections |= cpu_to_le16(sections);
1727 /* Reconfigure RSS for main VSI with max queue count */
1728 vsi->rss_size = max_qcount;
1729 ret = i40e_vsi_config_rss(vsi);
1731 dev_info(&vsi->back->pdev->dev,
1732 "Failed to reconfig rss for num_queues (%u)\n",
1736 vsi->reconfig_rss = true;
1737 dev_dbg(&vsi->back->pdev->dev,
1738 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1740 /* Find queue count available for channel VSIs and starting offset
1743 override_q = vsi->mqprio_qopt.qopt.count[0];
1744 if (override_q && override_q < vsi->num_queue_pairs) {
1745 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1746 vsi->next_base_queue = override_q;
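/* Illustrative sketch, not driver logic: the TC0 qmap encoding built
 * above, assuming an mqprio config with count[0] = 6 queues at offset 0.
 *
 *   num_qps = 6, ilog2(6) = 2, 6 is not a power of two -> pow = 3
 *   qmap = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *          (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 *
 * The VSI context therefore advertises 2^3 = 8 queue slots for TC0 even
 * though only 6 queue pairs are in use; num_queue_pairs and the RSS
 * size still follow the real per-TC counts gathered in the loop above.
 */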
1752 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1753 * @vsi: the VSI being setup
1754 * @ctxt: VSI context structure
1755 * @enabled_tc: Enabled TCs bitmap
1756 * @is_add: True if called before Add VSI
1758 * Setup VSI queue mapping for enabled traffic classes.
1760 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1761 struct i40e_vsi_context *ctxt,
1765 struct i40e_pf *pf = vsi->back;
1775 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1778 /* Number of queues per enabled TC */
1779 num_tc_qps = vsi->alloc_queue_pairs;
1780 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1781 /* Find numtc from enabled TC bitmap */
1782 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1783 if (enabled_tc & BIT(i)) /* TC is enabled */
1787 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1790 num_tc_qps = num_tc_qps / numtc;
1791 num_tc_qps = min_t(int, num_tc_qps,
1792 i40e_pf_get_max_q_per_tc(pf));
1795 vsi->tc_config.numtc = numtc;
1796 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1798 /* Do not allow using more TC queue pairs than MSI-X vectors exist */
1799 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1800 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
1802 /* Setup queue offset/count for all TCs for given VSI */
1803 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1804 /* See if the given TC is enabled for the given VSI */
1805 if (vsi->tc_config.enabled_tc & BIT(i)) {
1809 switch (vsi->type) {
1811 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
1812 I40E_FLAG_FD_ATR_ENABLED)) ||
1813 vsi->tc_config.enabled_tc != 1) {
1814 qcount = min_t(int, pf->alloc_rss_size,
1820 case I40E_VSI_SRIOV:
1821 case I40E_VSI_VMDQ2:
1823 qcount = num_tc_qps;
1827 vsi->tc_config.tc_info[i].qoffset = offset;
1828 vsi->tc_config.tc_info[i].qcount = qcount;
1830 /* find the next higher power-of-2 of num queue pairs */
1833 while (num_qps && (BIT_ULL(pow) < qcount)) {
1838 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1840 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1841 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1845 /* TC is not enabled so set the offset to
1846 * default queue and allocate one queue
1849 vsi->tc_config.tc_info[i].qoffset = 0;
1850 vsi->tc_config.tc_info[i].qcount = 1;
1851 vsi->tc_config.tc_info[i].netdev_tc = 0;
1855 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1858 /* Set actual Tx/Rx queue pairs */
1859 vsi->num_queue_pairs = offset;
1860 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1861 if (vsi->req_queue_pairs > 0)
1862 vsi->num_queue_pairs = vsi->req_queue_pairs;
1863 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1864 vsi->num_queue_pairs = pf->num_lan_msix;
1867 /* Scheduler section valid can only be set for ADD VSI */
1869 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1871 ctxt->info.up_enable_bits = enabled_tc;
1873 if (vsi->type == I40E_VSI_SRIOV) {
1874 ctxt->info.mapping_flags |=
1875 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1876 for (i = 0; i < vsi->num_queue_pairs; i++)
1877 ctxt->info.queue_mapping[i] =
1878 cpu_to_le16(vsi->base_queue + i);
1880 ctxt->info.mapping_flags |=
1881 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1882 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1884 ctxt->info.valid_sections |= cpu_to_le16(sections);
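/* Illustrative sketch, not driver logic: the per-TC split above for a
 * MAIN VSI with alloc_queue_pairs = 16, DCB enabled and
 * enabled_tc = 0x3 (TC0 and TC1), assuming the RSS size and MSI-X
 * vector count do not cap it further.
 *
 *   numtc = 2, num_tc_qps = 16 / 2 = 8
 *   TC0: qoffset = 0, qcount = 8, qmap number field = ilog2(8) = 3
 *   TC1: qoffset = 8, qcount = 8
 *   disabled TCs: qoffset = 0, qcount = 1
 *   vsi->num_queue_pairs ends up as the accumulated offset, 16
 */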
1888 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1889 * @netdev: the netdevice
1890 * @addr: address to add
1892 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1893 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1895 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1897 struct i40e_netdev_priv *np = netdev_priv(netdev);
1898 struct i40e_vsi *vsi = np->vsi;
1900 if (i40e_add_mac_filter(vsi, addr))
1907 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1908 * @netdev: the netdevice
1909 * @addr: address to remove
1911 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1912 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1914 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1916 struct i40e_netdev_priv *np = netdev_priv(netdev);
1917 struct i40e_vsi *vsi = np->vsi;
1919 /* Under some circumstances, we might receive a request to delete
1920 * our own device address from our uc list. Because we store the
1921 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1922 * such requests and not delete our device address from this list.
1924 if (ether_addr_equal(addr, netdev->dev_addr))
1927 i40e_del_mac_filter(vsi, addr);
1933 * i40e_set_rx_mode - NDO callback to set the netdev filters
1934 * @netdev: network interface device structure
1936 static void i40e_set_rx_mode(struct net_device *netdev)
1938 struct i40e_netdev_priv *np = netdev_priv(netdev);
1939 struct i40e_vsi *vsi = np->vsi;
1941 spin_lock_bh(&vsi->mac_filter_hash_lock);
1943 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1944 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1946 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1948 /* check for other flag changes */
1949 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1950 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1951 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1956 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1957 * @vsi: Pointer to VSI struct
1958 * @from: Pointer to list which contains MAC filter entries - changes to
1959 * those entries need to be undone.
1961 * MAC filter entries from this list were slated for deletion.
1963 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1964 struct hlist_head *from)
1966 struct i40e_mac_filter *f;
1967 struct hlist_node *h;
1969 hlist_for_each_entry_safe(f, h, from, hlist) {
1970 u64 key = i40e_addr_to_hkey(f->macaddr);
1972 /* Move the element back into the MAC filter list */
1973 hlist_del(&f->hlist);
1974 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1979 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1980 * @vsi: Pointer to vsi struct
1981 * @from: Pointer to list which contains MAC filter entries - changes to
1982 * those entries need to be undone.
1984 * MAC filter entries from this list were slated for addition.
1986 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1987 struct hlist_head *from)
1989 struct i40e_new_mac_filter *new;
1990 struct hlist_node *h;
1992 hlist_for_each_entry_safe(new, h, from, hlist) {
1993 /* We can simply free the wrapper structure */
1994 hlist_del(&new->hlist);
2000 * i40e_next_filter - Get the next non-broadcast filter from a list
2001 * @next: pointer to filter in list
2003 * Returns the next non-broadcast filter in the list. Required so that we
2004 * ignore broadcast filters within the list, since these are not handled via
2005 * the normal firmware update path.
2008 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2010 hlist_for_each_entry_continue(next, hlist) {
2011 if (!is_broadcast_ether_addr(next->f->macaddr))
2019 * i40e_update_filter_state - Update filter state based on return data
2021 * @count: Number of filters added
2022 * @add_list: return data from fw
2023 * @add_head: pointer to first filter in current batch
2025 * MAC filter entries from list were slated to be added to device. Returns
2026 * number of successful filters. Note that 0 does NOT mean success!
2029 i40e_update_filter_state(int count,
2030 struct i40e_aqc_add_macvlan_element_data *add_list,
2031 struct i40e_new_mac_filter *add_head)
2036 for (i = 0; i < count; i++) {
2037 /* Always check status of each filter. We don't need to check
2038 * the firmware return status because we pre-set the filter
2039 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2040 * request to the adminq. Thus, if it no longer matches then
2041 * we know the filter is active.
2043 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2044 add_head->state = I40E_FILTER_FAILED;
2046 add_head->state = I40E_FILTER_ACTIVE;
2050 add_head = i40e_next_filter(add_head);
2059 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2060 * @vsi: ptr to the VSI
2061 * @vsi_name: name to display in messages
2062 * @list: the list of filters to send to firmware
2063 * @num_del: the number of filters to delete
2064 * @retval: Set to -EIO on failure to delete
2066 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2067 * *retval instead of a return value so that success does not force ret_val to
2068 * be set to 0. This ensures that a sequence of calls to this function
2069 * preserve the previous value of *retval on successful delete.
2072 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2073 struct i40e_aqc_remove_macvlan_element_data *list,
2074 int num_del, int *retval)
2076 struct i40e_hw *hw = &vsi->back->hw;
2080 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2081 aq_err = hw->aq.asq_last_status;
2083 /* Explicitly ignore and do not report when firmware returns ENOENT */
2084 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2086 dev_info(&vsi->back->pdev->dev,
2087 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2088 vsi_name, i40e_stat_str(hw, aq_ret),
2089 i40e_aq_str(hw, aq_err));
2094 * i40e_aqc_add_filters - Request firmware to add a set of filters
2095 * @vsi: ptr to the VSI
2096 * @vsi_name: name to display in messages
2097 * @list: the list of filters to send to firmware
2098 * @add_head: Position in the add hlist
2099 * @num_add: the number of filters to add
2101 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2102 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2103 * space for more filters.
2106 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2107 struct i40e_aqc_add_macvlan_element_data *list,
2108 struct i40e_new_mac_filter *add_head,
2111 struct i40e_hw *hw = &vsi->back->hw;
2114 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2115 aq_err = hw->aq.asq_last_status;
2116 fcnt = i40e_update_filter_state(num_add, list, add_head);
2118 if (fcnt != num_add) {
2119 if (vsi->type == I40E_VSI_MAIN) {
2120 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2121 dev_warn(&vsi->back->pdev->dev,
2122 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2123 i40e_aq_str(hw, aq_err), vsi_name);
2124 } else if (vsi->type == I40E_VSI_SRIOV ||
2125 vsi->type == I40E_VSI_VMDQ1 ||
2126 vsi->type == I40E_VSI_VMDQ2) {
2127 dev_warn(&vsi->back->pdev->dev,
2128 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2129 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
2131 dev_warn(&vsi->back->pdev->dev,
2132 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2133 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
2139 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2140 * @vsi: pointer to the VSI
2141 * @vsi_name: the VSI name
2144 * This function sets or clears the promiscuous broadcast flags for VLAN
2145 * filters in order to properly receive broadcast frames. Assumes that only
2146 * broadcast filters are passed.
2148 * Returns status indicating success or failure.
2151 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2152 struct i40e_mac_filter *f)
2154 bool enable = f->state == I40E_FILTER_NEW;
2155 struct i40e_hw *hw = &vsi->back->hw;
2158 if (f->vlan == I40E_VLAN_ANY) {
2159 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2164 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2172 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2173 dev_warn(&vsi->back->pdev->dev,
2174 "Error %s, forcing overflow promiscuous on %s\n",
2175 i40e_aq_str(hw, hw->aq.asq_last_status),
2183 * i40e_set_promiscuous - set promiscuous mode
2184 * @pf: board private structure
2185 * @promisc: promisc on or off
2187 * There are different ways of setting promiscuous mode on a PF depending on
2188 * what state/environment we're in. This identifies and sets it appropriately.
2189 * Returns 0 on success.
2191 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2193 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2194 struct i40e_hw *hw = &pf->hw;
2197 if (vsi->type == I40E_VSI_MAIN &&
2198 pf->lan_veb != I40E_NO_VEB &&
2199 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2200 /* set defport ON for Main VSI instead of true promisc
2201 * this way we will get all unicast/multicast and VLAN
2202 * promisc behavior but will not get VF or VMDq traffic
2203 * replicated on the Main VSI.
2206 aq_ret = i40e_aq_set_default_vsi(hw,
2210 aq_ret = i40e_aq_clear_default_vsi(hw,
2214 dev_info(&pf->pdev->dev,
2215 "Set default VSI failed, err %s, aq_err %s\n",
2216 i40e_stat_str(hw, aq_ret),
2217 i40e_aq_str(hw, hw->aq.asq_last_status));
2220 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2226 dev_info(&pf->pdev->dev,
2227 "set unicast promisc failed, err %s, aq_err %s\n",
2228 i40e_stat_str(hw, aq_ret),
2229 i40e_aq_str(hw, hw->aq.asq_last_status));
2231 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2236 dev_info(&pf->pdev->dev,
2237 "set multicast promisc failed, err %s, aq_err %s\n",
2238 i40e_stat_str(hw, aq_ret),
2239 i40e_aq_str(hw, hw->aq.asq_last_status));
2244 pf->cur_promisc = promisc;
2250 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2251 * @vsi: ptr to the VSI
2253 * Push any outstanding VSI filter changes through the AdminQ.
2255 * Returns 0 or error value
2257 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2259 struct hlist_head tmp_add_list, tmp_del_list;
2260 struct i40e_mac_filter *f;
2261 struct i40e_new_mac_filter *new, *add_head = NULL;
2262 struct i40e_hw *hw = &vsi->back->hw;
2263 bool old_overflow, new_overflow;
2264 unsigned int failed_filters = 0;
2265 unsigned int vlan_filters = 0;
2266 char vsi_name[16] = "PF";
2267 int filter_list_len = 0;
2268 i40e_status aq_ret = 0;
2269 u32 changed_flags = 0;
2270 struct hlist_node *h;
2279 /* empty array typed pointers, kzalloc'd later */
2280 struct i40e_aqc_add_macvlan_element_data *add_list;
2281 struct i40e_aqc_remove_macvlan_element_data *del_list;
2283 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2284 usleep_range(1000, 2000);
2287 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2290 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
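/* The XOR yields only the flag bits that changed since the last sync;
 * IFF_ALLMULTI and IFF_PROMISC transitions are acted on further below.
 */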
2291 vsi->current_netdev_flags = vsi->netdev->flags;
2294 INIT_HLIST_HEAD(&tmp_add_list);
2295 INIT_HLIST_HEAD(&tmp_del_list);
2297 if (vsi->type == I40E_VSI_SRIOV)
2298 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2299 else if (vsi->type != I40E_VSI_MAIN)
2300 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2302 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2303 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2305 spin_lock_bh(&vsi->mac_filter_hash_lock);
2306 /* Create a list of filters to delete. */
2307 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2308 if (f->state == I40E_FILTER_REMOVE) {
2309 /* Move the element into temporary del_list */
2310 hash_del(&f->hlist);
2311 hlist_add_head(&f->hlist, &tmp_del_list);
2313 /* Avoid counting removed filters */
2316 if (f->state == I40E_FILTER_NEW) {
2317 /* Create a temporary i40e_new_mac_filter */
2318 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2320 goto err_no_memory_locked;
2322 /* Store pointer to the real filter */
2324 new->state = f->state;
2326 /* Add it to the hash list */
2327 hlist_add_head(&new->hlist, &tmp_add_list);
2330 /* Count the number of active (current and new) VLAN
2331 * filters we have now. Does not count filters which
2332 * are marked for deletion.
2338 retval = i40e_correct_mac_vlan_filters(vsi,
2343 goto err_no_memory_locked;
2345 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2348 /* Now process 'del_list' outside the lock */
2349 if (!hlist_empty(&tmp_del_list)) {
2350 filter_list_len = hw->aq.asq_buf_size /
2351 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2352 list_size = filter_list_len *
2353 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2354 del_list = kzalloc(list_size, GFP_ATOMIC);
2358 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2361 /* handle broadcast filters by updating the broadcast
2362 * promiscuous flag and release filter list.
2364 if (is_broadcast_ether_addr(f->macaddr)) {
2365 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2367 hlist_del(&f->hlist);
2372 /* add to delete list */
2373 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2374 if (f->vlan == I40E_VLAN_ANY) {
2375 del_list[num_del].vlan_tag = 0;
2376 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2378 del_list[num_del].vlan_tag =
2379 cpu_to_le16((u16)(f->vlan));
2382 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2383 del_list[num_del].flags = cmd_flags;
2386 /* flush a full buffer */
2387 if (num_del == filter_list_len) {
2388 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2390 memset(del_list, 0, list_size);
2393 /* Release memory for MAC filter entries which were
2394 * synced up with HW.
2396 hlist_del(&f->hlist);
2401 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2409 if (!hlist_empty(&tmp_add_list)) {
2410 /* Do all the adds now. */
2411 filter_list_len = hw->aq.asq_buf_size /
2412 sizeof(struct i40e_aqc_add_macvlan_element_data);
2413 list_size = filter_list_len *
2414 sizeof(struct i40e_aqc_add_macvlan_element_data);
2415 add_list = kzalloc(list_size, GFP_ATOMIC);
2420 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2421 /* handle broadcast filters by updating the broadcast
2422 * promiscuous flag instead of adding a MAC filter.
2424 if (is_broadcast_ether_addr(new->f->macaddr)) {
2425 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2427 new->state = I40E_FILTER_FAILED;
2429 new->state = I40E_FILTER_ACTIVE;
2433 /* add to add array */
2437 ether_addr_copy(add_list[num_add].mac_addr,
2439 if (new->f->vlan == I40E_VLAN_ANY) {
2440 add_list[num_add].vlan_tag = 0;
2441 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2443 add_list[num_add].vlan_tag =
2444 cpu_to_le16((u16)(new->f->vlan));
2446 add_list[num_add].queue_number = 0;
2447 /* set invalid match method for later detection */
2448 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2449 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2450 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2453 /* flush a full buffer */
2454 if (num_add == filter_list_len) {
2455 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2457 memset(add_list, 0, list_size);
2462 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2465 /* Now move all of the filters from the temp add list back to
2468 spin_lock_bh(&vsi->mac_filter_hash_lock);
2469 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2470 /* Only update the state if we're still NEW */
2471 if (new->f->state == I40E_FILTER_NEW)
2472 new->f->state = new->state;
2473 hlist_del(&new->hlist);
2476 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2481 /* Determine the number of active and failed filters. */
2482 spin_lock_bh(&vsi->mac_filter_hash_lock);
2483 vsi->active_filters = 0;
2484 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2485 if (f->state == I40E_FILTER_ACTIVE)
2486 vsi->active_filters++;
2487 else if (f->state == I40E_FILTER_FAILED)
2490 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2492 /* Check if we are able to exit overflow promiscuous mode. We can
2493 * safely exit if we didn't just enter, we no longer have any failed
2494 * filters, and we have reduced filters below the threshold value.
2496 if (old_overflow && !failed_filters &&
2497 vsi->active_filters < vsi->promisc_threshold) {
2498 dev_info(&pf->pdev->dev,
2499 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2501 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2502 vsi->promisc_threshold = 0;
2505 /* if the VF is not trusted do not do promisc */
2506 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2507 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2511 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2513 /* If we are entering overflow promiscuous, we need to calculate a new
2514 * threshold for when we are safe to exit
2516 if (!old_overflow && new_overflow)
2517 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
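/* Example: with 200 active filters the threshold becomes 150, so the VSI
 * must drop below 150 active filters (with none failed) before overflow
 * promiscuous mode can be left again.
 */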
2519 /* check for changes in promiscuous modes */
2520 if (changed_flags & IFF_ALLMULTI) {
2521 bool cur_multipromisc;
2523 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2524 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2529 retval = i40e_aq_rc_to_posix(aq_ret,
2530 hw->aq.asq_last_status);
2531 dev_info(&pf->pdev->dev,
2532 "set multi promisc failed on %s, err %s aq_err %s\n",
2534 i40e_stat_str(hw, aq_ret),
2535 i40e_aq_str(hw, hw->aq.asq_last_status));
2539 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2542 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2544 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2546 retval = i40e_aq_rc_to_posix(aq_ret,
2547 hw->aq.asq_last_status);
2548 dev_info(&pf->pdev->dev,
2549 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2550 cur_promisc ? "on" : "off",
2552 i40e_stat_str(hw, aq_ret),
2553 i40e_aq_str(hw, hw->aq.asq_last_status));
2557 /* if something went wrong then set the changed flag so we try again */
2559 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2561 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2565 /* Restore elements on the temporary add and delete lists */
2566 spin_lock_bh(&vsi->mac_filter_hash_lock);
2567 err_no_memory_locked:
2568 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2569 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2570 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2572 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2573 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2578 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2579 * @pf: board private structure
2581 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2587 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2590 for (v = 0; v < pf->num_alloc_vsi; v++) {
2592 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2593 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2596 /* come back and try again later */
2597 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2606 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2609 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2611 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2612 return I40E_RXBUFFER_2048;
2614 return I40E_RXBUFFER_3072;
2618 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2619 * @netdev: network interface device structure
2620 * @new_mtu: new value for maximum frame size
2622 * Returns 0 on success, negative on failure
2624 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2626 struct i40e_netdev_priv *np = netdev_priv(netdev);
2627 struct i40e_vsi *vsi = np->vsi;
2628 struct i40e_pf *pf = vsi->back;
2630 if (i40e_enabled_xdp_vsi(vsi)) {
2631 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
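/* Example: a 1500 byte MTU gives a 1522 byte frame (14 byte Ethernet
 * header + 4 byte FCS + 4 byte VLAN tag), which fits the 2048 byte XDP
 * buffer; larger MTUs are rejected unless 3K buffers are in use.
 */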
2633 if (frame_size > i40e_max_xdp_frame_size(vsi))
2637 netdev_info(netdev, "changing MTU from %d to %d\n",
2638 netdev->mtu, new_mtu);
2639 netdev->mtu = new_mtu;
2640 if (netif_running(netdev))
2641 i40e_vsi_reinit_locked(vsi);
2642 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2643 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2648 * i40e_ioctl - Access the hwtstamp interface
2649 * @netdev: network interface device structure
2650 * @ifr: interface request data
2651 * @cmd: ioctl command
2653 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2655 struct i40e_netdev_priv *np = netdev_priv(netdev);
2656 struct i40e_pf *pf = np->vsi->back;
2660 return i40e_ptp_get_ts_config(pf, ifr);
2662 return i40e_ptp_set_ts_config(pf, ifr);
2669 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2670 * @vsi: the vsi being adjusted
2672 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2674 struct i40e_vsi_context ctxt;
2677 /* Don't modify stripping options if a port VLAN is active */
2681 if ((vsi->info.valid_sections &
2682 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2683 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2684 return; /* already enabled */
2686 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2687 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2688 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2690 ctxt.seid = vsi->seid;
2691 ctxt.info = vsi->info;
2692 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2694 dev_info(&vsi->back->pdev->dev,
2695 "update vlan stripping failed, err %s aq_err %s\n",
2696 i40e_stat_str(&vsi->back->hw, ret),
2697 i40e_aq_str(&vsi->back->hw,
2698 vsi->back->hw.aq.asq_last_status));
2703 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2704 * @vsi: the vsi being adjusted
2706 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2708 struct i40e_vsi_context ctxt;
2711 /* Don't modify stripping options if a port VLAN is active */
2715 if ((vsi->info.valid_sections &
2716 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2717 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2718 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2719 return; /* already disabled */
2721 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2722 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2723 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2725 ctxt.seid = vsi->seid;
2726 ctxt.info = vsi->info;
2727 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2729 dev_info(&vsi->back->pdev->dev,
2730 "update vlan stripping failed, err %s aq_err %s\n",
2731 i40e_stat_str(&vsi->back->hw, ret),
2732 i40e_aq_str(&vsi->back->hw,
2733 vsi->back->hw.aq.asq_last_status));
2738 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2739 * @vsi: the vsi being configured
2740 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2742 * This is a helper function for adding a new MAC/VLAN filter with the
2743 * specified VLAN for each existing MAC address already in the hash table.
2744 * This function does *not* perform any accounting to update filters based on
2747 * NOTE: this function expects to be called while under the
2748 * mac_filter_hash_lock
2750 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2752 struct i40e_mac_filter *f, *add_f;
2753 struct hlist_node *h;
2756 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2757 if (f->state == I40E_FILTER_REMOVE)
2759 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2761 dev_info(&vsi->back->pdev->dev,
2762 "Could not add vlan filter %d for %pM\n",
2772 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2773 * @vsi: the VSI being configured
2774 * @vid: VLAN id to be added
2776 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2783 /* The network stack will attempt to add VID=0, with the intention to
2784 * receive priority tagged packets with a VLAN of 0. Our HW receives
2785 * these packets by default when configured to receive untagged
2786 * packets, so we don't need to add a filter for this case.
2787 * Additionally, HW interprets adding a VID=0 filter as meaning to
2788 * receive *only* tagged traffic and stops receiving untagged traffic.
2789 * Thus, we do not want to actually add a filter for VID=0
2794 /* Locked once because all functions invoked below iterate the list */
2795 spin_lock_bh(&vsi->mac_filter_hash_lock);
2796 err = i40e_add_vlan_all_mac(vsi, vid);
2797 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2801 /* schedule our worker thread which will take care of
2802 * applying the new filter changes
2804 i40e_service_event_schedule(vsi->back);
2809 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2810 * @vsi: the vsi being configured
2811 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2813 * This function should be used to remove all VLAN filters which match the
2814 * given VID. It does not schedule the service event and does not take the
2815 * mac_filter_hash_lock so it may be combined with other operations under
2816 * a single invocation of the mac_filter_hash_lock.
2818 * NOTE: this function expects to be called while under the
2819 * mac_filter_hash_lock
2821 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2823 struct i40e_mac_filter *f;
2824 struct hlist_node *h;
2827 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2829 __i40e_del_filter(vsi, f);
2834 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2835 * @vsi: the VSI being configured
2836 * @vid: VLAN id to be removed
2838 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2840 if (!vid || vsi->info.pvid)
2843 spin_lock_bh(&vsi->mac_filter_hash_lock);
2844 i40e_rm_vlan_all_mac(vsi, vid);
2845 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2847 /* schedule our worker thread which will take care of
2848 * applying the new filter changes
2850 i40e_service_event_schedule(vsi->back);
2854 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2855 * @netdev: network interface to be adjusted
2856 * @proto: unused protocol value
2857 * @vid: vlan id to be added
2859 * net_device_ops implementation for adding vlan ids
2861 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2862 __always_unused __be16 proto, u16 vid)
2864 struct i40e_netdev_priv *np = netdev_priv(netdev);
2865 struct i40e_vsi *vsi = np->vsi;
2868 if (vid >= VLAN_N_VID)
2871 ret = i40e_vsi_add_vlan(vsi, vid);
2873 set_bit(vid, vsi->active_vlans);
2879 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2880 * @netdev: network interface to be adjusted
2881 * @proto: unused protocol value
2882 * @vid: vlan id to be added
2884 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2885 __always_unused __be16 proto, u16 vid)
2887 struct i40e_netdev_priv *np = netdev_priv(netdev);
2888 struct i40e_vsi *vsi = np->vsi;
2890 if (vid >= VLAN_N_VID)
2892 set_bit(vid, vsi->active_vlans);
2896 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2897 * @netdev: network interface to be adjusted
2898 * @proto: unused protocol value
2899 * @vid: vlan id to be removed
2901 * net_device_ops implementation for removing vlan ids
2903 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2904 __always_unused __be16 proto, u16 vid)
2906 struct i40e_netdev_priv *np = netdev_priv(netdev);
2907 struct i40e_vsi *vsi = np->vsi;
2909 /* return code is ignored as there is nothing a user
2910 * can do about failure to remove and a log message was
2911 * already printed from the other function
2913 i40e_vsi_kill_vlan(vsi, vid);
2915 clear_bit(vid, vsi->active_vlans);
2921 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2922 * @vsi: the vsi being brought back up
2924 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2931 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2932 i40e_vlan_stripping_enable(vsi);
2934 i40e_vlan_stripping_disable(vsi);
2936 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2937 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2942 * i40e_vsi_add_pvid - Add pvid for the VSI
2943 * @vsi: the vsi being adjusted
2944 * @vid: the vlan id to set as a PVID
2946 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2948 struct i40e_vsi_context ctxt;
2951 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2952 vsi->info.pvid = cpu_to_le16(vid);
2953 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2954 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2955 I40E_AQ_VSI_PVLAN_EMOD_STR;
2957 ctxt.seid = vsi->seid;
2958 ctxt.info = vsi->info;
2959 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2961 dev_info(&vsi->back->pdev->dev,
2962 "add pvid failed, err %s aq_err %s\n",
2963 i40e_stat_str(&vsi->back->hw, ret),
2964 i40e_aq_str(&vsi->back->hw,
2965 vsi->back->hw.aq.asq_last_status));
2973 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2974 * @vsi: the vsi being adjusted
2976 * Just use the vlan_rx_register() service to put it back to normal
2978 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2982 i40e_vlan_stripping_disable(vsi);
2986 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2987 * @vsi: ptr to the VSI
2989 * If this function returns with an error, then it's possible one or
2990 * more of the rings is populated (while the rest are not). It is the
2991 * caller's duty to clean those orphaned rings.
2993 * Return 0 on success, negative on failure
2995 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2999 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3000 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3002 if (!i40e_enabled_xdp_vsi(vsi))
3005 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3006 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3012 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3013 * @vsi: ptr to the VSI
3015 * Free VSI's transmit software resources
3017 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3021 if (vsi->tx_rings) {
3022 for (i = 0; i < vsi->num_queue_pairs; i++)
3023 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3024 i40e_free_tx_resources(vsi->tx_rings[i]);
3027 if (vsi->xdp_rings) {
3028 for (i = 0; i < vsi->num_queue_pairs; i++)
3029 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3030 i40e_free_tx_resources(vsi->xdp_rings[i]);
3035 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3036 * @vsi: ptr to the VSI
3038 * If this function returns with an error, then it's possible one or
3039 * more of the rings is populated (while the rest are not). It is the
3040 * caller's duty to clean those orphaned rings.
3042 * Return 0 on success, negative on failure
3044 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3048 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3049 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3054 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3055 * @vsi: ptr to the VSI
3057 * Free all receive software resources
3059 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3066 for (i = 0; i < vsi->num_queue_pairs; i++)
3067 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3068 i40e_free_rx_resources(vsi->rx_rings[i]);
3072 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3073 * @ring: The Tx ring to configure
3075 * This enables/disables XPS for a given Tx descriptor ring
3076 * based on the TCs enabled for the VSI that ring belongs to.
3078 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3082 if (!ring->q_vector || !ring->netdev || ring->ch)
3085 /* We only initialize XPS once, so as not to overwrite user settings */
3086 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3089 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3090 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3095 * i40e_xsk_umem - Retrieve the AF_XDP UMEM if XDP and zero-copy are enabled
3096 * @ring: The Tx or Rx ring
3098 * Returns the UMEM or NULL.
3100 static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
3102 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3103 int qid = ring->queue_index;
3105 if (ring_is_xdp(ring))
3106 qid -= ring->vsi->alloc_queue_pairs;
3108 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3111 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
3115 * i40e_configure_tx_ring - Configure a transmit ring context
3116 * @ring: The Tx ring to configure
3118 * Configure the Tx descriptor ring in the HMC context.
3120 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3122 struct i40e_vsi *vsi = ring->vsi;
3123 u16 pf_q = vsi->base_queue + ring->queue_index;
3124 struct i40e_hw *hw = &vsi->back->hw;
3125 struct i40e_hmc_obj_txq tx_ctx;
3126 i40e_status err = 0;
3129 if (ring_is_xdp(ring))
3130 ring->xsk_umem = i40e_xsk_umem(ring);
3132 /* some ATR related tx ring init */
3133 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3134 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3135 ring->atr_count = 0;
3137 ring->atr_sample_rate = 0;
3141 i40e_config_xps_tx_ring(ring);
3143 /* clear the context structure first */
3144 memset(&tx_ctx, 0, sizeof(tx_ctx));
3146 tx_ctx.new_context = 1;
3147 tx_ctx.base = (ring->dma / 128);
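/* The HMC queue context stores the ring base address in 128-byte units,
 * hence the divide above; qlen below is a count of descriptors.
 */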
3148 tx_ctx.qlen = ring->count;
3149 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3150 I40E_FLAG_FD_ATR_ENABLED));
3151 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3152 /* FDIR VSI tx ring can still use RS bit and writebacks */
3153 if (vsi->type != I40E_VSI_FDIR)
3154 tx_ctx.head_wb_ena = 1;
3155 tx_ctx.head_wb_addr = ring->dma +
3156 (ring->count * sizeof(struct i40e_tx_desc));
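/* The head write-back word lives immediately past the last descriptor in
 * the same DMA area (the descriptor allocation is assumed to reserve room
 * for it).
 */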
3158 /* As part of VSI creation/update, FW allocates certain
3159 * Tx arbitration queue sets for each TC enabled for
3160 * the VSI. The FW returns the handles to these queue
3161 * sets as part of the response buffer to Add VSI,
3162 * Update VSI, etc. AQ commands. It is expected that
3163 * these queue set handles be associated with the Tx
3164 * queues by the driver as part of the TX queue context
3165 * initialization. This has to be done regardless of
3166 * DCB as by default everything is mapped to TC0.
3171 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3174 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3176 tx_ctx.rdylist_act = 0;
3178 /* clear the context in the HMC */
3179 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3181 dev_info(&vsi->back->pdev->dev,
3182 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3183 ring->queue_index, pf_q, err);
3187 /* set the context in the HMC */
3188 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3190 dev_info(&vsi->back->pdev->dev,
3191 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3192 ring->queue_index, pf_q, err);
3196 /* Now associate this queue with this PCI function */
3198 if (ring->ch->type == I40E_VSI_VMDQ2)
3199 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3203 qtx_ctl |= (ring->ch->vsi_number <<
3204 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3205 I40E_QTX_CTL_VFVM_INDX_MASK;
3207 if (vsi->type == I40E_VSI_VMDQ2) {
3208 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3209 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3210 I40E_QTX_CTL_VFVM_INDX_MASK;
3212 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3216 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3217 I40E_QTX_CTL_PF_INDX_MASK);
3218 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3221 /* cache the tail register address for easier writes later */
3222 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3228 * i40e_configure_rx_ring - Configure a receive ring context
3229 * @ring: The Rx ring to configure
3231 * Configure the Rx descriptor ring in the HMC context.
3233 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3235 struct i40e_vsi *vsi = ring->vsi;
3236 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3237 u16 pf_q = vsi->base_queue + ring->queue_index;
3238 struct i40e_hw *hw = &vsi->back->hw;
3239 struct i40e_hmc_obj_rxq rx_ctx;
3240 i40e_status err = 0;
3244 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3246 /* clear the context structure first */
3247 memset(&rx_ctx, 0, sizeof(rx_ctx));
3249 if (ring->vsi->type == I40E_VSI_MAIN)
3250 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3252 ring->xsk_umem = i40e_xsk_umem(ring);
3253 if (ring->xsk_umem) {
3254 ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
3255 XDP_PACKET_HEADROOM;
3256 /* For AF_XDP ZC, we disallow packets spanning
3257 * multiple buffers, which lets us skip that
3258 * handling in the fast path.
3261 ring->zca.free = i40e_zca_free;
3262 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3267 dev_info(&vsi->back->pdev->dev,
3268 "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
3272 ring->rx_buf_len = vsi->rx_buf_len;
3273 if (ring->vsi->type == I40E_VSI_MAIN) {
3274 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3275 MEM_TYPE_PAGE_SHARED,
3282 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3283 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
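/* dbuff is expressed in units of BIT(I40E_RXQ_CTX_DBUFF_SHIFT) bytes
 * (128 bytes), so the buffer length is rounded up to the next chunk.
 */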
3285 rx_ctx.base = (ring->dma / 128);
3286 rx_ctx.qlen = ring->count;
3288 /* use 32 byte descriptors */
3291 /* descriptor type is always zero
3294 rx_ctx.hsplit_0 = 0;
3296 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3297 if (hw->revision_id == 0)
3298 rx_ctx.lrxqthresh = 0;
3300 rx_ctx.lrxqthresh = 1;
3301 rx_ctx.crcstrip = 1;
3303 /* this controls whether VLAN is stripped from inner headers */
3305 /* set the prefena field to 1 because the manual says to */
3308 /* clear the context in the HMC */
3309 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3311 dev_info(&vsi->back->pdev->dev,
3312 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3313 ring->queue_index, pf_q, err);
3317 /* set the context in the HMC */
3318 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3320 dev_info(&vsi->back->pdev->dev,
3321 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3322 ring->queue_index, pf_q, err);
3326 /* configure Rx buffer alignment */
3327 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3328 clear_ring_build_skb_enabled(ring);
3330 set_ring_build_skb_enabled(ring);
3332 /* cache tail for quicker writes, and clear the reg before use */
3333 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3334 writel(0, ring->tail);
3336 ok = ring->xsk_umem ?
3337 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
3338 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
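/* Note the opposite boolean conventions: the ZC allocator returns true on
 * success while i40e_alloc_rx_buffers() returns true on failure, hence the
 * '!' on the non-ZC path. 'ok' is true when the buffers were posted.
 */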
3340 /* Log this in case the user has forgotten to give the kernel
3341 * any buffers, even later in the application.
3343 dev_info(&vsi->back->pdev->dev,
3344 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3345 ring->xsk_umem ? "UMEM enabled " : "",
3346 ring->queue_index, pf_q);
3353 * i40e_vsi_configure_tx - Configure the VSI for Tx
3354 * @vsi: VSI structure describing this set of rings and resources
3356 * Configure the Tx VSI for operation.
3358 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3363 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3364 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3366 if (!i40e_enabled_xdp_vsi(vsi))
3369 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3370 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3376 * i40e_vsi_configure_rx - Configure the VSI for Rx
3377 * @vsi: the VSI being configured
3379 * Configure the Rx VSI for operation.
3381 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3386 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3387 vsi->max_frame = I40E_MAX_RXBUFFER;
3388 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3389 #if (PAGE_SIZE < 8192)
3390 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3391 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3392 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3393 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3396 vsi->max_frame = I40E_MAX_RXBUFFER;
3397 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3401 /* set up individual rings */
3402 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3403 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3409 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3410 * @vsi: ptr to the VSI
3412 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3414 struct i40e_ring *tx_ring, *rx_ring;
3415 u16 qoffset, qcount;
3418 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3419 /* Reset the TC information */
3420 for (i = 0; i < vsi->num_queue_pairs; i++) {
3421 rx_ring = vsi->rx_rings[i];
3422 tx_ring = vsi->tx_rings[i];
3423 rx_ring->dcb_tc = 0;
3424 tx_ring->dcb_tc = 0;
3429 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3430 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3433 qoffset = vsi->tc_config.tc_info[n].qoffset;
3434 qcount = vsi->tc_config.tc_info[n].qcount;
3435 for (i = qoffset; i < (qoffset + qcount); i++) {
3436 rx_ring = vsi->rx_rings[i];
3437 tx_ring = vsi->tx_rings[i];
3438 rx_ring->dcb_tc = n;
3439 tx_ring->dcb_tc = n;
3445 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3446 * @vsi: ptr to the VSI
3448 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3451 i40e_set_rx_mode(vsi->netdev);
3455 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3456 * @vsi: Pointer to the targeted VSI
3458 * This function replays the hlist on the hw where all the SB Flow Director
3459 * filters were saved.
3461 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3463 struct i40e_fdir_filter *filter;
3464 struct i40e_pf *pf = vsi->back;
3465 struct hlist_node *node;
3467 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3470 /* Reset FDir counters as we're replaying all existing filters */
3471 pf->fd_tcp4_filter_cnt = 0;
3472 pf->fd_udp4_filter_cnt = 0;
3473 pf->fd_sctp4_filter_cnt = 0;
3474 pf->fd_ip4_filter_cnt = 0;
3476 hlist_for_each_entry_safe(filter, node,
3477 &pf->fdir_filter_list, fdir_node) {
3478 i40e_add_del_fdir(vsi, filter, true);
3483 * i40e_vsi_configure - Set up the VSI for action
3484 * @vsi: the VSI being configured
3486 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3490 i40e_set_vsi_rx_mode(vsi);
3491 i40e_restore_vlan(vsi);
3492 i40e_vsi_config_dcb_rings(vsi);
3493 err = i40e_vsi_configure_tx(vsi);
3495 err = i40e_vsi_configure_rx(vsi);
3501 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3502 * @vsi: the VSI being configured
3504 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3506 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3507 struct i40e_pf *pf = vsi->back;
3508 struct i40e_hw *hw = &pf->hw;
3513 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3514 * and PFINT_LNKLSTn registers, e.g.:
3515 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3517 qp = vsi->base_queue;
3518 vector = vsi->base_vector;
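/* Example: the first queue vector is MSI-X vector 1 (vector 0 handles the
 * misc/other causes), so it programs ITRN/RATEN/LNKLSTN index 0 via the
 * 'vector - 1' register indices used below.
 */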
3519 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3520 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3522 q_vector->rx.next_update = jiffies + 1;
3523 q_vector->rx.target_itr =
3524 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3525 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3526 q_vector->rx.target_itr);
3527 q_vector->rx.current_itr = q_vector->rx.target_itr;
3529 q_vector->tx.next_update = jiffies + 1;
3530 q_vector->tx.target_itr =
3531 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3532 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3533 q_vector->tx.target_itr);
3534 q_vector->tx.current_itr = q_vector->tx.target_itr;
3536 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3537 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3539 /* Linked list for the queuepairs assigned to this vector */
3540 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3541 for (q = 0; q < q_vector->num_ringpairs; q++) {
3542 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
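/* With XDP enabled the vector's queue list is chained
 * Rx(qp) -> XDP Tx(qp + alloc_queue_pairs) -> Tx(qp) -> Rx(qp + 1):
 * the RQCTL below points at the XDP Tx queue, whose TQCTL points back at
 * the regular Tx queue.
 */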
3545 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3546 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3547 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3548 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3549 (I40E_QUEUE_TYPE_TX <<
3550 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3552 wr32(hw, I40E_QINT_RQCTL(qp), val);
3555 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3556 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3557 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3558 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3559 (I40E_QUEUE_TYPE_TX <<
3560 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3562 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3565 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3566 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3567 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3568 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3569 (I40E_QUEUE_TYPE_RX <<
3570 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3572 /* Terminate the linked list */
3573 if (q == (q_vector->num_ringpairs - 1))
3574 val |= (I40E_QUEUE_END_OF_LIST <<
3575 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3577 wr32(hw, I40E_QINT_TQCTL(qp), val);
3586 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3587 * @pf: pointer to private device data structure
3589 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3591 struct i40e_hw *hw = &pf->hw;
3594 /* clear things first */
3595 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3596 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3598 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3599 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3600 I40E_PFINT_ICR0_ENA_GRST_MASK |
3601 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3602 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3603 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3604 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3605 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3607 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3608 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3610 if (pf->flags & I40E_FLAG_PTP)
3611 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3613 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3615 /* SW_ITR_IDX = 0, but don't change INTENA */
3616 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3617 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3619 /* OTHER_ITR_IDX = 0 */
3620 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3624 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3625 * @vsi: the VSI being configured
3627 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3629 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3630 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3631 struct i40e_pf *pf = vsi->back;
3632 struct i40e_hw *hw = &pf->hw;
3635 /* set the ITR configuration */
3636 q_vector->rx.next_update = jiffies + 1;
3637 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3638 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
3639 q_vector->rx.current_itr = q_vector->rx.target_itr;
3640 q_vector->tx.next_update = jiffies + 1;
3641 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3642 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
3643 q_vector->tx.current_itr = q_vector->tx.target_itr;
3645 i40e_enable_misc_int_causes(pf);
3647 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3648 wr32(hw, I40E_PFINT_LNKLST0, 0);
3650 /* Associate the queue pair to the vector and enable the queue int */
3651 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3652 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3653 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3654 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3656 wr32(hw, I40E_QINT_RQCTL(0), val);
3658 if (i40e_enabled_xdp_vsi(vsi)) {
3659 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3660 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3662 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3664 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3667 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3668 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3669 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3671 wr32(hw, I40E_QINT_TQCTL(0), val);
3676 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3677 * @pf: board private structure
3679 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3681 struct i40e_hw *hw = &pf->hw;
3683 wr32(hw, I40E_PFINT_DYN_CTL0,
3684 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3689 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3690 * @pf: board private structure
3692 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3694 struct i40e_hw *hw = &pf->hw;
3697 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3698 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3699 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3701 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3706 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3707 * @irq: interrupt number
3708 * @data: pointer to a q_vector
3710 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3712 struct i40e_q_vector *q_vector = data;
3714 if (!q_vector->tx.ring && !q_vector->rx.ring)
3717 napi_schedule_irqoff(&q_vector->napi);
3723 * i40e_irq_affinity_notify - Callback for affinity changes
3724 * @notify: context as to what irq was changed
3725 * @mask: the new affinity mask
3727 * This is a callback function used by the irq_set_affinity_notifier function
3728 * so that we may register to receive changes to the irq affinity masks.
3730 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3731 const cpumask_t *mask)
3733 struct i40e_q_vector *q_vector =
3734 container_of(notify, struct i40e_q_vector, affinity_notify);
3736 cpumask_copy(&q_vector->affinity_mask, mask);
3740 * i40e_irq_affinity_release - Callback for affinity notifier release
3741 * @ref: internal core kernel usage
3743 * This is a callback function used by the irq_set_affinity_notifier function
3744 * to inform the current notification subscriber that they will no longer
3745 * receive notifications.
3747 static void i40e_irq_affinity_release(struct kref *ref) {}
3750 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3751 * @vsi: the VSI being configured
3752 * @basename: name for the vector
3754 * Allocates MSI-X vectors and requests interrupts from the kernel.
3756 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3758 int q_vectors = vsi->num_q_vectors;
3759 struct i40e_pf *pf = vsi->back;
3760 int base = vsi->base_vector;
3767 for (vector = 0; vector < q_vectors; vector++) {
3768 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3770 irq_num = pf->msix_entries[base + vector].vector;
3772 if (q_vector->tx.ring && q_vector->rx.ring) {
3773 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3774 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3776 } else if (q_vector->rx.ring) {
3777 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3778 "%s-%s-%d", basename, "rx", rx_int_idx++);
3779 } else if (q_vector->tx.ring) {
3780 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3781 "%s-%s-%d", basename, "tx", tx_int_idx++);
3783 /* skip this unused q_vector */
3786 err = request_irq(irq_num,
3792 dev_info(&pf->pdev->dev,
3793 "MSIX request_irq failed, error: %d\n", err);
3794 goto free_queue_irqs;
3797 /* register for affinity change notifications */
3798 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3799 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3800 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3801 /* Spread affinity hints out across online CPUs.
3803 * get_cpu_mask returns a static constant mask with
3804 * a permanent lifetime so it's ok to pass to
3805 * irq_set_affinity_hint without making a copy.
3807 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3808 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3811 vsi->irqs_ready = true;
3817 irq_num = pf->msix_entries[base + vector].vector;
3818 irq_set_affinity_notifier(irq_num, NULL);
3819 irq_set_affinity_hint(irq_num, NULL);
3820 free_irq(irq_num, &vsi->q_vectors[vector]);
3826 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3827 * @vsi: the VSI being un-configured
3829 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3831 struct i40e_pf *pf = vsi->back;
3832 struct i40e_hw *hw = &pf->hw;
3833 int base = vsi->base_vector;
3836 /* disable interrupt causation from each queue */
3837 for (i = 0; i < vsi->num_queue_pairs; i++) {
3840 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3841 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3842 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3844 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3845 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3846 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3848 if (!i40e_enabled_xdp_vsi(vsi))
3850 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3853 /* disable each interrupt */
3854 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3855 for (i = vsi->base_vector;
3856 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3857 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3860 for (i = 0; i < vsi->num_q_vectors; i++)
3861 synchronize_irq(pf->msix_entries[i + base].vector);
3863 /* Legacy and MSI mode - this stops all interrupt handling */
3864 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3865 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3867 synchronize_irq(pf->pdev->irq);
3872 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3873 * @vsi: the VSI being configured
3875 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3877 struct i40e_pf *pf = vsi->back;
3880 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3881 for (i = 0; i < vsi->num_q_vectors; i++)
3882 i40e_irq_dynamic_enable(vsi, i);
3884 i40e_irq_dynamic_enable_icr0(pf);
3887 i40e_flush(&pf->hw);
3892 * i40e_free_misc_vector - Free the vector that handles non-queue events
3893 * @pf: board private structure
3895 static void i40e_free_misc_vector(struct i40e_pf *pf)
3898 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3899 i40e_flush(&pf->hw);
3901 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3902 synchronize_irq(pf->msix_entries[0].vector);
3903 free_irq(pf->msix_entries[0].vector, pf);
3904 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3909 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3910 * @irq: interrupt number
3911 * @data: pointer to a q_vector
3913 * This is the handler used for all MSI/Legacy interrupts, and deals
3914 * with both queue and non-queue interrupts. This is also used in
3915 * MSIX mode to handle the non-queue interrupts.
3917 static irqreturn_t i40e_intr(int irq, void *data)
3919 struct i40e_pf *pf = (struct i40e_pf *)data;
3920 struct i40e_hw *hw = &pf->hw;
3921 irqreturn_t ret = IRQ_NONE;
3922 u32 icr0, icr0_remaining;
3925 icr0 = rd32(hw, I40E_PFINT_ICR0);
3926 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3928 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3929 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3932 /* if interrupt but no bits showing, must be SWINT */
3933 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3934 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3937 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3938 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3939 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3940 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3941 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3944 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3945 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3946 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3947 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3949 /* We do not have a way to disarm queue causes while leaving
3950 * the interrupt enabled for all other causes. Ideally the
3951 * interrupt would be disabled while we are in NAPI, but
3952 * this is not a performance path and napi_schedule()
3953 * can deal with rescheduling.
3955 if (!test_bit(__I40E_DOWN, pf->state))
3956 napi_schedule_irqoff(&q_vector->napi);
3959 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3960 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3961 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3962 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3965 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3966 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3967 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3970 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3971 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3972 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3975 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3976 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3977 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3978 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3979 val = rd32(hw, I40E_GLGEN_RSTAT);
3980 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3981 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3982 if (val == I40E_RESET_CORER) {
3984 } else if (val == I40E_RESET_GLOBR) {
3986 } else if (val == I40E_RESET_EMPR) {
3988 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
3992 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3993 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3994 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3995 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3996 rd32(hw, I40E_PFHMC_ERRORINFO),
3997 rd32(hw, I40E_PFHMC_ERRORDATA));
4000 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4001 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4003 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
4004 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4005 i40e_ptp_tx_hwtstamp(pf);
4009 /* If a critical error is pending we have no choice but to reset the device.
4011 * Report and mask out any remaining unexpected interrupts.
4013 icr0_remaining = icr0 & ena_mask;
4014 if (icr0_remaining) {
4015 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4017 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4018 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4019 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4020 dev_info(&pf->pdev->dev, "device will be reset\n");
4021 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4022 i40e_service_event_schedule(pf);
4024 ena_mask &= ~icr0_remaining;
4029 /* re-enable interrupt causes */
4030 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4031 if (!test_bit(__I40E_DOWN, pf->state) ||
4032 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4033 i40e_service_event_schedule(pf);
4034 i40e_irq_dynamic_enable_icr0(pf);
4041 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4042 * @tx_ring: tx ring to clean
4043 * @budget: how many cleans we're allowed
4045 * Returns true if there's any budget left (i.e. the clean finished)
4047 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4049 struct i40e_vsi *vsi = tx_ring->vsi;
4050 u16 i = tx_ring->next_to_clean;
4051 struct i40e_tx_buffer *tx_buf;
4052 struct i40e_tx_desc *tx_desc;
4054 tx_buf = &tx_ring->tx_bi[i];
4055 tx_desc = I40E_TX_DESC(tx_ring, i);
4056 i -= tx_ring->count;
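/* 'i' is kept biased by -count so a ring wrap can be detected by the
 * index reaching zero; the bias is re-applied after each wrap and removed
 * again before next_to_clean is written back at the end.
 */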
4059 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4061 /* if next_to_watch is not set then there is no work pending */
4065 /* prevent any other reads prior to eop_desc */
4068 /* if the descriptor isn't done, no work yet to do */
4069 if (!(eop_desc->cmd_type_offset_bsz &
4070 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4073 /* clear next_to_watch to prevent false hangs */
4074 tx_buf->next_to_watch = NULL;
4076 tx_desc->buffer_addr = 0;
4077 tx_desc->cmd_type_offset_bsz = 0;
4078 /* move past filter desc */
4083 i -= tx_ring->count;
4084 tx_buf = tx_ring->tx_bi;
4085 tx_desc = I40E_TX_DESC(tx_ring, 0);
4087 /* unmap skb header data */
4088 dma_unmap_single(tx_ring->dev,
4089 dma_unmap_addr(tx_buf, dma),
4090 dma_unmap_len(tx_buf, len),
4092 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4093 kfree(tx_buf->raw_buf);
4095 tx_buf->raw_buf = NULL;
4096 tx_buf->tx_flags = 0;
4097 tx_buf->next_to_watch = NULL;
4098 dma_unmap_len_set(tx_buf, len, 0);
4099 tx_desc->buffer_addr = 0;
4100 tx_desc->cmd_type_offset_bsz = 0;
4102 /* move us past the eop_desc for start of next FD desc */
4107 i -= tx_ring->count;
4108 tx_buf = tx_ring->tx_bi;
4109 tx_desc = I40E_TX_DESC(tx_ring, 0);
4112 /* update budget accounting */
4114 } while (likely(budget));
4116 i += tx_ring->count;
4117 tx_ring->next_to_clean = i;
4119 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4120 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4126 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4127 * @irq: interrupt number
4128 * @data: pointer to a q_vector
4130 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4132 struct i40e_q_vector *q_vector = data;
4133 struct i40e_vsi *vsi;
4135 if (!q_vector->tx.ring)
4138 vsi = q_vector->tx.ring->vsi;
4139 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4145 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4146 * @vsi: the VSI being configured
4147 * @v_idx: vector index
4148 * @qp_idx: queue pair index
4150 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4152 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4153 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4154 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4156 tx_ring->q_vector = q_vector;
4157 tx_ring->next = q_vector->tx.ring;
4158 q_vector->tx.ring = tx_ring;
4159 q_vector->tx.count++;
4161 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4162 if (i40e_enabled_xdp_vsi(vsi)) {
4163 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4165 xdp_ring->q_vector = q_vector;
4166 xdp_ring->next = q_vector->tx.ring;
4167 q_vector->tx.ring = xdp_ring;
4168 q_vector->tx.count++;
4171 rx_ring->q_vector = q_vector;
4172 rx_ring->next = q_vector->rx.ring;
4173 q_vector->rx.ring = rx_ring;
4174 q_vector->rx.count++;
4178 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4179 * @vsi: the VSI being configured
4181 * This function maps descriptor rings to the queue-specific vectors
4182 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4183 * one vector per queue pair, but on a constrained vector budget, we
4184 * group the queue pairs as "efficiently" as possible.
4186 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4188 int qp_remaining = vsi->num_queue_pairs;
4189 int q_vectors = vsi->num_q_vectors;
4194 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4195 * group them so there are multiple queues per vector.
4196 * It is also important to go through all the vectors available to be
4197 * sure that if we don't use all the vectors, that the remaining vectors
4198 * are cleared. This is especially important when decreasing the
4199 * number of queues in use.
4201 for (; v_start < q_vectors; v_start++) {
4202 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4204 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4206 q_vector->num_ringpairs = num_ringpairs;
4207 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4209 q_vector->rx.count = 0;
4210 q_vector->tx.count = 0;
4211 q_vector->rx.ring = NULL;
4212 q_vector->tx.ring = NULL;
4214 while (num_ringpairs--) {
4215 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
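/* Illustrative sketch, not part of the driver: how the loop above spreads
 * queue pairs over a smaller vector budget.  With 10 queue pairs and 4
 * vectors, DIV_ROUND_UP hands out 3, 3, 2, 2 ring pairs as qp_remaining
 * shrinks.  The helper name is hypothetical.
 */
static inline int i40e_example_qps_for_vector(int num_qps, int num_vectors, int v)
{
	int qp_remaining = num_qps;
	int i, n = 0;

	for (i = 0; i <= v && i < num_vectors; i++) {
		n = DIV_ROUND_UP(qp_remaining, num_vectors - i);
		qp_remaining -= n;
	}
	return n;
}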
4223 * i40e_vsi_request_irq - Request IRQ from the OS
4224 * @vsi: the VSI being configured
4225 * @basename: name for the vector
4227 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4229 struct i40e_pf *pf = vsi->back;
4232 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4233 err = i40e_vsi_request_irq_msix(vsi, basename);
4234 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4235 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4238 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4242 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4247 #ifdef CONFIG_NET_POLL_CONTROLLER
4249 * i40e_netpoll - A Polling 'interrupt' handler
4250 * @netdev: network interface device structure
4252 * This is used by netconsole to send skbs without having to re-enable
4253 * interrupts. It's not called while the normal interrupt routine is executing.
4255 static void i40e_netpoll(struct net_device *netdev)
4257 struct i40e_netdev_priv *np = netdev_priv(netdev);
4258 struct i40e_vsi *vsi = np->vsi;
4259 struct i40e_pf *pf = vsi->back;
4262 /* if interface is down do nothing */
4263 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4266 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4267 for (i = 0; i < vsi->num_q_vectors; i++)
4268 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4270 i40e_intr(pf->pdev->irq, netdev);
4275 #define I40E_QTX_ENA_WAIT_COUNT 50
4278 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4279 * @pf: the PF being configured
4280 * @pf_q: the PF queue
4281 * @enable: enable or disable state of the queue
4283 * This routine will wait for the given Tx queue of the PF to reach the
4284 * enabled or disabled state.
4285 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4286 * multiple retries; else will return 0 in case of success.
4288 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4293 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4294 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4295 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4298 usleep_range(10, 20);
4300 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4307 * i40e_control_tx_q - Start or stop a particular Tx queue
4308 * @pf: the PF structure
4309 * @pf_q: the PF queue to configure
4310 * @enable: start or stop the queue
4312 * This function enables or disables a single queue. Note that any delay
4313 * required after the operation is expected to be handled by the caller of this function.
4316 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4318 struct i40e_hw *hw = &pf->hw;
4322 /* warn the TX unit of coming changes */
4323 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4325 usleep_range(10, 20);
4327 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4328 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4329 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4330 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4332 usleep_range(1000, 2000);
4335 /* Skip if the queue is already in the requested state */
4336 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4339 /* turn on/off the queue */
4341 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4342 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4344 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4347 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4351 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4353 * @pf: the PF structure
4354 * @pf_q: the PF queue to configure
4355 * @is_xdp: true if the queue is used for XDP
4356 * @enable: start or stop the queue
4358 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4359 bool is_xdp, bool enable)
4363 i40e_control_tx_q(pf, pf_q, enable);
4365 /* wait for the change to finish */
4366 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4368 dev_info(&pf->pdev->dev,
4369 "VSI seid %d %sTx ring %d %sable timeout\n",
4370 seid, (is_xdp ? "XDP " : ""), pf_q,
4371 (enable ? "en" : "dis"));
4378 * i40e_vsi_control_tx - Start or stop a VSI's rings
4379 * @vsi: the VSI being configured
4380 * @enable: start or stop the rings
4382 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4384 struct i40e_pf *pf = vsi->back;
4385 int i, pf_q, ret = 0;
4387 pf_q = vsi->base_queue;
4388 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4389 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4391 false /*is xdp*/, enable);
4395 if (!i40e_enabled_xdp_vsi(vsi))
4398 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4399 pf_q + vsi->alloc_queue_pairs,
4400 true /*is xdp*/, enable);
4408 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4409 * @pf: the PF being configured
4410 * @pf_q: the PF queue
4411 * @enable: enable or disable state of the queue
4413 * This routine will wait for the given Rx queue of the PF to reach the
4414 * enabled or disabled state.
4415 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4416 * multiple retries; else will return 0 in case of success.
4418 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4423 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4424 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4425 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4428 usleep_range(10, 20);
4430 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4437 * i40e_control_rx_q - Start or stop a particular Rx queue
4438 * @pf: the PF structure
4439 * @pf_q: the PF queue to configure
4440 * @enable: start or stop the queue
4442 * This function enables or disables a single queue. Note that
4443 * any delay required after the operation is expected to be
4444 * handled by the caller of this function.
4446 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4448 struct i40e_hw *hw = &pf->hw;
4452 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4453 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4454 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4455 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4457 usleep_range(1000, 2000);
4460 /* Skip if the queue is already in the requested state */
4461 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4464 /* turn on/off the queue */
4466 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4468 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4470 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4474 * i40e_control_wait_rx_q
4475 * @pf: the PF structure
4476 * @pf_q: queue being configured
4477 * @enable: start or stop the rings
4479 * This function enables or disables a single queue along with waiting
4480 * for the change to finish. The caller of this function should handle
4481 * the delays needed in the case of disabling queues.
4483 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4487 i40e_control_rx_q(pf, pf_q, enable);
4489 /* wait for the change to finish */
4490 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4498 * i40e_vsi_control_rx - Start or stop a VSI's rings
4499 * @vsi: the VSI being configured
4500 * @enable: start or stop the rings
4502 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4504 struct i40e_pf *pf = vsi->back;
4505 int i, pf_q, ret = 0;
4507 pf_q = vsi->base_queue;
4508 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4509 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4511 dev_info(&pf->pdev->dev,
4512 "VSI seid %d Rx ring %d %sable timeout\n",
4513 vsi->seid, pf_q, (enable ? "en" : "dis"));
4518 /* Due to HW errata, on Rx disable only, the register can indicate done
4519 * before it really is. Needs 50ms to be sure
4528 * i40e_vsi_start_rings - Start a VSI's rings
4529 * @vsi: the VSI being configured
4531 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4535 /* do rx first for enable and last for disable */
4536 ret = i40e_vsi_control_rx(vsi, true);
4539 ret = i40e_vsi_control_tx(vsi, true);
4545 * i40e_vsi_stop_rings - Stop a VSI's rings
4546 * @vsi: the VSI being configured
4548 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4550 /* When port TX is suspended, don't wait */
4551 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4552 return i40e_vsi_stop_rings_no_wait(vsi);
4554 /* do rx first for enable and last for disable
4555 * Ignore return value, we need to shutdown whatever we can
4557 i40e_vsi_control_tx(vsi, false);
4558 i40e_vsi_control_rx(vsi, false);
4562 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4563 * @vsi: the VSI being shutdown
4565 * This function stops all the rings for a VSI but does not delay to verify
4566 * that rings have been disabled. It is expected that the caller is shutting
4567 * down multiple VSIs at once and will delay together for all the VSIs after
4568 * initiating the shutdown. This is particularly useful for shutting down lots
4569 * of VFs together. Otherwise, a large delay can be incurred while configuring
4570 * each VSI in serial.
4572 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4574 struct i40e_pf *pf = vsi->back;
4577 pf_q = vsi->base_queue;
4578 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4579 i40e_control_tx_q(pf, pf_q, false);
4580 i40e_control_rx_q(pf, pf_q, false);
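/* Illustrative sketch, not part of the driver: the intended caller pattern
 * for the no-wait variant.  Kick off the stop for every VSI first, absorb
 * the hardware settling time once (the 50 ms noted for the Rx-disable
 * errata above), then verify.  Only i40e_vsi_stop_rings_no_wait() and
 * i40e_vsi_wait_queues_disabled() are real driver functions here; the
 * wrapper itself is hypothetical.
 */
static void i40e_example_stop_many_vsis(struct i40e_vsi **vsis, int n)
{
	int v;

	for (v = 0; v < n; v++)
		i40e_vsi_stop_rings_no_wait(vsis[v]);

	msleep(50);	/* one shared settle delay instead of one per VSI */

	for (v = 0; v < n; v++)
		i40e_vsi_wait_queues_disabled(vsis[v]);
}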
4585 * i40e_vsi_free_irq - Free the irq association with the OS
4586 * @vsi: the VSI being configured
4588 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4590 struct i40e_pf *pf = vsi->back;
4591 struct i40e_hw *hw = &pf->hw;
4592 int base = vsi->base_vector;
4596 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4597 if (!vsi->q_vectors)
4600 if (!vsi->irqs_ready)
4603 vsi->irqs_ready = false;
4604 for (i = 0; i < vsi->num_q_vectors; i++) {
4609 irq_num = pf->msix_entries[vector].vector;
4611 /* free only the irqs that were actually requested */
4612 if (!vsi->q_vectors[i] ||
4613 !vsi->q_vectors[i]->num_ringpairs)
4616 /* clear the affinity notifier in the IRQ descriptor */
4617 irq_set_affinity_notifier(irq_num, NULL);
4618 /* remove our suggested affinity mask for this IRQ */
4619 irq_set_affinity_hint(irq_num, NULL);
4620 synchronize_irq(irq_num);
4621 free_irq(irq_num, vsi->q_vectors[i]);
4623 /* Tear down the interrupt queue link list
4625 * We know that they come in pairs and always
4626 * the Rx first, then the Tx. To clear the
4627 * link list, stick the EOL value into the
4628 * next_q field of the registers.
4630 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4631 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4632 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4633 val |= I40E_QUEUE_END_OF_LIST
4634 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4635 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4637 while (qp != I40E_QUEUE_END_OF_LIST) {
4640 val = rd32(hw, I40E_QINT_RQCTL(qp));
4642 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4643 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4644 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4645 I40E_QINT_RQCTL_INTEVENT_MASK);
4647 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4648 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4650 wr32(hw, I40E_QINT_RQCTL(qp), val);
4652 val = rd32(hw, I40E_QINT_TQCTL(qp));
4654 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4655 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4657 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4658 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4659 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4660 I40E_QINT_TQCTL_INTEVENT_MASK);
4662 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4663 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4665 wr32(hw, I40E_QINT_TQCTL(qp), val);
4670 free_irq(pf->pdev->irq, pf);
4672 val = rd32(hw, I40E_PFINT_LNKLST0);
4673 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4674 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4675 val |= I40E_QUEUE_END_OF_LIST
4676 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4677 wr32(hw, I40E_PFINT_LNKLST0, val);
4679 val = rd32(hw, I40E_QINT_RQCTL(qp));
4680 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4681 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4682 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4683 I40E_QINT_RQCTL_INTEVENT_MASK);
4685 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4686 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4688 wr32(hw, I40E_QINT_RQCTL(qp), val);
4690 val = rd32(hw, I40E_QINT_TQCTL(qp));
4692 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4693 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4694 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4695 I40E_QINT_TQCTL_INTEVENT_MASK);
4697 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4698 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4700 wr32(hw, I40E_QINT_TQCTL(qp), val);
4705 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4706 * @vsi: the VSI being configured
4707 * @v_idx: Index of vector to be freed
4709 * This function frees the memory allocated to the q_vector. In addition if
4710 * NAPI is enabled it will delete any references to the NAPI struct prior
4711 * to freeing the q_vector.
4713 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4715 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4716 struct i40e_ring *ring;
4721 /* disassociate q_vector from rings */
4722 i40e_for_each_ring(ring, q_vector->tx)
4723 ring->q_vector = NULL;
4725 i40e_for_each_ring(ring, q_vector->rx)
4726 ring->q_vector = NULL;
4728 /* only VSI w/ an associated netdev is set up w/ NAPI */
4730 netif_napi_del(&q_vector->napi);
4732 vsi->q_vectors[v_idx] = NULL;
4734 kfree_rcu(q_vector, rcu);
4738 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4739 * @vsi: the VSI being un-configured
4741 * This frees the memory allocated to the q_vectors and
4742 * deletes references to the NAPI struct.
4744 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4748 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4749 i40e_free_q_vector(vsi, v_idx);
4753 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4754 * @pf: board private structure
4756 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4758 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4759 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4760 pci_disable_msix(pf->pdev);
4761 kfree(pf->msix_entries);
4762 pf->msix_entries = NULL;
4763 kfree(pf->irq_pile);
4764 pf->irq_pile = NULL;
4765 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4766 pci_disable_msi(pf->pdev);
4768 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4772 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4773 * @pf: board private structure
4775 * We go through and clear interrupt specific resources and reset the structure
4776 * to pre-load conditions
4778 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4782 i40e_free_misc_vector(pf);
4784 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4785 I40E_IWARP_IRQ_PILE_ID);
4787 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4788 for (i = 0; i < pf->num_alloc_vsi; i++)
4790 i40e_vsi_free_q_vectors(pf->vsi[i]);
4791 i40e_reset_interrupt_capability(pf);
4795 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4796 * @vsi: the VSI being configured
4798 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4805 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4806 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4808 if (q_vector->rx.ring || q_vector->tx.ring)
4809 napi_enable(&q_vector->napi);
4814 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4815 * @vsi: the VSI being configured
4817 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4824 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4825 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4827 if (q_vector->rx.ring || q_vector->tx.ring)
4828 napi_disable(&q_vector->napi);
4833 * i40e_vsi_close - Shut down a VSI
4834 * @vsi: the vsi to be quelled
4836 static void i40e_vsi_close(struct i40e_vsi *vsi)
4838 struct i40e_pf *pf = vsi->back;
4839 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4841 i40e_vsi_free_irq(vsi);
4842 i40e_vsi_free_tx_resources(vsi);
4843 i40e_vsi_free_rx_resources(vsi);
4844 vsi->current_netdev_flags = 0;
4845 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4846 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4847 set_bit(__I40E_CLIENT_RESET, pf->state);
4851 * i40e_quiesce_vsi - Pause a given VSI
4852 * @vsi: the VSI being paused
4854 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4856 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4859 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4860 if (vsi->netdev && netif_running(vsi->netdev))
4861 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4863 i40e_vsi_close(vsi);
4867 * i40e_unquiesce_vsi - Resume a given VSI
4868 * @vsi: the VSI being resumed
4870 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4872 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4875 if (vsi->netdev && netif_running(vsi->netdev))
4876 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4878 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4882 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4885 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4889 for (v = 0; v < pf->num_alloc_vsi; v++) {
4891 i40e_quiesce_vsi(pf->vsi[v]);
4896 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4899 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4903 for (v = 0; v < pf->num_alloc_vsi; v++) {
4905 i40e_unquiesce_vsi(pf->vsi[v]);
4910 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4911 * @vsi: the VSI being configured
4913 * Wait until all queues on a given VSI have been disabled.
4915 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4917 struct i40e_pf *pf = vsi->back;
4920 pf_q = vsi->base_queue;
4921 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4922 /* Check and wait for the Tx queue */
4923 ret = i40e_pf_txq_wait(pf, pf_q, false);
4925 dev_info(&pf->pdev->dev,
4926 "VSI seid %d Tx ring %d disable timeout\n",
4931 if (!i40e_enabled_xdp_vsi(vsi))
4934 /* Check and wait for the XDP Tx queue */
4935 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4938 dev_info(&pf->pdev->dev,
4939 "VSI seid %d XDP Tx ring %d disable timeout\n",
4944 /* Check and wait for the Rx queue */
4945 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4947 dev_info(&pf->pdev->dev,
4948 "VSI seid %d Rx ring %d disable timeout\n",
4957 #ifdef CONFIG_I40E_DCB
4959 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4962 * This function waits for the queues to be in disabled state for all the
4963 * VSIs that are managed by this PF.
4965 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4969 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4971 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4983 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4984 * @pf: pointer to PF
4986 * Get the TC map for an iSCSI PF type; the map will include the iSCSI TC and the other enabled TCs.
4989 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4991 struct i40e_dcb_app_priority_table app;
4992 struct i40e_hw *hw = &pf->hw;
4993 u8 enabled_tc = 1; /* TC0 is always enabled */
4995 /* Get the iSCSI APP TLV */
4996 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4998 for (i = 0; i < dcbcfg->numapps; i++) {
4999 app = dcbcfg->app[i];
5000 if (app.selector == I40E_APP_SEL_TCPIP &&
5001 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5002 tc = dcbcfg->etscfg.prioritytable[app.priority];
5003 enabled_tc |= BIT(tc);
5012 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5013 * @dcbcfg: the corresponding DCBx configuration structure
5015 * Return the number of TCs from given DCBx configuration
5017 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5019 int i, tc_unused = 0;
5023 /* Scan the ETS Config Priority Table to find
5024 * traffic class enabled for a given priority
5025 * and create a bitmask of enabled TCs
5027 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5028 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5030 /* Now scan the bitmask to check for
5031 * contiguous TCs starting with TC0
5033 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5034 if (num_tc & BIT(i)) {
5038 pr_err("Non-contiguous TC - Disabling DCB\n");
5046 /* There is always at least TC0 */
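/* Illustrative sketch, not part of the driver: the contiguity rule above.
 * A priority table of { 0, 0, 1, 1, 2, 0, 0, 0 } sets bits 0-2 (0x7) and
 * yields 3 TCs, while a bitmap of 0x5 (TC0 and TC2 without TC1) trips the
 * "Non-contiguous TC" error.  The helper name is hypothetical.
 */
static inline u8 i40e_example_contiguous_tcs(u8 tc_bitmap)
{
	u8 n = 0;

	/* count TCs from TC0 up to the first gap in the bitmap */
	while ((n < I40E_MAX_TRAFFIC_CLASS) && (tc_bitmap & BIT(n)))
		n++;
	return n ? n : 1;	/* there is always at least TC0 */
}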
5054 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5055 * @dcbcfg: the corresponding DCBx configuration structure
5057 * Query the current DCB configuration and return the number of
5058 * traffic classes enabled from the given DCBX config
5060 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5062 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5066 for (i = 0; i < num_tc; i++)
5067 enabled_tc |= BIT(i);
5073 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5074 * @pf: PF being queried
5076 * Query the current MQPRIO configuration and return the number of
5077 * traffic classes enabled.
5079 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5081 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5082 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5083 u8 enabled_tc = 1, i;
5085 for (i = 1; i < num_tc; i++)
5086 enabled_tc |= BIT(i);
5091 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5092 * @pf: PF being queried
5094 * Return number of traffic classes enabled for the given PF
5096 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5098 struct i40e_hw *hw = &pf->hw;
5099 u8 i, enabled_tc = 1;
5101 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5103 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5104 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5106 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5107 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5110 /* SFP mode will be enabled for all TCs on port */
5111 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5112 return i40e_dcb_get_num_tc(dcbcfg);
5114 /* MFP mode return count of enabled TCs for this PF */
5115 if (pf->hw.func_caps.iscsi)
5116 enabled_tc = i40e_get_iscsi_tc_map(pf);
5118 return 1; /* Only TC0 */
5120 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5121 if (enabled_tc & BIT(i))
5128 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
5129 * @pf: PF being queried
5131 * Return a bitmap for enabled traffic classes for this PF.
5133 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5135 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5136 return i40e_mqprio_get_enabled_tc(pf);
5138 /* If neither MQPRIO nor DCB is enabled for this PF then just return the default TC map (TC0 only). */
5141 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5142 return I40E_DEFAULT_TRAFFIC_CLASS;
5144 /* SFP mode we want PF to be enabled for all TCs */
5145 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5146 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5148 /* MFP enabled and iSCSI PF type */
5149 if (pf->hw.func_caps.iscsi)
5150 return i40e_get_iscsi_tc_map(pf);
5152 return I40E_DEFAULT_TRAFFIC_CLASS;
5156 * i40e_vsi_get_bw_info - Query VSI BW Information
5157 * @vsi: the VSI being queried
5159 * Returns 0 on success, negative value on failure
5161 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5163 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5164 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5165 struct i40e_pf *pf = vsi->back;
5166 struct i40e_hw *hw = &pf->hw;
5171 /* Get the VSI level BW configuration */
5172 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5174 dev_info(&pf->pdev->dev,
5175 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5176 i40e_stat_str(&pf->hw, ret),
5177 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5181 /* Get the VSI level BW configuration per TC */
5182 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5185 dev_info(&pf->pdev->dev,
5186 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5187 i40e_stat_str(&pf->hw, ret),
5188 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5192 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5193 dev_info(&pf->pdev->dev,
5194 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5195 bw_config.tc_valid_bits,
5196 bw_ets_config.tc_valid_bits);
5197 /* Still continuing */
5200 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5201 vsi->bw_max_quanta = bw_config.max_bw;
5202 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5203 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5204 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5205 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5206 vsi->bw_ets_limit_credits[i] =
5207 le16_to_cpu(bw_ets_config.credits[i]);
5208 /* 3 bits out of 4 for each TC */
5209 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
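/* Illustrative sketch, not part of the driver: the max-quanta unpacking
 * above.  The two 16-bit tc_bw_max words are glued into one 32-bit value,
 * one nibble per TC, of which only the low 3 bits are meaningful, so TC5's
 * field sits at bits 20-22.  The helper name is hypothetical.
 */
static inline u8 i40e_example_tc_max_quanta(u32 tc_bw_max, int tc)
{
	return (u8)((tc_bw_max >> (tc * 4)) & 0x7);
}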
5216 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5217 * @vsi: the VSI being configured
5218 * @enabled_tc: TC bitmap
5219 * @bw_share: BW shared credits per TC
5221 * Returns 0 on success, negative value on failure
5223 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5226 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5227 struct i40e_pf *pf = vsi->back;
5231 /* There is no need to reset BW when mqprio mode is on. */
5232 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5234 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5235 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5237 dev_info(&pf->pdev->dev,
5238 "Failed to reset tx rate for vsi->seid %u\n",
5242 bw_data.tc_valid_bits = enabled_tc;
5243 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5244 bw_data.tc_bw_credits[i] = bw_share[i];
5246 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5248 dev_info(&pf->pdev->dev,
5249 "AQ command Config VSI BW allocation per TC failed = %d\n",
5250 pf->hw.aq.asq_last_status);
5254 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5255 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5261 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5262 * @vsi: the VSI being configured
5263 * @enabled_tc: TC map to be enabled
5266 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5268 struct net_device *netdev = vsi->netdev;
5269 struct i40e_pf *pf = vsi->back;
5270 struct i40e_hw *hw = &pf->hw;
5273 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5279 netdev_reset_tc(netdev);
5283 /* Set up actual enabled TCs on the VSI */
5284 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5287 /* set per TC queues for the VSI */
5288 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5289 /* Only set TC queues for enabled tcs
5291 * e.g. For a VSI that has TC0 and TC3 enabled the
5292 * enabled_tc bitmap would be 0x9 (binary 1001); the driver
5293 * will set the netdev numtc to 2, and the netdev layer will
5294 * reference the two TCs as TC 0 and TC 1.
5296 if (vsi->tc_config.enabled_tc & BIT(i))
5297 netdev_set_tc_queue(netdev,
5298 vsi->tc_config.tc_info[i].netdev_tc,
5299 vsi->tc_config.tc_info[i].qcount,
5300 vsi->tc_config.tc_info[i].qoffset);
5303 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5306 /* Assign UP2TC map for the VSI */
5307 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5308 /* Get the actual TC# for the UP */
5309 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5310 /* Get the mapped netdev TC# for the UP */
5311 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5312 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5317 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
5318 * @vsi: the VSI being configured
5319 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5321 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5322 struct i40e_vsi_context *ctxt)
5324 /* copy just the sections touched not the entire info
5325 * since not all sections are valid as returned by the update VSI params command.
5328 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5329 memcpy(&vsi->info.queue_mapping,
5330 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5331 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5332 sizeof(vsi->info.tc_mapping));
5336 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5337 * @vsi: VSI to be configured
5338 * @enabled_tc: TC bitmap
5340 * This configures a particular VSI for TCs that are mapped to the
5341 * given TC bitmap. It uses default bandwidth share for TCs across
5342 * VSIs to configure TC for a particular VSI.
5345 * It is expected that the VSI queues have been quiesced before calling this function.
5348 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5350 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5351 struct i40e_pf *pf = vsi->back;
5352 struct i40e_hw *hw = &pf->hw;
5353 struct i40e_vsi_context ctxt;
5357 /* Check if enabled_tc is same as existing or new TCs */
5358 if (vsi->tc_config.enabled_tc == enabled_tc &&
5359 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5362 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5363 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5364 if (enabled_tc & BIT(i))
5368 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5370 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5372 dev_info(&pf->pdev->dev,
5373 "Failed configuring TC map %d for VSI %d\n",
5374 enabled_tc, vsi->seid);
5375 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5378 dev_info(&pf->pdev->dev,
5379 "Failed querying vsi bw info, err %s aq_err %s\n",
5380 i40e_stat_str(hw, ret),
5381 i40e_aq_str(hw, hw->aq.asq_last_status));
5384 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5385 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5388 valid_tc = bw_config.tc_valid_bits;
5389 /* Always enable TC0, no matter what */
5391 dev_info(&pf->pdev->dev,
5392 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5393 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5394 enabled_tc = valid_tc;
5397 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5399 dev_err(&pf->pdev->dev,
5400 "Unable to configure TC map %d for VSI %d\n",
5401 enabled_tc, vsi->seid);
5406 /* Update Queue Pairs Mapping for currently enabled UPs */
5407 ctxt.seid = vsi->seid;
5408 ctxt.pf_num = vsi->back->hw.pf_id;
5410 ctxt.uplink_seid = vsi->uplink_seid;
5411 ctxt.info = vsi->info;
5412 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5413 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5417 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5420 /* On destroying the qdisc, reset vsi->rss_size, as the number of enabled queues has changed. */
5423 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5424 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5425 vsi->num_queue_pairs);
5426 ret = i40e_vsi_config_rss(vsi);
5428 dev_info(&vsi->back->pdev->dev,
5429 "Failed to reconfig rss for num_queues\n");
5432 vsi->reconfig_rss = false;
5434 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5435 ctxt.info.valid_sections |=
5436 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5437 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5440 /* Update the VSI after updating the VSI queue-mapping information. */
5443 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5445 dev_info(&pf->pdev->dev,
5446 "Update vsi tc config failed, err %s aq_err %s\n",
5447 i40e_stat_str(hw, ret),
5448 i40e_aq_str(hw, hw->aq.asq_last_status));
5451 /* update the local VSI info with updated queue map */
5452 i40e_vsi_update_queue_map(vsi, &ctxt);
5453 vsi->info.valid_sections = 0;
5455 /* Update current VSI BW information */
5456 ret = i40e_vsi_get_bw_info(vsi);
5458 dev_info(&pf->pdev->dev,
5459 "Failed updating vsi bw info, err %s aq_err %s\n",
5460 i40e_stat_str(hw, ret),
5461 i40e_aq_str(hw, hw->aq.asq_last_status));
5465 /* Update the netdev TC setup */
5466 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5472 * i40e_get_link_speed - Returns link speed for the interface
5473 * @vsi: VSI to be configured
5476 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5478 struct i40e_pf *pf = vsi->back;
5480 switch (pf->hw.phy.link_info.link_speed) {
5481 case I40E_LINK_SPEED_40GB:
5483 case I40E_LINK_SPEED_25GB:
5485 case I40E_LINK_SPEED_20GB:
5487 case I40E_LINK_SPEED_10GB:
5489 case I40E_LINK_SPEED_1GB:
5497 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5498 * @vsi: VSI to be configured
5499 * @seid: seid of the channel/VSI
5500 * @max_tx_rate: max TX rate to be configured as BW limit
5502 * Helper function to set BW limit for a given VSI
5504 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5506 struct i40e_pf *pf = vsi->back;
5511 speed = i40e_get_link_speed(vsi);
5512 if (max_tx_rate > speed) {
5513 dev_err(&pf->pdev->dev,
5514 "Invalid max tx rate %llu specified for VSI seid %d.",
5518 if (max_tx_rate && max_tx_rate < 50) {
5519 dev_warn(&pf->pdev->dev,
5520 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5524 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5525 credits = max_tx_rate;
5526 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5527 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5528 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5530 dev_err(&pf->pdev->dev,
5531 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5532 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5533 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
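/* Illustrative sketch, not part of the driver: the credit conversion used
 * above.  A request of 1000 Mbps becomes 1000 / I40E_BW_CREDIT_DIVISOR
 * (50 Mbps per credit) = 20 credits, 0 leaves the limiter disabled, and any
 * non-zero value below 50 Mbps is first raised to the 50 Mbps minimum.
 * The helper name is hypothetical.
 */
static inline u64 i40e_example_rate_to_credits(u64 max_tx_rate)
{
	u64 credits = max_tx_rate;

	if (credits && credits < I40E_BW_CREDIT_DIVISOR)
		credits = I40E_BW_CREDIT_DIVISOR;	/* 50 Mbps floor */
	do_div(credits, I40E_BW_CREDIT_DIVISOR);
	return credits;
}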
5538 * i40e_remove_queue_channels - Remove queue channels for the TCs
5539 * @vsi: VSI to be configured
5541 * Remove queue channels for the TCs
5543 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5545 enum i40e_admin_queue_err last_aq_status;
5546 struct i40e_cloud_filter *cfilter;
5547 struct i40e_channel *ch, *ch_tmp;
5548 struct i40e_pf *pf = vsi->back;
5549 struct hlist_node *node;
5552 /* Reset rss size that was stored when reconfiguring rss for
5553 * channel VSIs with non-power-of-2 queue count.
5555 vsi->current_rss_size = 0;
5557 /* perform cleanup for channels if they exist */
5558 if (list_empty(&vsi->ch_list))
5561 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5562 struct i40e_vsi *p_vsi;
5564 list_del(&ch->list);
5565 p_vsi = ch->parent_vsi;
5566 if (!p_vsi || !ch->initialized) {
5570 /* Reset queue contexts */
5571 for (i = 0; i < ch->num_queue_pairs; i++) {
5572 struct i40e_ring *tx_ring, *rx_ring;
5575 pf_q = ch->base_queue + i;
5576 tx_ring = vsi->tx_rings[pf_q];
5579 rx_ring = vsi->rx_rings[pf_q];
5583 /* Reset BW configured for this VSI via mqprio */
5584 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5586 dev_info(&vsi->back->pdev->dev,
5587 "Failed to reset tx rate for ch->seid %u\n",
5590 /* delete cloud filters associated with this channel */
5591 hlist_for_each_entry_safe(cfilter, node,
5592 &pf->cloud_filter_list, cloud_node) {
5593 if (cfilter->seid != ch->seid)
5596 hash_del(&cfilter->cloud_node);
5597 if (cfilter->dst_port)
5598 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5602 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5604 last_aq_status = pf->hw.aq.asq_last_status;
5606 dev_info(&pf->pdev->dev,
5607 "Failed to delete cloud filter, err %s aq_err %s\n",
5608 i40e_stat_str(&pf->hw, ret),
5609 i40e_aq_str(&pf->hw, last_aq_status));
5613 /* delete VSI from FW */
5614 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5617 dev_err(&vsi->back->pdev->dev,
5618 "unable to remove channel (%d) for parent VSI(%d)\n",
5619 ch->seid, p_vsi->seid);
5622 INIT_LIST_HEAD(&vsi->ch_list);
5626 * i40e_is_any_channel - channel exist or not
5627 * @vsi: ptr to VSI to which channels are associated with
5629 * Returns true if any channel exists for the associated VSI, false otherwise.
5631 static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5633 struct i40e_channel *ch, *ch_tmp;
5635 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5636 if (ch->initialized)
5644 * i40e_get_max_queues_for_channel
5645 * @vsi: ptr to VSI to which channels are associated with
5647 * Helper function which returns max value among the queue counts set on the
5648 * channels/TCs created.
5650 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5652 struct i40e_channel *ch, *ch_tmp;
5655 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5656 if (!ch->initialized)
5658 if (ch->num_queue_pairs > max)
5659 max = ch->num_queue_pairs;
5666 * i40e_validate_num_queues - validate num_queues w.r.t channel
5667 * @pf: ptr to PF device
5668 * @num_queues: number of queues
5669 * @vsi: the parent VSI
5670 * @reconfig_rss: indicates should the RSS be reconfigured or not
5672 * This function validates number of queues in the context of new channel
5673 * which is being established and determines if RSS should be reconfigured
5674 * or not for parent VSI.
5676 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5677 struct i40e_vsi *vsi, bool *reconfig_rss)
5684 *reconfig_rss = false;
5685 if (vsi->current_rss_size) {
5686 if (num_queues > vsi->current_rss_size) {
5687 dev_dbg(&pf->pdev->dev,
5688 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5689 num_queues, vsi->current_rss_size);
5691 } else if ((num_queues < vsi->current_rss_size) &&
5692 (!is_power_of_2(num_queues))) {
5693 dev_dbg(&pf->pdev->dev,
5694 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5695 num_queues, vsi->current_rss_size);
5700 if (!is_power_of_2(num_queues)) {
5701 /* Find the max num_queues configured for any existing channel;
5703 * if channels exist, then enforce 'num_queues' to be at least as
5704 * large as the largest queue count ever configured for a channel.
5706 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5707 if (num_queues < max_ch_queues) {
5708 dev_dbg(&pf->pdev->dev,
5709 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5710 num_queues, max_ch_queues);
5713 *reconfig_rss = true;
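/* Illustrative worked example, not part of the driver, of the rules enforced
 * above: with current_rss_size = 8, asking for 12 queues fails (more than
 * the RSS spread), 6 fails (fewer, but not a power of two) and 4 succeeds
 * without touching RSS; when no RSS size has been stored yet, a
 * non-power-of-two request only has to be at least as large as the biggest
 * queue count already used by an existing channel, and then *reconfig_rss is
 * set so the parent VSI's LUT gets reprogrammed.
 */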
5720 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5721 * @vsi: the VSI being setup
5722 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
5724 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5726 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5728 struct i40e_pf *pf = vsi->back;
5729 u8 seed[I40E_HKEY_ARRAY_SIZE];
5730 struct i40e_hw *hw = &pf->hw;
5738 if (rss_size > vsi->rss_size)
5741 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5742 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5746 /* Ignoring user configured lut if there is one */
5747 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5749 /* Use the user-configured hash key if there is one, otherwise fall back to a randomly generated default key. */
5752 if (vsi->rss_hkey_user)
5753 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5755 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5757 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5759 dev_info(&pf->pdev->dev,
5760 "Cannot set RSS lut, err %s aq_err %s\n",
5761 i40e_stat_str(hw, ret),
5762 i40e_aq_str(hw, hw->aq.asq_last_status));
5768 /* Do the update w.r.t. storing rss_size */
5769 if (!vsi->orig_rss_size)
5770 vsi->orig_rss_size = vsi->rss_size;
5771 vsi->current_rss_size = local_rss_size;
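/* Illustrative sketch, not part of the driver: what reprogramming the LUT
 * with a smaller rss_size amounts to.  Every LUT slot points at one of the
 * first rss_size queues in round-robin order, which is the behaviour
 * i40e_fill_rss_lut() provides for the call above.  The helper name is
 * hypothetical.
 */
static inline void i40e_example_fill_lut(u8 *lut, u16 lut_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;
}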
5777 * i40e_channel_setup_queue_map - Setup a channel queue map
5778 * @pf: ptr to PF device
5779 * @vsi: the VSI being setup
5780 * @ctxt: VSI context structure
5781 * @ch: ptr to channel structure
5783 * Setup queue map for a specific channel
5785 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5786 struct i40e_vsi_context *ctxt,
5787 struct i40e_channel *ch)
5789 u16 qcount, qmap, sections = 0;
5793 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5794 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5796 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5797 ch->num_queue_pairs = qcount;
5799 /* find the next higher power-of-2 of num queue pairs */
5800 pow = ilog2(qcount);
5801 if (!is_power_of_2(qcount))
5804 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5805 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5807 /* Setup queue TC[0].qmap for given VSI context */
5808 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5810 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
5811 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5812 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5813 ctxt->info.valid_sections |= cpu_to_le16(sections);
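/* Illustrative sketch, not part of the driver: the queue-count rounding
 * used above.  For qcount = 6, ilog2(6) = 2 and 6 is not a power of two,
 * so the exponent is bumped to 3 and the TC is sized for 8 queues; for
 * qcount = 4 the exponent stays at 2.  The helper name is hypothetical.
 */
static inline u8 i40e_example_qcount_to_pow(u16 qcount)
{
	u8 pow = ilog2(qcount);

	if (!is_power_of_2(qcount))
		pow++;	/* round up to the next power of two */
	return pow;
}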
5817 * i40e_add_channel - add a channel by adding VSI
5818 * @pf: ptr to PF device
5819 * @uplink_seid: underlying HW switching element (VEB) ID
5820 * @ch: ptr to channel structure
5822 * Add a channel (VSI) using add_vsi and queue_map
5824 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5825 struct i40e_channel *ch)
5827 struct i40e_hw *hw = &pf->hw;
5828 struct i40e_vsi_context ctxt;
5829 u8 enabled_tc = 0x1; /* TC0 enabled */
5832 if (ch->type != I40E_VSI_VMDQ2) {
5833 dev_info(&pf->pdev->dev,
5834 "add new vsi failed, ch->type %d\n", ch->type);
5838 memset(&ctxt, 0, sizeof(ctxt));
5839 ctxt.pf_num = hw->pf_id;
5841 ctxt.uplink_seid = uplink_seid;
5842 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5843 if (ch->type == I40E_VSI_VMDQ2)
5844 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5846 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5847 ctxt.info.valid_sections |=
5848 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5849 ctxt.info.switch_id =
5850 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5853 /* Set queue map for a given VSI context */
5854 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5856 /* Now time to create VSI */
5857 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5859 dev_info(&pf->pdev->dev,
5860 "add new vsi failed, err %s aq_err %s\n",
5861 i40e_stat_str(&pf->hw, ret),
5862 i40e_aq_str(&pf->hw,
5863 pf->hw.aq.asq_last_status));
5867 /* Success, update channel */
5868 ch->enabled_tc = enabled_tc;
5869 ch->seid = ctxt.seid;
5870 ch->vsi_number = ctxt.vsi_number;
5871 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5873 /* copy just the sections touched not the entire info
5874 * since not all sections are valid as returned by the add VSI command.
5877 ch->info.mapping_flags = ctxt.info.mapping_flags;
5878 memcpy(&ch->info.queue_mapping,
5879 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5880 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5881 sizeof(ctxt.info.tc_mapping));
5886 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5889 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5893 bw_data.tc_valid_bits = ch->enabled_tc;
5894 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5895 bw_data.tc_bw_credits[i] = bw_share[i];
5897 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5900 dev_info(&vsi->back->pdev->dev,
5901 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5902 vsi->back->hw.aq.asq_last_status, ch->seid);
5906 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5907 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5913 * i40e_channel_config_tx_ring - config TX ring associated with new channel
5914 * @pf: ptr to PF device
5915 * @vsi: the VSI being setup
5916 * @ch: ptr to channel structure
5918 * Configure TX rings associated with the channel (VSI), since the queues are being taken from the main VSI.
5921 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5922 struct i40e_vsi *vsi,
5923 struct i40e_channel *ch)
5927 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5929 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5930 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5931 if (ch->enabled_tc & BIT(i))
5935 /* configure BW for new VSI */
5936 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5938 dev_info(&vsi->back->pdev->dev,
5939 "Failed configuring TC map %d for channel (seid %u)\n",
5940 ch->enabled_tc, ch->seid);
5944 for (i = 0; i < ch->num_queue_pairs; i++) {
5945 struct i40e_ring *tx_ring, *rx_ring;
5948 pf_q = ch->base_queue + i;
5950 /* Get the TX ring ptr of the main VSI, to re-setup the TX queue context. */
5953 tx_ring = vsi->tx_rings[pf_q];
5956 /* Get the RX ring ptr */
5957 rx_ring = vsi->rx_rings[pf_q];
5965 * i40e_setup_hw_channel - setup new channel
5966 * @pf: ptr to PF device
5967 * @vsi: the VSI being setup
5968 * @ch: ptr to channel structure
5969 * @uplink_seid: underlying HW switching element (VEB) ID
5970 * @type: type of channel to be created (VMDq2/VF)
5972 * Setup new channel (VSI) based on specified type (VMDq2/VF)
5973 * and configures TX rings accordingly
5975 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
5976 struct i40e_vsi *vsi,
5977 struct i40e_channel *ch,
5978 u16 uplink_seid, u8 type)
5982 ch->initialized = false;
5983 ch->base_queue = vsi->next_base_queue;
5986 /* Proceed with creation of channel (VMDq2) VSI */
5987 ret = i40e_add_channel(pf, uplink_seid, ch);
5989 dev_info(&pf->pdev->dev,
5990 "failed to add_channel using uplink_seid %u\n",
5995 /* Mark the successful creation of channel */
5996 ch->initialized = true;
5998 /* Reconfigure TX queues using QTX_CTL register */
5999 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6001 dev_info(&pf->pdev->dev,
6002 "failed to configure TX rings for channel %u\n",
6007 /* update 'next_base_queue' */
6008 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6009 dev_dbg(&pf->pdev->dev,
6010 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6011 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6012 ch->num_queue_pairs,
6013 vsi->next_base_queue);
6018 * i40e_setup_channel - setup new channel using uplink element
6019 * @pf: ptr to PF device
6020 * @type: type of channel to be created (VMDq2/VF)
6021 * @uplink_seid: underlying HW switching element (VEB) ID
6022 * @ch: ptr to channel structure
6024 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6025 * and uplink switching element (uplink_seid)
6027 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6028 struct i40e_channel *ch)
6034 if (vsi->type == I40E_VSI_MAIN) {
6035 vsi_type = I40E_VSI_VMDQ2;
6037 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6042 /* underlying switching element */
6043 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6045 /* create channel (VSI), configure TX rings */
6046 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6048 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6052 return ch->initialized ? true : false;
6056 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6057 * @vsi: ptr to VSI which has PF backing
6059 * Sets up the switch mode correctly if it needs to be changed,
6060 * restricted to the modes that are allowed for cloud filters.
6062 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6065 struct i40e_pf *pf = vsi->back;
6066 struct i40e_hw *hw = &pf->hw;
6069 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6073 if (hw->dev_caps.switch_mode) {
6074 /* if switch mode is set, support mode2 (non-tunneled for
6075 * cloud filter) for now
6077 u32 switch_mode = hw->dev_caps.switch_mode &
6078 I40E_SWITCH_MODE_MASK;
6079 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6080 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6082 dev_err(&pf->pdev->dev,
6083 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6084 hw->dev_caps.switch_mode);
6089 /* Set Bit 7 to be valid */
6090 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6092 /* Set L4type for TCP support */
6093 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6095 /* Set cloud filter mode */
6096 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6098 /* Prep mode field for set_switch_config */
6099 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6100 pf->last_sw_conf_valid_flags,
6102 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6103 dev_err(&pf->pdev->dev,
6104 "couldn't set switch config bits, err %s aq_err %s\n",
6105 i40e_stat_str(hw, ret),
6107 hw->aq.asq_last_status));
6113 * i40e_create_queue_channel - function to create channel
6114 * @vsi: VSI to be configured
6115 * @ch: ptr to channel (it contains channel specific params)
6117 * This function creates a channel (VSI) using the num_queues specified by
6118 * the user and reconfigures RSS if needed.
6120 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6121 struct i40e_channel *ch)
6123 struct i40e_pf *pf = vsi->back;
6130 if (!ch->num_queue_pairs) {
6131 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6132 ch->num_queue_pairs);
6136 /* validate user requested num_queues for channel */
6137 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6140 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6141 ch->num_queue_pairs);
6145 /* By default we are in VEPA mode; if this is the first VF/VMDq
6146 * VSI to be added, switch to VEB mode.
6148 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6149 (!i40e_is_any_channel(vsi))) {
6150 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6151 dev_dbg(&pf->pdev->dev,
6152 "Failed to create channel. Override queues (%u) not power of 2\n",
6153 vsi->tc_config.tc_info[0].qcount);
6157 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6158 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6160 if (vsi->type == I40E_VSI_MAIN) {
6161 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6162 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6165 i40e_do_reset_safe(pf,
6166 I40E_PF_RESET_FLAG);
6169 /* From now on, for the main VSI the number of queues will be
6170 * the value of TC0's queue count.
6174 /* By this time, vsi->cnt_q_avail shall be set to non-zero and
6175 * it should be more than num_queues
6177 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6178 dev_dbg(&pf->pdev->dev,
6179 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6180 vsi->cnt_q_avail, ch->num_queue_pairs);
6184 /* reconfig_rss only if vsi type is MAIN_VSI */
6185 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6186 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6188 dev_info(&pf->pdev->dev,
6189 "Error: unable to reconfig rss for num_queues (%u)\n",
6190 ch->num_queue_pairs);
6195 if (!i40e_setup_channel(pf, vsi, ch)) {
6196 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6200 dev_info(&pf->pdev->dev,
6201 "Setup channel (id:%u) utilizing num_queues %d\n",
6202 ch->seid, ch->num_queue_pairs);
6204 /* configure VSI for BW limit */
6205 if (ch->max_tx_rate) {
6206 u64 credits = ch->max_tx_rate;
6208 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6211 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6212 dev_dbg(&pf->pdev->dev,
6213 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6219 /* in case of VF, this will be main SRIOV VSI */
6220 ch->parent_vsi = vsi;
6222 /* and update main_vsi's count for queue_available to use */
6223 vsi->cnt_q_avail -= ch->num_queue_pairs;
6229 * i40e_configure_queue_channels - Add queue channel for the given TCs
6230 * @vsi: VSI to be configured
6232 * Configures queue channel mapping to the given TCs
6234 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6236 struct i40e_channel *ch;
6240 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6241 vsi->tc_seid_map[0] = vsi->seid;
6242 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6243 if (vsi->tc_config.enabled_tc & BIT(i)) {
6244 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6250 INIT_LIST_HEAD(&ch->list);
6251 ch->num_queue_pairs =
6252 vsi->tc_config.tc_info[i].qcount;
6254 vsi->tc_config.tc_info[i].qoffset;
6256 /* Bandwidth limit through the tc interface is in bytes/s; convert it to Mbps. */
6259 max_rate = vsi->mqprio_qopt.max_rate[i];
6260 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6261 ch->max_tx_rate = max_rate;
6263 list_add_tail(&ch->list, &vsi->ch_list);
6265 ret = i40e_create_queue_channel(vsi, ch);
6267 dev_err(&vsi->back->pdev->dev,
6268 "Failed creating queue channel with TC%d: queues %d\n",
6269 i, ch->num_queue_pairs);
6272 vsi->tc_seid_map[i] = ch->seid;
6278 i40e_remove_queue_channels(vsi);
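/* Illustrative sketch, not part of the driver: the rate conversion used in
 * i40e_configure_queue_channels() above.  The tc/mqprio interface supplies
 * max_rate in bytes per second, so 125,000,000 bytes/s divided by
 * I40E_BW_MBPS_DIVISOR comes out as a 1000 Mbps channel limit (assuming the
 * divisor is 125000 bytes/s per Mbps).  The helper name is hypothetical.
 */
static inline u64 i40e_example_bytes_per_sec_to_mbps(u64 rate)
{
	do_div(rate, I40E_BW_MBPS_DIVISOR);
	return rate;
}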
6283 * i40e_veb_config_tc - Configure TCs for given VEB
6285 * @enabled_tc: TC bitmap
6287 * Configures given TC bitmap for VEB (switching) element
6289 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6291 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6292 struct i40e_pf *pf = veb->pf;
6296 /* If there are no TCs, or the requested TCs are already enabled, just return. */
6297 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6300 bw_data.tc_valid_bits = enabled_tc;
6301 /* bw_data.absolute_credits is not set (relative) */
6303 /* Enable ETS TCs with equal BW Share for now */
6304 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6305 if (enabled_tc & BIT(i))
6306 bw_data.tc_bw_share_credits[i] = 1;
6309 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6312 dev_info(&pf->pdev->dev,
6313 "VEB bw config failed, err %s aq_err %s\n",
6314 i40e_stat_str(&pf->hw, ret),
6315 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6319 /* Update the BW information */
6320 ret = i40e_veb_get_bw_info(veb);
6322 dev_info(&pf->pdev->dev,
6323 "Failed getting veb bw config, err %s aq_err %s\n",
6324 i40e_stat_str(&pf->hw, ret),
6325 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6332 #ifdef CONFIG_I40E_DCB
6334 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6337 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6338 * the caller has quiesced all the VSIs before calling this function.
6341 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6347 /* Enable the TCs available on PF to all VEBs */
6348 tc_map = i40e_pf_get_tc_map(pf);
6349 for (v = 0; v < I40E_MAX_VEB; v++) {
6352 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6354 dev_info(&pf->pdev->dev,
6355 "Failed configuring TC for VEB seid=%d\n",
6357 /* Will try to configure as many components */
6361 /* Update each VSI */
6362 for (v = 0; v < pf->num_alloc_vsi; v++) {
6366 /* - Enable all TCs for the LAN VSI
6367 * - For all others keep them at TC0 for now
6369 if (v == pf->lan_vsi)
6370 tc_map = i40e_pf_get_tc_map(pf);
6372 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6374 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6376 dev_info(&pf->pdev->dev,
6377 "Failed configuring TC for VSI seid=%d\n",
6379 /* Will try to configure as many components as possible */
6381 /* Re-configure VSI vectors based on updated TC map */
6382 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6383 if (pf->vsi[v]->netdev)
6384 i40e_dcbnl_set_all(pf->vsi[v]);
6390 * i40e_resume_port_tx - Resume port Tx
6393 * Resume a port's Tx and issue a PF reset in case of failure to
6396 static int i40e_resume_port_tx(struct i40e_pf *pf)
6398 struct i40e_hw *hw = &pf->hw;
6401 ret = i40e_aq_resume_port_tx(hw, NULL);
6403 dev_info(&pf->pdev->dev,
6404 "Resume Port Tx failed, err %s aq_err %s\n",
6405 i40e_stat_str(&pf->hw, ret),
6406 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6407 /* Schedule PF reset to recover */
6408 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6409 i40e_service_event_schedule(pf);
6416 * i40e_init_pf_dcb - Initialize DCB configuration
6417 * @pf: PF being configured
6419 * Query the current DCB configuration and cache it
6420 * in the hardware structure
6422 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6424 struct i40e_hw *hw = &pf->hw;
6427 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable
6428 * Also do not enable DCBx if FW LLDP agent is disabled
6430 if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
6431 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
6434 /* Get the initial DCB configuration */
6435 err = i40e_init_dcb(hw, true);
6437 /* Device/Function is not DCBX capable */
6438 if ((!hw->func_caps.dcb) ||
6439 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6440 dev_info(&pf->pdev->dev,
6441 "DCBX offload is not supported or is disabled for this PF.\n");
6443 /* When status is not DISABLED then DCBX is handled in FW */
6444 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6445 DCB_CAP_DCBX_VER_IEEE;
6447 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6448 /* Enable DCB tagging only when more than one TC
6449 * or explicitly disable if only one TC
6451 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6452 pf->flags |= I40E_FLAG_DCB_ENABLED;
6454 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6455 dev_dbg(&pf->pdev->dev,
6456 "DCBX offload is supported for this PF.\n");
6458 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6459 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6460 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6462 dev_info(&pf->pdev->dev,
6463 "Query for DCB configuration failed, err %s aq_err %s\n",
6464 i40e_stat_str(&pf->hw, err),
6465 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6471 #endif /* CONFIG_I40E_DCB */
6472 #define SPEED_SIZE 14
6475 * i40e_print_link_message - print link up or down
6476 * @vsi: the VSI for which link needs a message
6477 * @isup: true if link is up, false otherwise
6479 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6481 enum i40e_aq_link_speed new_speed;
6482 struct i40e_pf *pf = vsi->back;
6483 char *speed = "Unknown";
6484 char *fc = "Unknown";
6490 new_speed = pf->hw.phy.link_info.link_speed;
6492 new_speed = I40E_LINK_SPEED_UNKNOWN;
6494 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6496 vsi->current_isup = isup;
6497 vsi->current_speed = new_speed;
6499 netdev_info(vsi->netdev, "NIC Link is Down\n");
6503 /* Warn user if link speed on NPAR enabled partition is not at
6506 if (pf->hw.func_caps.npar_enable &&
6507 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6508 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6509 netdev_warn(vsi->netdev,
6510 "The partition detected link speed that is less than 10Gbps\n");
6512 switch (pf->hw.phy.link_info.link_speed) {
6513 case I40E_LINK_SPEED_40GB:
6516 case I40E_LINK_SPEED_20GB:
6519 case I40E_LINK_SPEED_25GB:
6522 case I40E_LINK_SPEED_10GB:
6525 case I40E_LINK_SPEED_5GB:
6528 case I40E_LINK_SPEED_2_5GB:
6531 case I40E_LINK_SPEED_1GB:
6534 case I40E_LINK_SPEED_100MB:
6541 switch (pf->hw.fc.current_mode) {
6545 case I40E_FC_TX_PAUSE:
6548 case I40E_FC_RX_PAUSE:
6556 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6557 req_fec = ", Requested FEC: None";
6558 fec = ", FEC: None";
6559 an = ", Autoneg: False";
6561 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6562 an = ", Autoneg: True";
6564 if (pf->hw.phy.link_info.fec_info &
6565 I40E_AQ_CONFIG_FEC_KR_ENA)
6566 fec = ", FEC: CL74 FC-FEC/BASE-R";
6567 else if (pf->hw.phy.link_info.fec_info &
6568 I40E_AQ_CONFIG_FEC_RS_ENA)
6569 fec = ", FEC: CL108 RS-FEC";
6571 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
6572 * both RS and FC are requested
6574 if (vsi->back->hw.phy.link_info.req_fec_info &
6575 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6576 if (vsi->back->hw.phy.link_info.req_fec_info &
6577 I40E_AQ_REQUEST_FEC_RS)
6578 req_fec = ", Requested FEC: CL108 RS-FEC";
6580 req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
6584 netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
6585 speed, req_fec, fec, an, fc);
6589 * i40e_up_complete - Finish the last steps of bringing up a connection
6590 * @vsi: the VSI being configured
6592 static int i40e_up_complete(struct i40e_vsi *vsi)
6594 struct i40e_pf *pf = vsi->back;
6597 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6598 i40e_vsi_configure_msix(vsi);
6600 i40e_configure_msi_and_legacy(vsi);
6603 err = i40e_vsi_start_rings(vsi);
6607 clear_bit(__I40E_VSI_DOWN, vsi->state);
6608 i40e_napi_enable_all(vsi);
6609 i40e_vsi_enable_irq(vsi);
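/* If the PHY already reports link up, log it and open the Tx path right away */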
6611 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6613 i40e_print_link_message(vsi, true);
6614 netif_tx_start_all_queues(vsi->netdev);
6615 netif_carrier_on(vsi->netdev);
6618 /* replay FDIR SB filters */
6619 if (vsi->type == I40E_VSI_FDIR) {
6620 /* reset fd counters */
6623 i40e_fdir_filter_restore(vsi);
6626 /* On the next run of the service_task, notify any clients of the new
6629 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6630 i40e_service_event_schedule(pf);
6636 * i40e_vsi_reinit_locked - Reset the VSI
6637 * @vsi: the VSI being configured
6639 * Rebuild the ring structs after some configuration
6640 * has changed, e.g. MTU size.
6642 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6644 struct i40e_pf *pf = vsi->back;
6646 WARN_ON(in_interrupt());
6647 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6648 usleep_range(1000, 2000);
6652 clear_bit(__I40E_CONFIG_BUSY, pf->state);
6656 * i40e_up - Bring the connection back up after being down
6657 * @vsi: the VSI being configured
6659 int i40e_up(struct i40e_vsi *vsi)
6663 err = i40e_vsi_configure(vsi);
6665 err = i40e_up_complete(vsi);
6671 * i40e_force_link_state - Force the link status
6672 * @pf: board private structure
6673 * @is_up: whether the link state should be forced up or down
6675 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
6677 struct i40e_aq_get_phy_abilities_resp abilities;
6678 struct i40e_aq_set_phy_config config = {0};
6679 struct i40e_hw *hw = &pf->hw;
6684 /* Card might've been put in an unstable state by other drivers
6685 * and applications, which can cause incorrect speed values to be
6686 * set on startup. In order to clear speed registers, we call
6687 * get_phy_capabilities twice, once to get initial state of
6688 * available speeds, and once to get current PHY config.
6690 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
6693 dev_err(&pf->pdev->dev,
6694 "failed to get phy cap., ret = %s last_status = %s\n",
6695 i40e_stat_str(hw, err),
6696 i40e_aq_str(hw, hw->aq.asq_last_status));
6699 speed = abilities.link_speed;
6701 /* Get the current phy config */
6702 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
6705 dev_err(&pf->pdev->dev,
6706 "failed to get phy cap., ret = %s last_status = %s\n",
6707 i40e_stat_str(hw, err),
6708 i40e_aq_str(hw, hw->aq.asq_last_status));
6712 /* If link needs to go up, but was not forced to go down,
6713 * and its speed values are OK, no need for a flap
6715 if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
6716 return I40E_SUCCESS;
6718 /* To force link we need to set bits for all supported PHY types,
6719 * but there are now more than 32, so we need to split the bitmap
6720 * across two fields.
6722 mask = I40E_PHY_TYPES_BITMASK;
6723 config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
6724 config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
6725 /* Copy the old settings, except for phy_type */
6726 config.abilities = abilities.abilities;
6727 if (abilities.link_speed != 0)
6728 config.link_speed = abilities.link_speed;
6730 config.link_speed = speed;
6731 config.eee_capability = abilities.eee_capability;
6732 config.eeer = abilities.eeer_val;
6733 config.low_power_ctrl = abilities.d3_lpan;
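/* Preserve the currently configured FEC mode when re-applying the PHY config */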
6734 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
6735 I40E_AQ_PHY_FEC_CONFIG_MASK;
6736 err = i40e_aq_set_phy_config(hw, &config, NULL);
6739 dev_err(&pf->pdev->dev,
6740 "set phy config ret = %s last_status = %s\n",
6741 i40e_stat_str(&pf->hw, err),
6742 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6746 /* Update the link info */
6747 err = i40e_update_link_info(hw);
6749 /* Wait a little bit (on 40G cards it sometimes takes a really
6750 * long time for link to come back from the atomic reset)
6754 i40e_update_link_info(hw);
6757 i40e_aq_set_link_restart_an(hw, true, NULL);
6759 return I40E_SUCCESS;
6763 * i40e_down - Shutdown the connection processing
6764 * @vsi: the VSI being stopped
6766 void i40e_down(struct i40e_vsi *vsi)
6770 /* It is assumed that the caller of this function
6771 * sets the vsi->state __I40E_VSI_DOWN bit.
6774 netif_carrier_off(vsi->netdev);
6775 netif_tx_disable(vsi->netdev);
6777 i40e_vsi_disable_irq(vsi);
6778 i40e_vsi_stop_rings(vsi);
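/* Optionally take the PHY link down when the main VSI is closed and link-down-on-close is enabled */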
6779 if (vsi->type == I40E_VSI_MAIN &&
6780 vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
6781 i40e_force_link_state(vsi->back, false);
6782 i40e_napi_disable_all(vsi);
6784 for (i = 0; i < vsi->num_queue_pairs; i++) {
6785 i40e_clean_tx_ring(vsi->tx_rings[i]);
6786 if (i40e_enabled_xdp_vsi(vsi)) {
6787 /* Make sure that in-progress ndo_xdp_xmit
6788 * calls are completed.
6791 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6793 i40e_clean_rx_ring(vsi->rx_rings[i]);
6799 * i40e_validate_mqprio_qopt - validate queue mapping info
6800 * @vsi: the VSI being configured
6801 * @mqprio_qopt: queue parameters
6803 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6804 struct tc_mqprio_qopt_offload *mqprio_qopt)
6806 u64 sum_max_rate = 0;
6810 if (mqprio_qopt->qopt.offset[0] != 0 ||
6811 mqprio_qopt->qopt.num_tc < 1 ||
6812 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6814 for (i = 0; ; i++) {
6815 if (!mqprio_qopt->qopt.count[i])
6817 if (mqprio_qopt->min_rate[i]) {
6818 dev_err(&vsi->back->pdev->dev,
6819 "Invalid min tx rate (greater than 0) specified\n");
6822 max_rate = mqprio_qopt->max_rate[i];
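/* max_rate is handed down in bytes/s; convert to Mbps so the sum can be checked against the link speed */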
6823 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6824 sum_max_rate += max_rate;
6826 if (i >= mqprio_qopt->qopt.num_tc - 1)
6828 if (mqprio_qopt->qopt.offset[i + 1] !=
6829 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6832 if (vsi->num_queue_pairs <
6833 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6836 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6837 dev_err(&vsi->back->pdev->dev,
6838 "Invalid max tx rate specified\n");
6845 * i40e_vsi_set_default_tc_config - set default values for tc configuration
6846 * @vsi: the VSI being configured
6848 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6853 /* Only TC0 is enabled */
6854 vsi->tc_config.numtc = 1;
6855 vsi->tc_config.enabled_tc = 1;
6856 qcount = min_t(int, vsi->alloc_queue_pairs,
6857 i40e_pf_get_max_q_per_tc(vsi->back));
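/* Cap the TC0 queue count at both the VSI's allocation and the per-TC hardware limit */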
6858 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6859 /* For each TC that is not enabled, set the offset to the default
6860 * queue and allocate one queue for the given TC.
6862 vsi->tc_config.tc_info[i].qoffset = 0;
6864 vsi->tc_config.tc_info[i].qcount = qcount;
6866 vsi->tc_config.tc_info[i].qcount = 1;
6867 vsi->tc_config.tc_info[i].netdev_tc = 0;
6872 * i40e_setup_tc - configure multiple traffic classes
6873 * @netdev: net device to configure
6874 * @type_data: tc offload data
6876 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
6878 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
6879 struct i40e_netdev_priv *np = netdev_priv(netdev);
6880 struct i40e_vsi *vsi = np->vsi;
6881 struct i40e_pf *pf = vsi->back;
6882 u8 enabled_tc = 0, num_tc, hw;
6883 bool need_reset = false;
6884 int old_queue_pairs;
6889 old_queue_pairs = vsi->num_queue_pairs;
6890 num_tc = mqprio_qopt->qopt.num_tc;
6891 hw = mqprio_qopt->qopt.hw;
6892 mode = mqprio_qopt->mode;
6894 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6895 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
6899 /* Check if MFP enabled */
6900 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
6902 "Configuring TC not supported in MFP mode\n");
6906 case TC_MQPRIO_MODE_DCB:
6907 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6909 /* Check if DCB enabled to continue */
6910 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6912 "DCB is not enabled for adapter\n");
6916 /* Check whether tc count is within enabled limit */
6917 if (num_tc > i40e_pf_get_num_tc(pf)) {
6919 "TC count greater than enabled on link for adapter\n");
6923 case TC_MQPRIO_MODE_CHANNEL:
6924 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
6926 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
6929 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6931 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
6934 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
6935 sizeof(*mqprio_qopt));
6936 pf->flags |= I40E_FLAG_TC_MQPRIO;
6937 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
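/* Channel (full mqprio offload) mode and DCB are mutually exclusive, so DCB stays cleared here */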
6944 /* Generate TC map for number of tc requested */
6945 for (i = 0; i < num_tc; i++)
6946 enabled_tc |= BIT(i);
6948 /* Requesting same TC configuration as already enabled */
6949 if (enabled_tc == vsi->tc_config.enabled_tc &&
6950 mode != TC_MQPRIO_MODE_CHANNEL)
6953 /* Quiesce VSI queues */
6954 i40e_quiesce_vsi(vsi);
6956 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
6957 i40e_remove_queue_channels(vsi);
6959 /* Configure VSI for enabled TCs */
6960 ret = i40e_vsi_config_tc(vsi, enabled_tc);
6962 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
6968 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
6969 if (vsi->mqprio_qopt.max_rate[0]) {
6970 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
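/* max_rate[0] is in bytes/s; convert to Mbps before programming the VSI BW limit */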
6972 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
6973 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
6975 u64 credits = max_tx_rate;
6977 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6978 dev_dbg(&vsi->back->pdev->dev,
6979 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6988 ret = i40e_configure_queue_channels(vsi);
6990 vsi->num_queue_pairs = old_queue_pairs;
6992 "Failed configuring queue channels\n");
6999 /* Reset the configuration data to defaults, only TC0 is enabled */
7001 i40e_vsi_set_default_tc_config(vsi);
7006 i40e_unquiesce_vsi(vsi);
7011 * i40e_set_cld_element - sets cloud filter element data
7012 * @filter: cloud filter rule
7013 * @cld: ptr to cloud filter element data
7015 * This is a helper function to copy data into the cloud filter element
7018 i40e_set_cld_element(struct i40e_cloud_filter *filter,
7019 struct i40e_aqc_cloud_filters_element_data *cld)
7024 memset(cld, 0, sizeof(*cld));
7025 ether_addr_copy(cld->outer_mac, filter->dst_mac);
7026 ether_addr_copy(cld->inner_mac, filter->src_mac);
7028 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
7031 if (filter->n_proto == ETH_P_IPV6) {
7032 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
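/* Copy the IPv6 address into the AQ element as little-endian 32-bit words, walking the big-endian source words in reverse order */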
7033 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
7035 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
7036 ipa = cpu_to_le32(ipa);
7037 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
7040 ipa = be32_to_cpu(filter->dst_ipv4);
7041 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
7044 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
7046 /* tenant_id is not supported by FW now; once the support is enabled,
7047 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
7049 if (filter->tenant_id)
7054 * i40e_add_del_cloud_filter - Add/del cloud filter
7055 * @vsi: pointer to VSI
7056 * @filter: cloud filter rule
7057 * @add: if true, add, if false, delete
7059 * Add or delete a cloud filter for a specific flow spec.
7060 * Returns 0 if the filter was successfully added.
7062 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
7063 struct i40e_cloud_filter *filter, bool add)
7065 struct i40e_aqc_cloud_filters_element_data cld_filter;
7066 struct i40e_pf *pf = vsi->back;
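/* Map each supported combination of cloud filter flags to the matching admin queue filter type */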
7068 static const u16 flag_table[128] = {
7069 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
7070 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
7071 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
7072 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
7073 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
7074 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
7075 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
7076 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
7077 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
7078 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
7079 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
7080 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
7081 [I40E_CLOUD_FILTER_FLAGS_IIP] =
7082 I40E_AQC_ADD_CLOUD_FILTER_IIP,
7085 if (filter->flags >= ARRAY_SIZE(flag_table))
7086 return I40E_ERR_CONFIG;
7088 /* copy element needed to add cloud filter from filter */
7089 i40e_set_cld_element(filter, &cld_filter);
7091 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
7092 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
7093 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
7095 if (filter->n_proto == ETH_P_IPV6)
7096 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7097 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7099 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7100 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7103 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
7106 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
7109 dev_dbg(&pf->pdev->dev,
7110 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
7111 add ? "add" : "delete", filter->dst_port, ret,
7112 pf->hw.aq.asq_last_status);
7114 dev_info(&pf->pdev->dev,
7115 "%s cloud filter for VSI: %d\n",
7116 add ? "Added" : "Deleted", filter->seid);
7121 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
7122 * @vsi: pointer to VSI
7123 * @filter: cloud filter rule
7124 * @add: if true, add, if false, delete
7126 * Add or delete a cloud filter for a specific flow spec using big buffer.
7127 * Returns 0 if the filter was successfully added.
7129 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
7130 struct i40e_cloud_filter *filter,
7133 struct i40e_aqc_cloud_filters_element_bb cld_filter;
7134 struct i40e_pf *pf = vsi->back;
7137 /* Filters with both a valid src and a valid dst mac_addr are not supported */
7138 if ((is_valid_ether_addr(filter->dst_mac) &&
7139 is_valid_ether_addr(filter->src_mac)) ||
7140 (is_multicast_ether_addr(filter->dst_mac) &&
7141 is_multicast_ether_addr(filter->src_mac)))
7144 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
7145 * ports are not supported via big buffer now.
7147 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
7150 /* adding filter using src_port/src_ip is not supported at this stage */
7151 if (filter->src_port || filter->src_ipv4 ||
7152 !ipv6_addr_any(&filter->ip.v6.src_ip6))
7155 /* copy element needed to add cloud filter from filter */
7156 i40e_set_cld_element(filter, &cld_filter.element);
7158 if (is_valid_ether_addr(filter->dst_mac) ||
7159 is_valid_ether_addr(filter->src_mac) ||
7160 is_multicast_ether_addr(filter->dst_mac) ||
7161 is_multicast_ether_addr(filter->src_mac)) {
7162 /* MAC + IP : unsupported mode */
7163 if (filter->dst_ipv4)
7166 /* since we validated that L4 port must be valid before
7167 * we get here, start with respective "flags" value
7168 * and update if vlan is present or not
7170 cld_filter.element.flags =
7171 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7173 if (filter->vlan_id) {
7174 cld_filter.element.flags =
7175 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7178 } else if (filter->dst_ipv4 ||
7179 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7180 cld_filter.element.flags =
7181 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7182 if (filter->n_proto == ETH_P_IPV6)
7183 cld_filter.element.flags |=
7184 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7186 cld_filter.element.flags |=
7187 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7189 dev_err(&pf->pdev->dev,
7190 "either mac or ip has to be valid for cloud filter\n");
7194 /* Now copy L4 port in Byte 6..7 in general fields */
7195 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7196 be16_to_cpu(filter->dst_port);
7199 /* Validate current device switch mode, change if necessary */
7200 ret = i40e_validate_and_set_switch_mode(vsi);
7202 dev_err(&pf->pdev->dev,
7203 "failed to set switch mode, ret %d\n",
7208 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7211 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7216 dev_dbg(&pf->pdev->dev,
7217 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7218 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7220 dev_info(&pf->pdev->dev,
7221 "%s cloud filter for VSI: %d, L4 port: %d\n",
7222 add ? "add" : "delete", filter->seid,
7223 ntohs(filter->dst_port));
7228 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
7229 * @vsi: Pointer to VSI
7230 * @cls_flower: Pointer to struct tc_cls_flower_offload
7231 * @filter: Pointer to cloud filter structure
7234 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7235 struct tc_cls_flower_offload *f,
7236 struct i40e_cloud_filter *filter)
7238 struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
7239 struct flow_dissector *dissector = rule->match.dissector;
7240 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7241 struct i40e_pf *pf = vsi->back;
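/* Reject rules that match on any dissector key this driver cannot offload */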
7244 if (dissector->used_keys &
7245 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7246 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7247 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7248 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7249 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7250 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7251 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7252 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7253 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7254 dissector->used_keys);
7258 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7259 struct flow_match_enc_keyid match;
7261 flow_rule_match_enc_keyid(rule, &match);
7262 if (match.mask->keyid != 0)
7263 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7265 filter->tenant_id = be32_to_cpu(match.key->keyid);
7268 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
7269 struct flow_match_basic match;
7271 flow_rule_match_basic(rule, &match);
7272 n_proto_key = ntohs(match.key->n_proto);
7273 n_proto_mask = ntohs(match.mask->n_proto);
7275 if (n_proto_key == ETH_P_ALL) {
7279 filter->n_proto = n_proto_key & n_proto_mask;
7280 filter->ip_proto = match.key->ip_proto;
7283 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7284 struct flow_match_eth_addrs match;
7286 flow_rule_match_eth_addrs(rule, &match);
7288 /* use is_broadcast and is_zero to check for all 0xf or 0 */
7289 if (!is_zero_ether_addr(match.mask->dst)) {
7290 if (is_broadcast_ether_addr(match.mask->dst)) {
7291 field_flags |= I40E_CLOUD_FIELD_OMAC;
7293 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7295 return I40E_ERR_CONFIG;
7299 if (!is_zero_ether_addr(match.mask->src)) {
7300 if (is_broadcast_ether_addr(match.mask->src)) {
7301 field_flags |= I40E_CLOUD_FIELD_IMAC;
7303 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7305 return I40E_ERR_CONFIG;
7308 ether_addr_copy(filter->dst_mac, match.key->dst);
7309 ether_addr_copy(filter->src_mac, match.key->src);
7312 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
7313 struct flow_match_vlan match;
7315 flow_rule_match_vlan(rule, &match);
7316 if (match.mask->vlan_id) {
7317 if (match.mask->vlan_id == VLAN_VID_MASK) {
7318 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7321 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7322 match.mask->vlan_id);
7323 return I40E_ERR_CONFIG;
7327 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
7330 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
7331 struct flow_match_control match;
7333 flow_rule_match_control(rule, &match);
7334 addr_type = match.key->addr_type;
7337 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7338 struct flow_match_ipv4_addrs match;
7340 flow_rule_match_ipv4_addrs(rule, &match);
7341 if (match.mask->dst) {
7342 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
7343 field_flags |= I40E_CLOUD_FIELD_IIP;
7345 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7347 return I40E_ERR_CONFIG;
7351 if (match.mask->src) {
7352 if (match.mask->src == cpu_to_be32(0xffffffff)) {
7353 field_flags |= I40E_CLOUD_FIELD_IIP;
7355 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7357 return I40E_ERR_CONFIG;
7361 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7362 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7363 return I40E_ERR_CONFIG;
7365 filter->dst_ipv4 = match.key->dst;
7366 filter->src_ipv4 = match.key->src;
7369 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7370 struct flow_match_ipv6_addrs match;
7372 flow_rule_match_ipv6_addrs(rule, &match);
7374 /* src and dest IPV6 address should not be LOOPBACK
7375 * (0:0:0:0:0:0:0:1), which can be represented as ::1
7377 if (ipv6_addr_loopback(&match.key->dst) ||
7378 ipv6_addr_loopback(&match.key->src)) {
7379 dev_err(&pf->pdev->dev,
7380 "Bad ipv6, addr is LOOPBACK\n");
7381 return I40E_ERR_CONFIG;
7383 if (!ipv6_addr_any(&match.mask->dst) ||
7384 !ipv6_addr_any(&match.mask->src))
7385 field_flags |= I40E_CLOUD_FIELD_IIP;
7387 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
7388 sizeof(filter->src_ipv6));
7389 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
7390 sizeof(filter->dst_ipv6));
7393 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
7394 struct flow_match_ports match;
7396 flow_rule_match_ports(rule, &match);
7397 if (match.mask->src) {
7398 if (match.mask->src == cpu_to_be16(0xffff)) {
7399 field_flags |= I40E_CLOUD_FIELD_IIP;
7401 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7402 be16_to_cpu(match.mask->src));
7403 return I40E_ERR_CONFIG;
7407 if (match.mask->dst) {
7408 if (match.mask->dst == cpu_to_be16(0xffff)) {
7409 field_flags |= I40E_CLOUD_FIELD_IIP;
7411 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7412 be16_to_cpu(match.mask->dst));
7413 return I40E_ERR_CONFIG;
7417 filter->dst_port = match.key->dst;
7418 filter->src_port = match.key->src;
7420 switch (filter->ip_proto) {
7425 dev_err(&pf->pdev->dev,
7426 "Only UDP and TCP transport are supported\n");
7430 filter->flags = field_flags;
7435 * i40e_handle_tclass - Forward to a traffic class on the device
7436 * @vsi: Pointer to VSI
7437 * @tc: traffic class index on the device
7438 * @filter: Pointer to cloud filter structure
7441 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7442 struct i40e_cloud_filter *filter)
7444 struct i40e_channel *ch, *ch_tmp;
7446 /* direct to a traffic class on the same device */
7448 filter->seid = vsi->seid;
7450 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
7451 if (!filter->dst_port) {
7452 dev_err(&vsi->back->pdev->dev,
7453 "Specify destination port to direct to traffic class that is not default\n");
7456 if (list_empty(&vsi->ch_list))
7458 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
7460 if (ch->seid == vsi->tc_seid_map[tc])
7461 filter->seid = ch->seid;
7465 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
7470 * i40e_configure_clsflower - Configure tc flower filters
7471 * @vsi: Pointer to VSI
7472 * @cls_flower: Pointer to struct tc_cls_flower_offload
7475 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7476 struct tc_cls_flower_offload *cls_flower)
7478 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
7479 struct i40e_cloud_filter *filter = NULL;
7480 struct i40e_pf *pf = vsi->back;
7484 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7488 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
7489 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
7492 if (pf->fdir_pf_active_filters ||
7493 (!hlist_empty(&pf->fdir_filter_list))) {
7494 dev_err(&vsi->back->pdev->dev,
7495 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
7499 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
7500 dev_err(&vsi->back->pdev->dev,
7501 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
7502 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7503 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7506 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
7510 filter->cookie = cls_flower->cookie;
7512 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
7516 err = i40e_handle_tclass(vsi, tc, filter);
7520 /* Add cloud filter */
7521 if (filter->dst_port)
7522 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
7524 err = i40e_add_del_cloud_filter(vsi, filter, true);
7527 dev_err(&pf->pdev->dev,
7528 "Failed to add cloud filter, err %s\n",
7529 i40e_stat_str(&pf->hw, err));
7533 /* add filter to the ordered list */
7534 INIT_HLIST_NODE(&filter->cloud_node);
7536 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
7538 pf->num_cloud_filters++;
7548 * i40e_find_cloud_filter - Find the cloud filter in the list
7548 * @vsi: Pointer to VSI
7549 * @cookie: filter specific cookie
7552 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
7553 unsigned long *cookie)
7555 struct i40e_cloud_filter *filter = NULL;
7556 struct hlist_node *node2;
7558 hlist_for_each_entry_safe(filter, node2,
7559 &vsi->back->cloud_filter_list, cloud_node)
7560 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
7566 * i40e_delete_clsflower - Remove tc flower filters
7567 * @vsi: Pointer to VSI
7568 * @cls_flower: Pointer to struct tc_cls_flower_offload
7571 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
7572 struct tc_cls_flower_offload *cls_flower)
7574 struct i40e_cloud_filter *filter = NULL;
7575 struct i40e_pf *pf = vsi->back;
7578 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
7583 hash_del(&filter->cloud_node);
7585 if (filter->dst_port)
7586 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
7588 err = i40e_add_del_cloud_filter(vsi, filter, false);
7592 dev_err(&pf->pdev->dev,
7593 "Failed to delete cloud filter, err %s\n",
7594 i40e_stat_str(&pf->hw, err));
7595 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
7598 pf->num_cloud_filters--;
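/* If that was the last cloud filter, hand Flow Director sideband back if it was borrowed for cloud filters */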
7599 if (!pf->num_cloud_filters)
7600 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7601 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7602 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7603 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7604 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7610 * i40e_setup_tc_cls_flower - flower classifier offloads
7611 * @netdev: net device to configure
7612 * @type_data: offload data
7614 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
7615 struct tc_cls_flower_offload *cls_flower)
7617 struct i40e_vsi *vsi = np->vsi;
7619 switch (cls_flower->command) {
7620 case TC_CLSFLOWER_REPLACE:
7621 return i40e_configure_clsflower(vsi, cls_flower);
7622 case TC_CLSFLOWER_DESTROY:
7623 return i40e_delete_clsflower(vsi, cls_flower);
7624 case TC_CLSFLOWER_STATS:
7631 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
7634 struct i40e_netdev_priv *np = cb_priv;
7636 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
7640 case TC_SETUP_CLSFLOWER:
7641 return i40e_setup_tc_cls_flower(np, type_data);
7648 static int i40e_setup_tc_block(struct net_device *dev,
7649 struct tc_block_offload *f)
7651 struct i40e_netdev_priv *np = netdev_priv(dev);
7653 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
7656 switch (f->command) {
7658 return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
7660 case TC_BLOCK_UNBIND:
7661 tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
7668 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
7672 case TC_SETUP_QDISC_MQPRIO:
7673 return i40e_setup_tc(netdev, type_data);
7674 case TC_SETUP_BLOCK:
7675 return i40e_setup_tc_block(netdev, type_data);
7682 * i40e_open - Called when a network interface is made active
7683 * @netdev: network interface device structure
7685 * The open entry point is called when a network interface is made
7686 * active by the system (IFF_UP). At this point all resources needed
7687 * for transmit and receive operations are allocated, the interrupt
7688 * handler is registered with the OS, the netdev watchdog subtask is
7689 * enabled, and the stack is notified that the interface is ready.
7691 * Returns 0 on success, negative value on failure
7693 int i40e_open(struct net_device *netdev)
7695 struct i40e_netdev_priv *np = netdev_priv(netdev);
7696 struct i40e_vsi *vsi = np->vsi;
7697 struct i40e_pf *pf = vsi->back;
7700 /* disallow open during test or if eeprom is broken */
7701 if (test_bit(__I40E_TESTING, pf->state) ||
7702 test_bit(__I40E_BAD_EEPROM, pf->state))
7705 netif_carrier_off(netdev);
7707 if (i40e_force_link_state(pf, true))
7710 err = i40e_vsi_open(vsi);
7714 /* configure global TSO hardware offload settings */
7715 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
7716 TCP_FLAG_FIN) >> 16);
7717 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
7719 TCP_FLAG_CWR) >> 16);
7720 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
7722 udp_tunnel_get_rx_info(netdev);
7729 * @vsi: the VSI to open
7731 * Finish initialization of the VSI.
7733 * Returns 0 on success, negative value on failure
7735 * Note: expects to be called while under rtnl_lock()
7737 int i40e_vsi_open(struct i40e_vsi *vsi)
7739 struct i40e_pf *pf = vsi->back;
7740 char int_name[I40E_INT_NAME_STR_LEN];
7743 /* allocate descriptors */
7744 err = i40e_vsi_setup_tx_resources(vsi);
7747 err = i40e_vsi_setup_rx_resources(vsi);
7751 err = i40e_vsi_configure(vsi);
7756 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7757 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
7758 err = i40e_vsi_request_irq(vsi, int_name);
7762 /* Notify the stack of the actual queue counts. */
7763 err = netif_set_real_num_tx_queues(vsi->netdev,
7764 vsi->num_queue_pairs);
7766 goto err_set_queues;
7768 err = netif_set_real_num_rx_queues(vsi->netdev,
7769 vsi->num_queue_pairs);
7771 goto err_set_queues;
7773 } else if (vsi->type == I40E_VSI_FDIR) {
7774 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
7775 dev_driver_string(&pf->pdev->dev),
7776 dev_name(&pf->pdev->dev));
7777 err = i40e_vsi_request_irq(vsi, int_name);
7784 err = i40e_up_complete(vsi);
7786 goto err_up_complete;
7793 i40e_vsi_free_irq(vsi);
7795 i40e_vsi_free_rx_resources(vsi);
7797 i40e_vsi_free_tx_resources(vsi);
7798 if (vsi == pf->vsi[pf->lan_vsi])
7799 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
7805 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
7806 * @pf: Pointer to PF
7808 * This function destroys the hlist where all the Flow Director
7809 * filters were saved.
7811 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
7813 struct i40e_fdir_filter *filter;
7814 struct i40e_flex_pit *pit_entry, *tmp;
7815 struct hlist_node *node2;
7817 hlist_for_each_entry_safe(filter, node2,
7818 &pf->fdir_filter_list, fdir_node) {
7819 hlist_del(&filter->fdir_node);
7823 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
7824 list_del(&pit_entry->list);
7827 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
7829 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
7830 list_del(&pit_entry->list);
7833 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
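/* With all filters freed, clear the per-flow-type bookkeeping counters */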
7835 pf->fdir_pf_active_filters = 0;
7836 pf->fd_tcp4_filter_cnt = 0;
7837 pf->fd_udp4_filter_cnt = 0;
7838 pf->fd_sctp4_filter_cnt = 0;
7839 pf->fd_ip4_filter_cnt = 0;
7841 /* Reprogram the default input set for TCP/IPv4 */
7842 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
7843 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7844 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7846 /* Reprogram the default input set for UDP/IPv4 */
7847 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
7848 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7849 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7851 /* Reprogram the default input set for SCTP/IPv4 */
7852 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
7853 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7854 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7856 /* Reprogram the default input set for Other/IPv4 */
7857 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
7858 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
7860 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
7861 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
7865 * i40e_cloud_filter_exit - Cleans up the cloud filters
7866 * @pf: Pointer to PF
7868 * This function destroys the hlist where all the cloud filters
7871 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
7873 struct i40e_cloud_filter *cfilter;
7874 struct hlist_node *node;
7876 hlist_for_each_entry_safe(cfilter, node,
7877 &pf->cloud_filter_list, cloud_node) {
7878 hlist_del(&cfilter->cloud_node);
7881 pf->num_cloud_filters = 0;
7883 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7884 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7885 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7886 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7887 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7892 * i40e_close - Disables a network interface
7893 * @netdev: network interface device structure
7895 * The close entry point is called when an interface is de-activated
7896 * by the OS. The hardware is still under the driver's control, but
7897 * this netdev interface is disabled.
7899 * Returns 0, this is not allowed to fail
7901 int i40e_close(struct net_device *netdev)
7903 struct i40e_netdev_priv *np = netdev_priv(netdev);
7904 struct i40e_vsi *vsi = np->vsi;
7906 i40e_vsi_close(vsi);
7912 * i40e_do_reset - Start a PF or Core Reset sequence
7913 * @pf: board private structure
7914 * @reset_flags: which reset is requested
7915 * @lock_acquired: indicates whether or not the lock has been acquired
7916 * before this function was called.
7918 * The essential difference in resets is that the PF Reset
7919 * doesn't clear the packet buffers, doesn't reset the PE
7920 * firmware, and doesn't bother the other PFs on the chip.
7922 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
7926 WARN_ON(in_interrupt());
7929 /* do the biggest reset indicated */
7930 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
7932 /* Request a Global Reset
7934 * This will start the chip's countdown to the actual full
7935 * chip reset event, and a warning interrupt to be sent
7936 * to all PFs, including the requestor. Our handler
7937 * for the warning interrupt will deal with the shutdown
7938 * and recovery of the switch setup.
7940 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
7941 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7942 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
7943 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7945 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
7947 /* Request a Core Reset
7949 * Same as Global Reset, except does *not* include the MAC/PHY
7951 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
7952 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7953 val |= I40E_GLGEN_RTRIG_CORER_MASK;
7954 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7955 i40e_flush(&pf->hw);
7957 } else if (reset_flags & I40E_PF_RESET_FLAG) {
7959 /* Request a PF Reset
7961 * Resets only the PF-specific registers
7963 * This goes directly to the tear-down and rebuild of
7964 * the switch, since we need to do all the recovery as
7965 * for the Core Reset.
7967 dev_dbg(&pf->pdev->dev, "PFR requested\n");
7968 i40e_handle_reset_warning(pf, lock_acquired);
7970 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
7973 /* Find the VSI(s) that requested a re-init */
7974 dev_info(&pf->pdev->dev,
7975 "VSI reinit requested\n");
7976 for (v = 0; v < pf->num_alloc_vsi; v++) {
7977 struct i40e_vsi *vsi = pf->vsi[v];
7980 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
7982 i40e_vsi_reinit_locked(pf->vsi[v]);
7984 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
7987 /* Find the VSI(s) that needs to be brought down */
7988 dev_info(&pf->pdev->dev, "VSI down requested\n");
7989 for (v = 0; v < pf->num_alloc_vsi; v++) {
7990 struct i40e_vsi *vsi = pf->vsi[v];
7993 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
7995 set_bit(__I40E_VSI_DOWN, vsi->state);
8000 dev_info(&pf->pdev->dev,
8001 "bad reset request 0x%08x\n", reset_flags);
8005 #ifdef CONFIG_I40E_DCB
8007 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
8008 * @pf: board private structure
8009 * @old_cfg: current DCB config
8010 * @new_cfg: new DCB config
8012 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
8013 struct i40e_dcbx_config *old_cfg,
8014 struct i40e_dcbx_config *new_cfg)
8016 bool need_reconfig = false;
8018 /* Check if ETS configuration has changed */
8019 if (memcmp(&new_cfg->etscfg,
8021 sizeof(new_cfg->etscfg))) {
8022 /* If Priority Table has changed reconfig is needed */
8023 if (memcmp(&new_cfg->etscfg.prioritytable,
8024 &old_cfg->etscfg.prioritytable,
8025 sizeof(new_cfg->etscfg.prioritytable))) {
8026 need_reconfig = true;
8027 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
8030 if (memcmp(&new_cfg->etscfg.tcbwtable,
8031 &old_cfg->etscfg.tcbwtable,
8032 sizeof(new_cfg->etscfg.tcbwtable)))
8033 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
8035 if (memcmp(&new_cfg->etscfg.tsatable,
8036 &old_cfg->etscfg.tsatable,
8037 sizeof(new_cfg->etscfg.tsatable)))
8038 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
8041 /* Check if PFC configuration has changed */
8042 if (memcmp(&new_cfg->pfc,
8044 sizeof(new_cfg->pfc))) {
8045 need_reconfig = true;
8046 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
8049 /* Check if APP Table has changed */
8050 if (memcmp(&new_cfg->app,
8052 sizeof(new_cfg->app))) {
8053 need_reconfig = true;
8054 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
8057 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
8058 return need_reconfig;
8062 * i40e_handle_lldp_event - Handle LLDP Change MIB event
8063 * @pf: board private structure
8064 * @e: event info posted on ARQ
8066 static int i40e_handle_lldp_event(struct i40e_pf *pf,
8067 struct i40e_arq_event_info *e)
8069 struct i40e_aqc_lldp_get_mib *mib =
8070 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
8071 struct i40e_hw *hw = &pf->hw;
8072 struct i40e_dcbx_config tmp_dcbx_cfg;
8073 bool need_reconfig = false;
8077 /* Not DCB capable or capability disabled */
8078 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
8081 /* Ignore if event is not for Nearest Bridge */
8082 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
8083 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
8084 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
8085 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
8088 /* Check MIB Type and return if event for Remote MIB update */
8089 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
8090 dev_dbg(&pf->pdev->dev,
8091 "LLDP event mib type %s\n", type ? "remote" : "local");
8092 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
8093 /* Update the remote cached instance and return */
8094 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8095 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8096 &hw->remote_dcbx_config);
8100 /* Store the old configuration */
8101 tmp_dcbx_cfg = hw->local_dcbx_config;
8103 /* Reset the old DCBx configuration data */
8104 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
8105 /* Get updated DCBX data from firmware */
8106 ret = i40e_get_dcb_config(&pf->hw);
8108 dev_info(&pf->pdev->dev,
8109 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
8110 i40e_stat_str(&pf->hw, ret),
8111 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8115 /* No change detected in DCBX configs */
8116 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
8117 sizeof(tmp_dcbx_cfg))) {
8118 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
8122 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
8123 &hw->local_dcbx_config);
8125 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
8130 /* Enable DCB tagging only when more than one TC */
8131 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8132 pf->flags |= I40E_FLAG_DCB_ENABLED;
8134 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8136 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8137 /* Reconfiguration needed, quiesce all VSIs */
8138 i40e_pf_quiesce_all_vsi(pf);
8140 /* Changes in configuration update VEB/VSI */
8141 i40e_dcb_reconfigure(pf);
8143 ret = i40e_resume_port_tx(pf);
8145 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8146 /* In case of error no point in resuming VSIs */
8150 /* Wait for the PF's queues to be disabled */
8151 ret = i40e_pf_wait_queues_disabled(pf);
8153 /* Schedule PF reset to recover */
8154 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8155 i40e_service_event_schedule(pf);
8157 i40e_pf_unquiesce_all_vsi(pf);
8158 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8159 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8165 #endif /* CONFIG_I40E_DCB */
8168 * i40e_do_reset_safe - Protected reset path for userland calls.
8169 * @pf: board private structure
8170 * @reset_flags: which reset is requested
8173 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
8176 i40e_do_reset(pf, reset_flags, true);
8181 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
8182 * @pf: board private structure
8183 * @e: event info posted on ARQ
8185 * Handler for LAN Queue Overflow Event generated by the firmware for PF
8188 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8189 struct i40e_arq_event_info *e)
8191 struct i40e_aqc_lan_overflow *data =
8192 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8193 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8194 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8195 struct i40e_hw *hw = &pf->hw;
8199 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8202 /* Queue belongs to VF, find the VF and issue VF reset */
8203 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8204 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8205 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8206 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
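/* The VFVM index is device-wide; subtract the base to get this PF's zero-based VF index */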
8207 vf_id -= hw->func_caps.vf_base_id;
8208 vf = &pf->vf[vf_id];
8209 i40e_vc_notify_vf_reset(vf);
8210 /* Allow VF to process pending reset notification */
8212 i40e_reset_vf(vf, false);
8217 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
8218 * @pf: board private structure
8220 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
8224 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8225 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8230 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
8231 * @pf: board private structure
8233 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
8237 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
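/* Total programmed filters = guaranteed-space count plus best-effort count */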
8238 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8239 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8240 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
8245 * i40e_get_global_fd_count - Get total FD filters programmed on device
8246 * @pf: board private structure
8248 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8252 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8253 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8254 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8255 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
8260 * i40e_reenable_fdir_sb - Restore FDir SB capability
8261 * @pf: board private structure
8263 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
8265 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
8266 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8267 (I40E_DEBUG_FD & pf->hw.debug_mask))
8268 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8272 * i40e_reenable_fdir_atr - Restore FDir ATR capability
8273 * @pf: board private structure
8275 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
8277 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
8278 /* ATR uses the same filtering logic as SB rules. It only
8279 * functions properly if the input set mask is at the default
8280 * settings. It is safe to restore the default input set
8281 * because there are no active TCPv4 filter rules.
8283 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8284 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8285 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8287 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8288 (I40E_DEBUG_FD & pf->hw.debug_mask))
8289 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8294 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
8295 * @pf: board private structure
8296 * @filter: FDir filter to remove
8298 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
8299 struct i40e_fdir_filter *filter)
8301 /* Update counters */
8302 pf->fdir_pf_active_filters--;
8305 switch (filter->flow_type) {
8307 pf->fd_tcp4_filter_cnt--;
8310 pf->fd_udp4_filter_cnt--;
8313 pf->fd_sctp4_filter_cnt--;
8316 switch (filter->ip4_proto) {
8318 pf->fd_tcp4_filter_cnt--;
8321 pf->fd_udp4_filter_cnt--;
8324 pf->fd_sctp4_filter_cnt--;
8327 pf->fd_ip4_filter_cnt--;
8333 /* Remove the filter from the list and free memory */
8334 hlist_del(&filter->fdir_node);
8339 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
8340 * @pf: board private structure
8342 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8344 struct i40e_fdir_filter *filter;
8345 u32 fcnt_prog, fcnt_avail;
8346 struct hlist_node *node;
8348 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8351 /* Check if we have enough room to re-enable FDir SB capability. */
8352 fcnt_prog = i40e_get_global_fd_count(pf);
8353 fcnt_avail = pf->fdir_pf_filter_count;
8354 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8355 (pf->fd_add_err == 0) ||
8356 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
8357 i40e_reenable_fdir_sb(pf);
8359 /* We should wait for even more space before re-enabling ATR.
8360 * Additionally, we cannot enable ATR as long as we still have TCP SB
8363 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
8364 (pf->fd_tcp4_filter_cnt == 0))
8365 i40e_reenable_fdir_atr(pf);
8367 /* if hw had a problem adding a filter, delete it */
8368 if (pf->fd_inv > 0) {
8369 hlist_for_each_entry_safe(filter, node,
8370 &pf->fdir_filter_list, fdir_node)
8371 if (filter->fd_id == pf->fd_inv)
8372 i40e_delete_invalid_filter(pf, filter);
8376 #define I40E_MIN_FD_FLUSH_INTERVAL 10
8377 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8379 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
8380 * @pf: board private structure
8382 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8384 unsigned long min_flush_time;
8385 int flush_wait_retry = 50;
8386 bool disable_atr = false;
8390 if (!time_after(jiffies, pf->fd_flush_timestamp +
8391 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8394 /* If the flush is happening too quickly and we have mostly SB rules we
8395 * should not re-enable ATR for some time.
8397 min_flush_time = pf->fd_flush_timestamp +
8398 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8399 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8401 if (!(time_after(jiffies, min_flush_time)) &&
8402 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8403 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8404 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8408 pf->fd_flush_timestamp = jiffies;
8409 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8410 /* flush all filters */
8411 wr32(&pf->hw, I40E_PFQF_CTL_1,
8412 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8413 i40e_flush(&pf->hw);
8417 /* Check FD flush status every 5-6msec */
8418 usleep_range(5000, 6000);
8419 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8420 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8422 } while (flush_wait_retry--);
8423 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8424 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8426 /* replay sideband filters */
8427 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
8428 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
8429 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8430 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8431 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8432 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
8437 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
8438 * @pf: board private structure
8440 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
8442 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
8445 /* We can see up to 256 filter programming descriptors in transit if the filters are
8446 * being applied really fast, before we see the first
8447 * filter miss error on Rx queue 0. Accumulating enough error messages before
8448 * reacting will make sure we don't cause a flush too often.
8450 #define I40E_MAX_FD_PROGRAM_ERROR 256
8453 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
8454 * @pf: board private structure
8456 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
8459 /* if interface is down do nothing */
8460 if (test_bit(__I40E_DOWN, pf->state))
8463 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8464 i40e_fdir_flush_and_replay(pf);
8466 i40e_fdir_check_and_reenable(pf);
8471 * i40e_vsi_link_event - notify VSI of a link event
8472 * @vsi: vsi to be notified
8473 * @link_up: link up or down
8475 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
8477 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
8480 switch (vsi->type) {
8482 if (!vsi->netdev || !vsi->netdev_registered)
8486 netif_carrier_on(vsi->netdev);
8487 netif_tx_wake_all_queues(vsi->netdev);
8489 netif_carrier_off(vsi->netdev);
8490 netif_tx_stop_all_queues(vsi->netdev);
8494 case I40E_VSI_SRIOV:
8495 case I40E_VSI_VMDQ2:
8497 case I40E_VSI_IWARP:
8498 case I40E_VSI_MIRROR:
8500 /* there is no notification for other VSIs */
8506 * i40e_veb_link_event - notify elements on the veb of a link event
8507 * @veb: veb to be notified
8508 * @link_up: link up or down
8510 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
8515 if (!veb || !veb->pf)
8519 /* depth first... */
8520 for (i = 0; i < I40E_MAX_VEB; i++)
8521 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
8522 i40e_veb_link_event(pf->veb[i], link_up);
8524 /* ... now the local VSIs */
8525 for (i = 0; i < pf->num_alloc_vsi; i++)
8526 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
8527 i40e_vsi_link_event(pf->vsi[i], link_up);
8531 * i40e_link_event - Update netif_carrier status
8532 * @pf: board private structure
8534 static void i40e_link_event(struct i40e_pf *pf)
8536 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8537 u8 new_link_speed, old_link_speed;
8539 bool new_link, old_link;
8541 /* set this to force the get_link_status call to refresh state */
8542 pf->hw.phy.get_link_info = true;
8543 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
8544 status = i40e_get_link_status(&pf->hw, &new_link);
8546 /* On success, disable temp link polling */
8547 if (status == I40E_SUCCESS) {
8548 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
8550 /* Enable link polling temporarily until i40e_get_link_status
8551 * returns I40E_SUCCESS
8553 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
8554 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
8559 old_link_speed = pf->hw.phy.link_info_old.link_speed;
8560 new_link_speed = pf->hw.phy.link_info.link_speed;
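/* Skip the update if nothing changed and the netdev already reflects the current link state */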
8562 if (new_link == old_link &&
8563 new_link_speed == old_link_speed &&
8564 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
8565 new_link == netif_carrier_ok(vsi->netdev)))
8568 i40e_print_link_message(vsi, new_link);
8570 /* Notify the base of the switch tree connected to
8571 * the link. Floating VEBs are not notified.
8573 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
8574 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
8576 i40e_vsi_link_event(vsi, new_link);
8579 i40e_vc_notify_link_state(pf);
8581 if (pf->flags & I40E_FLAG_PTP)
8582 i40e_ptp_set_increment(pf);
8586 * i40e_watchdog_subtask - periodic checks not using event driven response
8587 * @pf: board private structure
8589 static void i40e_watchdog_subtask(struct i40e_pf *pf)
8593 /* if interface is down do nothing */
8594 if (test_bit(__I40E_DOWN, pf->state) ||
8595 test_bit(__I40E_CONFIG_BUSY, pf->state))
8598 /* make sure we don't do these things too often */
8599 if (time_before(jiffies, (pf->service_timer_previous +
8600 pf->service_timer_period)))
8602 pf->service_timer_previous = jiffies;
8604 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
8605 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
8606 i40e_link_event(pf);
8608 /* Update the stats for active netdevs so the network stack
8609 * can look at updated numbers whenever it cares to
8611 for (i = 0; i < pf->num_alloc_vsi; i++)
8612 if (pf->vsi[i] && pf->vsi[i]->netdev)
8613 i40e_update_stats(pf->vsi[i]);
8615 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
8616 /* Update the stats for the active switching components */
8617 for (i = 0; i < I40E_MAX_VEB; i++)
8619 i40e_update_veb_stats(pf->veb[i]);
8622 i40e_ptp_rx_hang(pf);
8623 i40e_ptp_tx_hang(pf);
8627 * i40e_reset_subtask - Set up for resetting the device and driver
8628 * @pf: board private structure
8630 static void i40e_reset_subtask(struct i40e_pf *pf)
8632 u32 reset_flags = 0;
8634 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
8635 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
8636 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
8638 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
8639 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
8640 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8642 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
8643 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
8644 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
8646 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
8647 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
8648 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
8650 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
8651 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
8652 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
8655 /* If there's a recovery already waiting, it takes
8656 * precedence over starting a new reset sequence.
8658 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
8659 i40e_prep_for_reset(pf, false);
8661 i40e_rebuild(pf, false, false);
8664 /* If we're already down or resetting, just bail */
8666 !test_bit(__I40E_DOWN, pf->state) &&
8667 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
8668 i40e_do_reset(pf, reset_flags, false);
8673 * i40e_handle_link_event - Handle link event
8674 * @pf: board private structure
8675 * @e: event info posted on ARQ
8677 static void i40e_handle_link_event(struct i40e_pf *pf,
8678 struct i40e_arq_event_info *e)
8680 struct i40e_aqc_get_link_status *status =
8681 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
8683 /* Do a new status request to re-enable LSE reporting
8684 * and load new status information into the hw struct
8685 * This completely ignores any state information
8686 * in the ARQ event info, instead choosing to always
8687 * issue the AQ update link status command.
8689 i40e_link_event(pf);
8691 /* Check if module meets thermal requirements */
8692 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
8693 dev_err(&pf->pdev->dev,
8694 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
8695 dev_err(&pf->pdev->dev,
8696 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8698 /* check for an unqualified module when the link is down, but
8699 * suppress the message if the link was forced down.
8701 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
8702 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
8703 (!(status->link_info & I40E_AQ_LINK_UP)) &&
8704 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
8705 dev_err(&pf->pdev->dev,
8706 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
8707 dev_err(&pf->pdev->dev,
8708 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8714 * i40e_clean_adminq_subtask - Clean the AdminQ rings
8715 * @pf: board private structure
8717 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
8719 struct i40e_arq_event_info event;
8720 struct i40e_hw *hw = &pf->hw;
8727 /* Do not run clean AQ when PF reset fails */
8728 if (test_bit(__I40E_RESET_FAILED, pf->state))
8731 /* check for error indications */
8732 val = rd32(&pf->hw, pf->hw.aq.arq.len);
8734 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
8735 if (hw->debug_mask & I40E_DEBUG_AQ)
8736 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
8737 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
8739 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
8740 if (hw->debug_mask & I40E_DEBUG_AQ)
8741 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
8742 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
8743 pf->arq_overflows++;
8745 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
8746 if (hw->debug_mask & I40E_DEBUG_AQ)
8747 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
8748 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
8751 wr32(&pf->hw, pf->hw.aq.arq.len, val);
8753 val = rd32(&pf->hw, pf->hw.aq.asq.len);
8755 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
8756 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8757 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
8758 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
8760 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
8761 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8762 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
8763 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
8765 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
8766 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8767 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
8768 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
8771 wr32(&pf->hw, pf->hw.aq.asq.len, val);
8773 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
8774 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
8779 ret = i40e_clean_arq_element(hw, &event, &pending);
8780 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
8783 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
8787 opcode = le16_to_cpu(event.desc.opcode);
8790 case i40e_aqc_opc_get_link_status:
8791 i40e_handle_link_event(pf, &event);
8793 case i40e_aqc_opc_send_msg_to_pf:
8794 ret = i40e_vc_process_vf_msg(pf,
8795 le16_to_cpu(event.desc.retval),
8796 le32_to_cpu(event.desc.cookie_high),
8797 le32_to_cpu(event.desc.cookie_low),
8801 case i40e_aqc_opc_lldp_update_mib:
8802 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
8803 #ifdef CONFIG_I40E_DCB
8805 ret = i40e_handle_lldp_event(pf, &event);
8807 #endif /* CONFIG_I40E_DCB */
8809 case i40e_aqc_opc_event_lan_overflow:
8810 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
8811 i40e_handle_lan_overflow_event(pf, &event);
8813 case i40e_aqc_opc_send_msg_to_peer:
8814 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
8816 case i40e_aqc_opc_nvm_erase:
8817 case i40e_aqc_opc_nvm_update:
8818 case i40e_aqc_opc_oem_post_update:
8819 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
8820 "ARQ NVM operation 0x%04x completed\n",
8824 dev_info(&pf->pdev->dev,
8825 "ARQ: Unknown event 0x%04x ignored\n",
8829 } while (i++ < pf->adminq_work_limit);
8831 if (i < pf->adminq_work_limit)
8832 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
8834 /* re-enable Admin queue interrupt cause */
8835 val = rd32(hw, I40E_PFINT_ICR0_ENA);
8836 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
8837 wr32(hw, I40E_PFINT_ICR0_ENA, val);
8840 kfree(event.msg_buf);
8844 * i40e_verify_eeprom - make sure eeprom is good to use
8845 * @pf: board private structure
8847 static void i40e_verify_eeprom(struct i40e_pf *pf)
8851 err = i40e_diag_eeprom_test(&pf->hw);
8853 /* retry in case of garbage read */
8854 err = i40e_diag_eeprom_test(&pf->hw);
8856 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
8858 set_bit(__I40E_BAD_EEPROM, pf->state);
8862 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
8863 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
8864 clear_bit(__I40E_BAD_EEPROM, pf->state);
8869 * i40e_enable_pf_switch_lb
8870 * @pf: pointer to the PF structure
8872 * enable switch loop back or die - no point in a return value
8874 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
8876 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8877 struct i40e_vsi_context ctxt;
8880 ctxt.seid = pf->main_vsi_seid;
8881 ctxt.pf_num = pf->hw.pf_id;
8883 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8885 dev_info(&pf->pdev->dev,
8886 "couldn't get PF vsi config, err %s aq_err %s\n",
8887 i40e_stat_str(&pf->hw, ret),
8888 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8891 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8892 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8893 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8895 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8897 dev_info(&pf->pdev->dev,
8898 "update vsi switch failed, err %s aq_err %s\n",
8899 i40e_stat_str(&pf->hw, ret),
8900 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8905 * i40e_disable_pf_switch_lb
8906 * @pf: pointer to the PF structure
8908 * disable switch loop back or die - no point in a return value
8910 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
8912 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8913 struct i40e_vsi_context ctxt;
8916 ctxt.seid = pf->main_vsi_seid;
8917 ctxt.pf_num = pf->hw.pf_id;
8919 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8921 dev_info(&pf->pdev->dev,
8922 "couldn't get PF vsi config, err %s aq_err %s\n",
8923 i40e_stat_str(&pf->hw, ret),
8924 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8927 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8928 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8929 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8931 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8933 dev_info(&pf->pdev->dev,
8934 "update vsi switch failed, err %s aq_err %s\n",
8935 i40e_stat_str(&pf->hw, ret),
8936 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8941 * i40e_config_bridge_mode - Configure the HW bridge mode
8942 * @veb: pointer to the bridge instance
8944 * Configure the loop back mode for the LAN VSI that is downlink to the
8945 * specified HW bridge instance. It is expected this function is called
8946 * when a new HW bridge is instantiated.
8948 static void i40e_config_bridge_mode(struct i40e_veb *veb)
8950 struct i40e_pf *pf = veb->pf;
8952 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
8953 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
8954 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
8955 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
8956 i40e_disable_pf_switch_lb(pf);
8958 i40e_enable_pf_switch_lb(pf);
8962 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
8963 * @veb: pointer to the VEB instance
8965 * This is a recursive function that first builds the attached VSIs then
8966 * recurses in to build the next layer of VEBs. We track the connections
8967 * through our own index numbers because the SEIDs from the HW could
8968 * change across the reset.
8970 static int i40e_reconstitute_veb(struct i40e_veb *veb)
8972 struct i40e_vsi *ctl_vsi = NULL;
8973 struct i40e_pf *pf = veb->pf;
8977 /* build VSI that owns this VEB, temporarily attached to base VEB */
8978 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
8980 pf->vsi[v]->veb_idx == veb->idx &&
8981 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
8982 ctl_vsi = pf->vsi[v];
8987 dev_info(&pf->pdev->dev,
8988 "missing owner VSI for veb_idx %d\n", veb->idx);
8990 goto end_reconstitute;
8992 if (ctl_vsi != pf->vsi[pf->lan_vsi])
8993 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
8994 ret = i40e_add_vsi(ctl_vsi);
8996 dev_info(&pf->pdev->dev,
8997 "rebuild of veb_idx %d owner VSI failed: %d\n",
8999 goto end_reconstitute;
9001 i40e_vsi_reset_stats(ctl_vsi);
9003 /* create the VEB in the switch and move the VSI onto the VEB */
9004 ret = i40e_add_veb(veb, ctl_vsi);
9006 goto end_reconstitute;
9008 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
9009 veb->bridge_mode = BRIDGE_MODE_VEB;
9011 veb->bridge_mode = BRIDGE_MODE_VEPA;
9012 i40e_config_bridge_mode(veb);
9014 /* create the remaining VSIs attached to this VEB */
9015 for (v = 0; v < pf->num_alloc_vsi; v++) {
9016 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
9019 if (pf->vsi[v]->veb_idx == veb->idx) {
9020 struct i40e_vsi *vsi = pf->vsi[v];
9022 vsi->uplink_seid = veb->seid;
9023 ret = i40e_add_vsi(vsi);
9025 dev_info(&pf->pdev->dev,
9026 "rebuild of vsi_idx %d failed: %d\n",
9028 goto end_reconstitute;
9030 i40e_vsi_reset_stats(vsi);
9034 /* create any VEBs attached to this VEB - RECURSION */
9035 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9036 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
9037 pf->veb[veb_idx]->uplink_seid = veb->seid;
9038 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
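/* Rebuild-order illustration for a hypothetical topology (not taken from a
 * real configuration):
 *
 *	MAC uplink -> VEB0 -> { owner VSI, VSI_A, VSI_B, VEB1 -> VSI_C }
 *
 * i40e_reconstitute_veb(VEB0) first re-adds the owner VSI against the uplink,
 * then recreates VEB0 itself, re-adds VSI_A and VSI_B under VEB0, and finally
 * recurses into VEB1 to repeat the process for VSI_C. Matching is done on the
 * driver's veb_idx/idx fields because the firmware assigns new SEIDs after a
 * reset.
 */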
9049 * i40e_get_capabilities - get info about the HW
9050 * @pf: the PF struct
 * @list_type: AQ capability to be queried (function or device capabilities)
9052 static int i40e_get_capabilities(struct i40e_pf *pf,
9053 enum i40e_admin_queue_opc list_type)
9055 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
9060 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
9062 cap_buf = kzalloc(buf_len, GFP_KERNEL);
9066 /* this loads the data into the hw struct for us */
9067 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
9068 &data_size, list_type,
9070 /* data loaded, buffer no longer needed */
9073 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
9074 /* retry with a larger buffer */
9075 buf_len = data_size;
9076 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
9077 dev_info(&pf->pdev->dev,
9078 "capability discovery failed, err %s aq_err %s\n",
9079 i40e_stat_str(&pf->hw, err),
9080 i40e_aq_str(&pf->hw,
9081 pf->hw.aq.asq_last_status));
9086 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
9087 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9088 dev_info(&pf->pdev->dev,
9089 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
9090 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
9091 pf->hw.func_caps.num_msix_vectors,
9092 pf->hw.func_caps.num_msix_vectors_vf,
9093 pf->hw.func_caps.fd_filters_guaranteed,
9094 pf->hw.func_caps.fd_filters_best_effort,
9095 pf->hw.func_caps.num_tx_qp,
9096 pf->hw.func_caps.num_vsis);
9097 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
9098 dev_info(&pf->pdev->dev,
9099 "switch_mode=0x%04x, function_valid=0x%08x\n",
9100 pf->hw.dev_caps.switch_mode,
9101 pf->hw.dev_caps.valid_functions);
9102 dev_info(&pf->pdev->dev,
9103 "SR-IOV=%d, num_vfs for all function=%u\n",
9104 pf->hw.dev_caps.sr_iov_1_1,
9105 pf->hw.dev_caps.num_vfs);
9106 dev_info(&pf->pdev->dev,
9107 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
9108 pf->hw.dev_caps.num_vsis,
9109 pf->hw.dev_caps.num_rx_qp,
9110 pf->hw.dev_caps.num_tx_qp);
9113 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9114 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
9115 + pf->hw.func_caps.num_vfs)
9116 if (pf->hw.revision_id == 0 &&
9117 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
9118 dev_info(&pf->pdev->dev,
9119 "got num_vsis %d, setting num_vsis to %d\n",
9120 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
9121 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
9127 static int i40e_vsi_clear(struct i40e_vsi *vsi);
9130 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
9131 * @pf: board private structure
9133 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
9135 struct i40e_vsi *vsi;
9137 /* quick workaround for an NVM issue that leaves a critical register uninitialized */
9140 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
9141 static const u32 hkey[] = {
9142 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
9143 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
9144 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
9148 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
9149 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
9152 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9155 /* find existing VSI and see if it needs configuring */
9156 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9158 /* create a new VSI if none exists */
9160 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
9161 pf->vsi[pf->lan_vsi]->seid, 0);
9163 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
9164 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9165 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
9170 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
9174 * i40e_fdir_teardown - release the Flow Director resources
9175 * @pf: board private structure
9177 static void i40e_fdir_teardown(struct i40e_pf *pf)
9179 struct i40e_vsi *vsi;
9181 i40e_fdir_filter_exit(pf);
9182 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9184 i40e_vsi_release(vsi);
9188 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
9190 * @seid: seid of main or channel VSIs
9192 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
9193 * existed before reset
9195 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
9197 struct i40e_cloud_filter *cfilter;
9198 struct i40e_pf *pf = vsi->back;
9199 struct hlist_node *node;
9202 /* Add cloud filters back if they exist */
9203 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
9205 if (cfilter->seid != seid)
9208 if (cfilter->dst_port)
9209 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
9212 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
9215 dev_dbg(&pf->pdev->dev,
9216 "Failed to rebuild cloud filter, err %s aq_err %s\n",
9217 i40e_stat_str(&pf->hw, ret),
9218 i40e_aq_str(&pf->hw,
9219 pf->hw.aq.asq_last_status));
9227 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
9230 * Rebuilds channel VSIs if they existed before reset
9232 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9234 struct i40e_channel *ch, *ch_tmp;
9237 if (list_empty(&vsi->ch_list))
9240 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
9241 if (!ch->initialized)
9243 /* Proceed with creation of channel (VMDq2) VSI */
9244 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
9246 dev_info(&vsi->back->pdev->dev,
9247 "failed to rebuild channels using uplink_seid %u\n",
9251 /* Reconfigure TX queues using QTX_CTL register */
9252 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
9254 dev_info(&vsi->back->pdev->dev,
9255 "failed to configure TX rings for channel %u\n",
9259 /* update 'next_base_queue' */
9260 vsi->next_base_queue = vsi->next_base_queue +
9261 ch->num_queue_pairs;
9262 if (ch->max_tx_rate) {
9263 u64 credits = ch->max_tx_rate;
9265 if (i40e_set_bw_limit(vsi, ch->seid,
9269 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9270 dev_dbg(&vsi->back->pdev->dev,
9271 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9276 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
9278 dev_dbg(&vsi->back->pdev->dev,
9279 "Failed to rebuild cloud filters for channel VSI %u\n",
9288 * i40e_prep_for_reset - prep for the core to reset
9289 * @pf: board private structure
9290 * @lock_acquired: indicates whether or not the lock has been acquired
9291 * before this function was called.
9293 * Close up the VFs and other things in prep for PF Reset.
9295 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
9297 struct i40e_hw *hw = &pf->hw;
9298 i40e_status ret = 0;
9301 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
9302 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9304 if (i40e_check_asq_alive(&pf->hw))
9305 i40e_vc_notify_reset(pf);
9307 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
9309 /* quiesce the VSIs and their queues that are not already DOWN */
9310 /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
9313 i40e_pf_quiesce_all_vsi(pf);
9317 for (v = 0; v < pf->num_alloc_vsi; v++) {
9319 pf->vsi[v]->seid = 0;
9322 i40e_shutdown_adminq(&pf->hw);
9324 /* call shutdown HMC */
9325 if (hw->hmc.hmc_obj) {
9326 ret = i40e_shutdown_lan_hmc(hw);
9328 dev_warn(&pf->pdev->dev,
9329 "shutdown_lan_hmc failed: %d\n", ret);
9332 /* Save the current PTP time so that we can restore the time after the
9335 i40e_ptp_save_hw_time(pf);
9339 * i40e_send_version - update firmware with driver version
9342 static void i40e_send_version(struct i40e_pf *pf)
9344 struct i40e_driver_version dv;
9346 dv.major_version = DRV_VERSION_MAJOR;
9347 dv.minor_version = DRV_VERSION_MINOR;
9348 dv.build_version = DRV_VERSION_BUILD;
9349 dv.subbuild_version = 0;
9350 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
9351 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
9355 * i40e_get_oem_version - get OEM specific version information
9356 * @hw: pointer to the hardware structure
9358 static void i40e_get_oem_version(struct i40e_hw *hw)
9360 u16 block_offset = 0xffff;
9361 u16 block_length = 0;
9362 u16 capabilities = 0;
9366 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
9367 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
9368 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
9369 #define I40E_NVM_OEM_GEN_OFFSET 0x02
9370 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
9371 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
9372 #define I40E_NVM_OEM_LENGTH 3
9374 /* Check if pointer to OEM version block is valid. */
9375 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
9376 if (block_offset == 0xffff)
9379 /* Check if OEM version block has correct length. */
9380 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
9382 if (block_length < I40E_NVM_OEM_LENGTH)
9385 /* Check if OEM version format is as expected. */
9386 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9388 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9391 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9393 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9395 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
9396 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
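/* Worked example with hypothetical NVM contents: if the GEN/SNAP word reads
 * 0x0102 and the RELEASE word reads 0x0003, the GEN/SNAP word ends up in the
 * upper half of oem_ver and the release in the lower half (0x01020003,
 * assuming I40E_OEM_SNAP_SHIFT is 16 bits).
 */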
9400 * i40e_reset - wait for a core reset to finish; reset the PF if a CORER is not seen
9401 * @pf: board private structure
9403 static int i40e_reset(struct i40e_pf *pf)
9405 struct i40e_hw *hw = &pf->hw;
9408 ret = i40e_pf_reset(hw);
9410 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
9411 set_bit(__I40E_RESET_FAILED, pf->state);
9412 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9420 * i40e_rebuild - rebuild using a saved config
9421 * @pf: board private structure
9422 * @reinit: if the Main VSI needs to be re-initialized.
9423 * @lock_acquired: indicates whether or not the lock has been acquired
9424 * before this function was called.
9426 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
9428 int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
9429 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9430 struct i40e_hw *hw = &pf->hw;
9431 u8 set_fc_aq_fail = 0;
9436 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9437 i40e_check_recovery_mode(pf)) {
9438 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
9441 if (test_bit(__I40E_DOWN, pf->state) &&
9442 !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
9443 !old_recovery_mode_bit)
9444 goto clear_recovery;
9445 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9447 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
9448 ret = i40e_init_adminq(&pf->hw);
9450 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9451 i40e_stat_str(&pf->hw, ret),
9452 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9453 goto clear_recovery;
9455 i40e_get_oem_version(&pf->hw);
9457 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9458 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
9459 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
9460 /* The following delay is necessary for 4.33 firmware and older
9461 * to recover after EMP reset. 200 ms should suffice but we
9462 * put here 300 ms to be sure that FW is ready to operate after reset.
9468 /* re-verify the eeprom if we just had an EMP reset */
9469 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
9470 i40e_verify_eeprom(pf);
9472 /* if we are going out of or into recovery mode we have to act
9473 * accordingly with regard to resources initialization
9474 * and deinitialization
9476 if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
9477 old_recovery_mode_bit) {
9478 if (i40e_get_capabilities(pf,
9479 i40e_aqc_opc_list_func_capabilities))
9482 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
9483 /* we're staying in recovery mode so we'll reinitialize
9486 if (i40e_setup_misc_vector_for_recovery_mode(pf))
9491 /* we're going out of recovery mode so we'll free
9492 * the IRQ allocated specifically for recovery mode
9493 * and restore the interrupt scheme
9495 free_irq(pf->pdev->irq, pf);
9496 i40e_clear_interrupt_scheme(pf);
9497 if (i40e_restore_interrupt_scheme(pf))
9501 /* tell the firmware that we're starting */
9502 i40e_send_version(pf);
9504 /* bail out in case recovery mode was detected, as there is
9505 * no need for further configuration.
9510 i40e_clear_pxe_mode(hw);
9511 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
9513 goto end_core_reset;
9515 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9516 hw->func_caps.num_rx_qp, 0, 0);
9518 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
9519 goto end_core_reset;
9521 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9523 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
9524 goto end_core_reset;
9527 /* Enable FW to write a default DCB config on link-up */
9528 i40e_aq_set_dcb_parameters(hw, true, NULL);
9530 #ifdef CONFIG_I40E_DCB
9531 ret = i40e_init_pf_dcb(pf);
9533 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
9534 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9535 /* Continue without DCB enabled */
9537 #endif /* CONFIG_I40E_DCB */
9538 /* do basic switch setup */
9541 ret = i40e_setup_pf_switch(pf, reinit);
9545 /* The driver only wants link up/down and module qualification
9546 * reports from firmware. Note the negative logic.
9548 ret = i40e_aq_set_phy_int_mask(&pf->hw,
9549 ~(I40E_AQ_EVENT_LINK_UPDOWN |
9550 I40E_AQ_EVENT_MEDIA_NA |
9551 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
9553 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
9554 i40e_stat_str(&pf->hw, ret),
9555 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9557 /* make sure our flow control settings are restored */
9558 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
9560 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
9561 i40e_stat_str(&pf->hw, ret),
9562 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9564 /* Rebuild the VSIs and VEBs that existed before reset.
9565 * They are still in our local switch element arrays, so only
9566 * need to rebuild the switch model in the HW.
9568 * If there were VEBs but the reconstitution failed, we'll try to
9569 * recover minimal use by getting the basic PF VSI working.
9571 if (vsi->uplink_seid != pf->mac_seid) {
9572 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
9573 /* find the one VEB connected to the MAC, and find orphans */
9574 for (v = 0; v < I40E_MAX_VEB; v++) {
9578 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
9579 pf->veb[v]->uplink_seid == 0) {
9580 ret = i40e_reconstitute_veb(pf->veb[v]);
9585 /* If Main VEB failed, we're in deep doodoo,
9586 * so give up rebuilding the switch and set up
9587 * for minimal rebuild of PF VSI.
9588 * If orphan failed, we'll report the error
9589 * but try to keep going.
9591 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
9592 dev_info(&pf->pdev->dev,
9593 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
9595 vsi->uplink_seid = pf->mac_seid;
9597 } else if (pf->veb[v]->uplink_seid == 0) {
9598 dev_info(&pf->pdev->dev,
9599 "rebuild of orphan VEB failed: %d\n",
9606 if (vsi->uplink_seid == pf->mac_seid) {
9607 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
9608 /* no VEB, so rebuild only the Main VSI */
9609 ret = i40e_add_vsi(vsi);
9611 dev_info(&pf->pdev->dev,
9612 "rebuild of Main VSI failed: %d\n", ret);
9617 if (vsi->mqprio_qopt.max_rate[0]) {
9618 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9621 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
9622 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
9626 credits = max_tx_rate;
9627 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9628 dev_dbg(&vsi->back->pdev->dev,
9629 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9635 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
9639 /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
9640 * for this main VSI if they exist
9642 ret = i40e_rebuild_channels(vsi);
9646 /* Reconfigure hardware for allowing smaller MSS in the case
9647 * of TSO, so that we avoid the MDD being fired and causing
9648 * a reset in the case of small MSS+TSO.
9650 #define I40E_REG_MSS 0x000E64DC
9651 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
9652 #define I40E_64BYTE_MSS 0x400000
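/* Worked example (illustrative register value): if I40E_REG_MSS reads
 * 0x005F0000, the minimum-MSS field (bits 25:16, I40E_REG_MSS_MIN_MASK) holds
 * 0x5F = 95; since that is above I40E_64BYTE_MSS (0x40 = 64 in the same
 * field), the read-modify-write below lowers the field to 64 bytes while
 * leaving every other bit of the register untouched.
 */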
9653 val = rd32(hw, I40E_REG_MSS);
9654 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
9655 val &= ~I40E_REG_MSS_MIN_MASK;
9656 val |= I40E_64BYTE_MSS;
9657 wr32(hw, I40E_REG_MSS, val);
9660 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
9662 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
9664 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
9665 i40e_stat_str(&pf->hw, ret),
9666 i40e_aq_str(&pf->hw,
9667 pf->hw.aq.asq_last_status));
9669 /* reinit the misc interrupt */
9670 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
9671 ret = i40e_setup_misc_vector(pf);
9673 /* Add a filter to drop all Flow control frames from any VSI from being
9674 * transmitted. By doing so we stop a malicious VF from sending out
9675 * PAUSE or PFC frames and potentially controlling traffic for other PF/VF VSIs.
9677 * The FW can still send Flow control frames if enabled.
9679 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
9682 /* restart the VSIs that were rebuilt and running before the reset */
9683 i40e_pf_unquiesce_all_vsi(pf);
9685 /* Release the RTNL lock before we start resetting VFs */
9689 /* Restore promiscuous settings */
9690 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
9692 dev_warn(&pf->pdev->dev,
9693 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
9694 pf->cur_promisc ? "on" : "off",
9695 i40e_stat_str(&pf->hw, ret),
9696 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9698 i40e_reset_all_vfs(pf, true);
9700 /* tell the firmware that we're starting */
9701 i40e_send_version(pf);
9703 /* We've already released the lock, so don't do it again */
9704 goto end_core_reset;
9710 clear_bit(__I40E_RESET_FAILED, pf->state);
9712 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9713 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
9717 * i40e_reset_and_rebuild - reset and rebuild using a saved config
9718 * @pf: board private structure
9719 * @reinit: if the Main VSI needs to be re-initialized.
9720 * @lock_acquired: indicates whether or not the lock has been acquired
9721 * before this function was called.
9723 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
9727 /* Now we wait for GRST to settle out.
9728 * We don't have to delete the VEBs or VSIs from the hw switch
9729 * because the reset will make them disappear.
9731 ret = i40e_reset(pf);
9733 i40e_rebuild(pf, reinit, lock_acquired);
9737 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
9738 * @pf: board private structure
9740 * Close up the VFs and other things in prep for a Core Reset,
9741 * then get ready to rebuild the world.
9742 * @lock_acquired: indicates whether or not the lock has been acquired
9743 * before this function was called.
9745 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
9747 i40e_prep_for_reset(pf, lock_acquired);
9748 i40e_reset_and_rebuild(pf, false, lock_acquired);
9752 * i40e_handle_mdd_event
9753 * @pf: pointer to the PF structure
9755 * Called from the MDD irq handler to identify possibly malicious VFs
9757 static void i40e_handle_mdd_event(struct i40e_pf *pf)
9759 struct i40e_hw *hw = &pf->hw;
9760 bool mdd_detected = false;
9765 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
9768 /* find what triggered the MDD event */
9769 reg = rd32(hw, I40E_GL_MDET_TX);
9770 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
9771 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
9772 I40E_GL_MDET_TX_PF_NUM_SHIFT;
9773 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
9774 I40E_GL_MDET_TX_VF_NUM_SHIFT;
9775 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
9776 I40E_GL_MDET_TX_EVENT_SHIFT;
9777 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
9778 I40E_GL_MDET_TX_QUEUE_SHIFT) -
9779 pf->hw.func_caps.base_queue;
9780 if (netif_msg_tx_err(pf))
9781 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
9782 event, queue, pf_num, vf_num);
9783 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
9784 mdd_detected = true;
9786 reg = rd32(hw, I40E_GL_MDET_RX);
9787 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
9788 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
9789 I40E_GL_MDET_RX_FUNCTION_SHIFT;
9790 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
9791 I40E_GL_MDET_RX_EVENT_SHIFT;
9792 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
9793 I40E_GL_MDET_RX_QUEUE_SHIFT) -
9794 pf->hw.func_caps.base_queue;
9795 if (netif_msg_rx_err(pf))
9796 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
9797 event, queue, func);
9798 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
9799 mdd_detected = true;
9803 reg = rd32(hw, I40E_PF_MDET_TX);
9804 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
9805 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
9806 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
9808 reg = rd32(hw, I40E_PF_MDET_RX);
9809 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
9810 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
9811 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
9815 /* see if one of the VFs needs its hand slapped */
9816 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
9818 reg = rd32(hw, I40E_VP_MDET_TX(i));
9819 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
9820 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
9821 vf->num_mdd_events++;
9822 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
9824 dev_info(&pf->pdev->dev,
9825 "Use PF Control I/F to re-enable the VF\n");
9826 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
9829 reg = rd32(hw, I40E_VP_MDET_RX(i));
9830 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
9831 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
9832 vf->num_mdd_events++;
9833 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
9835 dev_info(&pf->pdev->dev,
9836 "Use PF Control I/F to re-enable the VF\n");
9837 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
9841 /* re-enable mdd interrupt cause */
9842 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
9843 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
9844 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
9845 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
9849 static const char *i40e_tunnel_name(u8 type)
9852 case UDP_TUNNEL_TYPE_VXLAN:
9854 case UDP_TUNNEL_TYPE_GENEVE:
9862 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
9863 * @pf: board private structure
9865 static void i40e_sync_udp_filters(struct i40e_pf *pf)
9869 /* loop through and set pending bit for all active UDP filters */
9870 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9871 if (pf->udp_ports[i].port)
9872 pf->pending_udp_bitmap |= BIT_ULL(i);
9875 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
9879 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
9880 * @pf: board private structure
9882 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
9884 struct i40e_hw *hw = &pf->hw;
9885 u8 filter_index, type;
9889 if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
9892 /* acquire RTNL to maintain state of flags and port requests */
9895 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9896 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
9897 struct i40e_udp_port_config *udp_port;
9898 i40e_status ret = 0;
9900 udp_port = &pf->udp_ports[i];
9901 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9903 port = READ_ONCE(udp_port->port);
9904 type = READ_ONCE(udp_port->type);
9905 filter_index = READ_ONCE(udp_port->filter_index);
9907 /* release RTNL while we wait on AQ command */
9911 ret = i40e_aq_add_udp_tunnel(hw, port,
9915 else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
9916 ret = i40e_aq_del_udp_tunnel(hw, filter_index,
9919 /* reacquire RTNL so we can update filter_index */
9923 dev_info(&pf->pdev->dev,
9924 "%s %s port %d, index %d failed, err %s aq_err %s\n",
9925 i40e_tunnel_name(type),
9926 port ? "add" : "delete",
9929 i40e_stat_str(&pf->hw, ret),
9930 i40e_aq_str(&pf->hw,
9931 pf->hw.aq.asq_last_status));
9933 /* failed to add, just reset port,
9934 * drop pending bit for any deletion
9937 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9940 /* record filter index on success */
9941 udp_port->filter_index = filter_index;
9950 * i40e_service_task - Run the driver's async subtasks
9951 * @work: pointer to work_struct containing our data
9953 static void i40e_service_task(struct work_struct *work)
9955 struct i40e_pf *pf = container_of(work,
9958 unsigned long start_time = jiffies;
9960 /* don't bother with service tasks if a reset is in progress */
9961 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
9962 test_bit(__I40E_SUSPENDED, pf->state))
9965 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
9968 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
9969 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
9970 i40e_sync_filters_subtask(pf);
9971 i40e_reset_subtask(pf);
9972 i40e_handle_mdd_event(pf);
9973 i40e_vc_process_vflr_event(pf);
9974 i40e_watchdog_subtask(pf);
9975 i40e_fdir_reinit_subtask(pf);
9976 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
9977 /* Client subtask will reopen next time through. */
9978 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
9981 i40e_client_subtask(pf);
9982 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
9984 i40e_notify_client_of_l2_param_changes(
9985 pf->vsi[pf->lan_vsi]);
9987 i40e_sync_filters_subtask(pf);
9988 i40e_sync_udp_filters_subtask(pf);
9990 i40e_reset_subtask(pf);
9993 i40e_clean_adminq_subtask(pf);
9995 /* flush memory to make sure state is correct before next watchdog */
9996 smp_mb__before_atomic();
9997 clear_bit(__I40E_SERVICE_SCHED, pf->state);
9999 /* If the tasks have taken longer than one timer cycle or there
10000 * is more work to be done, reschedule the service task now
10001 * rather than wait for the timer to tick again.
10003 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
10004 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
10005 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
10006 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
10007 i40e_service_event_schedule(pf);
10011 * i40e_service_timer - timer callback
10012 * @t: pointer to the timer_list embedded in the PF struct
10014 static void i40e_service_timer(struct timer_list *t)
10016 struct i40e_pf *pf = from_timer(pf, t, service_timer);
10018 mod_timer(&pf->service_timer,
10019 round_jiffies(jiffies + pf->service_timer_period));
10020 i40e_service_event_schedule(pf);
10024 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
10025 * @vsi: the VSI being configured
10027 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
10029 struct i40e_pf *pf = vsi->back;
10031 switch (vsi->type) {
10032 case I40E_VSI_MAIN:
10033 vsi->alloc_queue_pairs = pf->num_lan_qps;
10034 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10035 I40E_REQ_DESCRIPTOR_MULTIPLE);
10036 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10037 vsi->num_q_vectors = pf->num_lan_msix;
10039 vsi->num_q_vectors = 1;
10043 case I40E_VSI_FDIR:
10044 vsi->alloc_queue_pairs = 1;
10045 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
10046 I40E_REQ_DESCRIPTOR_MULTIPLE);
10047 vsi->num_q_vectors = pf->num_fdsb_msix;
10050 case I40E_VSI_VMDQ2:
10051 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
10052 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10053 I40E_REQ_DESCRIPTOR_MULTIPLE);
10054 vsi->num_q_vectors = pf->num_vmdq_msix;
10057 case I40E_VSI_SRIOV:
10058 vsi->alloc_queue_pairs = pf->num_vf_qps;
10059 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10060 I40E_REQ_DESCRIPTOR_MULTIPLE);
10072 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
10073 * @vsi: VSI pointer
10074 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
10076 * On error: returns error code (negative)
10077 * On success: returns 0
10079 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
10081 struct i40e_ring **next_rings;
10085 /* allocate memory for the Tx, XDP Tx and Rx ring pointers */
10086 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
10087 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
10088 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
10089 if (!vsi->tx_rings)
10091 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
10092 if (i40e_enabled_xdp_vsi(vsi)) {
10093 vsi->xdp_rings = next_rings;
10094 next_rings += vsi->alloc_queue_pairs;
10096 vsi->rx_rings = next_rings;
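/* Resulting layout of the single allocation (illustrative, with
 * alloc_queue_pairs == N):
 *
 *	tx_rings[0..N-1] | xdp_rings[0..N-1] (only if XDP is enabled) | rx_rings[0..N-1]
 *
 * rx_rings and xdp_rings are interior pointers into the same block, which is
 * why only vsi->tx_rings is ever passed to kfree() on teardown.
 */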
10098 if (alloc_qvectors) {
10099 /* allocate memory for q_vector pointers */
10100 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
10101 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
10102 if (!vsi->q_vectors) {
10110 kfree(vsi->tx_rings);
10115 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
10116 * @pf: board private structure
10117 * @type: type of VSI
10119 * On error: returns error code (negative)
10120 * On success: returns vsi index in PF (positive)
10122 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10125 struct i40e_vsi *vsi;
10129 /* Need to protect the allocation of the VSIs at the PF level */
10130 mutex_lock(&pf->switch_mutex);
10132 /* VSI list may be fragmented if VSI creation/destruction has
10133 * been happening. We can afford to do a quick scan to look
10134 * for any free VSIs in the list.
10136 * find next empty vsi slot, looping back around if necessary
10139 while (i < pf->num_alloc_vsi && pf->vsi[i])
10141 if (i >= pf->num_alloc_vsi) {
10143 while (i < pf->next_vsi && pf->vsi[i])
10147 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
10148 vsi_idx = i; /* Found one! */
10151 goto unlock_pf; /* out of VSI slots! */
10153 pf->next_vsi = ++i;
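/* Example of the wrap-around scan above (hypothetical state): with
 * num_alloc_vsi == 8, next_vsi == 5 and slots 5..7 occupied, the first loop
 * runs off the end of the array, i is reset to 0 and the second loop stops at
 * the first free slot below next_vsi; if no slot at all is free the function
 * bails out under the switch_mutex without allocating anything.
 */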
10155 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
10162 set_bit(__I40E_VSI_DOWN, vsi->state);
10164 vsi->idx = vsi_idx;
10165 vsi->int_rate_limit = 0;
10166 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
10167 pf->rss_table_size : 64;
10168 vsi->netdev_registered = false;
10169 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
10170 hash_init(vsi->mac_filter_hash);
10171 vsi->irqs_ready = false;
10173 if (type == I40E_VSI_MAIN) {
10174 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
10175 if (!vsi->af_xdp_zc_qps)
10179 ret = i40e_set_num_rings_in_vsi(vsi);
10183 ret = i40e_vsi_alloc_arrays(vsi, true);
10187 /* Setup default MSIX irq handler for VSI */
10188 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
10190 /* Initialize VSI lock */
10191 spin_lock_init(&vsi->mac_filter_hash_lock);
10192 pf->vsi[vsi_idx] = vsi;
10197 bitmap_free(vsi->af_xdp_zc_qps);
10198 pf->next_vsi = i - 1;
10201 mutex_unlock(&pf->switch_mutex);
10206 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
10207 * @vsi: VSI pointer
10208 * @free_qvectors: a bool to specify if q_vectors need to be freed.
10213 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
10215 /* free the ring and vector containers */
10216 if (free_qvectors) {
10217 kfree(vsi->q_vectors);
10218 vsi->q_vectors = NULL;
10220 kfree(vsi->tx_rings);
10221 vsi->tx_rings = NULL;
10222 vsi->rx_rings = NULL;
10223 vsi->xdp_rings = NULL;
10227 * i40e_clear_rss_config_user - clear the user configured RSS hash key and lookup table
10229 * @vsi: Pointer to VSI structure
10231 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
10236 kfree(vsi->rss_hkey_user);
10237 vsi->rss_hkey_user = NULL;
10239 kfree(vsi->rss_lut_user);
10240 vsi->rss_lut_user = NULL;
10244 * i40e_vsi_clear - Deallocate the VSI provided
10245 * @vsi: the VSI being un-configured
10247 static int i40e_vsi_clear(struct i40e_vsi *vsi)
10249 struct i40e_pf *pf;
10258 mutex_lock(&pf->switch_mutex);
10259 if (!pf->vsi[vsi->idx]) {
10260 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
10261 vsi->idx, vsi->idx, vsi->type);
10265 if (pf->vsi[vsi->idx] != vsi) {
10266 dev_err(&pf->pdev->dev,
10267 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
10268 pf->vsi[vsi->idx]->idx,
10269 pf->vsi[vsi->idx]->type,
10270 vsi->idx, vsi->type);
10274 /* updates the PF for this cleared vsi */
10275 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10276 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10278 bitmap_free(vsi->af_xdp_zc_qps);
10279 i40e_vsi_free_arrays(vsi, true);
10280 i40e_clear_rss_config_user(vsi);
10282 pf->vsi[vsi->idx] = NULL;
10283 if (vsi->idx < pf->next_vsi)
10284 pf->next_vsi = vsi->idx;
10287 mutex_unlock(&pf->switch_mutex);
10295 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
10296 * @vsi: the VSI being cleaned
10298 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10302 if (vsi->tx_rings && vsi->tx_rings[0]) {
10303 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10304 kfree_rcu(vsi->tx_rings[i], rcu);
10305 vsi->tx_rings[i] = NULL;
10306 vsi->rx_rings[i] = NULL;
10307 if (vsi->xdp_rings)
10308 vsi->xdp_rings[i] = NULL;
10314 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
10315 * @vsi: the VSI being configured
10317 static int i40e_alloc_rings(struct i40e_vsi *vsi)
10319 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10320 struct i40e_pf *pf = vsi->back;
10321 struct i40e_ring *ring;
10323 /* Set basic values in the rings to be used later during open() */
10324 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10325 /* allocate space for the Tx, XDP Tx and Rx rings in one shot */
10326 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10330 ring->queue_index = i;
10331 ring->reg_idx = vsi->base_queue + i;
10332 ring->ring_active = false;
10334 ring->netdev = vsi->netdev;
10335 ring->dev = &pf->pdev->dev;
10336 ring->count = vsi->num_desc;
10339 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10340 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10341 ring->itr_setting = pf->tx_itr_default;
10342 vsi->tx_rings[i] = ring++;
10344 if (!i40e_enabled_xdp_vsi(vsi))
10347 ring->queue_index = vsi->alloc_queue_pairs + i;
10348 ring->reg_idx = vsi->base_queue + ring->queue_index;
10349 ring->ring_active = false;
10351 ring->netdev = NULL;
10352 ring->dev = &pf->pdev->dev;
10353 ring->count = vsi->num_desc;
10356 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10357 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10358 set_ring_xdp(ring);
10359 ring->itr_setting = pf->tx_itr_default;
10360 vsi->xdp_rings[i] = ring++;
10363 ring->queue_index = i;
10364 ring->reg_idx = vsi->base_queue + i;
10365 ring->ring_active = false;
10367 ring->netdev = vsi->netdev;
10368 ring->dev = &pf->pdev->dev;
10369 ring->count = vsi->num_desc;
10372 ring->itr_setting = pf->rx_itr_default;
10373 vsi->rx_rings[i] = ring;
10379 i40e_vsi_clear_rings(vsi);
10384 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
10385 * @pf: board private structure
10386 * @vectors: the number of MSI-X vectors to request
10388 * Returns the number of vectors reserved, or error
10390 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10392 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10393 I40E_MIN_MSIX, vectors);
10395 dev_info(&pf->pdev->dev,
10396 "MSI-X vector reservation failed: %d\n", vectors);
10404 * i40e_init_msix - Setup the MSIX capability
10405 * @pf: board private structure
10407 * Work with the OS to set up the MSIX vectors needed.
10409 * Returns the number of vectors reserved or negative on failure
10411 static int i40e_init_msix(struct i40e_pf *pf)
10413 struct i40e_hw *hw = &pf->hw;
10414 int cpus, extra_vectors;
10418 int iwarp_requested = 0;
10420 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10423 /* The number of vectors we'll request will be comprised of:
10424 * - Add 1 for "other" cause for Admin Queue events, etc.
10425 * - The number of LAN queue pairs
10426 * - Queues being used for RSS.
10427 * We don't need as many as max_rss_size vectors;
10428 * use rss_size instead in the calculation, since that
10429 * is governed by the number of CPUs in the system.
10430 * - assumes symmetric Tx/Rx pairing
10431 * - The number of VMDq pairs
10432 * - The CPU count within the NUMA node if iWARP is enabled
10433 * Once we count this up, try the request.
10435 * If we can't get what we want, we'll simplify to nearly nothing
10436 * and try again. If that still fails, we punt.
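/* Worked example with hypothetical capabilities: 8 online CPUs and 25 MSI-X
 * vectors reported in func_caps. The misc vector takes 1 (24 left), the LAN
 * queues initially take min(8, 24 / 2) = 8 (16 left), FD sideband takes 1
 * (15 left), iWARP (if enabled and asking for 4) takes 4 (11 left), and a
 * VMDq request of 2 VSIs * 4 qps takes 8 (3 left). The later "add back" step
 * then grants the LAN queues min(8 - 8, 3) = 0 extra vectors, so the budget
 * requested from the kernel is 1 + 8 + 1 + 4 + 8 = 22.
 */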
10438 vectors_left = hw->func_caps.num_msix_vectors;
10441 /* reserve one vector for miscellaneous handler */
10442 if (vectors_left) {
10447 /* reserve some vectors for the main PF traffic queues. Initially we
10448 * only reserve at most 50% of the available vectors, in the case that
10449 * the number of online CPUs is large. This ensures that we can enable
10450 * extra features as well. Once we've enabled the other features, we
10451 * will use any remaining vectors to reach as close as we can to the
10452 * number of online CPUs.
10454 cpus = num_online_cpus();
10455 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
10456 vectors_left -= pf->num_lan_msix;
10458 /* reserve one vector for sideband flow director */
10459 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10460 if (vectors_left) {
10461 pf->num_fdsb_msix = 1;
10465 pf->num_fdsb_msix = 0;
10469 /* can we reserve enough for iWARP? */
10470 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10471 iwarp_requested = pf->num_iwarp_msix;
10474 pf->num_iwarp_msix = 0;
10475 else if (vectors_left < pf->num_iwarp_msix)
10476 pf->num_iwarp_msix = 1;
10477 v_budget += pf->num_iwarp_msix;
10478 vectors_left -= pf->num_iwarp_msix;
10481 /* any vectors left over go for VMDq support */
10482 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
10483 if (!vectors_left) {
10484 pf->num_vmdq_msix = 0;
10485 pf->num_vmdq_qps = 0;
10487 int vmdq_vecs_wanted =
10488 pf->num_vmdq_vsis * pf->num_vmdq_qps;
10490 min_t(int, vectors_left, vmdq_vecs_wanted);
10492 /* if we're short on vectors for what's desired, we limit
10493 * the queues per vmdq. If this is still more than are
10494 * available, the user will need to change the number of
10495 * queues/vectors used by the PF later with the ethtool channels command.
10498 if (vectors_left < vmdq_vecs_wanted) {
10499 pf->num_vmdq_qps = 1;
10500 vmdq_vecs_wanted = pf->num_vmdq_vsis;
10501 vmdq_vecs = min_t(int,
10505 pf->num_vmdq_msix = pf->num_vmdq_qps;
10507 v_budget += vmdq_vecs;
10508 vectors_left -= vmdq_vecs;
10512 /* On systems with a large number of SMP cores, we previously limited
10513 * the number of vectors for num_lan_msix to be at most 50% of the
10514 * available vectors, to allow for other features. Now, we add back
10515 * the remaining vectors. However, we ensure that the total
10516 * num_lan_msix will not exceed num_online_cpus(). To do this, we
10517 * calculate the number of vectors we can add without going over the
10518 * cap of CPUs. For systems with a small number of CPUs this will be zero.
10521 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
10522 pf->num_lan_msix += extra_vectors;
10523 vectors_left -= extra_vectors;
10525 WARN(vectors_left < 0,
10526 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
10528 v_budget += pf->num_lan_msix;
10529 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
10531 if (!pf->msix_entries)
10534 for (i = 0; i < v_budget; i++)
10535 pf->msix_entries[i].entry = i;
10536 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
10538 if (v_actual < I40E_MIN_MSIX) {
10539 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
10540 kfree(pf->msix_entries);
10541 pf->msix_entries = NULL;
10542 pci_disable_msix(pf->pdev);
10545 } else if (v_actual == I40E_MIN_MSIX) {
10546 /* Adjust for minimal MSIX use */
10547 pf->num_vmdq_vsis = 0;
10548 pf->num_vmdq_qps = 0;
10549 pf->num_lan_qps = 1;
10550 pf->num_lan_msix = 1;
10552 } else if (v_actual != v_budget) {
10553 /* If we have limited resources, we will start with no vectors
10554 * for the special features and then allocate vectors to some
10555 * of these features based on the policy and at the end disable
10556 * the features that did not get any vectors.
10560 dev_info(&pf->pdev->dev,
10561 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
10562 v_actual, v_budget);
10563 /* reserve the misc vector */
10564 vec = v_actual - 1;
10566 /* Scale vector usage down */
10567 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
10568 pf->num_vmdq_vsis = 1;
10569 pf->num_vmdq_qps = 1;
10571 /* partition out the remaining vectors */
10574 pf->num_lan_msix = 1;
10577 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10578 pf->num_lan_msix = 1;
10579 pf->num_iwarp_msix = 1;
10581 pf->num_lan_msix = 2;
10585 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10586 pf->num_iwarp_msix = min_t(int, (vec / 3),
10588 pf->num_vmdq_vsis = min_t(int, (vec / 3),
10589 I40E_DEFAULT_NUM_VMDQ_VSI);
10591 pf->num_vmdq_vsis = min_t(int, (vec / 2),
10592 I40E_DEFAULT_NUM_VMDQ_VSI);
10594 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10595 pf->num_fdsb_msix = 1;
10598 pf->num_lan_msix = min_t(int,
10599 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
10601 pf->num_lan_qps = pf->num_lan_msix;
10606 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
10607 (pf->num_fdsb_msix == 0)) {
10608 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
10609 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10610 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10612 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10613 (pf->num_vmdq_msix == 0)) {
10614 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
10615 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
10618 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
10619 (pf->num_iwarp_msix == 0)) {
10620 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
10621 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
10623 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
10624 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
10626 pf->num_vmdq_msix * pf->num_vmdq_vsis,
10628 pf->num_iwarp_msix);
10634 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
10635 * @vsi: the VSI being configured
10636 * @v_idx: index of the vector in the vsi struct
10637 * @cpu: cpu to be used on affinity_mask
10639 * We allocate one q_vector. If allocation fails we return -ENOMEM.
10641 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
10643 struct i40e_q_vector *q_vector;
10645 /* allocate q_vector */
10646 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
10650 q_vector->vsi = vsi;
10651 q_vector->v_idx = v_idx;
10652 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
10655 netif_napi_add(vsi->netdev, &q_vector->napi,
10656 i40e_napi_poll, NAPI_POLL_WEIGHT);
10658 /* tie q_vector and vsi together */
10659 vsi->q_vectors[v_idx] = q_vector;
10665 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
10666 * @vsi: the VSI being configured
10668 * We allocate one q_vector per queue interrupt. If allocation fails we return -ENOMEM.
10671 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
10673 struct i40e_pf *pf = vsi->back;
10674 int err, v_idx, num_q_vectors, current_cpu;
10676 /* if not MSIX, give the one vector only to the LAN VSI */
10677 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10678 num_q_vectors = vsi->num_q_vectors;
10679 else if (vsi == pf->vsi[pf->lan_vsi])
10684 current_cpu = cpumask_first(cpu_online_mask);
10686 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
10687 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
10690 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
10691 if (unlikely(current_cpu >= nr_cpu_ids))
10692 current_cpu = cpumask_first(cpu_online_mask);
10699 i40e_free_q_vector(vsi, v_idx);
10705 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
10706 * @pf: board private structure to initialize
10708 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
10713 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10714 vectors = i40e_init_msix(pf);
10716 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
10717 I40E_FLAG_IWARP_ENABLED |
10718 I40E_FLAG_RSS_ENABLED |
10719 I40E_FLAG_DCB_CAPABLE |
10720 I40E_FLAG_DCB_ENABLED |
10721 I40E_FLAG_SRIOV_ENABLED |
10722 I40E_FLAG_FD_SB_ENABLED |
10723 I40E_FLAG_FD_ATR_ENABLED |
10724 I40E_FLAG_VMDQ_ENABLED);
10725 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10727 /* rework the queue expectations without MSIX */
10728 i40e_determine_queue_usage(pf);
10732 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10733 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
10734 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
10735 vectors = pci_enable_msi(pf->pdev);
10737 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
10739 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
10741 vectors = 1; /* one MSI or Legacy vector */
10744 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
10745 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
10747 /* set up vector assignment tracking */
10748 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
10749 pf->irq_pile = kzalloc(size, GFP_KERNEL);
10753 pf->irq_pile->num_entries = vectors;
10754 pf->irq_pile->search_hint = 0;
10756 /* track first vector for misc interrupts, ignore return */
10757 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
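/* Entry 0 of irq_pile is claimed here for the misc/AdminQ interrupt, so
 * the queue interrupt vectors handed out later begin at index 1.
 */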
10763 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
10764 * @pf: private board data structure
10766 * Restore the interrupt scheme that was cleared when we suspended the
10767 * device. This should be called during resume to re-allocate the q_vectors
10768 * and reacquire IRQs.
10770 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
10774 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
10775 * scheme. We need to re-enable them here in order to attempt to
10776 * re-acquire the MSI or MSI-X vectors
10778 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
10780 err = i40e_init_interrupt_scheme(pf);
10784 /* Now that we've re-acquired IRQs, we need to remap the vectors and
10785 * rings together again.
10787 for (i = 0; i < pf->num_alloc_vsi; i++) {
10789 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
10792 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
10796 err = i40e_setup_misc_vector(pf);
10800 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
10801 i40e_client_update_msix_info(pf);
10808 i40e_vsi_free_q_vectors(pf->vsi[i]);
10815 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
10816 * non-queue events in recovery mode
10817 * @pf: board private structure
10819 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
10820 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
10821 * This is handled differently from the non-recovery path since no Tx/Rx
10822 * resources are being allocated.
10824 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
10828 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10829 err = i40e_setup_misc_vector(pf);
10832 dev_info(&pf->pdev->dev,
10833 "MSI-X misc vector request failed, error %d\n",
10838 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
10840 err = request_irq(pf->pdev->irq, i40e_intr, flags,
10844 dev_info(&pf->pdev->dev,
10845 "MSI/legacy misc vector request failed, error %d\n",
10849 i40e_enable_misc_int_causes(pf);
10850 i40e_irq_dynamic_enable_icr0(pf);
10857 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
10858 * @pf: board private structure
10860 * This sets up the handler for MSIX 0, which is used to manage the
10861 * non-queue interrupts, e.g. AdminQ and errors. This is not used
10862 * when in MSI or Legacy interrupt mode.
10864 static int i40e_setup_misc_vector(struct i40e_pf *pf)
10866 struct i40e_hw *hw = &pf->hw;
10869 /* Only request the IRQ once, the first time through. */
10870 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
10871 err = request_irq(pf->msix_entries[0].vector,
10872 i40e_intr, 0, pf->int_name, pf);
10874 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
10875 dev_info(&pf->pdev->dev,
10876 "request_irq for %s failed: %d\n",
10877 pf->int_name, err);
10882 i40e_enable_misc_int_causes(pf);
10884 /* associate no queues to the misc vector */
10885 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
10886 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
10890 i40e_irq_dynamic_enable_icr0(pf);
10896 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
10897 * @vsi: Pointer to vsi structure
10898 * @seed: Buffer to store the hash keys
10899 * @lut: Buffer to store the lookup table entries
10900 * @lut_size: Size of buffer to store the lookup table entries
10902 * Return 0 on success, negative on failure
10904 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
10905 u8 *lut, u16 lut_size)
10907 struct i40e_pf *pf = vsi->back;
10908 struct i40e_hw *hw = &pf->hw;
10912 ret = i40e_aq_get_rss_key(hw, vsi->id,
10913 (struct i40e_aqc_get_set_rss_key_data *)seed);
10915 dev_info(&pf->pdev->dev,
10916 "Cannot get RSS key, err %s aq_err %s\n",
10917 i40e_stat_str(&pf->hw, ret),
10918 i40e_aq_str(&pf->hw,
10919 pf->hw.aq.asq_last_status));
10925 bool pf_lut = vsi->type == I40E_VSI_MAIN;
10927 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
10929 dev_info(&pf->pdev->dev,
10930 "Cannot get RSS lut, err %s aq_err %s\n",
10931 i40e_stat_str(&pf->hw, ret),
10932 i40e_aq_str(&pf->hw,
10933 pf->hw.aq.asq_last_status));
10942 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
10943 * @vsi: Pointer to vsi structure
10944 * @seed: RSS hash seed
10945 * @lut: Lookup table
10946 * @lut_size: Lookup table size
10948 * Returns 0 on success, negative on failure
10950 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
10951 const u8 *lut, u16 lut_size)
10953 struct i40e_pf *pf = vsi->back;
10954 struct i40e_hw *hw = &pf->hw;
10955 u16 vf_id = vsi->vf_id;
10958 /* Fill out hash function seed */
10960 u32 *seed_dw = (u32 *)seed;
10962 if (vsi->type == I40E_VSI_MAIN) {
10963 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
10964 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
10965 } else if (vsi->type == I40E_VSI_SRIOV) {
10966 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
10967 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
10969 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
10974 u32 *lut_dw = (u32 *)lut;
10976 if (vsi->type == I40E_VSI_MAIN) {
10977 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10979 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10980 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
10981 } else if (vsi->type == I40E_VSI_SRIOV) {
10982 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
10984 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
10985 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
10987 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
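/* Note on the register layout used above: the main VSI programs the
 * PF-wide I40E_PFQF_HKEY/HLUT registers, while an SRIOV VSI programs
 * the per-VF I40E_VFQF_HKEY1/HLUT1 copies indexed by vf_id; any other
 * VSI type is rejected.
 */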
10996 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
10997 * @vsi: Pointer to VSI structure
10998 * @seed: Buffer to store the keys
10999 * @lut: Buffer to store the lookup table entries
11000 * @lut_size: Size of buffer to store the lookup table entries
11002 * Returns 0 on success, negative on failure
11004 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
11005 u8 *lut, u16 lut_size)
11007 struct i40e_pf *pf = vsi->back;
11008 struct i40e_hw *hw = &pf->hw;
11012 u32 *seed_dw = (u32 *)seed;
11014 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11015 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
11018 u32 *lut_dw = (u32 *)lut;
11020 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11022 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11023 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
11030 * i40e_config_rss - Configure RSS keys and lut
11031 * @vsi: Pointer to VSI structure
11032 * @seed: RSS hash seed
11033 * @lut: Lookup table
11034 * @lut_size: Lookup table size
11036 * Returns 0 on success, negative on failure
11038 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11040 struct i40e_pf *pf = vsi->back;
11042 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11043 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
11045 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
11049 * i40e_get_rss - Get RSS keys and lut
11050 * @vsi: Pointer to VSI structure
11051 * @seed: Buffer to store the keys
11052 * @lut: Buffer to store the lookup table entries
11053 * @lut_size: Size of buffer to store the lookup table entries
11055 * Returns 0 on success, negative on failure
11057 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11059 struct i40e_pf *pf = vsi->back;
11061 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11062 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
11064 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
11068 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
11069 * @pf: Pointer to board private structure
11070 * @lut: Lookup table
11071 * @rss_table_size: Lookup table size
11072 * @rss_size: Range of queue number for hashing
11074 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
11075 u16 rss_table_size, u16 rss_size)
11079 for (i = 0; i < rss_table_size; i++)
11080 lut[i] = i % rss_size;
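/* Example: with rss_table_size = 512 and rss_size = 8 the table simply
 * repeats the pattern 0,1,...,7, spreading hashed traffic evenly across
 * the eight enabled queues.
 */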
11084 * i40e_pf_config_rss - Prepare for RSS if used
11085 * @pf: board private structure
11087 static int i40e_pf_config_rss(struct i40e_pf *pf)
11089 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11090 u8 seed[I40E_HKEY_ARRAY_SIZE];
11092 struct i40e_hw *hw = &pf->hw;
11097 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
11098 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
11099 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
11100 hena |= i40e_pf_get_default_rss_hena(pf);
11102 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
11103 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
11105 /* Determine the RSS table size based on the hardware capabilities */
11106 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
11107 reg_val = (pf->rss_table_size == 512) ?
11108 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
11109 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
11110 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
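/* The HASHLUTSIZE bit selects between the two lookup-table sizes the
 * hardware supports; it is set only when this PF reports a 512-entry
 * table and cleared otherwise.
 */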
11112 /* Determine the RSS size of the VSI */
11113 if (!vsi->rss_size) {
11115 /* If the firmware does something weird during VSI init, we
11116 * could end up with zero TCs. Check for that to avoid
11117 * divide-by-zero. It probably won't pass traffic, but it also
11120 qcount = vsi->num_queue_pairs /
11121 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
11122 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11124 if (!vsi->rss_size)
11127 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
11131 /* Use user configured lut if there is one, otherwise use default */
11132 if (vsi->rss_lut_user)
11133 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
11135 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
11137 /* Use user configured hash key if there is one, otherwise
11140 if (vsi->rss_hkey_user)
11141 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
11143 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
11144 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
11151 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
11152 * @pf: board private structure
11153 * @queue_count: the requested queue count for rss.
11155 * returns 0 if rss is not enabled; if enabled, returns the final rss queue
11156 * count, which may be different from the requested queue count.
11157 * Note: expects to be called while under rtnl_lock()
11159 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
11161 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11164 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
11167 queue_count = min_t(int, queue_count, num_online_cpus());
11168 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
11170 if (queue_count != vsi->num_queue_pairs) {
11173 vsi->req_queue_pairs = queue_count;
11174 i40e_prep_for_reset(pf, true);
11176 pf->alloc_rss_size = new_rss_size;
11178 i40e_reset_and_rebuild(pf, true, true);
11180 /* Discard the user configured hash keys and lut if fewer
11181 * queues are enabled.
11183 if (queue_count < vsi->rss_size) {
11184 i40e_clear_rss_config_user(vsi);
11185 dev_dbg(&pf->pdev->dev,
11186 "discard user configured hash keys and lut\n");
11189 /* Reset vsi->rss_size, as number of enabled queues changed */
11190 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
11191 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11193 i40e_pf_config_rss(pf);
11195 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
11196 vsi->req_queue_pairs, pf->rss_size_max);
11197 return pf->alloc_rss_size;
11201 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
11202 * @pf: board private structure
11204 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
11206 i40e_status status;
11207 bool min_valid, max_valid;
11208 u32 max_bw, min_bw;
11210 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11211 &min_valid, &max_valid);
11215 pf->min_bw = min_bw;
11217 pf->max_bw = max_bw;
11224 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
11225 * @pf: board private structure
11227 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
11229 struct i40e_aqc_configure_partition_bw_data bw_data;
11230 i40e_status status;
11232 /* Set the valid bit for this PF */
11233 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
11234 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
11235 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
11237 /* Set the new bandwidths */
11238 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
11244 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
11245 * @pf: board private structure
11247 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
11249 /* Commit temporary BW setting to permanent NVM image */
11250 enum i40e_admin_queue_err last_aq_status;
11254 if (pf->hw.partition_id != 1) {
11255 dev_info(&pf->pdev->dev,
11256 "Commit BW only works on partition 1! This is partition %d",
11257 pf->hw.partition_id);
11258 ret = I40E_NOT_SUPPORTED;
11259 goto bw_commit_out;
11262 /* Acquire NVM for read access */
11263 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11264 last_aq_status = pf->hw.aq.asq_last_status;
11266 dev_info(&pf->pdev->dev,
11267 "Cannot acquire NVM for read access, err %s aq_err %s\n",
11268 i40e_stat_str(&pf->hw, ret),
11269 i40e_aq_str(&pf->hw, last_aq_status));
11270 goto bw_commit_out;
11273 /* Read word 0x10 of NVM - SW compatibility word 1 */
11274 ret = i40e_aq_read_nvm(&pf->hw,
11275 I40E_SR_NVM_CONTROL_WORD,
11276 0x10, sizeof(nvm_word), &nvm_word,
11278 /* Save off last admin queue command status before releasing
11281 last_aq_status = pf->hw.aq.asq_last_status;
11282 i40e_release_nvm(&pf->hw);
11284 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
11285 i40e_stat_str(&pf->hw, ret),
11286 i40e_aq_str(&pf->hw, last_aq_status));
11287 goto bw_commit_out;
11290 /* Wait a bit for NVM release to complete */
11293 /* Acquire NVM for write access */
11294 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11295 last_aq_status = pf->hw.aq.asq_last_status;
11297 dev_info(&pf->pdev->dev,
11298 "Cannot acquire NVM for write access, err %s aq_err %s\n",
11299 i40e_stat_str(&pf->hw, ret),
11300 i40e_aq_str(&pf->hw, last_aq_status));
11301 goto bw_commit_out;
11303 /* Write it back out unchanged to initiate update NVM,
11304 * which will force a write of the shadow (alt) RAM to
11305 * the NVM - thus storing the bandwidth values permanently.
11307 ret = i40e_aq_update_nvm(&pf->hw,
11308 I40E_SR_NVM_CONTROL_WORD,
11309 0x10, sizeof(nvm_word),
11310 &nvm_word, true, 0, NULL);
11311 /* Save off last admin queue command status before releasing
11314 last_aq_status = pf->hw.aq.asq_last_status;
11315 i40e_release_nvm(&pf->hw);
11317 dev_info(&pf->pdev->dev,
11318 "BW settings NOT SAVED, err %s aq_err %s\n",
11319 i40e_stat_str(&pf->hw, ret),
11320 i40e_aq_str(&pf->hw, last_aq_status));
11327 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
11328 * @pf: board private structure to initialize
11330 * i40e_sw_init initializes the Adapter private data structure.
11331 * Fields are initialized based on PCI device information and
11332 * OS network device settings (MTU size).
11334 static int i40e_sw_init(struct i40e_pf *pf)
11339 /* Set default capability flags */
11340 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
11341 I40E_FLAG_MSI_ENABLED |
11342 I40E_FLAG_MSIX_ENABLED;
11344 /* Set default ITR */
11345 pf->rx_itr_default = I40E_ITR_RX_DEF;
11346 pf->tx_itr_default = I40E_ITR_TX_DEF;
11348 /* Depending on PF configurations, it is possible that the RSS
11349 * maximum might end up larger than the available queues
11351 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11352 pf->alloc_rss_size = 1;
11353 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11354 pf->rss_size_max = min_t(int, pf->rss_size_max,
11355 pf->hw.func_caps.num_tx_qp);
11356 if (pf->hw.func_caps.rss) {
11357 pf->flags |= I40E_FLAG_RSS_ENABLED;
11358 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11359 num_online_cpus());
11362 /* MFP mode enabled */
11363 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11364 pf->flags |= I40E_FLAG_MFP_ENABLED;
11365 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11366 if (i40e_get_partition_bw_setting(pf)) {
11367 dev_warn(&pf->pdev->dev,
11368 "Could not get partition bw settings\n");
11370 dev_info(&pf->pdev->dev,
11371 "Partition BW Min = %8.8x, Max = %8.8x\n",
11372 pf->min_bw, pf->max_bw);
11374 /* nudge the Tx scheduler */
11375 i40e_set_partition_bw_setting(pf);
11379 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11380 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11381 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11382 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11383 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11384 pf->hw.num_partitions > 1)
11385 dev_info(&pf->pdev->dev,
11386 "Flow Director Sideband mode Disabled in MFP mode\n");
11388 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11389 pf->fdir_pf_filter_count =
11390 pf->hw.func_caps.fd_filters_guaranteed;
11391 pf->hw.fdir_shared_filter_count =
11392 pf->hw.func_caps.fd_filters_best_effort;
11395 if (pf->hw.mac.type == I40E_MAC_X722) {
11396 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11397 I40E_HW_128_QP_RSS_CAPABLE |
11398 I40E_HW_ATR_EVICT_CAPABLE |
11399 I40E_HW_WB_ON_ITR_CAPABLE |
11400 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11401 I40E_HW_NO_PCI_LINK_CHECK |
11402 I40E_HW_USE_SET_LLDP_MIB |
11403 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11404 I40E_HW_PTP_L4_CAPABLE |
11405 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11406 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
11408 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11409 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11410 I40E_FDEVICT_PCTYPE_DEFAULT) {
11411 dev_warn(&pf->pdev->dev,
11412 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11413 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11415 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11416 ((pf->hw.aq.api_maj_ver == 1) &&
11417 (pf->hw.aq.api_min_ver > 4))) {
11418 /* Supported in FW API version higher than 1.4 */
11419 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
11422 /* Enable HW ATR eviction if possible */
11423 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
11424 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11426 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11427 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
11428 (pf->hw.aq.fw_maj_ver < 4))) {
11429 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
11430 /* No DCB support for FW < v4.33 */
11431 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
11434 /* Disable FW LLDP if FW < v4.3 */
11435 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11436 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11437 (pf->hw.aq.fw_maj_ver < 4)))
11438 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
11440 /* Use the FW Set LLDP MIB API if FW >= v4.40 */
11441 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11442 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11443 (pf->hw.aq.fw_maj_ver >= 5)))
11444 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
11446 /* Enable PTP L4 if FW >= v6.0 */
11447 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11448 pf->hw.aq.fw_maj_ver >= 6)
11449 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11451 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
11452 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
11453 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
11454 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
11457 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
11458 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11459 /* IWARP needs one extra vector for CQP, just like MISC. */
11460 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
11462 /* Stopping FW LLDP engine is supported on XL710 and X722
11463 * starting from FW versions determined in i40e_init_adminq.
11464 * Stopping the FW LLDP engine is not supported on XL710
11465 * if NPAR is functioning so unset this hw flag in this case.
11467 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11468 pf->hw.func_caps.npar_enable &&
11469 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
11470 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
11472 #ifdef CONFIG_PCI_IOV
11473 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
11474 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
11475 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
11476 pf->num_req_vfs = min_t(int,
11477 pf->hw.func_caps.num_vfs,
11478 I40E_MAX_VF_COUNT);
11480 #endif /* CONFIG_PCI_IOV */
11481 pf->eeprom_version = 0xDEAD;
11482 pf->lan_veb = I40E_NO_VEB;
11483 pf->lan_vsi = I40E_NO_VSI;
11485 /* By default FW has this off for performance reasons */
11486 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
11488 /* set up queue assignment tracking */
11489 size = sizeof(struct i40e_lump_tracking)
11490 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
11491 pf->qp_pile = kzalloc(size, GFP_KERNEL);
11492 if (!pf->qp_pile) {
11496 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
11497 pf->qp_pile->search_hint = 0;
11499 pf->tx_timeout_recovery_level = 1;
11501 mutex_init(&pf->switch_mutex);
11508 * i40e_set_ntuple - set the ntuple feature flag and take action
11509 * @pf: board private structure to initialize
11510 * @features: the feature set that the stack is suggesting
11512 * returns a bool to indicate if reset needs to happen
11514 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
11516 bool need_reset = false;
11518 /* Check if Flow Director n-tuple support was enabled or disabled. If
11519 * the state changed, we need to reset.
11521 if (features & NETIF_F_NTUPLE) {
11522 /* Enable filters and mark for reset */
11523 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
11525 /* enable FD_SB only if there is an MSI-X vector and no cloud filters */
11528 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
11529 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11530 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
11533 /* turn off filters, mark for reset and clear SW filter list */
11534 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11536 i40e_fdir_filter_exit(pf);
11538 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11539 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
11540 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11542 /* reset fd counters */
11543 pf->fd_add_err = 0;
11544 pf->fd_atr_cnt = 0;
11545 /* if ATR was auto disabled it can be re-enabled. */
11546 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
11547 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
11548 (I40E_DEBUG_FD & pf->hw.debug_mask))
11549 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
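/* In either direction, need_reset is reported back to the caller so the
 * PF is rebuilt only when the sideband filter state actually changed.
 */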
11555 * i40e_clear_rss_lut - clear the rx hash lookup table
11556 * @vsi: the VSI being configured
11558 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
11560 struct i40e_pf *pf = vsi->back;
11561 struct i40e_hw *hw = &pf->hw;
11562 u16 vf_id = vsi->vf_id;
11565 if (vsi->type == I40E_VSI_MAIN) {
11566 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11567 wr32(hw, I40E_PFQF_HLUT(i), 0);
11568 } else if (vsi->type == I40E_VSI_SRIOV) {
11569 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11570 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
11572 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11577 * i40e_set_features - set the netdev feature flags
11578 * @netdev: ptr to the netdev being adjusted
11579 * @features: the feature set that the stack is suggesting
11580 * Note: expects to be called while under rtnl_lock()
11582 static int i40e_set_features(struct net_device *netdev,
11583 netdev_features_t features)
11585 struct i40e_netdev_priv *np = netdev_priv(netdev);
11586 struct i40e_vsi *vsi = np->vsi;
11587 struct i40e_pf *pf = vsi->back;
11590 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
11591 i40e_pf_config_rss(pf);
11592 else if (!(features & NETIF_F_RXHASH) &&
11593 netdev->features & NETIF_F_RXHASH)
11594 i40e_clear_rss_lut(vsi);
11596 if (features & NETIF_F_HW_VLAN_CTAG_RX)
11597 i40e_vlan_stripping_enable(vsi);
11599 i40e_vlan_stripping_disable(vsi);
11601 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
11602 dev_err(&pf->pdev->dev,
11603 "Offloaded tc filters active, can't turn hw_tc_offload off");
11607 need_reset = i40e_set_ntuple(pf, features);
11610 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11616 * i40e_get_udp_port_idx - Look up a possibly offloaded Rx UDP port
11617 * @pf: board private structure
11618 * @port: The UDP port to look up
11620 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
11622 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
11626 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
11627 /* Do not report ports with pending deletions as available */
11630 if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
11632 if (pf->udp_ports[i].port == port)
11640 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
11641 * @netdev: This physical port's netdev
11642 * @ti: Tunnel endpoint information
11644 static void i40e_udp_tunnel_add(struct net_device *netdev,
11645 struct udp_tunnel_info *ti)
11647 struct i40e_netdev_priv *np = netdev_priv(netdev);
11648 struct i40e_vsi *vsi = np->vsi;
11649 struct i40e_pf *pf = vsi->back;
11650 u16 port = ntohs(ti->port);
11654 idx = i40e_get_udp_port_idx(pf, port);
11656 /* Check if port already exists */
11657 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11658 netdev_info(netdev, "port %d already offloaded\n", port);
11662 /* Now check if there is space to add the new port */
11663 next_idx = i40e_get_udp_port_idx(pf, 0);
11665 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11666 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
11671 switch (ti->type) {
11672 case UDP_TUNNEL_TYPE_VXLAN:
11673 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
11675 case UDP_TUNNEL_TYPE_GENEVE:
11676 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
11678 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
11684 /* New port: add it and mark its index in the bitmap */
11685 pf->udp_ports[next_idx].port = port;
11686 pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
11687 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
11688 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
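/* The new port is not written to the firmware here; the pending bit and
 * the sync-pending state flag defer the actual AQ update to the UDP
 * filter sync performed later by the service task.
 */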
11692 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
11693 * @netdev: This physical port's netdev
11694 * @ti: Tunnel endpoint information
11696 static void i40e_udp_tunnel_del(struct net_device *netdev,
11697 struct udp_tunnel_info *ti)
11699 struct i40e_netdev_priv *np = netdev_priv(netdev);
11700 struct i40e_vsi *vsi = np->vsi;
11701 struct i40e_pf *pf = vsi->back;
11702 u16 port = ntohs(ti->port);
11705 idx = i40e_get_udp_port_idx(pf, port);
11707 /* Check if port already exists */
11708 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
11711 switch (ti->type) {
11712 case UDP_TUNNEL_TYPE_VXLAN:
11713 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
11716 case UDP_TUNNEL_TYPE_GENEVE:
11717 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
11724 /* if port exists, set it to 0 (mark for deletion)
11725 * and make it pending
11727 pf->udp_ports[idx].port = 0;
11729 /* Toggle pending bit instead of setting it. This way if we are
11730 * deleting a port that has yet to be added we just clear the pending
11731 * bit and don't have to worry about it.
11733 pf->pending_udp_bitmap ^= BIT_ULL(idx);
11734 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
11738 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
11742 static int i40e_get_phys_port_id(struct net_device *netdev,
11743 struct netdev_phys_item_id *ppid)
11745 struct i40e_netdev_priv *np = netdev_priv(netdev);
11746 struct i40e_pf *pf = np->vsi->back;
11747 struct i40e_hw *hw = &pf->hw;
11749 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
11750 return -EOPNOTSUPP;
11752 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
11753 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
11759 * i40e_ndo_fdb_add - add an entry to the hardware database
11760 * @ndm: the input from the stack
11761 * @tb: pointer to array of nladdr (unused)
11762 * @dev: the net device pointer
11763 * @addr: the MAC address entry being added
11765 * @flags: instructions from stack about fdb operation
11767 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
11768 struct net_device *dev,
11769 const unsigned char *addr, u16 vid,
11771 struct netlink_ext_ack *extack)
11773 struct i40e_netdev_priv *np = netdev_priv(dev);
11774 struct i40e_pf *pf = np->vsi->back;
11777 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
11778 return -EOPNOTSUPP;
11781 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
11785 /* Hardware does not support aging addresses so if a
11786 * ndm_state is given only allow permanent addresses
11788 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
11789 netdev_info(dev, "FDB only supports static addresses\n");
11793 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
11794 err = dev_uc_add_excl(dev, addr);
11795 else if (is_multicast_ether_addr(addr))
11796 err = dev_mc_add_excl(dev, addr);
11800 /* Only return duplicate errors if NLM_F_EXCL is set */
11801 if (err == -EEXIST && !(flags & NLM_F_EXCL))
11808 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
11809 * @dev: the netdev being configured
11810 * @nlh: RTNL message
11811 * @flags: bridge flags
11812 * @extack: netlink extended ack
11814 * Inserts a new hardware bridge if not already created and
11815 * enables the bridging mode requested (VEB or VEPA). If the
11816 * hardware bridge has already been inserted and the request
11817 * is to change the mode then that requires a PF reset to
11818 * allow rebuild of the components with required hardware
11819 * bridge mode enabled.
11821 * Note: expects to be called while under rtnl_lock()
11823 static int i40e_ndo_bridge_setlink(struct net_device *dev,
11824 struct nlmsghdr *nlh,
11826 struct netlink_ext_ack *extack)
11828 struct i40e_netdev_priv *np = netdev_priv(dev);
11829 struct i40e_vsi *vsi = np->vsi;
11830 struct i40e_pf *pf = vsi->back;
11831 struct i40e_veb *veb = NULL;
11832 struct nlattr *attr, *br_spec;
11835 /* Only for PF VSI for now */
11836 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11837 return -EOPNOTSUPP;
11839 /* Find the HW bridge for PF VSI */
11840 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11841 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11845 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11847 nla_for_each_nested(attr, br_spec, rem) {
11850 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11853 mode = nla_get_u16(attr);
11854 if ((mode != BRIDGE_MODE_VEPA) &&
11855 (mode != BRIDGE_MODE_VEB))
11858 /* Insert a new HW bridge */
11860 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
11861 vsi->tc_config.enabled_tc);
11863 veb->bridge_mode = mode;
11864 i40e_config_bridge_mode(veb);
11866 /* No Bridge HW offload available */
11870 } else if (mode != veb->bridge_mode) {
11871 /* Existing HW bridge but different mode needs reset */
11872 veb->bridge_mode = mode;
11873 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
11874 if (mode == BRIDGE_MODE_VEB)
11875 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11877 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
11878 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11887 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
11890 * @seq: RTNL message seq #
11891 * @dev: the netdev being configured
11892 * @filter_mask: unused
11893 * @nlflags: netlink flags passed in
11895 * Return the mode in which the hardware bridge is operating in
11898 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11899 struct net_device *dev,
11900 u32 __always_unused filter_mask,
11903 struct i40e_netdev_priv *np = netdev_priv(dev);
11904 struct i40e_vsi *vsi = np->vsi;
11905 struct i40e_pf *pf = vsi->back;
11906 struct i40e_veb *veb = NULL;
11909 /* Only for PF VSI for now */
11910 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11911 return -EOPNOTSUPP;
11913 /* Find the HW bridge for the PF VSI */
11914 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11915 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11922 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
11923 0, 0, nlflags, filter_mask, NULL);
11927 * i40e_features_check - Validate encapsulated packet conforms to limits
11929 * @dev: This physical port's netdev
11930 * @features: Offload features that the stack believes apply
11932 static netdev_features_t i40e_features_check(struct sk_buff *skb,
11933 struct net_device *dev,
11934 netdev_features_t features)
11938 /* No point in doing any of this if neither checksum nor GSO are
11939 * being requested for this frame. We can rule out both by just
11940 * checking for CHECKSUM_PARTIAL
11942 if (skb->ip_summed != CHECKSUM_PARTIAL)
11945 /* We cannot support GSO if the MSS is going to be less than
11946 * 64 bytes. If it is then we need to drop support for GSO.
11948 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
11949 features &= ~NETIF_F_GSO_MASK;
11951 /* MACLEN can support at most 63 words */
11952 len = skb_network_header(skb) - skb->data;
11953 if (len & ~(63 * 2))
11956 /* IPLEN and EIPLEN can support at most 127 dwords */
11957 len = skb_transport_header(skb) - skb_network_header(skb);
11958 if (len & ~(127 * 4))
11961 if (skb->encapsulation) {
11962 /* L4TUNLEN can support 127 words */
11963 len = skb_inner_network_header(skb) - skb_transport_header(skb);
11964 if (len & ~(127 * 2))
11967 /* IPLEN can support at most 127 dwords */
11968 len = skb_inner_transport_header(skb) -
11969 skb_inner_network_header(skb);
11970 if (len & ~(127 * 4))
11974 /* No need to validate L4LEN as TCP is the only protocol with a
11975 * flexible value and we support all possible values supported
11976 * by TCP, which is at most 15 dwords
11981 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
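/* The bit masks above express the header-length limits the hardware can
 * describe in its Tx context descriptor: MACLEN at most 63 2-byte words
 * (126 bytes), the outer and inner IPLEN at most 127 4-byte dwords
 * (508 bytes), and L4TUNLEN at most 127 2-byte words. A frame exceeding
 * any of them takes the return above, which strips the checksum and GSO
 * offloads so the stack falls back to software.
 */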
11985 * i40e_xdp_setup - add/remove an XDP program
11986 * @vsi: VSI to changed
11987 * @prog: XDP program
11989 static int i40e_xdp_setup(struct i40e_vsi *vsi,
11990 struct bpf_prog *prog)
11992 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
11993 struct i40e_pf *pf = vsi->back;
11994 struct bpf_prog *old_prog;
11998 /* Don't allow frames that span over multiple buffers */
11999 if (frame_size > vsi->rx_buf_len)
12002 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
12005 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
12006 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
12009 i40e_prep_for_reset(pf, true);
12011 old_prog = xchg(&vsi->xdp_prog, prog);
12014 i40e_reset_and_rebuild(pf, true, true);
12016 for (i = 0; i < vsi->num_queue_pairs; i++)
12017 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
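/* WRITE_ONCE() is used because the per-ring xdp_prog pointer is read
 * locklessly from the NAPI receive path.
 */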
12020 bpf_prog_put(old_prog);
12022 /* Kick start the NAPI context if there is an AF_XDP socket open
12023 * on that queue id, so that receiving will start.
12025 if (need_reset && prog)
12026 for (i = 0; i < vsi->num_queue_pairs; i++)
12027 if (vsi->xdp_rings[i]->xsk_umem)
12028 (void)i40e_xsk_async_xmit(vsi->netdev, i);
12034 * i40e_enter_busy_conf - Enters busy config state
12037 * Returns 0 on success, <0 for failure.
12039 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
12041 struct i40e_pf *pf = vsi->back;
12044 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
12048 usleep_range(1000, 2000);
12055 * i40e_exit_busy_conf - Exits busy config state
12058 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
12060 struct i40e_pf *pf = vsi->back;
12062 clear_bit(__I40E_CONFIG_BUSY, pf->state);
12066 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
12068 * @queue_pair: queue pair
12070 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
12072 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
12073 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
12074 memset(&vsi->tx_rings[queue_pair]->stats, 0,
12075 sizeof(vsi->tx_rings[queue_pair]->stats));
12076 if (i40e_enabled_xdp_vsi(vsi)) {
12077 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
12078 sizeof(vsi->xdp_rings[queue_pair]->stats));
12083 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
12085 * @queue_pair: queue pair
12087 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
12089 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
12090 if (i40e_enabled_xdp_vsi(vsi)) {
12091 /* Make sure that in-progress ndo_xdp_xmit calls are
12095 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
12097 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
12101 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
12103 * @queue_pair: queue pair
12104 * @enable: true for enable, false for disable
12106 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
12109 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12110 struct i40e_q_vector *q_vector = rxr->q_vector;
12115 /* All rings in a qp belong to the same qvector. */
12116 if (q_vector->rx.ring || q_vector->tx.ring) {
12118 napi_enable(&q_vector->napi);
12120 napi_disable(&q_vector->napi);
12125 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
12127 * @queue_pair: queue pair
12128 * @enable: true for enable, false for disable
12130 * Returns 0 on success, <0 on failure.
12132 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
12135 struct i40e_pf *pf = vsi->back;
12138 pf_q = vsi->base_queue + queue_pair;
12139 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
12140 false /*is xdp*/, enable);
12142 dev_info(&pf->pdev->dev,
12143 "VSI seid %d Tx ring %d %sable timeout\n",
12144 vsi->seid, pf_q, (enable ? "en" : "dis"));
12148 i40e_control_rx_q(pf, pf_q, enable);
12149 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
12151 dev_info(&pf->pdev->dev,
12152 "VSI seid %d Rx ring %d %sable timeout\n",
12153 vsi->seid, pf_q, (enable ? "en" : "dis"));
12157 /* Due to HW errata, on Rx disable only, the register can
12158 * indicate done before it really is. Needs 50ms to be sure
12163 if (!i40e_enabled_xdp_vsi(vsi))
12166 ret = i40e_control_wait_tx_q(vsi->seid, pf,
12167 pf_q + vsi->alloc_queue_pairs,
12168 true /*is xdp*/, enable);
12170 dev_info(&pf->pdev->dev,
12171 "VSI seid %d XDP Tx ring %d %sable timeout\n",
12172 vsi->seid, pf_q, (enable ? "en" : "dis"));
12179 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
12181 * @queue_pair: queue_pair
12183 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
12185 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12186 struct i40e_pf *pf = vsi->back;
12187 struct i40e_hw *hw = &pf->hw;
12189 /* All rings in a qp belong to the same qvector. */
12190 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
12191 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
12193 i40e_irq_dynamic_enable_icr0(pf);
12199 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
12201 * @queue_pair: queue_pair
12203 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
12205 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12206 struct i40e_pf *pf = vsi->back;
12207 struct i40e_hw *hw = &pf->hw;
12209 /* For simplicity, instead of removing the qp interrupt causes
12210 * from the interrupt linked list, we simply disable the interrupt, and
12211 * leave the list intact.
12213 * All rings in a qp belong to the same qvector.
12215 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12216 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
12218 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
12220 synchronize_irq(pf->msix_entries[intpf].vector);
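/* The register index above is (intpf - 1) because MSI-X vector 0 (the
 * misc vector) is controlled through PFINT_DYN_CTL0, so PFINT_DYN_CTLN
 * starts counting from the first queue vector.
 */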
12222 /* Legacy and MSI mode - this stops all interrupt handling */
12223 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
12224 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
12226 synchronize_irq(pf->pdev->irq);
12231 * i40e_queue_pair_disable - Disables a queue pair
12233 * @queue_pair: queue pair
12235 * Returns 0 on success, <0 on failure.
12237 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
12241 err = i40e_enter_busy_conf(vsi);
12245 i40e_queue_pair_disable_irq(vsi, queue_pair);
12246 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
12247 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
12248 i40e_queue_pair_clean_rings(vsi, queue_pair);
12249 i40e_queue_pair_reset_stats(vsi, queue_pair);
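/* Teardown order matters: the interrupt is masked first so no new work
 * is scheduled, the HW queues are then stopped, NAPI is disabled, and
 * only afterwards are the rings cleaned and their counters cleared.
 */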
12255 * i40e_queue_pair_enable - Enables a queue pair
12257 * @queue_pair: queue pair
12259 * Returns 0 on success, <0 on failure.
12261 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
12265 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
12269 if (i40e_enabled_xdp_vsi(vsi)) {
12270 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
12275 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
12279 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
12280 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
12281 i40e_queue_pair_enable_irq(vsi, queue_pair);
12283 i40e_exit_busy_conf(vsi);
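/* Bring-up mirrors the teardown above in reverse: the rings are
 * configured and started before NAPI and the queue-pair interrupt are
 * re-enabled.
 */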
12289 * i40e_xdp - implements ndo_bpf for i40e
12291 * @xdp: XDP command
12293 static int i40e_xdp(struct net_device *dev,
12294 struct netdev_bpf *xdp)
12296 struct i40e_netdev_priv *np = netdev_priv(dev);
12297 struct i40e_vsi *vsi = np->vsi;
12299 if (vsi->type != I40E_VSI_MAIN)
12302 switch (xdp->command) {
12303 case XDP_SETUP_PROG:
12304 return i40e_xdp_setup(vsi, xdp->prog);
12305 case XDP_QUERY_PROG:
12306 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
12308 case XDP_SETUP_XSK_UMEM:
12309 return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
12310 xdp->xsk.queue_id);
12316 static const struct net_device_ops i40e_netdev_ops = {
12317 .ndo_open = i40e_open,
12318 .ndo_stop = i40e_close,
12319 .ndo_start_xmit = i40e_lan_xmit_frame,
12320 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
12321 .ndo_set_rx_mode = i40e_set_rx_mode,
12322 .ndo_validate_addr = eth_validate_addr,
12323 .ndo_set_mac_address = i40e_set_mac,
12324 .ndo_change_mtu = i40e_change_mtu,
12325 .ndo_do_ioctl = i40e_ioctl,
12326 .ndo_tx_timeout = i40e_tx_timeout,
12327 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
12328 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
12329 #ifdef CONFIG_NET_POLL_CONTROLLER
12330 .ndo_poll_controller = i40e_netpoll,
12332 .ndo_setup_tc = __i40e_setup_tc,
12333 .ndo_set_features = i40e_set_features,
12334 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
12335 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
12336 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
12337 .ndo_get_vf_config = i40e_ndo_get_vf_config,
12338 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
12339 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
12340 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
12341 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
12342 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
12343 .ndo_get_phys_port_id = i40e_get_phys_port_id,
12344 .ndo_fdb_add = i40e_ndo_fdb_add,
12345 .ndo_features_check = i40e_features_check,
12346 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
12347 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
12348 .ndo_bpf = i40e_xdp,
12349 .ndo_xdp_xmit = i40e_xdp_xmit,
12350 .ndo_xsk_async_xmit = i40e_xsk_async_xmit,
12354 * i40e_config_netdev - Setup the netdev flags
12355 * @vsi: the VSI being configured
12357 * Returns 0 on success, negative value on failure
12359 static int i40e_config_netdev(struct i40e_vsi *vsi)
12361 struct i40e_pf *pf = vsi->back;
12362 struct i40e_hw *hw = &pf->hw;
12363 struct i40e_netdev_priv *np;
12364 struct net_device *netdev;
12365 u8 broadcast[ETH_ALEN];
12366 u8 mac_addr[ETH_ALEN];
12368 netdev_features_t hw_enc_features;
12369 netdev_features_t hw_features;
12371 etherdev_size = sizeof(struct i40e_netdev_priv);
12372 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
12376 vsi->netdev = netdev;
12377 np = netdev_priv(netdev);
12380 hw_enc_features = NETIF_F_SG |
12382 NETIF_F_IPV6_CSUM |
12384 NETIF_F_SOFT_FEATURES |
12389 NETIF_F_GSO_GRE_CSUM |
12390 NETIF_F_GSO_PARTIAL |
12391 NETIF_F_GSO_IPXIP4 |
12392 NETIF_F_GSO_IPXIP6 |
12393 NETIF_F_GSO_UDP_TUNNEL |
12394 NETIF_F_GSO_UDP_TUNNEL_CSUM |
12400 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
12401 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
12403 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
12405 netdev->hw_enc_features |= hw_enc_features;
12407 /* record features VLANs can make use of */
12408 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
12410 hw_features = hw_enc_features |
12411 NETIF_F_HW_VLAN_CTAG_TX |
12412 NETIF_F_HW_VLAN_CTAG_RX;
12414 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12415 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
12417 netdev->hw_features |= hw_features;
12419 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
12420 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
12422 if (vsi->type == I40E_VSI_MAIN) {
12423 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
12424 ether_addr_copy(mac_addr, hw->mac.perm_addr);
12425 /* The following steps are necessary for two reasons. First,
12426 * some older NVM configurations load a default MAC-VLAN
12427 * filter that will accept any tagged packet, and we want to
12428 * replace this with a normal filter. Additionally, it is
12429 * possible our MAC address was provided by the platform using
12430 * Open Firmware or similar.
12432 * Thus, we need to remove the default filter and install one
12433 * specific to the MAC address.
12435 i40e_rm_default_mac_filter(vsi, mac_addr);
12436 spin_lock_bh(&vsi->mac_filter_hash_lock);
12437 i40e_add_mac_filter(vsi, mac_addr);
12438 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12440 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
12441 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
12442 * the end, which is 4 bytes long, so force truncation of the
12443 * original name by IFNAMSIZ - 4
12445 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
12447 IFNAMSIZ - 4, pf->vsi[pf->lan_vsi]->netdev->name);
12448 eth_random_addr(mac_addr);
12450 spin_lock_bh(&vsi->mac_filter_hash_lock);
12451 i40e_add_mac_filter(vsi, mac_addr);
12452 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12455 /* Add the broadcast filter so that we initially will receive
12456 * broadcast packets. Note that when a new VLAN is first added the
12457 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
12458 * specific filters as part of transitioning into "vlan" operation.
12459 * When more VLANs are added, the driver will copy each existing MAC
12460 * filter and add it for the new VLAN.
12462 * Broadcast filters are handled specially by
12463 * i40e_sync_filters_subtask, as the driver must set the broadcast
12464 * promiscuous bit instead of adding this directly as a MAC/VLAN
12465 * filter. The subtask will update the correct broadcast promiscuous
12466 * bits as VLANs become active or inactive.
12468 eth_broadcast_addr(broadcast);
12469 spin_lock_bh(&vsi->mac_filter_hash_lock);
12470 i40e_add_mac_filter(vsi, broadcast);
12471 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12473 ether_addr_copy(netdev->dev_addr, mac_addr);
12474 ether_addr_copy(netdev->perm_addr, mac_addr);
12476 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
12477 netdev->neigh_priv_len = sizeof(u32) * 4;
12479 netdev->priv_flags |= IFF_UNICAST_FLT;
12480 netdev->priv_flags |= IFF_SUPP_NOFCS;
12481 /* Setup netdev TC information */
12482 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
12484 netdev->netdev_ops = &i40e_netdev_ops;
12485 netdev->watchdog_timeo = 5 * HZ;
12486 i40e_set_ethtool_ops(netdev);
12488 /* MTU range: 68 - 9706 */
12489 netdev->min_mtu = ETH_MIN_MTU;
12490 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
12496 * i40e_vsi_delete - Delete a VSI from the switch
12497 * @vsi: the VSI being removed
12499 * Returns 0 on success, negative value on failure
12501 static void i40e_vsi_delete(struct i40e_vsi *vsi)
12503 /* removing the default VSI is not allowed */
12504 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
12507 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
12511 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
12512 * @vsi: the VSI being queried
12514 * Returns 1 if HW bridge mode is VEB and 0 in case of VEPA mode
12516 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
12518 struct i40e_veb *veb;
12519 struct i40e_pf *pf = vsi->back;
12521 /* Uplink is not a bridge so default to VEB */
12522 if (vsi->veb_idx >= I40E_MAX_VEB)
12525 veb = pf->veb[vsi->veb_idx];
12527 dev_info(&pf->pdev->dev,
12528 "There is no veb associated with the bridge\n");
12532 /* Uplink is a bridge in VEPA mode */
12533 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
12536 /* Uplink is a bridge in VEB mode */
12540 /* VEPA is now default bridge, so return 0 */
12545 * i40e_add_vsi - Add a VSI to the switch
12546 * @vsi: the VSI being configured
12548 * This initializes a VSI context depending on the VSI type to be added and
12549 * passes it down to the add_vsi aq command.
12551 static int i40e_add_vsi(struct i40e_vsi *vsi)
12554 struct i40e_pf *pf = vsi->back;
12555 struct i40e_hw *hw = &pf->hw;
12556 struct i40e_vsi_context ctxt;
12557 struct i40e_mac_filter *f;
12558 struct hlist_node *h;
12561 u8 enabled_tc = 0x1; /* TC0 enabled */
12564 memset(&ctxt, 0, sizeof(ctxt));
12565 switch (vsi->type) {
12566 case I40E_VSI_MAIN:
12567 /* The PF's main VSI is already setup as part of the
12568 * device initialization, so we'll not bother with
12569 * the add_vsi call, but we will retrieve the current
12572 ctxt.seid = pf->main_vsi_seid;
12573 ctxt.pf_num = pf->hw.pf_id;
12575 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
12576 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12578 dev_info(&pf->pdev->dev,
12579 "couldn't get PF vsi config, err %s aq_err %s\n",
12580 i40e_stat_str(&pf->hw, ret),
12581 i40e_aq_str(&pf->hw,
12582 pf->hw.aq.asq_last_status));
12585 vsi->info = ctxt.info;
12586 vsi->info.valid_sections = 0;
12588 vsi->seid = ctxt.seid;
12589 vsi->id = ctxt.vsi_number;
12591 enabled_tc = i40e_pf_get_tc_map(pf);
12593 /* Source pruning is enabled by default, so the flag is
12594 * negative logic - if it's set, we need to fiddle with
12595 * the VSI to disable source pruning.
12597 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
12598 memset(&ctxt, 0, sizeof(ctxt));
12599 ctxt.seid = pf->main_vsi_seid;
12600 ctxt.pf_num = pf->hw.pf_id;
12602 ctxt.info.valid_sections |=
12603 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12604 ctxt.info.switch_id =
12605 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
12606 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
12608 dev_info(&pf->pdev->dev,
12609 "update vsi failed, err %s aq_err %s\n",
12610 i40e_stat_str(&pf->hw, ret),
12611 i40e_aq_str(&pf->hw,
12612 pf->hw.aq.asq_last_status));
12618 /* MFP mode setup queue map and update VSI */
12619 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
12620 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
12621 memset(&ctxt, 0, sizeof(ctxt));
12622 ctxt.seid = pf->main_vsi_seid;
12623 ctxt.pf_num = pf->hw.pf_id;
12625 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
12626 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
12628 dev_info(&pf->pdev->dev,
12629 "update vsi failed, err %s aq_err %s\n",
12630 i40e_stat_str(&pf->hw, ret),
12631 i40e_aq_str(&pf->hw,
12632 pf->hw.aq.asq_last_status));
12636 /* update the local VSI info queue map */
12637 i40e_vsi_update_queue_map(vsi, &ctxt);
12638 vsi->info.valid_sections = 0;
12640 /* Default/Main VSI is only enabled for TC0
12641 * reconfigure it to enable all TCs that are
12642 * available on the port in SFP mode.
12643 * For MFP case the iSCSI PF would use this
12644 * flow to enable LAN+iSCSI TC.
12646 ret = i40e_vsi_config_tc(vsi, enabled_tc);
12648 /* Single TC condition is not fatal,
12649 * message and continue
12651 dev_info(&pf->pdev->dev,
12652 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
12654 i40e_stat_str(&pf->hw, ret),
12655 i40e_aq_str(&pf->hw,
12656 pf->hw.aq.asq_last_status));
12661 case I40E_VSI_FDIR:
12662 ctxt.pf_num = hw->pf_id;
12664 ctxt.uplink_seid = vsi->uplink_seid;
12665 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12666 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12667 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
12668 (i40e_is_vsi_uplink_mode_veb(vsi))) {
12669 ctxt.info.valid_sections |=
12670 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12671 ctxt.info.switch_id =
12672 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12674 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12677 case I40E_VSI_VMDQ2:
12678 ctxt.pf_num = hw->pf_id;
12680 ctxt.uplink_seid = vsi->uplink_seid;
12681 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12682 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
12684 /* This VSI is connected to VEB so the switch_id
12685 * should be set to zero by default.
12687 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12688 ctxt.info.valid_sections |=
12689 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12690 ctxt.info.switch_id =
12691 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12694 /* Setup the VSI tx/rx queue map for TC0 only for now */
12695 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12698 case I40E_VSI_SRIOV:
12699 ctxt.pf_num = hw->pf_id;
12700 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
12701 ctxt.uplink_seid = vsi->uplink_seid;
12702 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12703 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
12705 /* This VSI is connected to VEB so the switch_id
12706 * should be set to zero by default.
12708 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12709 ctxt.info.valid_sections |=
12710 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12711 ctxt.info.switch_id =
12712 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12715 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
12716 ctxt.info.valid_sections |=
12717 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
12718 ctxt.info.queueing_opt_flags |=
12719 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
12720 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
12723 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
12724 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
12725 if (pf->vf[vsi->vf_id].spoofchk) {
12726 ctxt.info.valid_sections |=
12727 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
12728 ctxt.info.sec_flags |=
12729 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
12730 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
12732 /* Setup the VSI tx/rx queue map for TC0 only for now */
12733 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12736 case I40E_VSI_IWARP:
12737 /* send down message to iWARP */
12744 if (vsi->type != I40E_VSI_MAIN) {
12745 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
12747 dev_info(&vsi->back->pdev->dev,
12748 "add vsi failed, err %s aq_err %s\n",
12749 i40e_stat_str(&pf->hw, ret),
12750 i40e_aq_str(&pf->hw,
12751 pf->hw.aq.asq_last_status));
12755 vsi->info = ctxt.info;
12756 vsi->info.valid_sections = 0;
12757 vsi->seid = ctxt.seid;
12758 vsi->id = ctxt.vsi_number;
12761 vsi->active_filters = 0;
12762 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
12763 spin_lock_bh(&vsi->mac_filter_hash_lock);
12764 /* If macvlan filters already exist, force them to get loaded */
12765 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
12766 f->state = I40E_FILTER_NEW;
12769 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12772 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
12773 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
12776 /* Update VSI BW information */
12777 ret = i40e_vsi_get_bw_info(vsi);
12779 dev_info(&pf->pdev->dev,
12780 "couldn't get vsi bw info, err %s aq_err %s\n",
12781 i40e_stat_str(&pf->hw, ret),
12782 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12783 /* VSI is already added so not tearing that up */
12792 * i40e_vsi_release - Delete a VSI and free its resources
12793 * @vsi: the VSI being removed
12795 * Returns 0 on success or < 0 on error
12797 int i40e_vsi_release(struct i40e_vsi *vsi)
12799 struct i40e_mac_filter *f;
12800 struct hlist_node *h;
12801 struct i40e_veb *veb = NULL;
12802 struct i40e_pf *pf;
12808 /* release of a VEB-owner or last VSI is not allowed */
12809 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
12810 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
12811 vsi->seid, vsi->uplink_seid);
12814 if (vsi == pf->vsi[pf->lan_vsi] &&
12815 !test_bit(__I40E_DOWN, pf->state)) {
12816 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
12820 uplink_seid = vsi->uplink_seid;
12821 if (vsi->type != I40E_VSI_SRIOV) {
12822 if (vsi->netdev_registered) {
12823 vsi->netdev_registered = false;
12825 /* results in a call to i40e_close() */
12826 unregister_netdev(vsi->netdev);
12829 i40e_vsi_close(vsi);
12831 i40e_vsi_disable_irq(vsi);
12834 spin_lock_bh(&vsi->mac_filter_hash_lock);
12836 /* clear the sync flag on all filters */
12838 __dev_uc_unsync(vsi->netdev, NULL);
12839 __dev_mc_unsync(vsi->netdev, NULL);
12842 /* make sure any remaining filters are marked for deletion */
12843 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
12844 __i40e_del_filter(vsi, f);
12846 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12848 i40e_sync_vsi_filters(vsi);
12850 i40e_vsi_delete(vsi);
12851 i40e_vsi_free_q_vectors(vsi);
12853 free_netdev(vsi->netdev);
12854 vsi->netdev = NULL;
12856 i40e_vsi_clear_rings(vsi);
12857 i40e_vsi_clear(vsi);
12859 /* If this was the last thing on the VEB, except for the
12860 * controlling VSI, remove the VEB, which puts the controlling
12861 * VSI onto the next level down in the switch.
12863 * Well, okay, there's one more exception here: don't remove
12864 * the orphan VEBs yet. We'll wait for an explicit remove request
12865 * from up the network stack. */
12867 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
12869 pf->vsi[i]->uplink_seid == uplink_seid &&
12870 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
12871 n++; /* count the VSIs */
12874 for (i = 0; i < I40E_MAX_VEB; i++) {
12877 if (pf->veb[i]->uplink_seid == uplink_seid)
12878 n++; /* count the VEBs */
12879 if (pf->veb[i]->seid == uplink_seid)
12882 if (n == 0 && veb && veb->uplink_seid != 0)
12883 i40e_veb_release(veb);
12889 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
12890 * @vsi: ptr to the VSI
12892 * This should only be called after i40e_vsi_mem_alloc() which allocates the
12893 * corresponding SW VSI structure and initializes num_queue_pairs for the
12894 * newly allocated VSI.
12896 * Returns 0 on success or negative on failure
12898 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
12901 struct i40e_pf *pf = vsi->back;
12903 if (vsi->q_vectors[0]) {
12904 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
12909 if (vsi->base_vector) {
12910 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
12911 vsi->seid, vsi->base_vector);
12915 ret = i40e_vsi_alloc_q_vectors(vsi);
12917 dev_info(&pf->pdev->dev,
12918 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
12919 vsi->num_q_vectors, vsi->seid, ret);
12920 vsi->num_q_vectors = 0;
12921 goto vector_setup_out;
12924 /* In Legacy mode, we do not have to get any other vector since we
12925 * piggyback on the misc/ICR0 for queue interrupts.
12927 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
12929 if (vsi->num_q_vectors)
12930 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
12931 vsi->num_q_vectors, vsi->idx);
12932 if (vsi->base_vector < 0) {
12933 dev_info(&pf->pdev->dev,
12934 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
12935 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
12936 i40e_vsi_free_q_vectors(vsi);
12938 goto vector_setup_out;
12946 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
12947 * @vsi: pointer to the vsi.
12949 * This re-allocates a vsi's queue resources.
12951 * Returns pointer to the successfully allocated and configured VSI sw struct
12952 * on success, otherwise returns NULL on failure.
12954 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
12956 u16 alloc_queue_pairs;
12957 struct i40e_pf *pf;
12966 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
12967 i40e_vsi_clear_rings(vsi);
12969 i40e_vsi_free_arrays(vsi, false);
12970 i40e_set_num_rings_in_vsi(vsi);
12971 ret = i40e_vsi_alloc_arrays(vsi, false);
12975 alloc_queue_pairs = vsi->alloc_queue_pairs *
12976 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
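/* When an XDP program is attached, every queue pair also needs its own
 * XDP Tx ring, so twice as many queue pairs are reserved from the
 * PF-wide pile.
 */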
12978 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
12980 dev_info(&pf->pdev->dev,
12981 "failed to get tracking for %d queues for VSI %d err %d\n",
12982 alloc_queue_pairs, vsi->seid, ret);
12985 vsi->base_queue = ret;
12987 /* Update the FW view of the VSI. Force a reset of TC and queue
12988 * layout configurations.
12990 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
12991 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
12992 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
12993 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
12994 if (vsi->type == I40E_VSI_MAIN)
12995 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
12997 /* assign it some queues */
12998 ret = i40e_alloc_rings(vsi);
13002 /* map all of the rings to the q_vectors */
13003 i40e_vsi_map_rings_to_vectors(vsi);
13007 i40e_vsi_free_q_vectors(vsi);
13008 if (vsi->netdev_registered) {
13009 vsi->netdev_registered = false;
13010 unregister_netdev(vsi->netdev);
13011 free_netdev(vsi->netdev);
13012 vsi->netdev = NULL;
13014 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13016 i40e_vsi_clear(vsi);
13021 * i40e_vsi_setup - Set up a VSI by a given type
13022 * @pf: board private structure
13024 * @uplink_seid: the switch element to link to
13025 * @param1: usage depends upon VSI type. For VF types, indicates VF id
13027 * This allocates the sw VSI structure and its queue resources, then adds a VSI
13028 * to the identified VEB.
13030 * Returns pointer to the successfully allocated and configured VSI sw struct on
13031 * success, otherwise returns NULL on failure. */
13033 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
13034 u16 uplink_seid, u32 param1)
13036 struct i40e_vsi *vsi = NULL;
13037 struct i40e_veb *veb = NULL;
13038 u16 alloc_queue_pairs;
13042 /* The requested uplink_seid must be either
13043 * - the PF's port seid
13044 * no VEB is needed because this is the PF
13045 * or this is a Flow Director special case VSI
13046 * - seid of an existing VEB
13047 * - seid of a VSI that owns an existing VEB
13048 * - seid of a VSI that doesn't own a VEB
13049 * a new VEB is created and the VSI becomes the owner
13050 * - seid of the PF VSI, which is what creates the first VEB
13051 * this is a special case of the previous
13053 * Find which uplink_seid we were given and create a new VEB if needed. */
13055 for (i = 0; i < I40E_MAX_VEB; i++) {
13056 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
13062 if (!veb && uplink_seid != pf->mac_seid) {
13064 for (i = 0; i < pf->num_alloc_vsi; i++) {
13065 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
13071 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
13076 if (vsi->uplink_seid == pf->mac_seid)
13077 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
13078 vsi->tc_config.enabled_tc);
13079 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
13080 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
13081 vsi->tc_config.enabled_tc);
13083 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
13084 dev_info(&vsi->back->pdev->dev,
13085 "New VSI creation error, uplink seid of LAN VSI expected.\n");
13088 /* We come up by default in VEPA mode if SRIOV is not
13089 * already enabled, in which case we can't force VEPA mode. */
13092 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
13093 veb->bridge_mode = BRIDGE_MODE_VEPA;
13094 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
13096 i40e_config_bridge_mode(veb);
13098 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
13099 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
13103 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
13107 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13108 uplink_seid = veb->seid;
13111 /* get vsi sw struct */
13112 v_idx = i40e_vsi_mem_alloc(pf, type);
13115 vsi = pf->vsi[v_idx];
13119 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
13121 if (type == I40E_VSI_MAIN)
13122 pf->lan_vsi = v_idx;
13123 else if (type == I40E_VSI_SRIOV)
13124 vsi->vf_id = param1;
13125 /* assign it some queues */
13126 alloc_queue_pairs = vsi->alloc_queue_pairs *
13127 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13129 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13131 dev_info(&pf->pdev->dev,
13132 "failed to get tracking for %d queues for VSI %d err=%d\n",
13133 alloc_queue_pairs, vsi->seid, ret);
13136 vsi->base_queue = ret;
13138 /* get a VSI from the hardware */
13139 vsi->uplink_seid = uplink_seid;
13140 ret = i40e_add_vsi(vsi);
13144 switch (vsi->type) {
13145 /* setup the netdev if needed */
13146 case I40E_VSI_MAIN:
13147 case I40E_VSI_VMDQ2:
13148 ret = i40e_config_netdev(vsi);
13151 ret = register_netdev(vsi->netdev);
13154 vsi->netdev_registered = true;
13155 netif_carrier_off(vsi->netdev);
13156 #ifdef CONFIG_I40E_DCB
13157 /* Setup DCB netlink interface */
13158 i40e_dcbnl_setup(vsi);
13159 #endif /* CONFIG_I40E_DCB */
13162 case I40E_VSI_FDIR:
13163 /* set up vectors and rings if needed */
13164 ret = i40e_vsi_setup_vectors(vsi);
13168 ret = i40e_alloc_rings(vsi);
13172 /* map all of the rings to the q_vectors */
13173 i40e_vsi_map_rings_to_vectors(vsi);
13175 i40e_vsi_reset_stats(vsi);
13179 /* no netdev or rings for the other VSI types */
13183 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
13184 (vsi->type == I40E_VSI_VMDQ2)) {
13185 ret = i40e_vsi_config_rss(vsi);
13190 i40e_vsi_free_q_vectors(vsi);
13192 if (vsi->netdev_registered) {
13193 vsi->netdev_registered = false;
13194 unregister_netdev(vsi->netdev);
13195 free_netdev(vsi->netdev);
13196 vsi->netdev = NULL;
13199 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13201 i40e_vsi_clear(vsi);
13207 * i40e_veb_get_bw_info - Query VEB BW information
13208 * @veb: the veb to query
13210 * Query the Tx scheduler BW configuration data for given VEB
13212 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
13214 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
13215 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
13216 struct i40e_pf *pf = veb->pf;
13217 struct i40e_hw *hw = &pf->hw;
13222 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
13225 dev_info(&pf->pdev->dev,
13226 "query veb bw config failed, err %s aq_err %s\n",
13227 i40e_stat_str(&pf->hw, ret),
13228 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13232 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
13235 dev_info(&pf->pdev->dev,
13236 "query veb bw ets config failed, err %s aq_err %s\n",
13237 i40e_stat_str(&pf->hw, ret),
13238 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13242 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
13243 veb->bw_max_quanta = ets_data.tc_bw_max;
13244 veb->is_abs_credits = bw_data.absolute_credits_enable;
13245 veb->enabled_tc = ets_data.tc_valid_bits;
13246 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
13247 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
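/* The per-TC max quanta values are packed four bits apart in tc_bw_max;
 * the loop below shifts and masks out one value per traffic class.
 */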
13248 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
13249 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
13250 veb->bw_tc_limit_credits[i] =
13251 le16_to_cpu(bw_data.tc_bw_limits[i]);
13252 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
13260 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
13261 * @pf: board private structure
13263 * On error: returns error code (negative)
13264 * On success: returns the VEB index in the PF (positive) */
13266 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
13269 struct i40e_veb *veb;
13272 /* Need to protect the allocation of switch elements at the PF level */
13273 mutex_lock(&pf->switch_mutex);
13275 /* VEB list may be fragmented if VEB creation/destruction has
13276 * been happening. We can afford to do a quick scan to look
13277 * for any free slots in the list.
13279 * find next empty veb slot, looping back around if necessary */
13282 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
13284 if (i >= I40E_MAX_VEB) {
13286 goto err_alloc_veb; /* out of VEB slots! */
13289 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
13292 goto err_alloc_veb;
13296 veb->enabled_tc = 1;
13301 mutex_unlock(&pf->switch_mutex);
13306 * i40e_switch_branch_release - Delete a branch of the switch tree
13307 * @branch: where to start deleting
13309 * This uses recursion to find the tips of the branch to be
13310 * removed, deleting until we get back to and can delete this VEB.
13312 static void i40e_switch_branch_release(struct i40e_veb *branch)
13314 struct i40e_pf *pf = branch->pf;
13315 u16 branch_seid = branch->seid;
13316 u16 veb_idx = branch->idx;
13319 /* release any VEBs on this VEB - RECURSION */
13320 for (i = 0; i < I40E_MAX_VEB; i++) {
13323 if (pf->veb[i]->uplink_seid == branch->seid)
13324 i40e_switch_branch_release(pf->veb[i]);
13327 /* Release the VSIs on this VEB, but not the owner VSI.
13329 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
13330 * the VEB itself, so don't use (*branch) after this loop. */
13332 for (i = 0; i < pf->num_alloc_vsi; i++) {
13335 if (pf->vsi[i]->uplink_seid == branch_seid &&
13336 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13337 i40e_vsi_release(pf->vsi[i]);
13341 /* There's one corner case where the VEB might not have been
13342 * removed, so double check it here and remove it if needed.
13343 * This case happens if the veb was created from the debugfs
13344 * commands and no VSIs were added to it. */
13346 if (pf->veb[veb_idx])
13347 i40e_veb_release(pf->veb[veb_idx]);
13351 * i40e_veb_clear - remove veb struct
13352 * @veb: the veb to remove
13354 static void i40e_veb_clear(struct i40e_veb *veb)
13360 struct i40e_pf *pf = veb->pf;
13362 mutex_lock(&pf->switch_mutex);
13363 if (pf->veb[veb->idx] == veb)
13364 pf->veb[veb->idx] = NULL;
13365 mutex_unlock(&pf->switch_mutex);
13372 * i40e_veb_release - Delete a VEB and free its resources
13373 * @veb: the VEB being removed
13375 void i40e_veb_release(struct i40e_veb *veb)
13377 struct i40e_vsi *vsi = NULL;
13378 struct i40e_pf *pf;
13383 /* find the remaining VSI and check for extras */
13384 for (i = 0; i < pf->num_alloc_vsi; i++) {
13385 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
13391 dev_info(&pf->pdev->dev,
13392 "can't remove VEB %d with %d VSIs left\n",
13397 /* move the remaining VSI to uplink veb */
13398 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
13399 if (veb->uplink_seid) {
13400 vsi->uplink_seid = veb->uplink_seid;
13401 if (veb->uplink_seid == pf->mac_seid)
13402 vsi->veb_idx = I40E_NO_VEB;
13404 vsi->veb_idx = veb->veb_idx;
13407 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
13408 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
13411 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13412 i40e_veb_clear(veb);
13416 * i40e_add_veb - create the VEB in the switch
13417 * @veb: the VEB to be instantiated
13418 * @vsi: the controlling VSI
13420 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
13422 struct i40e_pf *pf = veb->pf;
13423 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
13426 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
13427 veb->enabled_tc, false,
13428 &veb->seid, enable_stats, NULL);
13430 /* get a VEB from the hardware */
13432 dev_info(&pf->pdev->dev,
13433 "couldn't add VEB, err %s aq_err %s\n",
13434 i40e_stat_str(&pf->hw, ret),
13435 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13439 /* get statistics counter */
13440 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
13441 &veb->stats_idx, NULL, NULL, NULL);
13443 dev_info(&pf->pdev->dev,
13444 "couldn't get VEB statistics idx, err %s aq_err %s\n",
13445 i40e_stat_str(&pf->hw, ret),
13446 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13449 ret = i40e_veb_get_bw_info(veb);
13451 dev_info(&pf->pdev->dev,
13452 "couldn't get VEB bw info, err %s aq_err %s\n",
13453 i40e_stat_str(&pf->hw, ret),
13454 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13455 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13459 vsi->uplink_seid = veb->seid;
13460 vsi->veb_idx = veb->idx;
13461 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13467 * i40e_veb_setup - Set up a VEB
13468 * @pf: board private structure
13469 * @flags: VEB setup flags
13470 * @uplink_seid: the switch element to link to
13471 * @vsi_seid: the initial VSI seid
13472 * @enabled_tc: Enabled TC bit-map
13474 * This allocates the sw VEB structure and links it into the switch
13475 * It is possible and legal for this to be a duplicate of an already
13476 * existing VEB. It is also possible for both uplink and vsi seids
13477 * to be zero, in order to create a floating VEB.
13479 * Returns pointer to the successfully allocated VEB sw struct on
13480 * success, otherwise returns NULL on failure.
13482 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
13483 u16 uplink_seid, u16 vsi_seid,
13486 struct i40e_veb *veb, *uplink_veb = NULL;
13487 int vsi_idx, veb_idx;
13490 /* if one seid is 0, the other must be 0 to create a floating relay */
13491 if ((uplink_seid == 0 || vsi_seid == 0) &&
13492 (uplink_seid + vsi_seid != 0)) {
13493 dev_info(&pf->pdev->dev,
13494 "one, not both seid's are 0: uplink=%d vsi=%d\n",
13495 uplink_seid, vsi_seid);
13499 /* make sure there is such a vsi and uplink */
13500 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
13501 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
13503 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
13504 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
13509 if (uplink_seid && uplink_seid != pf->mac_seid) {
13510 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
13511 if (pf->veb[veb_idx] &&
13512 pf->veb[veb_idx]->seid == uplink_seid) {
13513 uplink_veb = pf->veb[veb_idx];
13518 dev_info(&pf->pdev->dev,
13519 "uplink seid %d not found\n", uplink_seid);
13524 /* get veb sw struct */
13525 veb_idx = i40e_veb_mem_alloc(pf);
13528 veb = pf->veb[veb_idx];
13529 veb->flags = flags;
13530 veb->uplink_seid = uplink_seid;
13531 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
13532 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
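/* Default to TC0 only (bit 0) when the caller did not request a
 * specific TC bit-map.
 */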
13534 /* create the VEB in the switch */
13535 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
13538 if (vsi_idx == pf->lan_vsi)
13539 pf->lan_veb = veb->idx;
13544 i40e_veb_clear(veb);
13550 * i40e_setup_pf_switch_element - set PF vars based on switch type
13551 * @pf: board private structure
13552 * @ele: element we are building info from
13553 * @num_reported: total number of elements
13554 * @printconfig: should we print the contents
13556 * helper function to assist in extracting a few useful SEID values.
13558 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
13559 struct i40e_aqc_switch_config_element_resp *ele,
13560 u16 num_reported, bool printconfig)
13562 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
13563 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
13564 u8 element_type = ele->element_type;
13565 u16 seid = le16_to_cpu(ele->seid);
13568 dev_info(&pf->pdev->dev,
13569 "type=%d seid=%d uplink=%d downlink=%d\n",
13570 element_type, seid, uplink_seid, downlink_seid);
13572 switch (element_type) {
13573 case I40E_SWITCH_ELEMENT_TYPE_MAC:
13574 pf->mac_seid = seid;
13576 case I40E_SWITCH_ELEMENT_TYPE_VEB:
13578 if (uplink_seid != pf->mac_seid)
13580 if (pf->lan_veb >= I40E_MAX_VEB) {
13583 /* find existing or else empty VEB */
13584 for (v = 0; v < I40E_MAX_VEB; v++) {
13585 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
13590 if (pf->lan_veb >= I40E_MAX_VEB) {
13591 v = i40e_veb_mem_alloc(pf);
13597 if (pf->lan_veb >= I40E_MAX_VEB)
13600 pf->veb[pf->lan_veb]->seid = seid;
13601 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
13602 pf->veb[pf->lan_veb]->pf = pf;
13603 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
13605 case I40E_SWITCH_ELEMENT_TYPE_VSI:
13606 if (num_reported != 1)
13608 /* This is immediately after a reset so we can assume this is the PF's VSI. */
13611 pf->mac_seid = uplink_seid;
13612 pf->pf_seid = downlink_seid;
13613 pf->main_vsi_seid = seid;
13615 dev_info(&pf->pdev->dev,
13616 "pf_seid=%d main_vsi_seid=%d\n",
13617 pf->pf_seid, pf->main_vsi_seid);
13619 case I40E_SWITCH_ELEMENT_TYPE_PF:
13620 case I40E_SWITCH_ELEMENT_TYPE_VF:
13621 case I40E_SWITCH_ELEMENT_TYPE_EMP:
13622 case I40E_SWITCH_ELEMENT_TYPE_BMC:
13623 case I40E_SWITCH_ELEMENT_TYPE_PE:
13624 case I40E_SWITCH_ELEMENT_TYPE_PA:
13625 /* ignore these for now */
13628 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
13629 element_type, seid);
13635 * i40e_fetch_switch_configuration - Get switch config from firmware
13636 * @pf: board private structure
13637 * @printconfig: should we print the contents
13639 * Get the current switch configuration from the device and
13640 * extract a few useful SEID values.
13642 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
13644 struct i40e_aqc_get_switch_config_resp *sw_config;
13650 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
13654 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
13656 u16 num_reported, num_total;
13658 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
13662 dev_info(&pf->pdev->dev,
13663 "get switch config failed err %s aq_err %s\n",
13664 i40e_stat_str(&pf->hw, ret),
13665 i40e_aq_str(&pf->hw,
13666 pf->hw.aq.asq_last_status));
13671 num_reported = le16_to_cpu(sw_config->header.num_reported);
13672 num_total = le16_to_cpu(sw_config->header.num_total);
13675 dev_info(&pf->pdev->dev,
13676 "header: %d reported %d total\n",
13677 num_reported, num_total);
13679 for (i = 0; i < num_reported; i++) {
13680 struct i40e_aqc_switch_config_element_resp *ele =
13681 &sw_config->element[i];
13683 i40e_setup_pf_switch_element(pf, ele, num_reported,
13686 } while (next_seid != 0);
13693 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
13694 * @pf: board private structure
13695 * @reinit: if the Main VSI needs to be re-initialized.
13697 * Returns 0 on success, negative value on failure
13699 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
13704 /* find out what's out there already */
13705 ret = i40e_fetch_switch_configuration(pf, false);
13707 dev_info(&pf->pdev->dev,
13708 "couldn't fetch switch config, err %s aq_err %s\n",
13709 i40e_stat_str(&pf->hw, ret),
13710 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13713 i40e_pf_reset_stats(pf);
13715 /* set the switch config bit for the whole device to
13716 * support limited promisc or true promisc
13717 * when user requests promisc. The default is limited promisc. */
13721 if ((pf->hw.pf_id == 0) &&
13722 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
13723 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
13724 pf->last_sw_conf_flags = flags;
13727 if (pf->hw.pf_id == 0) {
13730 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
13731 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
13733 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
13734 dev_info(&pf->pdev->dev,
13735 "couldn't set switch config bits, err %s aq_err %s\n",
13736 i40e_stat_str(&pf->hw, ret),
13737 i40e_aq_str(&pf->hw,
13738 pf->hw.aq.asq_last_status));
13739 /* not a fatal problem, just keep going */
13741 pf->last_sw_conf_valid_flags = valid_flags;
13744 /* first time setup */
13745 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
13746 struct i40e_vsi *vsi = NULL;
13749 /* Set up the PF VSI associated with the PF's main VSI
13750 * that is already in the HW switch. */
13752 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
13753 uplink_seid = pf->veb[pf->lan_veb]->seid;
13755 uplink_seid = pf->mac_seid;
13756 if (pf->lan_vsi == I40E_NO_VSI)
13757 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
13759 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
13761 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
13762 i40e_cloud_filter_exit(pf);
13763 i40e_fdir_teardown(pf);
13767 /* force a reset of TC and queue layout configurations */
13768 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13770 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13771 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13772 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13774 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
13776 i40e_fdir_sb_setup(pf);
13778 /* Setup static PF queue filter control settings */
13779 ret = i40e_setup_pf_filter_control(pf);
13781 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
13783 /* Failure here should not stop continuing other steps */
13786 /* enable RSS in the HW, even for only one queue, as the stack can use the hash */
13789 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
13790 i40e_pf_config_rss(pf);
13792 /* fill in link information and enable LSE reporting */
13793 i40e_link_event(pf);
13795 /* Initialize user-specific link properties */
13796 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
13797 I40E_AQ_AN_COMPLETED) ? true : false);
13801 /* repopulate tunnel port filters */
13802 i40e_sync_udp_filters(pf);
13808 * i40e_determine_queue_usage - Work out queue distribution
13809 * @pf: board private structure
13811 static void i40e_determine_queue_usage(struct i40e_pf *pf)
13816 pf->num_lan_qps = 0;
13818 /* Find the max queues to be put into basic use. We'll always be
13819 * using TC0, whether or not DCB is running, and TC0 will get the bulk of the queues. */
13822 queues_left = pf->hw.func_caps.num_tx_qp;
13824 if ((queues_left == 1) ||
13825 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
13826 /* one qp for PF, no queues for anything else */
13828 pf->alloc_rss_size = pf->num_lan_qps = 1;
13830 /* make sure all the fancies are disabled */
13831 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
13832 I40E_FLAG_IWARP_ENABLED |
13833 I40E_FLAG_FD_SB_ENABLED |
13834 I40E_FLAG_FD_ATR_ENABLED |
13835 I40E_FLAG_DCB_CAPABLE |
13836 I40E_FLAG_DCB_ENABLED |
13837 I40E_FLAG_SRIOV_ENABLED |
13838 I40E_FLAG_VMDQ_ENABLED);
13839 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13840 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
13841 I40E_FLAG_FD_SB_ENABLED |
13842 I40E_FLAG_FD_ATR_ENABLED |
13843 I40E_FLAG_DCB_CAPABLE))) {
13844 /* one qp for PF */
13845 pf->alloc_rss_size = pf->num_lan_qps = 1;
13846 queues_left -= pf->num_lan_qps;
13848 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
13849 I40E_FLAG_IWARP_ENABLED |
13850 I40E_FLAG_FD_SB_ENABLED |
13851 I40E_FLAG_FD_ATR_ENABLED |
13852 I40E_FLAG_DCB_ENABLED |
13853 I40E_FLAG_VMDQ_ENABLED);
13854 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13856 /* Not enough queues for all TCs */
13857 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
13858 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
13859 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
13860 I40E_FLAG_DCB_ENABLED);
13861 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
13864 /* limit lan qps to the smaller of qps, cpus or msix */
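/* Start from the larger of the RSS size limit and the number of online
 * CPUs, then clamp to the Tx queues and MSI-X vectors the hardware
 * actually provides.
 */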
13865 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
13866 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
13867 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
13868 pf->num_lan_qps = q_max;
13870 queues_left -= pf->num_lan_qps;
13873 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13874 if (queues_left > 1) {
13875 queues_left -= 1; /* save 1 queue for FD */
13877 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
13878 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13879 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
13883 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
13884 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
13885 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
13886 (queues_left / pf->num_vf_qps));
13887 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
13890 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
13891 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
13892 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
13893 (queues_left / pf->num_vmdq_qps));
13894 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
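/* Whatever remains after the LAN, Flow Director, VF and VMDq
 * reservations above stays in the general pool and is recorded for
 * later use.
 */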
13897 pf->queues_left = queues_left;
13898 dev_dbg(&pf->pdev->dev,
13899 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
13900 pf->hw.func_caps.num_tx_qp,
13901 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
13902 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
13903 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
13908 * i40e_setup_pf_filter_control - Setup PF static filter control
13909 * @pf: PF to be setup
13911 * i40e_setup_pf_filter_control sets up a PF's initial filter control
13912 * settings. If PE/FCoE are enabled then it will also set the per PF
13913 * based filter sizes required for them. It also enables Flow director,
13914 * ethertype and macvlan type filter settings for the pf.
13916 * Returns 0 on success, negative on failure
13918 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
13920 struct i40e_filter_control_settings *settings = &pf->filter_settings;
13922 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
13924 /* Flow Director is enabled */
13925 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
13926 settings->enable_fdir = true;
13928 /* Ethtype and MACVLAN filters enabled for PF */
13929 settings->enable_ethtype = true;
13930 settings->enable_macvlan = true;
13932 if (i40e_set_filter_control(&pf->hw, settings))
13938 #define INFO_STRING_LEN 255
13939 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
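/* REMAIN() reports how much room is left in the features string so each
 * snprintf() below can append without overrunning the buffer.
 */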
13940 static void i40e_print_features(struct i40e_pf *pf)
13942 struct i40e_hw *hw = &pf->hw;
13946 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
13950 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
13951 #ifdef CONFIG_PCI_IOV
13952 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
13954 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
13955 pf->hw.func_caps.num_vsis,
13956 pf->vsi[pf->lan_vsi]->num_queue_pairs);
13957 if (pf->flags & I40E_FLAG_RSS_ENABLED)
13958 i += snprintf(&buf[i], REMAIN(i), " RSS");
13959 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
13960 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
13961 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13962 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
13963 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
13965 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
13966 i += snprintf(&buf[i], REMAIN(i), " DCB");
13967 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
13968 i += snprintf(&buf[i], REMAIN(i), " Geneve");
13969 if (pf->flags & I40E_FLAG_PTP)
13970 i += snprintf(&buf[i], REMAIN(i), " PTP");
13971 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
13972 i += snprintf(&buf[i], REMAIN(i), " VEB");
13974 i += snprintf(&buf[i], REMAIN(i), " VEPA");
13976 dev_info(&pf->pdev->dev, "%s\n", buf);
13978 WARN_ON(i > INFO_STRING_LEN);
13982 * i40e_get_platform_mac_addr - get platform-specific MAC address
13983 * @pdev: PCI device information struct
13984 * @pf: board private structure
13986 * Look up the MAC address for the device. First we'll try
13987 * eth_platform_get_mac_address, which will check Open Firmware, or arch
13988 * specific fallback. Otherwise, we'll default to the stored value in firmware. */
13991 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
13993 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
13994 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
13998 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
13999 * @fec_cfg: FEC option to set in flags
14000 * @flags: ptr to flags in which we set FEC option
14002 void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
14004 if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
14005 *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
14006 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
14007 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
14008 *flags |= I40E_FLAG_RS_FEC;
14009 *flags &= ~I40E_FLAG_BASE_R_FEC;
14011 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
14012 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
14013 *flags |= I40E_FLAG_BASE_R_FEC;
14014 *flags &= ~I40E_FLAG_RS_FEC;
14017 *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
14021 * i40e_check_recovery_mode - check if we are running transition firmware
14022 * @pf: board private structure
14024 * Check registers indicating the firmware runs in recovery mode. Sets the
14025 * appropriate driver state.
14027 * Returns true if the recovery mode was detected, false otherwise
14029 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
14031 u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
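/* A non-zero FWS1B field in GL_FWSTS means the device is running the
 * transition (recovery) firmware rather than a normal image.
 */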
14033 if (val & I40E_GL_FWSTS_FWS1B_MASK) {
14034 dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
14035 dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
14036 set_bit(__I40E_RECOVERY_MODE, pf->state);
14040 if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state))
14041 dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n");
14047 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
14048 * @pf: board private structure
14049 * @hw: ptr to the hardware info
14051 * This function does a minimal setup of all subsystems needed for running recovery mode.
14054 * Returns 0 on success, negative on failure */
14056 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
14058 struct i40e_vsi *vsi;
14062 pci_save_state(pf->pdev);
14064 /* set up periodic task facility */
14065 timer_setup(&pf->service_timer, i40e_service_timer, 0);
14066 pf->service_timer_period = HZ;
14068 INIT_WORK(&pf->service_task, i40e_service_task);
14069 clear_bit(__I40E_SERVICE_SCHED, pf->state);
14071 err = i40e_init_interrupt_scheme(pf);
14073 goto err_switch_setup;
14075 /* The number of VSIs reported by the FW is the minimum guaranteed
14076 * to us; HW supports far more and we share the remaining pool with
14077 * the other PFs. We allocate space for more than the guarantee with
14078 * the understanding that we might not get them all later. */
14080 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
14081 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
14083 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
14085 /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
14086 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
14090 goto err_switch_setup;
14093 /* We allocate one VSI which is needed as absolute minimum
14094 * in order to register the netdev. */
14096 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
14098 goto err_switch_setup;
14099 pf->lan_vsi = v_idx;
14100 vsi = pf->vsi[v_idx];
14102 goto err_switch_setup;
14103 vsi->alloc_queue_pairs = 1;
14104 err = i40e_config_netdev(vsi);
14106 goto err_switch_setup;
14107 err = register_netdev(vsi->netdev);
14109 goto err_switch_setup;
14110 vsi->netdev_registered = true;
14111 i40e_dbg_pf_init(pf);
14113 err = i40e_setup_misc_vector_for_recovery_mode(pf);
14115 goto err_switch_setup;
14117 /* tell the firmware that we're starting */
14118 i40e_send_version(pf);
14120 /* since everything's happy, start the service_task timer */
14121 mod_timer(&pf->service_timer,
14122 round_jiffies(jiffies + pf->service_timer_period));
14127 i40e_reset_interrupt_capability(pf);
14128 del_timer_sync(&pf->service_timer);
14129 i40e_shutdown_adminq(hw);
14130 iounmap(hw->hw_addr);
14131 pci_disable_pcie_error_reporting(pf->pdev);
14132 pci_release_mem_regions(pf->pdev);
14133 pci_disable_device(pf->pdev);
14140 * i40e_probe - Device initialization routine
14141 * @pdev: PCI device information struct
14142 * @ent: entry in i40e_pci_tbl
14144 * i40e_probe initializes a PF identified by a pci_dev structure.
14145 * The OS initialization, configuring of the PF private structure,
14146 * and a hardware reset occur.
14148 * Returns 0 on success, negative on failure
14150 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
14152 struct i40e_aq_get_phy_abilities_resp abilities;
14153 struct i40e_pf *pf;
14154 struct i40e_hw *hw;
14155 static u16 pfs_found;
14163 err = pci_enable_device_mem(pdev);
14167 /* set up for high or low dma */
14168 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
14170 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
14172 dev_err(&pdev->dev,
14173 "DMA configuration failed: 0x%x\n", err);
14178 /* set up pci connections */
14179 err = pci_request_mem_regions(pdev, i40e_driver_name);
14181 dev_info(&pdev->dev,
14182 "pci_request_selected_regions failed %d\n", err);
14186 pci_enable_pcie_error_reporting(pdev);
14187 pci_set_master(pdev);
14189 /* Now that we have a PCI connection, we need to do the
14190 * low level device setup. This is primarily setting up
14191 * the Admin Queue structures and then querying for the
14192 * device's current profile information. */
14194 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
14201 set_bit(__I40E_DOWN, pf->state);
14206 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
14207 I40E_MAX_CSR_SPACE);
14209 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
14210 if (!hw->hw_addr) {
14212 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
14213 (unsigned int)pci_resource_start(pdev, 0),
14214 pf->ioremap_len, err);
14217 hw->vendor_id = pdev->vendor;
14218 hw->device_id = pdev->device;
14219 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
14220 hw->subsystem_vendor_id = pdev->subsystem_vendor;
14221 hw->subsystem_device_id = pdev->subsystem_device;
14222 hw->bus.device = PCI_SLOT(pdev->devfn);
14223 hw->bus.func = PCI_FUNC(pdev->devfn);
14224 hw->bus.bus_id = pdev->bus->number;
14225 pf->instance = pfs_found;
14227 /* Select something other than the 802.1ad ethertype for the
14228 * switch to use internally and drop on ingress. */
14230 hw->switch_tag = 0xffff;
14231 hw->first_tag = ETH_P_8021AD;
14232 hw->second_tag = ETH_P_8021Q;
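/* 0xffff is a reserved ethertype that should never appear in real
 * traffic, which presumably makes it a safe value for the switch's
 * internal tag alongside the 802.1ad and 802.1Q tags above.
 */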
14234 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
14235 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
14236 INIT_LIST_HEAD(&pf->ddp_old_prof);
14238 /* set up the locks for the AQ, do this only once in probe
14239 * and destroy them only once in remove. */
14241 mutex_init(&hw->aq.asq_mutex);
14242 mutex_init(&hw->aq.arq_mutex);
14244 pf->msg_enable = netif_msg_init(debug,
14249 pf->hw.debug_mask = debug;
14251 /* do a special CORER for clearing PXE mode once at init */
14252 if (hw->revision_id == 0 &&
14253 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
14254 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
14259 i40e_clear_pxe_mode(hw);
14262 /* Reset here to make sure all is clean and to define PF 'n' */
14264 if (!i40e_check_recovery_mode(pf)) {
14265 err = i40e_pf_reset(hw);
14267 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
14272 hw->aq.num_arq_entries = I40E_AQ_LEN;
14273 hw->aq.num_asq_entries = I40E_AQ_LEN;
14274 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14275 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14276 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
14278 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
14280 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
14282 err = i40e_init_shared_code(hw);
14284 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
14289 /* set up a default setting for link flow control */
14290 pf->hw.fc.requested_mode = I40E_FC_NONE;
14292 err = i40e_init_adminq(hw);
14294 if (err == I40E_ERR_FIRMWARE_API_VERSION)
14295 dev_info(&pdev->dev,
14296 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
14297 hw->aq.api_maj_ver,
14298 hw->aq.api_min_ver,
14299 I40E_FW_API_VERSION_MAJOR,
14300 I40E_FW_MINOR_VERSION(hw));
14302 dev_info(&pdev->dev,
14303 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
14307 i40e_get_oem_version(hw);
14309 /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
14310 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
14311 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
14312 hw->aq.api_maj_ver, hw->aq.api_min_ver,
14313 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
14314 hw->subsystem_vendor_id, hw->subsystem_device_id);
14316 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
14317 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
14318 dev_info(&pdev->dev,
14319 "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
14320 hw->aq.api_maj_ver,
14321 hw->aq.api_min_ver,
14322 I40E_FW_API_VERSION_MAJOR,
14323 I40E_FW_MINOR_VERSION(hw));
14324 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
14325 dev_info(&pdev->dev,
14326 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
14327 hw->aq.api_maj_ver,
14328 hw->aq.api_min_ver,
14329 I40E_FW_API_VERSION_MAJOR,
14330 I40E_FW_MINOR_VERSION(hw));
14332 i40e_verify_eeprom(pf);
14334 /* Rev 0 hardware was never productized */
14335 if (hw->revision_id < 1)
14336 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
14338 i40e_clear_pxe_mode(hw);
14340 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
14342 goto err_adminq_setup;
14344 err = i40e_sw_init(pf);
14346 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
14350 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
14351 return i40e_init_recovery_mode(pf, hw);
14353 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
14354 hw->func_caps.num_rx_qp, 0, 0);
14356 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
14357 goto err_init_lan_hmc;
14360 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
14362 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
14364 goto err_configure_lan_hmc;
14367 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
14368 * Ignore error return codes because if it was already disabled via
14369 * hardware settings this will fail. */
14371 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
14372 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
14373 i40e_aq_stop_lldp(hw, true, false, NULL);
14376 /* allow a platform config to override the HW addr */
14377 i40e_get_platform_mac_addr(pdev, pf);
14379 if (!is_valid_ether_addr(hw->mac.addr)) {
14380 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
14384 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
14385 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
14386 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
14387 if (is_valid_ether_addr(hw->mac.port_addr))
14388 pf->hw_features |= I40E_HW_PORT_ID_VALID;
14390 pci_set_drvdata(pdev, pf);
14391 pci_save_state(pdev);
14393 /* Enable FW to write default DCB config on link-up */
14394 i40e_aq_set_dcb_parameters(hw, true, NULL);
14396 #ifdef CONFIG_I40E_DCB
14397 err = i40e_init_pf_dcb(pf);
14399 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
14400 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
14401 /* Continue without DCB enabled */
14403 #endif /* CONFIG_I40E_DCB */
14405 /* set up periodic task facility */
14406 timer_setup(&pf->service_timer, i40e_service_timer, 0);
14407 pf->service_timer_period = HZ;
14409 INIT_WORK(&pf->service_task, i40e_service_task);
14410 clear_bit(__I40E_SERVICE_SCHED, pf->state);
14412 /* NVM bit on means WoL disabled for the port */
14413 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
14414 if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
14415 pf->wol_en = false;
14418 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
14420 /* set up the main switch operations */
14421 i40e_determine_queue_usage(pf);
14422 err = i40e_init_interrupt_scheme(pf);
14424 goto err_switch_setup;
14426 /* The number of VSIs reported by the FW is the minimum guaranteed
14427 * to us; HW supports far more and we share the remaining pool with
14428 * the other PFs. We allocate space for more than the guarantee with
14429 * the understanding that we might not get them all later. */
14431 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
14432 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
14434 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
14436 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
14437 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
14441 goto err_switch_setup;
14444 #ifdef CONFIG_PCI_IOV
14445 /* prep for VF support */
14446 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14447 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
14448 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
14449 if (pci_num_vf(pdev))
14450 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
14453 err = i40e_setup_pf_switch(pf, false);
14455 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
14458 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
14460 /* Make sure flow control is set according to current settings */
14461 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
14462 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
14463 dev_dbg(&pf->pdev->dev,
14464 "Set fc with err %s aq_err %s on get_phy_cap\n",
14465 i40e_stat_str(hw, err),
14466 i40e_aq_str(hw, hw->aq.asq_last_status));
14467 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
14468 dev_dbg(&pf->pdev->dev,
14469 "Set fc with err %s aq_err %s on set_phy_config\n",
14470 i40e_stat_str(hw, err),
14471 i40e_aq_str(hw, hw->aq.asq_last_status));
14472 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
14473 dev_dbg(&pf->pdev->dev,
14474 "Set fc with err %s aq_err %s on get_link_info\n",
14475 i40e_stat_str(hw, err),
14476 i40e_aq_str(hw, hw->aq.asq_last_status));
14478 /* if FDIR VSI was set up, start it now */
14479 for (i = 0; i < pf->num_alloc_vsi; i++) {
14480 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
14481 i40e_vsi_open(pf->vsi[i]);
14486 /* The driver only wants link up/down and module qualification
14487 * reports from firmware. Note the negative logic. */
14489 err = i40e_aq_set_phy_int_mask(&pf->hw,
14490 ~(I40E_AQ_EVENT_LINK_UPDOWN |
14491 I40E_AQ_EVENT_MEDIA_NA |
14492 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
14494 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
14495 i40e_stat_str(&pf->hw, err),
14496 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14498 /* Reconfigure hardware for allowing smaller MSS in the case
14499 * of TSO, so that we avoid the MDD being fired and causing
14500 * a reset in the case of small MSS+TSO. */
14502 val = rd32(hw, I40E_REG_MSS);
14503 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
14504 val &= ~I40E_REG_MSS_MIN_MASK;
14505 val |= I40E_64BYTE_MSS;
14506 wr32(hw, I40E_REG_MSS, val);
14509 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
14511 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
14513 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
14514 i40e_stat_str(&pf->hw, err),
14515 i40e_aq_str(&pf->hw,
14516 pf->hw.aq.asq_last_status));
14518 /* The main driver is (mostly) up and happy. We need to set this state
14519 * before setting up the misc vector or we get a race and the vector
14520 * ends up disabled forever. */
14522 clear_bit(__I40E_DOWN, pf->state);
14524 /* In case of MSIX we are going to setup the misc vector right here
14525 * to handle admin queue events etc. In case of legacy and MSI
14526 * the misc functionality and queue processing is combined in
14527 * the same vector and that gets setup at open. */
14529 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
14530 err = i40e_setup_misc_vector(pf);
14532 dev_info(&pdev->dev,
14533 "setup of misc vector failed: %d\n", err);
14538 #ifdef CONFIG_PCI_IOV
14539 /* prep for VF support */
14540 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14541 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
14542 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
14543 /* disable link interrupts for VFs */
14544 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
14545 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
14546 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
14549 if (pci_num_vf(pdev)) {
14550 dev_info(&pdev->dev,
14551 "Active VFs found, allocating resources.\n");
14552 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
14554 dev_info(&pdev->dev,
14555 "Error %d allocating resources for existing VFs\n",
14559 #endif /* CONFIG_PCI_IOV */
14561 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
14562 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
14563 pf->num_iwarp_msix,
14564 I40E_IWARP_IRQ_PILE_ID);
14565 if (pf->iwarp_base_vector < 0) {
14566 dev_info(&pdev->dev,
14567 "failed to get tracking for %d vectors for IWARP err=%d\n",
14568 pf->num_iwarp_msix, pf->iwarp_base_vector);
14569 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
14573 i40e_dbg_pf_init(pf);
14575 /* tell the firmware that we're starting */
14576 i40e_send_version(pf);
14578 /* since everything's happy, start the service_task timer */
14579 mod_timer(&pf->service_timer,
14580 round_jiffies(jiffies + pf->service_timer_period));
14582 /* add this PF to client device list and launch a client service task */
14583 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
14584 err = i40e_lan_add_device(pf);
14586 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
14590 #define PCI_SPEED_SIZE 8
14591 #define PCI_WIDTH_SIZE 8
14592 /* Devices on the IOSF bus do not have this information
14593 * and will report PCI Gen 1 x 1 by default so don't bother checking. */
14596 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
14597 char speed[PCI_SPEED_SIZE] = "Unknown";
14598 char width[PCI_WIDTH_SIZE] = "Unknown";
14600 /* Get the negotiated link width and speed from PCI config space */
14603 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
14606 i40e_set_pci_config_data(hw, link_status);
14608 switch (hw->bus.speed) {
14609 case i40e_bus_speed_8000:
14610 strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
14611 case i40e_bus_speed_5000:
14612 strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
14613 case i40e_bus_speed_2500:
14614 strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
14618 switch (hw->bus.width) {
14619 case i40e_bus_width_pcie_x8:
14620 strlcpy(width, "8", PCI_WIDTH_SIZE); break;
14621 case i40e_bus_width_pcie_x4:
14622 strlcpy(width, "4", PCI_WIDTH_SIZE); break;
14623 case i40e_bus_width_pcie_x2:
14624 strlcpy(width, "2", PCI_WIDTH_SIZE); break;
14625 case i40e_bus_width_pcie_x1:
14626 strlcpy(width, "1", PCI_WIDTH_SIZE); break;
14631 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
14634 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
14635 hw->bus.speed < i40e_bus_speed_8000) {
14636 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
14637 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
14641 /* get the requested speeds from the fw */
14642 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
14644 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
14645 i40e_stat_str(&pf->hw, err),
14646 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14647 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
14649 /* set the FEC config due to the board capabilities */
14650 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
14652 /* get the supported phy types from the fw */
14653 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
14655 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
14656 i40e_stat_str(&pf->hw, err),
14657 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14659 /* Add a filter to drop all Flow control frames from any VSI from being
14660 * transmitted. By doing so we stop a malicious VF from sending out
14661 * PAUSE or PFC frames and potentially controlling traffic for other VF/VM.
14663 * The FW can still send Flow control frames if enabled. */
14665 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
14666 pf->main_vsi_seid);
14668 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
14669 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
14670 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
14671 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
14672 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
14673 /* print a string summarizing features */
14674 i40e_print_features(pf);
14678 /* Unwind what we've done if something failed in the setup */
14680 set_bit(__I40E_DOWN, pf->state);
14681 i40e_clear_interrupt_scheme(pf);
14684 i40e_reset_interrupt_capability(pf);
14685 del_timer_sync(&pf->service_timer);
14687 err_configure_lan_hmc:
14688 (void)i40e_shutdown_lan_hmc(hw);
14690 kfree(pf->qp_pile);
14694 iounmap(hw->hw_addr);
14698 pci_disable_pcie_error_reporting(pdev);
14699 pci_release_mem_regions(pdev);
14702 pci_disable_device(pdev);
14707 * i40e_remove - Device removal routine
14708 * @pdev: PCI device information struct
14710 * i40e_remove is called by the PCI subsystem to alert the driver
14711 * that it should release a PCI device. This could be caused by a
14712 * Hot-Plug event, or because the driver is going to be removed from memory. */
14715 static void i40e_remove(struct pci_dev *pdev)
14717 struct i40e_pf *pf = pci_get_drvdata(pdev);
14718 struct i40e_hw *hw = &pf->hw;
14719 i40e_status ret_code;
14722 i40e_dbg_pf_exit(pf);
14726 /* Disable RSS in hw */
14727 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
14728 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
14730 /* no more scheduling of any task */
14731 set_bit(__I40E_SUSPENDED, pf->state);
14732 set_bit(__I40E_DOWN, pf->state);
14733 if (pf->service_timer.function)
14734 del_timer_sync(&pf->service_timer);
14735 if (pf->service_task.func)
14736 cancel_work_sync(&pf->service_task);
14738 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
14739 struct i40e_vsi *vsi = pf->vsi[0];
14741 /* We know that we have allocated only one vsi for this PF,
14742 * it was just for registering netdevice, so the interface
14743 * could be visible in the 'ifconfig' output. */
14745 unregister_netdev(vsi->netdev);
14746 free_netdev(vsi->netdev);
14751 /* Client close must be called explicitly here because the timer
14752 * has been stopped. */
14754 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
14756 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
14758 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
14761 i40e_fdir_teardown(pf);
14763 /* If there is a switch structure or any orphans, remove them.
14764 * This will leave only the PF's VSI remaining. */
14766 for (i = 0; i < I40E_MAX_VEB; i++) {
14770 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
14771 pf->veb[i]->uplink_seid == 0)
14772 i40e_switch_branch_release(pf->veb[i]);
14775 /* Now we can shutdown the PF's VSI, just before we kill the adminq and HMC. */
14778 if (pf->vsi[pf->lan_vsi])
14779 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
14781 i40e_cloud_filter_exit(pf);
14783 /* remove attached clients */
14784 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
14785 ret_code = i40e_lan_del_device(pf);
14787 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
14791 /* shutdown and destroy the HMC */
14792 if (hw->hmc.hmc_obj) {
14793 ret_code = i40e_shutdown_lan_hmc(hw);
14795 dev_warn(&pdev->dev,
14796 "Failed to destroy the HMC resources: %d\n",

	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
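		/* Rings are only allocated when the PF is not in recovery
		 * mode, so skip clearing them in that case.
		 */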
		if (pf->vsi[i]) {
			if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
				i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	iounmap(hw->hw_addr);
	pci_release_mem_regions(pdev);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress. Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf, false);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset. If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);
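
		/* A clean (zero) read of the global reset trigger register
		 * confirms that MMIO access works again and that no global
		 * reset is still pending, so the device can be reported as
		 * recovered.
		 */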
		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	return result;
}

/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf, false);
}

/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_reset_and_rebuild(pf, false, false);
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;
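
	/* Kick off a full reset and rebuild now that PCI error recovery
	 * has finished.
	 */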
	i40e_handle_reset_warning(pf, false);
}

/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[ETH_ALEN];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
		return;
	}
	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);
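
	/* Arm or disarm the APM and magic packet wake-up filters according
	 * to the user's WoL setting before powering the device down.
	 */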
	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * flow.
	 */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();
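
	/* If the system is powering off rather than rebooting, arm PCI
	 * wake-up as requested and drop the device into D3.
	 */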
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * flow.
	 */
	rtnl_lock();

	i40e_prep_for_reset(pf, true);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();

	return 0;
}

/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
	 * since we're going to be restoring queues
	 */
	rtnl_lock();

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err)
		dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
			err);

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, true);
	rtnl_unlock();

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};
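
/* i40e_suspend/i40e_resume handle all system sleep transitions
 * (suspend-to-RAM as well as hibernation's freeze/thaw and
 * poweroff/restore phases).
 */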
static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
	.driver   = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
}
module_exit(i40e_exit_module);