// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ice.h"

#define DRV_VERSION	"0.7.1-k"
#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
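
/* Usage sketch (illustrative, not from the source): with dynamic debug
 * compiled out, "modprobe ice debug=16" would enable all netif message
 * levels, while an 0x8XXXXXXX value programs the hw debug_mask instead.
 */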

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;

static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);
static int ice_vsi_release(struct ice_vsi *vsi);
static void ice_vsi_release_all(struct ice_pf *pf);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u32 ice_get_tx_pending(struct ice_ring *ring)
{
	u32 head, tail;

	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
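
/* Worked example (illustrative): with ring->count = 512, head = 500 and
 * tail = 10, the tail has wrapped past the end of the ring, so the number
 * of pending descriptors is 10 + 512 - 500 = 22.
 */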

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	unsigned int i;
	u32 v, v_idx;
	int packets;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			int itr = ICE_ITR_NONE;

			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				v_idx = tx_ring->q_vector->v_idx;
				wr32(&vsi->back->hw,
				     GLINT_DYN_CTL(vsi->base_vector + v_idx),
				     (itr << GLINT_DYN_CTL_ITR_INDX_S) |
				     GLINT_DYN_CTL_SWINT_TRIG_M |
				     GLINT_DYN_CTL_INTENA_MSK_M);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_get_free_slot - get the next free (NULL) location index in the array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use
 * this function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}
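
/* Usage sketch (illustrative): for array = {A, B, NULL, C} and curr = 1 the
 * hint hits immediately and index 2 is returned; for curr = 3 (the last
 * index) the linear scan runs from the start and also returns 2. If no
 * entry is NULL, ICE_NO_VSI is returned.
 */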

/**
 * ice_search_res - Search the tracker for a block of resources
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or -ENOMEM for error
 */
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
	int start = res->search_hint;
	int end = start;

	id |= ICE_RES_VALID_BIT;

	do {
		/* skip already allocated entries */
		if (res->list[end++] & ICE_RES_VALID_BIT) {
			start = end;
			if ((start + needed) > res->num_entries)
				break;
		}

		if (end == (start + needed)) {
			int i = start;

			/* there was enough, so assign it to the requestor */
			while (i != end)
				res->list[i++] = id;

			if (end == res->num_entries)
				end = 0;

			res->search_hint = end;
			return start;
		}
	} while (1);

	return -ENOMEM;
}
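
/* First-fit sketch (illustrative): with list = {used, free, free, free},
 * search_hint = 0 and needed = 2, the scan skips entry 0, claims entries
 * 1-2 for the requestor, stores search_hint = 3 and returns base index 1.
 */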

/**
 * ice_get_res - get a block of resources
 * @pf: board private structure
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or -ENOMEM for error
 * The search_hint trick and lack of advanced fit-finding only works
 * because we're highly likely to have all the same sized requests.
 * Linear search time and any fragmentation should be minimal.
 */
static int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
	int ret;

	if (!res || !pf)
		return -EINVAL;

	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
		dev_err(&pf->pdev->dev,
			"param err: needed=%d, num_entries = %d id=0x%04x\n",
			needed, res->num_entries, id);
		return -EINVAL;
	}

	/* search based on search_hint */
	ret = ice_search_res(res, needed, id);

	if (ret < 0) {
		/* previous search failed. Reset search hint and try again */
		res->search_hint = 0;
		ret = ice_search_res(res, needed, id);
	}

	return ret;
}

/**
 * ice_free_res - free a block of resources
 * @res: pointer to the resource
 * @index: starting index previously returned by ice_get_res
 * @id: identifier to track owner
 *
 * Returns number of resources freed
 */
static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
	int count = 0;
	int i;

	if (!res || index >= res->num_entries)
		return -EINVAL;

	id |= ICE_RES_VALID_BIT;
	/* clear entries as long as they are owned by this id */
	for (i = index; i < res->num_entries && res->list[i] == id; i++) {
		res->list[i] = 0;
		count++;
	}

	return count;
}

/**
 * ice_add_mac_to_list - Add a MAC address filter entry to the list
 * @vsi: the VSI to be forwarded to
 * @add_list: pointer to the list which contains MAC filter entries
 * @macaddr: the MAC address to be added
 *
 * Adds MAC address filter entry to the temp list
 *
 * Returns 0 on success or -ENOMEM on failure.
 */
static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
			       const u8 *macaddr)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;

	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src = vsi->vsi_num;
	tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
	ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, add_list);

	return 0;
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_free_fltr_list - free filter lists helper
 * @dev: pointer to the device struct
 * @h: pointer to the list head to be freed
 *
 * Helper function to free filter lists previously created using
 * ice_add_mac_to_list
 */
static void ice_free_fltr_list(struct device *dev, struct list_head *h)
{
	struct ice_fltr_list_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, h, list_entry) {
		list_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}

/**
 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
 * @vsi: VSI to enable or disable VLAN pruning on
 * @ena: set to true to enable VLAN pruning and false to disable it
 *
 * returns 0 if VSI is updated, negative otherwise
 */
static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
{
	struct ice_vsi_ctx *ctxt;
	struct device *dev;
	enum ice_status status;

	if (!vsi)
		return -EINVAL;

	dev = &vsi->back->pdev->dev;
	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

	if (ena) {
		ctxt->info.sec_flags |=
			ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
			ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
		ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	} else {
		ctxt->info.sec_flags &=
			~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
			  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
		ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
						ICE_AQ_VSI_PROP_SW_VALID);
	ctxt->vsi_num = vsi->vsi_num;
	status = ice_aq_update_vsi(&vsi->back->hw, ctxt, NULL);
	if (status) {
		netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI %d failed, err = %d, aq_err = %d\n",
			   ena ? "En" : "Dis", vsi->vsi_num, status,
			   vsi->back->hw.adminq.sq_last_status);
		goto err_out;
	}

	vsi->info.sec_flags = ctxt->info.sec_flags;
	vsi->info.sw_flags2 = ctxt->info.sw_flags2;

	devm_kfree(dev, ctxt);
	return 0;

err_out:
	devm_kfree(dev, ctxt);
	return -EIO;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
	ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add MAC addresses in the sync list */
	status = ice_add_mac(hw, &vsi->tmp_sync_list);
	ice_free_fltr_list(dev, &vsi->tmp_sync_list);
	if (status) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev,
				    "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI)
		netdev_warn(netdev, "Unsupported configuration\n");

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply TX filter rule to get traffic from VMs */
			status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
						  ICE_FLTR_TX);
			if (status) {
				netdev_err(netdev, "Error setting default VSI %i tx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
			/* Apply RX filter rule to get traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error setting default VSI %i rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		} else {
			/* Clear TX filter rule to stop traffic from VMs */
			status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
						  ICE_FLTR_TX);
			if (status) {
				netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
			/* Clear RX filter to remove traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
exit:
	clear_bit(__ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	for (v = 0; v < pf->num_alloc_vsi; v++)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_is_reset_recovery_pending - check if a reset recovery is pending
 * @state: pf state field
 */
static bool ice_is_reset_recovery_pending(unsigned long int *state)
{
	return test_bit(__ICE_RESET_RECOVERY_PENDING, state);
}

/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf);

	ice_shutdown_all_ctrlq(hw);

	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: the reset type requested
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = &pf->pdev->dev;
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
	WARN_ON(in_interrupt());

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. Set pending bit here which otherwise gets set in the
	 * OICR handler.
	 */
	if (reset_type == ICE_RESET_PFR)
		set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(__ICE_RESET_FAILED, pf->state);
		clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf);
		clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and __ICE_RESET_RECOVERY_PENDING. So, if the latter bit is set,
	 * prepare for the pending reset if not already prepared (for PF
	 * software-initiated global resets the software should already be
	 * prepared for it as indicated by __ICE_PREPARED_FOR_RESET; for global
	 * resets initiated by firmware or software on other PFs, that bit is
	 * not set so prepare for the reset now), poll for reset done, rebuild
	 * and return.
	 */
	if (ice_is_reset_recovery_pending(pf->state)) {
		clear_bit(__ICE_GLOBR_RECV, pf->state);
		clear_bit(__ICE_CORER_RECV, pf->state);
		if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
			ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(__ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		}
		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_and_clear_bit(__ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_and_clear_bit(__ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__ICE_DOWN, pf->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	const char *speed;
	const char *fc;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "RX/TX";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "TX";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "Unknown";
		break;
	}

	netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
		    speed, fc);
}
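
/* Example output (illustrative): a 40 Gb link with full flow control logs
 * "NIC Link is up 40 Gbps, Flow Control: RX/TX".
 */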

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw),
			"Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw),
			"Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (!vsi->netdev) {
			dev_dbg(&vsi->back->pdev->dev,
				"vsi->netdev is not initialized!\n");
			return;
		}
		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_link_event - process the link event
 * @pf: pf that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 *
 * Returns -EIO if ice_get_link_status() fails
 * Returns 0 on success
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
{
	u8 new_link_speed, old_link_speed;
	struct ice_phy_info *phy_info;
	bool new_link_same_as_old;
	bool new_link, old_link;
	u8 lport;
	u16 v;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;
	/* Force ice_get_link_status() to update link info */
	phy_info->get_link_info = true;

	old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	lport = pi->lport;
	if (ice_get_link_status(pi, &new_link)) {
		dev_dbg(&pf->pdev->dev,
			"Could not get link status for port %d\n", lport);
		return -EIO;
	}

	new_link_speed = phy_info->link_info.link_speed;

	new_link_same_as_old = (new_link == old_link &&
				new_link_speed == old_link_speed);

	ice_for_each_vsi(pf, v) {
		struct ice_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->port_info)
			continue;

		if (new_link_same_as_old &&
		    (test_bit(__ICE_DOWN, vsi->state) ||
		    new_link == netif_carrier_ok(vsi->netdev)))
			continue;

		if (vsi->port_info->lport == lport) {
			ice_print_link_msg(vsi, new_link);
			ice_vsi_link_event(vsi, new_link);
		}
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: pf that the link event is associated with
 *
 * Return -EINVAL if port_info is null
 * Return status on success
 */
static int ice_handle_link_event(struct ice_pf *pf)
{
	struct ice_port_info *port_info;
	int status;

	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info);
	if (status)
		dev_dbg(&pf->pdev->dev,
			"Could not process link event, error %d\n", status);

	return status;
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(__ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
			 q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue VF Error detected\n", qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue VF Error detected\n", qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
				     GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(&pf->pdev->dev,
				"%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf))
				dev_err(&pf->pdev->dev,
					"Could not handle link event\n");
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		default:
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	devm_kfree(&pf->pdev->dev, event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}
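
/* Note: the return value is effectively a "budget exhausted" flag - nonzero
 * means descriptors were still pending after ICE_DFLT_IRQ_WORK iterations,
 * so the caller should come back and clean again.
 */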

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}
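
/* Illustration: if next_to_clean is 5 and the masked head register reads 7,
 * entries 5 and 6 are still unprocessed, so the function returns true.
 */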

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
static void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 */
static void ice_service_task_stop(struct ice_pf *pf)
{
	set_bit(__ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool mdd_detected = false;
	u32 reg;

	if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		bool pf_mdd_detected = false;

		reg = rd32(hw, PF_MDET_TX_PQM);
		if (reg & PF_MDET_TX_PQM_VALID_M) {
			wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_TX_TCLAN);
		if (reg & PF_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_RX);
		if (reg & PF_MDET_RX_VALID_M) {
			wr32(hw, PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__ICE_NEEDS_RESTART, pf->state);
			ice_service_task_schedule(pf);
		}
	}

	/* re-enable MDD interrupt cause */
	clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, PFINT_OICR_ENA);
	reg |= PFINT_OICR_MAL_DETECT_M;
	wr32(hw, PFINT_OICR_ENA, reg);
}

/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* subtasks */

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_recovery_pending(pf->state) ||
	    test_bit(__ICE_SUSPENDED, pf->state) ||
	    test_bit(__ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_watchdog_subtask(pf);
	ice_clean_adminq_subtask(pf);

	/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}
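
/* Note: mod_timer(..., jiffies) arms the timer to fire on the very next tick,
 * so the service task is effectively rescheduled immediately when work is
 * still outstanding.
 */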

/**
 * ice_set_ctrlq_len - helper function to set controlq length
 * @hw: pointer to the hw instance
 */
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
}

/**
 * ice_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 */
static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	struct ice_q_vector *q_vector =
		container_of(notify, struct ice_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}

/**
 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 */
static void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val;
	int i;

	/* disable interrupt causation from each queue */
	if (vsi->tx_rings) {
		ice_for_each_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				u16 reg;

				reg = vsi->tx_rings[i]->reg_idx;
				val = rd32(hw, QINT_TQCTL(reg));
				val &= ~QINT_TQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_TQCTL(reg), val);
			}
		}
	}

	if (vsi->rx_rings) {
		ice_for_each_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				u16 reg;

				reg = vsi->rx_rings[i]->reg_idx;
				val = rd32(hw, QINT_RQCTL(reg));
				val &= ~QINT_RQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_RQCTL(reg), val);
			}
		}
	}

	/* disable each interrupt */
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, GLINT_DYN_CTL(i), 0);

		ice_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	}
}

/**
 * ice_vsi_ena_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 */
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		int i;

		for (i = 0; i < vsi->num_q_vectors; i++)
			ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
	}

	ice_flush(hw);
	return 0;
}

/**
 * ice_vsi_delete - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
static void ice_vsi_delete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx ctxt;
	enum ice_status status;

	ctxt.vsi_num = vsi->vsi_num;

	memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));

	status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL);
	if (status)
		dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
			vsi->vsi_num);
}

/**
 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
 * @vsi: the VSI being configured
 * @basename: name for the vector
 */
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = devm_request_irq(&pf->pdev->dev,
				       pf->msix_entries[base + vector].vector,
				       vsi->irq_handler, 0, q_vector->name,
				       q_vector);
		if (err) {
			netdev_err(vsi->netdev,
				   "MSIX request_irq failed, error: %d\n", err);
			goto free_q_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = ice_irq_affinity_notify;
		q_vector->affinity_notify.release = ice_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);

		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_q_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = cap->rss_table_size;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		break;
	}
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, numq_tc;
	u16 pow = 0, max_rss = 0, qcount;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	bool ena_tc0 = false;
	int i;

	/* at least TC0 should be enabled by default */
	if (vsi->tc_cfg.numtc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
			ena_tc0 = true;
	} else {
		ena_tc0 = true;
	}

	if (ena_tc0) {
		vsi->tc_cfg.numtc++;
		vsi->tc_cfg.ena_tc |= 1;
	}

	numq_tc = qcount_rx / vsi->tc_cfg.numtc;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for queue offset for TC0, next 4 bits for
	 * number of queues allocated to TC0. The number of queues is a
	 * power-of-2.
	 *
	 * If TC is not enabled, the queue offset is set to 0, and allocate one
	 * queue; this way, traffic for the given TC will be sent to the
	 * default queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */

	/* qcount will change if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
		if (vsi->type == ICE_VSI_PF)
			max_rss = ICE_MAX_LG_RSS_QS;
		else
			max_rss = ICE_MAX_SMALL_RSS_QS;

		qcount = min_t(int, numq_tc, max_rss);
		qcount = min_t(int, qcount, vsi->rss_size);
	} else {
		qcount = numq_tc;
	}

	/* find the (rounded up) power-of-2 of qcount */
	pow = order_base_2(qcount);

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount = 1;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount = qcount;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
		offset += qcount;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	vsi->num_txq = qcount_tx;
	vsi->num_rxq = offset;

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}
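
/* Layout sketch (illustrative): with offset = 16 and qcount = 8 (pow = 3),
 * qmap packs the queue offset 16 into bits 0-9 and the power-of-2 queue
 * count 3 into the next 4 bits, per the ICE_AQ_VSI_TC_Q_* shifts and masks
 * used above.
 */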

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSIs should be allocated from shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;

	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
	 * packets untagged/tagged.
	 */
	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
				  ICE_AQ_VSI_VLAN_MODE_M) >>
				 ICE_AQ_VSI_VLAN_MODE_S);

	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No outer tag support, so outer_tag_flags remains zero */
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	default:
		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
			       ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi)
{
	struct ice_vsi_ctx ctxt = { 0 };
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int ret = 0;

	switch (vsi->type) {
	case ICE_VSI_PF:
		ctxt.flags = ICE_AQ_VSI_TYPE_PF;
		break;
	default:
		return -ENODEV;
	}

	ice_set_dflt_vsi_ctx(&ctxt);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_set_rss_vsi_ctx(&ctxt, vsi);

	ctxt.info.sw_id = vsi->port_info->sw_id;
	ice_vsi_setup_q_map(vsi, &ctxt);

	ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Add VSI failed, err %d\n", ret);
		return -EIO;
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt.info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt.vsi_num;

	return ret;
}

/**
 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
 * @vsi: the VSI being cleaned up
 */
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u16 vector = vsi->base_vector;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0;
	u32 rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
		wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
			rxq++;
		}
	}

	ice_flush(hw);
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				vsi->tx_rings[i] = NULL;
			}
		}
	}
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->alloc_rxq; i++) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				vsi->rx_rings[i] = NULL;
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	/* Allocate tx_rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;

		vsi->tx_rings[i] = ring;
	}

	/* Allocate rx_rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 */
static void ice_vsi_free_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		int i;

		if (!vsi->q_vectors || !vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;
			int irq_num;

			irq_num = pf->msix_entries[vector].vector;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !(vsi->q_vectors[i]->num_ring_tx ||
			      vsi->q_vectors[i]->num_ring_rx))
				continue;

			/* clear the affinity notifier in the IRQ descriptor */
			irq_set_affinity_notifier(irq_num, NULL);

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(irq_num, NULL);
			synchronize_irq(irq_num);
			devm_free_irq(&pf->pdev->dev, irq_num,
				      vsi->q_vectors[i]);
		}
		ice_vsi_release_msix(vsi);
	}
}

/**
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 */
static void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u16 vector = vsi->base_vector;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0, rxq = 0;
	int i, q, itr;
	u8 itr_gran;

	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		itr_gran = hw->itr_gran_200;

		if (q_vector->num_ring_rx) {
			q_vector->rx.itr =
				ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,
					   itr_gran);
			q_vector->rx.latency_range = ICE_LOW_LATENCY;
		}

		if (q_vector->num_ring_tx) {
			q_vector->tx.itr =
				ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,
					   itr_gran);
			q_vector->tx.latency_range = ICE_LOW_LATENCY;
		}
		wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
		wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);

		/* Both Transmit Queue Interrupt Cause Control register
		 * and Receive Queue Interrupt Cause control register
		 * expects MSIX_INDX field to be the vector index
		 * within the function space and not the absolute
		 * vector index across PF or across device.
		 * For SR-IOV VF VSIs queue vector index always starts
		 * with 1 since first vector index(0) is used for OICR
		 * in VF space. Since VMDq and other PF VSIs are within
		 * the PF function space, use the vector index that is
		 * tracked for this PF.
		 */
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			u32 val;

			itr = ICE_TX_ITR;
			val = QINT_TQCTL_CAUSE_ENA_M |
			      (itr << QINT_TQCTL_ITR_INDX_S) |
			      (vector << QINT_TQCTL_MSIX_INDX_S);
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			u32 val;

			itr = ICE_RX_ITR;
			val = QINT_RQCTL_CAUSE_ENA_M |
			      (itr << QINT_RQCTL_ITR_INDX_S) |
			      (vector << QINT_RQCTL_MSIX_INDX_S);
			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
			rxq++;
		}
	}

	ice_flush(hw);
}

/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}

/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 oicr, ena_mask;

	set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else
			pf->empr_count++;

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 * We also make note of which reset happened so that peer
		 * devices/drivers can be informed.
		 */
		if (!test_and_set_bit(__ICE_RESET_RECOVERY_PENDING,
				      pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(__ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(__ICE_GLOBR_RECV, pf->state);
			else
				set_bit(__ICE_EMPR_RECV, pf->state);

			/* There are couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * __ICE_RESET_RECOVERY_PENDING in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_HMC_ERR_M) {
		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
		dev_dbg(&pf->pdev->dev,
			"HMC Error interrupt - info 0x%x, data 0x%x\n",
			rd32(hw, PFHMC_ERRORINFO),
			rd32(hw, PFHMC_ERRORDATA));
	}

	/* Report and mask off any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
			oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
		if (oicr & (PFINT_OICR_PE_CRITERR_M |
			    PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(__ICE_PFR_REQ, pf->state);
			ice_service_task_schedule(pf);
		}
		ena_mask &= ~oicr;
	}
	ret = IRQ_HANDLED;

	/* re-enable interrupt causes that are not handled during this pass */
	wr32(hw, PFINT_OICR_ENA, ena_mask);
	if (!test_bit(__ICE_DOWN, pf->state)) {
		ice_service_task_schedule(pf);
		ice_irq_dynamic_ena(hw, NULL, NULL);
	}

	return ret;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	int tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assigning remaining rings count to VSIs num queue value */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		int tx_rings_per_v, rx_rings_per_v, q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.ring = NULL;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.ring = NULL;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.ring;
			q_vector->rx.ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}
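
/* Distribution example (illustrative): 8 Rx rings over 3 vectors yields
 * DIV_ROUND_UP splits of 3, 3 and 2 rings as the remaining count shrinks,
 * so no vector carries more than one ring above its fair share.
 */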

/**
 * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;

	switch (vsi->type) {
	case ICE_VSI_PF:
		vsi->alloc_txq = pf->num_lan_tx;
		vsi->alloc_rxq = pf->num_lan_rx;
		vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
		vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
		break;
	default:
		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		break;
	}
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
{
	struct ice_pf *pf = vsi->back;

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				     sizeof(struct ice_ring *), GFP_KERNEL);
	if (!vsi->tx_rings)
		goto err_txrings;

	vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				     sizeof(struct ice_ring *), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rxrings;

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
					      vsi->num_q_vectors,
					      sizeof(struct ice_q_vector *),
					      GFP_KERNEL);
		if (!vsi->q_vectors)
			goto err_vectors;
	}

	return 0;

err_vectors:
	devm_kfree(&pf->pdev->dev, vsi->rx_rings);
err_rxrings:
	devm_kfree(&pf->pdev->dev, vsi->tx_rings);
err_txrings:
	return -ENOMEM;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->type = type;
	vsi->back = pf;
	set_bit(__ICE_DOWN, vsi->state);
	vsi->idx = pf->next_vsi;
	vsi->work_lmt = ICE_DFLT_IRQ_WORK;

	ice_vsi_set_num_qs(vsi);

	switch (vsi->type) {
	case ICE_VSI_PF:
		if (ice_vsi_alloc_arrays(vsi, true))
			goto err_rings;

		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		goto unlock_pf;
	}

	/* fill VSI slot in the PF struct */
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);
	goto unlock_pf;

err_rings:
	devm_kfree(&pf->pdev->dev, vsi);
	vsi = NULL;
unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}
2302 * ice_free_irq_msix_misc - Unroll misc vector setup
2303 * @pf: board private structure
2305 static void ice_free_irq_msix_misc(struct ice_pf *pf)
2307 /* disable OICR interrupt */
2308 wr32(&pf->hw, PFINT_OICR_ENA, 0);
2311 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
2312 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2313 devm_free_irq(&pf->pdev->dev,
2314 pf->msix_entries[pf->oicr_idx].vector, pf);
2317 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2321 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
2322 * @pf: board private structure
2324 * This sets up the handler for MSIX 0, which is used to manage the
2325 * non-queue interrupts, e.g. AdminQ and errors. This is not used
2326 * when in MSI or Legacy interrupt mode.
2328 static int ice_req_irq_msix_misc(struct ice_pf *pf)
2330 struct ice_hw *hw = &pf->hw;
2331 int oicr_idx, err = 0;
2335 if (!pf->int_name[0])
2336 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2337 dev_driver_string(&pf->pdev->dev),
2338 dev_name(&pf->pdev->dev));
2340 /* Do not request IRQ but do enable OICR interrupt since settings are
2341 * lost during reset. Note that this function is called only during
2342 * rebuild path and not while reset is in progress.
2344 if (ice_is_reset_recovery_pending(pf->state))
2347 /* reserve one vector in irq_tracker for misc interrupts */
2348 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2352 pf->oicr_idx = oicr_idx;
2354 err = devm_request_irq(&pf->pdev->dev,
2355 pf->msix_entries[pf->oicr_idx].vector,
2356 ice_misc_intr, 0, pf->int_name, pf);
2358 dev_err(&pf->pdev->dev,
2359 "devm_request_irq for %s failed: %d\n",
2361 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2366 ice_ena_misc_vector(pf);
2368 val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2369 PFINT_OICR_CTL_CAUSE_ENA_M);
2370 wr32(hw, PFINT_OICR_CTL, val);
2372 /* This enables Admin queue Interrupt causes */
2373 val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2374 PFINT_FW_CTL_CAUSE_ENA_M);
2375 wr32(hw, PFINT_FW_CTL, val);
2377 itr_gran = hw->itr_gran_200;
2379 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
2380 ITR_TO_REG(ICE_ITR_8K, itr_gran));
2383 ice_irq_dynamic_ena(hw, NULL, NULL);
2389 * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
2390 * @vsi: the VSI getting queues
2392 * Return 0 on success and a negative value on error
2394 static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
2396 struct ice_pf *pf = vsi->back;
2397 int offset, ret = 0;
2399 mutex_lock(&pf->avail_q_mutex);
2400 /* look for contiguous block of queues for tx */
2401 offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
2402 0, vsi->alloc_txq, 0);
2403 if (offset < ICE_MAX_TXQS) {
2406 bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
2407 for (i = 0; i < vsi->alloc_txq; i++)
2408 vsi->txq_map[i] = i + offset;
2411 vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
2414 /* look for contiguous block of queues for rx */
2415 offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
2416 0, vsi->alloc_rxq, 0);
2417 if (offset < ICE_MAX_RXQS) {
2420 bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
2421 for (i = 0; i < vsi->alloc_rxq; i++)
2422 vsi->rxq_map[i] = i + offset;
2425 vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
2427 mutex_unlock(&pf->avail_q_mutex);
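/* Illustrative sketch (standalone user-space C, not driver code): the
 * contiguous-allocation idea above, using a plain unsigned long as a
 * 32-queue availability bitmap instead of the kernel bitmap API. A zero
 * bit means "free"; on success the block is marked busy and its base
 * index returned, mirroring bitmap_find_next_zero_area()/bitmap_set().
 * Assumes len is smaller than the bit width of long.
 */
#include <stdio.h>

static int find_zero_area32(unsigned long *map, int size, int len)
{
	int base;

	for (base = 0; base + len <= size; base++) {
		unsigned long mask = ((1UL << len) - 1) << base;

		if (!(*map & mask)) {
			*map |= mask;	/* claim the block */
			return base;
		}
	}
	return size;	/* no room, like offset >= ICE_MAX_TXQS above */
}

int main(void)
{
	unsigned long avail_txqs = 0x0000000F; /* queues 0-3 already taken */
	int base = find_zero_area32(&avail_txqs, 32, 4);

	printf("got 4 contiguous queues at base %d\n", base); /* base 4 */
	return 0;
}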
2433 * ice_vsi_get_qs_scatter - Assign scattered queues to a VSI
2434 * @vsi: the VSI getting queues
2436 * Return 0 on success and a negative value on error
2438 static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
2440 struct ice_pf *pf = vsi->back;
2443 mutex_lock(&pf->avail_q_mutex);
2445 if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
2446 for (i = 0; i < vsi->alloc_txq; i++) {
2447 index = find_next_zero_bit(pf->avail_txqs,
2448 ICE_MAX_TXQS, index);
2449 if (index < ICE_MAX_TXQS) {
2450 set_bit(index, pf->avail_txqs);
2451 vsi->txq_map[i] = index;
2453 goto err_scatter_tx;
2458 if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
2459 for (i = 0; i < vsi->alloc_rxq; i++) {
2460 index = find_next_zero_bit(pf->avail_rxqs,
2461 ICE_MAX_RXQS, index);
2462 if (index < ICE_MAX_RXQS) {
2463 set_bit(index, pf->avail_rxqs);
2464 vsi->rxq_map[i] = index;
2466 goto err_scatter_rx;
2471 mutex_unlock(&pf->avail_q_mutex);
2475 /* unflag any queues we have grabbed (i is failed position) */
2476 for (index = 0; index < i; index++) {
2477 clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
2478 vsi->rxq_map[index] = 0;
2482 /* i is either position of failed attempt or vsi->alloc_txq */
2483 for (index = 0; index < i; index++) {
2484 clear_bit(vsi->txq_map[index], pf->avail_txqs);
2485 vsi->txq_map[index] = 0;
2488 mutex_unlock(&pf->avail_q_mutex);
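/* Illustrative sketch (standalone user-space C, not driver code): the
 * scatter path above grabs queues one bit at a time and, on failure,
 * unwinds exactly the bits it already claimed, which is the same pattern
 * as the err_scatter_tx/err_scatter_rx labels. Sizes and the busy map
 * below are made-up example values.
 */
#include <stdio.h>

#define NQ 8

static int grab_scattered(unsigned char *busy, int want, int *map)
{
	int i, idx = 0, j;

	for (i = 0; i < want; i++) {
		while (idx < NQ && busy[idx])
			idx++;
		if (idx == NQ)
			goto err_unwind;	/* ran out of free queues */
		busy[idx] = 1;
		map[i] = idx;
	}
	return 0;

err_unwind:
	for (j = 0; j < i; j++)			/* i is the failed position */
		busy[map[j]] = 0;
	return -1;
}

int main(void)
{
	unsigned char busy[NQ] = { 1, 0, 1, 0, 0, 1, 0, 0 };
	int map[4];

	if (!grab_scattered(busy, 4, map))
		printf("queues %d %d %d %d\n", map[0], map[1], map[2], map[3]);
	return 0; /* prints: queues 1 3 4 6 */
}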
2493 * ice_vsi_get_qs - Assign queues from PF to VSI
2494 * @vsi: the VSI to assign queues to
2496 * Returns 0 on success and a negative value on error
2498 static int ice_vsi_get_qs(struct ice_vsi *vsi)
2502 vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
2503 vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
2505 /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping
2506 * modes individually to scatter if assigning contiguous queues to Rx or Tx fails
2509 ret = ice_vsi_get_qs_contig(vsi);
2511 if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
2512 vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
2513 ICE_MAX_SCATTER_TXQS);
2514 if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
2515 vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
2516 ICE_MAX_SCATTER_RXQS);
2517 ret = ice_vsi_get_qs_scatter(vsi);
2524 * ice_vsi_put_qs - Release queues from VSI to PF
2525 * @vsi: the VSI that is going to release queues
2527 static void ice_vsi_put_qs(struct ice_vsi *vsi)
2529 struct ice_pf *pf = vsi->back;
2532 mutex_lock(&pf->avail_q_mutex);
2534 for (i = 0; i < vsi->alloc_txq; i++) {
2535 clear_bit(vsi->txq_map[i], pf->avail_txqs);
2536 vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
2539 for (i = 0; i < vsi->alloc_rxq; i++) {
2540 clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
2541 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
2544 mutex_unlock(&pf->avail_q_mutex);
2548 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
2549 * @vsi: VSI having the memory freed
2550 * @v_idx: index of the vector to be freed
2552 static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
2554 struct ice_q_vector *q_vector;
2555 struct ice_ring *ring;
2557 if (!vsi->q_vectors[v_idx]) {
2558 dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
2562 q_vector = vsi->q_vectors[v_idx];
2564 ice_for_each_ring(ring, q_vector->tx)
2565 ring->q_vector = NULL;
2566 ice_for_each_ring(ring, q_vector->rx)
2567 ring->q_vector = NULL;
2569 /* only VSI with an associated netdev is set up with NAPI */
2571 netif_napi_del(&q_vector->napi);
2573 devm_kfree(&vsi->back->pdev->dev, q_vector);
2574 vsi->q_vectors[v_idx] = NULL;
2578 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
2579 * @vsi: the VSI having memory freed
2581 static void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
2585 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
2586 ice_free_q_vector(vsi, v_idx);
2590 * ice_cfg_netdev - Setup the netdev flags
2591 * @vsi: the VSI being configured
2593 * Returns 0 on success, negative value on failure
2595 static int ice_cfg_netdev(struct ice_vsi *vsi)
2597 netdev_features_t csumo_features;
2598 netdev_features_t vlano_features;
2599 netdev_features_t dflt_features;
2600 netdev_features_t tso_features;
2601 struct ice_netdev_priv *np;
2602 struct net_device *netdev;
2603 u8 mac_addr[ETH_ALEN];
2605 netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
2606 vsi->alloc_txq, vsi->alloc_rxq);
2610 vsi->netdev = netdev;
2611 np = netdev_priv(netdev);
2614 dflt_features = NETIF_F_SG |
2618 csumo_features = NETIF_F_RXCSUM |
2622 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
2623 NETIF_F_HW_VLAN_CTAG_TX |
2624 NETIF_F_HW_VLAN_CTAG_RX;
2626 tso_features = NETIF_F_TSO;
2628 /* set features that user can change */
2629 netdev->hw_features = dflt_features | csumo_features |
2630 vlano_features | tso_features;
2632 /* enable features */
2633 netdev->features |= netdev->hw_features;
2634 /* encap and VLAN devices inherit default, csumo and tso features */
2635 netdev->hw_enc_features |= dflt_features | csumo_features |
2637 netdev->vlan_features |= dflt_features | csumo_features |
2640 if (vsi->type == ICE_VSI_PF) {
2641 SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
2642 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
2644 ether_addr_copy(netdev->dev_addr, mac_addr);
2645 ether_addr_copy(netdev->perm_addr, mac_addr);
2648 netdev->priv_flags |= IFF_UNICAST_FLT;
2650 /* assign netdev_ops */
2651 netdev->netdev_ops = &ice_netdev_ops;
2653 /* set up watchdog timeout value to be 5 seconds */
2654 netdev->watchdog_timeo = 5 * HZ;
2656 ice_set_ethtool_ops(netdev);
2658 netdev->min_mtu = ETH_MIN_MTU;
2659 netdev->max_mtu = ICE_MAX_MTU;
2665 * ice_vsi_free_arrays - clean up vsi resources
2666 * @vsi: pointer to VSI being cleared
2667 * @free_qvectors: bool to specify if q_vectors should be deallocated
2669 static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
2671 struct ice_pf *pf = vsi->back;
2673 /* free the ring and vector containers */
2674 if (free_qvectors && vsi->q_vectors) {
2675 devm_kfree(&pf->pdev->dev, vsi->q_vectors);
2676 vsi->q_vectors = NULL;
2678 if (vsi->tx_rings) {
2679 devm_kfree(&pf->pdev->dev, vsi->tx_rings);
2680 vsi->tx_rings = NULL;
2682 if (vsi->rx_rings) {
2683 devm_kfree(&pf->pdev->dev, vsi->rx_rings);
2684 vsi->rx_rings = NULL;
2689 * ice_vsi_clear - clean up and deallocate the provided vsi
2690 * @vsi: pointer to VSI being cleared
2692 * This deallocates the vsi's queue resources, removes it from the PF's
2693 * VSI array if necessary, and deallocates the VSI
2695 * Returns 0 on success, negative on failure
2697 static int ice_vsi_clear(struct ice_vsi *vsi)
2699 struct ice_pf *pf = NULL;
2709 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
2710 dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
2715 mutex_lock(&pf->sw_mutex);
2716 /* updates the PF for this cleared vsi */
2718 pf->vsi[vsi->idx] = NULL;
2719 if (vsi->idx < pf->next_vsi)
2720 pf->next_vsi = vsi->idx;
2722 ice_vsi_free_arrays(vsi, true);
2723 mutex_unlock(&pf->sw_mutex);
2724 devm_kfree(&pf->pdev->dev, vsi);
2730 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
2731 * @vsi: the VSI being configured
2732 * @v_idx: index of the vector in the vsi struct
2734 * We allocate one q_vector. If allocation fails we return -ENOMEM.
2736 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
2738 struct ice_pf *pf = vsi->back;
2739 struct ice_q_vector *q_vector;
2741 /* allocate q_vector */
2742 q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
2746 q_vector->vsi = vsi;
2747 q_vector->v_idx = v_idx;
2748 /* only set affinity_mask if the CPU is online */
2749 if (cpu_online(v_idx))
2750 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
2753 netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
2755 /* tie q_vector and vsi together */
2756 vsi->q_vectors[v_idx] = q_vector;
2762 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
2763 * @vsi: the VSI being configured
2765 * We allocate one q_vector per queue interrupt. If allocation fails we return -ENOMEM.
2768 static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
2770 struct ice_pf *pf = vsi->back;
2771 int v_idx = 0, num_q_vectors;
2774 if (vsi->q_vectors[0]) {
2775 dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
2780 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
2781 num_q_vectors = vsi->num_q_vectors;
2787 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
2788 err = ice_vsi_alloc_q_vector(vsi, v_idx);
2797 ice_free_q_vector(vsi, v_idx);
2799 dev_err(&pf->pdev->dev,
2800 "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
2801 vsi->num_q_vectors, vsi->vsi_num, err);
2802 vsi->num_q_vectors = 0;
2807 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
2808 * @vsi: ptr to the VSI
2810 * This should only be called after ice_vsi_alloc() which allocates the
2811 * corresponding SW VSI structure and initializes num_queue_pairs for the
2812 * newly allocated VSI.
2814 * Returns 0 on success or negative on failure
2816 static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
2818 struct ice_pf *pf = vsi->back;
2819 int num_q_vectors = 0;
2821 if (vsi->base_vector) {
2822 dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
2823 vsi->vsi_num, vsi->base_vector);
2827 if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
2830 switch (vsi->type) {
2832 num_q_vectors = vsi->num_q_vectors;
2835 dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
2841 vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
2842 num_q_vectors, vsi->idx);
2844 if (vsi->base_vector < 0) {
2845 dev_err(&pf->pdev->dev,
2846 "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
2847 num_q_vectors, vsi->vsi_num, vsi->base_vector);
2855 * ice_fill_rss_lut - Fill the RSS lookup table with default values
2856 * @lut: Lookup table
2857 * @rss_table_size: Lookup table size
2858 * @rss_size: Range of queue number for hashing
2860 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
2864 for (i = 0; i < rss_table_size; i++)
2865 lut[i] = i % rss_size;
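/* Illustrative sketch (standalone user-space C, not driver code): the
 * modulo fill above walks the in-use Rx queues round-robin, so hash
 * buckets map evenly onto however many queues are active. Table and
 * queue sizes here are made-up examples (real LUTs are larger).
 */
#include <stdio.h>

int main(void)
{
	unsigned char lut[16];
	int i, rss_size = 4;	/* pretend 4 Rx queues are in use */

	for (i = 0; i < 16; i++)
		lut[i] = i % rss_size;

	for (i = 0; i < 16; i++)
		printf("%d ", lut[i]);	/* 0 1 2 3 0 1 2 3 ... */
	printf("\n");
	return 0;
}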
2869 * ice_vsi_cfg_rss - Configure RSS params for a VSI
2870 * @vsi: VSI to be configured
2872 static int ice_vsi_cfg_rss(struct ice_vsi *vsi)
2874 u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
2875 struct ice_aqc_get_set_rss_keys *key;
2876 struct ice_pf *pf = vsi->back;
2877 enum ice_status status;
2881 vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
2883 lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
2887 if (vsi->rss_lut_user)
2888 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
2890 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
2892 status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type,
2893 lut, vsi->rss_table_size);
2896 dev_err(&vsi->back->pdev->dev,
2897 "set_rss_lut failed, error %d\n", status);
2899 goto ice_vsi_cfg_rss_exit;
2902 key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
2905 goto ice_vsi_cfg_rss_exit;
2908 if (vsi->rss_hkey_user)
2909 memcpy(seed, vsi->rss_hkey_user,
2910 ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
2912 netdev_rss_key_fill((void *)seed,
2913 ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
2914 memcpy(&key->standard_rss_key, seed,
2915 ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
2917 status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key);
2920 dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
2925 devm_kfree(&pf->pdev->dev, key);
2926 ice_vsi_cfg_rss_exit:
2927 devm_kfree(&pf->pdev->dev, lut);
2932 * ice_vsi_rebuild - Rebuild VSI after reset
2933 * @vsi: VSI to be rebuilt
2935 * Returns 0 on success and negative value on failure
2937 static int ice_vsi_rebuild(struct ice_vsi *vsi)
2939 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2945 ice_vsi_free_q_vectors(vsi);
2946 ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
2947 vsi->base_vector = 0;
2948 ice_vsi_clear_rings(vsi);
2949 ice_vsi_free_arrays(vsi, false);
2950 ice_vsi_set_num_qs(vsi);
2952 /* Initialize VSI struct elements and create VSI in FW */
2953 ret = ice_vsi_init(vsi);
2957 ret = ice_vsi_alloc_arrays(vsi, false);
2961 switch (vsi->type) {
2964 ret = ice_vsi_alloc_q_vectors(vsi);
2968 ret = ice_vsi_setup_vector_base(vsi);
2972 ret = ice_vsi_alloc_rings(vsi);
2976 ice_vsi_map_rings_to_vectors(vsi);
2982 ice_vsi_set_tc_cfg(vsi);
2984 /* configure VSI nodes based on number of queues and TC's */
2985 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2986 max_txqs[i] = vsi->num_txq;
2988 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
2989 vsi->tc_cfg.ena_tc, max_txqs);
2991 dev_info(&vsi->back->pdev->dev,
2992 "Failed VSI lan queue config\n");
2998 ice_vsi_free_q_vectors(vsi);
3001 vsi->current_netdev_flags = 0;
3002 unregister_netdev(vsi->netdev);
3003 free_netdev(vsi->netdev);
3008 set_bit(__ICE_RESET_FAILED, vsi->back->state);
3013 * ice_vsi_setup - Set up a VSI by a given type
3014 * @pf: board private structure
3015 * @pi: pointer to the port_info instance
3017 * @vf_id: defines the VF id to which this VSI connects. This field is meant
3018 * to be used only for the ICE_VSI_VF VSI type; for other VSI types,
3019 * callers should pass ICE_INVAL_VFID as input.
3021 * This allocates the sw VSI structure and its queue resources.
3023 * Returns pointer to the successfully allocated and configured VSI sw struct on
3024 * success, NULL on failure.
3026 static struct ice_vsi *
3027 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3028 enum ice_vsi_type type, u16 __always_unused vf_id)
3030 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
3031 struct device *dev = &pf->pdev->dev;
3032 struct ice_vsi *vsi;
3035 vsi = ice_vsi_alloc(pf, type);
3037 dev_err(dev, "could not allocate VSI\n");
3041 vsi->port_info = pi;
3042 vsi->vsw = pf->first_sw;
3044 if (ice_vsi_get_qs(vsi)) {
3045 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
3050 /* set RSS capabilities */
3051 ice_vsi_set_rss_params(vsi);
3053 /* create the VSI */
3054 ret = ice_vsi_init(vsi);
3058 switch (vsi->type) {
3060 ret = ice_cfg_netdev(vsi);
3062 goto err_cfg_netdev;
3064 ret = register_netdev(vsi->netdev);
3066 goto err_register_netdev;
3068 netif_carrier_off(vsi->netdev);
3070 /* make sure transmit queues start off as stopped */
3071 netif_tx_stop_all_queues(vsi->netdev);
3072 ret = ice_vsi_alloc_q_vectors(vsi);
3076 ret = ice_vsi_setup_vector_base(vsi);
3080 ret = ice_vsi_alloc_rings(vsi);
3084 ice_vsi_map_rings_to_vectors(vsi);
3086 /* Do not exit if configuring RSS had an issue, at least
3087 * receive traffic on first queue. Hence no need to capture return value
3090 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
3091 ice_vsi_cfg_rss(vsi);
3094 /* if vsi type is not recognized, clean up the resources and exit */
3100 ice_vsi_set_tc_cfg(vsi);
3102 /* configure VSI nodes based on number of queues and TC's */
3103 for (i = 0; i < vsi->tc_cfg.numtc; i++)
3104 max_txqs[i] = vsi->num_txq;
3106 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
3107 vsi->tc_cfg.ena_tc, max_txqs);
3109 dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
3116 ice_vsi_free_q_vectors(vsi);
3118 if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
3119 unregister_netdev(vsi->netdev);
3120 err_register_netdev:
3122 free_netdev(vsi->netdev);
3126 ice_vsi_delete(vsi);
3128 ice_vsi_put_qs(vsi);
3130 pf->q_left_tx += vsi->alloc_txq;
3131 pf->q_left_rx += vsi->alloc_rxq;
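/* Illustrative sketch (standalone C, not driver code): the error labels in
 * ice_vsi_setup() above follow the kernel's goto-unwind idiom -- each
 * failure jumps to a label that undoes only the steps that already
 * succeeded, in reverse order. step_a()/step_b()/undo_a() are hypothetical
 * stand-in names.
 */
#include <stdio.h>

static int step_a(void) { printf("a up\n"); return 0; }
static int step_b(void) { printf("b failed\n"); return -1; }
static void undo_a(void) { printf("a down\n"); }

static int bring_up(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto unroll_a;	/* undo step a, and nothing after it */
	return 0;

unroll_a:
	undo_a();
out:
	return err;
}

int main(void)
{
	return bring_up() ? 1 : 0;
}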
3138 * ice_pf_vsi_setup - Set up a PF VSI
3139 * @pf: board private structure
3140 * @pi: pointer to the port_info instance
3142 * Returns pointer to the successfully allocated VSI sw struct on success,
3143 * otherwise returns NULL on failure.
3145 static struct ice_vsi *
3146 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3148 return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3152 * ice_vsi_add_vlan - Add VSI membership for a given VLAN
3153 * @vsi: the VSI being configured
3154 * @vid: VLAN id to be added
3156 static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
3158 struct ice_fltr_list_entry *tmp;
3159 struct ice_pf *pf = vsi->back;
3160 LIST_HEAD(tmp_add_list);
3161 enum ice_status status;
3164 tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
3168 tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
3169 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3170 tmp->fltr_info.flag = ICE_FLTR_TX;
3171 tmp->fltr_info.src = vsi->vsi_num;
3172 tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
3173 tmp->fltr_info.l_data.vlan.vlan_id = vid;
3175 INIT_LIST_HEAD(&tmp->list_entry);
3176 list_add(&tmp->list_entry, &tmp_add_list);
3178 status = ice_add_vlan(&pf->hw, &tmp_add_list);
3181 dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
3185 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
3190 * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
3191 * @netdev: network interface to be adjusted
3192 * @proto: unused protocol
3193 * @vid: vlan id to be added
3195 * net_device_ops implementation for adding vlan ids
3197 static int ice_vlan_rx_add_vid(struct net_device *netdev,
3198 __always_unused __be16 proto, u16 vid)
3200 struct ice_netdev_priv *np = netdev_priv(netdev);
3201 struct ice_vsi *vsi = np->vsi;
3204 if (vid >= VLAN_N_VID) {
3205 netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
3213 /* Enable VLAN pruning when VLAN 0 is added */
3214 if (unlikely(!vid)) {
3215 ret = ice_cfg_vlan_pruning(vsi, true);
3220 /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
3221 * needed to continue allowing all untagged packets since VLAN prune
3222 * list is applied to all packets by the switch
3224 ret = ice_vsi_add_vlan(vsi, vid);
3227 set_bit(vid, vsi->active_vlans);
3233 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
3234 * @vsi: the VSI being configured
3235 * @vid: VLAN id to be removed
3237 * Returns 0 on success and negative on failure
3239 static int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
3241 struct ice_fltr_list_entry *list;
3242 struct ice_pf *pf = vsi->back;
3243 LIST_HEAD(tmp_add_list);
3246 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
3250 list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
3251 list->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
3252 list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3253 list->fltr_info.l_data.vlan.vlan_id = vid;
3254 list->fltr_info.flag = ICE_FLTR_TX;
3255 list->fltr_info.src = vsi->vsi_num;
3257 INIT_LIST_HEAD(&list->list_entry);
3258 list_add(&list->list_entry, &tmp_add_list);
3260 if (ice_remove_vlan(&pf->hw, &tmp_add_list)) {
3261 dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
3266 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
3271 * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
3272 * @netdev: network interface to be adjusted
3273 * @proto: unused protocol
3274 * @vid: vlan id to be removed
3276 * net_device_ops implementation for removing vlan ids
3278 static int ice_vlan_rx_kill_vid(struct net_device *netdev,
3279 __always_unused __be16 proto, u16 vid)
3281 struct ice_netdev_priv *np = netdev_priv(netdev);
3282 struct ice_vsi *vsi = np->vsi;
3288 /* Make sure ice_vsi_kill_vlan is successful before updating VLAN information
3291 status = ice_vsi_kill_vlan(vsi, vid);
3295 clear_bit(vid, vsi->active_vlans);
3297 /* Disable VLAN pruning when VLAN 0 is removed */
3299 status = ice_cfg_vlan_pruning(vsi, false);
3305 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3306 * @pf: board private structure
3308 * Returns 0 on success, negative value on failure
3310 static int ice_setup_pf_sw(struct ice_pf *pf)
3312 LIST_HEAD(tmp_add_list);
3313 u8 broadcast[ETH_ALEN];
3314 struct ice_vsi *vsi;
3317 if (ice_is_reset_recovery_pending(pf->state))
3320 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3323 goto unroll_vsi_setup;
3326 /* To add a MAC filter, first add the MAC to a list and then
3327 * pass the list to ice_add_mac.
3330 /* Add a unicast MAC filter so the VSI can get its packets */
3331 status = ice_add_mac_to_list(vsi, &tmp_add_list,
3332 vsi->port_info->mac.perm_addr);
3334 goto unroll_vsi_setup;
3336 /* VSI needs to receive broadcast traffic, so add the broadcast
3337 * MAC address to the list as well.
3339 eth_broadcast_addr(broadcast);
3340 status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
3344 /* program MAC filters for entries in tmp_add_list */
3345 status = ice_add_mac(&pf->hw, &tmp_add_list);
3347 dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
3352 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
3356 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
3360 ice_vsi_free_q_vectors(vsi);
3361 if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
3362 unregister_netdev(vsi->netdev);
3364 free_netdev(vsi->netdev);
3368 ice_vsi_delete(vsi);
3369 ice_vsi_put_qs(vsi);
3370 pf->q_left_tx += vsi->alloc_txq;
3371 pf->q_left_rx += vsi->alloc_rxq;
3378 * ice_determine_q_usage - Calculate queue distribution
3379 * @pf: board private structure
3381 * Determines the LAN queue usage for this PF; the function itself returns nothing.
3383 static void ice_determine_q_usage(struct ice_pf *pf)
3385 u16 q_left_tx, q_left_rx;
3387 q_left_tx = pf->hw.func_caps.common_cap.num_txq;
3388 q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
3390 pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
3392 /* only 1 rx queue unless RSS is enabled */
3393 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
3396 pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());
3398 pf->q_left_tx = q_left_tx - pf->num_lan_tx;
3399 pf->q_left_rx = q_left_rx - pf->num_lan_rx;
3403 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3404 * @pf: board private structure to initialize
3406 static void ice_deinit_pf(struct ice_pf *pf)
3408 ice_service_task_stop(pf);
3409 mutex_destroy(&pf->sw_mutex);
3410 mutex_destroy(&pf->avail_q_mutex);
3414 * ice_init_pf - Initialize general software structures (struct ice_pf)
3415 * @pf: board private structure to initialize
3417 static void ice_init_pf(struct ice_pf *pf)
3419 bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
3420 set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
3422 mutex_init(&pf->sw_mutex);
3423 mutex_init(&pf->avail_q_mutex);
3425 /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
3426 mutex_lock(&pf->avail_q_mutex);
3427 bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
3428 bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
3429 mutex_unlock(&pf->avail_q_mutex);
3431 if (pf->hw.func_caps.common_cap.rss_table_size)
3432 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3434 /* setup service timer and periodic service task */
3435 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3436 pf->serv_tmr_period = HZ;
3437 INIT_WORK(&pf->serv_task, ice_service_task);
3438 clear_bit(__ICE_SERVICE_SCHED, pf->state);
3442 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3443 * @pf: board private structure
3445 * Compute the number of MSIX vectors required (v_budget) and request from
3446 * the OS. Return the number of vectors reserved or negative on failure
3448 static int ice_ena_msix_range(struct ice_pf *pf)
3450 int v_left, v_actual, v_budget = 0;
3453 v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3455 /* reserve one vector for miscellaneous handler */
3460 /* reserve vectors for LAN traffic */
3461 pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
3462 v_budget += pf->num_lan_msix;
3464 pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
3465 sizeof(struct msix_entry), GFP_KERNEL);
3467 if (!pf->msix_entries) {
3472 for (i = 0; i < v_budget; i++)
3473 pf->msix_entries[i].entry = i;
3475 /* actually reserve the vectors */
3476 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3477 ICE_MIN_MSIX, v_budget);
3480 dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
3485 if (v_actual < v_budget) {
3486 dev_warn(&pf->pdev->dev,
3487 "not enough vectors. requested = %d, obtained = %d\n",
3488 v_budget, v_actual);
3489 if (v_actual >= (pf->num_lan_msix + 1)) {
3490 pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1);
3491 } else if (v_actual >= 2) {
3492 pf->num_lan_msix = 1;
3493 pf->num_avail_msix = v_actual - 2;
3495 pci_disable_msix(pf->pdev);
3504 devm_kfree(&pf->pdev->dev, pf->msix_entries);
3508 pf->num_lan_msix = 0;
3509 clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
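/* Illustrative sketch (standalone user-space C, not driver code): the
 * fallback ladder above as a small table -- given a request of
 * num_lan_msix LAN vectors plus one misc vector, how many LAN vectors
 * survive when the OS grants fewer than asked. Values are made-up.
 */
#include <stdio.h>

static int lan_vectors_after_grant(int num_lan_msix, int v_actual)
{
	if (v_actual >= num_lan_msix + 1)
		return num_lan_msix;	/* full budget honored */
	if (v_actual >= 2)
		return 1;		/* 1 misc + 1 LAN, degraded mode */
	return 0;			/* give up on MSI-X entirely */
}

int main(void)
{
	int granted;

	for (granted = 0; granted <= 5; granted++)
		printf("granted %d -> %d LAN vectors\n",
		       granted, lan_vectors_after_grant(4, granted));
	return 0;
}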
3514 * ice_dis_msix - Disable MSI-X interrupt setup in OS
3515 * @pf: board private structure
3517 static void ice_dis_msix(struct ice_pf *pf)
3519 pci_disable_msix(pf->pdev);
3520 devm_kfree(&pf->pdev->dev, pf->msix_entries);
3521 pf->msix_entries = NULL;
3522 clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
3526 * ice_init_interrupt_scheme - Determine proper interrupt scheme
3527 * @pf: board private structure to initialize
3529 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3534 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
3535 vectors = ice_ena_msix_range(pf);
3542 /* set up vector assignment tracking */
3543 size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
3545 pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
3546 if (!pf->irq_tracker) {
3551 pf->irq_tracker->num_entries = vectors;
3557 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3558 * @pf: board private structure
3560 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3562 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
3565 if (pf->irq_tracker) {
3566 devm_kfree(&pf->pdev->dev, pf->irq_tracker);
3567 pf->irq_tracker = NULL;
3572 * ice_probe - Device initialization routine
3573 * @pdev: PCI device information struct
3574 * @ent: entry in ice_pci_tbl
3576 * Returns 0 on success, negative on failure
3578 static int ice_probe(struct pci_dev *pdev,
3579 const struct pci_device_id __always_unused *ent)
3585 /* this driver uses devres, see Documentation/driver-model/devres.txt */
3586 err = pcim_enable_device(pdev);
3590 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
3592 dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
3596 pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
3600 /* set up for high or low dma */
3601 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3603 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3605 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
3609 pci_enable_pcie_error_reporting(pdev);
3610 pci_set_master(pdev);
3613 pci_set_drvdata(pdev, pf);
3614 set_bit(__ICE_DOWN, pf->state);
3615 /* Disable service task until DOWN bit is cleared */
3616 set_bit(__ICE_SERVICE_DIS, pf->state);
3619 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
3621 hw->vendor_id = pdev->vendor;
3622 hw->device_id = pdev->device;
3623 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
3624 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3625 hw->subsystem_device_id = pdev->subsystem_device;
3626 hw->bus.device = PCI_SLOT(pdev->devfn);
3627 hw->bus.func = PCI_FUNC(pdev->devfn);
3628 ice_set_ctrlq_len(hw);
3630 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
3632 #ifndef CONFIG_DYNAMIC_DEBUG
3634 hw->debug_mask = debug;
3637 err = ice_init_hw(hw);
3639 dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
3641 goto err_exit_unroll;
3644 dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
3645 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
3646 hw->api_maj_ver, hw->api_min_ver);
3650 ice_determine_q_usage(pf);
3652 pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
3653 hw->func_caps.guaranteed_num_vsi);
3654 if (!pf->num_alloc_vsi) {
3656 goto err_init_pf_unroll;
3659 pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
3660 sizeof(struct ice_vsi *), GFP_KERNEL);
3663 goto err_init_pf_unroll;
3666 err = ice_init_interrupt_scheme(pf);
3669 "ice_init_interrupt_scheme failed: %d\n", err);
3671 goto err_init_interrupt_unroll;
3674 /* Driver is mostly up */
3675 clear_bit(__ICE_DOWN, pf->state);
3677 /* In case of MSIX we are going to set up the misc vector right here
3678 * to handle admin queue events etc. In case of legacy and MSI
3679 * the misc functionality and queue processing is combined in
3680 * the same vector and that gets set up at open.
3682 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
3683 err = ice_req_irq_msix_misc(pf);
3686 "setup of misc vector failed: %d\n", err);
3687 goto err_init_interrupt_unroll;
3691 /* create switch struct for the switch element created by FW on boot */
3692 pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw),
3694 if (!pf->first_sw) {
3696 goto err_msix_misc_unroll;
3700 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
3702 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
3704 pf->first_sw->pf = pf;
3706 /* record the sw_id available for later use */
3707 pf->first_sw->sw_id = hw->port_info->sw_id;
3709 err = ice_setup_pf_sw(pf);
3712 "probe failed due to setup pf switch:%d\n", err);
3713 goto err_alloc_sw_unroll;
3716 clear_bit(__ICE_SERVICE_DIS, pf->state);
3718 /* since everything is good, start the service timer */
3719 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
3721 err = ice_init_link_events(pf->hw.port_info);
3723 dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err);
3724 goto err_alloc_sw_unroll;
3729 err_alloc_sw_unroll:
3730 set_bit(__ICE_SERVICE_DIS, pf->state);
3731 set_bit(__ICE_DOWN, pf->state);
3732 devm_kfree(&pf->pdev->dev, pf->first_sw);
3733 err_msix_misc_unroll:
3734 ice_free_irq_msix_misc(pf);
3735 err_init_interrupt_unroll:
3736 ice_clear_interrupt_scheme(pf);
3737 devm_kfree(&pdev->dev, pf->vsi);
3742 pci_disable_pcie_error_reporting(pdev);
3747 * ice_remove - Device removal routine
3748 * @pdev: PCI device information struct
3750 static void ice_remove(struct pci_dev *pdev)
3752 struct ice_pf *pf = pci_get_drvdata(pdev);
3757 set_bit(__ICE_DOWN, pf->state);
3758 ice_service_task_stop(pf);
3760 ice_vsi_release_all(pf);
3761 ice_free_irq_msix_misc(pf);
3762 ice_clear_interrupt_scheme(pf);
3764 ice_deinit_hw(&pf->hw);
3765 pci_disable_pcie_error_reporting(pdev);
3768 /* ice_pci_tbl - PCI Device ID Table
3770 * Wildcard entries (PCI_ANY_ID) should come last
3771 * Last entry must be all 0s
3773 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
3774 * Class, Class Mask, private data (not used) }
3776 static const struct pci_device_id ice_pci_tbl[] = {
3777 { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
3778 { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
3779 { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
3780 /* required last entry */
3783 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
3785 static struct pci_driver ice_driver = {
3786 .name = KBUILD_MODNAME,
3787 .id_table = ice_pci_tbl,
3789 .remove = ice_remove,
3793 * ice_module_init - Driver registration routine
3795 * ice_module_init is the first routine called when the driver is
3796 * loaded. All it does is register with the PCI subsystem.
3798 static int __init ice_module_init(void)
3802 pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
3803 pr_info("%s\n", ice_copyright);
3805 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
3807 pr_err("Failed to create workqueue\n");
3811 status = pci_register_driver(&ice_driver);
3813 pr_err("failed to register pci driver, err %d\n", status);
3814 destroy_workqueue(ice_wq);
3819 module_init(ice_module_init);
3822 * ice_module_exit - Driver exit cleanup routine
3824 * ice_module_exit is called just before the driver is removed from memory.
3827 static void __exit ice_module_exit(void)
3829 pci_unregister_driver(&ice_driver);
3830 destroy_workqueue(ice_wq);
3831 pr_info("module unloaded\n");
3833 module_exit(ice_module_exit);
3836 * ice_set_mac_address - NDO callback to set mac address
3837 * @netdev: network interface device structure
3838 * @pi: pointer to an address structure
3840 * Returns 0 on success, negative on failure
3842 static int ice_set_mac_address(struct net_device *netdev, void *pi)
3844 struct ice_netdev_priv *np = netdev_priv(netdev);
3845 struct ice_vsi *vsi = np->vsi;
3846 struct ice_pf *pf = vsi->back;
3847 struct ice_hw *hw = &pf->hw;
3848 struct sockaddr *addr = pi;
3849 enum ice_status status;
3850 LIST_HEAD(a_mac_list);
3851 LIST_HEAD(r_mac_list);
3856 mac = (u8 *)addr->sa_data;
3858 if (!is_valid_ether_addr(mac))
3859 return -EADDRNOTAVAIL;
3861 if (ether_addr_equal(netdev->dev_addr, mac)) {
3862 netdev_warn(netdev, "already using mac %pM\n", mac);
3866 if (test_bit(__ICE_DOWN, pf->state) ||
3867 ice_is_reset_recovery_pending(pf->state)) {
3868 netdev_err(netdev, "can't set mac %pM. device not ready\n",
3873 /* When we change the mac address we also have to change the mac address
3874 * based filter rules that were created previously for the old mac
3875 * address. So first, we remove the old filter rule using ice_remove_mac
3876 * and then create a new filter rule using ice_add_mac. Note that for
3877 * both these operations, we first need to form a "list" of mac
3878 * addresses (even though in this case, we have only 1 mac address to be
3879 * added/removed) and this is done using ice_add_mac_to_list. Depending on
3880 * the ensuing operation this "list" of mac addresses is either to be
3881 * added or removed from the filter.
3883 err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
3885 err = -EADDRNOTAVAIL;
3889 status = ice_remove_mac(hw, &r_mac_list);
3891 err = -EADDRNOTAVAIL;
3895 err = ice_add_mac_to_list(vsi, &a_mac_list, mac);
3897 err = -EADDRNOTAVAIL;
3901 status = ice_add_mac(hw, &a_mac_list);
3903 err = -EADDRNOTAVAIL;
3908 /* free list entries */
3909 ice_free_fltr_list(&pf->pdev->dev, &r_mac_list);
3910 ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
3913 netdev_err(netdev, "can't set mac %pM. filter update failed\n",
3918 /* change the netdev's mac address */
3919 memcpy(netdev->dev_addr, mac, netdev->addr_len);
3920 netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
3923 /* write new mac address to the firmware */
3924 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3925 status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
3927 netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n",
3934 * ice_set_rx_mode - NDO callback to set the netdev filters
3935 * @netdev: network interface device structure
3937 static void ice_set_rx_mode(struct net_device *netdev)
3939 struct ice_netdev_priv *np = netdev_priv(netdev);
3940 struct ice_vsi *vsi = np->vsi;
3945 /* Set the flags to synchronize filters
3946 * ndo_set_rx_mode may be triggered even without a change in netdev flags
3949 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
3950 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
3951 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
3953 /* schedule our worker thread which will take care of
3954 * applying the new filter changes
3956 ice_service_task_schedule(vsi->back);
3960 * ice_fdb_add - add an entry to the hardware database
3961 * @ndm: the input from the stack
3962 * @tb: pointer to array of nladdr (unused)
3963 * @dev: the net device pointer
3964 * @addr: the MAC address entry being added
3966 * @flags: instructions from stack about fdb operation
3968 static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
3969 struct net_device *dev, const unsigned char *addr,
3975 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
3978 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
3979 netdev_err(dev, "FDB only supports static addresses\n");
3983 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3984 err = dev_uc_add_excl(dev, addr);
3985 else if (is_multicast_ether_addr(addr))
3986 err = dev_mc_add_excl(dev, addr);
3990 /* Only return duplicate errors if NLM_F_EXCL is set */
3991 if (err == -EEXIST && !(flags & NLM_F_EXCL))
3998 * ice_fdb_del - delete an entry from the hardware database
3999 * @ndm: the input from the stack
4000 * @tb: pointer to array of nladdr (unused)
4001 * @dev: the net device pointer
4002 * @addr: the MAC address entry being removed
4005 static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
4006 struct net_device *dev, const unsigned char *addr,
4007 __always_unused u16 vid)
4011 if (ndm->ndm_state & NUD_PERMANENT) {
4012 netdev_err(dev, "FDB only supports static addresses\n");
4016 if (is_unicast_ether_addr(addr))
4017 err = dev_uc_del(dev, addr);
4018 else if (is_multicast_ether_addr(addr))
4019 err = dev_mc_del(dev, addr);
4027 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
4028 * @vsi: the vsi being changed
4030 static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
4032 struct device *dev = &vsi->back->pdev->dev;
4033 struct ice_hw *hw = &vsi->back->hw;
4034 struct ice_vsi_ctx ctxt = { 0 };
4035 enum ice_status status;
4037 /* Here we are configuring the VSI to let the driver add VLAN tags by
4038 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
4039 * insertion happens in the Tx hot path, in ice_tx_map.
4041 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
4043 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
4044 ctxt.vsi_num = vsi->vsi_num;
4046 status = ice_aq_update_vsi(hw, &ctxt, NULL);
4048 dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
4049 status, hw->adminq.sq_last_status);
4053 vsi->info.vlan_flags = ctxt.info.vlan_flags;
4058 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
4059 * @vsi: the vsi being changed
4060 * @ena: boolean value indicating if this is an enable or disable request
4062 static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
4064 struct device *dev = &vsi->back->pdev->dev;
4065 struct ice_hw *hw = &vsi->back->hw;
4066 struct ice_vsi_ctx ctxt = { 0 };
4067 enum ice_status status;
4069 /* Here we are configuring what the VSI should do with the VLAN tag in
4070 * the Rx packet. We can either leave the tag in the packet or put it in
4071 * the Rx descriptor.
4074 /* Strip VLAN tag from Rx packet and put it in the desc */
4075 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
4077 /* Disable stripping. Leave tag in packet */
4078 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
4081 /* Allow all packets untagged/tagged */
4082 ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
4084 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
4085 ctxt.vsi_num = vsi->vsi_num;
4087 status = ice_aq_update_vsi(hw, &ctxt, NULL);
4089 dev_err(dev, "update VSI for VALN strip failed, ena = %d err %d aq_err %d\n",
4090 ena, status, hw->adminq.sq_last_status);
4094 vsi->info.vlan_flags = ctxt.info.vlan_flags;
4099 * ice_set_features - set the netdev feature flags
4100 * @netdev: ptr to the netdev being adjusted
4101 * @features: the feature set that the stack is suggesting
4103 static int ice_set_features(struct net_device *netdev,
4104 netdev_features_t features)
4106 struct ice_netdev_priv *np = netdev_priv(netdev);
4107 struct ice_vsi *vsi = np->vsi;
4110 if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
4111 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
4112 ret = ice_vsi_manage_vlan_stripping(vsi, true);
4113 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
4114 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
4115 ret = ice_vsi_manage_vlan_stripping(vsi, false);
4116 else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
4117 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
4118 ret = ice_vsi_manage_vlan_insertion(vsi);
4119 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
4120 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
4121 ret = ice_vsi_manage_vlan_insertion(vsi);
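/* Illustrative sketch (standalone user-space C, not driver code): the
 * chain of tests above is the usual ndo_set_features pattern -- compare
 * the requested feature bits against the current ones and only act on
 * the bits that actually changed. The flag values here are made-up
 * stand-ins, not the real NETIF_F_* values.
 */
#include <stdint.h>
#include <stdio.h>

#define F_RXVLAN (1u << 0)	/* stand-in for NETIF_F_HW_VLAN_CTAG_RX */
#define F_TXVLAN (1u << 1)	/* stand-in for NETIF_F_HW_VLAN_CTAG_TX */

static void apply_features(uint32_t cur, uint32_t req)
{
	uint32_t changed = cur ^ req;

	if (changed & F_RXVLAN)
		printf("%s Rx VLAN stripping\n",
		       (req & F_RXVLAN) ? "enable" : "disable");
	if (changed & F_TXVLAN)
		printf("%s Tx VLAN insertion\n",
		       (req & F_TXVLAN) ? "enable" : "disable");
}

int main(void)
{
	apply_features(F_RXVLAN, F_TXVLAN); /* strip off, insertion on */
	return 0;
}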
4127 * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
4128 * @vsi: VSI to set up VLAN properties for
4130 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
4134 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
4135 ret = ice_vsi_manage_vlan_stripping(vsi, true);
4136 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
4137 ret = ice_vsi_manage_vlan_insertion(vsi);
4143 * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
4144 * @vsi: the VSI being brought back up
4146 static int ice_restore_vlan(struct ice_vsi *vsi)
4154 err = ice_vsi_vlan_setup(vsi);
4158 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
4159 err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
4168 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
4169 * @ring: The Tx ring to configure
4170 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
4171 * @pf_q: queue index in the PF space
4173 * Configure the Tx descriptor ring in TLAN context.
4176 ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
4178 struct ice_vsi *vsi = ring->vsi;
4179 struct ice_hw *hw = &vsi->back->hw;
4181 tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
4183 tlan_ctx->port_num = vsi->port_info->lport;
4185 /* Transmit Queue Length */
4186 tlan_ctx->qlen = ring->count;
4189 tlan_ctx->pf_num = hw->pf_id;
4191 /* queue belongs to a specific VSI type
4192 * VF / VM index should be programmed per vmvf_type setting:
4193 * for vmvf_type = VF, it is VF number between 0-256
4194 * for vmvf_type = VM, it is VM number between 0-767
4195 * for PF or EMP this field should be set to zero
4197 switch (vsi->type) {
4199 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
4205 /* make sure the context is associated with the right VSI */
4206 tlan_ctx->src_vsi = vsi->vsi_num;
4208 tlan_ctx->tso_ena = ICE_TX_LEGACY;
4209 tlan_ctx->tso_qnum = pf_q;
4211 /* Legacy or Advanced Host Interface:
4212 * 0: Advanced Host Interface
4213 * 1: Legacy Host Interface
4215 tlan_ctx->legacy_int = ICE_TX_LEGACY;
4219 * ice_vsi_cfg_txqs - Configure the VSI for Tx
4220 * @vsi: the VSI being configured
4222 * Return 0 on success and a negative value on error
4223 * Configure the Tx VSI for operation.
4225 static int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
4227 struct ice_aqc_add_tx_qgrp *qg_buf;
4228 struct ice_aqc_add_txqs_perq *txq;
4229 struct ice_pf *pf = vsi->back;
4230 enum ice_status status;
4231 u16 buf_len, i, pf_q;
4232 int err = 0, tc = 0;
4235 buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
4236 qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
4240 if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
4244 qg_buf->num_txqs = 1;
4247 /* set up and configure the tx queues */
4248 ice_for_each_txq(vsi, i) {
4249 struct ice_tlan_ctx tlan_ctx = { 0 };
4251 pf_q = vsi->txq_map[i];
4252 ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
4253 /* copy context contents into the qg_buf */
4254 qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
4255 ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
4258 /* init queue specific tail reg. It is referred to as the transmit
4259 * comm scheduler queue doorbell.
4261 vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
4262 status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,
4263 num_q_grps, qg_buf, buf_len, NULL);
4265 dev_err(&vsi->back->pdev->dev,
4266 "Failed to set LAN Tx queue context, error: %d\n",
4272 /* Add Tx Queue TEID into the VSI tx ring from the response.
4273 * This will complete configuring and enabling the queue.
4275 txq = &qg_buf->txqs[0];
4276 if (pf_q == le16_to_cpu(txq->txq_id))
4277 vsi->tx_rings[i]->txq_teid =
4278 le32_to_cpu(txq->q_teid);
4281 devm_kfree(&pf->pdev->dev, qg_buf);
4286 * ice_setup_rx_ctx - Configure a receive ring context
4287 * @ring: The Rx ring to configure
4289 * Configure the Rx descriptor ring in RLAN context.
4291 static int ice_setup_rx_ctx(struct ice_ring *ring)
4293 struct ice_vsi *vsi = ring->vsi;
4294 struct ice_hw *hw = &vsi->back->hw;
4295 u32 rxdid = ICE_RXDID_FLEX_NIC;
4296 struct ice_rlan_ctx rlan_ctx;
4301 /* absolute Rx queue number in the global space of 2K Rx queues */
4302 pf_q = vsi->rxq_map[ring->q_index];
4304 /* clear the context structure first */
4305 memset(&rlan_ctx, 0, sizeof(rlan_ctx));
4307 rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
4309 rlan_ctx.qlen = ring->count;
4311 /* Receive Packet Data Buffer Size.
4312 * The Packet Data Buffer Size is defined in 128 byte units.
4314 rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
4316 /* use 32 byte descriptors */
4319 /* Strip the Ethernet CRC bytes before the packet is posted to host memory.
4322 rlan_ctx.crcstrip = 1;
4324 /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
4325 rlan_ctx.l2tsel = 1;
4327 rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
4328 rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
4329 rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
4331 /* This controls whether VLAN is stripped from inner headers
4332 * The VLAN in the inner L2 header is stripped to the receive
4333 * descriptor if enabled by this flag.
4335 rlan_ctx.showiv = 0;
4337 /* Max packet size for this queue - must not be set to a larger value than 5 x DBUF */
4340 rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
4341 ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
4343 /* Rx queue threshold in units of 64 */
4344 rlan_ctx.lrxqthresh = 1;
4346 /* Enable Flexible Descriptors in the queue context which
4347 * allows this driver to select a specific receive descriptor format
4349 regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
4350 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
4351 QRXFLXP_CNTXT_RXDID_IDX_M;
4353 /* increasing context priority to pick up profile id;
4354 * default is 0x01; setting to 0x03 to ensure profile
4355 * is programmed if the previous context is of the same priority
4357 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
4358 QRXFLXP_CNTXT_RXDID_PRIO_M;
4360 wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
4362 /* Absolute queue number out of 2K needs to be passed */
4363 err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
4365 dev_err(&vsi->back->pdev->dev,
4366 "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
4371 /* init queue specific tail register */
4372 ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
4373 writel(0, ring->tail);
4374 ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
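/* Illustrative sketch (standalone user-space C, not driver code): several
 * RLAN context fields are expressed in coarse units, so the driver shifts
 * rather than divides -- a buffer size in 128-byte units is just
 * "len >> 7", since 128 = 2^7. The shift width mirrors what the
 * ICE_RLAN_CTX_DBUF_S constant above encodes.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rx_buf_len = 2048;		/* like ICE_RXBUF_2048 */
	unsigned int dbuf = rx_buf_len >> 7;	/* size in 128-byte units */

	printf("%u bytes -> dbuf %u (= %u * 128)\n",
	       rx_buf_len, dbuf, dbuf);
	return 0;	/* 2048 bytes -> dbuf 16 */
}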
4380 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
4381 * @vsi: the VSI being configured
4383 * Return 0 on success and a negative value on error
4384 * Configure the Rx VSI for operation.
4386 static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
4391 if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
4392 vsi->max_frame = vsi->netdev->mtu +
4393 ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4395 vsi->max_frame = ICE_RXBUF_2048;
4397 vsi->rx_buf_len = ICE_RXBUF_2048;
4398 /* set up individual rings */
4399 for (i = 0; i < vsi->num_rxq && !err; i++)
4400 err = ice_setup_rx_ctx(vsi->rx_rings[i]);
4403 dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
4410 * ice_vsi_cfg - Setup the VSI
4411 * @vsi: the VSI being configured
4413 * Return 0 on success and negative value on error
4415 static int ice_vsi_cfg(struct ice_vsi *vsi)
4420 ice_set_rx_mode(vsi->netdev);
4421 err = ice_restore_vlan(vsi);
4426 err = ice_vsi_cfg_txqs(vsi);
4428 err = ice_vsi_cfg_rxqs(vsi);
4434 * ice_vsi_stop_tx_rings - Disable Tx rings
4435 * @vsi: the VSI being configured
4437 static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
4439 struct ice_pf *pf = vsi->back;
4440 struct ice_hw *hw = &pf->hw;
4441 enum ice_status status;
4446 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
4449 q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
4454 q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
4458 goto err_alloc_q_ids;
4461 /* set up the tx queue list to be disabled */
4462 ice_for_each_txq(vsi, i) {
4465 if (!vsi->tx_rings || !vsi->tx_rings[i]) {
4470 q_ids[i] = vsi->txq_map[i];
4471 q_teids[i] = vsi->tx_rings[i]->txq_teid;
4473 /* clear cause_ena bit for disabled queues */
4474 val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
4475 val &= ~QINT_TQCTL_CAUSE_ENA_M;
4476 wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
4478 /* software is expected to wait for 100 ns */
4481 /* trigger a software interrupt for the vector associated with
4482 * the queue to schedule the NAPI handler
4484 v_idx = vsi->tx_rings[i]->q_vector->v_idx;
4485 wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),
4486 GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
4488 status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
4490 /* if the disable queue command was exercised during an active reset
4491 * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
4492 * the reset operation disables queues at the hardware level anyway.
4494 if (status == ICE_ERR_RESET_ONGOING) {
4495 dev_dbg(&pf->pdev->dev,
4496 "Reset in progress. LAN Tx queues already disabled\n");
4497 } else if (status) {
4498 dev_err(&pf->pdev->dev,
4499 "Failed to disable LAN Tx queues, error: %d\n",
4505 devm_kfree(&pf->pdev->dev, q_ids);
4508 devm_kfree(&pf->pdev->dev, q_teids);
4514 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4515 * @pf: the PF being configured
4516 * @pf_q: the PF queue
4517 * @ena: enable or disable state of the queue
4519 * This routine will wait for the given Rx queue of the PF to reach the
4520 * enabled or disabled state.
4521 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4522 * multiple retries; otherwise returns 0 on success.
4524 static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
4528 for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
4529 u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
4531 if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
4534 usleep_range(10, 20);
4536 if (i >= ICE_Q_WAIT_RETRY_LIMIT)
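/* Illustrative sketch (standalone user-space C, not driver code): the
 * bounded-poll pattern used above -- re-read a status bit until it
 * matches the requested state or the retry budget runs out.
 * read_qena_stat() is a hypothetical stand-in for reading QRX_CTRL and
 * masking out QENA_STAT.
 */
#include <stdbool.h>
#include <stdio.h>

#define RETRY_LIMIT 5

static bool read_qena_stat(int attempt)
{
	return attempt >= 3;	/* pretend HW settles on the 4th read */
}

static int wait_for_state(bool ena)
{
	int i;

	for (i = 0; i < RETRY_LIMIT; i++) {
		if (ena == read_qena_stat(i))
			return 0;
		/* the real code sleeps here: usleep_range(10, 20) */
	}
	return -1;	/* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("wait: %s\n", wait_for_state(true) ? "timeout" : "ok");
	return 0;
}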
4543 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings
4544 * @vsi: the VSI being configured
4545 * @ena: start or stop the rx rings
4547 static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
4549 struct ice_pf *pf = vsi->back;
4550 struct ice_hw *hw = &pf->hw;
4553 for (i = 0; i < vsi->num_rxq; i++) {
4554 int pf_q = vsi->rxq_map[i];
4557 for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
4558 rx_reg = rd32(hw, QRX_CTRL(pf_q));
4559 if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
4560 ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
4562 usleep_range(1000, 2000);
4565 /* Skip if the queue is already in the requested state */
4566 if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
4569 /* turn on/off the queue */
4571 rx_reg |= QRX_CTRL_QENA_REQ_M;
4573 rx_reg &= ~QRX_CTRL_QENA_REQ_M;
4574 wr32(hw, QRX_CTRL(pf_q), rx_reg);
4576 /* wait for the change to finish */
4577 ret = ice_pf_rxq_wait(pf, pf_q, ena);
4579 dev_err(&pf->pdev->dev,
4580 "VSI idx %d Rx ring %d %sable timeout\n",
4581 vsi->idx, pf_q, (ena ? "en" : "dis"));
4590 * ice_vsi_start_rx_rings - start VSI's rx rings
4591 * @vsi: the VSI whose rings are to be started
4593 * Returns 0 on success and a negative value on error
4595 static int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
4597 return ice_vsi_ctrl_rx_rings(vsi, true);
4601 * ice_vsi_stop_rx_rings - stop VSI's rx rings
4604 * Returns 0 on success and a negative value on error
4606 static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
4608 return ice_vsi_ctrl_rx_rings(vsi, false);
4612 * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings
4614 * Returns 0 on success and a negative value on error
4616 static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi)
4620 err_tx = ice_vsi_stop_tx_rings(vsi);
4622 dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n");
4624 err_rx = ice_vsi_stop_rx_rings(vsi);
4626 dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n");
4628 if (err_tx || err_rx)
4635 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4636 * @vsi: the VSI being configured
4638 static void ice_napi_enable_all(struct ice_vsi *vsi)
4645 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4646 napi_enable(&vsi->q_vectors[q_idx]->napi);
4650 * ice_up_complete - Finish the last steps of bringing up a connection
4651 * @vsi: The VSI being configured
4653 * Return 0 on success and negative value on error
4655 static int ice_up_complete(struct ice_vsi *vsi)
4657 struct ice_pf *pf = vsi->back;
4660 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
4661 ice_vsi_cfg_msix(vsi);
4665 /* Enable only Rx rings; Tx rings were enabled by the FW when the
4666 * Tx queue group list was configured and the context bits were
4667 * programmed using ice_vsi_cfg_txqs
4669 err = ice_vsi_start_rx_rings(vsi);
4673 clear_bit(__ICE_DOWN, vsi->state);
4674 ice_napi_enable_all(vsi);
4675 ice_vsi_ena_irq(vsi);
4677 if (vsi->port_info &&
4678 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
4680 ice_print_link_msg(vsi, true);
4681 netif_tx_start_all_queues(vsi->netdev);
4682 netif_carrier_on(vsi->netdev);
4685 ice_service_task_schedule(pf);
4691 * ice_up - Bring the connection back up after being down
4692 * @vsi: VSI being configured
4694 int ice_up(struct ice_vsi *vsi)
4698 err = ice_vsi_cfg(vsi);
4700 err = ice_up_complete(vsi);

/**
 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
 * @ring: Tx or Rx ring to read stats from
 * @pkts: packets stats counter
 * @bytes: bytes stats counter
 *
 * This function fetches stats from the ring considering the atomic operations
 * that need to be performed to read u64 values on a 32-bit machine.
 */
static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
					 u64 *bytes)
{
	unsigned int start;
	*pkts = 0;
	*bytes = 0;

	if (!ring)
		return;
	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		*pkts = ring->stats.pkts;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
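
/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair compiles
 * down to nothing on 64-bit kernels, where a u64 load is atomic anyway. On
 * 32-bit kernels it is a seqcount read-side loop: if the writer updated the
 * counters while we were reading, the fetch is retried, so the pkts/bytes
 * pair returned is always internally consistent.
 */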

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @hireg: high 32 bit HW register to read from
 * @loreg: low 32 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
			      bool prev_stat_loaded, u64 *prev_stat,
			      u64 *cur_stat)
{
	u64 new_data;

	new_data = rd32(hw, loreg);
	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Save the first values read and use
	 * them as offsets to be subtracted from the raw values in order to
	 * report stats that count from zero.
	 */
	if (!prev_stat_loaded)
		*prev_stat = new_data;
	if (likely(new_data >= *prev_stat))
		*cur_stat = new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
	*cur_stat &= 0xFFFFFFFFFFULL;
}
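
/* Worked example for the 40-bit roll-over above (hypothetical values): if
 * *prev_stat is 0xFFFFFFFF00 and the counter wraps so that new_data reads
 * 0x10, then (0x10 + BIT_ULL(40)) - 0xFFFFFFFF00 = 0x110, i.e. the 0x100
 * increments counted up to the wrap plus the 0x10 counted after it. The
 * final mask to 40 bits also makes the wider 0xFFFF mask applied to the
 * high register harmless, since any stray upper bits are discarded here.
 */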

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
			      u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Save the first values read and use
	 * them as offsets to be subtracted from the raw values in order to
	 * report stats that count from zero.
	 */
	if (!prev_stat_loaded)
		*prev_stat = new_data;
	if (likely(new_data >= *prev_stat))
		*cur_stat = new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
}

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
static void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

	ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->rx_bytes,
			  &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->rx_unicast,
			  &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->rx_multicast,
			  &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
			  &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->tx_bytes,
			  &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->tx_unicast,
			  &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->tx_multicast,
			  &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
			  vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
			  &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}
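
/* Note that stat_offsets_loaded is set only after the first full pass: on
 * that first pass each helper above stores its reading as the offset and
 * reports zero, so all subsequent reads are deltas relative to driver load
 * rather than relative to the last hardware reset.
 */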

/**
 * ice_update_vsi_ring_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 */
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
	struct ice_ring *ring;
	u64 pkts, bytes;
	int i;

	/* reset netdev stats */
	vsi_stats->tx_packets = 0;
	vsi_stats->tx_bytes = 0;
	vsi_stats->rx_packets = 0;
	vsi_stats->rx_bytes = 0;

	/* reset non-netdev (extended) stats */
	vsi->tx_restart = 0;
	vsi->tx_busy = 0;
	vsi->tx_linearize = 0;
	vsi->rx_buf_failed = 0;
	vsi->rx_page_failed = 0;

	rcu_read_lock();

	/* update Tx rings counters */
	ice_for_each_txq(vsi, i) {
		ring = READ_ONCE(vsi->tx_rings[i]);
		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
		vsi_stats->tx_packets += pkts;
		vsi_stats->tx_bytes += bytes;
		vsi->tx_restart += ring->tx_stats.restart_q;
		vsi->tx_busy += ring->tx_stats.tx_busy;
		vsi->tx_linearize += ring->tx_stats.tx_linearize;
	}

	/* update Rx rings counters */
	ice_for_each_rxq(vsi, i) {
		ring = READ_ONCE(vsi->rx_rings[i]);
		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
		vsi_stats->rx_packets += pkts;
		vsi_stats->rx_bytes += bytes;
		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
	}

	rcu_read_unlock();
}
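
/* READ_ONCE() on the ring pointers above forces a single load per
 * iteration, so the compiler cannot re-read a pointer that a concurrent
 * queue reconfiguration may be updating between the stats fetch and the
 * per-ring counter accesses.
 */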

/**
 * ice_update_vsi_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 */
static void ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
	struct ice_eth_stats *cur_es = &vsi->eth_stats;
	struct ice_pf *pf = vsi->back;

	if (test_bit(__ICE_DOWN, vsi->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* get stats as recorded by Tx/Rx rings */
	ice_update_vsi_ring_stats(vsi);

	/* get VSI stats as recorded by the hardware */
	ice_update_eth_stats(vsi);

	cur_ns->tx_errors = cur_es->tx_errors;
	cur_ns->rx_dropped = cur_es->rx_discards;
	cur_ns->tx_dropped = cur_es->tx_discards;
	cur_ns->multicast = cur_es->rx_multicast;

	/* update some more netdev stats if this is main VSI */
	if (vsi->type == ICE_VSI_PF) {
		cur_ns->rx_crc_errors = pf->stats.crc_errors;
		cur_ns->rx_errors = pf->stats.crc_errors +
				    pf->stats.illegal_bytes;
		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
	}
}

/**
 * ice_update_pf_stats - Update PF port stats counters
 * @pf: PF whose stats need to be updated
 */
static void ice_update_pf_stats(struct ice_pf *pf)
{
	struct ice_hw_port_stats *prev_ps, *cur_ps;
	struct ice_hw *hw = &pf->hw;
	u8 pf_id;

	prev_ps = &pf->stats_prev;
	cur_ps = &pf->stats;
	pf_id = hw->pf_id;

	ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.rx_bytes,
			  &cur_ps->eth.rx_bytes);

	ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.rx_unicast,
			  &cur_ps->eth.rx_unicast);

	ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.rx_multicast,
			  &cur_ps->eth.rx_multicast);

	ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast,
			  &cur_ps->eth.rx_broadcast);

	ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.tx_bytes,
			  &cur_ps->eth.tx_bytes);

	ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.tx_unicast,
			  &cur_ps->eth.tx_unicast);

	ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.tx_multicast,
			  &cur_ps->eth.tx_multicast);

	ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast,
			  &cur_ps->eth.tx_broadcast);

	ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,
			  &prev_ps->tx_dropped_link_down,
			  &cur_ps->tx_dropped_link_down);

	ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->rx_size_64,
			  &cur_ps->rx_size_64);

	ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->rx_size_127,
			  &cur_ps->rx_size_127);

	ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->rx_size_255,
			  &cur_ps->rx_size_255);

	ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->rx_size_511,
			  &cur_ps->rx_size_511);

	ice_stat_update40(hw, GLPRT_PRC1023H(pf_id),
			  GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);

	ice_stat_update40(hw, GLPRT_PRC1522H(pf_id),
			  GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);

	ice_stat_update40(hw, GLPRT_PRC9522H(pf_id),
			  GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);

	ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->tx_size_64,
			  &cur_ps->tx_size_64);

	ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->tx_size_127,
			  &cur_ps->tx_size_127);

	ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->tx_size_255,
			  &cur_ps->tx_size_255);

	ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->tx_size_511,
			  &cur_ps->tx_size_511);

	ice_stat_update40(hw, GLPRT_PTC1023H(pf_id),
			  GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);

	ice_stat_update40(hw, GLPRT_PTC1522H(pf_id),
			  GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);

	ice_stat_update40(hw, GLPRT_PTC9522H(pf_id),
			  GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);

	ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);

	ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);

	ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);

	ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);

	ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
			  &prev_ps->crc_errors, &cur_ps->crc_errors);

	ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);

	ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->mac_local_faults,
			  &cur_ps->mac_local_faults);

	ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->mac_remote_faults,
			  &cur_ps->mac_remote_faults);

	ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);

	ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);

	ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);

	ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);

	ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);

	pf->stat_prev_loaded = true;
}
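
/* All of the GLPRT_* registers read above are per-port counters indexed by
 * the PF id, as opposed to the per-VSI GLV_* counters used in
 * ice_update_eth_stats(); both go through the same first-read-as-offset
 * scheme so that reported values count from driver load.
 */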

/**
 * ice_get_stats64 - get statistics for network device structure
 * @netdev: network interface device structure
 * @stats: main device statistics structure
 */
static
void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct rtnl_link_stats64 *vsi_stats;
	struct ice_vsi *vsi = np->vsi;

	vsi_stats = &vsi->net_stats;

	if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq)
		return;
	/* netdev packet/byte stats come from ring counters. These are obtained
	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
	 */
	ice_update_vsi_ring_stats(vsi);
	stats->tx_packets = vsi_stats->tx_packets;
	stats->tx_bytes = vsi_stats->tx_bytes;
	stats->rx_packets = vsi_stats->rx_packets;
	stats->rx_bytes = vsi_stats->rx_bytes;

	/* The rest of the stats can be read from the hardware but instead we
	 * just return values that the watchdog task has already obtained from
	 * the hardware.
	 */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}

/**
 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: VSI having NAPI disabled
 */
static void ice_napi_disable_all(struct ice_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
		napi_disable(&vsi->q_vectors[q_idx]->napi);
}

/**
 * ice_down - Shutdown the connection
 * @vsi: The VSI being stopped
 */
int ice_down(struct ice_vsi *vsi)
{
	int i, err;

	/* Caller of this function is expected to set the
	 * vsi->state __ICE_DOWN bit
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}

	ice_vsi_dis_irq(vsi);
	err = ice_vsi_stop_tx_rx_rings(vsi);
	ice_napi_disable_all(vsi);

	ice_for_each_txq(vsi, i)
		ice_clean_tx_ring(vsi->tx_rings[i]);

	ice_for_each_rxq(vsi, i)
		ice_clean_rx_ring(vsi->rx_rings[i]);

	if (err)
		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	return err;
}

/**
 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
{
	int i, err = 0;

	if (!vsi->num_txq) {
		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	ice_for_each_txq(vsi, i) {
		err = ice_setup_tx_ring(vsi->tx_rings[i]);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
{
	int i, err = 0;

	if (!vsi->num_rxq) {
		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	ice_for_each_rxq(vsi, i) {
		err = ice_setup_rx_ring(vsi->rx_rings[i]);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_req_irq - Request IRQ from the OS
 * @vsi: The VSI IRQ is being requested for
 * @basename: name for the vector
 *
 * Return 0 on success and a negative value on error
 */
static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
{
	struct ice_pf *pf = vsi->back;
	int err = -EINVAL;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		err = ice_vsi_req_irq_msix(vsi, basename);

	return err;
}

/**
 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
static void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			ice_free_tx_ring(vsi->tx_rings[i]);
}

/**
 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
static void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	ice_for_each_rxq(vsi, i)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			ice_free_rx_ring(vsi->rx_rings[i]);
}

/**
 * ice_vsi_open - Called when a network interface is made active
 * @vsi: the VSI to open
 *
 * Initialization of the VSI
 *
 * Returns 0 on success, negative value on error
 */
static int ice_vsi_open(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	int err;

	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	err = ice_vsi_cfg(vsi);
	if (err)
		goto err_setup_rx;

	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
	err = ice_vsi_req_irq(vsi, int_name);
	if (err)
		goto err_setup_rx;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
	if (err)
		goto err_set_qs;

	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
	if (err)
		goto err_set_qs;

	err = ice_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	ice_down(vsi);
err_set_qs:
	ice_vsi_free_irq(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}
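
/* The error labels above unwind in reverse order of setup: a failure at any
 * stage of ice_vsi_open() falls through the labels and frees only what was
 * already allocated (the fully-up state, then IRQs, then Rx rings, then Tx
 * rings), so a partial bring-up never leaks resources.
 */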

/**
 * ice_vsi_close - Shut down a VSI
 * @vsi: the VSI being shut down
 */
static void ice_vsi_close(struct ice_vsi *vsi)
{
	if (!test_and_set_bit(__ICE_DOWN, vsi->state))
		ice_down(vsi);

	ice_vsi_free_irq(vsi);
	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	pf = vsi->back;

	if (vsi->rss_hkey_user)
		devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
	if (vsi->rss_lut_user)
		devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
}

/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 */
static int ice_vsi_release(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	if (!vsi->back)
		return -ENODEV;
	pf = vsi->back;

	/* do not unregister and free netdevs while the driver is in the reset
	 * recovery pending state. Since reset/rebuild happens through the PF
	 * service task workqueue, it is not a good idea to unregister a netdev
	 * associated with the PF that is running the work queue items
	 * currently. This is done to avoid a check_flush_dependency() warning.
	 */
	if (vsi->netdev && !ice_is_reset_recovery_pending(pf->state)) {
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_rss_clean(vsi);

	/* Disable VSI and free resources */
	ice_vsi_dis_irq(vsi);
	ice_vsi_close(vsi);

	/* reclaim interrupt vectors back to PF */
	ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
	pf->num_avail_msix += vsi->num_q_vectors;

	ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
	ice_vsi_delete(vsi);
	ice_vsi_free_q_vectors(vsi);
	ice_vsi_clear_rings(vsi);

	ice_vsi_put_qs(vsi);
	pf->q_left_tx += vsi->alloc_txq;
	pf->q_left_rx += vsi->alloc_rxq;

	/* retain the SW VSI data structure since it is needed to unregister
	 * and free the VSI netdev when the PF is not in the reset recovery
	 * pending state, for example during rmmod.
	 */
	if (!ice_is_reset_recovery_pending(pf->state))
		ice_vsi_clear(vsi);

	return 0;
}

/**
 * ice_vsi_release_all - Delete all VSIs
 * @pf: PF from which all VSIs are being removed
 */
static void ice_vsi_release_all(struct ice_pf *pf)
{
	int err, i;

	if (!pf->vsi)
		return;

	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;

		err = ice_vsi_release(pf->vsi[i]);
		if (err)
			dev_dbg(&pf->pdev->dev,
				"Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
				i, err, pf->vsi[i]->vsi_num);
	}
}

/**
 * ice_dis_vsi - pause a VSI
 * @vsi: the VSI being paused
 */
static void ice_dis_vsi(struct ice_vsi *vsi)
{
	if (test_bit(__ICE_DOWN, vsi->state))
		return;

	set_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->netdev && netif_running(vsi->netdev) &&
	    vsi->type == ICE_VSI_PF) {
		rtnl_lock();
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
		rtnl_unlock();
	} else {
		ice_vsi_close(vsi);
	}
}

/**
 * ice_ena_vsi - resume a VSI
 * @vsi: the VSI being resumed
 */
static int ice_ena_vsi(struct ice_vsi *vsi)
{
	int err = 0;

	if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
		if (vsi->netdev && netif_running(vsi->netdev)) {
			rtnl_lock();
			err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
			rtnl_unlock();
		}

	return err;
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf)
{
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v]);
}

/**
 * ice_pf_ena_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 */
static int ice_pf_ena_all_vsi(struct ice_pf *pf)
{
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			if (ice_ena_vsi(pf->vsi[v]))
				return -EIO;

	return 0;
}

/**
 * ice_vsi_rebuild_all - rebuild all VSIs in pf
 * @pf: the PF
 */
static int ice_vsi_rebuild_all(struct ice_pf *pf)
{
	int i;

	/* loop through pf->vsi array and reinit the VSI if found */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		int err;

		if (!pf->vsi[i])
			continue;

		err = ice_vsi_rebuild(pf->vsi[i]);
		if (err) {
			dev_err(&pf->pdev->dev,
				"VSI at index %d rebuild failed\n",
				pf->vsi[i]->idx);
			return err;
		}

		dev_info(&pf->pdev->dev,
			 "VSI at index %d rebuilt. vsi_num = 0x%x\n",
			 pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
	}

	return 0;
}

/**
 * ice_rebuild - rebuild after reset
 * @pf: pf to rebuild
 */
static void ice_rebuild(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	struct ice_hw *hw = &pf->hw;
	enum ice_status ret;
	int err;

	if (test_bit(__ICE_DOWN, pf->state))
		goto clear_recovery;

	dev_dbg(dev, "rebuilding pf\n");

	ret = ice_init_all_ctrlq(hw);
	if (ret) {
		dev_err(dev, "control queues init failed %d\n", ret);
		goto err_init_ctrlq;
	}

	ret = ice_clear_pf_cfg(hw);
	if (ret) {
		dev_err(dev, "clear PF configuration failed %d\n", ret);
		goto err_init_ctrlq;
	}

	ice_clear_pxe_mode(hw);

	ret = ice_get_caps(hw);
	if (ret) {
		dev_err(dev, "ice_get_caps failed %d\n", ret);
		goto err_init_ctrlq;
	}

	err = ice_sched_init_port(hw->port_info);
	if (err)
		goto err_sched_init_port;

	err = ice_vsi_rebuild_all(pf);
	if (err) {
		dev_err(dev, "ice_vsi_rebuild_all failed\n");
		goto err_vsi_rebuild;
	}

	ret = ice_replay_all_fltr(&pf->hw);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"error replaying switch filter rules\n");
		goto err_vsi_rebuild;
	}

	/* start misc vector */
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		err = ice_req_irq_msix_misc(pf);
		if (err) {
			dev_err(dev, "misc vector setup failed: %d\n", err);
			goto err_vsi_rebuild;
		}
	}

	/* restart the VSIs that were rebuilt and running before the reset */
	err = ice_pf_ena_all_vsi(pf);
	if (err) {
		dev_err(&pf->pdev->dev, "error enabling VSIs\n");
		/* no need to disable VSIs in the tear down path of
		 * ice_rebuild() since it is already taken care of in
		 * ice_vsi_open()
		 */
		goto err_vsi_rebuild;
	}

	/* if we get here, reset flow is successful */
	clear_bit(__ICE_RESET_FAILED, pf->state);
	return;

err_vsi_rebuild:
	ice_vsi_release_all(pf);
err_sched_init_port:
	ice_sched_cleanup_all(hw);
err_init_ctrlq:
	ice_shutdown_all_ctrlq(hw);
	set_bit(__ICE_RESET_FAILED, pf->state);
clear_recovery:
	/* set this bit in PF state to control service task scheduling */
	set_bit(__ICE_NEEDS_RESTART, pf->state);
	dev_err(dev, "Rebuild failed, unload and reload driver\n");
}
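
/* Each goto target in ice_rebuild() tears down only the stages that had
 * completed, in reverse order, and every failure path falls through to
 * clear_recovery so that __ICE_NEEDS_RESTART is set and the service task
 * stops scheduling work against hardware that could not be rebuilt.
 */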

/**
 * ice_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u8 count = 0;

	if (new_mtu == netdev->mtu) {
		netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
		return 0;
	}

	if (new_mtu < netdev->min_mtu) {
		netdev_err(netdev, "new mtu invalid. min_mtu is %d\n",
			   netdev->min_mtu);
		return -EINVAL;
	} else if (new_mtu > netdev->max_mtu) {
		netdev_err(netdev, "new mtu invalid. max_mtu is %d\n",
			   netdev->max_mtu);
		return -EINVAL;
	}

	/* if a reset is in progress, wait for some time for it to complete */
	do {
		if (ice_is_reset_recovery_pending(pf->state)) {
			count++;
			usleep_range(1000, 2000);
		} else {
			break;
		}
	} while (count < 100);

	if (count == 100) {
		netdev_err(netdev, "can't change mtu. Device is busy\n");
		return -EBUSY;
	}

	netdev->mtu = new_mtu;

	/* if VSI is up, bring it down and then back up */
	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
		int err;

		err = ice_down(vsi);
		if (err) {
			netdev_err(netdev, "change mtu if_up err %d\n", err);
			return err;
		}

		err = ice_up(vsi);
		if (err) {
			netdev_err(netdev, "change mtu if_up err %d\n", err);
			return err;
		}
	}

	netdev_dbg(netdev, "changed mtu to %d\n", new_mtu);
	return 0;
}
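
/* The reset-wait loop above sleeps 1-2 ms per iteration with a cap of 100
 * iterations, so an MTU change blocks for at most roughly 200 ms before
 * giving up with -EBUSY rather than racing a reset that is still in
 * progress.
 */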

/**
 * ice_set_rss - Set RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;

	if (seed) {
		struct ice_aqc_get_set_rss_keys *buf =
				(struct ice_aqc_get_set_rss_keys *)seed;

		status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf);

		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot set RSS key, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	if (lut) {
		status = ice_aq_set_rss_lut(hw, vsi->vsi_num,
					    vsi->rss_lut_type, lut, lut_size);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot set RSS lut, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	return 0;
}

/**
 * ice_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;

	if (seed) {
		struct ice_aqc_get_set_rss_keys *buf =
				(struct ice_aqc_get_set_rss_keys *)seed;

		status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot get RSS key, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	if (lut) {
		status = ice_aq_get_rss_lut(hw, vsi->vsi_num,
					    vsi->rss_lut_type, lut, lut_size);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot get RSS lut, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	return 0;
}
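
/* Both helpers above treat @seed and @lut as optional: passing NULL for one
 * of them leaves that half of the RSS configuration untouched, which lets
 * callers update the hash key and the indirection table independently. For
 * example (hypothetical caller), updating only the key could look like:
 *
 *	err = ice_set_rss(vsi, key, NULL, 0);
 */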

/**
 * ice_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process id
 * @seq: RTNL message seq
 * @dev: the netdev being configured
 * @filter_mask: filter mask passed in
 * @nlflags: netlink flags passed in
 *
 * Return the bridge mode (VEB/VEPA)
 */
static int
ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
		   struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 bmode;

	bmode = pf->first_sw->bridge_mode;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
				       filter_mask, NULL);
}

/**
 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
 * @vsi: Pointer to VSI structure
 * @bmode: Hardware bridge mode (VEB/VEPA)
 *
 * Returns 0 on success, negative on failure
 */
static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct ice_aqc_vsi_props *vsi_props;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx ctxt = { 0 };
	enum ice_status status;

	vsi_props = &vsi->info;
	ctxt.info = vsi->info;

	if (bmode == BRIDGE_MODE_VEB)
		/* change from VEPA to VEB mode */
		ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	else
		/* change from VEB to VEPA mode */
		ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	ctxt.vsi_num = vsi->vsi_num;
	ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
	status = ice_aq_update_vsi(hw, &ctxt, NULL);
	if (status) {
		dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
			bmode, status, hw->adminq.sq_last_status);
		return -EIO;
	}

	/* Update sw flags for book keeping */
	vsi_props->sw_flags = ctxt.info.sw_flags;

	return 0;
}
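
/* ICE_AQ_VSI_SW_FLAG_ALLOW_LB is the per-VSI knob behind the bridge mode:
 * with local loopback allowed, the embedded switch may forward traffic
 * between VSIs itself (VEB behavior); with it cleared, traffic is always
 * sent out to the external switch (VEPA behavior).
 */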

/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up to. Iterates through the PF VSI list and sets the loopback mode
 * (if not already set) for all VSIs connected to this switch, and also updates
 * the unicast switch filter rules for the corresponding switch of the netdev.
 */
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
		   u16 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_pf *pf = np->vsi->back;
	struct nlattr *attr, *br_spec;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	struct ice_sw *pf_sw;
	int rem, v, err = 0;

	pf_sw = pf->first_sw;
	/* find the attribute in the netlink message */
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;
		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;
		/* Continue if bridge mode is not being flipped */
		if (mode == pf_sw->bridge_mode)
			continue;
		/* Iterate through the PF VSI list and update the loopback
		 * mode of the VSI
		 */
		ice_for_each_vsi(pf, v) {
			if (!pf->vsi[v])
				continue;
			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
			if (err)
				return err;
		}

		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
		/* Update the unicast switch filter rules for the corresponding
		 * switch of the netdev
		 */
		status = ice_update_sw_rule_bridge_mode(hw);
		if (status) {
			netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n",
				   mode, status, hw->adminq.sq_last_status);
			/* revert hw->evb_veb */
			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
			return -EIO;
		}

		pf_sw->bridge_mode = mode;
	}

	return err;
}

/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 */
static void ice_tx_timeout(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u32 head, val = 0, i;
	int hung_queue = -1;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_txq; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->q_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
				       netdev->watchdog_timeo)))
		return;

	if (tx_ring) {
		head = tx_ring->next_to_clean;
		/* Read interrupt register */
		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
			val = rd32(&pf->hw,
				   GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));

		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__ICE_PFR_REQ, pf->state);
		break;
	case 2:
		set_bit(__ICE_CORER_REQ, pf->state);
		break;
	case 3:
		set_bit(__ICE_GLOBR_REQ, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
		set_bit(__ICE_DOWN, pf->state);
		set_bit(__ICE_NEEDS_RESTART, vsi->state);
		set_bit(__ICE_SERVICE_DIS, pf->state);
		break;
	}

	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
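
/* Recovery escalates one level per timeout within the 20 second window: a
 * PF reset is requested first, then a CORE reset, then a global reset; only
 * after all three levels have been tried is the device declared
 * unrecoverable and the service task disabled.
 */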

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int err;

	if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	err = ice_vsi_open(vsi);

	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);
	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
static int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	ice_vsi_close(vsi);

	return 0;
}

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_header(skb) - skb->data;
	if (len & ~(ICE_TXD_MACLEN_MAX))
		goto out_rm_features;

	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(ICE_TXD_IPLEN_MAX))
		goto out_rm_features;

	if (skb->encapsulation) {
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(ICE_TXD_L4LEN_MAX))
			goto out_rm_features;

		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(ICE_TXD_IPLEN_MAX))
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
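
/* The "len & ~(ICE_TXD_*LEN_MAX)" tests above rely on those limits being
 * contiguous low-bit masks: a header length with any bit set outside the
 * mask cannot be encoded in the Tx descriptor's length fields, so checksum
 * and GSO offload are stripped and the stack falls back to software for
 * that frame.
 */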

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_features_check = ice_features_check,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
	.ndo_tx_timeout = ice_tx_timeout,
};