// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ice.h"

#define DRV_VERSION	"ice-0.7.0-k"
#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;

static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);
static int ice_vsi_release(struct ice_vsi *vsi);
static void ice_vsi_release_all(struct ice_pf *pf);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u32 ice_get_tx_pending(struct ice_ring *ring)
{
	u32 head, tail;

	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

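/* Illustrative worked example, not part of the original source: with
 * ring->count = 256, head (next_to_clean) = 250 and a hardware tail of 10,
 * the outstanding descriptors wrap past the end of the ring, so the pending
 * count is tail + count - head = 10 + 256 - 250 = 16.
 */
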
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	int packets, i, v, v_idx;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			int itr = ICE_ITR_NONE;

			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				v_idx = tx_ring->q_vector->v_idx;
				wr32(&pf->hw,
				     GLINT_DYN_CTL(vsi->base_vector + v_idx),
				     (itr << GLINT_DYN_CTL_ITR_INDX_S) |
				     GLINT_DYN_CTL_SWINT_TRIG_M |
				     GLINT_DYN_CTL_INTENA_MSK_M);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

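/* Illustrative example, not part of the original source: a queue that sent
 * 100 packets and still has descriptors pending stores prev_pkt = 100; if
 * the next pass still reads 100, the queue is assumed stalled and a software
 * interrupt is fired. An idle queue stores the -1 sentinel instead, so it is
 * never mistaken for a hung one.
 */
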
/**
 * ice_get_free_slot - get the next free (NULL) slot index in the array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use
 * this function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}

/**
 * ice_search_res - Search the tracker for a block of resources
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or -ENOMEM for error
 */
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
	int start = res->search_hint;
	int end = start;

	id |= ICE_RES_VALID_BIT;

	do {
		/* skip already allocated entries */
		if (res->list[end++] & ICE_RES_VALID_BIT) {
			start = end;
			if ((start + needed) > res->num_entries)
				break;
		}

		if (end == (start + needed)) {
			int i = start;

			/* there was enough, so assign it to the requestor */
			while (i != end)
				res->list[i++] = id;

			if (end == res->num_entries)
				end = 0;

			res->search_hint = end;
			return start;
		}
	} while (end < res->num_entries);

	return -ENOMEM;
}

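/* Illustrative example, not part of the original source: on an empty
 * 8-entry tracker, ice_search_res(res, 3, 0x3) tags list[0..2] with
 * (0x3 | ICE_RES_VALID_BIT), advances search_hint to 3 and returns base
 * index 0; ice_free_res(res, 0, 0x3) later clears entries while they still
 * equal that tagged owner id.
 */
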
/**
 * ice_get_res - get a block of resources
 * @pf: board private structure
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or -ENOMEM for error
 * The search_hint trick and lack of advanced fit-finding only works
 * because we're highly likely to have all the same sized requests.
 * Linear search time and any fragmentation should be minimal.
 */
static int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
	int ret;

	if (!res || !pf)
		return -EINVAL;

	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
		dev_err(&pf->pdev->dev,
			"param err: needed=%d, num_entries = %d id=0x%04x\n",
			needed, res->num_entries, id);
		return -EINVAL;
	}

	/* search based on search_hint */
	ret = ice_search_res(res, needed, id);

	if (ret < 0) {
		/* previous search failed. Reset search hint and try again */
		res->search_hint = 0;
		ret = ice_search_res(res, needed, id);
	}

	return ret;
}

/**
 * ice_free_res - free a block of resources
 * @res: pointer to the resource
 * @index: starting index previously returned by ice_get_res
 * @id: identifier to track owner
 *
 * Returns number of resources freed
 */
static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
	int count = 0;
	int i;

	if (!res || index >= res->num_entries)
		return -EINVAL;

	id |= ICE_RES_VALID_BIT;
	for (i = index; i < res->num_entries && res->list[i] == id; i++) {
		res->list[i] = 0;
		count++;
	}

	return count;
}

/**
 * ice_add_mac_to_list - Add a MAC address filter entry to the list
 * @vsi: the VSI to be forwarded to
 * @add_list: pointer to the list which contains MAC filter entries
 * @macaddr: the MAC address to be added.
 *
 * Adds MAC address filter entry to the temp list
 *
 * Returns 0 on success or -ENOMEM on failure.
 */
static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
			       const u8 *macaddr)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;

	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src = vsi->vsi_num;
	tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
	ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, add_list);

	return 0;
}

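/* Illustrative example, not part of the original source: for macaddr
 * 00:11:22:33:44:55 this builds a Tx-flagged ICE_SW_LKUP_MAC entry whose
 * action is ICE_FWD_TO_VSI, i.e. "forward frames with this MAC to this
 * VSI", and queues it on add_list for a later ice_add_mac() call.
 */
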
/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in-kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in-kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_free_fltr_list - free filter lists helper
 * @dev: pointer to the device struct
 * @h: pointer to the list head to be freed
 *
 * Helper function to free filter lists previously created using
 * ice_add_mac_to_list
 */
static void ice_free_fltr_list(struct device *dev, struct list_head *h)
{
	struct ice_fltr_list_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, h, list_entry) {
		list_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
	ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add MAC addresses in the sync list */
	status = ice_add_mac(hw, &vsi->tmp_sync_list);
	ice_free_fltr_list(dev, &vsi->tmp_sync_list);
	if (status) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev,
				    "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI)
		netdev_warn(netdev, "Unsupported configuration\n");

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply TX filter rule to get traffic from VMs */
			status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
						  ICE_FLTR_TX);
			if (status) {
				netdev_err(netdev, "Error setting default VSI %i tx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
			/* Apply RX filter rule to get traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error setting default VSI %i rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		} else {
			/* Clear TX filter rule to stop traffic from VMs */
			status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
						  ICE_FLTR_TX);
			if (status) {
				netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
			/* Clear RX filter to remove traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
exit:
	clear_bit(__ICE_CFG_BUSY, vsi->state);
	return err;
}

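/* Illustrative example, not part of the original source: changed_flags is an
 * XOR of the cached and current netdev flags, so if userspace just turned
 * promiscuous mode on, (changed_flags & IFF_PROMISC) is set while
 * (vsi->current_netdev_flags & IFF_PROMISC) reports the new state, and the
 * default-VSI rules above are applied rather than cleared.
 */
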
/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	for (v = 0; v < pf->num_alloc_vsi; v++)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_is_reset_recovery_pending - check if reset recovery is pending
 * @state: PF state field
 */
static bool ice_is_reset_recovery_pending(unsigned long *state)
{
	return test_bit(__ICE_RESET_RECOVERY_PENDING, state);
}

/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf);

	ice_shutdown_all_ctrlq(hw);

	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = &pf->pdev->dev;
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
	WARN_ON(in_interrupt());

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. Set pending bit here which otherwise gets set in the
	 * interrupt handler.
	 */
	if (reset_type == ICE_RESET_PFR)
		set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(__ICE_RESET_FAILED, pf->state);
		clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		ice_rebuild(pf);
		clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and __ICE_RESET_RECOVERY_PENDING. So, if the latter bit is set,
	 * prepare for the pending reset if not already done (for PF
	 * software-initiated global resets the software should already be
	 * prepared for it, as indicated by __ICE_PREPARED_FOR_RESET; for
	 * global resets initiated by firmware or software on other PFs, that
	 * bit is not set, so prepare for the reset now), poll for reset done,
	 * rebuild and return.
	 */
	if (ice_is_reset_recovery_pending(pf->state)) {
		clear_bit(__ICE_GLOBR_RECV, pf->state);
		clear_bit(__ICE_CORER_RECV, pf->state);
		if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
			ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(__ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		}
		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_and_clear_bit(__ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_and_clear_bit(__ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__ICE_DOWN, pf->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	const char *speed;
	const char *fc;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "RX/TX";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "TX";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "Unknown";
		break;
	}

	netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
		    speed, fc);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw),
			"Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw),
			"Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (!vsi->netdev) {
			dev_dbg(&vsi->back->pdev->dev,
				"vsi->netdev is not initialized!\n");
			return;
		}
		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 *
 * Returns -EIO if ice_get_link_status() fails
 * Returns 0 on success
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
{
	u8 new_link_speed, old_link_speed;
	struct ice_phy_info *phy_info;
	bool new_link_same_as_old;
	bool new_link, old_link;
	u8 lport;
	int v;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;
	/* Force ice_get_link_status() to update link info */
	phy_info->get_link_info = true;

	old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	lport = pi->lport;
	if (ice_get_link_status(pi, &new_link)) {
		dev_dbg(&pf->pdev->dev,
			"Could not get link status for port %d\n", lport);
		return -EIO;
	}

	new_link_speed = phy_info->link_info.link_speed;

	new_link_same_as_old = (new_link == old_link &&
				new_link_speed == old_link_speed);

	ice_for_each_vsi(pf, v) {
		struct ice_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->port_info)
			continue;

		if (new_link_same_as_old &&
		    (test_bit(__ICE_DOWN, vsi->state) ||
		    new_link == netif_carrier_ok(vsi->netdev)))
			continue;

		if (vsi->port_info->lport == lport) {
			ice_print_link_msg(vsi, new_link);
			ice_vsi_link_event(vsi, new_link);
		}
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 *
 * Return -EINVAL if port_info is null
 * Return status on success
 */
static int ice_handle_link_event(struct ice_pf *pf)
{
	struct ice_port_info *port_info;
	int status;

	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info);
	if (status)
		dev_dbg(&pf->pdev->dev,
			"Could not process link event, error %d\n", status);

	return status;
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(__ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
			 q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue VF Error detected\n", qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue VF Error detected\n", qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
				     GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(&pf->pdev->dev,
				"%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf))
				dev_err(&pf->pdev->dev,
					"Could not handle link event\n");
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		default:
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	devm_kfree(&pf->pdev->dev, event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

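/* Illustrative note, not part of the original source: the return value is
 * nonzero only when the loop consumed its full ICE_DFLT_IRQ_WORK budget with
 * messages still pending, which tells ice_clean_adminq_subtask() below to
 * leave __ICE_ADMINQ_EVENT_PENDING set and come back later.
 */
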
/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

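/* Illustrative example, not part of the original source: if hardware has
 * advanced the receive-queue head register to 7 while next_to_clean is
 * still 5, two descriptors remain and ice_ctrlq_pending() returns true.
 */
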
/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
static void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool mdd_detected = false;
	u32 reg;

	if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		bool pf_mdd_detected = false;

		reg = rd32(hw, PF_MDET_TX_PQM);
		if (reg & PF_MDET_TX_PQM_VALID_M) {
			wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_TX_TCLAN);
		if (reg & PF_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_RX);
		if (reg & PF_MDET_RX_VALID_M) {
			wr32(hw, PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__ICE_NEEDS_RESTART, pf->state);
			ice_service_task_schedule(pf);
		}
	}

	/* re-enable MDD interrupt cause */
	clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, PFINT_OICR_ENA);
	reg |= PFINT_OICR_MAL_DETECT_M;
	wr32(hw, PFINT_OICR_ENA, reg);
	ice_flush(hw);
}

/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_recovery_pending(pf->state) ||
	    test_bit(__ICE_SUSPENDED, pf->state) ||
	    test_bit(__ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_watchdog_subtask(pf);
	ice_clean_adminq_subtask(pf);

	/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}

/**
 * ice_set_ctrlq_len - helper function to set controlq length
 * @hw: pointer to the hw instance
 */
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
}

/**
 * ice_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 */
static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	struct ice_q_vector *q_vector =
		container_of(notify, struct ice_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}

/**
 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 */
static void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val;
	int i;

	/* disable interrupt causation from each queue */
	if (vsi->tx_rings) {
		ice_for_each_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				u16 reg;

				reg = vsi->tx_rings[i]->reg_idx;
				val = rd32(hw, QINT_TQCTL(reg));
				val &= ~QINT_TQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_TQCTL(reg), val);
			}
		}
	}

	if (vsi->rx_rings) {
		ice_for_each_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				u16 reg;

				reg = vsi->rx_rings[i]->reg_idx;
				val = rd32(hw, QINT_RQCTL(reg));
				val &= ~QINT_RQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_RQCTL(reg), val);
			}
		}
	}

	/* disable each interrupt */
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, GLINT_DYN_CTL(i), 0);

		ice_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	}
}

/**
 * ice_vsi_ena_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 */
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		int i;

		for (i = 0; i < vsi->num_q_vectors; i++)
			ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
	}

	ice_flush(hw);
	return 0;
}

/**
 * ice_vsi_delete - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
static void ice_vsi_delete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx ctxt;
	enum ice_status status;

	ctxt.vsi_num = vsi->vsi_num;

	memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));

	status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL);
	if (status)
		dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
			vsi->vsi_num);
}

/**
 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
 * @vsi: the VSI being configured
 * @basename: name for the vector
 */
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = devm_request_irq(&pf->pdev->dev,
				       pf->msix_entries[base + vector].vector,
				       vsi->irq_handler, 0, q_vector->name,
				       q_vector);
		if (err) {
			netdev_err(vsi->netdev,
				   "MSIX request_irq failed, error: %d\n", err);
			goto free_q_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = ice_irq_affinity_notify;
		q_vector->affinity_notify.release = ice_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);

		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_q_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = cap->rss_table_size;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		break;
	}
}

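/* Illustrative worked example, not part of the original source: on a 16-CPU
 * system reporting rss_table_entry_width = 7, BIT(7) = 128, so rss_size
 * becomes min(16, 128) = 16 while rss_table_size follows the capability
 * reported by firmware.
 */
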
/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, numq_tc;
	u16 pow = 0, max_rss = 0, qcount;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	bool ena_tc0 = false;
	int i;

	/* at least TC0 should be enabled by default */
	if (vsi->tc_cfg.numtc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
			ena_tc0 = true;
	} else {
		ena_tc0 = true;
	}

	if (ena_tc0) {
		vsi->tc_cfg.numtc++;
		vsi->tc_cfg.ena_tc |= 1;
	}

	numq_tc = qcount_rx / vsi->tc_cfg.numtc;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for queue offset for TC0, next 4 bits for
	 * number of queues allocated to TC0. The number of queues is a
	 * power-of-2.
	 *
	 * If a TC is not enabled, its queue offset is set to 0 and one queue
	 * is allocated; this way, traffic for the given TC will be sent to
	 * the default queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */

	/* qcount will change if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
		if (vsi->type == ICE_VSI_PF)
			max_rss = ICE_MAX_LG_RSS_QS;
		else
			max_rss = ICE_MAX_SMALL_RSS_QS;

		qcount = min_t(int, numq_tc, max_rss);
		qcount = min_t(int, qcount, vsi->rss_size);
	} else {
		qcount = numq_tc;
	}

	/* find the (rounded up) power-of-2 of qcount */
	pow = order_base_2(qcount);

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount = 1;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount = qcount;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
		offset += qcount;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	vsi->num_txq = qcount_tx;
	vsi->num_rxq = offset;

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}

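/* Illustrative worked example, not part of the original source: a PF VSI
 * with 4 Rx queues and only TC0 enabled yields numq_tc = 4 and pow =
 * order_base_2(4) = 2, so TC0's map word is
 * (0 << ICE_AQ_VSI_TC_Q_OFFSET_S) | (2 << ICE_AQ_VSI_TC_Q_NUM_S): queues
 * start at offset 0 and span 2^2 = 4 queues. Disabled TCs get offset 0 and
 * a single default queue.
 */
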
/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSI's should be allocated from shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;

	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
	 * packets untagged/tagged.
	 */
	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
				  ICE_AQ_VSI_VLAN_MODE_M) >>
				 ICE_AQ_VSI_VLAN_MODE_S);

	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No Outer tag support, so outer_tag_flags remains zero */
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	default:
		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
			       ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi)
{
	struct ice_vsi_ctx ctxt = { 0 };
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int ret;

	switch (vsi->type) {
	case ICE_VSI_PF:
		ctxt.flags = ICE_AQ_VSI_TYPE_PF;
		break;
	default:
		return -ENODEV;
	}

	ice_set_dflt_vsi_ctx(&ctxt);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_set_rss_vsi_ctx(&ctxt, vsi);

	ctxt.info.sw_id = vsi->port_info->sw_id;
	ice_vsi_setup_q_map(vsi, &ctxt);

	ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Add VSI failed, err %d\n", ret);
		return -EIO;
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt.info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt.vsi_num;

	return ret;
}

/**
 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
 * @vsi: the VSI being cleaned up
 */
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u16 vector = vsi->base_vector;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0;
	u32 rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
		wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
			rxq++;
		}
	}

	ice_flush(hw);
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				vsi->tx_rings[i] = NULL;
			}
		}
	}
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->alloc_rxq; i++) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				vsi->rx_rings[i] = NULL;
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	/* Allocate tx_rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		vsi->tx_rings[i] = ring;
	}

	/* Allocate rx_rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 */
static void ice_vsi_free_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		int i;

		if (!vsi->q_vectors || !vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;
			int irq_num;

			irq_num = pf->msix_entries[vector].vector;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !(vsi->q_vectors[i]->num_ring_tx ||
			      vsi->q_vectors[i]->num_ring_rx))
				continue;

			/* clear the affinity notifier in the IRQ descriptor */
			irq_set_affinity_notifier(irq_num, NULL);

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(irq_num, NULL);
			synchronize_irq(irq_num);
			devm_free_irq(&pf->pdev->dev, irq_num,
				      vsi->q_vectors[i]);
		}
		ice_vsi_release_msix(vsi);
	}
}

/**
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 */
static void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u16 vector = vsi->base_vector;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0, rxq = 0;
	u8 itr_gran, itr;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		itr_gran = hw->itr_gran_200;

		if (q_vector->num_ring_rx) {
			q_vector->rx.itr =
				ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,
					   itr_gran);
			q_vector->rx.latency_range = ICE_LOW_LATENCY;
		}

		if (q_vector->num_ring_tx) {
			q_vector->tx.itr =
				ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,
					   itr_gran);
			q_vector->tx.latency_range = ICE_LOW_LATENCY;
		}
		wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
		wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);

		/* Both the Transmit Queue Interrupt Cause Control register
		 * and the Receive Queue Interrupt Cause Control register
		 * expect the MSIX_INDX field to be the vector index
		 * within the function space and not the absolute
		 * vector index across PF or across device.
		 * For SR-IOV VF VSIs the queue vector index always starts
		 * with 1 since the first vector index (0) is used for OICR
		 * in VF space. Since VMDq and other PF VSIs are within
		 * the PF function space, use the vector index that's
		 * tracked for this PF.
		 */
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			u32 val;

			itr = ICE_TX_ITR;
			val = QINT_TQCTL_CAUSE_ENA_M |
			      (itr << QINT_TQCTL_ITR_INDX_S) |
			      (vector << QINT_TQCTL_MSIX_INDX_S);
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			u32 val;

			itr = ICE_RX_ITR;
			val = QINT_RQCTL_CAUSE_ENA_M |
			      (itr << QINT_RQCTL_ITR_INDX_S) |
			      (vector << QINT_RQCTL_MSIX_INDX_S);
			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
			rxq++;
		}
	}

	ice_flush(hw);
}

/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}

/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 oicr, ena_mask;

	set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else
			pf->empr_count++;

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 * We also make note of which reset happened so that peer
		 * devices/drivers can be informed.
		 */
		if (!test_and_set_bit(__ICE_RESET_RECOVERY_PENDING,
				      pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(__ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(__ICE_GLOBR_RECV, pf->state);
			else
				set_bit(__ICE_EMPR_RECV, pf->state);

			/* There are a couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * __ICE_RESET_RECOVERY_PENDING in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_HMC_ERR_M) {
		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
		dev_dbg(&pf->pdev->dev,
			"HMC Error interrupt - info 0x%x, data 0x%x\n",
			rd32(hw, PFHMC_ERRORINFO),
			rd32(hw, PFHMC_ERRORDATA));
	}

	/* Report and mask off any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
			oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
		if (oicr & (PFINT_OICR_PE_CRITERR_M |
			    PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(__ICE_PFR_REQ, pf->state);
			ice_service_task_schedule(pf);
		}
		ena_mask &= ~oicr;
	}
	ret = IRQ_HANDLED;

	/* re-enable interrupt causes that are not handled during this pass */
	wr32(hw, PFINT_OICR_ENA, ena_mask);
	if (!test_bit(__ICE_DOWN, pf->state)) {
		ice_service_task_schedule(pf);
		ice_irq_dynamic_ena(hw, NULL, NULL);
	}

	return ret;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	int tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assigning remaining rings count to VSIs num queue value */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		int tx_rings_per_v, rx_rings_per_v, q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.ring = NULL;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.ring = NULL;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.ring;
			q_vector->rx.ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}

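/* Illustrative worked example, not part of the original source: mapping 9 Tx
 * rings onto 4 vectors assigns DIV_ROUND_UP(9, 4) = 3 rings to vector 0,
 * then DIV_ROUND_UP(6, 3) = 2, DIV_ROUND_UP(4, 2) = 2 and
 * DIV_ROUND_UP(2, 1) = 2 to vectors 1-3, so the remainder lands on the
 * earliest vectors and no vector is left empty.
 */
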
/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;

	switch (vsi->type) {
	case ICE_VSI_PF:
		vsi->alloc_txq = pf->num_lan_tx;
		vsi->alloc_rxq = pf->num_lan_rx;
		vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
		vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
		break;
	default:
		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		break;
	}
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
{
	struct ice_pf *pf = vsi->back;

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				     sizeof(struct ice_ring *), GFP_KERNEL);
	if (!vsi->tx_rings)
		goto err_txrings;

	vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				     sizeof(struct ice_ring *), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rxrings;

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
					      vsi->num_q_vectors,
					      sizeof(struct ice_q_vector *),
					      GFP_KERNEL);
		if (!vsi->q_vectors)
			goto err_vectors;
	}

	return 0;

err_vectors:
	devm_kfree(&pf->pdev->dev, vsi->rx_rings);
err_rxrings:
	devm_kfree(&pf->pdev->dev, vsi->tx_rings);
err_txrings:
	return -ENOMEM;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc - Allocates the next available struct ice_vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->type = type;
	vsi->back = pf;
	set_bit(__ICE_DOWN, vsi->state);
	vsi->idx = pf->next_vsi;
	vsi->work_lmt = ICE_DFLT_IRQ_WORK;

	ice_vsi_set_num_qs(vsi);

	switch (vsi->type) {
	case ICE_VSI_PF:
		if (ice_vsi_alloc_arrays(vsi, true))
			goto err_rings;

		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		goto unlock_pf;
	}

	/* fill VSI slot in the PF struct */
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);
	goto unlock_pf;

err_rings:
	devm_kfree(&pf->pdev->dev, vsi);
	vsi = NULL;
unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}

/**
 * ice_free_irq_msix_misc - Unroll misc vector setup
 * @pf: board private structure
 */
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
	/* disable OICR interrupt */
	wr32(&pf->hw, PFINT_OICR_ENA, 0);
	ice_flush(&pf->hw);

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
		devm_free_irq(&pf->pdev->dev,
			      pf->msix_entries[pf->oicr_idx].vector, pf);
	}

	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
}

2248 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
2249 * @pf: board private structure
2251 * This sets up the handler for MSIX 0, which is used to manage the
2252 * non-queue interrupts, e.g. AdminQ and errors. This is not used
2253 * when in MSI or Legacy interrupt mode.
2255 static int ice_req_irq_msix_misc(struct ice_pf *pf)
2257 struct ice_hw *hw = &pf->hw;
2258 int oicr_idx, err = 0;
2262 if (!pf->int_name[0])
2263 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2264 dev_driver_string(&pf->pdev->dev),
2265 dev_name(&pf->pdev->dev));
2267 /* Do not request IRQ but do enable OICR interrupt since settings are
2268 * lost during reset. Note that this function is called only during
2269 * rebuild path and not while reset is in progress.
2271 if (ice_is_reset_recovery_pending(pf->state))
2274 /* reserve one vector in irq_tracker for misc interrupts */
2275 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2279 pf->oicr_idx = oicr_idx;
2281 err = devm_request_irq(&pf->pdev->dev,
2282 pf->msix_entries[pf->oicr_idx].vector,
2283 ice_misc_intr, 0, pf->int_name, pf);
2285 dev_err(&pf->pdev->dev,
2286 "devm_request_irq for %s failed: %d\n",
2288 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2293 ice_ena_misc_vector(pf);
2295 val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2296 PFINT_OICR_CTL_CAUSE_ENA_M);
2297 wr32(hw, PFINT_OICR_CTL, val);
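/* For illustration, assuming the MSIX_INDX field sits in the low bits
 * of the register (which is what the mask-then-OR pattern above
 * implies): with oicr_idx == 0 the value written is simply
 * PFINT_OICR_CTL_CAUSE_ENA_M, i.e. "deliver OICR causes on MSI-X
 * vector 0 and enable them".
 */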
2299 /* This enables Admin queue Interrupt causes */
2300 val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2301 PFINT_FW_CTL_CAUSE_ENA_M);
2302 wr32(hw, PFINT_FW_CTL, val);
2304 itr_gran = hw->itr_gran_200;
2306 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
2307 ITR_TO_REG(ICE_ITR_8K, itr_gran));
2310 ice_irq_dynamic_ena(hw, NULL, NULL);
2316 * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
2317 * @vsi: the VSI getting queues
2319 * Return 0 on success and a negative value on error
2321 static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
2323 struct ice_pf *pf = vsi->back;
2324 int offset, ret = 0;
2326 mutex_lock(&pf->avail_q_mutex);
2327 /* look for contiguous block of queues for tx */
2328 offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
2329 0, vsi->alloc_txq, 0);
2330 if (offset < ICE_MAX_TXQS) {
2333 bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
2334 for (i = 0; i < vsi->alloc_txq; i++)
2335 vsi->txq_map[i] = i + offset;
2338 vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
2341 /* look for contiguous block of queues for rx */
2342 offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
2343 0, vsi->alloc_rxq, 0);
2344 if (offset < ICE_MAX_RXQS) {
2347 bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
2348 for (i = 0; i < vsi->alloc_rxq; i++)
2349 vsi->rxq_map[i] = i + offset;
2352 vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
2354 mutex_unlock(&pf->avail_q_mutex);
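/* Worked example with hypothetical state: alloc_txq = 4 and bits 0-2
 * of avail_txqs already set (a set bit marks a queue as taken).
 * bitmap_find_next_zero_area() then returns offset 3, bits 3-6 are
 * set, and txq_map becomes { 3, 4, 5, 6 }. If no 4-bit hole exists,
 * the returned offset lands at or beyond ICE_MAX_TXQS and the mapping
 * mode falls back to ICE_VSI_MAP_SCATTER instead.
 */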
2360 * ice_vsi_get_qs_scatter - Assign scattered queues to VSI
2361 * @vsi: the VSI getting queues
2363 * Return 0 on success and a negative value on error
2365 static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
2367 struct ice_pf *pf = vsi->back;
2370 mutex_lock(&pf->avail_q_mutex);
2372 if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
2373 for (i = 0; i < vsi->alloc_txq; i++) {
2374 index = find_next_zero_bit(pf->avail_txqs,
2375 ICE_MAX_TXQS, index);
2376 if (index < ICE_MAX_TXQS) {
2377 set_bit(index, pf->avail_txqs);
2378 vsi->txq_map[i] = index;
2380 goto err_scatter_tx;
2385 if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
2386 for (i = 0; i < vsi->alloc_rxq; i++) {
2387 index = find_next_zero_bit(pf->avail_rxqs,
2388 ICE_MAX_RXQS, index);
2389 if (index < ICE_MAX_RXQS) {
2390 set_bit(index, pf->avail_rxqs);
2391 vsi->rxq_map[i] = index;
2393 goto err_scatter_rx;
2398 mutex_unlock(&pf->avail_q_mutex);
2402 /* unflag any queues we have grabbed (i is failed position) */
2403 for (index = 0; index < i; index++) {
2404 clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
2405 vsi->rxq_map[index] = 0;
2409 /* i is either position of failed attempt or vsi->alloc_txq */
2410 for (index = 0; index < i; index++) {
2411 clear_bit(vsi->txq_map[index], pf->avail_txqs);
2412 vsi->txq_map[index] = 0;
2415 mutex_unlock(&pf->avail_q_mutex);
2420 * ice_vsi_get_qs - Assign queues from PF to VSI
2421 * @vsi: the VSI to assign queues to
2423 * Returns 0 on success and a negative value on error
2425 static int ice_vsi_get_qs(struct ice_vsi *vsi)
2429 vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
2430 vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
2432 /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping
2433 * modes individually to scatter if assigning contiguous queues to rx or tx fails
2436 ret = ice_vsi_get_qs_contig(vsi);
2438 if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
2439 vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
2440 ICE_MAX_SCATTER_TXQS);
2441 if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
2442 vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
2443 ICE_MAX_SCATTER_RXQS);
2444 ret = ice_vsi_get_qs_scatter(vsi);
2451 * ice_vsi_put_qs - Release queues from VSI to PF
2452 * @vsi: the VSI that's going to release queues
2454 static void ice_vsi_put_qs(struct ice_vsi *vsi)
2456 struct ice_pf *pf = vsi->back;
2459 mutex_lock(&pf->avail_q_mutex);
2461 for (i = 0; i < vsi->alloc_txq; i++) {
2462 clear_bit(vsi->txq_map[i], pf->avail_txqs);
2463 vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
2466 for (i = 0; i < vsi->alloc_rxq; i++) {
2467 clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
2468 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
2471 mutex_unlock(&pf->avail_q_mutex);
2475 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
2476 * @vsi: VSI having the memory freed
2477 * @v_idx: index of the vector to be freed
2479 static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
2481 struct ice_q_vector *q_vector;
2482 struct ice_ring *ring;
2484 if (!vsi->q_vectors[v_idx]) {
2485 dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
2489 q_vector = vsi->q_vectors[v_idx];
2491 ice_for_each_ring(ring, q_vector->tx)
2492 ring->q_vector = NULL;
2493 ice_for_each_ring(ring, q_vector->rx)
2494 ring->q_vector = NULL;
2496 /* only a VSI with an associated netdev is set up with NAPI */
2498 netif_napi_del(&q_vector->napi);
2500 devm_kfree(&vsi->back->pdev->dev, q_vector);
2501 vsi->q_vectors[v_idx] = NULL;
2505 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
2506 * @vsi: the VSI having memory freed
2508 static void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
2512 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
2513 ice_free_q_vector(vsi, v_idx);
2517 * ice_cfg_netdev - Setup the netdev flags
2518 * @vsi: the VSI being configured
2520 * Returns 0 on success, negative value on failure
2522 static int ice_cfg_netdev(struct ice_vsi *vsi)
2524 netdev_features_t csumo_features;
2525 netdev_features_t vlano_features;
2526 netdev_features_t dflt_features;
2527 netdev_features_t tso_features;
2528 struct ice_netdev_priv *np;
2529 struct net_device *netdev;
2530 u8 mac_addr[ETH_ALEN];
2532 netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
2533 vsi->alloc_txq, vsi->alloc_rxq);
2537 vsi->netdev = netdev;
2538 np = netdev_priv(netdev);
2541 dflt_features = NETIF_F_SG |
2545 csumo_features = NETIF_F_RXCSUM |
2549 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
2550 NETIF_F_HW_VLAN_CTAG_TX |
2551 NETIF_F_HW_VLAN_CTAG_RX;
2553 tso_features = NETIF_F_TSO;
2555 /* set features that user can change */
2556 netdev->hw_features = dflt_features | csumo_features |
2557 vlano_features | tso_features;
2559 /* enable features */
2560 netdev->features |= netdev->hw_features;
2561 /* encap and VLAN devices inherit default, csumo and tso features */
2562 netdev->hw_enc_features |= dflt_features | csumo_features |
2564 netdev->vlan_features |= dflt_features | csumo_features |
2567 if (vsi->type == ICE_VSI_PF) {
2568 SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
2569 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
2571 ether_addr_copy(netdev->dev_addr, mac_addr);
2572 ether_addr_copy(netdev->perm_addr, mac_addr);
2575 netdev->priv_flags |= IFF_UNICAST_FLT;
2577 /* assign netdev_ops */
2578 netdev->netdev_ops = &ice_netdev_ops;
2580 /* set up the watchdog timeout value to be 5 seconds */
2581 netdev->watchdog_timeo = 5 * HZ;
2583 ice_set_ethtool_ops(netdev);
2585 netdev->min_mtu = ETH_MIN_MTU;
2586 netdev->max_mtu = ICE_MAX_MTU;
2592 * ice_vsi_free_arrays - clean up vsi resources
2593 * @vsi: pointer to VSI being cleared
2594 * @free_qvectors: bool to specify if q_vectors should be deallocated
2596 static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
2598 struct ice_pf *pf = vsi->back;
2600 /* free the ring and vector containers */
2601 if (free_qvectors && vsi->q_vectors) {
2602 devm_kfree(&pf->pdev->dev, vsi->q_vectors);
2603 vsi->q_vectors = NULL;
2605 if (vsi->tx_rings) {
2606 devm_kfree(&pf->pdev->dev, vsi->tx_rings);
2607 vsi->tx_rings = NULL;
2609 if (vsi->rx_rings) {
2610 devm_kfree(&pf->pdev->dev, vsi->rx_rings);
2611 vsi->rx_rings = NULL;
2616 * ice_vsi_clear - clean up and deallocate the provided vsi
2617 * @vsi: pointer to VSI being cleared
2619 * This deallocates the vsi's queue resources, removes it from the PF's
2620 * VSI array if necessary, and deallocates the VSI
2622 * Returns 0 on success, negative on failure
2624 static int ice_vsi_clear(struct ice_vsi *vsi)
2626 struct ice_pf *pf = NULL;
2636 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
2637 dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
2642 mutex_lock(&pf->sw_mutex);
2643 /* updates the PF for this cleared vsi */
2645 pf->vsi[vsi->idx] = NULL;
2646 if (vsi->idx < pf->next_vsi)
2647 pf->next_vsi = vsi->idx;
2649 ice_vsi_free_arrays(vsi, true);
2650 mutex_unlock(&pf->sw_mutex);
2651 devm_kfree(&pf->pdev->dev, vsi);
2657 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
2658 * @vsi: the VSI being configured
2659 * @v_idx: index of the vector in the vsi struct
2661 * We allocate one q_vector. If allocation fails we return -ENOMEM.
2663 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
2665 struct ice_pf *pf = vsi->back;
2666 struct ice_q_vector *q_vector;
2668 /* allocate q_vector */
2669 q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
2673 q_vector->vsi = vsi;
2674 q_vector->v_idx = v_idx;
2675 /* only set affinity_mask if the CPU is online */
2676 if (cpu_online(v_idx))
2677 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
2680 netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
2682 /* tie q_vector and vsi together */
2683 vsi->q_vectors[v_idx] = q_vector;
2689 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
2690 * @vsi: the VSI being configured
2692 * We allocate one q_vector per queue interrupt. If allocation fails we return -ENOMEM.
2695 static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
2697 struct ice_pf *pf = vsi->back;
2698 int v_idx = 0, num_q_vectors;
2701 if (vsi->q_vectors[0]) {
2702 dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
2707 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
2708 num_q_vectors = vsi->num_q_vectors;
2714 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
2715 err = ice_vsi_alloc_q_vector(vsi, v_idx);
2724 ice_free_q_vector(vsi, v_idx);
2726 dev_err(&pf->pdev->dev,
2727 "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
2728 vsi->num_q_vectors, vsi->vsi_num, err);
2729 vsi->num_q_vectors = 0;
2734 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
2735 * @vsi: ptr to the VSI
2737 * This should only be called after ice_vsi_alloc() which allocates the
2738 * corresponding SW VSI structure and initializes num_queue_pairs for the
2739 * newly allocated VSI.
2741 * Returns 0 on success or negative on failure
2743 static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
2745 struct ice_pf *pf = vsi->back;
2746 int num_q_vectors = 0;
2748 if (vsi->base_vector) {
2749 dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
2750 vsi->vsi_num, vsi->base_vector);
2754 if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
2757 switch (vsi->type) {
2759 num_q_vectors = vsi->num_q_vectors;
2762 dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
2768 vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
2769 num_q_vectors, vsi->idx);
2771 if (vsi->base_vector < 0) {
2772 dev_err(&pf->pdev->dev,
2773 "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
2774 num_q_vectors, vsi->vsi_num, vsi->base_vector);
2782 * ice_fill_rss_lut - Fill the RSS lookup table with default values
2783 * @lut: Lookup table
2784 * @rss_table_size: Lookup table size
2785 * @rss_size: Range of queue number for hashing
2787 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
2791 for (i = 0; i < rss_table_size; i++)
2792 lut[i] = i % rss_size;
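/* Worked example: rss_table_size = 8 and rss_size = 3 yield
 *
 *	lut = { 0, 1, 2, 0, 1, 2, 0, 1 };
 *
 * spreading hash buckets round-robin across the first three Rx queues.
 */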
2796 * ice_vsi_cfg_rss - Configure RSS params for a VSI
2797 * @vsi: VSI to be configured
2799 static int ice_vsi_cfg_rss(struct ice_vsi *vsi)
2801 u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
2802 struct ice_aqc_get_set_rss_keys *key;
2803 struct ice_pf *pf = vsi->back;
2804 enum ice_status status;
2808 vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
2810 lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
2814 if (vsi->rss_lut_user)
2815 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
2817 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
2819 status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type,
2820 lut, vsi->rss_table_size);
2823 dev_err(&vsi->back->pdev->dev,
2824 "set_rss_lut failed, error %d\n", status);
2826 goto ice_vsi_cfg_rss_exit;
2829 key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
2832 goto ice_vsi_cfg_rss_exit;
2835 if (vsi->rss_hkey_user)
2836 memcpy(seed, vsi->rss_hkey_user,
2837 ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
2839 netdev_rss_key_fill((void *)seed,
2840 ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
2841 memcpy(&key->standard_rss_key, seed,
2842 ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
2844 status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key);
2847 dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
2852 devm_kfree(&pf->pdev->dev, key);
2853 ice_vsi_cfg_rss_exit:
2854 devm_kfree(&pf->pdev->dev, lut);
2859 * ice_vsi_rebuild - Rebuild VSI after reset
2860 * @vsi: VSI to be rebuilt
2862 * Returns 0 on success and negative value on failure
2864 static int ice_vsi_rebuild(struct ice_vsi *vsi)
2866 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2872 ice_vsi_free_q_vectors(vsi);
2873 ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
2874 vsi->base_vector = 0;
2875 ice_vsi_clear_rings(vsi);
2876 ice_vsi_free_arrays(vsi, false);
2877 ice_vsi_set_num_qs(vsi);
2879 /* Initialize VSI struct elements and create VSI in FW */
2880 ret = ice_vsi_init(vsi);
2884 ret = ice_vsi_alloc_arrays(vsi, false);
2888 switch (vsi->type) {
2891 ret = ice_vsi_alloc_q_vectors(vsi);
2895 ret = ice_vsi_setup_vector_base(vsi);
2899 ret = ice_vsi_alloc_rings(vsi);
2903 ice_vsi_map_rings_to_vectors(vsi);
2909 ice_vsi_set_tc_cfg(vsi);
2911 /* configure VSI nodes based on number of queues and TCs */
2912 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2913 max_txqs[i] = vsi->num_txq;
2915 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
2916 vsi->tc_cfg.ena_tc, max_txqs);
2918 dev_info(&vsi->back->pdev->dev,
2919 "Failed VSI lan queue config\n");
2925 ice_vsi_free_q_vectors(vsi);
2928 vsi->current_netdev_flags = 0;
2929 unregister_netdev(vsi->netdev);
2930 free_netdev(vsi->netdev);
2935 set_bit(__ICE_RESET_FAILED, vsi->back->state);
2940 * ice_vsi_setup - Set up a VSI by a given type
2941 * @pf: board private structure
2942 * @pi: pointer to the port_info instance
2944 * @vf_id: defines VF id to which this VSI connects. This field is meant to be
2945 * used only for the ICE_VSI_VF VSI type. For other VSI types, callers
2946 * should fill in ICE_INVAL_VFID as input.
2948 * This allocates the sw VSI structure and its queue resources.
2950 * Returns pointer to the successfully allocated and configured VSI sw struct on
2951 * success, NULL on failure.
2953 static struct ice_vsi *
2954 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
2955 enum ice_vsi_type type, u16 __always_unused vf_id)
2957 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2958 struct device *dev = &pf->pdev->dev;
2959 struct ice_vsi *vsi;
2962 vsi = ice_vsi_alloc(pf, type);
2964 dev_err(dev, "could not allocate VSI\n");
2968 vsi->port_info = pi;
2969 vsi->vsw = pf->first_sw;
2971 if (ice_vsi_get_qs(vsi)) {
2972 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
2977 /* set RSS capabilities */
2978 ice_vsi_set_rss_params(vsi);
2980 /* create the VSI */
2981 ret = ice_vsi_init(vsi);
2985 switch (vsi->type) {
2987 ret = ice_cfg_netdev(vsi);
2989 goto err_cfg_netdev;
2991 ret = register_netdev(vsi->netdev);
2993 goto err_register_netdev;
2995 netif_carrier_off(vsi->netdev);
2997 /* make sure transmit queues start off as stopped */
2998 netif_tx_stop_all_queues(vsi->netdev);
2999 ret = ice_vsi_alloc_q_vectors(vsi);
3003 ret = ice_vsi_setup_vector_base(vsi);
3007 ret = ice_vsi_alloc_rings(vsi);
3011 ice_vsi_map_rings_to_vectors(vsi);
3013 /* Do not exit if configuring RSS had an issue, at least
3014 * receive traffic on first queue. Hence no need to capture the return value.
3017 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
3018 ice_vsi_cfg_rss(vsi);
3021 /* if vsi type is not recognized, clean up the resources and exit
3027 ice_vsi_set_tc_cfg(vsi);
3029 /* configure VSI nodes based on number of queues and TCs */
3030 for (i = 0; i < vsi->tc_cfg.numtc; i++)
3031 max_txqs[i] = vsi->num_txq;
3033 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
3034 vsi->tc_cfg.ena_tc, max_txqs);
3036 dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
3043 ice_vsi_free_q_vectors(vsi);
3045 if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
3046 unregister_netdev(vsi->netdev);
3047 err_register_netdev:
3049 free_netdev(vsi->netdev);
3053 ice_vsi_delete(vsi);
3055 ice_vsi_put_qs(vsi);
3057 pf->q_left_tx += vsi->alloc_txq;
3058 pf->q_left_rx += vsi->alloc_rxq;
3065 * ice_pf_vsi_setup - Set up a PF VSI
3066 * @pf: board private structure
3067 * @pi: pointer to the port_info instance
3069 * Returns pointer to the successfully allocated VSI sw struct on success,
3070 * otherwise returns NULL on failure.
3072 static struct ice_vsi *
3073 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3075 return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3079 * ice_vsi_add_vlan - Add vsi membership for given vlan
3080 * @vsi: the vsi being configured
3081 * @vid: vlan id to be added
3083 static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
3085 struct ice_fltr_list_entry *tmp;
3086 struct ice_pf *pf = vsi->back;
3087 LIST_HEAD(tmp_add_list);
3088 enum ice_status status;
3091 tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
3095 tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
3096 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3097 tmp->fltr_info.flag = ICE_FLTR_TX;
3098 tmp->fltr_info.src = vsi->vsi_num;
3099 tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
3100 tmp->fltr_info.l_data.vlan.vlan_id = vid;
3102 INIT_LIST_HEAD(&tmp->list_entry);
3103 list_add(&tmp->list_entry, &tmp_add_list);
3105 status = ice_add_vlan(&pf->hw, &tmp_add_list);
3108 dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
3112 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
3117 * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
3118 * @netdev: network interface to be adjusted
3119 * @proto: unused protocol
3120 * @vid: vlan id to be added
3122 * net_device_ops implementation for adding vlan ids
3124 static int ice_vlan_rx_add_vid(struct net_device *netdev,
3125 __always_unused __be16 proto, u16 vid)
3127 struct ice_netdev_priv *np = netdev_priv(netdev);
3128 struct ice_vsi *vsi = np->vsi;
3131 if (vid >= VLAN_N_VID) {
3132 netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
3140 /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
3141 * needed to continue allowing all untagged packets since VLAN prune
3142 * list is applied to all packets by the switch
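/* For example, a request to join VLAN 100 arrives here as
 * ice_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), 100), and VID 0 is
 * installed through the same path so untagged traffic keeps flowing
 * (the VID values are illustrative).
 */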
3144 ret = ice_vsi_add_vlan(vsi, vid);
3147 set_bit(vid, vsi->active_vlans);
3153 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
3154 * @vsi: the VSI being configured
3155 * @vid: VLAN id to be removed
3157 static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
3159 struct ice_fltr_list_entry *list;
3160 struct ice_pf *pf = vsi->back;
3161 LIST_HEAD(tmp_add_list);
3163 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
3167 list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
3168 list->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
3169 list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3170 list->fltr_info.l_data.vlan.vlan_id = vid;
3171 list->fltr_info.flag = ICE_FLTR_TX;
3172 list->fltr_info.src = vsi->vsi_num;
3174 INIT_LIST_HEAD(&list->list_entry);
3175 list_add(&list->list_entry, &tmp_add_list);
3177 if (ice_remove_vlan(&pf->hw, &tmp_add_list))
3178 dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
3181 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
3185 * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
3186 * @netdev: network interface to be adjusted
3187 * @proto: unused protocol
3188 * @vid: vlan id to be removed
3190 * net_device_ops implementation for removing vlan ids
3192 static int ice_vlan_rx_kill_vid(struct net_device *netdev,
3193 __always_unused __be16 proto, u16 vid)
3195 struct ice_netdev_priv *np = netdev_priv(netdev);
3196 struct ice_vsi *vsi = np->vsi;
3201 /* return code is ignored as there is nothing a user
3202 * can do about failure to remove and a log message was
3203 * already printed from the other function
3205 ice_vsi_kill_vlan(vsi, vid);
3207 clear_bit(vid, vsi->active_vlans);
3213 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3214 * @pf: board private structure
3216 * Returns 0 on success, negative value on failure
3218 static int ice_setup_pf_sw(struct ice_pf *pf)
3220 LIST_HEAD(tmp_add_list);
3221 u8 broadcast[ETH_ALEN];
3222 struct ice_vsi *vsi;
3225 if (ice_is_reset_recovery_pending(pf->state))
3228 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3231 goto unroll_vsi_setup;
3234 /* To add a MAC filter, first add the MAC to a list and then
3235 * pass the list to ice_add_mac.
3238 /* Add a unicast MAC filter so the VSI can get its packets */
3239 status = ice_add_mac_to_list(vsi, &tmp_add_list,
3240 vsi->port_info->mac.perm_addr);
3242 goto unroll_vsi_setup;
3244 /* VSI needs to receive broadcast traffic, so add the broadcast
3245 * MAC address to the list as well.
3247 eth_broadcast_addr(broadcast);
3248 status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
3252 /* program MAC filters for entries in tmp_add_list */
3253 status = ice_add_mac(&pf->hw, &tmp_add_list);
3255 dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
3260 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
3264 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
3268 ice_vsi_free_q_vectors(vsi);
3269 if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
3270 unregister_netdev(vsi->netdev);
3272 free_netdev(vsi->netdev);
3276 ice_vsi_delete(vsi);
3277 ice_vsi_put_qs(vsi);
3278 pf->q_left_tx += vsi->alloc_txq;
3279 pf->q_left_rx += vsi->alloc_rxq;
3286 * ice_determine_q_usage - Calculate queue distribution
3287 * @pf: board private structure
3289 * Determine how many Tx and Rx queues the PF gets; the function is void, so
3290 * a queue shortfall only surfaces later when a VSI requests queues
3291 static void ice_determine_q_usage(struct ice_pf *pf)
3293 u16 q_left_tx, q_left_rx;
3295 q_left_tx = pf->hw.func_caps.common_cap.num_txq;
3296 q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
3298 pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
3300 /* only 1 rx queue unless RSS is enabled */
3301 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
3304 pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());
3306 pf->q_left_tx = q_left_tx - pf->num_lan_tx;
3307 pf->q_left_rx = q_left_rx - pf->num_lan_rx;
3311 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3312 * @pf: board private structure to initialize
3314 static void ice_deinit_pf(struct ice_pf *pf)
3316 if (pf->serv_tmr.function)
3317 del_timer_sync(&pf->serv_tmr);
3318 if (pf->serv_task.func)
3319 cancel_work_sync(&pf->serv_task);
3320 mutex_destroy(&pf->sw_mutex);
3321 mutex_destroy(&pf->avail_q_mutex);
3325 * ice_init_pf - Initialize general software structures (struct ice_pf)
3326 * @pf: board private structure to initialize
3328 static void ice_init_pf(struct ice_pf *pf)
3330 bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
3331 set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
3333 mutex_init(&pf->sw_mutex);
3334 mutex_init(&pf->avail_q_mutex);
3336 /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
3337 mutex_lock(&pf->avail_q_mutex);
3338 bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
3339 bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
3340 mutex_unlock(&pf->avail_q_mutex);
3342 if (pf->hw.func_caps.common_cap.rss_table_size)
3343 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3345 /* setup service timer and periodic service task */
3346 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3347 pf->serv_tmr_period = HZ;
3348 INIT_WORK(&pf->serv_task, ice_service_task);
3349 clear_bit(__ICE_SERVICE_SCHED, pf->state);
3353 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3354 * @pf: board private structure
3356 * Compute the number of MSIX vectors required (v_budget) and request them
3357 * from the OS. Returns the number of vectors reserved, or negative on failure
3359 static int ice_ena_msix_range(struct ice_pf *pf)
3361 int v_left, v_actual, v_budget = 0;
3364 v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3366 /* reserve one vector for miscellaneous handler */
3371 /* reserve vectors for LAN traffic */
3372 pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
3373 v_budget += pf->num_lan_msix;
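/* E.g. with 8 online CPUs, v_budget = 8 (LAN) + 1 (misc) = 9. Should
 * the OS grant fewer vectors, the fallback further down continues with
 * a reduced budget: a grant of 5 would trim num_lan_msix to 1 and
 * leave num_avail_msix = 3 (values illustrative).
 */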
3375 pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
3376 sizeof(struct msix_entry), GFP_KERNEL);
3378 if (!pf->msix_entries) {
3383 for (i = 0; i < v_budget; i++)
3384 pf->msix_entries[i].entry = i;
3386 /* actually reserve the vectors */
3387 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3388 ICE_MIN_MSIX, v_budget);
3391 dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
3396 if (v_actual < v_budget) {
3397 dev_warn(&pf->pdev->dev,
3398 "not enough vectors. requested = %d, obtained = %d\n",
3399 v_budget, v_actual);
3400 if (v_actual >= (pf->num_lan_msix + 1)) {
3401 pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1);
3402 } else if (v_actual >= 2) {
3403 pf->num_lan_msix = 1;
3404 pf->num_avail_msix = v_actual - 2;
3406 pci_disable_msix(pf->pdev);
3415 devm_kfree(&pf->pdev->dev, pf->msix_entries);
3419 pf->num_lan_msix = 0;
3420 clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
3425 * ice_dis_msix - Disable MSI-X interrupt setup in OS
3426 * @pf: board private structure
3428 static void ice_dis_msix(struct ice_pf *pf)
3430 pci_disable_msix(pf->pdev);
3431 devm_kfree(&pf->pdev->dev, pf->msix_entries);
3432 pf->msix_entries = NULL;
3433 clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
3437 * ice_init_interrupt_scheme - Determine proper interrupt scheme
3438 * @pf: board private structure to initialize
3440 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3445 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
3446 vectors = ice_ena_msix_range(pf);
3453 /* set up vector assignment tracking */
3454 size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
3456 pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
3457 if (!pf->irq_tracker) {
3462 pf->irq_tracker->num_entries = vectors;
3468 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3469 * @pf: board private structure
3471 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3473 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
3476 if (pf->irq_tracker) {
3477 devm_kfree(&pf->pdev->dev, pf->irq_tracker);
3478 pf->irq_tracker = NULL;
3483 * ice_probe - Device initialization routine
3484 * @pdev: PCI device information struct
3485 * @ent: entry in ice_pci_tbl
3487 * Returns 0 on success, negative on failure
3489 static int ice_probe(struct pci_dev *pdev,
3490 const struct pci_device_id __always_unused *ent)
3496 /* this driver uses devres, see Documentation/driver-model/devres.txt */
3497 err = pcim_enable_device(pdev);
3501 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
3503 dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
3507 pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
3511 /* set up for high or low dma */
3512 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3514 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3516 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
3520 pci_enable_pcie_error_reporting(pdev);
3521 pci_set_master(pdev);
3524 pci_set_drvdata(pdev, pf);
3525 set_bit(__ICE_DOWN, pf->state);
3528 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
3530 hw->vendor_id = pdev->vendor;
3531 hw->device_id = pdev->device;
3532 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
3533 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3534 hw->subsystem_device_id = pdev->subsystem_device;
3535 hw->bus.device = PCI_SLOT(pdev->devfn);
3536 hw->bus.func = PCI_FUNC(pdev->devfn);
3537 ice_set_ctrlq_len(hw);
3539 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
3541 #ifndef CONFIG_DYNAMIC_DEBUG
3543 hw->debug_mask = debug;
3546 err = ice_init_hw(hw);
3548 dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
3550 goto err_exit_unroll;
3553 dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
3554 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
3555 hw->api_maj_ver, hw->api_min_ver);
3559 ice_determine_q_usage(pf);
3561 pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
3562 hw->func_caps.guaranteed_num_vsi);
3563 if (!pf->num_alloc_vsi) {
3565 goto err_init_pf_unroll;
3568 pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
3569 sizeof(struct ice_vsi *), GFP_KERNEL);
3572 goto err_init_pf_unroll;
3575 err = ice_init_interrupt_scheme(pf);
3578 "ice_init_interrupt_scheme failed: %d\n", err);
3580 goto err_init_interrupt_unroll;
3583 /* In case of MSIX we are going to setup the misc vector right here
3584 * to handle admin queue events etc. In case of legacy and MSI
3585 * the misc functionality and queue processing is combined in
3586 * the same vector and that gets set up at open.
3588 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
3589 err = ice_req_irq_msix_misc(pf);
3592 "setup of misc vector failed: %d\n", err);
3593 goto err_init_interrupt_unroll;
3597 /* create switch struct for the switch element created by FW on boot */
3598 pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw),
3600 if (!pf->first_sw) {
3602 goto err_msix_misc_unroll;
3606 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
3608 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
3610 pf->first_sw->pf = pf;
3612 /* record the sw_id available for later use */
3613 pf->first_sw->sw_id = hw->port_info->sw_id;
3615 err = ice_setup_pf_sw(pf);
3618 "probe failed due to setup pf switch:%d\n", err);
3619 goto err_alloc_sw_unroll;
3622 /* Driver is mostly up */
3623 clear_bit(__ICE_DOWN, pf->state);
3625 /* since everything is good, start the service timer */
3626 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
3628 err = ice_init_link_events(pf->hw.port_info);
3630 dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err);
3631 goto err_alloc_sw_unroll;
3636 err_alloc_sw_unroll:
3637 set_bit(__ICE_DOWN, pf->state);
3638 devm_kfree(&pf->pdev->dev, pf->first_sw);
3639 err_msix_misc_unroll:
3640 ice_free_irq_msix_misc(pf);
3641 err_init_interrupt_unroll:
3642 ice_clear_interrupt_scheme(pf);
3643 devm_kfree(&pdev->dev, pf->vsi);
3648 pci_disable_pcie_error_reporting(pdev);
3653 * ice_remove - Device removal routine
3654 * @pdev: PCI device information struct
3656 static void ice_remove(struct pci_dev *pdev)
3658 struct ice_pf *pf = pci_get_drvdata(pdev);
3663 set_bit(__ICE_DOWN, pf->state);
3665 ice_vsi_release_all(pf);
3666 ice_free_irq_msix_misc(pf);
3667 ice_clear_interrupt_scheme(pf);
3669 ice_deinit_hw(&pf->hw);
3670 pci_disable_pcie_error_reporting(pdev);
3673 /* ice_pci_tbl - PCI Device ID Table
3675 * Wildcard entries (PCI_ANY_ID) should come last
3676 * Last entry must be all 0s
3678 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
3679 * Class, Class Mask, private data (not used) }
3681 static const struct pci_device_id ice_pci_tbl[] = {
3682 { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
3683 { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
3684 { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
3685 { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 },
3686 { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 },
3687 /* required last entry */
3690 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
3692 static struct pci_driver ice_driver = {
3693 .name = KBUILD_MODNAME,
3694 .id_table = ice_pci_tbl,
3696 .remove = ice_remove,
3700 * ice_module_init - Driver registration routine
3702 * ice_module_init is the first routine called when the driver is
3703 * loaded. All it does is register with the PCI subsystem.
3705 static int __init ice_module_init(void)
3709 pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
3710 pr_info("%s\n", ice_copyright);
3712 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
3714 pr_err("Failed to create workqueue\n");
3718 status = pci_register_driver(&ice_driver);
3720 pr_err("failed to register pci driver, err %d\n", status);
3721 destroy_workqueue(ice_wq);
3726 module_init(ice_module_init);
3729 * ice_module_exit - Driver exit cleanup routine
3731 * ice_module_exit is called just before the driver is removed from memory.
3734 static void __exit ice_module_exit(void)
3736 pci_unregister_driver(&ice_driver);
3737 destroy_workqueue(ice_wq);
3738 pr_info("module unloaded\n");
3740 module_exit(ice_module_exit);
3743 * ice_set_mac_address - NDO callback to set mac address
3744 * @netdev: network interface device structure
3745 * @pi: pointer to an address structure
3747 * Returns 0 on success, negative on failure
3749 static int ice_set_mac_address(struct net_device *netdev, void *pi)
3751 struct ice_netdev_priv *np = netdev_priv(netdev);
3752 struct ice_vsi *vsi = np->vsi;
3753 struct ice_pf *pf = vsi->back;
3754 struct ice_hw *hw = &pf->hw;
3755 struct sockaddr *addr = pi;
3756 enum ice_status status;
3757 LIST_HEAD(a_mac_list);
3758 LIST_HEAD(r_mac_list);
3763 mac = (u8 *)addr->sa_data;
3765 if (!is_valid_ether_addr(mac))
3766 return -EADDRNOTAVAIL;
3768 if (ether_addr_equal(netdev->dev_addr, mac)) {
3769 netdev_warn(netdev, "already using mac %pM\n", mac);
3773 if (test_bit(__ICE_DOWN, pf->state) ||
3774 ice_is_reset_recovery_pending(pf->state)) {
3775 netdev_err(netdev, "can't set mac %pM. device not ready\n",
3780 /* When we change the mac address we also have to change the mac address
3781 * based filter rules that were created previously for the old mac
3782 * address. So first, we remove the old filter rule using ice_remove_mac
3783 * and then create a new filter rule using ice_add_mac. Note that for
3784 * both these operations, we first need to form a "list" of mac
3785 * addresses (even though in this case, we have only 1 mac address to be
3786 * added/removed) and this is done using ice_add_mac_to_list. Depending on
3787 * the ensuing operation this "list" of mac addresses is either to be
3788 * added or removed from the filter.
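/* Sketch of the sequence below, with old_mac/new_mac as placeholder
 * names:
 *
 *	ice_add_mac_to_list(vsi, &r_mac_list, old_mac);
 *	ice_remove_mac(hw, &r_mac_list);	(drop the old filter)
 *	ice_add_mac_to_list(vsi, &a_mac_list, new_mac);
 *	ice_add_mac(hw, &a_mac_list);		(install the new one)
 */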
3790 err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
3792 err = -EADDRNOTAVAIL;
3796 status = ice_remove_mac(hw, &r_mac_list);
3798 err = -EADDRNOTAVAIL;
3802 err = ice_add_mac_to_list(vsi, &a_mac_list, mac);
3804 err = -EADDRNOTAVAIL;
3808 status = ice_add_mac(hw, &a_mac_list);
3810 err = -EADDRNOTAVAIL;
3815 /* free list entries */
3816 ice_free_fltr_list(&pf->pdev->dev, &r_mac_list);
3817 ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
3820 netdev_err(netdev, "can't set mac %pM. filter update failed\n",
3825 /* change the netdev's mac address */
3826 memcpy(netdev->dev_addr, mac, netdev->addr_len);
3827 netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
3830 /* write new mac address to the firmware */
3831 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3832 status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
3834 netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n",
3841 * ice_set_rx_mode - NDO callback to set the netdev filters
3842 * @netdev: network interface device structure
3844 static void ice_set_rx_mode(struct net_device *netdev)
3846 struct ice_netdev_priv *np = netdev_priv(netdev);
3847 struct ice_vsi *vsi = np->vsi;
3852 /* Set the flags to synchronize filters
3853 * ndo_set_rx_mode may be triggered even without a change in netdev
3856 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
3857 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
3858 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
3860 /* schedule our worker thread which will take care of
3861 * applying the new filter changes
3863 ice_service_task_schedule(vsi->back);
3867 * ice_fdb_add - add an entry to the hardware database
3868 * @ndm: the input from the stack
3869 * @tb: pointer to array of nladdr (unused)
3870 * @dev: the net device pointer
3871 * @addr: the MAC address entry being added
3873 * @flags: instructions from stack about fdb operation
3875 static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
3876 struct net_device *dev, const unsigned char *addr,
3882 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
3885 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
3886 netdev_err(dev, "FDB only supports static addresses\n");
3890 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3891 err = dev_uc_add_excl(dev, addr);
3892 else if (is_multicast_ether_addr(addr))
3893 err = dev_mc_add_excl(dev, addr);
3897 /* Only return duplicate errors if NLM_F_EXCL is set */
3898 if (err == -EEXIST && !(flags & NLM_F_EXCL))
3905 * ice_fdb_del - delete an entry from the hardware database
3906 * @ndm: the input from the stack
3907 * @tb: pointer to array of nladdr (unused)
3908 * @dev: the net device pointer
3909 * @addr: the MAC address entry being added
3912 static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
3913 struct net_device *dev, const unsigned char *addr,
3914 __always_unused u16 vid)
3918 if (ndm->ndm_state & NUD_PERMANENT) {
3919 netdev_err(dev, "FDB only supports static addresses\n");
3923 if (is_unicast_ether_addr(addr))
3924 err = dev_uc_del(dev, addr);
3925 else if (is_multicast_ether_addr(addr))
3926 err = dev_mc_del(dev, addr);
3934 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
3935 * @vsi: the vsi being changed
3937 static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
3939 struct device *dev = &vsi->back->pdev->dev;
3940 struct ice_hw *hw = &vsi->back->hw;
3941 struct ice_vsi_ctx ctxt = { 0 };
3942 enum ice_status status;
3944 /* Here we are configuring the VSI to let the driver add VLAN tags by
3945 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
3946 * insertion happens in the Tx hot path, in ice_tx_map.
3948 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
3950 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
3951 ctxt.vsi_num = vsi->vsi_num;
3953 status = ice_aq_update_vsi(hw, &ctxt, NULL);
3955 dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
3956 status, hw->adminq.sq_last_status);
3960 vsi->info.vlan_flags = ctxt.info.vlan_flags;
3965 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
3966 * @vsi: the vsi being changed
3967 * @ena: boolean value indicating if this is an enable or disable request
3969 static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
3971 struct device *dev = &vsi->back->pdev->dev;
3972 struct ice_hw *hw = &vsi->back->hw;
3973 struct ice_vsi_ctx ctxt = { 0 };
3974 enum ice_status status;
3976 /* Here we are configuring what the VSI should do with the VLAN tag in
3977 * the Rx packet. We can either leave the tag in the packet or put it in
3978 * the Rx descriptor.
3981 /* Strip VLAN tag from Rx packet and put it in the desc */
3982 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
3984 /* Disable stripping. Leave tag in packet */
3985 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3988 /* Allow all packets untagged/tagged */
3989 ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
3991 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
3992 ctxt.vsi_num = vsi->vsi_num;
3994 status = ice_aq_update_vsi(hw, &ctxt, NULL);
3996 dev_err(dev, "update VSI for VALN strip failed, ena = %d err %d aq_err %d\n",
3997 ena, status, hw->adminq.sq_last_status);
4001 vsi->info.vlan_flags = ctxt.info.vlan_flags;
4006 * ice_set_features - set the netdev feature flags
4007 * @netdev: ptr to the netdev being adjusted
4008 * @features: the feature set that the stack is suggesting
4010 static int ice_set_features(struct net_device *netdev,
4011 netdev_features_t features)
4013 struct ice_netdev_priv *np = netdev_priv(netdev);
4014 struct ice_vsi *vsi = np->vsi;
4017 if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
4018 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
4019 ret = ice_vsi_manage_vlan_stripping(vsi, true);
4020 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
4021 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
4022 ret = ice_vsi_manage_vlan_stripping(vsi, false);
4023 else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
4024 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
4025 ret = ice_vsi_manage_vlan_insertion(vsi);
4026 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
4027 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
4028 ret = ice_vsi_manage_vlan_insertion(vsi);
4034 * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
4035 * @vsi: VSI to setup vlan properties for
4037 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
4041 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
4042 ret = ice_vsi_manage_vlan_stripping(vsi, true);
4043 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
4044 ret = ice_vsi_manage_vlan_insertion(vsi);
4050 * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
4051 * @vsi: the VSI being brought back up
4053 static int ice_restore_vlan(struct ice_vsi *vsi)
4061 err = ice_vsi_vlan_setup(vsi);
4065 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
4066 err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
4075 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
4076 * @ring: The Tx ring to configure
4077 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
4078 * @pf_q: queue index in the PF space
4080 * Configure the Tx descriptor ring in TLAN context.
4083 ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
4085 struct ice_vsi *vsi = ring->vsi;
4086 struct ice_hw *hw = &vsi->back->hw;
4088 tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
4090 tlan_ctx->port_num = vsi->port_info->lport;
4092 /* Transmit Queue Length */
4093 tlan_ctx->qlen = ring->count;
4096 tlan_ctx->pf_num = hw->pf_id;
4098 /* queue belongs to a specific VSI type
4099 * VF / VM index should be programmed per vmvf_type setting:
4100 * for vmvf_type = VF, it is VF number between 0-256
4101 * for vmvf_type = VM, it is VM number between 0-767
4102 * for PF or EMP this field should be set to zero
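/* For the PF VSI handled below this means vmvf_type is set to
 * ICE_TLAN_CTX_VMVF_TYPE_PF while the VF/VM number stays zero, since
 * the caller zero-initialized tlan_ctx.
 */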
4104 switch (vsi->type) {
4106 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
4112 /* make sure the context is associated with the right VSI */
4113 tlan_ctx->src_vsi = vsi->vsi_num;
4115 tlan_ctx->tso_ena = ICE_TX_LEGACY;
4116 tlan_ctx->tso_qnum = pf_q;
4118 /* Legacy or Advanced Host Interface:
4119 * 0: Advanced Host Interface
4120 * 1: Legacy Host Interface
4122 tlan_ctx->legacy_int = ICE_TX_LEGACY;
4126 * ice_vsi_cfg_txqs - Configure the VSI for Tx
4127 * @vsi: the VSI being configured
4129 * Return 0 on success and a negative value on error
4130 * Configure the Tx VSI for operation.
4132 static int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
4134 struct ice_aqc_add_tx_qgrp *qg_buf;
4135 struct ice_aqc_add_txqs_perq *txq;
4136 struct ice_pf *pf = vsi->back;
4137 enum ice_status status;
4138 u16 buf_len, i, pf_q;
4139 int err = 0, tc = 0;
4142 buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
4143 qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
4147 if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
4151 qg_buf->num_txqs = 1;
4154 /* set up and configure the tx queues */
4155 ice_for_each_txq(vsi, i) {
4156 struct ice_tlan_ctx tlan_ctx = { 0 };
4158 pf_q = vsi->txq_map[i];
4159 ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
4160 /* copy context contents into the qg_buf */
4161 qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
4162 ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
4165 /* init queue specific tail reg. It is referred to as the transmit
4166 * comm scheduler queue doorbell.
4168 vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
4169 status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,
4170 num_q_grps, qg_buf, buf_len, NULL);
4172 dev_err(&vsi->back->pdev->dev,
4173 "Failed to set LAN Tx queue context, error: %d\n",
4179 /* Add Tx Queue TEID into the VSI Tx ring from the response.
4180 * This will complete configuring and enabling the queue.
4182 txq = &qg_buf->txqs[0];
4183 if (pf_q == le16_to_cpu(txq->txq_id))
4184 vsi->tx_rings[i]->txq_teid =
4185 le32_to_cpu(txq->q_teid);
4188 devm_kfree(&pf->pdev->dev, qg_buf);
4193 * ice_setup_rx_ctx - Configure a receive ring context
4194 * @ring: The Rx ring to configure
4196 * Configure the Rx descriptor ring in RLAN context.
4198 static int ice_setup_rx_ctx(struct ice_ring *ring)
4200 struct ice_vsi *vsi = ring->vsi;
4201 struct ice_hw *hw = &vsi->back->hw;
4202 u32 rxdid = ICE_RXDID_FLEX_NIC;
4203 struct ice_rlan_ctx rlan_ctx;
4208 /* compute the Rx queue number in the global space of 2K Rx queues */
4209 pf_q = vsi->rxq_map[ring->q_index];
4211 /* clear the context structure first */
4212 memset(&rlan_ctx, 0, sizeof(rlan_ctx));
4214 rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
4216 rlan_ctx.qlen = ring->count;
4218 /* Receive Packet Data Buffer Size.
4219 * The Packet Data Buffer Size is defined in 128 byte units.
4221 rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
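/* Worked example (assuming ICE_RLAN_CTX_DBUF_S is 7, consistent with
 * the 128-byte units noted above): rx_buf_len = 2048 gives
 * dbuf = 2048 >> 7 = 16, i.e. sixteen 128-byte units.
 */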
4223 /* use 32 byte descriptors */
4226 /* Strip the Ethernet CRC bytes before the packet is posted to host memory.
4229 rlan_ctx.crcstrip = 1;
4231 /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
4232 rlan_ctx.l2tsel = 1;
4234 rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
4235 rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
4236 rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
4238 /* This controls whether VLAN is stripped from inner headers
4239 * The VLAN in the inner L2 header is stripped to the receive
4240 * descriptor if enabled by this flag.
4242 rlan_ctx.showiv = 0;
4244 /* Max packet size for this queue - must not be set to a larger value than 5 x DBUF
4247 rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
4248 ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
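/* E.g. with rx_buf_len = 2048 and ICE_MAX_CHAINED_RX_BUFS presumed to
 * be 5 (matching the "5 x DBUF" limit noted above), a hypothetical
 * jumbo max_frame of 9728 stays below the 10240 cap and is used as is.
 */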
4250 /* Rx queue threshold in units of 64 */
4251 rlan_ctx.lrxqthresh = 1;
4253 /* Enable Flexible Descriptors in the queue context which
4254 * allows this driver to select a specific receive descriptor format
4256 regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
4257 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
4258 QRXFLXP_CNTXT_RXDID_IDX_M;
4260 /* increasing context priority to pick up profile id;
4261 * default is 0x01; setting to 0x03 ensures the profile
4262 * is programmed even if the previous context had the same priority
4264 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
4265 QRXFLXP_CNTXT_RXDID_PRIO_M;
4267 wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
4269 /* Absolute queue number out of 2K needs to be passed */
4270 err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
4272 dev_err(&vsi->back->pdev->dev,
4273 "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
4278 /* init queue specific tail register */
4279 ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
4280 writel(0, ring->tail);
4281 ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
4287 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
4288 * @vsi: the VSI being configured
4290 * Return 0 on success and a negative value on error
4291 * Configure the Rx VSI for operation.
4293 static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
4298 if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
4299 vsi->max_frame = vsi->netdev->mtu +
4300 ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4302 vsi->max_frame = ICE_RXBUF_2048;
4304 vsi->rx_buf_len = ICE_RXBUF_2048;
4305 /* set up individual rings */
4306 for (i = 0; i < vsi->num_rxq && !err; i++)
4307 err = ice_setup_rx_ctx(vsi->rx_rings[i]);
4310 dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
4317 * ice_vsi_cfg - Setup the VSI
4318 * @vsi: the VSI being configured
4320 * Return 0 on success and negative value on error
4322 static int ice_vsi_cfg(struct ice_vsi *vsi)
4327 ice_set_rx_mode(vsi->netdev);
4328 err = ice_restore_vlan(vsi);
4333 err = ice_vsi_cfg_txqs(vsi);
4335 err = ice_vsi_cfg_rxqs(vsi);
4341 * ice_vsi_stop_tx_rings - Disable Tx rings
4342 * @vsi: the VSI being configured
4344 static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
4346 struct ice_pf *pf = vsi->back;
4347 struct ice_hw *hw = &pf->hw;
4348 enum ice_status status;
4353 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
4356 q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
4361 q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
4365 goto err_alloc_q_ids;
4368 /* set up the tx queue list to be disabled */
4369 ice_for_each_txq(vsi, i) {
4372 if (!vsi->tx_rings || !vsi->tx_rings[i]) {
4377 q_ids[i] = vsi->txq_map[i];
4378 q_teids[i] = vsi->tx_rings[i]->txq_teid;
4380 /* clear cause_ena bit for disabled queues */
4381 val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
4382 val &= ~QINT_TQCTL_CAUSE_ENA_M;
4383 wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
4385 /* software is expected to wait for 100 ns */
4388 /* trigger a software interrupt for the vector associated to
4389 * the queue to schedule napi handler
4391 v_idx = vsi->tx_rings[i]->q_vector->v_idx;
4392 wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),
4393 GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
4395 status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
4397 /* if the disable queue command was exercised during an active reset
4398 * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
4399 * the reset operation disables queues at the hardware level anyway.
4401 if (status == ICE_ERR_RESET_ONGOING) {
4402 dev_dbg(&pf->pdev->dev,
4403 "Reset in progress. LAN Tx queues already disabled\n");
4404 } else if (status) {
4405 dev_err(&pf->pdev->dev,
4406 "Failed to disable LAN Tx queues, error: %d\n",
4412 devm_kfree(&pf->pdev->dev, q_ids);
4415 devm_kfree(&pf->pdev->dev, q_teids);
4421 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4422 * @pf: the PF being configured
4423 * @pf_q: the PF queue
4424 * @ena: enable or disable state of the queue
4426 * This routine will wait for the given Rx queue of the PF to reach the
4427 * enabled or disabled state.
4428 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4429 * multiple retries; else will return 0 in case of success.
4431 static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
4435 for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
4436 u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
4438 if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
4441 usleep_range(10, 20);
4443 if (i >= ICE_Q_WAIT_RETRY_LIMIT)
4450 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings
4451 * @vsi: the VSI being configured
4452 * @ena: start or stop the rx rings
4454 static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
4456 struct ice_pf *pf = vsi->back;
4457 struct ice_hw *hw = &pf->hw;
4460 for (i = 0; i < vsi->num_rxq; i++) {
4461 int pf_q = vsi->rxq_map[i];
4464 for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
4465 rx_reg = rd32(hw, QRX_CTRL(pf_q));
4466 if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
4467 ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
4469 usleep_range(1000, 2000);
4472 /* Skip if the queue is already in the requested state */
4473 if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
4476 /* turn on/off the queue */
4478 rx_reg |= QRX_CTRL_QENA_REQ_M;
4480 rx_reg &= ~QRX_CTRL_QENA_REQ_M;
4481 wr32(hw, QRX_CTRL(pf_q), rx_reg);
4483 /* wait for the change to finish */
4484 ret = ice_pf_rxq_wait(pf, pf_q, ena);
4486 dev_err(&pf->pdev->dev,
4487 "VSI idx %d Rx ring %d %sable timeout\n",
4488 vsi->idx, pf_q, (ena ? "en" : "dis"));
4497 * ice_vsi_start_rx_rings - start VSI's rx rings
4498 * @vsi: the VSI whose rings are to be started
4500 * Returns 0 on success and a negative value on error
4502 static int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
4504 return ice_vsi_ctrl_rx_rings(vsi, true);
4508 * ice_vsi_stop_rx_rings - stop VSI's rx rings
4511 * Returns 0 on success and a negative value on error
4513 static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
4515 return ice_vsi_ctrl_rx_rings(vsi, false);
4519 * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings
4521 * Returns 0 on success and a negative value on error
4523 static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi)
4527 err_tx = ice_vsi_stop_tx_rings(vsi);
4529 dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n");
4531 err_rx = ice_vsi_stop_rx_rings(vsi);
4533 dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n");
4535 if (err_tx || err_rx)
4542 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4543 * @vsi: the VSI being configured
4545 static void ice_napi_enable_all(struct ice_vsi *vsi)
4552 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4553 napi_enable(&vsi->q_vectors[q_idx]->napi);
4557 * ice_up_complete - Finish the last steps of bringing up a connection
4558 * @vsi: The VSI being configured
4560 * Return 0 on success and negative value on error
4562 static int ice_up_complete(struct ice_vsi *vsi)
4564 struct ice_pf *pf = vsi->back;
4567 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
4568 ice_vsi_cfg_msix(vsi);
4572 /* Enable only Rx rings, Tx rings were enabled by the FW when the
4573 * Tx queue group list was configured and the context bits were
4574 * programmed using ice_vsi_cfg_txqs
4576 err = ice_vsi_start_rx_rings(vsi);
4580 clear_bit(__ICE_DOWN, vsi->state);
4581 ice_napi_enable_all(vsi);
4582 ice_vsi_ena_irq(vsi);
4584 if (vsi->port_info &&
4585 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
4587 ice_print_link_msg(vsi, true);
4588 netif_tx_start_all_queues(vsi->netdev);
4589 netif_carrier_on(vsi->netdev);
4592 ice_service_task_schedule(pf);
4598 * ice_up - Bring the connection back up after being down
4599 * @vsi: VSI being configured
4601 int ice_up(struct ice_vsi *vsi)
4605 err = ice_vsi_cfg(vsi);
4607 err = ice_up_complete(vsi);
4613 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
4614 * @ring: Tx or Rx ring to read stats from
4615 * @pkts: packets stats counter
4616 * @bytes: bytes stats counter
4618 * This function fetches stats from the ring considering the atomic operations
4619 * that need to be performed to read u64 values on a 32-bit machine.
4621 static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
4631 start = u64_stats_fetch_begin_irq(&ring->syncp);
4632 *pkts = ring->stats.pkts;
4633 *bytes = ring->stats.bytes;
4634 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
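/* On 32-bit kernels a u64 counter is read as two halves; the
 * fetch/retry pair above restarts the read if a writer updated the
 * counter in between. E.g. a counter crossing 0xFFFFFFFF ->
 * 0x100000000 mid-read could otherwise be observed as the torn value
 * 0x1FFFFFFFF. On 64-bit builds the sync ops compile away.
 */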
4638 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4639 * @hw: ptr to the hardware info
4640 * @hireg: high 32 bit HW register to read from
4641 * @loreg: low 32 bit HW register to read from
4642 * @prev_stat_loaded: bool to specify if previous stats are loaded
4643 * @prev_stat: ptr to previous loaded stat value
4644 * @cur_stat: ptr to current stat value
4646 static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
4647 bool prev_stat_loaded, u64 *prev_stat,
4652 new_data = rd32(hw, loreg);
4653 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4655 /* device stats are not reset at PFR, they likely will not be zeroed
4656 * when the driver starts. So save the first values read and use them as
4657 * offsets to be subtracted from the raw values in order to report stats
4658 * that count from zero.
4660 if (!prev_stat_loaded)
4661 *prev_stat = new_data;
4662 if (likely(new_data >= *prev_stat))
4663 *cur_stat = new_data - *prev_stat;
4665 /* to manage the potential roll-over */
4666 *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
4667 *cur_stat &= 0xFFFFFFFFFFULL;
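/* Worked example: prev_stat = 0xFFFFFFFFF0 and a wrapped reading of
 * new_data = 0x10 give cur_stat = (0x10 + 2^40) - 0xFFFFFFFFF0 = 0x20;
 * the final mask keeps the result within 40 bits.
 */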
4671 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4672 * @hw: ptr to the hardware info
4673 * @reg: HW register to read from
4674 * @prev_stat_loaded: bool to specify if previous stats are loaded
4675 * @prev_stat: ptr to previous loaded stat value
4676 * @cur_stat: ptr to current stat value
4678 static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4679 u64 *prev_stat, u64 *cur_stat)
4683 new_data = rd32(hw, reg);
4685 /* device stats are not reset at PFR; they likely will not be zeroed
4686 * when the driver starts, so save the first values read and use them as
4687 * offsets to be subtracted from the raw values in order to report stats
4688 * that count from zero.
4690 if (!prev_stat_loaded)
4691 *prev_stat = new_data;
4692 if (likely(new_data >= *prev_stat))
4693 *cur_stat = new_data - *prev_stat;
4695 /* to manage the potential roll-over */
4696 *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
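/* unlike the 40-bit case no final mask is needed: in this branch
 * new_data < *prev_stat <= U32_MAX, so the corrected delta always fits
 * in 32 bits
 */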
4700 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
4701 * @vsi: the VSI to be updated
4703 static void ice_update_eth_stats(struct ice_vsi *vsi)
4705 struct ice_eth_stats *prev_es, *cur_es;
4706 struct ice_hw *hw = &vsi->back->hw;
4707 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
4709 prev_es = &vsi->eth_stats_prev;
4710 cur_es = &vsi->eth_stats;
4712 ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
4713 vsi->stat_offsets_loaded, &prev_es->rx_bytes,
4716 ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
4717 vsi->stat_offsets_loaded, &prev_es->rx_unicast,
4718 &cur_es->rx_unicast);
4720 ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
4721 vsi->stat_offsets_loaded, &prev_es->rx_multicast,
4722 &cur_es->rx_multicast);
4724 ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
4725 vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
4726 &cur_es->rx_broadcast);
4728 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
4729 &prev_es->rx_discards, &cur_es->rx_discards);
4731 ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
4732 vsi->stat_offsets_loaded, &prev_es->tx_bytes,
4735 ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
4736 vsi->stat_offsets_loaded, &prev_es->tx_unicast,
4737 &cur_es->tx_unicast);
4739 ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
4740 vsi->stat_offsets_loaded, &prev_es->tx_multicast,
4741 &cur_es->tx_multicast);
4743 ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
4744 vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
4745 &cur_es->tx_broadcast);
4747 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
4748 &prev_es->tx_errors, &cur_es->tx_errors);
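/* with the flag below latched, later calls subtract the baseline
 * captured on the first pass and report counts relative to driver load
 */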
4750 vsi->stat_offsets_loaded = true;
4754 * ice_update_vsi_ring_stats - Update VSI stats counters
4755 * @vsi: the VSI to be updated
4757 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
4759 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
4760 struct ice_ring *ring;
4764 /* reset netdev stats */
4765 vsi_stats->tx_packets = 0;
4766 vsi_stats->tx_bytes = 0;
4767 vsi_stats->rx_packets = 0;
4768 vsi_stats->rx_bytes = 0;
4770 /* reset non-netdev (extended) stats */
4771 vsi->tx_restart = 0;
4773 vsi->tx_linearize = 0;
4774 vsi->rx_buf_failed = 0;
4775 vsi->rx_page_failed = 0;
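/* READ_ONCE() on the ring pointers below keeps the compiler from
 * re-reading them mid-loop, since a reset/rebuild may be swapping the
 * rings out concurrently
 */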
4779 /* update Tx rings counters */
4780 ice_for_each_txq(vsi, i) {
4781 ring = READ_ONCE(vsi->tx_rings[i]);
4782 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
4783 vsi_stats->tx_packets += pkts;
4784 vsi_stats->tx_bytes += bytes;
4785 vsi->tx_restart += ring->tx_stats.restart_q;
4786 vsi->tx_busy += ring->tx_stats.tx_busy;
4787 vsi->tx_linearize += ring->tx_stats.tx_linearize;
4790 /* update Rx rings counters */
4791 ice_for_each_rxq(vsi, i) {
4792 ring = READ_ONCE(vsi->rx_rings[i]);
4793 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
4794 vsi_stats->rx_packets += pkts;
4795 vsi_stats->rx_bytes += bytes;
4796 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
4797 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
4804 * ice_update_vsi_stats - Update VSI stats counters
4805 * @vsi: the VSI to be updated
4807 static void ice_update_vsi_stats(struct ice_vsi *vsi)
4809 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
4810 struct ice_eth_stats *cur_es = &vsi->eth_stats;
4811 struct ice_pf *pf = vsi->back;
4813 if (test_bit(__ICE_DOWN, vsi->state) ||
4814 test_bit(__ICE_CFG_BUSY, pf->state))
4817 /* get stats as recorded by Tx/Rx rings */
4818 ice_update_vsi_ring_stats(vsi);
4820 /* get VSI stats as recorded by the hardware */
4821 ice_update_eth_stats(vsi);
4823 cur_ns->tx_errors = cur_es->tx_errors;
4824 cur_ns->rx_dropped = cur_es->rx_discards;
4825 cur_ns->tx_dropped = cur_es->tx_discards;
4826 cur_ns->multicast = cur_es->rx_multicast;
4828 /* update some more netdev stats if this is main VSI */
4829 if (vsi->type == ICE_VSI_PF) {
4830 cur_ns->rx_crc_errors = pf->stats.crc_errors;
4831 cur_ns->rx_errors = pf->stats.crc_errors +
4832 pf->stats.illegal_bytes;
4833 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
4838 * ice_update_pf_stats - Update PF port stats counters
4839 * @pf: PF whose stats need to be updated
4841 static void ice_update_pf_stats(struct ice_pf *pf)
4843 struct ice_hw_port_stats *prev_ps, *cur_ps;
4844 struct ice_hw *hw = &pf->hw;
4847 prev_ps = &pf->stats_prev;
4848 cur_ps = &pf->stats;
4851 ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id),
4852 pf->stat_prev_loaded, &prev_ps->eth.rx_bytes,
4853 &cur_ps->eth.rx_bytes);
4855 ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id),
4856 pf->stat_prev_loaded, &prev_ps->eth.rx_unicast,
4857 &cur_ps->eth.rx_unicast);
4859 ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id),
4860 pf->stat_prev_loaded, &prev_ps->eth.rx_multicast,
4861 &cur_ps->eth.rx_multicast);
4863 ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id),
4864 pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast,
4865 &cur_ps->eth.rx_broadcast);
4867 ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id),
4868 pf->stat_prev_loaded, &prev_ps->eth.tx_bytes,
4869 &cur_ps->eth.tx_bytes);
4871 ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id),
4872 pf->stat_prev_loaded, &prev_ps->eth.tx_unicast,
4873 &cur_ps->eth.tx_unicast);
4875 ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id),
4876 pf->stat_prev_loaded, &prev_ps->eth.tx_multicast,
4877 &cur_ps->eth.tx_multicast);
4879 ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id),
4880 pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast,
4881 &cur_ps->eth.tx_broadcast);
4883 ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,
4884 &prev_ps->tx_dropped_link_down,
4885 &cur_ps->tx_dropped_link_down);
4887 ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id),
4888 pf->stat_prev_loaded, &prev_ps->rx_size_64,
4889 &cur_ps->rx_size_64);
4891 ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id),
4892 pf->stat_prev_loaded, &prev_ps->rx_size_127,
4893 &cur_ps->rx_size_127);
4895 ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id),
4896 pf->stat_prev_loaded, &prev_ps->rx_size_255,
4897 &cur_ps->rx_size_255);
4899 ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id),
4900 pf->stat_prev_loaded, &prev_ps->rx_size_511,
4901 &cur_ps->rx_size_511);
4903 ice_stat_update40(hw, GLPRT_PRC1023H(pf_id),
4904 GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
4905 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
4907 ice_stat_update40(hw, GLPRT_PRC1522H(pf_id),
4908 GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
4909 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
4911 ice_stat_update40(hw, GLPRT_PRC9522H(pf_id),
4912 GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
4913 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
4915 ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id),
4916 pf->stat_prev_loaded, &prev_ps->tx_size_64,
4917 &cur_ps->tx_size_64);
4919 ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id),
4920 pf->stat_prev_loaded, &prev_ps->tx_size_127,
4921 &cur_ps->tx_size_127);
4923 ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id),
4924 pf->stat_prev_loaded, &prev_ps->tx_size_255,
4925 &cur_ps->tx_size_255);
4927 ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id),
4928 pf->stat_prev_loaded, &prev_ps->tx_size_511,
4929 &cur_ps->tx_size_511);
4931 ice_stat_update40(hw, GLPRT_PTC1023H(pf_id),
4932 GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
4933 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
4935 ice_stat_update40(hw, GLPRT_PTC1522H(pf_id),
4936 GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
4937 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
4939 ice_stat_update40(hw, GLPRT_PTC9522H(pf_id),
4940 GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
4941 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
4943 ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,
4944 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
4946 ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded,
4947 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
4949 ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded,
4950 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
4952 ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
4953 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
4955 ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
4956 &prev_ps->crc_errors, &cur_ps->crc_errors);
4958 ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded,
4959 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
4961 ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded,
4962 &prev_ps->mac_local_faults,
4963 &cur_ps->mac_local_faults);
4965 ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded,
4966 &prev_ps->mac_remote_faults,
4967 &cur_ps->mac_remote_faults);
4969 ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded,
4970 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
4972 ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded,
4973 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
4975 ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded,
4976 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
4978 ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded,
4979 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
4981 ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded,
4982 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
4984 pf->stat_prev_loaded = true;
4988 * ice_get_stats64 - get statistics for network device structure
4989 * @netdev: network interface device structure
4990 * @stats: main device statistics structure
4993 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
4995 struct ice_netdev_priv *np = netdev_priv(netdev);
4996 struct rtnl_link_stats64 *vsi_stats;
4997 struct ice_vsi *vsi = np->vsi;
4999 vsi_stats = &vsi->net_stats;
5001 if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq)
5003 /* netdev packet/byte stats come from the per-ring counters, which are
5004 * summed up by ice_update_vsi_ring_stats().
5006 ice_update_vsi_ring_stats(vsi);
5007 stats->tx_packets = vsi_stats->tx_packets;
5008 stats->tx_bytes = vsi_stats->tx_bytes;
5009 stats->rx_packets = vsi_stats->rx_packets;
5010 stats->rx_bytes = vsi_stats->rx_bytes;
5012 /* The rest of the stats could be read from the hardware, but instead we
5013 * just return the values that the watchdog task already obtained from it.
5016 stats->multicast = vsi_stats->multicast;
5017 stats->tx_errors = vsi_stats->tx_errors;
5018 stats->tx_dropped = vsi_stats->tx_dropped;
5019 stats->rx_errors = vsi_stats->rx_errors;
5020 stats->rx_dropped = vsi_stats->rx_dropped;
5021 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
5022 stats->rx_length_errors = vsi_stats->rx_length_errors;
5025 #ifdef CONFIG_NET_POLL_CONTROLLER
5027 * ice_netpoll - polling "interrupt" handler
5028 * @netdev: network interface device structure
5030 * Used by netconsole to send skbs without having to re-enable interrupts.
5031 * This is not called in the normal interrupt path.
5033 static void ice_netpoll(struct net_device *netdev)
5035 struct ice_netdev_priv *np = netdev_priv(netdev);
5036 struct ice_vsi *vsi = np->vsi;
5037 struct ice_pf *pf = vsi->back;
5040 if (test_bit(__ICE_DOWN, vsi->state) ||
5041 !test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
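/* with MSI-X enabled, poll by calling each vector's interrupt handler
 * directly; the irq number is a dummy (0) since ice_msix_clean_rings()
 * only uses the q_vector argument
 */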
5044 for (i = 0; i < vsi->num_q_vectors; i++)
5045 ice_msix_clean_rings(0, vsi->q_vectors[i]);
5047 #endif /* CONFIG_NET_POLL_CONTROLLER */
5050 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5051 * @vsi: VSI having NAPI disabled
5053 static void ice_napi_disable_all(struct ice_vsi *vsi)
5060 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
5061 napi_disable(&vsi->q_vectors[q_idx]->napi);
5065 * ice_down - Shutdown the connection
5066 * @vsi: The VSI being stopped
5068 int ice_down(struct ice_vsi *vsi)
5072 /* The caller of this function is expected to set the
5073 * __ICE_DOWN bit in vsi->state
5076 netif_carrier_off(vsi->netdev);
5077 netif_tx_disable(vsi->netdev);
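/* teardown order: quiesce the stack first, then disable IRQs and stop
 * the rings before NAPI is disabled and the ring buffers are reclaimed
 */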
5080 ice_vsi_dis_irq(vsi);
5081 err = ice_vsi_stop_tx_rx_rings(vsi);
5082 ice_napi_disable_all(vsi);
5084 ice_for_each_txq(vsi, i)
5085 ice_clean_tx_ring(vsi->tx_rings[i]);
5087 ice_for_each_rxq(vsi, i)
5088 ice_clean_rx_ring(vsi->rx_rings[i]);
5091 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
5092 vsi->vsi_num, vsi->vsw->sw_id);
5097 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
5098 * @vsi: VSI having resources allocated
5100 * Return 0 on success, negative on failure
5102 static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
5106 if (!vsi->num_txq) {
5107 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
5112 ice_for_each_txq(vsi, i) {
5113 err = ice_setup_tx_ring(vsi->tx_rings[i]);
5122 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
5123 * @vsi: VSI having resources allocated
5125 * Return 0 on success, negative on failure
5127 static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
5131 if (!vsi->num_rxq) {
5132 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
5137 ice_for_each_rxq(vsi, i) {
5138 err = ice_setup_rx_ring(vsi->rx_rings[i]);
5147 * ice_vsi_req_irq - Request IRQ from the OS
5148 * @vsi: The VSI IRQ is being requested for
5149 * @basename: name for the vector
5151 * Return 0 on success and a negative value on error
5153 static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
5155 struct ice_pf *pf = vsi->back;
5158 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
5159 err = ice_vsi_req_irq_msix(vsi, basename);
5165 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
5166 * @vsi: the VSI having resources freed
5168 static void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
5175 ice_for_each_txq(vsi, i)
5176 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
5177 ice_free_tx_ring(vsi->tx_rings[i]);
5181 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
5182 * @vsi: the VSI having resources freed
5184 static void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
5191 ice_for_each_rxq(vsi, i)
5192 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
5193 ice_free_rx_ring(vsi->rx_rings[i]);
5197 * ice_vsi_open - Called when a network interface is made active
5198 * @vsi: the VSI to open
5200 * Initialization of the VSI
5202 * Returns 0 on success, negative value on error
5204 static int ice_vsi_open(struct ice_vsi *vsi)
5206 char int_name[ICE_INT_NAME_STR_LEN];
5207 struct ice_pf *pf = vsi->back;
5210 /* allocate descriptors */
5211 err = ice_vsi_setup_tx_rings(vsi);
5215 err = ice_vsi_setup_rx_rings(vsi);
5219 err = ice_vsi_cfg(vsi);
5223 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5224 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5225 err = ice_vsi_req_irq(vsi, int_name);
5229 /* Notify the stack of the actual queue counts. */
5230 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
5234 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
5238 err = ice_up_complete(vsi);
5240 goto err_up_complete;
5247 ice_vsi_free_irq(vsi);
5249 ice_vsi_free_rx_rings(vsi);
5251 ice_vsi_free_tx_rings(vsi);
5257 * ice_vsi_close - Shut down a VSI
5258 * @vsi: the VSI being shut down
5260 static void ice_vsi_close(struct ice_vsi *vsi)
5262 if (!test_and_set_bit(__ICE_DOWN, vsi->state))
5265 ice_vsi_free_irq(vsi);
5266 ice_vsi_free_tx_rings(vsi);
5267 ice_vsi_free_rx_rings(vsi);
5271 * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
5272 * @vsi: the VSI being removed
5274 static void ice_rss_clean(struct ice_vsi *vsi)
5280 if (vsi->rss_hkey_user)
5281 devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
5282 if (vsi->rss_lut_user)
5283 devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
5287 * ice_vsi_release - Delete a VSI and free its resources
5288 * @vsi: the VSI being removed
5290 * Returns 0 on success or < 0 on error
5292 static int ice_vsi_release(struct ice_vsi *vsi)
5299 /* do not unregister and free netdevs while the driver is in the reset
5300 * recovery pending state. Since reset/rebuild happens through the PF
5301 * service task workqueue, it's not a good idea to unregister a netdev
5302 * that is associated with the PF that is currently running the work
5303 * queue items. This is done to avoid a check_flush_dependency() warning
5306 if (vsi->netdev && !ice_is_reset_recovery_pending(pf->state)) {
5307 unregister_netdev(vsi->netdev);
5308 free_netdev(vsi->netdev);
5312 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
5315 /* Disable VSI and free resources */
5316 ice_vsi_dis_irq(vsi);
5319 /* reclaim interrupt vectors back to PF */
5320 ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
5321 pf->num_avail_msix += vsi->num_q_vectors;
5323 ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
5324 ice_vsi_delete(vsi);
5325 ice_vsi_free_q_vectors(vsi);
5326 ice_vsi_clear_rings(vsi);
5328 ice_vsi_put_qs(vsi);
5329 pf->q_left_tx += vsi->alloc_txq;
5330 pf->q_left_rx += vsi->alloc_rxq;
5332 /* retain the SW VSI data structure since it is needed to unregister and
5333 * free the VSI netdev when the PF is not in the reset recovery pending
5334 * state, e.g. during rmmod.
5336 if (!ice_is_reset_recovery_pending(pf->state))
5343 * ice_vsi_release_all - Delete all VSIs
5344 * @pf: PF from which all VSIs are being removed
5346 static void ice_vsi_release_all(struct ice_pf *pf)
5353 for (i = 0; i < pf->num_alloc_vsi; i++) {
5357 err = ice_vsi_release(pf->vsi[i]);
5359 dev_dbg(&pf->pdev->dev,
5360 "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
5361 i, err, pf->vsi[i]->vsi_num);
5366 * ice_dis_vsi - pause a VSI
5367 * @vsi: the VSI being paused
5369 static void ice_dis_vsi(struct ice_vsi *vsi)
5371 if (test_bit(__ICE_DOWN, vsi->state))
5374 set_bit(__ICE_NEEDS_RESTART, vsi->state);
5376 if (vsi->netdev && netif_running(vsi->netdev) &&
5377 vsi->type == ICE_VSI_PF) {
5379 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
5387 * ice_ena_vsi - resume a VSI
5388 * @vsi: the VSI being resumed
5390 static int ice_ena_vsi(struct ice_vsi *vsi)
5394 if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
5395 if (vsi->netdev && netif_running(vsi->netdev)) {
5397 err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
5405 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
5408 static void ice_pf_dis_all_vsi(struct ice_pf *pf)
5412 ice_for_each_vsi(pf, v)
5414 ice_dis_vsi(pf->vsi[v]);
5418 * ice_pf_ena_all_vsi - Resume all VSIs on a PF
5421 static int ice_pf_ena_all_vsi(struct ice_pf *pf)
5425 ice_for_each_vsi(pf, v)
5427 if (ice_ena_vsi(pf->vsi[v]))
5434 * ice_vsi_rebuild_all - rebuild all VSIs in the PF
5437 static int ice_vsi_rebuild_all(struct ice_pf *pf)
5441 /* loop through the pf->vsi array and rebuild each VSI that exists */
5442 for (i = 0; i < pf->num_alloc_vsi; i++) {
5448 err = ice_vsi_rebuild(pf->vsi[i]);
5450 dev_err(&pf->pdev->dev,
5451 "VSI at index %d rebuild failed\n",
5456 dev_info(&pf->pdev->dev,
5457 "VSI at index %d rebuilt. vsi_num = 0x%x\n",
5458 pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
5465 * ice_rebuild - rebuild after reset
5466 * @pf: PF to rebuild
5468 static void ice_rebuild(struct ice_pf *pf)
5470 struct device *dev = &pf->pdev->dev;
5471 struct ice_hw *hw = &pf->hw;
5472 enum ice_status ret;
5475 if (test_bit(__ICE_DOWN, pf->state))
5476 goto clear_recovery;
5478 dev_dbg(dev, "rebuilding pf\n");
5480 ret = ice_init_all_ctrlq(hw);
5482 dev_err(dev, "control queues init failed %d\n", ret);
5483 goto err_init_ctrlq;
5486 ret = ice_clear_pf_cfg(hw);
5488 dev_err(dev, "clear PF configuration failed %d\n", ret);
5489 goto err_init_ctrlq;
5492 ice_clear_pxe_mode(hw);
5494 ret = ice_get_caps(hw);
5496 dev_err(dev, "ice_get_caps failed %d\n", ret);
5497 goto err_init_ctrlq;
5500 err = ice_sched_init_port(hw->port_info);
5502 goto err_sched_init_port;
5504 err = ice_vsi_rebuild_all(pf);
5506 dev_err(dev, "ice_vsi_rebuild_all failed\n");
5507 goto err_vsi_rebuild;
5510 ret = ice_replay_all_fltr(&pf->hw);
5512 dev_err(&pf->pdev->dev,
5513 "error replaying switch filter rules\n");
5514 goto err_vsi_rebuild;
5517 /* start misc vector */
5518 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
5519 err = ice_req_irq_msix_misc(pf);
5521 dev_err(dev, "misc vector setup failed: %d\n", err);
5522 goto err_vsi_rebuild;
5526 /* restart the VSIs that were rebuilt and running before the reset */
5527 err = ice_pf_ena_all_vsi(pf);
5529 dev_err(&pf->pdev->dev, "error enabling VSIs\n");
5530 /* no need to disable VSIs in the teardown path of ice_rebuild()
5531 * since it's already taken care of in ice_vsi_open()
5533 goto err_vsi_rebuild;
5536 /* if we get here, reset flow is successful */
5537 clear_bit(__ICE_RESET_FAILED, pf->state);
5541 ice_vsi_release_all(pf);
5542 err_sched_init_port:
5543 ice_sched_cleanup_all(hw);
5545 ice_shutdown_all_ctrlq(hw);
5546 set_bit(__ICE_RESET_FAILED, pf->state);
5548 /* set this bit in PF state to control service task scheduling */
5549 set_bit(__ICE_NEEDS_RESTART, pf->state);
5550 dev_err(dev, "Rebuild failed, unload and reload driver\n");
5554 * ice_change_mtu - NDO callback to change the MTU
5555 * @netdev: network interface device structure
5556 * @new_mtu: new value for maximum frame size
5558 * Returns 0 on success, negative on failure
5560 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
5562 struct ice_netdev_priv *np = netdev_priv(netdev);
5563 struct ice_vsi *vsi = np->vsi;
5564 struct ice_pf *pf = vsi->back;
5567 if (new_mtu == netdev->mtu) {
5568 netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
5572 if (new_mtu < netdev->min_mtu) {
5573 netdev_err(netdev, "new mtu invalid. min_mtu is %d\n",
5576 } else if (new_mtu > netdev->max_mtu) {
5577 netdev_err(netdev, "new mtu invalid. max_mtu is %d\n",
5581 /* if a reset is in progress, wait for some time for it to complete */
5583 if (ice_is_reset_recovery_pending(pf->state)) {
5585 usleep_range(1000, 2000);
5590 } while (count < 100);
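/* the loop above sleeps 1-2 ms per iteration, bounding the wait for a
 * pending reset to roughly 100-200 ms before giving up
 */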
5593 netdev_err(netdev, "can't change mtu. Device is busy\n");
5597 netdev->mtu = new_mtu;
5599 /* if VSI is up, bring it down and then back up */
5600 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
5603 err = ice_down(vsi);
5605 netdev_err(netdev, "change mtu if_up err %d\n", err);
5611 netdev_err(netdev, "change mtu if_up err %d\n", err);
5616 netdev_dbg(netdev, "changed mtu to %d\n", new_mtu);
5621 * ice_set_rss - Set RSS keys and lut
5622 * @vsi: Pointer to VSI structure
5623 * @seed: RSS hash seed
5624 * @lut: Lookup table
5625 * @lut_size: Lookup table size
5627 * Returns 0 on success, negative on failure
5629 int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
5631 struct ice_pf *pf = vsi->back;
5632 struct ice_hw *hw = &pf->hw;
5633 enum ice_status status;
5636 struct ice_aqc_get_set_rss_keys *buf =
5637 (struct ice_aqc_get_set_rss_keys *)seed;
5639 status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf);
5642 dev_err(&pf->pdev->dev,
5643 "Cannot set RSS key, err %d aq_err %d\n",
5644 status, hw->adminq.rq_last_status);
5650 status = ice_aq_set_rss_lut(hw, vsi->vsi_num,
5651 vsi->rss_lut_type, lut, lut_size);
5653 dev_err(&pf->pdev->dev,
5654 "Cannot set RSS lut, err %d aq_err %d\n",
5655 status, hw->adminq.rq_last_status);
5664 * ice_get_rss - Get RSS keys and lut
5665 * @vsi: Pointer to VSI structure
5666 * @seed: Buffer to store the keys
5667 * @lut: Buffer to store the lookup table entries
5668 * @lut_size: Size of buffer to store the lookup table entries
5670 * Returns 0 on success, negative on failure
5672 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
5674 struct ice_pf *pf = vsi->back;
5675 struct ice_hw *hw = &pf->hw;
5676 enum ice_status status;
5679 struct ice_aqc_get_set_rss_keys *buf =
5680 (struct ice_aqc_get_set_rss_keys *)seed;
5682 status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf);
5684 dev_err(&pf->pdev->dev,
5685 "Cannot get RSS key, err %d aq_err %d\n",
5686 status, hw->adminq.rq_last_status);
5692 status = ice_aq_get_rss_lut(hw, vsi->vsi_num,
5693 vsi->rss_lut_type, lut, lut_size);
5695 dev_err(&pf->pdev->dev,
5696 "Cannot get RSS lut, err %d aq_err %d\n",
5697 status, hw->adminq.rq_last_status);
5706 * ice_bridge_getlink - Get the hardware bridge mode
5709 * @seq: RTNL message seq
5710 * @dev: the netdev being configured
5711 * @filter_mask: filter mask passed in
5712 * @nlflags: netlink flags passed in
5714 * Return the bridge mode (VEB/VEPA)
5717 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
5718 struct net_device *dev, u32 filter_mask, int nlflags)
5720 struct ice_netdev_priv *np = netdev_priv(dev);
5721 struct ice_vsi *vsi = np->vsi;
5722 struct ice_pf *pf = vsi->back;
5725 bmode = pf->first_sw->bridge_mode;
5727 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
5732 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
5733 * @vsi: Pointer to VSI structure
5734 * @bmode: Hardware bridge mode (VEB/VEPA)
5736 * Returns 0 on success, negative on failure
5738 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
5740 struct device *dev = &vsi->back->pdev->dev;
5741 struct ice_aqc_vsi_props *vsi_props;
5742 struct ice_hw *hw = &vsi->back->hw;
5743 struct ice_vsi_ctx ctxt = { 0 };
5744 enum ice_status status;
5746 vsi_props = &vsi->info;
5747 ctxt.info = vsi->info;
5749 if (bmode == BRIDGE_MODE_VEB)
5750 /* change from VEPA to VEB mode */
5751 ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
5753 /* change from VEB to VEPA mode */
5754 ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
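/* ALLOW_LB enables local loopback between VSIs behind the same switch:
 * set for VEB, where the embedded bridge forwards locally, and cleared
 * for VEPA, where traffic always goes out to the adjacent bridge
 */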
5755 ctxt.vsi_num = vsi->vsi_num;
5756 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
5757 status = ice_aq_update_vsi(hw, &ctxt, NULL);
5759 dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
5760 bmode, status, hw->adminq.sq_last_status);
5763 /* Update sw flags for bookkeeping */
5764 vsi_props->sw_flags = ctxt.info.sw_flags;
5770 * ice_bridge_setlink - Set the hardware bridge mode
5771 * @dev: the netdev being configured
5772 * @nlh: RTNL message
5773 * @flags: bridge setlink flags
5775 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
5776 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
5777 * not already set) for all VSIs connected to this switch, and also updates
5778 * the unicast switch filter rules for the corresponding switch of the netdev.
5781 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
5782 u16 __always_unused flags)
5784 struct ice_netdev_priv *np = netdev_priv(dev);
5785 struct ice_pf *pf = np->vsi->back;
5786 struct nlattr *attr, *br_spec;
5787 struct ice_hw *hw = &pf->hw;
5788 enum ice_status status;
5789 struct ice_sw *pf_sw;
5790 int rem, v, err = 0;
5792 pf_sw = pf->first_sw;
5793 /* find the attribute in the netlink message */
5794 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5796 nla_for_each_nested(attr, br_spec, rem) {
5799 if (nla_type(attr) != IFLA_BRIDGE_MODE)
5801 mode = nla_get_u16(attr);
5802 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
5804 /* Continue if bridge mode is not being flipped */
5805 if (mode == pf_sw->bridge_mode)
5807 /* Iterate through the PF VSI list and update the loopback
5810 ice_for_each_vsi(pf, v) {
5813 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
5818 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
5819 /* Update the unicast switch filter rules for the corresponding
5820 * switch of the netdev
5822 status = ice_update_sw_rule_bridge_mode(hw);
5824 netdev_err(dev, "update SW_RULE for bridge mode failed, mode = %d err %d aq_err %d\n",
5825 mode, status, hw->adminq.sq_last_status);
5826 /* revert hw->evb_veb */
5827 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
5831 pf_sw->bridge_mode = mode;
5838 * ice_tx_timeout - Respond to a Tx Hang
5839 * @netdev: network interface device structure
5841 static void ice_tx_timeout(struct net_device *netdev)
5843 struct ice_netdev_priv *np = netdev_priv(netdev);
5844 struct ice_ring *tx_ring = NULL;
5845 struct ice_vsi *vsi = np->vsi;
5846 struct ice_pf *pf = vsi->back;
5847 u32 head, val = 0, i;
5848 int hung_queue = -1;
5850 pf->tx_timeout_count++;
5852 /* find the stopped queue the same way the stack does */
5853 for (i = 0; i < netdev->num_tx_queues; i++) {
5854 struct netdev_queue *q;
5855 unsigned long trans_start;
5857 q = netdev_get_tx_queue(netdev, i);
5858 trans_start = q->trans_start;
5859 if (netif_xmit_stopped(q) &&
5861 (trans_start + netdev->watchdog_timeo))) {
5867 if (i == netdev->num_tx_queues) {
5868 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
5870 /* now that we have an index, find the tx_ring struct */
5871 for (i = 0; i < vsi->num_txq; i++) {
5872 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
5874 vsi->tx_rings[i]->q_index) {
5875 tx_ring = vsi->tx_rings[i];
5882 /* Reset recovery level if enough time has elapsed since the last timeout.
5883 * Also ensure no new reset action happens before the next timeout period.
5885 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
5886 pf->tx_timeout_recovery_level = 1;
5887 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
5888 netdev->watchdog_timeo)))
5892 head = tx_ring->next_to_clean;
5893 /* Read interrupt register */
5894 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
5896 GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
5897 tx_ring->vsi->base_vector - 1));
5899 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
5900 vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
5901 head, tx_ring->next_to_use,
5902 readl(tx_ring->tail), val);
5905 pf->tx_timeout_last_recovery = jiffies;
5906 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
5907 pf->tx_timeout_recovery_level, hung_queue);
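/* escalate through progressively heavier resets: PF reset, then core
 * reset, then global reset; beyond that the device is declared
 * unrecoverable
 */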
5909 switch (pf->tx_timeout_recovery_level) {
5911 set_bit(__ICE_PFR_REQ, pf->state);
5914 set_bit(__ICE_CORER_REQ, pf->state);
5917 set_bit(__ICE_GLOBR_REQ, pf->state);
5920 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
5921 set_bit(__ICE_DOWN, pf->state);
5922 set_bit(__ICE_NEEDS_RESTART, vsi->state);
5926 ice_service_task_schedule(pf);
5927 pf->tx_timeout_recovery_level++;
5931 * ice_open - Called when a network interface becomes active
5932 * @netdev: network interface device structure
5934 * The open entry point is called when a network interface is made
5935 * active by the system (IFF_UP). At this point all resources needed
5936 * for transmit and receive operations are allocated, the interrupt
5937 * handler is registered with the OS, the netdev watchdog is enabled,
5938 * and the stack is notified that the interface is ready.
5940 * Returns 0 on success, negative value on failure
5942 static int ice_open(struct net_device *netdev)
5944 struct ice_netdev_priv *np = netdev_priv(netdev);
5945 struct ice_vsi *vsi = np->vsi;
5948 if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
5949 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
5953 netif_carrier_off(netdev);
5955 err = ice_vsi_open(vsi);
5958 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
5959 vsi->vsi_num, vsi->vsw->sw_id);
5964 * ice_stop - Disables a network interface
5965 * @netdev: network interface device structure
5967 * The stop entry point is called when an interface is de-activated by the OS,
5968 * and the netdevice enters the DOWN state. The hardware is still under the
5969 * driver's control, but the netdev interface is disabled.
5971 * Returns success only - not allowed to fail
5973 static int ice_stop(struct net_device *netdev)
5975 struct ice_netdev_priv *np = netdev_priv(netdev);
5976 struct ice_vsi *vsi = np->vsi;
5984 * ice_features_check - Validate encapsulated packet conforms to limits
5986 * @netdev: This port's netdev
5987 * @features: Offload features that the stack believes apply
5989 static netdev_features_t
5990 ice_features_check(struct sk_buff *skb,
5991 struct net_device __always_unused *netdev,
5992 netdev_features_t features)
5996 /* No point in doing any of this if neither checksum nor GSO are
5997 * being requested for this frame. We can rule out both by just
5998 * checking for CHECKSUM_PARTIAL
6000 if (skb->ip_summed != CHECKSUM_PARTIAL)
6003 /* We cannot support GSO if the MSS is going to be less than
6004 * 64 bytes; if it is, drop GSO support for this frame.
6006 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
6007 features &= ~NETIF_F_GSO_MASK;
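/* the header length checks below ensure the MAC/IP/L4 offsets fit the
 * corresponding Tx descriptor fields; oversized headers fall back to
 * software checksum/GSO via out_rm_features
 */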
6009 len = skb_network_header(skb) - skb->data;
6010 if (len & ~(ICE_TXD_MACLEN_MAX))
6011 goto out_rm_features;
6013 len = skb_transport_header(skb) - skb_network_header(skb);
6014 if (len & ~(ICE_TXD_IPLEN_MAX))
6015 goto out_rm_features;
6017 if (skb->encapsulation) {
6018 len = skb_inner_network_header(skb) - skb_transport_header(skb);
6019 if (len & ~(ICE_TXD_L4LEN_MAX))
6020 goto out_rm_features;
6022 len = skb_inner_transport_header(skb) -
6023 skb_inner_network_header(skb);
6024 if (len & ~(ICE_TXD_IPLEN_MAX))
6025 goto out_rm_features;
6030 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
6033 static const struct net_device_ops ice_netdev_ops = {
6034 .ndo_open = ice_open,
6035 .ndo_stop = ice_stop,
6036 .ndo_start_xmit = ice_start_xmit,
6037 .ndo_features_check = ice_features_check,
6038 .ndo_set_rx_mode = ice_set_rx_mode,
6039 .ndo_set_mac_address = ice_set_mac_address,
6040 .ndo_validate_addr = eth_validate_addr,
6041 .ndo_change_mtu = ice_change_mtu,
6042 .ndo_get_stats64 = ice_get_stats64,
6043 #ifdef CONFIG_NET_POLL_CONTROLLER
6044 .ndo_poll_controller = ice_netpoll,
6045 #endif /* CONFIG_NET_POLL_CONTROLLER */
6046 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
6047 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
6048 .ndo_set_features = ice_set_features,
6049 .ndo_bridge_getlink = ice_bridge_getlink,
6050 .ndo_bridge_setlink = ice_bridge_setlink,
6051 .ndo_fdb_add = ice_fdb_add,
6052 .ndo_fdb_del = ice_fdb_del,
6053 .ndo_tx_timeout = ice_tx_timeout,