1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
6 #include "ice_dcb_lib.h"
9 * ice_setup_rx_ctx - Configure a receive ring context
10 * @ring: The Rx ring to configure
12 * Configure the Rx descriptor ring in RLAN context.
14 static int ice_setup_rx_ctx(struct ice_ring *ring)
16 struct ice_vsi *vsi = ring->vsi;
17 struct ice_hw *hw = &vsi->back->hw;
18 u32 rxdid = ICE_RXDID_FLEX_NIC;
19 struct ice_rlan_ctx rlan_ctx;
24 /* which Rx queue number this is in the global space of 2K Rx queues */
25 pf_q = vsi->rxq_map[ring->q_index];
27 /* clear the context structure first */
28 memset(&rlan_ctx, 0, sizeof(rlan_ctx));
30 rlan_ctx.base = ring->dma >> 7;
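/* Illustrative note (an assumption, not from the original source): the
 * shift by 7 suggests the descriptor base is programmed in 128-byte
 * units, i.e. ring->dma is expected to be 128-byte aligned.
 */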
32 rlan_ctx.qlen = ring->count;
34 /* Receive Packet Data Buffer Size.
35 * The Packet Data Buffer Size is defined in 128 byte units.
37 rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
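/* Worked example (illustrative): assuming ICE_RLAN_CTX_DBUF_S is 7 to
 * match the 128-byte units noted above, the default 2048-byte Rx buffer
 * is programmed as 2048 >> 7 = 16.
 */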
39 /* use 32 byte descriptors */
42 /* Strip the Ethernet CRC bytes before the packet is posted to host
45 rlan_ctx.crcstrip = 1;
47 /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
50 rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
51 rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
52 rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
54 /* This controls whether VLAN is stripped from inner headers.
55 * The VLAN in the inner L2 header is stripped to the receive
56 * descriptor if enabled by this flag.
60 /* Max packet size for this queue - must not be set to a larger value
63 rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
64 ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
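/* Worked example (illustrative, assuming ICE_MAX_CHAINED_RX_BUFS is 5):
 * with 2048-byte buffers the chaining limit is 5 * 2048 = 10240, so a
 * 9022-byte max_frame is used as-is while larger values are clamped.
 */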
66 /* Rx queue threshold in units of 64 */
67 rlan_ctx.lrxqthresh = 1;
69 /* Enable Flexible Descriptors in the queue context which
70 * allows this driver to select a specific receive descriptor format
72 if (vsi->type != ICE_VSI_VF) {
73 regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
74 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
75 QRXFLXP_CNTXT_RXDID_IDX_M;
77 /* increasing context priority to pick up profile ID;
78 * default is 0x01; setting to 0x03 to ensure profile
79	 * is programmed if the previous context is of the same priority
81 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
82 QRXFLXP_CNTXT_RXDID_PRIO_M;
84 wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
87 /* Absolute queue number out of 2K needs to be passed */
88 err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
90 dev_err(&vsi->back->pdev->dev,
91 "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
96 if (vsi->type == ICE_VSI_VF)
99 /* init queue specific tail register */
100 ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
101 writel(0, ring->tail);
102 ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
108 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
109 * @ring: The Tx ring to configure
110 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
111 * @pf_q: queue index in the PF space
113 * Configure the Tx descriptor ring in TLAN context.
116 ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
118 struct ice_vsi *vsi = ring->vsi;
119 struct ice_hw *hw = &vsi->back->hw;
121 tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
123 tlan_ctx->port_num = vsi->port_info->lport;
125 /* Transmit Queue Length */
126 tlan_ctx->qlen = ring->count;
128 ice_set_cgd_num(tlan_ctx, ring);
131 tlan_ctx->pf_num = hw->pf_id;
133 /* queue belongs to a specific VSI type
134 * VF / VM index should be programmed per vmvf_type setting:
135 * for vmvf_type = VF, it is VF number between 0-256
136 * for vmvf_type = VM, it is VM number between 0-767
137 * for PF or EMP this field should be set to zero
143 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
146 /* Firmware expects vmvf_num to be absolute VF ID */
147 tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
148 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
154 /* make sure the context is associated with the right VSI */
155 tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
157 tlan_ctx->tso_ena = ICE_TX_LEGACY;
158 tlan_ctx->tso_qnum = pf_q;
160 /* Legacy or Advanced Host Interface:
161 * 0: Advanced Host Interface
162 * 1: Legacy Host Interface
164 tlan_ctx->legacy_int = ICE_TX_LEGACY;
168 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
169 * @pf: the PF being configured
170 * @pf_q: the PF queue
171 * @ena: enable or disable state of the queue
173 * This routine will wait for the given Rx queue of the PF to reach the
174 * enabled or disabled state.
175 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
176 * multiple retries; otherwise returns 0 on success.
178 static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
182 for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
183 if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
184 QRX_CTRL_QENA_STAT_M))
187 usleep_range(20, 40);
194 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
195 * @vsi: the VSI being configured
196 * @ena: start or stop the Rx rings
198 static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
200 struct ice_pf *pf = vsi->back;
201 struct ice_hw *hw = &pf->hw;
204 for (i = 0; i < vsi->num_rxq; i++) {
205 int pf_q = vsi->rxq_map[i];
208 rx_reg = rd32(hw, QRX_CTRL(pf_q));
210 /* Skip if the queue is already in the requested state */
211 if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
214 /* turn on/off the queue */
216 rx_reg |= QRX_CTRL_QENA_REQ_M;
218 rx_reg &= ~QRX_CTRL_QENA_REQ_M;
219 wr32(hw, QRX_CTRL(pf_q), rx_reg);
221 /* wait for the change to finish */
222 ret = ice_pf_rxq_wait(pf, pf_q, ena);
224 dev_err(&pf->pdev->dev,
225 "VSI idx %d Rx ring %d %sable timeout\n",
226 vsi->idx, pf_q, (ena ? "en" : "dis"));
235 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
238 * On error: returns error code (negative)
239 * On success: returns 0
241 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
243 struct ice_pf *pf = vsi->back;
245 /* allocate memory for both Tx and Rx ring pointers */
246 vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
247 sizeof(*vsi->tx_rings), GFP_KERNEL);
251 vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
252 sizeof(*vsi->rx_rings), GFP_KERNEL);
256 /* There is no need to allocate q_vectors for a loopback VSI. */
257 if (vsi->type == ICE_VSI_LB)
260 /* allocate memory for q_vector pointers */
261 vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
262 sizeof(*vsi->q_vectors), GFP_KERNEL);
269 devm_kfree(&pf->pdev->dev, vsi->rx_rings);
271 devm_kfree(&pf->pdev->dev, vsi->tx_rings);
277 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
278 * @vsi: the VSI being configured
280 static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
286 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
287 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
290 dev_dbg(&vsi->back->pdev->dev,
291 "Not setting number of Tx/Rx descriptors for VSI type %d\n",
298 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
299 * @vsi: the VSI being configured
300 * @vf_id: ID of the VF being configured
304 static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
306 struct ice_pf *pf = vsi->back;
307 struct ice_vf *vf = NULL;
309 if (vsi->type == ICE_VSI_VF)
314 vsi->alloc_txq = pf->num_lan_tx;
315 vsi->alloc_rxq = pf->num_lan_rx;
316 vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
319 vf = &pf->vf[vsi->vf_id];
320 vsi->alloc_txq = vf->num_vf_qs;
321 vsi->alloc_rxq = vf->num_vf_qs;
322 /* pf->num_vf_msix includes (VF miscellaneous vector +
323		 * data queue interrupts). Since vsi->num_q_vectors is the number
324		 * of queue vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
325 * original vector count
327 vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
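/* Worked example (illustrative): if the PF grants each VF 5 MSI-X
 * vectors, one is reserved for the non-queue (OICR) cause and
 * vsi->num_q_vectors becomes 5 - 1 = 4.
 */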
334 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
338 ice_vsi_set_num_desc(vsi);
342 * ice_get_free_slot - get the next non-NULL location index in array
343 * @array: array to search
344 * @size: size of the array
345 * @curr: last known occupied index to be used as a search hint
347 * void * is being used to keep the functionality generic. This lets us use this
348 * function on any array of pointers.
350 static int ice_get_free_slot(void *array, int size, int curr)
352 int **tmp_array = (int **)array;
355 if (curr < (size - 1) && !tmp_array[curr + 1]) {
360 while ((i < size) && (tmp_array[i]))
371 * ice_vsi_delete - delete a VSI from the switch
372 * @vsi: pointer to VSI being removed
374 void ice_vsi_delete(struct ice_vsi *vsi)
376 struct ice_pf *pf = vsi->back;
377 struct ice_vsi_ctx *ctxt;
378 enum ice_status status;
380 ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
384 if (vsi->type == ICE_VSI_VF)
385 ctxt->vf_num = vsi->vf_id;
386 ctxt->vsi_num = vsi->vsi_num;
388 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
390 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
392 dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
395 devm_kfree(&pf->pdev->dev, ctxt);
399 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
400 * @vsi: pointer to VSI being cleared
402 static void ice_vsi_free_arrays(struct ice_vsi *vsi)
404 struct ice_pf *pf = vsi->back;
406 /* free the ring and vector containers */
407 if (vsi->q_vectors) {
408 devm_kfree(&pf->pdev->dev, vsi->q_vectors);
409 vsi->q_vectors = NULL;
412 devm_kfree(&pf->pdev->dev, vsi->tx_rings);
413 vsi->tx_rings = NULL;
416 devm_kfree(&pf->pdev->dev, vsi->rx_rings);
417 vsi->rx_rings = NULL;
422 * ice_vsi_clear - clean up and deallocate the provided VSI
423 * @vsi: pointer to VSI being cleared
425 * This deallocates the VSI's queue resources, removes it from the PF's
426 * VSI array if necessary, and deallocates the VSI
428 * Returns 0 on success, negative on failure
430 int ice_vsi_clear(struct ice_vsi *vsi)
432 struct ice_pf *pf = NULL;
442 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
443 dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
448 mutex_lock(&pf->sw_mutex);
449 /* updates the PF for this cleared VSI */
451 pf->vsi[vsi->idx] = NULL;
452 if (vsi->idx < pf->next_vsi)
453 pf->next_vsi = vsi->idx;
455 ice_vsi_free_arrays(vsi);
456 mutex_unlock(&pf->sw_mutex);
457 devm_kfree(&pf->pdev->dev, vsi);
463 * ice_msix_clean_rings - MSIX mode Interrupt Handler
464 * @irq: interrupt number
465 * @data: pointer to a q_vector
467 static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
469 struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
471 if (!q_vector->tx.ring && !q_vector->rx.ring)
474 napi_schedule(&q_vector->napi);
480 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
481 * @pf: board private structure
483 * @vf_id: ID of the VF being configured
485 * returns a pointer to a VSI on success, NULL on failure.
487 static struct ice_vsi *
488 ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
490 struct ice_vsi *vsi = NULL;
492 /* Need to protect the allocation of the VSIs at the PF level */
493 mutex_lock(&pf->sw_mutex);
495 /* If we have already allocated our maximum number of VSIs,
496 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
497 * is available to be populated
499 if (pf->next_vsi == ICE_NO_VSI) {
500 dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
504 vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
510 set_bit(__ICE_DOWN, vsi->state);
511 vsi->idx = pf->next_vsi;
512 vsi->work_lmt = ICE_DFLT_IRQ_WORK;
514 if (type == ICE_VSI_VF)
515 ice_vsi_set_num_qs(vsi, vf_id);
517 ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
521 if (ice_vsi_alloc_arrays(vsi))
524 /* Setup default MSIX irq handler for VSI */
525 vsi->irq_handler = ice_msix_clean_rings;
528 if (ice_vsi_alloc_arrays(vsi))
532 if (ice_vsi_alloc_arrays(vsi))
536 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
540 /* fill VSI slot in the PF struct */
541 pf->vsi[pf->next_vsi] = vsi;
543 /* prepare pf->next_vsi for next use */
544 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
549 devm_kfree(&pf->pdev->dev, vsi);
552 mutex_unlock(&pf->sw_mutex);
557 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
558 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
560 * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
562 static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
566 mutex_lock(qs_cfg->qs_mutex);
567 offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
568 0, qs_cfg->q_count, 0);
569 if (offset >= qs_cfg->pf_map_size) {
570 mutex_unlock(qs_cfg->qs_mutex);
574 bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
575 for (i = 0; i < qs_cfg->q_count; i++)
576 qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
577 mutex_unlock(qs_cfg->qs_mutex);
583 * __ice_vsi_get_qs_sc - Assign scattered queues from the PF to a VSI
584 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
586 * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
588 static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
592 mutex_lock(qs_cfg->qs_mutex);
593 for (i = 0; i < qs_cfg->q_count; i++) {
594 index = find_next_zero_bit(qs_cfg->pf_map,
595 qs_cfg->pf_map_size, index);
596 if (index >= qs_cfg->pf_map_size)
598 set_bit(index, qs_cfg->pf_map);
599 qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
601 mutex_unlock(qs_cfg->qs_mutex);
605 for (index = 0; index < i; index++) {
606 clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
607 qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
609 mutex_unlock(qs_cfg->qs_mutex);
615 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
616 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
618 * This function first tries to find contiguous space. If that is not
619 * successful, it falls back to the scatter approach.
621 * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
623 static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
627 ret = __ice_vsi_get_qs_contig(qs_cfg);
629 /* contig failed, so try with scatter approach */
630 qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
631 qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
632 qs_cfg->scatter_count);
633 ret = __ice_vsi_get_qs_sc(qs_cfg);
639 * ice_vsi_get_qs - Assign queues from PF to VSI
640 * @vsi: the VSI to assign queues to
642 * Returns 0 on success and a negative value on error
644 static int ice_vsi_get_qs(struct ice_vsi *vsi)
646 struct ice_pf *pf = vsi->back;
647 struct ice_qs_cfg tx_qs_cfg = {
648 .qs_mutex = &pf->avail_q_mutex,
649 .pf_map = pf->avail_txqs,
650 .pf_map_size = ICE_MAX_TXQS,
651 .q_count = vsi->alloc_txq,
652 .scatter_count = ICE_MAX_SCATTER_TXQS,
653 .vsi_map = vsi->txq_map,
655 .mapping_mode = vsi->tx_mapping_mode
657 struct ice_qs_cfg rx_qs_cfg = {
658 .qs_mutex = &pf->avail_q_mutex,
659 .pf_map = pf->avail_rxqs,
660 .pf_map_size = ICE_MAX_RXQS,
661 .q_count = vsi->alloc_rxq,
662 .scatter_count = ICE_MAX_SCATTER_RXQS,
663 .vsi_map = vsi->rxq_map,
665 .mapping_mode = vsi->rx_mapping_mode
669 vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
670 vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
672 ret = __ice_vsi_get_qs(&tx_qs_cfg);
674 ret = __ice_vsi_get_qs(&rx_qs_cfg);
680 * ice_vsi_put_qs - Release queues from VSI to PF
681 * @vsi: the VSI that is going to release queues
683 void ice_vsi_put_qs(struct ice_vsi *vsi)
685 struct ice_pf *pf = vsi->back;
688 mutex_lock(&pf->avail_q_mutex);
690 for (i = 0; i < vsi->alloc_txq; i++) {
691 clear_bit(vsi->txq_map[i], pf->avail_txqs);
692 vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
695 for (i = 0; i < vsi->alloc_rxq; i++) {
696 clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
697 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
700 mutex_unlock(&pf->avail_q_mutex);
704 * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
705 * @vsi: the VSI being removed
707 static void ice_rss_clean(struct ice_vsi *vsi)
713 if (vsi->rss_hkey_user)
714 devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
715 if (vsi->rss_lut_user)
716 devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
720 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
721 * @vsi: the VSI being configured
723 static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
725 struct ice_hw_common_caps *cap;
726 struct ice_pf *pf = vsi->back;
728 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
733 cap = &pf->hw.func_caps.common_cap;
736 /* PF VSI will inherit RSS instance of PF */
737 vsi->rss_table_size = cap->rss_table_size;
738 vsi->rss_size = min_t(int, num_online_cpus(),
739 BIT(cap->rss_table_entry_width));
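/* Worked example (illustrative): with 16 online CPUs and a table entry
 * width giving BIT(width) = 128, rss_size = min(16, 128) = 16.
 */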
740 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
743		/* VF VSI gets a small RSS table
744 * For VSI_LUT, LUT size should be set to 64 bytes
746 vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
747 vsi->rss_size = min_t(int, num_online_cpus(),
748 BIT(cap->rss_table_entry_width));
749 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
754 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
761 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
762 * @ctxt: the VSI context being set
764 * This initializes a default VSI context for all sections except the Queues.
766 static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
770 memset(&ctxt->info, 0, sizeof(ctxt->info));
771	/* VSIs should be allocated from the shared pool */
772 ctxt->alloc_from_pool = true;
773 /* Src pruning enabled by default */
774 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
775 /* Traffic from VSI can be sent to LAN */
776 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
777	/* By default, bits 3 and 4 in vlan_flags are 0, which results in legacy
778 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
779 * packets untagged/tagged.
781 ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
782 ICE_AQ_VSI_VLAN_MODE_M) >>
783 ICE_AQ_VSI_VLAN_MODE_S);
784 /* Have 1:1 UP mapping for both ingress/egress tables */
785 table |= ICE_UP_TABLE_TRANSLATE(0, 0);
786 table |= ICE_UP_TABLE_TRANSLATE(1, 1);
787 table |= ICE_UP_TABLE_TRANSLATE(2, 2);
788 table |= ICE_UP_TABLE_TRANSLATE(3, 3);
789 table |= ICE_UP_TABLE_TRANSLATE(4, 4);
790 table |= ICE_UP_TABLE_TRANSLATE(5, 5);
791 table |= ICE_UP_TABLE_TRANSLATE(6, 6);
792 table |= ICE_UP_TABLE_TRANSLATE(7, 7);
793 ctxt->info.ingress_table = cpu_to_le32(table);
794 ctxt->info.egress_table = cpu_to_le32(table);
795 /* Have 1:1 UP mapping for outer to inner UP table */
796 ctxt->info.outer_up_table = cpu_to_le32(table);
797	/* No outer tag support; outer_tag_flags remains zero */
801 * ice_vsi_setup_q_map - Setup a VSI queue map
802 * @vsi: the VSI being configured
803 * @ctxt: VSI context structure
805 static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
807 u16 offset = 0, qmap = 0, tx_count = 0;
808 u16 qcount_tx = vsi->alloc_txq;
809 u16 qcount_rx = vsi->alloc_rxq;
810 u16 tx_numq_tc, rx_numq_tc;
811 u16 pow = 0, max_rss = 0;
812 bool ena_tc0 = false;
816 /* at least TC0 should be enabled by default */
817 if (vsi->tc_cfg.numtc) {
818 if (!(vsi->tc_cfg.ena_tc & BIT(0)))
826 vsi->tc_cfg.ena_tc |= 1;
829 rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
832 tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
836 /* TC mapping is a function of the number of Rx queues assigned to the
837 * VSI for each traffic class and the offset of these queues.
838	 * The first 10 bits are the queue offset for TC0, and the next 4 bits are the
839	 * number of queues allocated to TC0. The number of queues is a power-of-2.
841	 * If a TC is not enabled, its queue offset is set to 0 and one queue is
842	 * allocated; this way, traffic for that TC will be sent to the default
845 * Setup number and offset of Rx queues for all TCs for the VSI
848 qcount_rx = rx_numq_tc;
850 /* qcount will change if RSS is enabled */
851 if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
852 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
853 if (vsi->type == ICE_VSI_PF)
854 max_rss = ICE_MAX_LG_RSS_QS;
856 max_rss = ICE_MAX_SMALL_RSS_QS;
857 qcount_rx = min_t(int, rx_numq_tc, max_rss);
858 qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
862 /* find the (rounded up) power-of-2 of qcount */
863 pow = order_base_2(qcount_rx);
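/* Worked example (illustrative): qcount_rx = 12 gives
 * pow = order_base_2(12) = 4, so the TC map below advertises a
 * power-of-2 region of 2^4 = 16 queue slots.
 */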
865 ice_for_each_traffic_class(i) {
866 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
867 /* TC is not enabled */
868 vsi->tc_cfg.tc_info[i].qoffset = 0;
869 vsi->tc_cfg.tc_info[i].qcount_rx = 1;
870 vsi->tc_cfg.tc_info[i].qcount_tx = 1;
871 vsi->tc_cfg.tc_info[i].netdev_tc = 0;
872 ctxt->info.tc_mapping[i] = 0;
877 vsi->tc_cfg.tc_info[i].qoffset = offset;
878 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
879 vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
880 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
882 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
883 ICE_AQ_VSI_TC_Q_OFFSET_M) |
884 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
885 ICE_AQ_VSI_TC_Q_NUM_M);
887 tx_count += tx_numq_tc;
888 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
891	/* if offset is non-zero, it means it was calculated correctly based on the
892	 * enabled TCs for a given VSI; otherwise qcount_rx will always
893	 * be correct and non-zero because it is based on the VSI's
894	 * allocated Rx queues, which is at least 1 (hence qcount_tx will be
898 vsi->num_rxq = offset;
900 vsi->num_rxq = qcount_rx;
902 vsi->num_txq = tx_count;
904 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
905 dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
906 /* since there is a chance that num_rxq could have been changed
907 * in the above for loop, make num_txq equal to num_rxq.
909 vsi->num_txq = vsi->num_rxq;
912 /* Rx queue mapping */
913 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
914 /* q_mapping buffer holds the info for the first queue allocated for
915 * this VSI in the PF space and also the number of queues associated
918 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
919 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
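/* Worked example (illustrative): a VSI handed PF Rx queues 32..47 would
 * program q_mapping[0] = 32 and q_mapping[1] = 16.
 */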
923 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
924 * @ctxt: the VSI context being set
925 * @vsi: the VSI being configured
927 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
929 u8 lut_type, hash_type;
936 /* PF VSI will inherit RSS instance of PF */
937 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
938 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
941		/* VF VSI gets a small RSS table which is a VSI LUT type */
942 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
943 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
946 dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type);
949 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
953 ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
954 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
955 ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
956 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
960 * ice_vsi_init - Create and initialize a VSI
961 * @vsi: the VSI being configured
963 * This initializes a VSI context depending on the VSI type to be added and
964 * passes it down to the add_vsi aq command to create a new VSI.
966 static int ice_vsi_init(struct ice_vsi *vsi)
968 struct ice_pf *pf = vsi->back;
969 struct ice_hw *hw = &pf->hw;
970 struct ice_vsi_ctx *ctxt;
973 ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
977 ctxt->info = vsi->info;
982 ctxt->flags = ICE_AQ_VSI_TYPE_PF;
985 ctxt->flags = ICE_AQ_VSI_TYPE_VF;
986 /* VF number here is the absolute VF number (0-255) */
987 ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
993 ice_set_dflt_vsi_ctx(ctxt);
994 /* if the switch is in VEB mode, allow VSI loopback */
995 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
996 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
998 /* Set LUT type and HASH type if RSS is enabled */
999 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
1000 ice_set_rss_vsi_ctx(ctxt, vsi);
1002 ctxt->info.sw_id = vsi->port_info->sw_id;
1003 ice_vsi_setup_q_map(vsi, ctxt);
1005 /* Enable MAC Antispoof with new VSI being initialized or updated */
1006 if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
1007 ctxt->info.valid_sections |=
1008 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1009 ctxt->info.sec_flags |=
1010 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1013 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
1015 dev_err(&pf->pdev->dev,
1016 "Add VSI failed, err %d\n", ret);
1020 /* keep context for update VSI operations */
1021 vsi->info = ctxt->info;
1023 /* record VSI number returned */
1024 vsi->vsi_num = ctxt->vsi_num;
1026 devm_kfree(&pf->pdev->dev, ctxt);
1031 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
1032 * @vsi: VSI having the memory freed
1033 * @v_idx: index of the vector to be freed
1035 static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
1037 struct ice_q_vector *q_vector;
1038 struct ice_pf *pf = vsi->back;
1039 struct ice_ring *ring;
1041 if (!vsi->q_vectors[v_idx]) {
1042 dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
1046 q_vector = vsi->q_vectors[v_idx];
1048 ice_for_each_ring(ring, q_vector->tx)
1049 ring->q_vector = NULL;
1050 ice_for_each_ring(ring, q_vector->rx)
1051 ring->q_vector = NULL;
1053 /* only VSI with an associated netdev is set up with NAPI */
1055 netif_napi_del(&q_vector->napi);
1057 devm_kfree(&pf->pdev->dev, q_vector);
1058 vsi->q_vectors[v_idx] = NULL;
1062 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
1063 * @vsi: the VSI having memory freed
1065 void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
1069 ice_for_each_q_vector(vsi, v_idx)
1070 ice_free_q_vector(vsi, v_idx);
1074 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
1075 * @vsi: the VSI being configured
1076 * @v_idx: index of the vector in the VSI struct
1078 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1080 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
1082 struct ice_pf *pf = vsi->back;
1083 struct ice_q_vector *q_vector;
1085 /* allocate q_vector */
1086 q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
1090 q_vector->vsi = vsi;
1091 q_vector->v_idx = v_idx;
1092 if (vsi->type == ICE_VSI_VF)
1094 /* only set affinity_mask if the CPU is online */
1095 if (cpu_online(v_idx))
1096 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
1098 /* This will not be called in the driver load path because the netdev
1099	 * will not be created yet. All other cases will register the NAPI
1100 * handler here (i.e. resume, reset/rebuild, etc.)
1103 netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
1107 /* tie q_vector and VSI together */
1108 vsi->q_vectors[v_idx] = q_vector;
1114 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
1115 * @vsi: the VSI being configured
1117 * We allocate one q_vector per queue interrupt. If allocation fails we
1120 static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
1122 struct ice_pf *pf = vsi->back;
1123 int v_idx = 0, num_q_vectors;
1126 if (vsi->q_vectors[0]) {
1127 dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
1132 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
1133 num_q_vectors = vsi->num_q_vectors;
1139 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
1140 err = ice_vsi_alloc_q_vector(vsi, v_idx);
1149 ice_free_q_vector(vsi, v_idx);
1151 dev_err(&pf->pdev->dev,
1152 "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
1153 vsi->num_q_vectors, vsi->vsi_num, err);
1154 vsi->num_q_vectors = 0;
1159 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
1160 * @vsi: ptr to the VSI
1162 * This should only be called after ice_vsi_alloc() which allocates the
1163 * corresponding SW VSI structure and initializes num_queue_pairs for the
1164 * newly allocated VSI.
1166 * Returns 0 on success or negative on failure
1168 static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
1170 struct ice_pf *pf = vsi->back;
1173 /* SRIOV doesn't grab irq_tracker entries for each VSI */
1174 if (vsi->type == ICE_VSI_VF)
1177 if (vsi->base_vector) {
1178 dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
1179 vsi->vsi_num, vsi->base_vector);
1183 if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
1186 num_q_vectors = vsi->num_q_vectors;
1187 /* reserve slots from OS requested IRQs */
1188 vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
1190 if (vsi->base_vector < 0) {
1191 dev_err(&pf->pdev->dev,
1192 "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
1193 num_q_vectors, vsi->vsi_num, vsi->base_vector);
1196 pf->num_avail_sw_msix -= num_q_vectors;
1202 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
1203 * @vsi: the VSI having rings deallocated
1205 static void ice_vsi_clear_rings(struct ice_vsi *vsi)
1209 if (vsi->tx_rings) {
1210 for (i = 0; i < vsi->alloc_txq; i++) {
1211 if (vsi->tx_rings[i]) {
1212 kfree_rcu(vsi->tx_rings[i], rcu);
1213 vsi->tx_rings[i] = NULL;
1217 if (vsi->rx_rings) {
1218 for (i = 0; i < vsi->alloc_rxq; i++) {
1219 if (vsi->rx_rings[i]) {
1220 kfree_rcu(vsi->rx_rings[i], rcu);
1221 vsi->rx_rings[i] = NULL;
1228 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
1229 * @vsi: VSI which is having rings allocated
1231 static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
1233 struct ice_pf *pf = vsi->back;
1236 /* Allocate Tx rings */
1237 for (i = 0; i < vsi->alloc_txq; i++) {
1238 struct ice_ring *ring;
1240 /* allocate with kzalloc(), free with kfree_rcu() */
1241 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1247 ring->reg_idx = vsi->txq_map[i];
1248 ring->ring_active = false;
1250 ring->dev = &pf->pdev->dev;
1251 ring->count = vsi->num_tx_desc;
1252 vsi->tx_rings[i] = ring;
1255 /* Allocate Rx rings */
1256 for (i = 0; i < vsi->alloc_rxq; i++) {
1257 struct ice_ring *ring;
1259 /* allocate with kzalloc(), free with kfree_rcu() */
1260 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1265 ring->reg_idx = vsi->rxq_map[i];
1266 ring->ring_active = false;
1268 ring->netdev = vsi->netdev;
1269 ring->dev = &pf->pdev->dev;
1270 ring->count = vsi->num_rx_desc;
1271 vsi->rx_rings[i] = ring;
1277 ice_vsi_clear_rings(vsi);
1282 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
1283 * @vsi: the VSI being configured
1285 * This function maps descriptor rings to the queue-specific vectors allotted
1286 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
1287 * and Rx rings to the vector as "efficiently" as possible.
1290 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
1292 static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
1293 #endif /* CONFIG_DCB */
1295 int q_vectors = vsi->num_q_vectors;
1296 int tx_rings_rem, rx_rings_rem;
1299	/* initially set the remaining ring counts to the VSI's Tx and Rx queue counts */
1300 tx_rings_rem = vsi->num_txq;
1301 rx_rings_rem = vsi->num_rxq;
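/* Worked example (illustrative): 5 Tx rings spread over 2 vectors gives
 * DIV_ROUND_UP(5, 2) = 3 rings on the first vector and
 * DIV_ROUND_UP(2, 1) = 2 on the second; Rx rings are split the same way
 * in the loop below.
 */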
1303 for (v_id = 0; v_id < q_vectors; v_id++) {
1304 struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
1305 int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
1307 /* Tx rings mapping to vector */
1308 tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
1309 q_vector->num_ring_tx = tx_rings_per_v;
1310 q_vector->tx.ring = NULL;
1311 q_vector->tx.itr_idx = ICE_TX_ITR;
1312 q_base = vsi->num_txq - tx_rings_rem;
1314 for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
1315 struct ice_ring *tx_ring = vsi->tx_rings[q_id];
1317 tx_ring->q_vector = q_vector;
1318 tx_ring->next = q_vector->tx.ring;
1319 q_vector->tx.ring = tx_ring;
1321 tx_rings_rem -= tx_rings_per_v;
1323 /* Rx rings mapping to vector */
1324 rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
1325 q_vector->num_ring_rx = rx_rings_per_v;
1326 q_vector->rx.ring = NULL;
1327 q_vector->rx.itr_idx = ICE_RX_ITR;
1328 q_base = vsi->num_rxq - rx_rings_rem;
1330 for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
1331 struct ice_ring *rx_ring = vsi->rx_rings[q_id];
1333 rx_ring->q_vector = q_vector;
1334 rx_ring->next = q_vector->rx.ring;
1335 q_vector->rx.ring = rx_ring;
1337 rx_rings_rem -= rx_rings_per_v;
1342 * ice_vsi_manage_rss_lut - disable/enable RSS
1343 * @vsi: the VSI being changed
1344 * @ena: boolean value indicating if this is an enable or disable request
1346 * In the event of a disable request for RSS, this function will zero out the RSS
1347 * LUT, while in the event of an enable request for RSS, it will reconfigure the RSS
1350 int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
1355 lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
1361 if (vsi->rss_lut_user)
1362 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1364 ice_fill_rss_lut(lut, vsi->rss_table_size,
1368 err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
1369 devm_kfree(&vsi->back->pdev->dev, lut);
1374 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
1375 * @vsi: VSI to be configured
1377 static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
1379 struct ice_aqc_get_set_rss_keys *key;
1380 struct ice_pf *pf = vsi->back;
1381 enum ice_status status;
1385 vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
1387 lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
1391 if (vsi->rss_lut_user)
1392 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1394 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
1396 status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
1397 vsi->rss_table_size);
1400 dev_err(&pf->pdev->dev,
1401 "set_rss_lut failed, error %d\n", status);
1403 goto ice_vsi_cfg_rss_exit;
1406 key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL);
1409 goto ice_vsi_cfg_rss_exit;
1412 if (vsi->rss_hkey_user)
1414 (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
1415 ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1417 netdev_rss_key_fill((void *)key,
1418 ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1420 status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
1423 dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
1428 devm_kfree(&pf->pdev->dev, key);
1429 ice_vsi_cfg_rss_exit:
1430 devm_kfree(&pf->pdev->dev, lut);
1435 * ice_add_mac_to_list - Add a MAC address filter entry to the list
1436 * @vsi: the VSI to be forwarded to
1437 * @add_list: pointer to the list which contains MAC filter entries
1438 * @macaddr: the MAC address to be added.
1440 * Adds MAC address filter entry to the temp list
1442 * Returns 0 on success or -ENOMEM on failure.
1444 int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
1447 struct ice_fltr_list_entry *tmp;
1448 struct ice_pf *pf = vsi->back;
1450 tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
1454 tmp->fltr_info.flag = ICE_FLTR_TX;
1455 tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
1456 tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
1457 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1458 tmp->fltr_info.vsi_handle = vsi->idx;
1459 ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
1461 INIT_LIST_HEAD(&tmp->list_entry);
1462 list_add(&tmp->list_entry, add_list);
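/* Usage sketch (illustrative only, mirroring the flow used by
 * ice_vsi_add_vlan() below; not lifted verbatim from a caller):
 *
 *	LIST_HEAD(tmp_add_list);
 *
 *	if (!ice_add_mac_to_list(vsi, &tmp_add_list, mac))
 *		status = ice_add_mac(&pf->hw, &tmp_add_list);
 *	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
 */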
1468 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
1469 * @vsi: the VSI to be updated
1471 void ice_update_eth_stats(struct ice_vsi *vsi)
1473 struct ice_eth_stats *prev_es, *cur_es;
1474 struct ice_hw *hw = &vsi->back->hw;
1475 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
1477 prev_es = &vsi->eth_stats_prev;
1478 cur_es = &vsi->eth_stats;
1480 ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
1481 vsi->stat_offsets_loaded, &prev_es->rx_bytes,
1484 ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
1485 vsi->stat_offsets_loaded, &prev_es->rx_unicast,
1486 &cur_es->rx_unicast);
1488 ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
1489 vsi->stat_offsets_loaded, &prev_es->rx_multicast,
1490 &cur_es->rx_multicast);
1492 ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
1493 vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
1494 &cur_es->rx_broadcast);
1496 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
1497 &prev_es->rx_discards, &cur_es->rx_discards);
1499 ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
1500 vsi->stat_offsets_loaded, &prev_es->tx_bytes,
1503 ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
1504 vsi->stat_offsets_loaded, &prev_es->tx_unicast,
1505 &cur_es->tx_unicast);
1507 ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
1508 vsi->stat_offsets_loaded, &prev_es->tx_multicast,
1509 &cur_es->tx_multicast);
1511 ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
1512 vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
1513 &cur_es->tx_broadcast);
1515 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
1516 &prev_es->tx_errors, &cur_es->tx_errors);
1518 vsi->stat_offsets_loaded = true;
1522 * ice_free_fltr_list - free filter lists helper
1523 * @dev: pointer to the device struct
1524 * @h: pointer to the list head to be freed
1526 * Helper function to free filter lists previously created using
1527 * ice_add_mac_to_list
1529 void ice_free_fltr_list(struct device *dev, struct list_head *h)
1531 struct ice_fltr_list_entry *e, *tmp;
1533 list_for_each_entry_safe(e, tmp, h, list_entry) {
1534 list_del(&e->list_entry);
1540 * ice_vsi_add_vlan - Add VSI membership for given VLAN
1541 * @vsi: the VSI being configured
1542 * @vid: VLAN ID to be added
1544 int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
1546 struct ice_fltr_list_entry *tmp;
1547 struct ice_pf *pf = vsi->back;
1548 LIST_HEAD(tmp_add_list);
1549 enum ice_status status;
1552 tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
1556 tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1557 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1558 tmp->fltr_info.flag = ICE_FLTR_TX;
1559 tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
1560 tmp->fltr_info.vsi_handle = vsi->idx;
1561 tmp->fltr_info.l_data.vlan.vlan_id = vid;
1563 INIT_LIST_HEAD(&tmp->list_entry);
1564 list_add(&tmp->list_entry, &tmp_add_list);
1566 status = ice_add_vlan(&pf->hw, &tmp_add_list);
1569 dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
1573 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
1578 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
1579 * @vsi: the VSI being configured
1580 * @vid: VLAN ID to be removed
1582 * Returns 0 on success and negative on failure
1584 int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
1586 struct ice_fltr_list_entry *list;
1587 struct ice_pf *pf = vsi->back;
1588 LIST_HEAD(tmp_add_list);
1589 enum ice_status status;
1592 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
1596 list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1597 list->fltr_info.vsi_handle = vsi->idx;
1598 list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1599 list->fltr_info.l_data.vlan.vlan_id = vid;
1600 list->fltr_info.flag = ICE_FLTR_TX;
1601 list->fltr_info.src_id = ICE_SRC_ID_VSI;
1603 INIT_LIST_HEAD(&list->list_entry);
1604 list_add(&list->list_entry, &tmp_add_list);
1606 status = ice_remove_vlan(&pf->hw, &tmp_add_list);
1607 if (status == ICE_ERR_DOES_NOT_EXIST) {
1608 dev_dbg(&pf->pdev->dev,
1609 "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
1610 vid, vsi->vsi_num, status);
1611 } else if (status) {
1612 dev_err(&pf->pdev->dev,
1613 "Error removing VLAN %d on vsi %i error: %d\n",
1614 vid, vsi->vsi_num, status);
1618 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
1623 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
1624 * @vsi: the VSI being configured
1626 * Return 0 on success and a negative value on error
1627 * Configure the Rx VSI for operation.
1629 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
1633 if (vsi->type == ICE_VSI_VF)
1636 if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
1637 vsi->max_frame = vsi->netdev->mtu +
1638 ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1640 vsi->max_frame = ICE_RXBUF_2048;
1642 vsi->rx_buf_len = ICE_RXBUF_2048;
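/* Worked example (illustrative): a 9000-byte MTU yields
 * max_frame = 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 9022,
 * while the default 1500-byte MTU falls back to ICE_RXBUF_2048.
 */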
1644 /* set up individual rings */
1645 for (i = 0; i < vsi->num_rxq; i++) {
1648 err = ice_setup_rx_ctx(vsi->rx_rings[i]);
1650 dev_err(&vsi->back->pdev->dev,
1651 "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
1661 * ice_vsi_cfg_txqs - Configure the VSI for Tx
1662 * @vsi: the VSI being configured
1663 * @rings: Tx ring array to be configured
1664 * @offset: offset within vsi->txq_map
1666 * Return 0 on success and a negative value on error
1667 * Configure the Tx VSI for operation.
1670 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
1672 struct ice_aqc_add_tx_qgrp *qg_buf;
1673 struct ice_aqc_add_txqs_perq *txq;
1674 struct ice_pf *pf = vsi->back;
1675 u8 num_q_grps, q_idx = 0;
1676 enum ice_status status;
1677 u16 buf_len, i, pf_q;
1680 buf_len = sizeof(*qg_buf);
1681 qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
1685 qg_buf->num_txqs = 1;
1688 /* set up and configure the Tx queues for each enabled TC */
1689 ice_for_each_traffic_class(tc) {
1690 if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
1693 for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
1694 struct ice_tlan_ctx tlan_ctx = { 0 };
1696 pf_q = vsi->txq_map[q_idx + offset];
1697 ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q);
1698 /* copy context contents into the qg_buf */
1699 qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
1700 ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
1703			/* init queue specific tail reg. It is referred to as the
1704 * transmit comm scheduler queue doorbell.
1706 rings[q_idx]->tail =
1707 pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
1708 status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
1709 i, num_q_grps, qg_buf,
1712 dev_err(&pf->pdev->dev,
1713 "Failed to set LAN Tx queue context, error: %d\n",
1719 /* Add Tx Queue TEID into the VSI Tx ring from the
1720 * response. This will complete configuring and
1721 * enabling the queue.
1723 txq = &qg_buf->txqs[0];
1724 if (pf_q == le16_to_cpu(txq->txq_id))
1725 rings[q_idx]->txq_teid =
1726 le32_to_cpu(txq->q_teid);
1732 devm_kfree(&pf->pdev->dev, qg_buf);
1737 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
1738 * @vsi: the VSI being configured
1740 * Return 0 on success and a negative value on error
1741 * Configure the Tx VSI for operation.
1743 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
1745 return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
1749 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
1750 * @intrl: interrupt rate limit in usecs
1751 * @gran: interrupt rate limit granularity in usecs
1753 * This function converts a decimal interrupt rate limit in usecs to the format
1754 * expected by firmware.
1756 u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
1758 u32 val = intrl / gran;
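/* Worked example (illustrative): intrl = 50 usecs with a 2 usec
 * granularity programs val = 25; the enable bit is OR'd in below.
 */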
1761 return val | GLINT_RATE_INTRL_ENA_M;
1766 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
1767 * @hw: board specific structure
1769 static void ice_cfg_itr_gran(struct ice_hw *hw)
1771 u32 regval = rd32(hw, GLINT_CTL);
1773 /* no need to update global register if ITR gran is already set */
1774 if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
1775 (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
1776 GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
1777 (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
1778 GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
1779 (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
1780 GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
1781 (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
1782 GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
1785 regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
1786 GLINT_CTL_ITR_GRAN_200_M) |
1787 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
1788 GLINT_CTL_ITR_GRAN_100_M) |
1789 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
1790 GLINT_CTL_ITR_GRAN_50_M) |
1791 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
1792 GLINT_CTL_ITR_GRAN_25_M);
1793 wr32(hw, GLINT_CTL, regval);
1797 * ice_cfg_itr - configure the initial interrupt throttle values
1798 * @hw: pointer to the HW structure
1799 * @q_vector: interrupt vector that's being configured
1801 * Configure interrupt throttling values for the ring containers that are
1802 * associated with the interrupt vector passed in.
1805 ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
1807 ice_cfg_itr_gran(hw);
1809 if (q_vector->num_ring_rx) {
1810 struct ice_ring_container *rc = &q_vector->rx;
1812 /* if this value is set then don't overwrite with default */
1813 if (!rc->itr_setting)
1814 rc->itr_setting = ICE_DFLT_RX_ITR;
1816 rc->target_itr = ITR_TO_REG(rc->itr_setting);
1817 rc->next_update = jiffies + 1;
1818 rc->current_itr = rc->target_itr;
1819 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
1820 ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
1823 if (q_vector->num_ring_tx) {
1824 struct ice_ring_container *rc = &q_vector->tx;
1826 /* if this value is set then don't overwrite with default */
1827 if (!rc->itr_setting)
1828 rc->itr_setting = ICE_DFLT_TX_ITR;
1830 rc->target_itr = ITR_TO_REG(rc->itr_setting);
1831 rc->next_update = jiffies + 1;
1832 rc->current_itr = rc->target_itr;
1833 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
1834 ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
1839 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
1840 * @vsi: the VSI being configured
1841 * @txq: Tx queue being mapped to MSI-X vector
1842 * @msix_idx: MSI-X vector index within the function
1843 * @itr_idx: ITR index of the interrupt cause
1845 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
1846 * within the function space.
1848 #ifdef CONFIG_PCI_IOV
1850 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
1853 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
1854 #endif /* CONFIG_PCI_IOV */
1856 struct ice_pf *pf = vsi->back;
1857 struct ice_hw *hw = &pf->hw;
1860 itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
1862 val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
1863 ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
1865 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
1869 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
1870 * @vsi: the VSI being configured
1871 * @rxq: Rx queue being mapped to MSI-X vector
1872 * @msix_idx: MSI-X vector index within the function
1873 * @itr_idx: ITR index of the interrupt cause
1875 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
1876 * within the function space.
1878 #ifdef CONFIG_PCI_IOV
1880 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
1883 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
1884 #endif /* CONFIG_PCI_IOV */
1886 struct ice_pf *pf = vsi->back;
1887 struct ice_hw *hw = &pf->hw;
1890 itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
1892 val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
1893 ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
1895 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
1901 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
1902 * @vsi: the VSI being configured
1904 * This configures MSIX mode interrupts for the PF VSI, and should not be used
1907 void ice_vsi_cfg_msix(struct ice_vsi *vsi)
1909 struct ice_pf *pf = vsi->back;
1910 struct ice_hw *hw = &pf->hw;
1911 u32 txq = 0, rxq = 0;
1914 for (i = 0; i < vsi->num_q_vectors; i++) {
1915 struct ice_q_vector *q_vector = vsi->q_vectors[i];
1916 u16 reg_idx = q_vector->reg_idx;
1918 ice_cfg_itr(hw, q_vector);
1920 wr32(hw, GLINT_RATE(reg_idx),
1921 ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
1923 /* Both Transmit Queue Interrupt Cause Control register
1924		 * and Receive Queue Interrupt Cause Control register
1925		 * expect the MSIX_INDX field to be the vector index
1926		 * within the function space and not the absolute
1927		 * vector index across the PF or across the device.
1928		 * For SR-IOV VF VSIs the queue vector index always starts
1929		 * with 1 since the first vector index (0) is used for OICR
1930 * in VF space. Since VMDq and other PF VSIs are within
1931 * the PF function space, use the vector index that is
1932 * tracked for this PF.
1934 for (q = 0; q < q_vector->num_ring_tx; q++) {
1935 ice_cfg_txq_interrupt(vsi, txq, reg_idx,
1936 q_vector->tx.itr_idx);
1940 for (q = 0; q < q_vector->num_ring_rx; q++) {
1941 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
1942 q_vector->rx.itr_idx);
1949 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
1950 * @vsi: the VSI being changed
1952 int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
1954 struct device *dev = &vsi->back->pdev->dev;
1955 struct ice_hw *hw = &vsi->back->hw;
1956 struct ice_vsi_ctx *ctxt;
1957 enum ice_status status;
1960 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
1964 /* Here we are configuring the VSI to let the driver add VLAN tags by
1965 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
1966 * insertion happens in the Tx hot path, in ice_tx_map.
1968 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
1970 /* Preserve existing VLAN strip setting */
1971 ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
1972 ICE_AQ_VSI_VLAN_EMOD_M);
1974 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
1976 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
1978 dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
1979 status, hw->adminq.sq_last_status);
1984 vsi->info.vlan_flags = ctxt->info.vlan_flags;
1986 devm_kfree(dev, ctxt);
1991 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
1992 * @vsi: the VSI being changed
1993 * @ena: boolean value indicating if this is an enable or disable request
1995 int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
1997 struct device *dev = &vsi->back->pdev->dev;
1998 struct ice_hw *hw = &vsi->back->hw;
1999 struct ice_vsi_ctx *ctxt;
2000 enum ice_status status;
2003 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
2007 /* Here we are configuring what the VSI should do with the VLAN tag in
2008 * the Rx packet. We can either leave the tag in the packet or put it in
2009 * the Rx descriptor.
2012 /* Strip VLAN tag from Rx packet and put it in the desc */
2013 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
2015 /* Disable stripping. Leave tag in packet */
2016 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
2018 /* Allow all packets untagged/tagged */
2019 ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
2021 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
2023 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
2025 dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
2026 ena, status, hw->adminq.sq_last_status);
2031 vsi->info.vlan_flags = ctxt->info.vlan_flags;
2033 devm_kfree(dev, ctxt);
2038 * ice_vsi_start_rx_rings - start VSI's Rx rings
2039 * @vsi: the VSI whose rings are to be started
2041 * Returns 0 on success and a negative value on error
2043 int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
2045 return ice_vsi_ctrl_rx_rings(vsi, true);
2049 * ice_vsi_stop_rx_rings - stop VSI's Rx rings
2052 * Returns 0 on success and a negative value on error
2054 int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
2056 return ice_vsi_ctrl_rx_rings(vsi, false);
2060 * ice_trigger_sw_intr - trigger a software interrupt
2061 * @hw: pointer to the HW structure
2062 * @q_vector: interrupt vector to trigger the software interrupt for
2064 void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
2066 wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
2067 (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
2068 GLINT_DYN_CTL_SWINT_TRIG_M |
2069 GLINT_DYN_CTL_INTENA_M);
2073 * ice_vsi_stop_tx_rings - Disable Tx rings
2074 * @vsi: the VSI being configured
2075 * @rst_src: reset source
2076 * @rel_vmvf_num: Relative ID of VF/VM
2077 * @rings: Tx ring array to be stopped
2078 * @offset: offset within vsi->txq_map
2081 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2082 u16 rel_vmvf_num, struct ice_ring **rings, int offset)
2084 struct ice_pf *pf = vsi->back;
2085 struct ice_hw *hw = &pf->hw;
2086 int tc, q_idx = 0, err = 0;
2087 u16 *q_ids, *q_handles, i;
2088 enum ice_status status;
2091 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
2094 q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
2099 q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
2103 goto err_alloc_q_ids;
2106 q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
2107 sizeof(*q_handles), GFP_KERNEL);
2110 goto err_alloc_q_handles;
2113 /* set up the Tx queue list to be disabled for each enabled TC */
2114 ice_for_each_traffic_class(tc) {
2115 if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
2118 for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
2119 struct ice_q_vector *q_vector;
2121 if (!rings || !rings[q_idx]) {
2126 q_ids[i] = vsi->txq_map[q_idx + offset];
2127 q_teids[i] = rings[q_idx]->txq_teid;
2130 /* clear cause_ena bit for disabled queues */
2131 val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
2132 val &= ~QINT_TQCTL_CAUSE_ENA_M;
2133 wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
2135 /* software is expected to wait for 100 ns */
2138 /* trigger a software interrupt for the vector
2139 * associated to the queue to schedule NAPI handler
2141 q_vector = rings[i]->q_vector;
2143 ice_trigger_sw_intr(hw, q_vector);
2147 status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc,
2148 vsi->num_txq, q_handles, q_ids,
2149 q_teids, rst_src, rel_vmvf_num, NULL);
2151 /* if the disable queue command was exercised during an active
2152 * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not
2153 * an error as the reset operation disables queues at the
2154 * hardware level anyway.
2156 if (status == ICE_ERR_RESET_ONGOING) {
2157 dev_dbg(&pf->pdev->dev,
2158 "Reset in progress. LAN Tx queues already disabled\n");
2159 } else if (status) {
2160 dev_err(&pf->pdev->dev,
2161 "Failed to disable LAN Tx queues, error: %d\n",
2168 devm_kfree(&pf->pdev->dev, q_handles);
2170 err_alloc_q_handles:
2171 devm_kfree(&pf->pdev->dev, q_ids);
2174 devm_kfree(&pf->pdev->dev, q_teids);
2180 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
2181 * @vsi: the VSI being configured
2182 * @rst_src: reset source
2183 * @rel_vmvf_num: Relative ID of VF/VM
2186 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2189 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
2194 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
2195 * @vsi: VSI to enable or disable VLAN pruning on
2196 * @ena: set to true to enable VLAN pruning and false to disable it
2197 * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
2199 * returns 0 if VSI is updated, negative otherwise
2201 int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
2203 struct ice_vsi_ctx *ctxt;
2212 dev = &pf->pdev->dev;
2213 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
2217 ctxt->info = vsi->info;
2220 ctxt->info.sec_flags |=
2221 ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2222 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
2223 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2225 ctxt->info.sec_flags &=
2226 ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2227 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2228 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2232 ctxt->info.valid_sections =
2233 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
2234 ICE_AQ_VSI_PROP_SW_VALID);
2236 status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
2238 netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
2239 ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
2240 pf->hw.adminq.sq_last_status);
2244 vsi->info.sec_flags = ctxt->info.sec_flags;
2245 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
2247 devm_kfree(dev, ctxt);
2251 devm_kfree(dev, ctxt);
2255 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
2257 struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
2259 vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
2260 vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
2264 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
2265 * @vsi: VSI to set the q_vectors register index on
2268 ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
2272 if (!vsi || !vsi->q_vectors)
2275 ice_for_each_q_vector(vsi, i) {
2276 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2279 dev_err(&vsi->back->pdev->dev,
2280 "Failed to set reg_idx on q_vector %d VSI %d\n",
2285 if (vsi->type == ICE_VSI_VF) {
2286 struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
2288 q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
2291 q_vector->v_idx + vsi->base_vector;
2298 ice_for_each_q_vector(vsi, i) {
2299 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2302 q_vector->reg_idx = 0;
2309 * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule
2310 * @vsi: the VSI being configured
2311 * @add_rule: boolean value to add or remove ethertype filter rule
2314 ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
2316 struct ice_fltr_list_entry *list;
2317 struct ice_pf *pf = vsi->back;
2318 LIST_HEAD(tmp_add_list);
2319 enum ice_status status;
2321 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
2325 list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
2326 list->fltr_info.fltr_act = ICE_DROP_PACKET;
2327 list->fltr_info.flag = ICE_FLTR_TX;
2328 list->fltr_info.src_id = ICE_SRC_ID_VSI;
2329 list->fltr_info.vsi_handle = vsi->idx;
2330 list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;
2332 INIT_LIST_HEAD(&list->list_entry);
2333 list_add(&list->list_entry, &tmp_add_list);
2336 status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
2338 status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
2341 dev_err(&pf->pdev->dev,
2342 "Failure adding or removing ethertype filter on VSI %i, error: %d\n",
2343 vsi->vsi_num, status);
2345 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
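/* Note (illustrative, based on this file's call sites): for the PF VSI,
 * vsi->ethtype is set to ETH_P_PAUSE in ice_vsi_setup(), so
 * ice_vsi_add_rem_eth_mac(vsi, true) installs a Tx drop rule for PAUSE/PFC
 * frames, and ice_vsi_release() later calls it with false to remove the rule.
 */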
2349 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
2350 * @vsi: the VSI being configured
2351 * @tx: true to configure a Tx rule, false for an Rx rule
2352 * @create: true to add the rule, false to remove it
2354 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
2356 struct ice_fltr_list_entry *list;
2357 struct ice_pf *pf = vsi->back;
2358 LIST_HEAD(tmp_add_list);
2359 enum ice_status status;
2361 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
2365 list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
2366 list->fltr_info.vsi_handle = vsi->idx;
2367 list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;
2370 list->fltr_info.fltr_act = ICE_DROP_PACKET;
2371 list->fltr_info.flag = ICE_FLTR_TX;
2372 list->fltr_info.src_id = ICE_SRC_ID_VSI;
2374 list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2375 list->fltr_info.flag = ICE_FLTR_RX;
2376 list->fltr_info.src_id = ICE_SRC_ID_LPORT;
2379 INIT_LIST_HEAD(&list->list_entry);
2380 list_add(&list->list_entry, &tmp_add_list);
2383 status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
2385 status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
2388 dev_err(&pf->pdev->dev,
2389 "Failed to %s %s LLDP rule on VSI %i, error: %d\n",
2390 create ? "add" : "remove", tx ? "Tx" : "Rx",
2391 vsi->vsi_num, status);
2393 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
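/* Illustrative usage (a sketch of the PF VSI call sites in this file):
 * ice_cfg_sw_lldp(vsi, true, true) installs the Tx drop rule,
 * ice_cfg_sw_lldp(vsi, false, true) installs the Rx forward rule (only when
 * the FW LLDP engine is disabled), and the same calls with create == false
 * remove them again in ice_vsi_release().
 */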
2397 * ice_vsi_setup - Set up a VSI by a given type
2398 * @pf: board private structure
2399 * @pi: pointer to the port_info instance
2401 * @vf_id: ID of the VF to which this VSI connects. This field is used only
2402 *        for the ICE_VSI_VF VSI type; for all other VSI types, pass
2403 *        ICE_INVAL_VFID.
2405 * This allocates the sw VSI structure and its queue resources.
2407 * Returns a pointer to the allocated and configured sw VSI struct on success,
2408 * NULL on failure.
2411 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
2412 enum ice_vsi_type type, u16 vf_id)
2414 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2415 struct device *dev = &pf->pdev->dev;
2416 enum ice_status status;
2417 struct ice_vsi *vsi;
2420 if (type == ICE_VSI_VF)
2421 vsi = ice_vsi_alloc(pf, type, vf_id);
2423 vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
2426 dev_err(dev, "could not allocate VSI\n");
2430 vsi->port_info = pi;
2431 vsi->vsw = pf->first_sw;
2432 if (vsi->type == ICE_VSI_PF)
2433 vsi->ethtype = ETH_P_PAUSE;
2435 if (vsi->type == ICE_VSI_VF)
2438 if (ice_vsi_get_qs(vsi)) {
2439 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
2444 /* set RSS capabilities */
2445 ice_vsi_set_rss_params(vsi);
2447 /* set TC configuration */
2448 ice_vsi_set_tc_cfg(vsi);
2450 /* create the VSI */
2451 ret = ice_vsi_init(vsi);
2455 switch (vsi->type) {
2457 ret = ice_vsi_alloc_q_vectors(vsi);
2459 goto unroll_vsi_init;
2461 ret = ice_vsi_setup_vector_base(vsi);
2463 goto unroll_alloc_q_vector;
2465 ret = ice_vsi_set_q_vectors_reg_idx(vsi);
2467 goto unroll_vector_base;
2469 ret = ice_vsi_alloc_rings(vsi);
2471 goto unroll_vector_base;
2473 ice_vsi_map_rings_to_vectors(vsi);
2475 /* Do not exit if configuring RSS had an issue; we can still
2476 * receive traffic on the first queue. Hence there is no need to capture
2479 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2480 ice_vsi_cfg_rss_lut_key(vsi);
2483 /* The VF driver will take care of creating a netdev for this VSI type
2484 * and of mapping queues to vectors through Virtchnl; the PF driver only
2485 * creates a VSI and the corresponding structures for bookkeeping
2488 ret = ice_vsi_alloc_q_vectors(vsi);
2490 goto unroll_vsi_init;
2492 ret = ice_vsi_alloc_rings(vsi);
2494 goto unroll_alloc_q_vector;
2496 ret = ice_vsi_set_q_vectors_reg_idx(vsi);
2498 goto unroll_vector_base;
2500 pf->q_left_tx -= vsi->alloc_txq;
2501 pf->q_left_rx -= vsi->alloc_rxq;
2503 /* Do not exit if configuring RSS had an issue; we can still
2504 * receive traffic on the first queue. Hence there is no need to capture
2507 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2508 ice_vsi_cfg_rss_lut_key(vsi);
2511 ret = ice_vsi_alloc_rings(vsi);
2513 goto unroll_vsi_init;
2516 /* clean up the resources and exit */
2517 goto unroll_vsi_init;
2520 /* configure VSI nodes based on the number of queues and TCs */
2521 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2522 max_txqs[i] = pf->num_lan_tx;
2524 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2527 dev_err(&pf->pdev->dev,
2528 "VSI %d failed LAN queue config, error %d\n",
2529 vsi->vsi_num, status);
2530 goto unroll_vector_base;
2533 /* Add a switch rule of lookup type ETHERTYPE to drop all Tx Flow
2534 * Control frames from VSIs, restricting a malicious VF from sending
2535 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
2536 * The rule is added once for the PF VSI in order to create the
2537 * appropriate recipe, since the VSI/VSI list is ignored with a drop action.
2538 * Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets
2539 * need to be dropped so that VFs cannot send LLDP packets to reconfigure
2540 * DCB settings in the HW. Also, if the FW DCBX engine is not running,
2541 * then Rx LLDP packets need to be redirected up the stack.
2543 if (vsi->type == ICE_VSI_PF) {
2544 ice_vsi_add_rem_eth_mac(vsi, true);
2546 /* Tx LLDP packets */
2547 ice_cfg_sw_lldp(vsi, true, true);
2549 /* Rx LLDP packets */
2550 if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
2551 ice_cfg_sw_lldp(vsi, false, true);
2557 /* reclaim SW interrupts back to the common pool */
2558 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
2559 pf->num_avail_sw_msix += vsi->num_q_vectors;
2560 unroll_alloc_q_vector:
2561 ice_vsi_free_q_vectors(vsi);
2563 ice_vsi_delete(vsi);
2565 ice_vsi_put_qs(vsi);
2566 pf->q_left_tx += vsi->alloc_txq;
2567 pf->q_left_rx += vsi->alloc_rxq;
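/* Illustrative usage (a hedged sketch, not code from this file): the PF
 * driver would typically create its main VSI and later release it with:
 *
 *	struct ice_vsi *vsi;
 *
 *	vsi = ice_vsi_setup(pf, pf->hw.port_info, ICE_VSI_PF, ICE_INVAL_VFID);
 *	if (!vsi)
 *		return -ENOMEM;
 *	...
 *	ice_vsi_release(vsi);
 */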
2574 * ice_vsi_release_msix - Clear the queue-to-interrupt mapping in HW
2575 * @vsi: the VSI being cleaned up
2577 static void ice_vsi_release_msix(struct ice_vsi *vsi)
2579 struct ice_pf *pf = vsi->back;
2580 struct ice_hw *hw = &pf->hw;
2585 for (i = 0; i < vsi->num_q_vectors; i++) {
2586 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2587 u16 reg_idx = q_vector->reg_idx;
2589 wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
2590 wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
2591 for (q = 0; q < q_vector->num_ring_tx; q++) {
2592 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2596 for (q = 0; q < q_vector->num_ring_rx; q++) {
2597 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
2606 * ice_vsi_free_irq - Free the IRQ association with the OS
2607 * @vsi: the VSI being configured
2609 void ice_vsi_free_irq(struct ice_vsi *vsi)
2611 struct ice_pf *pf = vsi->back;
2612 int base = vsi->base_vector;
2614 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
2617 if (!vsi->q_vectors || !vsi->irqs_ready)
2620 ice_vsi_release_msix(vsi);
2621 if (vsi->type == ICE_VSI_VF)
2624 vsi->irqs_ready = false;
2625 ice_for_each_q_vector(vsi, i) {
2626 u16 vector = i + base;
2629 irq_num = pf->msix_entries[vector].vector;
2631 /* free only the irqs that were actually requested */
2632 if (!vsi->q_vectors[i] ||
2633 !(vsi->q_vectors[i]->num_ring_tx ||
2634 vsi->q_vectors[i]->num_ring_rx))
2637 /* clear the affinity notifier in the IRQ descriptor */
2638 irq_set_affinity_notifier(irq_num, NULL);
2640 /* clear the affinity_mask in the IRQ descriptor */
2641 irq_set_affinity_hint(irq_num, NULL);
2642 synchronize_irq(irq_num);
2643 devm_free_irq(&pf->pdev->dev, irq_num,
2650 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
2651 * @vsi: the VSI having resources freed
2653 void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
2660 ice_for_each_txq(vsi, i)
2661 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2662 ice_free_tx_ring(vsi->tx_rings[i]);
2666 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
2667 * @vsi: the VSI having resources freed
2669 void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
2676 ice_for_each_rxq(vsi, i)
2677 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2678 ice_free_rx_ring(vsi->rx_rings[i]);
2682 * ice_vsi_close - Shut down a VSI
2683 * @vsi: the VSI being shut down
2685 void ice_vsi_close(struct ice_vsi *vsi)
2687 if (!test_and_set_bit(__ICE_DOWN, vsi->state))
2690 ice_vsi_free_irq(vsi);
2691 ice_vsi_free_tx_rings(vsi);
2692 ice_vsi_free_rx_rings(vsi);
2696 * ice_free_res - free a block of resources
2697 * @res: pointer to the resource
2698 * @index: starting index previously returned by ice_get_res
2699 * @id: identifier to track owner
2701 * Returns number of resources freed
2703 int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
2708 if (!res || index >= res->end)
2711 id |= ICE_RES_VALID_BIT;
2712 for (i = index; i < res->end && res->list[i] == id; i++) {
2721 * ice_search_res - Search the tracker for a block of resources
2722 * @res: pointer to the resource
2723 * @needed: size of the block needed
2724 * @id: identifier to track owner
2726 * Returns the base item index of the block, or -ENOMEM for error
2728 static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
2730 int start = 0, end = 0;
2732 if (needed > res->end)
2735 id |= ICE_RES_VALID_BIT;
2738 /* skip already allocated entries */
2739 if (res->list[end++] & ICE_RES_VALID_BIT) {
2741 if ((start + needed) > res->end)
2745 if (end == (start + needed)) {
2748 /* there was enough, so assign it to the requestor */
2750 res->list[i++] = id;
2754 } while (end < res->end);
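/* Worked example (editorial): with needed == 3 and a tracker list of
 *
 *	index:  0     1     2  3  4    5
 *	entry: [A|V] [A|V]  0  0  0  [B|V] ...	(V == ICE_RES_VALID_BIT)
 *
 * the scan skips indices 0-1 (valid bit set), finds three consecutive free
 * entries starting at index 2, stamps them with (id | ICE_RES_VALID_BIT),
 * and returns 2 as the base index.
 */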
2760 * ice_get_res - get a block of resources
2761 * @pf: board private structure
2762 * @res: pointer to the resource
2763 * @needed: size of the block needed
2764 * @id: identifier to track owner
2766 * Returns the base item index of the block, or negative for error
2769 ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
2774 if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
2775 dev_err(&pf->pdev->dev,
2776 "param err: needed=%d, num_entries=%d, id=0x%04x\n",
2777 needed, res->num_entries, id);
2781 return ice_search_res(res, needed, id);
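/* Illustrative usage (a hedged sketch): a VSI reserves its MSI-X vectors
 * from the PF-wide interrupt tracker and returns them on teardown, e.g.:
 *
 *	int base;
 *
 *	base = ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors, vsi->idx);
 *	if (base < 0)
 *		return -ENOMEM;
 *	vsi->base_vector = (u16)base;
 *	...
 *	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
 */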
2785 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
2786 * @vsi: the VSI being un-configured
2788 void ice_vsi_dis_irq(struct ice_vsi *vsi)
2790 int base = vsi->base_vector;
2791 struct ice_pf *pf = vsi->back;
2792 struct ice_hw *hw = &pf->hw;
2796 /* disable interrupt causation from each queue */
2797 if (vsi->tx_rings) {
2798 ice_for_each_txq(vsi, i) {
2799 if (vsi->tx_rings[i]) {
2802 reg = vsi->tx_rings[i]->reg_idx;
2803 val = rd32(hw, QINT_TQCTL(reg));
2804 val &= ~QINT_TQCTL_CAUSE_ENA_M;
2805 wr32(hw, QINT_TQCTL(reg), val);
2810 if (vsi->rx_rings) {
2811 ice_for_each_rxq(vsi, i) {
2812 if (vsi->rx_rings[i]) {
2815 reg = vsi->rx_rings[i]->reg_idx;
2816 val = rd32(hw, QINT_RQCTL(reg));
2817 val &= ~QINT_RQCTL_CAUSE_ENA_M;
2818 wr32(hw, QINT_RQCTL(reg), val);
2823 /* disable each interrupt */
2824 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
2825 ice_for_each_q_vector(vsi, i)
2826 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
2830 ice_for_each_q_vector(vsi, i)
2831 synchronize_irq(pf->msix_entries[i + base].vector);
2836 * ice_napi_del - Remove NAPI handler for the VSI
2837 * @vsi: VSI for which NAPI handler is to be removed
2839 void ice_napi_del(struct ice_vsi *vsi)
2846 ice_for_each_q_vector(vsi, v_idx)
2847 netif_napi_del(&vsi->q_vectors[v_idx]->napi);
2851 * ice_vsi_release - Delete a VSI and free its resources
2852 * @vsi: the VSI being removed
2854 * Returns 0 on success or < 0 on error
2856 int ice_vsi_release(struct ice_vsi *vsi)
2864 /* Do not unregister while the driver is in the reset recovery pending
2865 * state. Since reset/rebuild happens through the PF service task workqueue,
2866 * it is not a good idea to unregister a netdev that is associated with the
2867 * PF that is currently running the work queue items. This is done to
2868 * avoid a check_flush_dependency() warning on this wq
2870 if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
2871 unregister_netdev(vsi->netdev);
2873 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2876 /* Disable VSI and free resources */
2877 if (vsi->type != ICE_VSI_LB)
2878 ice_vsi_dis_irq(vsi);
2881 /* SR-IOV determines needed MSIX resources all at once instead of per
2882 * VSI since when VFs are spawned we know how many VFs there are and how
2883 * many interrupts each VF needs. SR-IOV MSIX resources are also
2884 * cleared in the same manner.
2886 if (vsi->type != ICE_VSI_VF) {
2887 /* reclaim SW interrupts back to the common pool */
2888 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
2889 pf->num_avail_sw_msix += vsi->num_q_vectors;
2892 if (vsi->type == ICE_VSI_PF) {
2893 ice_vsi_add_rem_eth_mac(vsi, false);
2894 ice_cfg_sw_lldp(vsi, true, false);
2895 /* The Rx rule to remove will only exist if the FW LLDP
2896 * engine is currently stopped
2898 if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
2899 ice_cfg_sw_lldp(vsi, false, false);
2902 ice_remove_vsi_fltr(&pf->hw, vsi->idx);
2903 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
2904 ice_vsi_delete(vsi);
2905 ice_vsi_free_q_vectors(vsi);
2907 /* make sure unregister_netdev() was called by checking __ICE_DOWN */
2908 if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
2909 free_netdev(vsi->netdev);
2913 ice_vsi_clear_rings(vsi);
2915 ice_vsi_put_qs(vsi);
2916 pf->q_left_tx += vsi->alloc_txq;
2917 pf->q_left_rx += vsi->alloc_rxq;
2919 /* retain the SW VSI data structure since it is needed to unregister and
2920 * free the VSI netdev when the PF is not in the reset recovery pending
2921 * state, e.g. during rmmod.
2923 if (!ice_is_reset_in_progress(pf->state))
2930 * ice_vsi_rebuild - Rebuild VSI after reset
2931 * @vsi: VSI to be rebuilt
2933 * Returns 0 on success and a negative value on failure
2935 int ice_vsi_rebuild(struct ice_vsi *vsi)
2937 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2938 struct ice_vf *vf = NULL;
2939 enum ice_status status;
2947 if (vsi->type == ICE_VSI_VF)
2948 vf = &pf->vf[vsi->vf_id];
2950 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
2951 ice_vsi_free_q_vectors(vsi);
2953 /* SR-IOV determines needed MSIX resources all at once instead of per
2954 * VSI since when VFs are spawned we know how many VFs there are and how
2955 * many interrupts each VF needs. SR-IOV MSIX resources are also
2956 * cleared in the same manner.
2958 if (vsi->type != ICE_VSI_VF) {
2959 /* reclaim SW interrupts back to the common pool */
2960 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
2961 pf->num_avail_sw_msix += vsi->num_q_vectors;
2962 vsi->base_vector = 0;
2965 ice_vsi_clear_rings(vsi);
2966 ice_vsi_free_arrays(vsi);
2967 ice_dev_onetime_setup(&pf->hw);
2968 if (vsi->type == ICE_VSI_VF)
2969 ice_vsi_set_num_qs(vsi, vf->vf_id);
2971 ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
2972 ice_vsi_set_tc_cfg(vsi);
2974 /* Initialize VSI struct elements and create VSI in FW */
2975 ret = ice_vsi_init(vsi);
2979 ret = ice_vsi_alloc_arrays(vsi);
2983 switch (vsi->type) {
2985 ret = ice_vsi_alloc_q_vectors(vsi);
2989 ret = ice_vsi_set_q_vectors_reg_idx(vsi);
2993 ret = ice_vsi_alloc_rings(vsi);
2997 ice_vsi_map_rings_to_vectors(vsi);
2998 /* Do not exit if configuring RSS had an issue; we can still
2999 * receive traffic on the first queue. Hence there is no need to capture
3002 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
3003 ice_vsi_cfg_rss_lut_key(vsi);
3006 ret = ice_vsi_alloc_q_vectors(vsi);
3010 ret = ice_vsi_setup_vector_base(vsi);
3014 ret = ice_vsi_set_q_vectors_reg_idx(vsi);
3018 ret = ice_vsi_alloc_rings(vsi);
3022 pf->q_left_tx -= vsi->alloc_txq;
3023 pf->q_left_rx -= vsi->alloc_rxq;
3029 /* configure VSI nodes based on the number of queues and TCs */
3030 for (i = 0; i < vsi->tc_cfg.numtc; i++)
3031 max_txqs[i] = pf->num_lan_tx;
3033 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
3036 dev_err(&pf->pdev->dev,
3037 "VSI %d failed LAN queue config, error %d\n",
3038 vsi->vsi_num, status);
3044 ice_vsi_free_q_vectors(vsi);
3047 vsi->current_netdev_flags = 0;
3048 unregister_netdev(vsi->netdev);
3049 free_netdev(vsi->netdev);
3054 set_bit(__ICE_RESET_FAILED, pf->state);
3059 * ice_is_reset_in_progress - check for a reset in progress
3060 * @state: PF state field
3062 bool ice_is_reset_in_progress(unsigned long *state)
3064 return test_bit(__ICE_RESET_OICR_RECV, state) ||
3065 test_bit(__ICE_PFR_REQ, state) ||
3066 test_bit(__ICE_CORER_REQ, state) ||
3067 test_bit(__ICE_GLOBR_REQ, state);
3072 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
3073 * @vsi: VSI being configured
3074 * @ctx: the context buffer returned from AQ VSI update command
3076 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
3078 vsi->info.mapping_flags = ctx->info.mapping_flags;
3079 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
3080 sizeof(vsi->info.q_mapping));
3081 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
3082 sizeof(vsi->info.tc_mapping));
3086 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
3087 * @vsi: the VSI being configured
3088 * @ena_tc: TC map to be enabled
3090 static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
3092 struct net_device *netdev = vsi->netdev;
3093 struct ice_pf *pf = vsi->back;
3094 struct ice_dcbx_cfg *dcbcfg;
3102 netdev_reset_tc(netdev);
3106 if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
3109 dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
3111 ice_for_each_traffic_class(i)
3112 if (vsi->tc_cfg.ena_tc & BIT(i))
3113 netdev_set_tc_queue(netdev,
3114 vsi->tc_cfg.tc_info[i].netdev_tc,
3115 vsi->tc_cfg.tc_info[i].qcount_tx,
3116 vsi->tc_cfg.tc_info[i].qoffset);
3118 for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
3119 u8 ets_tc = dcbcfg->etscfg.prio_table[i];
3121 /* Get the mapped netdev TC# for the UP */
3122 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
3123 netdev_set_prio_tc_map(netdev, i, netdev_tc);
3128 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
3129 * @vsi: VSI to be configured
3130 * @ena_tc: TC bitmap
3132 * VSI queues are expected to be quiesced before calling this function
3134 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
3136 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
3137 struct ice_vsi_ctx *ctx;
3138 struct ice_pf *pf = vsi->back;
3139 enum ice_status status;
3143 ice_for_each_traffic_class(i) {
3144 /* build bitmap of enabled TCs */
3145 if (ena_tc & BIT(i))
3147 /* populate max_txqs per TC */
3148 max_txqs[i] = pf->num_lan_tx;
3151 vsi->tc_cfg.ena_tc = ena_tc;
3152 vsi->tc_cfg.numtc = num_tc;
3154 ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
3159 ctx->info = vsi->info;
3161 ice_vsi_setup_q_map(vsi, ctx);
3163 /* must indicate which sections of the VSI context are being modified */
3164 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
3165 status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3167 dev_info(&pf->pdev->dev, "Failed VSI Update\n");
3172 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
3176 dev_err(&pf->pdev->dev,
3177 "VSI %d failed TC config, error %d\n",
3178 vsi->vsi_num, status);
3182 ice_vsi_update_q_map(vsi, ctx);
3183 vsi->info.valid_sections = 0;
3185 ice_vsi_cfg_netdev_tc(vsi, ena_tc);
3187 devm_kfree(&pf->pdev->dev, ctx);
3190 #endif /* CONFIG_DCB */
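/* Illustrative usage (a hedged sketch): callers are expected to stop the
 * VSI's Tx/Rx queues first (see the note above ice_vsi_cfg_tc()), then:
 *
 *	err = ice_vsi_cfg_tc(vsi, new_ena_tc);
 *
 * where "new_ena_tc" is a placeholder bitmap of the traffic classes to
 * enable, typically derived from ice_dcb_get_ena_tc() on the new DCBX config.
 */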