// SPDX-License-Identifier: GPL-2.0
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"
/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}
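/* Note on addressing (illustrative): virtchnl AQ messages address VFs by
 * their absolute id on the device, which is why vf_base_id is added to the
 * PF-relative vf_id above. For example, with a hypothetical vf_base_id of
 * 64, PF-relative VF 3 is addressed as absolute VF 67.
 */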
/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}
/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}
/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset occurs initiated after this
	 * function was called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}
/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}
/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}
/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}
/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (can be 1 to 16)
		 * as its own, they may actually belong to different VSIs
		 * (up to 4). We need to find which queues belong to which
		 * VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}
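/* Worked example with hypothetical ADq values: two TCs with
 * ch[0].num_qps == 4 and ch[1].num_qps == 4. A VF-relative queue_id of 5
 * fails the "queue_id < 4" test for TC 0, so 4 is subtracted and the loop
 * resolves it as queue 1 of TC 1's VSI before the PF queue id lookup.
 */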
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}
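/* The linklistmap built above interleaves the two supported queue types
 * (I40E_VIRTCHNL_SUPPORTED_QTYPES == 2): bit (2 * q) represents the RX half
 * of VSI queue pair q and bit (2 * q + 1) the TX half. Walking the set bits
 * in ascending order therefore chains the hardware linked list as
 * RX0, TX0, RX1, TX1, ... with the queue type recovered from (bit % 2).
 */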
/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}
/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;

	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
						(qvlist_info->num_vectors - 1));
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info)
		return -ENOMEM;

	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
			goto err;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
	return -EINVAL;
}
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
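/* The QTX_CTL write above stamps the queue with the owning PCI function:
 * I40E_QTX_CTL_VF_QUEUE marks it as a VF queue and the VFVM index field
 * carries the absolute VF number (vf_id + vf_base_id), so DMA issued from
 * this queue is attributed to the VF's requester id (relevant for IOMMU
 * isolation). The ring base programmed earlier is kept in 128-byte units,
 * hence the dma_ring_addr / 128 conversion.
 */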
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* storing VSI index and id for ADq and don't apply the mac filter */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}
/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id =  vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}
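/* Each VSILAN_QTABLE register packs two PF queue ids, one per 16-bit half
 * (the hardware queue index fields are 11 bits wide). Writing 0x07FF07FF
 * puts I40E_QUEUE_END_OF_LIST (0x7FF) in both halves, marking slots beyond
 * the VSI's allocated queue pairs as unused.
 */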
/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 qid, vsi_id;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id =  vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}
/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}
/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs.  If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * available resources.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}
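/* The CIAA/CIAD pair used above is an indirect window into VF PCI config
 * space: CIAA selects a config offset for a given VF number (0xAA lines up
 * with the PCIe Device Status register on this device), and each CIAD read
 * returns the value at that offset. VF_TRANS_PENDING_MASK (0x20) is the
 * Transactions Pending bit, so the loop polls until the VF has no
 * outstanding PCIe requests, giving up after 100 iterations.
 */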
/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}
/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
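/* i40e_reset_vf() and i40e_reset_all_vfs() below serialize against each
 * other through the __I40E_VF_DISABLE PF state bit taken above, so only one
 * reset path can own the VF reset machinery at a time.
 */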
/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF to fail
		 * the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Hw may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
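/* The trigger/wait/cleanup split above amortizes the 10-20 ms VFRSTAT
 * polling delay across all VFs: the wait is paid once for the whole set
 * rather than once per VF, which matters when a PF reset has to recycle
 * tens of VFs.
 */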
/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}
#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);

	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}
#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		return i40e_pci_sriov_enable(pdev, num_vfs);
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		return -EINVAL;
	}
	return 0;
}
/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}
/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}
/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to primary VF VSI and we shouldn't
	 * delete it. We should however delete the rest of the VSIs created
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}
/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	int num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	bool allmulti = false;
	struct i40e_vsi *vsi;
	bool alluni = false;
	int aq_err = 0;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    !vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Lie to the VF on purpose. */
		aq_ret = 0;
		goto error_param;
	}
	/* Multicast promiscuous handling*/
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret) {
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
				break;
			}
		}
	} else {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
							       allmulti, NULL);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set multicast promiscuous mode\n",
			 vf->vf_id);
		if (allmulti)
			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	}

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret)
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
							     alluni, NULL,
							     true);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
				vf->vf_id, info->flags,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set unicast promiscuous mode\n",
			 vf->vf_id);
		if (alluni)
			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id, vsi_queue_id = 0;
	i40e_status aq_ret = 0;
	int i, j = 0, idx = 0;

	vsi_id = qci->vsi_id;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];

		if (!vf->adq_enabled) {
			vsi_queue_id = qpi->txq.queue_id;

			if (qpi->txq.vsi_id != qci->vsi_id ||
			    qpi->rxq.vsi_id != qci->vsi_id ||
			    qpi->rxq.queue_id != vsi_queue_id) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* For ADq there can be up to 4 VSIs with max 4 queues each.
		 * VF does not know about these additional VSIs and all
		 * it cares is about its own queues. PF configures these queues
		 * to its appropriate VSIs based on TC mapping
		 **/
		if (vf->adq_enabled) {
			if (j == (vf->ch[idx].num_qps - 1)) {
				idx++;
				j = 0; /* resetting the queue count */
				vsi_queue_id = 0;
			} else {
				j++;
				vsi_queue_id++;
			}
			vsi_id = vf->ch[idx].vsi_id;
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	if (!vf->adq_enabled) {
		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
			qci->num_queue_pairs;
	} else {
		for (i = 0; i < vf->num_tc; i++)
			pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
			       vf->ch[i].num_qps;
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}
/**
 * i40e_validate_queue_map
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queuemap: Tx or Rx queue map
 *
 * check if Tx or Rx queue map is valid
 **/
static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
				   unsigned long queuemap)
{
	u16 vsi_queue_id, queue_id;

	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
		if (vf->adq_enabled) {
			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
		} else {
			queue_id = vsi_queue_id;
		}

		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
			return -EINVAL;
	}

	return 0;
}
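/* Worked example, assuming I40E_MAX_VF_VSI == 4 and
 * I40E_DEFAULT_QUEUES_PER_VF == 4 (hypothetical values): with ADq enabled,
 * a set bit at vsi_queue_id 6 selects channel 6 / 4 == 1 and validates
 * queue 6 % 4 == 2 against that channel's VSI.
 */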
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_irq_map_info *irqmap_info =
	    (struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	u16 vsi_id, vector_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];
		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
		aq_ret = I40E_ERR_TIMEOUT;

	/* need to start the rings for additional ADq VSI's as well */
	if (vf->adq_enabled) {
		/* zero belongs to LAN VSI */
		for (i = 1; i < vf->num_tc; i++) {
			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
				aq_ret = I40E_ERR_TIMEOUT;
		}
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
2272 * i40e_vc_request_queues_msg
2273 * @vf: pointer to the VF info
2274 * @msg: pointer to the msg buffer
2275 * @msglen: msg length
2277 * VFs get a default number of queues but can use this message to request a
2278 * different number. If the request is successful, PF will reset the VF and
2279 * return 0. If unsuccessful, PF will send message informing VF of number of
2280 * available queues and return result of sending VF a message.
2282 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
2284 struct virtchnl_vf_res_request *vfres =
2285 (struct virtchnl_vf_res_request *)msg;
2286 int req_pairs = vfres->num_queue_pairs;
2287 int cur_pairs = vf->num_queue_pairs;
2288 struct i40e_pf *pf = vf->pf;
2290 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2293 if (req_pairs <= 0) {
2294 dev_err(&pf->pdev->dev,
2295 "VF %d tried to request %d queues. Ignoring.\n",
2296 vf->vf_id, req_pairs);
2297 } else if (req_pairs > I40E_MAX_VF_QUEUES) {
2298 dev_err(&pf->pdev->dev,
2299 "VF %d tried to request more than %d queues.\n",
2300 vf->vf_id,
2301 I40E_MAX_VF_QUEUES);
2302 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2303 } else if (req_pairs - cur_pairs > pf->queues_left) {
2304 dev_warn(&pf->pdev->dev,
2305 "VF %d requested %d more queues, but only %d left.\n",
2306 vf->vf_id,
2307 req_pairs - cur_pairs,
2308 pf->queues_left);
2309 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2311 /* successful request */
2312 vf->num_req_queues = req_pairs;
2313 i40e_vc_notify_vf_reset(vf);
2314 i40e_reset_vf(vf, false);
2318 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2319 (u8 *)vfres, sizeof(*vfres));
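/* Worked example of the checks above (numbers are hypothetical): with
 * cur_pairs = 4 and pf->queues_left = 2, a request for req_pairs = 8
 * fails because 8 - 4 > 2; the reply advertises num_queue_pairs =
 * 2 + 4 = 6 and no reset happens. A request for 6 or fewer pairs would
 * fall through to the success path and trigger the VF reset.
 */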
2323 * i40e_vc_get_stats_msg
2324 * @vf: pointer to the VF info
2325 * @msg: pointer to the msg buffer
2326 * @msglen: msg length
2328 * called from the VF to get VSI stats
2330 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2332 struct virtchnl_queue_select *vqs =
2333 (struct virtchnl_queue_select *)msg;
2334 struct i40e_pf *pf = vf->pf;
2335 struct i40e_eth_stats stats;
2336 i40e_status aq_ret = 0;
2337 struct i40e_vsi *vsi;
2339 memset(&stats, 0, sizeof(struct i40e_eth_stats));
2341 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2342 aq_ret = I40E_ERR_PARAM;
2346 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2347 aq_ret = I40E_ERR_PARAM;
2351 vsi = pf->vsi[vf->lan_vsi_idx];
2352 if (!vsi) {
2353 aq_ret = I40E_ERR_PARAM;
2356 i40e_update_eth_stats(vsi);
2357 stats = vsi->eth_stats;
2360 /* send the response back to the VF */
2361 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2362 (u8 *)&stats, sizeof(stats));
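/* Illustrative sketch (not part of this driver): the response body is a
 * struct i40e_eth_stats snapshot, so a VF-side consumer of the
 * VIRTCHNL_OP_GET_STATS reply (buffer name hypothetical) might do:
 *
 *	struct i40e_eth_stats *es = (struct i40e_eth_stats *)reply;
 *
 *	stats->rx_bytes   = es->rx_bytes;
 *	stats->rx_packets = es->rx_unicast + es->rx_multicast +
 *			    es->rx_broadcast;
 *	stats->tx_bytes   = es->tx_bytes;
 *	stats->tx_errors  = es->tx_errors;
 */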
2365 /* If the VF is not trusted restrict the number of MAC/VLAN it can program */
2366 #define I40E_VC_MAX_MAC_ADDR_PER_VF 12
2367 #define I40E_VC_MAX_VLAN_PER_VF 8
2370 * i40e_check_vf_permission
2371 * @vf: pointer to the VF info
2372 * @al: MAC address list from virtchnl
2374 * Check that the given list of MAC addresses is allowed. Returns an error
2375 * if any address in the list is invalid or not permitted. Conditions checked:
2377 * 1) broadcast and zero addresses are never valid
2378 * 2) unicast addresses are not allowed if the VMM has administratively set
2379 * the VF MAC address, unless the VF is marked as privileged.
2380 * 3) There is enough space to add all the addresses.
2382 * Note that to guarantee consistency, it is expected this function be called
2383 * while holding the mac_filter_hash_lock, as otherwise the current number of
2384 * addresses might not be accurate.
2386 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2387 struct virtchnl_ether_addr_list *al)
2389 struct i40e_pf *pf = vf->pf;
2392 /* If this VF is not privileged, then we can't add more than a limited
2393 * number of addresses. Check to make sure that the additions do not
2394 * push us over the limit.
2396 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2397 (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
2398 dev_err(&pf->pdev->dev,
2399 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2403 for (i = 0; i < al->num_elements; i++) {
2404 u8 *addr = al->list[i].addr;
2406 if (is_broadcast_ether_addr(addr) ||
2407 is_zero_ether_addr(addr)) {
2408 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2410 return I40E_ERR_INVALID_MAC_ADDR;
2413 /* If the host VMM administrator has set the VF MAC address
2414 * administratively via the ndo_set_vf_mac command then deny
2415 * permission to the VF to add or delete unicast MAC addresses.
2416 * The exception is a privileged VF, which may change them freely.
2417 * The VF may request to set the MAC address filter already
2418 * assigned to it so do not return an error in that case.
2420 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2421 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2422 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2423 dev_err(&pf->pdev->dev,
2424 "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
2433 * i40e_vc_add_mac_addr_msg
2434 * @vf: pointer to the VF info
2435 * @msg: pointer to the msg buffer
2436 * @msglen: msg length
2438 * add guest MAC address filter
2440 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2442 struct virtchnl_ether_addr_list *al =
2443 (struct virtchnl_ether_addr_list *)msg;
2444 struct i40e_pf *pf = vf->pf;
2445 struct i40e_vsi *vsi = NULL;
2446 u16 vsi_id = al->vsi_id;
2447 i40e_status ret = 0;
2450 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2451 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2452 ret = I40E_ERR_PARAM;
2456 vsi = pf->vsi[vf->lan_vsi_idx];
2458 /* Lock once, because every function inside the loop below accesses the
2459 * VSI's MAC filter list, which must be protected by the same lock.
2461 spin_lock_bh(&vsi->mac_filter_hash_lock);
2463 ret = i40e_check_vf_permission(vf, al);
2465 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2469 /* add new addresses to the list */
2470 for (i = 0; i < al->num_elements; i++) {
2471 struct i40e_mac_filter *f;
2473 f = i40e_find_mac(vsi, al->list[i].addr);
2475 f = i40e_add_mac_filter(vsi, al->list[i].addr);
2478 dev_err(&pf->pdev->dev,
2479 "Unable to add MAC filter %pM for VF %d\n",
2480 al->list[i].addr, vf->vf_id);
2481 ret = I40E_ERR_PARAM;
2482 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2489 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2491 /* program the updated filter list */
2492 ret = i40e_sync_vsi_filters(vsi);
2494 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2498 /* send the response to the VF */
2499 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2504 * i40e_vc_del_mac_addr_msg
2505 * @vf: pointer to the VF info
2506 * @msg: pointer to the msg buffer
2507 * @msglen: msg length
2509 * remove guest MAC address filter
2511 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2513 struct virtchnl_ether_addr_list *al =
2514 (struct virtchnl_ether_addr_list *)msg;
2515 struct i40e_pf *pf = vf->pf;
2516 struct i40e_vsi *vsi = NULL;
2517 u16 vsi_id = al->vsi_id;
2518 i40e_status ret = 0;
2521 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2522 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2523 ret = I40E_ERR_PARAM;
2527 for (i = 0; i < al->num_elements; i++) {
2528 if (is_broadcast_ether_addr(al->list[i].addr) ||
2529 is_zero_ether_addr(al->list[i].addr)) {
2530 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2531 al->list[i].addr, vf->vf_id);
2532 ret = I40E_ERR_INVALID_MAC_ADDR;
2536 vsi = pf->vsi[vf->lan_vsi_idx];
2538 spin_lock_bh(&vsi->mac_filter_hash_lock);
2539 /* delete addresses from the list */
2540 for (i = 0; i < al->num_elements; i++)
2541 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2542 ret = I40E_ERR_INVALID_MAC_ADDR;
2543 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2549 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2551 /* program the updated filter list */
2552 ret = i40e_sync_vsi_filters(vsi);
2554 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2558 /* send the response to the VF */
2559 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
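/* Illustrative sketch (not part of this driver): layout of the
 * variable-length address list parsed by the add/del MAC handlers above
 * (mac0/mac1 are hypothetical u8[ETH_ALEN] arrays).
 *
 *	struct virtchnl_ether_addr_list *al;
 *	int len = sizeof(*al) + sizeof(struct virtchnl_ether_addr);
 *
 *	al = kzalloc(len, GFP_KERNEL);	// room for two addresses total
 *	al->vsi_id = vsi_id;
 *	al->num_elements = 2;
 *	ether_addr_copy(al->list[0].addr, mac0);
 *	ether_addr_copy(al->list[1].addr, mac1);
 */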
2564 * i40e_vc_add_vlan_msg
2565 * @vf: pointer to the VF info
2566 * @msg: pointer to the msg buffer
2567 * @msglen: msg length
2569 * program guest VLAN ID
2571 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2573 struct virtchnl_vlan_filter_list *vfl =
2574 (struct virtchnl_vlan_filter_list *)msg;
2575 struct i40e_pf *pf = vf->pf;
2576 struct i40e_vsi *vsi = NULL;
2577 u16 vsi_id = vfl->vsi_id;
2578 i40e_status aq_ret = 0;
2581 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2582 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2583 dev_err(&pf->pdev->dev,
2584 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2587 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2588 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2589 aq_ret = I40E_ERR_PARAM;
2593 for (i = 0; i < vfl->num_elements; i++) {
2594 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2595 aq_ret = I40E_ERR_PARAM;
2596 dev_err(&pf->pdev->dev,
2597 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2601 vsi = pf->vsi[vf->lan_vsi_idx];
2602 if (vsi->info.pvid) {
2603 aq_ret = I40E_ERR_PARAM;
2607 i40e_vlan_stripping_enable(vsi);
2608 for (i = 0; i < vfl->num_elements; i++) {
2609 /* add new VLAN filter */
2610 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2614 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2615 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2619 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2620 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2626 dev_err(&pf->pdev->dev,
2627 "Unable to add VLAN filter %d for VF %d, error %d\n",
2628 vfl->vlan_id[i], vf->vf_id, ret);
2632 /* send the response to the VF */
2633 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2637 * i40e_vc_remove_vlan_msg
2638 * @vf: pointer to the VF info
2639 * @msg: pointer to the msg buffer
2640 * @msglen: msg length
2642 * remove a programmed guest VLAN ID
2644 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2646 struct virtchnl_vlan_filter_list *vfl =
2647 (struct virtchnl_vlan_filter_list *)msg;
2648 struct i40e_pf *pf = vf->pf;
2649 struct i40e_vsi *vsi = NULL;
2650 u16 vsi_id = vfl->vsi_id;
2651 i40e_status aq_ret = 0;
2654 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2655 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2656 aq_ret = I40E_ERR_PARAM;
2660 for (i = 0; i < vfl->num_elements; i++) {
2661 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2662 aq_ret = I40E_ERR_PARAM;
2667 vsi = pf->vsi[vf->lan_vsi_idx];
2668 if (vsi->info.pvid) {
2669 aq_ret = I40E_ERR_PARAM;
2673 for (i = 0; i < vfl->num_elements; i++) {
2674 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2677 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2678 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2682 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2683 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2690 /* send the response to the VF */
2691 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
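/* Illustrative sketch (not part of this driver): the variable-length
 * VLAN list parsed by the add/remove VLAN handlers above.
 *
 *	struct virtchnl_vlan_filter_list *vfl;
 *	int len = sizeof(*vfl) + sizeof(u16);	// room for two VLAN ids total
 *
 *	vfl = kzalloc(len, GFP_KERNEL);
 *	vfl->vsi_id = vsi_id;
 *	vfl->num_elements = 2;
 *	vfl->vlan_id[0] = 100;	// must be <= I40E_MAX_VLANID
 *	vfl->vlan_id[1] = 200;
 */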
2695 * i40e_vc_iwarp_msg
2696 * @vf: pointer to the VF info
2697 * @msg: pointer to the msg buffer
2698 * @msglen: msg length
2700 * called from the VF for iWARP messages
2702 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2704 struct i40e_pf *pf = vf->pf;
2705 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2706 i40e_status aq_ret = 0;
2708 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2709 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2710 aq_ret = I40E_ERR_PARAM;
2714 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2718 /* send the response to the VF */
2719 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2724 * i40e_vc_iwarp_qvmap_msg
2725 * @vf: pointer to the VF info
2726 * @msg: pointer to the msg buffer
2727 * @msglen: msg length
2728 * @config: config qvmap or release it
2730 * called from the VF for iWARP messages
2732 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
2735 struct virtchnl_iwarp_qvlist_info *qvlist_info =
2736 (struct virtchnl_iwarp_qvlist_info *)msg;
2737 i40e_status aq_ret = 0;
2739 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2740 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2741 aq_ret = I40E_ERR_PARAM;
2746 if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2747 aq_ret = I40E_ERR_PARAM;
2749 i40e_release_iwarp_qvlist(vf);
2753 /* send the response to the VF */
2754 return i40e_vc_send_resp_to_vf(vf,
2755 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
2756 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
2761 * i40e_vc_config_rss_key
2762 * @vf: pointer to the VF info
2763 * @msg: pointer to the msg buffer
2764 * @msglen: msg length
2766 * Configure the VF's RSS key
2768 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
2770 struct virtchnl_rss_key *vrk =
2771 (struct virtchnl_rss_key *)msg;
2772 struct i40e_pf *pf = vf->pf;
2773 struct i40e_vsi *vsi = NULL;
2774 u16 vsi_id = vrk->vsi_id;
2775 i40e_status aq_ret = 0;
2777 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2778 !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2779 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
2780 aq_ret = I40E_ERR_PARAM;
2784 vsi = pf->vsi[vf->lan_vsi_idx];
2785 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
2787 /* send the response to the VF */
2788 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
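/* The length check above pins vrk->key_len to I40E_HKEY_ARRAY_SIZE,
 * which works out to (I40E_PFQF_HKEY_MAX_INDEX + 1) * 4 = 52 bytes,
 * matching the thirteen 32-bit hash-key registers per function.
 */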
2793 * i40e_vc_config_rss_lut
2794 * @vf: pointer to the VF info
2795 * @msg: pointer to the msg buffer
2796 * @msglen: msg length
2798 * Configure the VF's RSS LUT
2800 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
2802 struct virtchnl_rss_lut *vrl =
2803 (struct virtchnl_rss_lut *)msg;
2804 struct i40e_pf *pf = vf->pf;
2805 struct i40e_vsi *vsi = NULL;
2806 u16 vsi_id = vrl->vsi_id;
2807 i40e_status aq_ret = 0;
2809 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2810 !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2811 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
2812 aq_ret = I40E_ERR_PARAM;
2816 vsi = pf->vsi[vf->lan_vsi_idx];
2817 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
2818 /* send the response to the VF */
2820 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
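/* Illustrative sketch (not part of this driver): the LUT the VF supplies
 * must contain exactly I40E_VF_HLUT_ARRAY_SIZE entries, each naming one
 * of the VF's own queues. A round-robin spread over a hypothetical
 * num_queue_pairs looks like:
 *
 *	for (i = 0; i < I40E_VF_HLUT_ARRAY_SIZE; i++)
 *		vrl->lut[i] = i % num_queue_pairs;
 */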
2825 * i40e_vc_get_rss_hena
2826 * @vf: pointer to the VF info
2827 * @msg: pointer to the msg buffer
2828 * @msglen: msg length
2830 * Return the RSS HENA bits allowed by the hardware
2832 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2834 struct virtchnl_rss_hena *vrh = NULL;
2835 struct i40e_pf *pf = vf->pf;
2836 i40e_status aq_ret = 0;
2839 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2840 aq_ret = I40E_ERR_PARAM;
2843 len = sizeof(struct virtchnl_rss_hena);
2845 vrh = kzalloc(len, GFP_KERNEL);
2847 aq_ret = I40E_ERR_NO_MEMORY;
2851 vrh->hena = i40e_pf_get_default_rss_hena(pf);
2853 /* send the response back to the VF */
2854 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
2855 aq_ret, (u8 *)vrh, len);
2861 * i40e_vc_set_rss_hena
2862 * @vf: pointer to the VF info
2863 * @msg: pointer to the msg buffer
2864 * @msglen: msg length
2866 * Set the RSS HENA bits for the VF
2868 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2870 struct virtchnl_rss_hena *vrh =
2871 (struct virtchnl_rss_hena *)msg;
2872 struct i40e_pf *pf = vf->pf;
2873 struct i40e_hw *hw = &pf->hw;
2874 i40e_status aq_ret = 0;
2876 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2877 aq_ret = I40E_ERR_PARAM;
2880 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
2881 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
2882 (u32)(vrh->hena >> 32));
2884 /* send the response to the VF */
2886 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
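/* The HENA value is a 64-bit bitmap of hash-enabled flow types, while
 * the VFQF_HENA1 registers are 32 bits wide, hence the split above. For
 * example, vrh->hena = 0x0000000140000000ULL sets bit 30 through
 * register 0 ((u32)hena == 0x40000000) and bit 32 through register 1
 * ((u32)(hena >> 32) == 0x1).
 */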
2890 * i40e_vc_enable_vlan_stripping
2891 * @vf: pointer to the VF info
2892 * @msg: pointer to the msg buffer
2893 * @msglen: msg length
2895 * Enable VLAN header stripping for the VF
2897 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2900 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2901 i40e_status aq_ret = 0;
2903 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2904 aq_ret = I40E_ERR_PARAM;
2908 i40e_vlan_stripping_enable(vsi);
2910 /* send the response to the VF */
2912 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2917 * i40e_vc_disable_vlan_stripping
2918 * @vf: pointer to the VF info
2919 * @msg: pointer to the msg buffer
2920 * @msglen: msg length
2922 * Disable VLAN header stripping for the VF
2924 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2927 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2928 i40e_status aq_ret = 0;
2930 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2931 aq_ret = I40E_ERR_PARAM;
2935 i40e_vlan_stripping_disable(vsi);
2937 /* send the response to the VF */
2939 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2944 * i40e_validate_cloud_filter
2945 * @vf: pointer to the VF info
2946 * @tc_filter: TC filter to validate
2948 * This function validates a cloud filter programmed as a TC filter for ADq
2950 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
2951 struct virtchnl_filter *tc_filter)
2953 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
2954 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
2955 struct i40e_pf *pf = vf->pf;
2956 struct i40e_vsi *vsi = NULL;
2957 struct i40e_mac_filter *f;
2958 struct hlist_node *h;
2962 if (!tc_filter->action) {
2963 dev_info(&pf->pdev->dev,
2964 "VF %d: Currently ADq doesn't support Drop Action\n",
2969 /* action_meta is the TC number to which the filter is applied */
2970 if (!tc_filter->action_meta ||
2971 tc_filter->action_meta > I40E_MAX_VF_VSI) {
2972 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
2973 vf->vf_id, tc_filter->action_meta);
2977 /* Check filter if it's programmed for advanced mode or basic mode.
2978 * There are two ADq modes (for VF only),
2979 * 1. Basic mode: intended to allow as many filter options as possible
2980 * to be added to a VF in Non-trusted mode. Main goal is
2981 * to add filters to its own MAC and VLAN id.
2982 * 2. Advanced mode: allows filters to match fields other than the VF's
2983 * own MAC or VLAN. This mode requires the VF to be trusted.
2986 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
2987 vsi = pf->vsi[vf->lan_vsi_idx];
2988 f = i40e_find_mac(vsi, data.dst_mac);
2991 dev_info(&pf->pdev->dev,
2992 "Destination MAC %pM doesn't belong to VF %d\n",
2993 data.dst_mac, vf->vf_id);
2998 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3000 if (f->vlan == ntohs(data.vlan_id)) {
3006 dev_info(&pf->pdev->dev,
3007 "VF %d doesn't have any VLAN id %u\n",
3008 vf->vf_id, ntohs(data.vlan_id));
3013 /* Check if VF is trusted */
3014 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3015 dev_err(&pf->pdev->dev,
3016 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3018 return I40E_ERR_CONFIG;
3022 if (mask.dst_mac[0] & data.dst_mac[0]) {
3023 if (is_broadcast_ether_addr(data.dst_mac) ||
3024 is_zero_ether_addr(data.dst_mac)) {
3025 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3026 vf->vf_id, data.dst_mac);
3031 if (mask.src_mac[0] & data.src_mac[0]) {
3032 if (is_broadcast_ether_addr(data.src_mac) ||
3033 is_zero_ether_addr(data.src_mac)) {
3034 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3035 vf->vf_id, data.src_mac);
3040 if (mask.dst_port & data.dst_port) {
3041 if (!data.dst_port) {
3042 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3048 if (mask.src_port & data.src_port) {
3049 if (!data.src_port) {
3050 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3056 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3057 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3058 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3063 if (mask.vlan_id & data.vlan_id) {
3064 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3065 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3071 return I40E_SUCCESS;
3073 return I40E_ERR_CONFIG;
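/* Illustrative sketch (not part of this driver): a minimal TCP/IPv4
 * cloud filter that passes the validation above, steering one
 * destination port to TC 1. The all-ones mask convention is an
 * assumption based on the mask checks above.
 *
 *	struct virtchnl_filter f = {};
 *
 *	f.flow_type = VIRTCHNL_TCP_V4_FLOW;
 *	f.action = VIRTCHNL_ACTION_TC_REDIRECT;
 *	f.action_meta = 1;			// target TC number
 *	f.data.tcp_spec.dst_port = htons(5001);
 *	f.mask.tcp_spec.dst_port = htons(0xFFFF);
 */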
3077 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3078 * @vf: pointer to the VF info
3079 * @seid: seid of the VSI being searched for
3081 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3083 struct i40e_pf *pf = vf->pf;
3084 struct i40e_vsi *vsi = NULL;
3087 for (i = 0; i < vf->num_tc; i++) {
3088 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3089 if (vsi && vsi->seid == seid)
3096 * i40e_del_all_cloud_filters
3097 * @vf: pointer to the VF info
3099 * This function deletes all cloud filters
3101 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3103 struct i40e_cloud_filter *cfilter = NULL;
3104 struct i40e_pf *pf = vf->pf;
3105 struct i40e_vsi *vsi = NULL;
3106 struct hlist_node *node;
3109 hlist_for_each_entry_safe(cfilter, node,
3110 &vf->cloud_filter_list, cloud_node) {
3111 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3114 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3115 vf->vf_id, cfilter->seid);
3119 if (cfilter->dst_port)
3120 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3123 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3125 dev_err(&pf->pdev->dev,
3126 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3127 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3128 i40e_aq_str(&pf->hw,
3129 pf->hw.aq.asq_last_status));
3131 hlist_del(&cfilter->cloud_node);
3133 vf->num_cloud_filters--;
3138 * i40e_vc_del_cloud_filter
3139 * @vf: pointer to the VF info
3140 * @msg: pointer to the msg buffer
3142 * This function deletes a cloud filter programmed as TC filter for ADq
3144 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3146 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3147 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3148 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3149 struct i40e_cloud_filter cfilter, *cf = NULL;
3150 struct i40e_pf *pf = vf->pf;
3151 struct i40e_vsi *vsi = NULL;
3152 struct hlist_node *node;
3153 i40e_status aq_ret = 0;
3156 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3157 aq_ret = I40E_ERR_PARAM;
3161 if (!vf->adq_enabled) {
3162 dev_info(&pf->pdev->dev,
3163 "VF %d: ADq not enabled, can't delete cloud filter\n",
3165 aq_ret = I40E_ERR_PARAM;
3169 if (i40e_validate_cloud_filter(vf, vcf)) {
3170 dev_info(&pf->pdev->dev,
3171 "VF %d: Invalid input, can't apply cloud filter\n",
3173 aq_ret = I40E_ERR_PARAM;
3177 memset(&cfilter, 0, sizeof(cfilter));
3178 /* parse destination mac address */
3179 for (i = 0; i < ETH_ALEN; i++)
3180 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3182 /* parse source mac address */
3183 for (i = 0; i < ETH_ALEN; i++)
3184 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3186 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3187 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3188 cfilter.src_port = mask.src_port & tcf.src_port;
3190 switch (vcf->flow_type) {
3191 case VIRTCHNL_TCP_V4_FLOW:
3192 cfilter.n_proto = ETH_P_IP;
3193 if (mask.dst_ip[0] & tcf.dst_ip[0])
3194 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3195 ARRAY_SIZE(tcf.dst_ip));
3196 else if (mask.src_ip[0] & tcf.dst_ip[0])
3197 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3198 ARRAY_SIZE(tcf.dst_ip));
3200 case VIRTCHNL_TCP_V6_FLOW:
3201 cfilter.n_proto = ETH_P_IPV6;
3202 if (mask.dst_ip[3] & tcf.dst_ip[3])
3203 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3204 sizeof(cfilter.ip.v6.dst_ip6));
3205 if (mask.src_ip[3] & tcf.src_ip[3])
3206 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3207 sizeof(cfilter.ip.v6.src_ip6));
3210 /* A TC filter can be built from different field combinations; in
3211 * this case IP is not part of the filter config
3213 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3217 /* get the VSI to which the TC belongs */
3218 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3219 cfilter.seid = vsi->seid;
3220 cfilter.flags = vcf->field_flags;
3222 /* Deleting TC filter */
3224 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3226 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3228 dev_err(&pf->pdev->dev,
3229 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3230 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3231 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3235 hlist_for_each_entry_safe(cf, node,
3236 &vf->cloud_filter_list, cloud_node) {
3237 if (cf->seid != cfilter.seid)
3240 if (cfilter.dst_port != cf->dst_port)
3242 if (mask.dst_mac[0])
3243 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3245 /* for IPv4 data to be valid, only the first byte of the mask is set */
3246 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3247 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3248 ARRAY_SIZE(tcf.dst_ip)))
3250 /* for IPv6, the mask is set for all sixteen bytes (four 32-bit words) */
3251 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3252 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3253 sizeof(cfilter.ip.v6.src_ip6)))
3256 if (cfilter.vlan_id != cf->vlan_id)
3259 hlist_del(&cf->cloud_node);
3261 vf->num_cloud_filters--;
3265 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3270 * i40e_vc_add_cloud_filter
3271 * @vf: pointer to the VF info
3272 * @msg: pointer to the msg buffer
3274 * This function adds a cloud filter programmed as TC filter for ADq
3276 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3278 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3279 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3280 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3281 struct i40e_cloud_filter *cfilter = NULL;
3282 struct i40e_pf *pf = vf->pf;
3283 struct i40e_vsi *vsi = NULL;
3284 i40e_status aq_ret = 0;
3287 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3288 aq_ret = I40E_ERR_PARAM;
3292 if (!vf->adq_enabled) {
3293 dev_info(&pf->pdev->dev,
3294 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3296 aq_ret = I40E_ERR_PARAM;
3300 if (i40e_validate_cloud_filter(vf, vcf)) {
3301 dev_info(&pf->pdev->dev,
3302 "VF %d: Invalid input/s, can't apply cloud filter\n",
3304 aq_ret = I40E_ERR_PARAM;
3308 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3309 if (!cfilter)
3310 return -ENOMEM;
3312 /* parse destination mac address */
3313 for (i = 0; i < ETH_ALEN; i++)
3314 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3316 /* parse source mac address */
3317 for (i = 0; i < ETH_ALEN; i++)
3318 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3320 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3321 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3322 cfilter->src_port = mask.src_port & tcf.src_port;
3324 switch (vcf->flow_type) {
3325 case VIRTCHNL_TCP_V4_FLOW:
3326 cfilter->n_proto = ETH_P_IP;
3327 if (mask.dst_ip[0] & tcf.dst_ip[0])
3328 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3329 ARRAY_SIZE(tcf.dst_ip));
3330 else if (mask.src_ip[0] & tcf.dst_ip[0])
3331 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3332 ARRAY_SIZE(tcf.dst_ip));
3334 case VIRTCHNL_TCP_V6_FLOW:
3335 cfilter->n_proto = ETH_P_IPV6;
3336 if (mask.dst_ip[3] & tcf.dst_ip[3])
3337 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3338 sizeof(cfilter->ip.v6.dst_ip6));
3339 if (mask.src_ip[3] & tcf.src_ip[3])
3340 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3341 sizeof(cfilter->ip.v6.src_ip6));
3344 /* A TC filter can be built from different field combinations; in
3345 * this case IP is not part of the filter config
3347 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3351 /* get the VSI to which the TC belongs */
3352 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3353 cfilter->seid = vsi->seid;
3354 cfilter->flags = vcf->field_flags;
3356 /* Adding cloud filter programmed as TC filter */
3358 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3360 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3362 dev_err(&pf->pdev->dev,
3363 "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3364 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3365 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3369 INIT_HLIST_NODE(&cfilter->cloud_node);
3370 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3371 vf->num_cloud_filters++;
3373 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3378 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3379 * @vf: pointer to the VF info
3380 * @msg: pointer to the msg buffer
3382 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3384 struct virtchnl_tc_info *tci =
3385 (struct virtchnl_tc_info *)msg;
3386 struct i40e_pf *pf = vf->pf;
3387 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3388 int i, adq_request_qps = 0, speed = 0;
3389 i40e_status aq_ret = 0;
3391 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3392 aq_ret = I40E_ERR_PARAM;
3396 /* ADq cannot be applied if spoof check is ON */
3397 if (vf->spoofchk) {
3398 dev_err(&pf->pdev->dev,
3399 "Spoof check is ON, turn it OFF to enable ADq\n");
3400 aq_ret = I40E_ERR_PARAM;
3404 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3405 dev_err(&pf->pdev->dev,
3406 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3408 aq_ret = I40E_ERR_PARAM;
3412 /* max number of traffic classes for VF currently capped at 4 */
3413 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3414 dev_err(&pf->pdev->dev,
3415 "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n",
3416 vf->vf_id, tci->num_tc);
3417 aq_ret = I40E_ERR_PARAM;
3421 /* validate queues for each TC */
3422 for (i = 0; i < tci->num_tc; i++)
3423 if (!tci->list[i].count ||
3424 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3425 dev_err(&pf->pdev->dev,
3426 "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n",
3427 vf->vf_id, i, tci->list[i].count);
3428 aq_ret = I40E_ERR_PARAM;
3432 /* need Max VF queues but already have default number of queues */
3433 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3435 if (pf->queues_left < adq_request_qps) {
3436 dev_err(&pf->pdev->dev,
3437 "No queues left to allocate to VF %d\n",
3439 aq_ret = I40E_ERR_PARAM;
3442 /* we need to allocate max VF queues to enable ADq so as to
3443 * make sure an ADq-enabled VF always gets its queues back
3444 * after it goes through a reset.
3446 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3449 /* get link speed in Mbps to validate the rate limit */
3450 switch (ls->link_speed) {
3451 case VIRTCHNL_LINK_SPEED_100MB:
3452 speed = SPEED_100;
3453 break;
3454 case VIRTCHNL_LINK_SPEED_1GB:
3455 speed = SPEED_1000;
3456 break;
3457 case VIRTCHNL_LINK_SPEED_10GB:
3458 speed = SPEED_10000;
3460 case VIRTCHNL_LINK_SPEED_20GB:
3461 speed = SPEED_20000;
3463 case VIRTCHNL_LINK_SPEED_25GB:
3464 speed = SPEED_25000;
3466 case VIRTCHNL_LINK_SPEED_40GB:
3467 speed = SPEED_40000;
3470 dev_err(&pf->pdev->dev,
3471 "Cannot detect link speed\n");
3472 aq_ret = I40E_ERR_PARAM;
3476 /* parse data from the queue channel info */
3477 vf->num_tc = tci->num_tc;
3478 for (i = 0; i < vf->num_tc; i++) {
3479 if (tci->list[i].max_tx_rate) {
3480 if (tci->list[i].max_tx_rate > speed) {
3481 dev_err(&pf->pdev->dev,
3482 "Invalid max tx rate %llu specified for VF %d.",
3483 tci->list[i].max_tx_rate,
3485 aq_ret = I40E_ERR_PARAM;
3488 vf->ch[i].max_tx_rate =
3489 tci->list[i].max_tx_rate;
3492 vf->ch[i].num_qps = tci->list[i].count;
3495 /* set this flag only after making sure all inputs are sane */
3496 vf->adq_enabled = true;
3497 /* num_req_queues is set when the user changes the number of queues via
3498 * ethtool, and it causes issues for the default VSI (which depends on
3499 * this variable) when ADq is enabled, hence reset it here.
3501 vf->num_req_queues = 0;
3503 /* reset the VF in order to allocate resources */
3504 i40e_vc_notify_vf_reset(vf);
3505 i40e_reset_vf(vf, false);
3507 return I40E_SUCCESS;
3509 /* send the response to the VF */
3511 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
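/* Illustrative sketch (not part of this driver): an ADq request with two
 * traffic classes that satisfies the validation above (1-4 TCs, 1-4
 * queue pairs per TC, rate limits within link speed).
 *
 *	struct virtchnl_tc_info *tci;
 *	int len = sizeof(*tci) + sizeof(struct virtchnl_channel_info);
 *
 *	tci = kzalloc(len, GFP_KERNEL);
 *	tci->num_tc = 2;
 *	tci->list[0].count = 4;			// TC0: 4 queue pairs
 *	tci->list[1].count = 4;			// TC1: 4 queue pairs
 *	tci->list[1].max_tx_rate = 1000;	// cap TC1 at 1000 Mbps
 */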
3516 * i40e_vc_del_qch_msg
3517 * @vf: pointer to the VF info
3518 * @msg: pointer to the msg buffer
3520 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3522 struct i40e_pf *pf = vf->pf;
3523 i40e_status aq_ret = 0;
3525 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3526 aq_ret = I40E_ERR_PARAM;
3530 if (vf->adq_enabled) {
3531 i40e_del_all_cloud_filters(vf);
3533 vf->adq_enabled = false;
3535 dev_info(&pf->pdev->dev,
3536 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3539 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3541 aq_ret = I40E_ERR_PARAM;
3544 /* reset the VF in order to allocate resources */
3545 i40e_vc_notify_vf_reset(vf);
3546 i40e_reset_vf(vf, false);
3548 return I40E_SUCCESS;
3551 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3556 * i40e_vc_process_vf_msg
3557 * @pf: pointer to the PF structure
3558 * @vf_id: source VF id
3559 * @v_opcode: operation code
3560 * @msg: pointer to the msg buffer
3561 * @msglen: msg length
3563 * called from the common aeq/arq handler to
3564 * process request from VF
3566 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3567 u32 v_retval, u8 *msg, u16 msglen)
3569 struct i40e_hw *hw = &pf->hw;
3570 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3574 pf->vf_aq_requests++;
3575 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
3577 vf = &(pf->vf[local_vf_id]);
3579 /* Check if VF is disabled. */
3580 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3581 return I40E_ERR_PARAM;
3583 /* perform basic checks on the msg */
3584 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3586 /* perform additional checks specific to this driver */
3587 if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
3588 struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
3590 if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
3592 } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
3593 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
3595 if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
3600 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3601 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3602 local_vf_id, v_opcode, msglen);
3604 case VIRTCHNL_ERR_PARAM:
3605 return -EPERM;
3606 default:
3607 return -EINVAL;
3612 case VIRTCHNL_OP_VERSION:
3613 ret = i40e_vc_get_version_msg(vf, msg);
3615 case VIRTCHNL_OP_GET_VF_RESOURCES:
3616 ret = i40e_vc_get_vf_resources_msg(vf, msg);
3617 i40e_vc_notify_vf_link_state(vf);
3619 case VIRTCHNL_OP_RESET_VF:
3620 i40e_vc_reset_vf_msg(vf);
3623 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3624 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
3626 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3627 ret = i40e_vc_config_queues_msg(vf, msg, msglen);
3629 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3630 ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
3632 case VIRTCHNL_OP_ENABLE_QUEUES:
3633 ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
3634 i40e_vc_notify_vf_link_state(vf);
3636 case VIRTCHNL_OP_DISABLE_QUEUES:
3637 ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
3639 case VIRTCHNL_OP_ADD_ETH_ADDR:
3640 ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
3642 case VIRTCHNL_OP_DEL_ETH_ADDR:
3643 ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
3645 case VIRTCHNL_OP_ADD_VLAN:
3646 ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
3648 case VIRTCHNL_OP_DEL_VLAN:
3649 ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
3651 case VIRTCHNL_OP_GET_STATS:
3652 ret = i40e_vc_get_stats_msg(vf, msg, msglen);
3654 case VIRTCHNL_OP_IWARP:
3655 ret = i40e_vc_iwarp_msg(vf, msg, msglen);
3657 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
3658 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
3660 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
3661 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
3663 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3664 ret = i40e_vc_config_rss_key(vf, msg, msglen);
3666 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3667 ret = i40e_vc_config_rss_lut(vf, msg, msglen);
3669 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
3670 ret = i40e_vc_get_rss_hena(vf, msg, msglen);
3672 case VIRTCHNL_OP_SET_RSS_HENA:
3673 ret = i40e_vc_set_rss_hena(vf, msg, msglen);
3675 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3676 ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
3678 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3679 ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
3681 case VIRTCHNL_OP_REQUEST_QUEUES:
3682 ret = i40e_vc_request_queues_msg(vf, msg, msglen);
3684 case VIRTCHNL_OP_ENABLE_CHANNELS:
3685 ret = i40e_vc_add_qch_msg(vf, msg);
3687 case VIRTCHNL_OP_DISABLE_CHANNELS:
3688 ret = i40e_vc_del_qch_msg(vf, msg);
3690 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
3691 ret = i40e_vc_add_cloud_filter(vf, msg);
3693 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
3694 ret = i40e_vc_del_cloud_filter(vf, msg);
3696 case VIRTCHNL_OP_UNKNOWN:
3698 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
3699 v_opcode, local_vf_id);
3700 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
3701 I40E_ERR_NOT_IMPLEMENTED);
3709 * i40e_vc_process_vflr_event
3710 * @pf: pointer to the PF structure
3712 * called from the VFLR irq handler to
3713 * free up VF resources and state variables
3715 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3717 struct i40e_hw *hw = &pf->hw;
3718 u32 reg, reg_idx, bit_idx;
3722 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3725 /* Re-enable the VFLR interrupt cause here, before looking for which
3726 * VF got reset. Otherwise, if another VF gets a reset while the
3727 * first one is being processed, that interrupt will be lost, and
3728 * that VF will be stuck in reset forever.
3730 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3731 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
3732 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3735 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3736 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
3737 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
3738 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
3739 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
3740 vf = &pf->vf[vf_id];
3741 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
3742 if (reg & BIT(bit_idx))
3743 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
3744 i40e_reset_vf(vf, true);
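/* Worked example of the index math above (vf_base_id value hypothetical):
 * with vf_base_id = 64 and vf_id = 3, the absolute VF id is 67, so
 * reg_idx = 67 / 32 = 2 and bit_idx = 67 % 32 = 3; the FLR status for
 * this VF is bit 3 of GLGEN_VFLRSTAT(2).
 */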
3751 * i40e_ndo_set_vf_mac
3752 * @netdev: network interface device structure
3753 * @vf_id: VF identifier
3756 * program VF MAC address
3758 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3760 struct i40e_netdev_priv *np = netdev_priv(netdev);
3761 struct i40e_vsi *vsi = np->vsi;
3762 struct i40e_pf *pf = vsi->back;
3763 struct i40e_mac_filter *f;
3766 struct hlist_node *h;
3770 /* validate the request */
3771 if (vf_id >= pf->num_alloc_vfs) {
3772 dev_err(&pf->pdev->dev,
3773 "Invalid VF Identifier %d\n", vf_id);
3778 vf = &(pf->vf[vf_id]);
3779 vsi = pf->vsi[vf->lan_vsi_idx];
3781 /* When the VF is resetting wait until it is done.
3782 * It can take up to 200 milliseconds,
3783 * but wait for up to 300 milliseconds to be safe.
3785 for (i = 0; i < 15; i++) {
3786 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
3790 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3791 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3797 if (is_multicast_ether_addr(mac)) {
3798 dev_err(&pf->pdev->dev,
3799 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
3804 /* Lock once because the add/del filter functions invoked below require
3805 * the mac_filter_hash_lock to be held
3807 spin_lock_bh(&vsi->mac_filter_hash_lock);
3809 /* delete the temporary mac address */
3810 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
3811 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
3813 /* Delete all the filters for this VSI - we're going to kill it anyway */
3816 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
3817 __i40e_del_filter(vsi, f);
3819 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3821 /* program mac filter */
3822 if (i40e_sync_vsi_filters(vsi)) {
3823 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
3827 ether_addr_copy(vf->default_lan_addr.addr, mac);
3829 if (is_zero_ether_addr(mac)) {
3830 vf->pf_set_mac = false;
3831 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
3833 vf->pf_set_mac = true;
3834 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
3838 /* Force the VF driver stop so it has to reload with new MAC address */
3839 i40e_vc_disable_vf(vf);
3840 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
3847 * i40e_vsi_has_vlans - True if VSI has configured VLANs
3848 * @vsi: pointer to the vsi
3850 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
3851 * we have no configured VLANs. Do not call while holding the
3852 * mac_filter_hash_lock.
3854 static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
3858 /* If we have a port VLAN, then the VSI cannot have any VLANs
3859 * configured, as all MAC/VLAN filters will be assigned to the PVID.
3864 /* Since we don't have a PVID, we know that if the device is in VLAN
3865 * mode it must be because of a VLAN filter configured on this VSI.
3867 spin_lock_bh(&vsi->mac_filter_hash_lock);
3868 have_vlans = i40e_is_vsi_in_vlan(vsi);
3869 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3875 * i40e_ndo_set_vf_port_vlan
3876 * @netdev: network interface device structure
3877 * @vf_id: VF identifier
3878 * @vlan_id: VLAN ID to set
3879 * @qos: priority setting
3880 * @vlan_proto: vlan protocol
3882 * program VF VLAN ID and/or QoS
3884 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
3885 u16 vlan_id, u8 qos, __be16 vlan_proto)
3887 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
3888 struct i40e_netdev_priv *np = netdev_priv(netdev);
3889 struct i40e_pf *pf = np->vsi->back;
3890 struct i40e_vsi *vsi;
3894 /* validate the request */
3895 if (vf_id >= pf->num_alloc_vfs) {
3896 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3901 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
3902 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
3907 if (vlan_proto != htons(ETH_P_8021Q)) {
3908 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
3909 ret = -EPROTONOSUPPORT;
3913 vf = &(pf->vf[vf_id]);
3914 vsi = pf->vsi[vf->lan_vsi_idx];
3915 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3916 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3922 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
3923 /* duplicate request, so just return success */
3926 if (i40e_vsi_has_vlans(vsi)) {
3927 dev_err(&pf->pdev->dev,
3928 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
3930 /* Administrator error - knock the VF offline until the
3931 * administrator reconfigures the network correctly and then
3932 * reloads the VF driver.
3934 i40e_vc_disable_vf(vf);
3935 /* During reset the VF got a new VSI, so refresh the pointer. */
3936 vsi = pf->vsi[vf->lan_vsi_idx];
3939 /* Locked once because multiple functions below iterate list */
3940 spin_lock_bh(&vsi->mac_filter_hash_lock);
3942 /* Check for condition where there was already a port VLAN ID
3943 * filter set and now it is being deleted by setting it to zero.
3944 * Additionally check for the condition where there was a port
3945 * VLAN but now there is a new and different port VLAN being set.
3946 * Before deleting all the old VLAN filters we must add new ones
3947 * with -1 (I40E_VLAN_ANY), otherwise we're left with all our
3948 * MAC addresses deleted.
3950 if ((!(vlan_id || qos) ||
3951 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
3953 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
3955 dev_info(&vsi->back->pdev->dev,
3956 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
3957 vsi->back->hw.aq.asq_last_status);
3958 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3963 if (vsi->info.pvid) {
3964 /* remove all filters on the old VLAN */
3965 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
3969 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3971 ret = i40e_vsi_add_pvid(vsi, vlanprio);
3973 i40e_vsi_remove_pvid(vsi);
3974 spin_lock_bh(&vsi->mac_filter_hash_lock);
3977 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
3978 vlan_id, qos, vf_id);
3980 /* add new VLAN filter for each MAC */
3981 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
3983 dev_info(&vsi->back->pdev->dev,
3984 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
3985 vsi->back->hw.aq.asq_last_status);
3986 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3990 /* remove the previously added non-VLAN MAC filters */
3991 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
3994 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3996 /* Schedule the worker thread to take care of applying changes */
3997 i40e_service_event_schedule(vsi->back);
4000 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4004 /* The Port VLAN needs to be saved across resets the same as the
4005 * default LAN MAC address.
4007 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4015 * i40e_ndo_set_vf_bw
4016 * @netdev: network interface device structure
4017 * @vf_id: VF identifier
4020 * configure VF Tx rate
4022 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4025 struct i40e_netdev_priv *np = netdev_priv(netdev);
4026 struct i40e_pf *pf = np->vsi->back;
4027 struct i40e_vsi *vsi;
4031 /* validate the request */
4032 if (vf_id >= pf->num_alloc_vfs) {
4033 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
4039 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4040 min_tx_rate, vf_id);
4044 vf = &(pf->vf[vf_id]);
4045 vsi = pf->vsi[vf->lan_vsi_idx];
4046 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4047 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4053 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4057 vf->tx_rate = max_tx_rate;
4063 * i40e_ndo_get_vf_config
4064 * @netdev: network interface device structure
4065 * @vf_id: VF identifier
4066 * @ivi: VF configuration structure
4068 * return VF configuration
4070 int i40e_ndo_get_vf_config(struct net_device *netdev,
4071 int vf_id, struct ifla_vf_info *ivi)
4073 struct i40e_netdev_priv *np = netdev_priv(netdev);
4074 struct i40e_vsi *vsi = np->vsi;
4075 struct i40e_pf *pf = vsi->back;
4079 /* validate the request */
4080 if (vf_id >= pf->num_alloc_vfs) {
4081 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4086 vf = &(pf->vf[vf_id]);
4087 /* first VSI is always the LAN VSI */
4088 vsi = pf->vsi[vf->lan_vsi_idx];
4089 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4090 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4098 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4100 ivi->max_tx_rate = vf->tx_rate;
4101 ivi->min_tx_rate = 0;
4102 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4103 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4104 I40E_VLAN_PRIORITY_SHIFT;
4105 if (!vf->link_forced)
4106 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4107 else if (vf->link_up)
4108 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4110 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4111 ivi->spoofchk = vf->spoofchk;
4112 ivi->trusted = vf->trusted;
4120 * i40e_ndo_set_vf_link_state
4121 * @netdev: network interface device structure
4122 * @vf_id: VF identifier
4123 * @link: required link state
4125 * Set the link state of a specified VF, regardless of physical link state
4127 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4129 struct i40e_netdev_priv *np = netdev_priv(netdev);
4130 struct i40e_pf *pf = np->vsi->back;
4131 struct virtchnl_pf_event pfe;
4132 struct i40e_hw *hw = &pf->hw;
4137 /* validate the request */
4138 if (vf_id >= pf->num_alloc_vfs) {
4139 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4144 vf = &pf->vf[vf_id];
4145 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4147 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4148 pfe.severity = PF_EVENT_SEVERITY_INFO;
4151 case IFLA_VF_LINK_STATE_AUTO:
4152 vf->link_forced = false;
4153 pfe.event_data.link_event.link_status =
4154 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
4155 pfe.event_data.link_event.link_speed =
4156 (enum virtchnl_link_speed)
4157 pf->hw.phy.link_info.link_speed;
4159 case IFLA_VF_LINK_STATE_ENABLE:
4160 vf->link_forced = true;
4162 pfe.event_data.link_event.link_status = true;
4163 pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
4165 case IFLA_VF_LINK_STATE_DISABLE:
4166 vf->link_forced = true;
4167 vf->link_up = false;
4168 pfe.event_data.link_event.link_status = false;
4169 pfe.event_data.link_event.link_speed = 0;
4175 /* Notify the VF of its new link state */
4176 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4177 0, (u8 *)&pfe, sizeof(pfe), NULL);
4184 * i40e_ndo_set_vf_spoofchk
4185 * @netdev: network interface device structure
4186 * @vf_id: VF identifier
4187 * @enable: flag to enable or disable feature
4189 * Enable or disable VF spoof checking
4191 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4193 struct i40e_netdev_priv *np = netdev_priv(netdev);
4194 struct i40e_vsi *vsi = np->vsi;
4195 struct i40e_pf *pf = vsi->back;
4196 struct i40e_vsi_context ctxt;
4197 struct i40e_hw *hw = &pf->hw;
4201 /* validate the request */
4202 if (vf_id >= pf->num_alloc_vfs) {
4203 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4208 vf = &(pf->vf[vf_id]);
4209 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4210 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4216 if (enable == vf->spoofchk)
4219 vf->spoofchk = enable;
4220 memset(&ctxt, 0, sizeof(ctxt));
4221 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4222 ctxt.pf_num = pf->hw.pf_id;
4223 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4225 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4226 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4227 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4229 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4238 * i40e_ndo_set_vf_trust
4239 * @netdev: network interface device structure of the pf
4240 * @vf_id: VF identifier
4241 * @setting: trust setting
4243 * Enable or disable VF trust setting
4245 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4247 struct i40e_netdev_priv *np = netdev_priv(netdev);
4248 struct i40e_pf *pf = np->vsi->back;
4252 /* validate the request */
4253 if (vf_id >= pf->num_alloc_vfs) {
4254 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4258 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4259 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4263 vf = &pf->vf[vf_id];
4265 if (setting == vf->trusted)
4268 vf->trusted = setting;
4269 i40e_vc_disable_vf(vf);
4270 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4271 vf_id, setting ? "" : "un");
4273 if (vf->adq_enabled) {
4275 dev_info(&pf->pdev->dev,
4276 "VF %u no longer Trusted, deleting all cloud filters\n",
4278 i40e_del_all_cloud_filters(vf);