1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
6 /*********************notification routines***********************/
10 * @pf: pointer to the PF structure
11 * @v_opcode: operation code
12 * @v_retval: return value
13 * @msg: pointer to the msg buffer
16 * send a message to all VFs on a given PF
18 static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
19 enum virtchnl_ops v_opcode,
20 i40e_status v_retval, u8 *msg,
23 struct i40e_hw *hw = &pf->hw;
24 struct i40e_vf *vf = pf->vf;
27 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
28 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
29 /* Not all VFs are enabled, so skip the ones that are not */
30 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
31 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
34 /* Ignore return value on purpose - a given VF may fail, but
35 * we need to keep going and send to all of them
37 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
43 * i40e_vc_notify_vf_link_state
44 * @vf: pointer to the VF structure
46 * send a link status message to a single VF
48 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
50 struct virtchnl_pf_event pfe;
51 struct i40e_pf *pf = vf->pf;
52 struct i40e_hw *hw = &pf->hw;
53 struct i40e_link_status *ls = &pf->hw.phy.link_info;
54 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
56 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
57 pfe.severity = PF_EVENT_SEVERITY_INFO;
59 /* Always report link is down if the VF queues aren't enabled */
60 if (!vf->queues_enabled) {
61 pfe.event_data.link_event.link_status = false;
62 pfe.event_data.link_event.link_speed = 0;
63 } else if (vf->link_forced) {
64 pfe.event_data.link_event.link_status = vf->link_up;
65 pfe.event_data.link_event.link_speed =
66 (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
68 pfe.event_data.link_event.link_status =
69 ls->link_info & I40E_AQ_LINK_UP;
70 pfe.event_data.link_event.link_speed =
71 i40e_virtchnl_link_speed(ls->link_speed);
74 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
75 0, (u8 *)&pfe, sizeof(pfe), NULL);
79 * i40e_vc_notify_link_state
80 * @pf: pointer to the PF structure
82 * send a link status message to all VFs on a given PF
84 void i40e_vc_notify_link_state(struct i40e_pf *pf)
88 for (i = 0; i < pf->num_alloc_vfs; i++)
89 i40e_vc_notify_vf_link_state(&pf->vf[i]);
93 * i40e_vc_notify_reset
94 * @pf: pointer to the PF structure
96 * indicate a pending reset to all VFs on a given PF
98 void i40e_vc_notify_reset(struct i40e_pf *pf)
100 struct virtchnl_pf_event pfe;
102 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
103 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
104 i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
105 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
109 * i40e_vc_notify_vf_reset
110 * @vf: pointer to the VF structure
112 * indicate a pending reset to the given VF
114 void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
116 struct virtchnl_pf_event pfe;
119 /* validate the request */
120 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
123 /* verify if the VF is in either init or active before proceeding */
124 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
125 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
128 abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
130 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
131 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
132 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
134 sizeof(struct virtchnl_pf_event), NULL);
136 /***********************misc routines*****************************/
140 * @vf: pointer to the VF info
142 * Disable the VF through a SW reset.
144 static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
148 i40e_vc_notify_vf_reset(vf);
150 /* We want to ensure that an actual reset is initiated after this
151 * function is called. However, we do not want to wait forever, so
152 * we'll give a reasonable time and print a message if we failed to
155 for (i = 0; i < 20; i++) {
156 if (i40e_reset_vf(vf, false))
158 usleep_range(10000, 20000);
161 dev_warn(&vf->pf->pdev->dev,
162 "Failed to initiate reset for VF %d after 200 milliseconds\n",
167 * i40e_vc_isvalid_vsi_id
168 * @vf: pointer to the VF info
169 * @vsi_id: VF relative VSI id
171 * check for the valid VSI id
173 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
175 struct i40e_pf *pf = vf->pf;
176 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
178 return (vsi && (vsi->vf_id == vf->vf_id));
182 * i40e_vc_isvalid_queue_id
183 * @vf: pointer to the VF info
185 * @qid: vsi relative queue id
187 * check for the valid queue id
189 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
192 struct i40e_pf *pf = vf->pf;
193 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
195 return (vsi && (qid < vsi->alloc_queue_pairs));
199 * i40e_vc_isvalid_vector_id
200 * @vf: pointer to the VF info
201 * @vector_id: VF relative vector id
203 * check for the valid vector id
205 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
207 struct i40e_pf *pf = vf->pf;
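/* vector_id is VF-relative; any index below the per-VF MSI-X
 * vector count (including vector 0) is considered valid
 */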
209 return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
212 /***********************vf resource mgmt routines*****************/
215 * i40e_vc_get_pf_queue_id
216 * @vf: pointer to the VF info
217 * @vsi_id: id of VSI as provided by the FW
218 * @vsi_queue_id: vsi relative queue id
220 * return PF relative queue id
222 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
225 struct i40e_pf *pf = vf->pf;
226 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
227 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
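/* A VSI's queue map is either non-contiguous, with one PF queue id
 * stored per VSI queue in queue_mapping[], or contiguous, in which
 * case queue_mapping[0] holds the base PF queue id and the VSI
 * queue id is added as an offset.
 */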
232 if (le16_to_cpu(vsi->info.mapping_flags) &
233 I40E_AQ_VSI_QUE_MAP_NONCONTIG)
235 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
237 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
244 * i40e_get_real_pf_qid
245 * @vf: pointer to the VF info
247 * @queue_id: queue number
249 * wrapper function to get pf_queue_id handling ADq code as well
251 static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
255 if (vf->adq_enabled) {
256 /* Although the VF considers all its queues (can be 1 to 16) as
257 * its own, they may actually belong to different VSIs (up to 4).
258 * We need to find which queue belongs to which VSI.
260 for (i = 0; i < vf->num_tc; i++) {
261 if (queue_id < vf->ch[i].num_qps) {
262 vsi_id = vf->ch[i].vsi_id;
265 /* find right queue id which is relative to a
268 queue_id -= vf->ch[i].num_qps;
272 return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
276 * i40e_config_irq_link_list
277 * @vf: pointer to the VF info
278 * @vsi_id: id of VSI as given by the FW
279 * @vecmap: irq map info
281 * configure irq link list from the map
283 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
284 struct virtchnl_vector_map *vecmap)
286 unsigned long linklistmap = 0, tempmap;
287 struct i40e_pf *pf = vf->pf;
288 struct i40e_hw *hw = &pf->hw;
289 u16 vsi_queue_id, pf_queue_id;
290 enum i40e_queue_type qtype;
291 u16 next_q, vector_id, size;
295 vector_id = vecmap->vector_id;
298 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
300 reg_idx = I40E_VPINT_LNKLSTN(
301 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
304 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
305 /* Special case - No queues mapped on this vector */
306 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
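/* Collapse the Rx and Tx queue maps into a single bitmap in which
 * each VSI queue occupies I40E_VIRTCHNL_SUPPORTED_QTYPES adjacent
 * bits (one per queue type), so the link list can be built by
 * walking the set bits in order.
 */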
309 tempmap = vecmap->rxq_map;
310 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
311 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
315 tempmap = vecmap->txq_map;
316 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
317 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
321 size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
322 next_q = find_first_bit(&linklistmap, size);
323 if (unlikely(next_q == size))
326 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
327 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
328 pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
329 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
331 wr32(hw, reg_idx, reg);
333 while (next_q < size) {
335 case I40E_QUEUE_TYPE_RX:
336 reg_idx = I40E_QINT_RQCTL(pf_queue_id);
337 itr_idx = vecmap->rxitr_idx;
339 case I40E_QUEUE_TYPE_TX:
340 reg_idx = I40E_QINT_TQCTL(pf_queue_id);
341 itr_idx = vecmap->txitr_idx;
347 next_q = find_next_bit(&linklistmap, size, next_q + 1);
349 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
350 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
351 pf_queue_id = i40e_get_real_pf_qid(vf,
355 pf_queue_id = I40E_QUEUE_END_OF_LIST;
359 /* format for the RQCTL & TQCTL regs is the same */
361 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
362 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
363 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
364 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
365 wr32(hw, reg_idx, reg);
368 /* if the VF is running in polling mode and using interrupt zero,
369 * we need to disable auto-mask when enabling the zero interrupt for VFs.
371 if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
373 reg = rd32(hw, I40E_GLINT_CTL);
374 if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
375 reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
376 wr32(hw, I40E_GLINT_CTL, reg);
385 * i40e_release_iwarp_qvlist
386 * @vf: pointer to the VF.
389 static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
391 struct i40e_pf *pf = vf->pf;
392 struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
396 if (!vf->qvlist_info)
399 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
400 for (i = 0; i < qvlist_info->num_vectors; i++) {
401 struct virtchnl_iwarp_qv_info *qv_info;
402 u32 next_q_index, next_q_type;
403 struct i40e_hw *hw = &pf->hw;
404 u32 v_idx, reg_idx, reg;
406 qv_info = &qvlist_info->qv_info[i];
409 v_idx = qv_info->v_idx;
410 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
411 /* Figure out the queue after CEQ and make that the
414 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
415 reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
416 next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
417 >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
418 next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
419 >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
421 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
422 reg = (next_q_index &
423 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
425 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
427 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
430 kfree(vf->qvlist_info);
431 vf->qvlist_info = NULL;
435 * i40e_config_iwarp_qvlist
436 * @vf: pointer to the VF info
437 * @qvlist_info: queue and vector list
439 * Return 0 on success or < 0 on error
441 static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
442 struct virtchnl_iwarp_qvlist_info *qvlist_info)
444 struct i40e_pf *pf = vf->pf;
445 struct i40e_hw *hw = &pf->hw;
446 struct virtchnl_iwarp_qv_info *qv_info;
447 u32 v_idx, i, reg_idx, reg;
448 u32 next_q_idx, next_q_type;
452 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
454 if (qvlist_info->num_vectors > msix_vf) {
455 dev_warn(&pf->pdev->dev,
456 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
457 qvlist_info->num_vectors,
463 kfree(vf->qvlist_info);
464 vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
465 qvlist_info->num_vectors - 1),
467 if (!vf->qvlist_info) {
471 vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
473 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
474 for (i = 0; i < qvlist_info->num_vectors; i++) {
475 qv_info = &qvlist_info->qv_info[i];
479 /* Validate that the vector id belongs to this VF */
480 if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
485 v_idx = qv_info->v_idx;
487 vf->qvlist_info->qv_info[i] = *qv_info;
489 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
490 /* We might be sharing the interrupt, so get the first queue
491 * index and type, push it down the list by adding the new
492 * queue on top. Also link it with the new queue in CEQCTL.
494 reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
495 next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
496 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
497 next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
498 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
500 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
501 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
502 reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
503 (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
504 (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
505 (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
506 (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
507 wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
509 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
510 reg = (qv_info->ceq_idx &
511 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
512 (I40E_QUEUE_TYPE_PE_CEQ <<
513 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
514 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
517 if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
518 reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
519 (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
520 (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
522 wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
528 kfree(vf->qvlist_info);
529 vf->qvlist_info = NULL;
535 * i40e_config_vsi_tx_queue
536 * @vf: pointer to the VF info
537 * @vsi_id: id of VSI as provided by the FW
538 * @vsi_queue_id: vsi relative queue index
539 * @info: config. info
543 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
545 struct virtchnl_txq_info *info)
547 struct i40e_pf *pf = vf->pf;
548 struct i40e_hw *hw = &pf->hw;
549 struct i40e_hmc_obj_txq tx_ctx;
550 struct i40e_vsi *vsi;
555 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
559 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
560 vsi = i40e_find_vsi_from_id(pf, vsi_id);
566 /* clear the context structure first */
567 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
569 /* only set the required fields */
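/* the HMC Tx queue context stores the descriptor ring base address
 * in 128-byte units, hence the divide by 128
 */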
570 tx_ctx.base = info->dma_ring_addr / 128;
571 tx_ctx.qlen = info->ring_len;
572 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
573 tx_ctx.rdylist_act = 0;
574 tx_ctx.head_wb_ena = info->headwb_enabled;
575 tx_ctx.head_wb_addr = info->dma_headwb_addr;
577 /* clear the context in the HMC */
578 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
580 dev_err(&pf->pdev->dev,
581 "Failed to clear VF LAN Tx queue context %d, error: %d\n",
587 /* set the context in the HMC */
588 ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
590 dev_err(&pf->pdev->dev,
591 "Failed to set VF LAN Tx queue context %d error: %d\n",
597 /* associate this queue with the PCI VF function */
598 qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
599 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
600 & I40E_QTX_CTL_PF_INDX_MASK);
601 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
602 << I40E_QTX_CTL_VFVM_INDX_SHIFT)
603 & I40E_QTX_CTL_VFVM_INDX_MASK);
604 wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
612 * i40e_config_vsi_rx_queue
613 * @vf: pointer to the VF info
614 * @vsi_id: id of VSI as provided by the FW
615 * @vsi_queue_id: vsi relative queue index
616 * @info: config. info
620 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
622 struct virtchnl_rxq_info *info)
624 struct i40e_pf *pf = vf->pf;
625 struct i40e_hw *hw = &pf->hw;
626 struct i40e_hmc_obj_rxq rx_ctx;
630 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
632 /* clear the context structure first */
633 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
635 /* only set the required fields */
636 rx_ctx.base = info->dma_ring_addr / 128;
637 rx_ctx.qlen = info->ring_len;
639 if (info->splithdr_enabled) {
640 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
642 I40E_RX_SPLIT_TCP_UDP |
644 /* header length validation */
645 if (info->hdr_size > ((2 * 1024) - 64)) {
649 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
651 /* set split mode 10b */
652 rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
655 /* databuffer length validation */
656 if (info->databuffer_size > ((16 * 1024) - 128)) {
660 rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
662 /* max pkt. length validation */
663 if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
667 rx_ctx.rxmax = info->max_pkt_size;
669 /* always enable 32-byte descriptors */
673 rx_ctx.lrxqthresh = 1;
678 /* clear the context in the HMC */
679 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
681 dev_err(&pf->pdev->dev,
682 "Failed to clear VF LAN Rx queue context %d, error: %d\n",
688 /* set the context in the HMC */
689 ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
691 dev_err(&pf->pdev->dev,
692 "Failed to set VF LAN Rx queue context %d error: %d\n",
704 * @vf: pointer to the VF info
705 * @idx: VSI index, applies only for ADq mode, zero otherwise
707 * alloc VF vsi context & resources
709 static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
711 struct i40e_mac_filter *f = NULL;
712 struct i40e_pf *pf = vf->pf;
713 struct i40e_vsi *vsi;
717 vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
721 dev_err(&pf->pdev->dev,
722 "add vsi failed for VF %d, aq_err %d\n",
723 vf->vf_id, pf->hw.aq.asq_last_status);
725 goto error_alloc_vsi_res;
729 u64 hena = i40e_pf_get_default_rss_hena(pf);
730 u8 broadcast[ETH_ALEN];
732 vf->lan_vsi_idx = vsi->idx;
733 vf->lan_vsi_id = vsi->id;
734 /* If the port VLAN has been configured and the VF driver
735 * was then removed, the VSI port VLAN
736 * configuration was destroyed. Check if there is
737 * a port VLAN and restore the VSI configuration if
740 if (vf->port_vlan_id)
741 i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
743 spin_lock_bh(&vsi->mac_filter_hash_lock);
744 if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
745 f = i40e_add_mac_filter(vsi,
746 vf->default_lan_addr.addr);
748 dev_info(&pf->pdev->dev,
749 "Could not add MAC filter %pM for VF %d\n",
750 vf->default_lan_addr.addr, vf->vf_id);
752 eth_broadcast_addr(broadcast);
753 f = i40e_add_mac_filter(vsi, broadcast);
755 dev_info(&pf->pdev->dev,
756 "Could not allocate VF broadcast filter\n");
757 spin_unlock_bh(&vsi->mac_filter_hash_lock);
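/* program the default RSS hash enable set for this VF; the
 * 64-bit hena value spans the two I40E_VFQF_HENA1 registers
 */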
758 wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
759 wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
760 /* program mac filter only for VF VSI */
761 ret = i40e_sync_vsi_filters(vsi);
763 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
766 /* store the VSI index and id for ADq, and don't apply the MAC filter */
767 if (vf->adq_enabled) {
768 vf->ch[idx].vsi_idx = vsi->idx;
769 vf->ch[idx].vsi_id = vsi->id;
772 /* Set VF bandwidth if specified */
774 max_tx_rate = vf->tx_rate;
775 } else if (vf->ch[idx].max_tx_rate) {
776 max_tx_rate = vf->ch[idx].max_tx_rate;
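/* scale the configured rate down by I40E_BW_CREDIT_DIVISOR to get
 * the bandwidth credit units the firmware expects
 */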
780 max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
781 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
782 max_tx_rate, 0, NULL);
784 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
793 * i40e_map_pf_queues_to_vsi
794 * @vf: pointer to the VF info
796 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
797 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues to the VSI.
799 static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
801 struct i40e_pf *pf = vf->pf;
802 struct i40e_hw *hw = &pf->hw;
803 u32 reg, num_tc = 1; /* VF has at least one traffic class */
810 for (i = 0; i < num_tc; i++) {
811 if (vf->adq_enabled) {
812 qps = vf->ch[i].num_qps;
813 vsi_id = vf->ch[i].vsi_id;
815 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
816 vsi_id = vf->lan_vsi_id;
819 for (j = 0; j < 7; j++) {
824 u16 qid = i40e_vc_get_pf_queue_id(vf,
828 qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
832 i40e_write_rx_ctl(hw,
833 I40E_VSILAN_QTABLE(j, vsi_id),
840 * i40e_map_pf_to_vf_queues
841 * @vf: pointer to the VF info
843 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
844 * function takes care of the second part, VPLAN_QTABLE, and completes VF mappings.
846 static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
848 struct i40e_pf *pf = vf->pf;
849 struct i40e_hw *hw = &pf->hw;
850 u32 reg, total_qps = 0;
851 u32 qps, num_tc = 1; /* VF has at least one traffic class */
858 for (i = 0; i < num_tc; i++) {
859 if (vf->adq_enabled) {
860 qps = vf->ch[i].num_qps;
861 vsi_id = vf->ch[i].vsi_id;
863 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
864 vsi_id = vf->lan_vsi_id;
867 for (j = 0; j < qps; j++) {
868 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
870 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
871 wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
879 * i40e_enable_vf_mappings
880 * @vf: pointer to the VF info
884 static void i40e_enable_vf_mappings(struct i40e_vf *vf)
886 struct i40e_pf *pf = vf->pf;
887 struct i40e_hw *hw = &pf->hw;
890 /* Tell the hardware we're using noncontiguous mapping. HW requires
891 * that VF queues be mapped using this method, even when they are
892 * contiguous in real life
894 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
895 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
897 /* enable VF vplan_qtable mappings */
898 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
899 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
901 i40e_map_pf_to_vf_queues(vf);
902 i40e_map_pf_queues_to_vsi(vf);
908 * i40e_disable_vf_mappings
909 * @vf: pointer to the VF info
911 * disable VF mappings
913 static void i40e_disable_vf_mappings(struct i40e_vf *vf)
915 struct i40e_pf *pf = vf->pf;
916 struct i40e_hw *hw = &pf->hw;
919 /* disable qp mappings */
920 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
921 for (i = 0; i < I40E_MAX_VSI_QP; i++)
922 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
923 I40E_QUEUE_END_OF_LIST);
929 * @vf: pointer to the VF info
933 static void i40e_free_vf_res(struct i40e_vf *vf)
935 struct i40e_pf *pf = vf->pf;
936 struct i40e_hw *hw = &pf->hw;
940 /* Start by disabling VF's configuration API to prevent the OS from
941 * accessing the VF's VSI after it's freed / invalidated.
943 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
945 /* It's possible the VF had requested more queues than the default, so
946 * do the accounting here when we're about to free them.
948 if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
949 pf->queues_left += vf->num_queue_pairs -
950 I40E_DEFAULT_QUEUES_PER_VF;
953 /* free vsi & disconnect it from the parent uplink */
954 if (vf->lan_vsi_idx) {
955 i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
960 /* do the accounting and remove additional ADq VSIs */
961 if (vf->adq_enabled && vf->ch[0].vsi_idx) {
962 for (j = 0; j < vf->num_tc; j++) {
963 /* At this point VSI0 is already released, so don't
964 * release it again; only clear its values in the
965 * structure variables
968 i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
969 vf->ch[j].vsi_idx = 0;
970 vf->ch[j].vsi_id = 0;
973 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
975 /* disable interrupts so the VF starts in a known state */
976 for (i = 0; i < msix_vf; i++) {
977 /* format is the same for both registers */
979 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
981 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
984 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
988 /* clear the irq settings */
989 for (i = 0; i < msix_vf; i++) {
990 /* format is the same for both registers */
992 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
994 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
997 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
998 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
999 wr32(hw, reg_idx, reg);
1002 /* reset some of the state variables keeping track of the resources */
1003 vf->num_queue_pairs = 0;
1004 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1005 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1010 * @vf: pointer to the VF info
1012 * allocate VF resources
1014 static int i40e_alloc_vf_res(struct i40e_vf *vf)
1016 struct i40e_pf *pf = vf->pf;
1017 int total_queue_pairs = 0;
1020 if (vf->num_req_queues &&
1021 vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
1022 pf->num_vf_qps = vf->num_req_queues;
1024 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
1026 /* allocate hw vsi context & associated resources */
1027 ret = i40e_alloc_vsi_res(vf, 0);
1030 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1032 /* allocate additional VSIs based on tc information for ADq */
1033 if (vf->adq_enabled) {
1034 if (pf->queues_left >=
1035 (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
1036 /* TC 0 always belongs to VF VSI */
1037 for (idx = 1; idx < vf->num_tc; idx++) {
1038 ret = i40e_alloc_vsi_res(vf, idx);
1042 /* send correct number of queues */
1043 total_queue_pairs = I40E_MAX_VF_QUEUES;
1045 dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
1047 vf->adq_enabled = false;
1051 /* We account for each VF to get a default number of queue pairs. If
1052 * the VF has now requested more, we need to account for that to make
1053 * certain we never request more queues than we actually have left in
1056 if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
1058 total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
1061 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1063 clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1065 /* store the total qps number for the runtime
1068 vf->num_queue_pairs = total_queue_pairs;
1070 /* VF is now completely initialized */
1071 set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1075 i40e_free_vf_res(vf);
1080 #define VF_DEVICE_STATUS 0xAA
1081 #define VF_TRANS_PENDING_MASK 0x20
1083 * i40e_quiesce_vf_pci
1084 * @vf: pointer to the VF structure
1086 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
1087 * if the transactions never clear.
1089 static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
1091 struct i40e_pf *pf = vf->pf;
1092 struct i40e_hw *hw = &pf->hw;
1096 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
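/* read the VF's PCI Device Status register through the PF's indirect
 * config space access registers and poll until the Transactions
 * Pending bit clears
 */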
1098 wr32(hw, I40E_PF_PCI_CIAA,
1099 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
1100 for (i = 0; i < 100; i++) {
1101 reg = rd32(hw, I40E_PF_PCI_CIAD);
1102 if ((reg & VF_TRANS_PENDING_MASK) == 0)
1109 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
1112 * i40e_config_vf_promiscuous_mode
1113 * @vf: pointer to the VF info
1115 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
1116 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
1118 * Called from the VF to configure the promiscuous mode of
1119 * VF VSIs and from the VF reset path to reset promiscuous mode.
1121 static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
1126 struct i40e_pf *pf = vf->pf;
1127 struct i40e_hw *hw = &pf->hw;
1128 struct i40e_mac_filter *f;
1129 i40e_status aq_ret = 0;
1130 struct i40e_vsi *vsi;
1133 vsi = i40e_find_vsi_from_id(pf, vsi_id);
1134 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
1135 return I40E_ERR_PARAM;
1137 if (vf->port_vlan_id) {
1138 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
1143 int aq_err = pf->hw.aq.asq_last_status;
1145 dev_err(&pf->pdev->dev,
1146 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1148 i40e_stat_str(&pf->hw, aq_ret),
1149 i40e_aq_str(&pf->hw, aq_err));
1153 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
1158 int aq_err = pf->hw.aq.asq_last_status;
1160 dev_err(&pf->pdev->dev,
1161 "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
1163 i40e_stat_str(&pf->hw, aq_ret),
1164 i40e_aq_str(&pf->hw, aq_err));
1167 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1168 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1169 if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1171 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
1177 int aq_err = pf->hw.aq.asq_last_status;
1179 dev_err(&pf->pdev->dev,
1180 "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
1182 i40e_stat_str(&pf->hw, aq_ret),
1183 i40e_aq_str(&pf->hw, aq_err));
1186 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
1192 int aq_err = pf->hw.aq.asq_last_status;
1194 dev_err(&pf->pdev->dev,
1195 "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
1197 i40e_stat_str(&pf->hw, aq_ret),
1198 i40e_aq_str(&pf->hw, aq_err));
1203 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
1206 int aq_err = pf->hw.aq.asq_last_status;
1208 dev_err(&pf->pdev->dev,
1209 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1211 i40e_stat_str(&pf->hw, aq_ret),
1212 i40e_aq_str(&pf->hw, aq_err));
1216 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
1219 int aq_err = pf->hw.aq.asq_last_status;
1221 dev_err(&pf->pdev->dev,
1222 "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
1224 i40e_stat_str(&pf->hw, aq_ret),
1225 i40e_aq_str(&pf->hw, aq_err));
1232 * i40e_trigger_vf_reset
1233 * @vf: pointer to the VF structure
1234 * @flr: VFLR was issued or not
1236 * Trigger hardware to start a reset for a particular VF. Expects the caller
1237 * to wait the proper amount of time to allow hardware to reset the VF before
1238 * it cleans up and restores VF functionality.
1240 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
1242 struct i40e_pf *pf = vf->pf;
1243 struct i40e_hw *hw = &pf->hw;
1244 u32 reg, reg_idx, bit_idx;
1247 clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1249 /* Disable VF's configuration API during reset. The flag is re-enabled
1250 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
1251 * It's normally disabled in i40e_free_vf_res(), but it's safer
1252 * to do it earlier to give any VF config functions that may still
1253 * be running at this point some time to finish.
1255 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1257 /* In the case of a VFLR, the HW has already reset the VF and we
1258 * just need to clean up, so don't hit the VFRTRIG register.
1261 /* reset VF using VPGEN_VFRTRIG reg */
1262 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1263 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1264 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1267 /* clear the VFLR bit in GLGEN_VFLRSTAT */
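/* GLGEN_VFLRSTAT is an array of 32-bit registers with one bit per
 * absolute VF id, hence the divide and modulo by 32
 */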
1268 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1269 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
1270 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1273 if (i40e_quiesce_vf_pci(vf))
1274 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
1279 * i40e_cleanup_reset_vf
1280 * @vf: pointer to the VF structure
1282 * Cleanup a VF after the hardware reset is finished. Expects the caller to
1283 * have verified whether the reset is finished properly, and ensure the
1284 * minimum amount of wait time has passed.
1286 static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
1288 struct i40e_pf *pf = vf->pf;
1289 struct i40e_hw *hw = &pf->hw;
1292 /* disable promisc modes in case they were enabled */
1293 i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
1295 /* free VF resources to begin resetting the VSI state */
1296 i40e_free_vf_res(vf);
1298 /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
1299 * By doing this we allow HW to access VF memory at any point. If we
1300 * did it any sooner, HW could access memory while it was being freed
1301 * in i40e_free_vf_res(), causing an IOMMU fault.
1303 * On the other hand, this needs to be done ASAP, because the VF driver
1304 * is waiting for this to happen and may report a timeout. It's
1305 * harmless, but it gets logged into Guest OS kernel log, so best avoid
1308 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1309 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1310 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1312 /* reallocate VF resources to finish resetting the VSI state */
1313 if (!i40e_alloc_vf_res(vf)) {
1314 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1315 i40e_enable_vf_mappings(vf);
1316 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1317 clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1318 /* Do not notify the client during VF init */
1319 if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
1321 i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1325 /* Tell the VF driver the reset is done. This needs to be done only
1326 * after VF has been fully initialized, because the VF driver may
1327 * request resources immediately after setting this flag.
1329 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1334 * @vf: pointer to the VF structure
1335 * @flr: VFLR was issued or not
1337 * Returns true if the VF is reset, false otherwise.
1339 bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1341 struct i40e_pf *pf = vf->pf;
1342 struct i40e_hw *hw = &pf->hw;
1347 /* If the VFs have been disabled, this means something else is
1348 * resetting the VF, so we shouldn't continue.
1350 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1353 i40e_trigger_vf_reset(vf, flr);
1355 /* poll VPGEN_VFRSTAT reg to make sure
1356 * that reset is complete
1358 for (i = 0; i < 10; i++) {
1359 /* VF reset requires driver to first reset the VF and then
1360 * poll the status register to make sure that the reset
1361 * completed successfully. Due to internal HW FIFO flushes,
1362 * we must wait 10ms before the register will be valid.
1364 usleep_range(10000, 20000);
1365 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1366 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1373 usleep_range(10000, 20000);
1376 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1378 usleep_range(10000, 20000);
1380 /* On initial reset, we don't have any queues to disable */
1381 if (vf->lan_vsi_idx != 0)
1382 i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1384 i40e_cleanup_reset_vf(vf);
1387 clear_bit(__I40E_VF_DISABLE, pf->state);
1393 * i40e_reset_all_vfs
1394 * @pf: pointer to the PF structure
1395 * @flr: VFLR was issued or not
1397 * Reset all allocated VFs in one go. First, tell the hardware to reset each
1398 * VF, then do all the waiting in one chunk, and finally finish restoring each
1399 * VF after the wait. This is useful during PF routines which need to reset
1400 * all VFs, as otherwise it must perform these resets in a serialized fashion.
1402 * Returns true if any VFs were reset, and false otherwise.
1404 bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1406 struct i40e_hw *hw = &pf->hw;
1411 /* If we don't have any VFs, then there is nothing to reset */
1412 if (!pf->num_alloc_vfs)
1415 /* If VFs have been disabled, there is no need to reset */
1416 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1419 /* Begin reset on all VFs at once */
1420 for (v = 0; v < pf->num_alloc_vfs; v++)
1421 i40e_trigger_vf_reset(&pf->vf[v], flr);
1423 /* HW requires some time to make sure it can flush the FIFO for a VF
1424 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1425 * sequence to make sure that it has completed. We'll keep track of
1426 * the VFs using a simple iterator that increments once that VF has
1427 * finished resetting.
1429 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1430 usleep_range(10000, 20000);
1432 /* Check each VF in sequence, beginning with the VF that failed
1433 * the previous check.
1435 while (v < pf->num_alloc_vfs) {
1437 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1438 if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1441 /* If the current VF has finished resetting, move on
1442 * to the next VF in sequence.
1449 usleep_range(10000, 20000);
1451 /* Display a warning if at least one VF didn't manage to reset in
1452 * time, but continue on with the operation.
1454 if (v < pf->num_alloc_vfs)
1455 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1457 usleep_range(10000, 20000);
1459 /* Begin disabling all the rings associated with VFs, but do not wait
1462 for (v = 0; v < pf->num_alloc_vfs; v++) {
1463 /* On initial reset, we don't have any queues to disable */
1464 if (pf->vf[v].lan_vsi_idx == 0)
1467 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
1470 /* Now that we've notified HW to disable all of the VF rings, wait
1471 * until they finish.
1473 for (v = 0; v < pf->num_alloc_vfs; v++) {
1474 /* On initial reset, we don't have any queues to disable */
1475 if (pf->vf[v].lan_vsi_idx == 0)
1478 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
1481 /* HW may need up to 50ms to finish disabling the RX queues. We
1482 * minimize the wait by delaying only once for all VFs.
1486 /* Finish the reset on each VF */
1487 for (v = 0; v < pf->num_alloc_vfs; v++)
1488 i40e_cleanup_reset_vf(&pf->vf[v]);
1491 clear_bit(__I40E_VF_DISABLE, pf->state);
1498 * @pf: pointer to the PF structure
1502 void i40e_free_vfs(struct i40e_pf *pf)
1504 struct i40e_hw *hw = &pf->hw;
1505 u32 reg_idx, bit_idx;
1510 while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1511 usleep_range(1000, 2000);
1513 i40e_notify_client_of_vf_enable(pf, 0);
1515 /* Amortize wait time by stopping all VFs at the same time */
1516 for (i = 0; i < pf->num_alloc_vfs; i++) {
1517 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1520 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1523 for (i = 0; i < pf->num_alloc_vfs; i++) {
1524 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1527 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1530 /* Disable IOV before freeing resources. This lets any VF drivers
1531 * running in the host get themselves cleaned up before we yank
1532 * the carpet out from underneath their feet.
1534 if (!pci_vfs_assigned(pf->pdev))
1535 pci_disable_sriov(pf->pdev);
1537 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1539 /* free up VF resources */
1540 tmp = pf->num_alloc_vfs;
1541 pf->num_alloc_vfs = 0;
1542 for (i = 0; i < tmp; i++) {
1543 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1544 i40e_free_vf_res(&pf->vf[i]);
1545 /* disable qp mappings */
1546 i40e_disable_vf_mappings(&pf->vf[i]);
1552 /* This check is for when the driver is unloaded while VFs are
1553 * assigned. Setting the number of VFs to 0 through sysfs is caught
1554 * before this function ever gets called.
1556 if (!pci_vfs_assigned(pf->pdev)) {
1557 /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
1558 * work correctly when SR-IOV gets re-enabled.
1560 for (vf_id = 0; vf_id < tmp; vf_id++) {
1561 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1562 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1563 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1566 clear_bit(__I40E_VF_DISABLE, pf->state);
1569 #ifdef CONFIG_PCI_IOV
1572 * @pf: pointer to the PF structure
1573 * @num_alloc_vfs: number of VFs to allocate
1575 * allocate VF resources
1577 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1579 struct i40e_vf *vfs;
1582 /* Disable interrupt 0 so we don't try to handle the VFLR. */
1583 i40e_irq_dynamic_disable_icr0(pf);
1585 /* Check to see if we're just allocating resources for extant VFs */
1586 if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1587 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1589 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1590 pf->num_alloc_vfs = 0;
1594 /* allocate memory */
1595 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1602 /* apply default profile */
1603 for (i = 0; i < num_alloc_vfs; i++) {
1605 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1608 /* assign default capabilities */
1609 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1610 vfs[i].spoofchk = true;
1612 set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1615 pf->num_alloc_vfs = num_alloc_vfs;
1617 /* VF resources get allocated during reset */
1618 i40e_reset_all_vfs(pf, false);
1620 i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1626 /* Re-enable interrupt 0. */
1627 i40e_irq_dynamic_enable_icr0(pf);
1633 * i40e_pci_sriov_enable
1634 * @pdev: pointer to a pci_dev structure
1635 * @num_vfs: number of VFs to allocate
1637 * Enable or change the number of VFs
1639 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1641 #ifdef CONFIG_PCI_IOV
1642 struct i40e_pf *pf = pci_get_drvdata(pdev);
1643 int pre_existing_vfs = pci_num_vf(pdev);
1646 if (test_bit(__I40E_TESTING, pf->state)) {
1647 dev_warn(&pdev->dev,
1648 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1653 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1655 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1658 if (num_vfs > pf->num_req_vfs) {
1659 dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1660 num_vfs, pf->num_req_vfs);
1665 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1666 err = i40e_alloc_vfs(pf, num_vfs);
1668 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1682 * i40e_pci_sriov_configure
1683 * @pdev: pointer to a pci_dev structure
1684 * @num_vfs: number of VFs to allocate
1686 * Enable or change the number of VFs. Called when the user updates the number
1689 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1691 struct i40e_pf *pf = pci_get_drvdata(pdev);
1694 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1695 dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1700 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1701 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1702 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
1704 ret = i40e_pci_sriov_enable(pdev, num_vfs);
1705 goto sriov_configure_out;
1708 if (!pci_vfs_assigned(pf->pdev)) {
1710 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1711 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
1713 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1715 goto sriov_configure_out;
1717 sriov_configure_out:
1718 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1722 /***********************virtual channel routines******************/
1725 * i40e_vc_send_msg_to_vf
1726 * @vf: pointer to the VF info
1727 * @v_opcode: virtual channel opcode
1728 * @v_retval: virtual channel return value
1729 * @msg: pointer to the msg buffer
1730 * @msglen: msg length
1734 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1735 u32 v_retval, u8 *msg, u16 msglen)
1742 /* validate the request */
1743 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1748 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1750 /* single place to detect unsuccessful return values */
1752 vf->num_invalid_msgs++;
1753 dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1754 vf->vf_id, v_opcode, v_retval);
1755 if (vf->num_invalid_msgs >
1756 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1757 dev_err(&pf->pdev->dev,
1758 "Number of invalid messages exceeded for VF %d\n",
1760 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1761 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1764 vf->num_valid_msgs++;
1765 /* reset the invalid counter if a valid message is received */
1766 vf->num_invalid_msgs = 0;
1769 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1772 dev_info(&pf->pdev->dev,
1773 "Unable to send the message to VF %d aq_err %d\n",
1774 vf->vf_id, pf->hw.aq.asq_last_status);
1782 * i40e_vc_send_resp_to_vf
1783 * @vf: pointer to the VF info
1784 * @opcode: operation code
1785 * @retval: return value
1787 * send resp msg to VF
1789 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1790 enum virtchnl_ops opcode,
1793 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1797 * i40e_vc_get_version_msg
1798 * @vf: pointer to the VF info
1799 * @msg: pointer to the msg buffer
1801 * called from the VF to request the API version used by the PF
1803 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
1805 struct virtchnl_version_info info = {
1806 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1809 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1810 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1811 if (VF_IS_V10(&vf->vf_ver))
1812 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1813 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1814 I40E_SUCCESS, (u8 *)&info,
1815 sizeof(struct virtchnl_version_info));
1819 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
1820 * @vf: pointer to VF structure
1822 static void i40e_del_qch(struct i40e_vf *vf)
1824 struct i40e_pf *pf = vf->pf;
1827 /* the first element in the array belongs to the primary VF VSI and we
1828 * shouldn't delete it. We should, however, delete the rest of the VSIs created
1830 for (i = 1; i < vf->num_tc; i++) {
1831 if (vf->ch[i].vsi_idx) {
1832 i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
1833 vf->ch[i].vsi_idx = 0;
1834 vf->ch[i].vsi_id = 0;
1840 * i40e_vc_get_vf_resources_msg
1841 * @vf: pointer to the VF info
1842 * @msg: pointer to the msg buffer
1844 * called from the VF to request its resources
1846 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1848 struct virtchnl_vf_resource *vfres = NULL;
1849 struct i40e_pf *pf = vf->pf;
1850 i40e_status aq_ret = 0;
1851 struct i40e_vsi *vsi;
1856 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
1857 aq_ret = I40E_ERR_PARAM;
1861 len = struct_size(vfres, vsi_res, num_vsis);
1862 vfres = kzalloc(len, GFP_KERNEL);
1864 aq_ret = I40E_ERR_NO_MEMORY;
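/* 1.1+ VFs advertise their capabilities in the request message;
 * older VFs cannot, so assume the legacy L2/RSS_REG/VLAN set
 */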
1868 if (VF_IS_V11(&vf->vf_ver))
1869 vf->driver_caps = *(u32 *)msg;
1871 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1872 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1873 VIRTCHNL_VF_OFFLOAD_VLAN;
1875 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1876 vsi = pf->vsi[vf->lan_vsi_idx];
1877 if (!vsi->info.pvid)
1878 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1880 if (i40e_vf_client_capable(pf, vf->vf_id) &&
1881 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
1882 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
1883 set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1885 clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1888 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1889 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1891 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
1892 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
1893 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1895 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1898 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
1899 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1900 vfres->vf_cap_flags |=
1901 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1904 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1905 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1907 if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
1908 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
1909 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1911 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
1912 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
1913 dev_err(&pf->pdev->dev,
1914 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
1916 aq_ret = I40E_ERR_PARAM;
1919 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1922 if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
1923 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1924 vfres->vf_cap_flags |=
1925 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1928 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1929 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1931 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
1932 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
1934 vfres->num_vsis = num_vsis;
1935 vfres->num_queue_pairs = vf->num_queue_pairs;
1936 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1937 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
1938 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
1940 if (vf->lan_vsi_idx) {
1941 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
1942 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1943 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
1944 /* VFs only use TC 0 */
1945 vfres->vsi_res[0].qset_handle
1946 = le16_to_cpu(vsi->info.qs_handle[0]);
1947 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1948 vf->default_lan_addr.addr);
1950 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1953 /* send the response back to the VF */
1954 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
1955 aq_ret, (u8 *)vfres, len);
1962 * i40e_vc_reset_vf_msg
1963 * @vf: pointer to the VF info
1965 * called from the VF to reset itself;
1966 * unlike other virtchnl messages, the PF driver
1967 * doesn't send a response back to the VF
1969 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1971 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
1972 i40e_reset_vf(vf, false);
1976 * i40e_getnum_vf_vsi_vlan_filters
1977 * @vsi: pointer to the vsi
1979 * called to get the number of VLANs offloaded on this VF
1981 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1983 struct i40e_mac_filter *f;
1984 int num_vlans = 0, bkt;
1986 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1987 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1995 * i40e_vc_config_promiscuous_mode_msg
1996 * @vf: pointer to the VF info
1997 * @msg: pointer to the msg buffer
1999 * called from the VF to configure the promiscuous mode of
2002 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2004 struct virtchnl_promisc_info *info =
2005 (struct virtchnl_promisc_info *)msg;
2006 struct i40e_pf *pf = vf->pf;
2007 i40e_status aq_ret = 0;
2008 bool allmulti = false;
2009 bool alluni = false;
2011 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2012 aq_ret = I40E_ERR_PARAM;
2015 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2016 dev_err(&pf->pdev->dev,
2017 "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2020 /* Lie to the VF on purpose, because this is an error we can
2021 * ignore. Unprivileged VF is not a virtual channel error.
2027 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2028 aq_ret = I40E_ERR_PARAM;
2032 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2033 aq_ret = I40E_ERR_PARAM;
2037 /* Multicast promiscuous handling */
2038 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2041 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2043 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2049 if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2051 dev_info(&pf->pdev->dev,
2052 "VF %d successfully set multicast promiscuous mode\n",
2054 } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2056 dev_info(&pf->pdev->dev,
2057 "VF %d successfully unset multicast promiscuous mode\n",
2061 if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2063 dev_info(&pf->pdev->dev,
2064 "VF %d successfully set unicast promiscuous mode\n",
2066 } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2068 dev_info(&pf->pdev->dev,
2069 "VF %d successfully unset unicast promiscuous mode\n",
2073 /* send the response to the VF */
2074 return i40e_vc_send_resp_to_vf(vf,
2075 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2080 * i40e_vc_config_queues_msg
2081 * @vf: pointer to the VF info
2082 * @msg: pointer to the msg buffer
2084 * called from the VF to configure the rx/tx
2087 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2089 struct virtchnl_vsi_queue_config_info *qci =
2090 (struct virtchnl_vsi_queue_config_info *)msg;
2091 struct virtchnl_queue_pair_info *qpi;
2092 struct i40e_pf *pf = vf->pf;
2093 u16 vsi_id, vsi_queue_id = 0;
2094 u16 num_qps_all = 0;
2095 i40e_status aq_ret = 0;
2096 int i, j = 0, idx = 0;
2098 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2099 aq_ret = I40E_ERR_PARAM;
2103 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2104 aq_ret = I40E_ERR_PARAM;
2108 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2109 aq_ret = I40E_ERR_PARAM;
2113 if (vf->adq_enabled) {
2114 for (i = 0; i < I40E_MAX_VF_VSI; i++)
2115 num_qps_all += vf->ch[i].num_qps;
2116 if (num_qps_all != qci->num_queue_pairs) {
2117 aq_ret = I40E_ERR_PARAM;
2122 vsi_id = qci->vsi_id;
2124 for (i = 0; i < qci->num_queue_pairs; i++) {
2125 qpi = &qci->qpair[i];
2127 if (!vf->adq_enabled) {
2128 if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2129 qpi->txq.queue_id)) {
2130 aq_ret = I40E_ERR_PARAM;
2134 vsi_queue_id = qpi->txq.queue_id;
2136 if (qpi->txq.vsi_id != qci->vsi_id ||
2137 qpi->rxq.vsi_id != qci->vsi_id ||
2138 qpi->rxq.queue_id != vsi_queue_id) {
2139 aq_ret = I40E_ERR_PARAM;
2144 if (vf->adq_enabled) {
2145 if (idx >= ARRAY_SIZE(vf->ch)) {
2146 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2149 vsi_id = vf->ch[idx].vsi_id;
2152 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2154 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2156 aq_ret = I40E_ERR_PARAM;
2160 /* For ADq there can be up to 4 VSIs with max 4 queues each.
2161 * The VF does not know about these additional VSIs; all
2162 * it cares about is its own queues. The PF configures these
2163 * queues to the appropriate VSIs based on the TC mapping
2165 if (vf->adq_enabled) {
2166 if (idx >= ARRAY_SIZE(vf->ch)) {
2167 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2170 if (j == (vf->ch[idx].num_qps - 1)) {
2172 j = 0; /* resetting the queue count */
2180 /* set vsi num_queue_pairs in use to num configured by VF */
2181 if (!vf->adq_enabled) {
2182 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2183 qci->num_queue_pairs;
2185 for (i = 0; i < vf->num_tc; i++)
2186 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
2191 /* send the response to the VF */
2192 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2197 * i40e_validate_queue_map
2199 * @queuemap: Tx or Rx queue map
2201 * check if Tx or Rx queue map is valid
2203 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2204 unsigned long queuemap)
2206 u16 vsi_queue_id, queue_id;
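/* With ADq enabled the VF's flat queue index spans several VSIs,
 * so translate it to the owning channel VSI and the queue offset
 * within that VSI before validating
 */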
2208 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2209 if (vf->adq_enabled) {
2210 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2211 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2213 queue_id = vsi_queue_id;
2216 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2224 * i40e_vc_config_irq_map_msg
2225 * @vf: pointer to the VF info
2226 * @msg: pointer to the msg buffer
2228 * called from the VF to configure the irq to
2231 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2233 struct virtchnl_irq_map_info *irqmap_info =
2234 (struct virtchnl_irq_map_info *)msg;
2235 struct virtchnl_vector_map *map;
2237 i40e_status aq_ret = 0;
2240 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2241 aq_ret = I40E_ERR_PARAM;
2245 if (irqmap_info->num_vectors >
2246 vf->pf->hw.func_caps.num_msix_vectors_vf) {
2247 aq_ret = I40E_ERR_PARAM;
2251 for (i = 0; i < irqmap_info->num_vectors; i++) {
2252 map = &irqmap_info->vecmap[i];
2253 /* validate msg params */
2254 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2255 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2256 aq_ret = I40E_ERR_PARAM;
2259 vsi_id = map->vsi_id;
2261 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2262 aq_ret = I40E_ERR_PARAM;
2266 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2267 aq_ret = I40E_ERR_PARAM;
2271 i40e_config_irq_link_list(vf, vsi_id, map);
2274 /* send the response to the VF */
2275 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2280 * i40e_ctrl_vf_tx_rings
2281 * @vsi: the SRIOV VSI being configured
2282 * @q_map: bit map of the queues to be enabled
2283 * @enable: start or stop the queue
2285 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2288 struct i40e_pf *pf = vsi->back;
2292 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2293 ret = i40e_control_wait_tx_q(vsi->seid, pf,
2294 vsi->base_queue + q_id,
2295 false /*is xdp*/, enable);
2303 * i40e_ctrl_vf_rx_rings
2304 * @vsi: the SRIOV VSI being configured
2305 * @q_map: bit map of the queues to be enabled
2306 * @enable: start or stop the queue
2308 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2311 struct i40e_pf *pf = vsi->back;
2315 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2316 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2325 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL
2326 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2328 * Returns true if validation was successful, else false.
2330 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2332 if ((!vqs->rx_queues && !vqs->tx_queues) ||
2333 vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2334 vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2341 * i40e_vc_enable_queues_msg
2342 * @vf: pointer to the VF info
2343 * @msg: pointer to the msg buffer
2345 * called from the VF to enable all or specific queue(s)
2347 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2349 struct virtchnl_queue_select *vqs =
2350 (struct virtchnl_queue_select *)msg;
2351 struct i40e_pf *pf = vf->pf;
2352 i40e_status aq_ret = 0;
2355 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2356 aq_ret = I40E_ERR_PARAM;
2360 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2361 aq_ret = I40E_ERR_PARAM;
2365 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2366 aq_ret = I40E_ERR_PARAM;
2370 /* Use the queue bit map sent by the VF */
2371 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2373 aq_ret = I40E_ERR_TIMEOUT;
2376 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2378 aq_ret = I40E_ERR_TIMEOUT;
2382 /* need to start the rings for additional ADq VSI's as well */
2383 if (vf->adq_enabled) {
2384 /* zero belongs to LAN VSI */
2385 for (i = 1; i < vf->num_tc; i++) {
2386 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2387 aq_ret = I40E_ERR_TIMEOUT;
2391 vf->queues_enabled = true;
2394 /* send the response to the VF */
2395 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2400 * i40e_vc_disable_queues_msg
2401 * @vf: pointer to the VF info
2402 * @msg: pointer to the msg buffer
2404 * called from the VF to disable all or specific queue(s)
2407 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2409 struct virtchnl_queue_select *vqs =
2410 (struct virtchnl_queue_select *)msg;
2411 struct i40e_pf *pf = vf->pf;
2412 i40e_status aq_ret = 0;
2414 /* Immediately mark queues as disabled */
2415 vf->queues_enabled = false;
2417 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2418 aq_ret = I40E_ERR_PARAM;
2422 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2423 aq_ret = I40E_ERR_PARAM;
2427 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2428 aq_ret = I40E_ERR_PARAM;
2432 /* Use the queue bit map sent by the VF */
2433 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2435 aq_ret = I40E_ERR_TIMEOUT;
2438 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2440 aq_ret = I40E_ERR_TIMEOUT;
2444 /* send the response to the VF */
2445 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2450 * i40e_vc_request_queues_msg
2451 * @vf: pointer to the VF info
2452 * @msg: pointer to the msg buffer
2454 * VFs get a default number of queues but can use this message to request a
2455 * different number. If the request is successful, PF will reset the VF and
2456 * return 0. If unsuccessful, PF will send message informing VF of number of
2457 * available queues and return result of sending VF a message.
2459 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2461 struct virtchnl_vf_res_request *vfres =
2462 (struct virtchnl_vf_res_request *)msg;
2463 u16 req_pairs = vfres->num_queue_pairs;
2464 u8 cur_pairs = vf->num_queue_pairs;
2465 struct i40e_pf *pf = vf->pf;
2467 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2470 if (req_pairs > I40E_MAX_VF_QUEUES) {
2471 dev_err(&pf->pdev->dev,
2472 "VF %d tried to request more than %d queues.\n",
2474 I40E_MAX_VF_QUEUES);
2475 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2476 } else if (req_pairs - cur_pairs > pf->queues_left) {
2477 dev_warn(&pf->pdev->dev,
2478 "VF %d requested %d more queues, but only %d left.\n",
2480 req_pairs - cur_pairs,
2482 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2484 /* successful request */
2485 vf->num_req_queues = req_pairs;
2486 i40e_vc_notify_vf_reset(vf);
2487 i40e_reset_vf(vf, false);
2491 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2492 (u8 *)vfres, sizeof(*vfres));
2496 * i40e_vc_get_stats_msg
2497 * @vf: pointer to the VF info
2498 * @msg: pointer to the msg buffer
2500 * called from the VF to get vsi stats
2502 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2504 struct virtchnl_queue_select *vqs =
2505 (struct virtchnl_queue_select *)msg;
2506 struct i40e_pf *pf = vf->pf;
2507 struct i40e_eth_stats stats;
2508 i40e_status aq_ret = 0;
2509 struct i40e_vsi *vsi;
2511 memset(&stats, 0, sizeof(struct i40e_eth_stats));
2513 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2514 aq_ret = I40E_ERR_PARAM;
2518 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2519 aq_ret = I40E_ERR_PARAM;
2523 vsi = pf->vsi[vf->lan_vsi_idx];
2525 aq_ret = I40E_ERR_PARAM;
2528 i40e_update_eth_stats(vsi);
2529 stats = vsi->eth_stats;
2532 /* send the response back to the VF */
2533 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2534 (u8 *)&stats, sizeof(stats));
2537 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can program:
2538 * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
2540 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2541 #define I40E_VC_MAX_VLAN_PER_VF 16
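/* Worked out, a non-trusted VF may therefore hold at most 16 + 1 + 1 = 18
 * MAC filters and 16 VLAN filters; i40e_check_vf_permission() and
 * i40e_vc_add_vlan_msg() below enforce these caps.
 */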
2544 * i40e_check_vf_permission
2545 * @vf: pointer to the VF info
2546 * @al: MAC address list from virtchnl
2548 * Check that the given list of MAC addresses is allowed. Will return -EPERM
2549 * if any address in the list is not valid. Checks the following conditions:
2551 * 1) broadcast and zero addresses are never valid
2552 * 2) unicast addresses are not allowed if the VMM has administratively set
2553 * the VF MAC address, unless the VF is marked as privileged.
2554 * 3) There is enough space to add all the addresses.
2556 * Note that to guarantee consistency, it is expected this function be called
2557 * while holding the mac_filter_hash_lock, as otherwise the current number of
2558 * addresses might not be accurate.
2560 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2561 struct virtchnl_ether_addr_list *al)
2563 struct i40e_pf *pf = vf->pf;
2564 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2565 int mac2add_cnt = 0;
2568 for (i = 0; i < al->num_elements; i++) {
2569 struct i40e_mac_filter *f;
2570 u8 *addr = al->list[i].addr;
2572 if (is_broadcast_ether_addr(addr) ||
2573 is_zero_ether_addr(addr)) {
2574 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2576 return I40E_ERR_INVALID_MAC_ADDR;
2579 /* If the host VMM administrator has set the VF MAC address
2580 * administratively via the ndo_set_vf_mac command then deny
2581 * permission to the VF to add or delete unicast MAC addresses.
2582 * Unless the VF is privileged and then it can do whatever.
2583 * The VF may request to set the MAC address filter already
2584 * assigned to it so do not return an error in that case.
2586 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2587 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2588 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2589 dev_err(&pf->pdev->dev,
2590 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2594 /* count filters that really will be added */
2595 f = i40e_find_mac(vsi, addr);
2600 /* If this VF is not privileged, then we can't add more than a limited
2601 * number of addresses. Check to make sure that the additions do not
2602 * push us over the limit.
2604 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2605 (i40e_count_filters(vsi) + mac2add_cnt) >
2606 I40E_VC_MAX_MAC_ADDR_PER_VF) {
2607 dev_err(&pf->pdev->dev,
2608 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2615 * i40e_vc_add_mac_addr_msg
2616 * @vf: pointer to the VF info
2617 * @msg: pointer to the msg buffer
2619 * add guest mac address filter
2621 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2623 struct virtchnl_ether_addr_list *al =
2624 (struct virtchnl_ether_addr_list *)msg;
2625 struct i40e_pf *pf = vf->pf;
2626 struct i40e_vsi *vsi = NULL;
2627 i40e_status ret = 0;
2630 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2631 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2632 ret = I40E_ERR_PARAM;
2636 vsi = pf->vsi[vf->lan_vsi_idx];
2638 /* Lock once, because every function called inside the for loop accesses
2639 * the VSI's MAC filter list, which must be protected by the same lock.
2641 spin_lock_bh(&vsi->mac_filter_hash_lock);
2643 ret = i40e_check_vf_permission(vf, al);
2645 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2649 /* add new addresses to the list */
2650 for (i = 0; i < al->num_elements; i++) {
2651 struct i40e_mac_filter *f;
2653 f = i40e_find_mac(vsi, al->list[i].addr);
2655 f = i40e_add_mac_filter(vsi, al->list[i].addr);
2658 dev_err(&pf->pdev->dev,
2659 "Unable to add MAC filter %pM for VF %d\n",
2660 al->list[i].addr, vf->vf_id);
2661 ret = I40E_ERR_PARAM;
2662 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2667 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2669 /* program the updated filter list */
2670 ret = i40e_sync_vsi_filters(vsi);
2672 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2676 /* send the response to the VF */
2677 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2682 * i40e_vc_del_mac_addr_msg
2683 * @vf: pointer to the VF info
2684 * @msg: pointer to the msg buffer
2686 * remove guest mac address filter
2688 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2690 struct virtchnl_ether_addr_list *al =
2691 (struct virtchnl_ether_addr_list *)msg;
2692 struct i40e_pf *pf = vf->pf;
2693 struct i40e_vsi *vsi = NULL;
2694 i40e_status ret = 0;
2697 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2698 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2699 ret = I40E_ERR_PARAM;
2703 for (i = 0; i < al->num_elements; i++) {
2704 if (is_broadcast_ether_addr(al->list[i].addr) ||
2705 is_zero_ether_addr(al->list[i].addr)) {
2706 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2707 al->list[i].addr, vf->vf_id);
2708 ret = I40E_ERR_INVALID_MAC_ADDR;
2712 vsi = pf->vsi[vf->lan_vsi_idx];
2714 spin_lock_bh(&vsi->mac_filter_hash_lock);
2715 /* delete addresses from the list */
2716 for (i = 0; i < al->num_elements; i++)
2717 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2718 ret = I40E_ERR_INVALID_MAC_ADDR;
2719 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2723 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2725 /* program the updated filter list */
2726 ret = i40e_sync_vsi_filters(vsi);
2728 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2732 /* send the response to the VF */
2733 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
2738 * i40e_vc_add_vlan_msg
2739 * @vf: pointer to the VF info
2740 * @msg: pointer to the msg buffer
2742 * program guest vlan id
2744 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
2746 struct virtchnl_vlan_filter_list *vfl =
2747 (struct virtchnl_vlan_filter_list *)msg;
2748 struct i40e_pf *pf = vf->pf;
2749 struct i40e_vsi *vsi = NULL;
2750 i40e_status aq_ret = 0;
2753 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2754 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2755 dev_err(&pf->pdev->dev,
2756 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2759 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2760 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2761 aq_ret = I40E_ERR_PARAM;
2765 for (i = 0; i < vfl->num_elements; i++) {
2766 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2767 aq_ret = I40E_ERR_PARAM;
2768 dev_err(&pf->pdev->dev,
2769 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2773 vsi = pf->vsi[vf->lan_vsi_idx];
2774 if (vsi->info.pvid) {
2775 aq_ret = I40E_ERR_PARAM;
2779 i40e_vlan_stripping_enable(vsi);
2780 for (i = 0; i < vfl->num_elements; i++) {
2781 /* add new VLAN filter */
2782 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2786 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2787 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2791 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2792 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2798 dev_err(&pf->pdev->dev,
2799 "Unable to add VLAN filter %d for VF %d, error %d\n",
2800 vfl->vlan_id[i], vf->vf_id, ret);
2804 /* send the response to the VF */
2805 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2809 * i40e_vc_remove_vlan_msg
2810 * @vf: pointer to the VF info
2811 * @msg: pointer to the msg buffer
2813 * remove programmed guest vlan id
2815 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
2817 struct virtchnl_vlan_filter_list *vfl =
2818 (struct virtchnl_vlan_filter_list *)msg;
2819 struct i40e_pf *pf = vf->pf;
2820 struct i40e_vsi *vsi = NULL;
2821 i40e_status aq_ret = 0;
2824 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2825 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2826 aq_ret = I40E_ERR_PARAM;
2830 for (i = 0; i < vfl->num_elements; i++) {
2831 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2832 aq_ret = I40E_ERR_PARAM;
2837 vsi = pf->vsi[vf->lan_vsi_idx];
2838 if (vsi->info.pvid) {
2839 if (vfl->num_elements > 1 || vfl->vlan_id[0])
2840 aq_ret = I40E_ERR_PARAM;
2844 for (i = 0; i < vfl->num_elements; i++) {
2845 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2848 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2849 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2853 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2854 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2861 /* send the response to the VF */
2862 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
2867 * @vf: pointer to the VF info
2868 * @msg: pointer to the msg buffer
2869 * @msglen: msg length
2871 * called from the VF for the iwarp msgs
2873 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2875 struct i40e_pf *pf = vf->pf;
2876 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2877 i40e_status aq_ret = 0;
2879 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2880 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2881 aq_ret = I40E_ERR_PARAM;
2885 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2889 /* send the response to the VF */
2890 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2895 * i40e_vc_iwarp_qvmap_msg
2896 * @vf: pointer to the VF info
2897 * @msg: pointer to the msg buffer
2898 * @config: config qvmap or release it
2900 * called from the VF for the iwarp msgs
2902 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
2904 struct virtchnl_iwarp_qvlist_info *qvlist_info =
2905 (struct virtchnl_iwarp_qvlist_info *)msg;
2906 i40e_status aq_ret = 0;
2908 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2909 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2910 aq_ret = I40E_ERR_PARAM;
2915 if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2916 aq_ret = I40E_ERR_PARAM;
2918 i40e_release_iwarp_qvlist(vf);
2922 /* send the response to the VF */
2923 return i40e_vc_send_resp_to_vf(vf,
2924 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
2925 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
2930 * i40e_vc_config_rss_key
2931 * @vf: pointer to the VF info
2932 * @msg: pointer to the msg buffer
2934 * Configure the VF's RSS key
2936 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
2938 struct virtchnl_rss_key *vrk =
2939 (struct virtchnl_rss_key *)msg;
2940 struct i40e_pf *pf = vf->pf;
2941 struct i40e_vsi *vsi = NULL;
2942 i40e_status aq_ret = 0;
2944 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2945 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
2946 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
2947 aq_ret = I40E_ERR_PARAM;
2951 vsi = pf->vsi[vf->lan_vsi_idx];
2952 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
2954 /* send the response to the VF */
2955 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
2960 * i40e_vc_config_rss_lut
2961 * @vf: pointer to the VF info
2962 * @msg: pointer to the msg buffer
2964 * Configure the VF's RSS LUT
2966 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
2968 struct virtchnl_rss_lut *vrl =
2969 (struct virtchnl_rss_lut *)msg;
2970 struct i40e_pf *pf = vf->pf;
2971 struct i40e_vsi *vsi = NULL;
2972 i40e_status aq_ret = 0;
2975 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2976 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
2977 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
2978 aq_ret = I40E_ERR_PARAM;
2982 for (i = 0; i < vrl->lut_entries; i++)
2983 if (vrl->lut[i] >= vf->num_queue_pairs) {
2984 aq_ret = I40E_ERR_PARAM;
2988 vsi = pf->vsi[vf->lan_vsi_idx];
2989 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
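/* Each LUT entry is a one-byte index into the VF's queue pairs, which is
 * why every entry is checked against vf->num_queue_pairs above before the
 * table is programmed. I40E_VF_HLUT_ARRAY_SIZE is the fixed per-VF LUT size
 * the VF must supply (64 bytes, if the usual VF register sizing applies).
 */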
2990 /* send the response to the VF */
2992 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
2997 * i40e_vc_get_rss_hena
2998 * @vf: pointer to the VF info
2999 * @msg: pointer to the msg buffer
3001 * Return the RSS HENA bits allowed by the hardware
3003 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3005 struct virtchnl_rss_hena *vrh = NULL;
3006 struct i40e_pf *pf = vf->pf;
3007 i40e_status aq_ret = 0;
3010 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3011 aq_ret = I40E_ERR_PARAM;
3014 len = sizeof(struct virtchnl_rss_hena);
3016 vrh = kzalloc(len, GFP_KERNEL);
3018 aq_ret = I40E_ERR_NO_MEMORY;
3022 vrh->hena = i40e_pf_get_default_rss_hena(pf);
3024 /* send the response back to the VF */
3025 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3026 aq_ret, (u8 *)vrh, len);
3032 * i40e_vc_set_rss_hena
3033 * @vf: pointer to the VF info
3034 * @msg: pointer to the msg buffer
3036 * Set the RSS HENA bits for the VF
3038 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3040 struct virtchnl_rss_hena *vrh =
3041 (struct virtchnl_rss_hena *)msg;
3042 struct i40e_pf *pf = vf->pf;
3043 struct i40e_hw *hw = &pf->hw;
3044 i40e_status aq_ret = 0;
3046 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3047 aq_ret = I40E_ERR_PARAM;
3050 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3051 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3052 (u32)(vrh->hena >> 32));
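/* The hash-enable (HENA) word negotiated over virtchnl is 64 bits wide,
 * while each VFQF_HENA1 register is 32 bits, so the value is written as two
 * halves above: register index 0 takes the low 32 bits and index 1 the high
 * 32 bits.
 */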
3054 /* send the response to the VF */
3056 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3060 * i40e_vc_enable_vlan_stripping
3061 * @vf: pointer to the VF info
3062 * @msg: pointer to the msg buffer
3064 * Enable vlan header stripping for the VF
3066 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3068 i40e_status aq_ret = 0;
3069 struct i40e_vsi *vsi;
3071 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3072 aq_ret = I40E_ERR_PARAM;
3076 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3077 i40e_vlan_stripping_enable(vsi);
3079 /* send the response to the VF */
3081 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3086 * i40e_vc_disable_vlan_stripping
3087 * @vf: pointer to the VF info
3088 * @msg: pointer to the msg buffer
3090 * Disable vlan header stripping for the VF
3092 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3094 i40e_status aq_ret = 0;
3095 struct i40e_vsi *vsi;
3097 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3098 aq_ret = I40E_ERR_PARAM;
3102 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3103 i40e_vlan_stripping_disable(vsi);
3105 /* send the response to the VF */
3107 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3112 * i40e_validate_cloud_filter
3113 * @vf: pointer to the VF info
3114 * @tc_filter: pointer to the virtchnl filter to validate
3116 * This function validates cloud filter programmed as TC filter for ADq
3118 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3119 struct virtchnl_filter *tc_filter)
3121 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3122 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3123 struct i40e_pf *pf = vf->pf;
3124 struct i40e_vsi *vsi = NULL;
3125 struct i40e_mac_filter *f;
3126 struct hlist_node *h;
3130 if (!tc_filter->action) {
3131 dev_info(&pf->pdev->dev,
3132 "VF %d: Currently ADq doesn't support Drop Action\n",
3137 /* action_meta is TC number here to which the filter is applied */
3138 if (!tc_filter->action_meta ||
3139 tc_filter->action_meta > I40E_MAX_VF_VSI) {
3140 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3141 vf->vf_id, tc_filter->action_meta);
3145 /* Check filter if it's programmed for advanced mode or basic mode.
3146 * There are two ADq modes (for VF only),
3147 * 1. Basic mode: intended to allow as many filter options as possible
3148 * to be added to a VF in Non-trusted mode. Main goal is
3149 * to add filters to its own MAC and VLAN id.
3150 * 2. Advanced mode: is for allowing filters to be applied other than
3151  *                    its own MAC or VLAN. This mode requires the VF to be trusted.
3154 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3155 vsi = pf->vsi[vf->lan_vsi_idx];
3156 f = i40e_find_mac(vsi, data.dst_mac);
3159 dev_info(&pf->pdev->dev,
3160 "Destination MAC %pM doesn't belong to VF %d\n",
3161 data.dst_mac, vf->vf_id);
3166 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3168 if (f->vlan == ntohs(data.vlan_id)) {
3174 dev_info(&pf->pdev->dev,
3175 "VF %d doesn't have any VLAN id %u\n",
3176 vf->vf_id, ntohs(data.vlan_id));
3181 /* Check if VF is trusted */
3182 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3183 dev_err(&pf->pdev->dev,
3184 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3186 return I40E_ERR_CONFIG;
3190 if (mask.dst_mac[0] & data.dst_mac[0]) {
3191 if (is_broadcast_ether_addr(data.dst_mac) ||
3192 is_zero_ether_addr(data.dst_mac)) {
3193 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3194 vf->vf_id, data.dst_mac);
3199 if (mask.src_mac[0] & data.src_mac[0]) {
3200 if (is_broadcast_ether_addr(data.src_mac) ||
3201 is_zero_ether_addr(data.src_mac)) {
3202 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3203 vf->vf_id, data.src_mac);
3208 if (mask.dst_port & data.dst_port) {
3209 if (!data.dst_port) {
3210 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3216 if (mask.src_port & data.src_port) {
3217 if (!data.src_port) {
3218 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3224 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3225 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3226 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3231 if (mask.vlan_id & data.vlan_id) {
3232 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3233 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3239 return I40E_SUCCESS;
3241 return I40E_ERR_CONFIG;
3245 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3246 * @vf: pointer to the VF info
3247 * @seid: seid of the VSI it is searching for
3249 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3251 struct i40e_pf *pf = vf->pf;
3252 struct i40e_vsi *vsi = NULL;
3255 for (i = 0; i < vf->num_tc; i++) {
3256 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3257 if (vsi && vsi->seid == seid)
3264 * i40e_del_all_cloud_filters
3265 * @vf: pointer to the VF info
3267 * This function deletes all cloud filters
3269 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3271 struct i40e_cloud_filter *cfilter = NULL;
3272 struct i40e_pf *pf = vf->pf;
3273 struct i40e_vsi *vsi = NULL;
3274 struct hlist_node *node;
3277 hlist_for_each_entry_safe(cfilter, node,
3278 &vf->cloud_filter_list, cloud_node) {
3279 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3282 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3283 vf->vf_id, cfilter->seid);
3287 if (cfilter->dst_port)
3288 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3291 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3293 dev_err(&pf->pdev->dev,
3294 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3295 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3296 i40e_aq_str(&pf->hw,
3297 pf->hw.aq.asq_last_status));
3299 hlist_del(&cfilter->cloud_node);
3301 vf->num_cloud_filters--;
3306 * i40e_vc_del_cloud_filter
3307 * @vf: pointer to the VF info
3308 * @msg: pointer to the msg buffer
3310 * This function deletes a cloud filter programmed as TC filter for ADq
3312 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3314 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3315 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3316 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3317 struct i40e_cloud_filter cfilter, *cf = NULL;
3318 struct i40e_pf *pf = vf->pf;
3319 struct i40e_vsi *vsi = NULL;
3320 struct hlist_node *node;
3321 i40e_status aq_ret = 0;
3324 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3325 aq_ret = I40E_ERR_PARAM;
3329 if (!vf->adq_enabled) {
3330 dev_info(&pf->pdev->dev,
3331 "VF %d: ADq not enabled, can't apply cloud filter\n",
3333 aq_ret = I40E_ERR_PARAM;
3337 if (i40e_validate_cloud_filter(vf, vcf)) {
3338 dev_info(&pf->pdev->dev,
3339 "VF %d: Invalid input, can't apply cloud filter\n",
3341 aq_ret = I40E_ERR_PARAM;
3345 memset(&cfilter, 0, sizeof(cfilter));
3346 /* parse destination mac address */
3347 for (i = 0; i < ETH_ALEN; i++)
3348 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3350 /* parse source mac address */
3351 for (i = 0; i < ETH_ALEN; i++)
3352 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3354 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3355 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3356 cfilter.src_port = mask.src_port & tcf.src_port;
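/* Sketch of the virtchnl convention assumed here: every field in the VF's
 * filter request arrives as a (mask, data) pair, and only bytes the VF set
 * in the mask are meaningful. ANDing mask with data, as above, therefore
 * keeps exactly the fields the VF asked to match on and leaves everything
 * else zeroed in the local cloud filter.
 */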
3358 switch (vcf->flow_type) {
3359 case VIRTCHNL_TCP_V4_FLOW:
3360 cfilter.n_proto = ETH_P_IP;
3361 if (mask.dst_ip[0] & tcf.dst_ip[0])
3362 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3363 ARRAY_SIZE(tcf.dst_ip));
3364 else if (mask.src_ip[0] & tcf.dst_ip[0])
3365 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3366 ARRAY_SIZE(tcf.dst_ip));
3368 case VIRTCHNL_TCP_V6_FLOW:
3369 cfilter.n_proto = ETH_P_IPV6;
3370 if (mask.dst_ip[3] & tcf.dst_ip[3])
3371 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3372 sizeof(cfilter.ip.v6.dst_ip6));
3373 if (mask.src_ip[3] & tcf.src_ip[3])
3374 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3375 sizeof(cfilter.ip.v6.src_ip6));
3378 /* TC filter can be configured based on different combinations
3379 * and in this case IP is not a part of filter config
3381 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3385 /* get the VSI to which the TC belongs */
3386 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3387 cfilter.seid = vsi->seid;
3388 cfilter.flags = vcf->field_flags;
3390 /* Deleting TC filter */
3392 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3394 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3396 dev_err(&pf->pdev->dev,
3397 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3398 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3399 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3403 hlist_for_each_entry_safe(cf, node,
3404 &vf->cloud_filter_list, cloud_node) {
3405 if (cf->seid != cfilter.seid)
3408 if (cfilter.dst_port != cf->dst_port)
3410 if (mask.dst_mac[0])
3411 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3413 /* for ipv4 data to be valid, only first byte of mask is set */
3414 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3415 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3416 ARRAY_SIZE(tcf.dst_ip)))
3418 /* for ipv6, mask is set for all sixteen bytes (4 words) */
3419 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3420 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3421 sizeof(cfilter.ip.v6.src_ip6)))
3424 if (cfilter.vlan_id != cf->vlan_id)
3427 hlist_del(&cf->cloud_node);
3429 vf->num_cloud_filters--;
3433 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3438 * i40e_vc_add_cloud_filter
3439 * @vf: pointer to the VF info
3440 * @msg: pointer to the msg buffer
3442 * This function adds a cloud filter programmed as TC filter for ADq
3444 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3446 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3447 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3448 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3449 struct i40e_cloud_filter *cfilter = NULL;
3450 struct i40e_pf *pf = vf->pf;
3451 struct i40e_vsi *vsi = NULL;
3452 i40e_status aq_ret = 0;
3455 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3456 aq_ret = I40E_ERR_PARAM;
3460 if (!vf->adq_enabled) {
3461 dev_info(&pf->pdev->dev,
3462 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3464 aq_ret = I40E_ERR_PARAM;
3468 if (i40e_validate_cloud_filter(vf, vcf)) {
3469 dev_info(&pf->pdev->dev,
3470 "VF %d: Invalid input/s, can't apply cloud filter\n",
3472 aq_ret = I40E_ERR_PARAM;
3476 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3480 /* parse destination mac address */
3481 for (i = 0; i < ETH_ALEN; i++)
3482 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3484 /* parse source mac address */
3485 for (i = 0; i < ETH_ALEN; i++)
3486 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3488 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3489 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3490 cfilter->src_port = mask.src_port & tcf.src_port;
3492 switch (vcf->flow_type) {
3493 case VIRTCHNL_TCP_V4_FLOW:
3494 cfilter->n_proto = ETH_P_IP;
3495 if (mask.dst_ip[0] & tcf.dst_ip[0])
3496 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3497 ARRAY_SIZE(tcf.dst_ip));
3498 else if (mask.src_ip[0] & tcf.dst_ip[0])
3499 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3500 ARRAY_SIZE(tcf.dst_ip));
3502 case VIRTCHNL_TCP_V6_FLOW:
3503 cfilter->n_proto = ETH_P_IPV6;
3504 if (mask.dst_ip[3] & tcf.dst_ip[3])
3505 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3506 sizeof(cfilter->ip.v6.dst_ip6));
3507 if (mask.src_ip[3] & tcf.src_ip[3])
3508 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3509 sizeof(cfilter->ip.v6.src_ip6));
3512 /* TC filter can be configured based on different combinations
3513 * and in this case IP is not a part of filter config
3515 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3519 /* get the VSI to which the TC belongs */
3520 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3521 cfilter->seid = vsi->seid;
3522 cfilter->flags = vcf->field_flags;
3524 /* Adding cloud filter programmed as TC filter */
3526 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3528 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3530 dev_err(&pf->pdev->dev,
3531 "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3532 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3533 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3537 INIT_HLIST_NODE(&cfilter->cloud_node);
3538 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3539 /* release the pointer passing it to the collection */
3541 vf->num_cloud_filters++;
3545 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3550 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3551 * @vf: pointer to the VF info
3552 * @msg: pointer to the msg buffer
3554 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3556 struct virtchnl_tc_info *tci =
3557 (struct virtchnl_tc_info *)msg;
3558 struct i40e_pf *pf = vf->pf;
3559 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3560 int i, adq_request_qps = 0;
3561 i40e_status aq_ret = 0;
3564 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3565 aq_ret = I40E_ERR_PARAM;
3569 /* ADq cannot be applied if spoof check is ON */
3571 dev_err(&pf->pdev->dev,
3572 "Spoof check is ON, turn it OFF to enable ADq\n");
3573 aq_ret = I40E_ERR_PARAM;
3577 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3578 dev_err(&pf->pdev->dev,
3579 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3581 aq_ret = I40E_ERR_PARAM;
3585 /* max number of traffic classes for VF currently capped at 4 */
3586 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3587 dev_err(&pf->pdev->dev,
3588 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3589 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3590 aq_ret = I40E_ERR_PARAM;
3594 /* validate queues for each TC */
3595 for (i = 0; i < tci->num_tc; i++)
3596 if (!tci->list[i].count ||
3597 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3598 dev_err(&pf->pdev->dev,
3599 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3600 vf->vf_id, i, tci->list[i].count,
3601 I40E_DEFAULT_QUEUES_PER_VF);
3602 aq_ret = I40E_ERR_PARAM;
3606 /* need Max VF queues but already have default number of queues */
3607 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3609 if (pf->queues_left < adq_request_qps) {
3610 dev_err(&pf->pdev->dev,
3611 "No queues left to allocate to VF %d\n",
3613 aq_ret = I40E_ERR_PARAM;
3616 /* we need to allocate max VF queues to enable ADq so as to
3617 * make sure ADq enabled VF always gets back queues when it
3618 * goes through a reset.
3620 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
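/* Worked example of the arithmetic above, assuming the usual driver values
 * I40E_MAX_VF_QUEUES = 16 and I40E_DEFAULT_QUEUES_PER_VF = 4: the VF already
 * owns 4 queue pairs, so enabling ADq asks the PF for 16 - 4 = 12 extra
 * pairs and then pins num_queue_pairs at 16 so the VF keeps them across
 * resets.
 */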
3623 /* get link speed in Mbps to validate rate limit */
3624 switch (ls->link_speed) {
3625 case VIRTCHNL_LINK_SPEED_100MB:
3628 case VIRTCHNL_LINK_SPEED_1GB:
3631 case VIRTCHNL_LINK_SPEED_10GB:
3632 speed = SPEED_10000;
3634 case VIRTCHNL_LINK_SPEED_20GB:
3635 speed = SPEED_20000;
3637 case VIRTCHNL_LINK_SPEED_25GB:
3638 speed = SPEED_25000;
3640 case VIRTCHNL_LINK_SPEED_40GB:
3641 speed = SPEED_40000;
3644 dev_err(&pf->pdev->dev,
3645 "Cannot detect link speed\n");
3646 aq_ret = I40E_ERR_PARAM;
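/* The switch above only reduces the virtchnl link-speed enum to a plain
 * Mbps value (SPEED_10000 == 10000, and so on) so that each per-TC
 * max_tx_rate requested by the VF, which this code treats as Mbps, can be
 * sanity-checked against the physical link rate below.
 */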
3650 /* parse data from the queue channel info */
3651 vf->num_tc = tci->num_tc;
3652 for (i = 0; i < vf->num_tc; i++) {
3653 if (tci->list[i].max_tx_rate) {
3654 if (tci->list[i].max_tx_rate > speed) {
3655 dev_err(&pf->pdev->dev,
3656 "Invalid max tx rate %llu specified for VF %d.",
3657 tci->list[i].max_tx_rate,
3659 aq_ret = I40E_ERR_PARAM;
3662 vf->ch[i].max_tx_rate =
3663 tci->list[i].max_tx_rate;
3666 vf->ch[i].num_qps = tci->list[i].count;
3669 /* set this flag only after making sure all inputs are sane */
3670 vf->adq_enabled = true;
3671 /* num_req_queues is set when the user changes the number of queues via
3672 * ethtool, and that causes an issue for the default VSI (which depends on
3673 * this variable) when ADq is enabled, hence reset it here.
3675 vf->num_req_queues = 0;
3677 /* reset the VF in order to allocate resources */
3678 i40e_vc_notify_vf_reset(vf);
3679 i40e_reset_vf(vf, false);
3681 return I40E_SUCCESS;
3683 /* send the response to the VF */
3685 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3690 * i40e_vc_del_qch_msg
3691 * @vf: pointer to the VF info
3692 * @msg: pointer to the msg buffer
3694 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3696 struct i40e_pf *pf = vf->pf;
3697 i40e_status aq_ret = 0;
3699 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3700 aq_ret = I40E_ERR_PARAM;
3704 if (vf->adq_enabled) {
3705 i40e_del_all_cloud_filters(vf);
3707 vf->adq_enabled = false;
3709 dev_info(&pf->pdev->dev,
3710 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3713 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3715 aq_ret = I40E_ERR_PARAM;
3718 /* reset the VF in order to allocate resources */
3719 i40e_vc_notify_vf_reset(vf);
3720 i40e_reset_vf(vf, false);
3722 return I40E_SUCCESS;
3725 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3730 * i40e_vc_process_vf_msg
3731 * @pf: pointer to the PF structure
3732 * @vf_id: source VF id
3733 * @v_opcode: operation code
3734 * @v_retval: unused return value code
3735 * @msg: pointer to the msg buffer
3736 * @msglen: msg length
3738 * called from the common aeq/arq handler to
3739 * process request from VF
3741 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3742 u32 __always_unused v_retval, u8 *msg, u16 msglen)
3744 struct i40e_hw *hw = &pf->hw;
3745 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3749 pf->vf_aq_requests++;
3750 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
3752 vf = &(pf->vf[local_vf_id]);
3754 /* Check if VF is disabled. */
3755 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3756 return I40E_ERR_PARAM;
3758 /* perform basic checks on the msg */
3759 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3762 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3763 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3764 local_vf_id, v_opcode, msglen);
3766 case VIRTCHNL_STATUS_ERR_PARAM:
3774 case VIRTCHNL_OP_VERSION:
3775 ret = i40e_vc_get_version_msg(vf, msg);
3777 case VIRTCHNL_OP_GET_VF_RESOURCES:
3778 ret = i40e_vc_get_vf_resources_msg(vf, msg);
3779 i40e_vc_notify_vf_link_state(vf);
3781 case VIRTCHNL_OP_RESET_VF:
3782 i40e_vc_reset_vf_msg(vf);
3785 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3786 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
3788 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3789 ret = i40e_vc_config_queues_msg(vf, msg);
3791 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3792 ret = i40e_vc_config_irq_map_msg(vf, msg);
3794 case VIRTCHNL_OP_ENABLE_QUEUES:
3795 ret = i40e_vc_enable_queues_msg(vf, msg);
3796 i40e_vc_notify_vf_link_state(vf);
3798 case VIRTCHNL_OP_DISABLE_QUEUES:
3799 ret = i40e_vc_disable_queues_msg(vf, msg);
3801 case VIRTCHNL_OP_ADD_ETH_ADDR:
3802 ret = i40e_vc_add_mac_addr_msg(vf, msg);
3804 case VIRTCHNL_OP_DEL_ETH_ADDR:
3805 ret = i40e_vc_del_mac_addr_msg(vf, msg);
3807 case VIRTCHNL_OP_ADD_VLAN:
3808 ret = i40e_vc_add_vlan_msg(vf, msg);
3810 case VIRTCHNL_OP_DEL_VLAN:
3811 ret = i40e_vc_remove_vlan_msg(vf, msg);
3813 case VIRTCHNL_OP_GET_STATS:
3814 ret = i40e_vc_get_stats_msg(vf, msg);
3816 case VIRTCHNL_OP_IWARP:
3817 ret = i40e_vc_iwarp_msg(vf, msg, msglen);
3819 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
3820 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
3822 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
3823 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
3825 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3826 ret = i40e_vc_config_rss_key(vf, msg);
3828 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3829 ret = i40e_vc_config_rss_lut(vf, msg);
3831 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
3832 ret = i40e_vc_get_rss_hena(vf, msg);
3834 case VIRTCHNL_OP_SET_RSS_HENA:
3835 ret = i40e_vc_set_rss_hena(vf, msg);
3837 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3838 ret = i40e_vc_enable_vlan_stripping(vf, msg);
3840 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3841 ret = i40e_vc_disable_vlan_stripping(vf, msg);
3843 case VIRTCHNL_OP_REQUEST_QUEUES:
3844 ret = i40e_vc_request_queues_msg(vf, msg);
3846 case VIRTCHNL_OP_ENABLE_CHANNELS:
3847 ret = i40e_vc_add_qch_msg(vf, msg);
3849 case VIRTCHNL_OP_DISABLE_CHANNELS:
3850 ret = i40e_vc_del_qch_msg(vf, msg);
3852 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
3853 ret = i40e_vc_add_cloud_filter(vf, msg);
3855 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
3856 ret = i40e_vc_del_cloud_filter(vf, msg);
3858 case VIRTCHNL_OP_UNKNOWN:
3860 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
3861 v_opcode, local_vf_id);
3862 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
3863 I40E_ERR_NOT_IMPLEMENTED);
3871 * i40e_vc_process_vflr_event
3872 * @pf: pointer to the PF structure
3874 * called from the VFLR irq handler to
3875 * free up VF resources and state variables
3877 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3879 struct i40e_hw *hw = &pf->hw;
3880 u32 reg, reg_idx, bit_idx;
3884 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3887 /* Re-enable the VFLR interrupt cause here, before looking for which
3888 * VF got reset. Otherwise, if another VF gets a reset while the
3889 * first one is being processed, that interrupt will be lost, and
3890 * that VF will be stuck in reset forever.
3892 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3893 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
3894 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3897 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3898 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
3899 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
3900 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
3901 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
3902 vf = &pf->vf[vf_id];
3903 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
3904 if (reg & BIT(bit_idx))
3905 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
3906 i40e_reset_vf(vf, true);
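/* Illustrative example of the indexing above: GLGEN_VFLRSTAT carries one
 * bit per absolute VF, 32 VFs per register. Assuming a vf_base_id of 64,
 * VF 5 of this PF is absolute VF 69, so reg_idx = 69 / 32 = 2 and
 * bit_idx = 69 % 32 = 5 within that register.
 */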
3914 * @pf: the physical function
3915 * @vf_id: VF identifier
3917 * Check that the VF is enabled and the VSI exists.
3919 * Returns 0 on success, negative on failure
3921 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
3923 struct i40e_vsi *vsi;
3927 if (vf_id >= pf->num_alloc_vfs) {
3928 dev_err(&pf->pdev->dev,
3929 "Invalid VF Identifier %d\n", vf_id);
3933 vf = &pf->vf[vf_id];
3934 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
3942 * i40e_ndo_set_vf_mac
3943 * @netdev: network interface device structure
3944 * @vf_id: VF identifier
3947 * program VF mac address
3949 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3951 struct i40e_netdev_priv *np = netdev_priv(netdev);
3952 struct i40e_vsi *vsi = np->vsi;
3953 struct i40e_pf *pf = vsi->back;
3954 struct i40e_mac_filter *f;
3957 struct hlist_node *h;
3961 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
3962 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
3966 /* validate the request */
3967 ret = i40e_validate_vf(pf, vf_id);
3971 vf = &pf->vf[vf_id];
3972 vsi = pf->vsi[vf->lan_vsi_idx];
3974 /* When the VF is resetting wait until it is done.
3975 * It can take up to 200 milliseconds,
3976 * but wait for up to 300 milliseconds to be safe.
3977 * If the VF is indeed in reset, the vsi pointer has
3978 * to show on the newly loaded vsi under pf->vsi[id].
3980 for (i = 0; i < 15; i++) {
3981 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3983 vsi = pf->vsi[vf->lan_vsi_idx];
3988 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3989 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3995 if (is_multicast_ether_addr(mac)) {
3996 dev_err(&pf->pdev->dev,
3997 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4002 /* Lock once because below invoked function add/del_filter requires
4003 * mac_filter_hash_lock to be held
4005 spin_lock_bh(&vsi->mac_filter_hash_lock);
4007 /* delete the temporary mac address */
4008 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4009 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4011 /* Delete all the filters for this VSI - we're going to kill it anyway.
4014 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4015 __i40e_del_filter(vsi, f);
4017 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4019 /* program mac filter */
4020 if (i40e_sync_vsi_filters(vsi)) {
4021 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4025 ether_addr_copy(vf->default_lan_addr.addr, mac);
4027 if (is_zero_ether_addr(mac)) {
4028 vf->pf_set_mac = false;
4029 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4031 vf->pf_set_mac = true;
4032 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4036 /* Force the VF interface down so it has to bring up with new MAC address.
4039 i40e_vc_disable_vf(vf);
4040 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4043 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4048 * i40e_vsi_has_vlans - True if VSI has configured VLANs
4049 * @vsi: pointer to the vsi
4051 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
4052 * we have no configured VLANs. Do not call while holding the
4053 * mac_filter_hash_lock.
4055 static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
4059 /* If we have a port VLAN, then the VSI cannot have any VLANs
4060 * configured, as all MAC/VLAN filters will be assigned to the PVID.
4065 /* Since we don't have a PVID, we know that if the device is in VLAN
4066 * mode it must be because of a VLAN filter configured on this VSI.
4068 spin_lock_bh(&vsi->mac_filter_hash_lock);
4069 have_vlans = i40e_is_vsi_in_vlan(vsi);
4070 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4076 * i40e_ndo_set_vf_port_vlan
4077 * @netdev: network interface device structure
4078 * @vf_id: VF identifier
4079 * @vlan_id: VLAN ID
4080 * @qos: priority setting
4081 * @vlan_proto: vlan protocol
4083 * program VF vlan id and/or qos
4085 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4086 u16 vlan_id, u8 qos, __be16 vlan_proto)
4088 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4089 struct i40e_netdev_priv *np = netdev_priv(netdev);
4090 bool allmulti = false, alluni = false;
4091 struct i40e_pf *pf = np->vsi->back;
4092 struct i40e_vsi *vsi;
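/* Sketch of the tag value built in vlanprio above, assuming the standard
 * 802.1Q TCI layout with the priority (PCP) field in the top three bits:
 * vlan_id = 100 and qos = 5 give 100 | (5 << I40E_VLAN_PRIORITY_SHIFT),
 * i.e. 0xA064 when the shift is 13.
 */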
4096 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4097 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4101 /* validate the request */
4102 ret = i40e_validate_vf(pf, vf_id);
4106 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4107 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4112 if (vlan_proto != htons(ETH_P_8021Q)) {
4113 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4114 ret = -EPROTONOSUPPORT;
4118 vf = &pf->vf[vf_id];
4119 vsi = pf->vsi[vf->lan_vsi_idx];
4120 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4121 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4127 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4128 /* duplicate request, so just return success */
4131 if (i40e_vsi_has_vlans(vsi)) {
4132 dev_err(&pf->pdev->dev,
4133 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
4135 /* Administrator error - knock the VF offline until the administrator
4136 * reconfigures the network correctly and then reloads
4137 * the VF driver.
4139 i40e_vc_disable_vf(vf);
4140 /* During reset the VF got a new VSI, so refresh the pointer. */
4141 vsi = pf->vsi[vf->lan_vsi_idx];
4144 /* Locked once because multiple functions below iterate list */
4145 spin_lock_bh(&vsi->mac_filter_hash_lock);
4147 /* Check for condition where there was already a port VLAN ID
4148 * filter set and now it is being deleted by setting it to zero.
4149 * Additionally check for the condition where there was a port
4150 * VLAN but now there is a new and different port VLAN being set.
4151 * Before deleting all the old VLAN filters we must add new ones
4152 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4153 * MAC addresses deleted.
4155 if ((!(vlan_id || qos) ||
4156 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4158 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4160 dev_info(&vsi->back->pdev->dev,
4161 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4162 vsi->back->hw.aq.asq_last_status);
4163 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4168 if (vsi->info.pvid) {
4169 /* remove all filters on the old VLAN */
4170 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4174 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4176 /* disable promisc modes in case they were enabled */
4177 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4180 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4185 ret = i40e_vsi_add_pvid(vsi, vlanprio);
4187 i40e_vsi_remove_pvid(vsi);
4188 spin_lock_bh(&vsi->mac_filter_hash_lock);
4191 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4192 vlan_id, qos, vf_id);
4194 /* add new VLAN filter for each MAC */
4195 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4197 dev_info(&vsi->back->pdev->dev,
4198 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4199 vsi->back->hw.aq.asq_last_status);
4200 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4204 /* remove the previously added non-VLAN MAC filters */
4205 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4208 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4210 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4213 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4216 /* Schedule the worker thread to take care of applying changes */
4217 i40e_service_event_schedule(vsi->back);
4220 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4224 /* The Port VLAN needs to be saved across resets the same as the
4225 * default LAN MAC address.
4227 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4229 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4231 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4238 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4243 * i40e_ndo_set_vf_bw
4244 * @netdev: network interface device structure
4245 * @vf_id: VF identifier
4246 * @min_tx_rate: Minimum Tx rate
4247 * @max_tx_rate: Maximum Tx rate
4249 * configure VF Tx rate
4251 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4254 struct i40e_netdev_priv *np = netdev_priv(netdev);
4255 struct i40e_pf *pf = np->vsi->back;
4256 struct i40e_vsi *vsi;
4260 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4261 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4265 /* validate the request */
4266 ret = i40e_validate_vf(pf, vf_id);
4271 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4272 min_tx_rate, vf_id);
4277 vf = &pf->vf[vf_id];
4278 vsi = pf->vsi[vf->lan_vsi_idx];
4279 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4280 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4286 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4290 vf->tx_rate = max_tx_rate;
4292 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4297 * i40e_ndo_get_vf_config
4298 * @netdev: network interface device structure
4299 * @vf_id: VF identifier
4300 * @ivi: VF configuration structure
4302 * return VF configuration
4304 int i40e_ndo_get_vf_config(struct net_device *netdev,
4305 int vf_id, struct ifla_vf_info *ivi)
4307 struct i40e_netdev_priv *np = netdev_priv(netdev);
4308 struct i40e_vsi *vsi = np->vsi;
4309 struct i40e_pf *pf = vsi->back;
4313 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4314 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4318 /* validate the request */
4319 ret = i40e_validate_vf(pf, vf_id);
4323 vf = &pf->vf[vf_id];
4324 /* first vsi is always the LAN vsi */
4325 vsi = pf->vsi[vf->lan_vsi_idx];
4333 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4335 ivi->max_tx_rate = vf->tx_rate;
4336 ivi->min_tx_rate = 0;
4337 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4338 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4339 I40E_VLAN_PRIORITY_SHIFT;
4340 if (!vf->link_forced)
4341 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4342 else if (vf->link_up)
4343 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4345 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4346 ivi->spoofchk = vf->spoofchk;
4347 ivi->trusted = vf->trusted;
4351 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4356 * i40e_ndo_set_vf_link_state
4357 * @netdev: network interface device structure
4358 * @vf_id: VF identifier
4359 * @link: required link state
4361 * Set the link state of a specified VF, regardless of physical link state
4363 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4365 struct i40e_netdev_priv *np = netdev_priv(netdev);
4366 struct i40e_pf *pf = np->vsi->back;
4367 struct virtchnl_pf_event pfe;
4368 struct i40e_hw *hw = &pf->hw;
4373 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4374 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4378 /* validate the request */
4379 if (vf_id >= pf->num_alloc_vfs) {
4380 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4385 vf = &pf->vf[vf_id];
4386 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4388 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4389 pfe.severity = PF_EVENT_SEVERITY_INFO;
4392 case IFLA_VF_LINK_STATE_AUTO:
4393 vf->link_forced = false;
4394 pfe.event_data.link_event.link_status =
4395 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
4396 pfe.event_data.link_event.link_speed =
4397 (enum virtchnl_link_speed)
4398 pf->hw.phy.link_info.link_speed;
4400 case IFLA_VF_LINK_STATE_ENABLE:
4401 vf->link_forced = true;
4403 pfe.event_data.link_event.link_status = true;
4404 pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
4406 case IFLA_VF_LINK_STATE_DISABLE:
4407 vf->link_forced = true;
4408 vf->link_up = false;
4409 pfe.event_data.link_event.link_status = false;
4410 pfe.event_data.link_event.link_speed = 0;
4416 /* Notify the VF of its new link state */
4417 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4418 0, (u8 *)&pfe, sizeof(pfe), NULL);
4421 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4426 * i40e_ndo_set_vf_spoofchk
4427 * @netdev: network interface device structure
4428 * @vf_id: VF identifier
4429 * @enable: flag to enable or disable feature
4431 * Enable or disable VF spoof checking
4433 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4435 struct i40e_netdev_priv *np = netdev_priv(netdev);
4436 struct i40e_vsi *vsi = np->vsi;
4437 struct i40e_pf *pf = vsi->back;
4438 struct i40e_vsi_context ctxt;
4439 struct i40e_hw *hw = &pf->hw;
4443 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4444 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4448 /* validate the request */
4449 if (vf_id >= pf->num_alloc_vfs) {
4450 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4455 vf = &(pf->vf[vf_id]);
4456 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4457 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4463 if (enable == vf->spoofchk)
4466 vf->spoofchk = enable;
4467 memset(&ctxt, 0, sizeof(ctxt));
4468 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4469 ctxt.pf_num = pf->hw.pf_id;
4470 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4472 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4473 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4474 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4476 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4481 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4486 * i40e_ndo_set_vf_trust
4487 * @netdev: network interface device structure of the pf
4488 * @vf_id: VF identifier
4489 * @setting: trust setting
4491 * Enable or disable VF trust setting
4493 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4495 struct i40e_netdev_priv *np = netdev_priv(netdev);
4496 struct i40e_pf *pf = np->vsi->back;
4500 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4501 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4505 /* validate the request */
4506 if (vf_id >= pf->num_alloc_vfs) {
4507 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4512 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4513 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4518 vf = &pf->vf[vf_id];
4520 if (setting == vf->trusted)
4523 vf->trusted = setting;
4524 i40e_vc_disable_vf(vf);
4525 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4526 vf_id, setting ? "" : "un");
4528 if (vf->adq_enabled) {
4530 dev_info(&pf->pdev->dev,
4531 "VF %u no longer Trusted, deleting all cloud filters\n",
4533 i40e_del_all_cloud_filters(vf);
4538 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4543 * i40e_get_vf_stats - populate some stats for the VF
4544 * @netdev: the netdev of the PF
4545 * @vf_id: the host OS identifier (0-127)
4546 * @vf_stats: pointer to the OS memory to be initialized
4548 int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4549 struct ifla_vf_stats *vf_stats)
4551 struct i40e_netdev_priv *np = netdev_priv(netdev);
4552 struct i40e_pf *pf = np->vsi->back;
4553 struct i40e_eth_stats *stats;
4554 struct i40e_vsi *vsi;
4557 /* validate the request */
4558 if (i40e_validate_vf(pf, vf_id))
4561 vf = &pf->vf[vf_id];
4562 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4563 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4567 vsi = pf->vsi[vf->lan_vsi_idx];
4571 i40e_update_eth_stats(vsi);
4572 stats = &vsi->eth_stats;
4574 memset(vf_stats, 0, sizeof(*vf_stats));
4576 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4577 stats->rx_multicast;
4578 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4579 stats->tx_multicast;
4580 vf_stats->rx_bytes = stats->rx_bytes;
4581 vf_stats->tx_bytes = stats->tx_bytes;
4582 vf_stats->broadcast = stats->rx_broadcast;
4583 vf_stats->multicast = stats->rx_multicast;
4584 vf_stats->rx_dropped = stats->rx_discards;
4585 vf_stats->tx_dropped = stats->tx_discards;