/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "qed.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
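
/* PF-side SR-IOV support.
 *
 * The PF <-> VF channel below works as follows: a VF posts a request in its
 * own memory and triggers an EQ event; qed_sriov_eqe_event() latches the
 * request's physical address and marks the VF in a pending-events bitmap;
 * the IOV workqueue then DMAEs the request into the PF's per-VF mailbox,
 * processes it and DMAEs a reply back to the address the VF supplied.
 */
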
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id, bool b_enabled_only)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

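/* Read the SR-IOV PCIe capability into cdev->p_iov_info. The values are
 * cached once at init time and used for all later sizing decisions.
 */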
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes a maximal number of VFs.
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of VFs set: %d; setting num_vfs to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}

static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
		    !(p_sb->status & QED_IGU_STATUS_PF)) {
			val = qed_rd(p_hwfn, p_ptt,
				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}

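/* Carve per-VF slots out of the pre-allocated request/reply/bulletin
 * buffers and derive each VF's IDs. As a worked example with illustrative
 * values: if first_vf_in_pf is 16 and the PF's opaque_fid is 0xb00, then VF
 * index 2 gets abs_vf_id = 18 and opaque_fid = (0xb00 & 0xff) | (18 << 8),
 * i.e. 0x1200.
 */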
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;
	}
}

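/* Allocate the DMA-coherent backing store for the per-VF mailboxes and
 * bulletin boards: one contiguous array per buffer type, indexed by the
 * VF's relative id.
 */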
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate bulletin boards (per-VF) */
	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_pf_iov'\n");
		return -ENOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}

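/* Discover SR-IOV capabilities at init time. As an illustration of the
 * first-VF calculation below (with made-up values): if config space reports
 * a VF offset of 16 and this is PF0 on the first engine, then
 * first_vf_in_pf = 16 + 0 - 16 = 0, i.e. the PF's VFs start at absolute
 * VF id 0.
 */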
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
		return -ENOMEM;
	}

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * in case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* Calculate the first VF index - this is a bit tricky; basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
					   p_hwfn->abs_pf_id - 16;
	if (QED_PATH_ID(p_hwfn))
		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	/* Check PF supports sriov */
	if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
		return false;

	return true;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* Place a given TLV on the TLV buffer, continuing the current TLV list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

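/* A complete reply is built by chaining these TLVs - e.g.,
 * qed_iov_prepare_resp() below adds one typed response TLV followed by a
 * CHANNEL_TLV_LIST_END terminator, with mbx->offset advancing past each.
 */
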
/* List the types and lengths of the TLVs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		/* Read current TLV */
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* Output TLV */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	/* Copy the reply body first; the first quadword, which the VF polls
	 * for completion, is copied only afterwards, so the VF never sees a
	 * completed status before the rest of the reply has landed.
	 */
	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);

	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static void qed_iov_process_mbx_dummy_resp(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   struct qed_vf_info *p_vf)
{
	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_NONE,
			     sizeof(struct pfvf_def_resp_tlv),
			     PFVF_STATUS_SUCCESS);
}

static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, int vfid)
{
	struct qed_iov_vf_mbx *mbx;
	struct qed_vf_info *p_vf;
	int i;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);

	mbx->first_tlv = mbx->req_virt->first_tlv;

	/* Check if tlv type is known */
	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		qed_iov_process_mbx_dummy_resp(p_hwfn, p_ptt, p_vf);
	} else {
		/* Unknown TLV - this may belong to a VF driver from the
		 * future - a version written after this PF driver was
		 * written, which supports features unknown as of yet.
		 * Too bad since we don't support them. Or this may be
		 * because someone wrote a crappy VF driver and is sending
		 * garbage over the channel.
		 */
		DP_ERR(p_hwfn,
		       "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
		       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);

		for (i = 0; i < 20; i++) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "%x ",
				   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
		}
	}
}

void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u64 add_bit = 1ULL << (vfid % 64);

	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
}

static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
						    u64 *events)
{
	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;

	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
}

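/* EQ handler for a VF request. Runs from the event-queue processing path,
 * so it is kept minimal: validate the source VF, latch the physical address
 * of its request and defer the actual processing to the IOV workqueue.
 */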
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
			      u16 abs_vfid, struct regpair *vf_msg)
{
	u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
	struct qed_vf_info *p_vf;

	if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
			   abs_vfid);
		return 0;
	}

	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];

	/* Record the physical address of the request so that the handler
	 * can later copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	/* Mark the event and schedule the workqueue */
	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

	return 0;
}

int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
					  &data->vf_pf_channel.msg_addr);
	default:
		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return -EINVAL;
	}
}

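/* Return the first initialized VF at or after rel_vf_id, or MAX_NUM_VFS if
 * there is none; this backs the qed_for_each_vf() iterator used below.
 */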
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
			return i;

out:
	return MAX_NUM_VFS;
}

static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
			       int vfid)
{
	struct qed_dmae_params params;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return -EINVAL;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (qed_dmae_host2host(p_hwfn, ptt,
			       vf_info->vf_mbx.pending_req,
			       vf_info->vf_mbx.req_phys,
			       sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);
		return -EIO;
	}

	return 0;
}

/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}

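/* Workqueue-context handler for VF messages: snapshot-and-clear the
 * pending-events bitmap, then copy in and process the mailbox request of
 * every VF whose bit was set.
 */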
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_and_clear_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}

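/* Delayed-work entry point, dispatching on the flag bits set by
 * qed_schedule_iov(). The STOP flag short-circuits everything else so the
 * workqueue can be drained and destroyed safely.
 */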
void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);
}

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		flush_workqueue(cdev->hwfns[i].iov_wq);
		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}

int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* PFs need a dedicated workqueue only if they support IOV. */
		if (!IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
	}

	return 0;
}