// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#define ICE_PF_RESET_WAIT_COUNT	200
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	hw->mac_type = ICE_MAC_GENERIC;

	return 0;
}
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer. Interpret the user specified
 * buffer as a "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_aq_discover_caps is expected to be called before this
 * function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
	}

	return status;
}
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	return ice_init_def_sw_recp(hw);
}
/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}
#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))

#define ICE_FW_LOG_DESC_SIZE_MAX	\
	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
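/* Worked example (illustrative): the logging data struct already contains
 * one entry, hence the "(n) - 1" above. With 16-bit entries,
 * ICE_FW_LOG_DESC_SIZE(3) evaluates to
 * sizeof(struct ice_aqc_fw_logging_data) + 2 * sizeof(__le16).
 */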
/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aqc_fw_logging_data *config;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 size;

	size = ICE_FW_LOG_DESC_SIZE_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}
/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging_data *data = NULL;
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kzalloc(ice_hw_to_dev(hw),
						    ICE_FW_LOG_DESC_SIZE_MAX,
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data->entry[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = ICE_FW_LOG_DESC_SIZE(chgs);
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
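/* Usage sketch (illustrative, excluded from the build): seeding hw->fw_log
 * before device init so that ice_init_hw() -> ice_cfg_fw_log(hw, true)
 * enables events for one module. The module index and the 0x3 event mask are
 * placeholders for this example, not values defined by the driver.
 */
#if 0
static void example_seed_fw_log(struct ice_hw *hw, u16 mod)
{
	hw->fw_log.cq_en = 1;			/* emit FW logs via the Rx CQ */
	hw->fw_log.evnts[mod].cfg = 0x3;	/* hypothetical event bits */
	/* ice_init_hw() picks this up when it calls ice_cfg_fw_log() */
}
#endif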
/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}
/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}
/**
 * ice_get_nvm_version - get cached NVM version data
 * @hw: pointer to the hardware structure
 * @oem_ver: 8 bit NVM version
 * @oem_build: 16 bit NVM build number
 * @oem_patch: 8 bit NVM patch number
 * @ver_hi: high part of the NVM version
 * @ver_lo: low part of the NVM version
 */
void
ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
		    u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
{
	struct ice_nvm_info *nvm = &hw->nvm;

	*oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
	*oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
	*oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
			   ICE_OEM_VER_BUILD_SHIFT);
	*ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	*ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
}
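/* Usage sketch (illustrative, excluded from the build): formatting the cached
 * NVM version into a printable string; the format string mirrors the style the
 * driver uses for version reporting, and buffer handling is assumed.
 */
#if 0
static void example_nvm_version_str(struct ice_hw *hw, char *buf, size_t len)
{
	u8 oem_ver, oem_patch, ver_hi, ver_lo;
	u16 oem_build;

	ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
			    &ver_hi, &ver_lo);
	snprintf(buf, len, "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
		 hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
}
#endif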
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
/**
 * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
 * @hw: pointer to hardware structure
 * @module_tlv: pointer to module TLV to return
 * @module_tlv_len: pointer to module TLV length to return
 * @module_type: module type requested
 *
 * Finds the requested sub module TLV type from the Preserved Field
 * Area (PFA) and returns the TLV pointer and length. The caller can
 * use these to read the variable length TLV value.
 */
enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
		       u16 module_type)
{
	enum ice_status status;
	u16 pfa_len, pfa_ptr;
	u16 next_tlv;

	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read Preserved Field Array pointer.\n");
		return status;
	}
	status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
		return status;
	}
	/* Starting with first TLV after PFA length, iterate through the list
	 * of TLVs to find the requested one.
	 */
	next_tlv = pfa_ptr + 1;
	while (next_tlv < pfa_ptr + pfa_len) {
		u16 tlv_sub_module_type;
		u16 tlv_len;

		/* Read TLV type */
		status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
			break;
		}
		/* Read TLV length */
		status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
			break;
		}
		if (tlv_sub_module_type == module_type) {
			if (tlv_len) {
				*module_tlv = next_tlv;
				*module_tlv_len = tlv_len;
				return 0;
			}
			return ICE_ERR_INVAL_SIZE;
		}
		/* Check next TLV, i.e. current TLV pointer + length + 2 words
		 * (for current TLV's type and length)
		 */
		next_tlv = next_tlv + tlv_len + 2;
	}
	/* Module does not exist */
	return ICE_ERR_DOES_NOT_EXIST;
}
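/* Usage sketch (illustrative, excluded from the build): locate a TLV and read
 * the first word of its value. The module type is a placeholder for one of
 * the defined ICE_SR_* sub-module types.
 */
#if 0
static enum ice_status
example_read_tlv_word(struct ice_hw *hw, u16 module_type, u16 *word)
{
	enum ice_status status;
	u16 tlv, tlv_len;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, module_type);
	if (status)
		return status;

	/* the TLV value starts after its type and length words */
	return ice_read_sr_word(hw, tlv + 2, word);
}
#endif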
/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}
/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
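/* Usage sketch (illustrative, excluded from the build): populate a minimal Rx
 * queue context and write it out. Real values come from the VSI/ring setup;
 * the 128-byte-unit encodings for base and dbuf below are assumptions made
 * for the example.
 */
#if 0
static enum ice_status
example_cfg_rxq(struct ice_hw *hw, u32 rxq_index, dma_addr_t ring_base,
		u16 ring_len)
{
	struct ice_rlan_ctx rlan_ctx = { 0 };

	rlan_ctx.base = ring_base >> 7;	/* base kept in 128-byte units */
	rlan_ctx.qlen = ring_len;	/* descriptor count */
	rlan_ctx.dbuf = 2048 >> 7;	/* 2 KB buffers, 128-byte units */

	/* prefena is set and the dense context written to HW registers */
	return ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
}
#endif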
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	enum ice_status status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List and Release Resource
	 * (with resource ID set to Global Config Lock) AdminQ commands are
	 * allowed; all others must block until the package download completes
	 * and the Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		/* fall-through */
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}
/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}
/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS - acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *    successfully downloaded the package; the driver does not have to
 *    download the package and can continue loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}
/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * Release a common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < (num_entries * sizeof(buf->elem[0])))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_entries = cpu_to_le16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num - 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	memcpy(res, buf->elem, sizeof(buf->elem) * num);

ice_alloc_res_exit:
	kfree(buf);
	return status;
}
/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
enum ice_status
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num - 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type);
	memcpy(buf->elem, res, sizeof(buf->elem) * num);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	kfree(buf);
	return status;
}
/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
			 ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}
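/* Worked example (illustrative): with a valid_functions bitmap of 0x3 (two
 * PFs set) and max = 768, each PF is granted 768 / 2 = 384 resources.
 */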
/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	char const *prefix;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
		prefix = "dev cap";
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
		prefix = "func cap";
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			caps->valid_functions = number;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: valid_functions (bitmap) = %d\n", prefix,
				  caps->valid_functions);

			/* store func count for resource management purposes */
			if (dev_p)
				dev_p->num_funcs = hweight32(number);
			break;
		case ICE_AQC_CAPS_SRIOV:
			caps->sr_iov_1_1 = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: sr_iov_1_1 = %d\n", prefix,
				  caps->sr_iov_1_1);
			break;
		case ICE_AQC_CAPS_VF:
			if (dev_p) {
				dev_p->num_vfs_exposed = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vfs_exposed = %d\n", prefix,
					  dev_p->num_vfs_exposed);
			} else if (func_p) {
				func_p->num_allocd_vfs = number;
				func_p->vf_base_id = logical_id;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_allocd_vfs = %d\n", prefix,
					  func_p->num_allocd_vfs);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: vf_base_id = %d\n", prefix,
					  func_p->vf_base_id);
			}
			break;
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vsi_allocd_to_host = %d\n",
					  prefix,
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guar_num_vsi =
					ice_get_num_per_func(hw, ICE_MAX_VSI);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi (fw) = %d\n",
					  prefix, number);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi = %d\n",
					  prefix, func_p->guar_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_DCB:
			caps->dcb = (number == 1);
			caps->active_tc_bitmap = logical_id;
			caps->maxtc = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: dcb = %d\n", prefix, caps->dcb);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: active_tc_bitmap = %d\n", prefix,
				  caps->active_tc_bitmap);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: maxtc = %d\n", prefix, caps->maxtc);
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_size = %d\n", prefix,
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_entry_width = %d\n", prefix,
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_rxq = %d\n", prefix,
				  caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rxq_first_id = %d\n", prefix,
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_txq = %d\n", prefix,
				  caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: txq_first_id = %d\n", prefix,
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_msix_vectors = %d\n", prefix,
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: msix_vector_first_id = %d\n", prefix,
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
				  prefix, caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: unknown capability[%d]: 0x%x\n", prefix,
				  i, cap);
			break;
		}
	}

	/* Re-calculate capabilities that are dependent on the number of
	 * physical ports; i.e. some features are not supported or function
	 * differently on devices with more than 4 ports.
	 */
	if (hw->dev_caps.num_funcs > 4) {
		/* Max 4 TCs per port */
		caps->maxtc = 4;
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: maxtc = %d (based on #ports)\n", prefix,
			  caps->maxtc);
	}
}
/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @cap_count: cap count needed if AQ err==ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
		*cap_count = le32_to_cpu(cmd->count);
	return status;
}
/**
 * ice_discover_caps - get info about the HW
 * @hw: pointer to the hardware structure
 * @opc: capabilities type to discover - pass in the command opcode
 */
static enum ice_status
ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
	enum ice_status status;
	u32 cap_count;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will return
	 * so the buffer size required isn't known ahead of time. The driver
	 * starts with cbuf_len and if this turns out to be insufficient, the
	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
	 * The driver then allocates the buffer based on the count and retries
	 * the operation. So it follows that the retry count is 2.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cap_count = ICE_GET_CAP_BUF_COUNT;
	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf_len = (u16)(cap_count *
				 sizeof(struct ice_aqc_list_caps_elem));
		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
					      opc, NULL);
		devm_kfree(ice_hw_to_dev(hw), cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with bigger buffer */
	} while (--retries);

	return status;
}
/**
 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
 * @hw: pointer to the hardware structure
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	u32 valid_func, rxq_first_id, txq_first_id;
	u32 msix_vector_first_id, max_mtu;
	u32 num_funcs;

	/* cache some func_caps values that should be restored after memset */
	valid_func = func_caps->common_cap.valid_functions;
	txq_first_id = func_caps->common_cap.txq_first_id;
	rxq_first_id = func_caps->common_cap.rxq_first_id;
	msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
	max_mtu = func_caps->common_cap.max_mtu;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

	/* restore cached values */
	func_caps->common_cap.valid_functions = valid_func;
	func_caps->common_cap.txq_first_id = txq_first_id;
	func_caps->common_cap.rxq_first_id = rxq_first_id;
	func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
	func_caps->common_cap.max_mtu = max_mtu;

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* cache some dev_caps values that should be restored after memset */
	valid_func = dev_caps->common_cap.valid_functions;
	txq_first_id = dev_caps->common_cap.txq_first_id;
	rxq_first_id = dev_caps->common_cap.rxq_first_id;
	msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
	max_mtu = dev_caps->common_cap.max_mtu;
	num_funcs = dev_caps->num_funcs;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

	/* restore cached values */
	dev_caps->common_cap.valid_functions = valid_func;
	dev_caps->common_cap.txq_first_id = txq_first_id;
	dev_caps->common_cap.rxq_first_id = rxq_first_id;
	dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
	dev_caps->common_cap.max_mtu = max_mtu;
	dev_caps->num_funcs = num_funcs;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}
/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
	if (!status)
		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);

	return status;
}
1960 * ice_aq_manage_mac_write - manage MAC address write command
1961 * @hw: pointer to the HW struct
1962 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
1963 * @flags: flags to control write behavior
1964 * @cd: pointer to command details structure or NULL
1966 * This function is used to write MAC address to the NVM (0x0108).
1969 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
1970 struct ice_sq_cd *cd)
1972 struct ice_aqc_manage_mac_write *cmd;
1973 struct ice_aq_desc desc;
1975 cmd = &desc.params.mac_write;
1976 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
	cmd->flags = flags;

	/* Prep values for flags, sah, sal */
	cmd->sah = htons(*((const u16 *)mac_addr));
	cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
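/* Example (illustrative sketch): programming a locally administered address.
 * The ICE_AQC_MAN_MAC_UPDATE_LAA_WOL flag choice and the function name are
 * assumptions for illustration only.
 */
static enum ice_status
ice_example_write_laa(struct ice_hw *hw, const u8 *mac)
{
	if (!is_valid_ether_addr(mac))
		return ICE_ERR_PARAM;

	return ice_aq_manage_mac_write(hw, mac, ICE_AQC_MAN_MAC_UPDATE_LAA_WOL,
				       NULL);
}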
1988 * ice_aq_clear_pxe_mode
1989 * @hw: pointer to the HW struct
1991 * Tell the firmware that the driver is taking over from PXE (0x0110).
1993 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1995 struct ice_aq_desc desc;
1997 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1998 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2000 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2004 * ice_clear_pxe_mode - clear pxe operations mode
2005 * @hw: pointer to the HW struct
2007 * Make sure all PXE mode settings are cleared, including things
2008 * like descriptor fetch/write-back mode.
2010 void ice_clear_pxe_mode(struct ice_hw *hw)
2012 if (ice_check_sq_alive(hw, &hw->adminq))
2013 ice_aq_clear_pxe_mode(hw);
2017 * ice_get_link_speed_based_on_phy_type - returns link speed
2018 * @phy_type_low: lower part of phy_type
2019 * @phy_type_high: higher part of phy_type
2021 * This helper function will convert an entry in PHY type structure
2022 * [phy_type_low, phy_type_high] to its corresponding link speed.
2023 * Note: In the structure of [phy_type_low, phy_type_high], there should
 * be one bit set, as this function will convert one PHY type to its
 * corresponding link speed.
 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}
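/* Example (illustrative sketch): the lookup above requires a one-hot input.
 * ICE_PHY_TYPE_LOW_* values are single BIT_ULL() masks, so one constant can
 * be passed directly; combining two of them would yield
 * ICE_AQ_LINK_SPEED_UNKNOWN. The function name is hypothetical.
 */
static u16 ice_example_speed_of_25g_sr(void)
{
	return ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR,
						    0);
}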
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap structure, you can check it at
 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in this [phy_type_low, phy_type_high] structure will
 * represent a certain link speed. This helper function will turn on bits
 * in [phy_type_low, phy_type_high] structure based on the value of
 * link_speeds_bitmap input parameter.
 */
void
2165 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2166 u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
2174 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2175 pt_low = BIT_ULL(index);
2176 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2178 if (link_speeds_bitmap & speed)
2179 *phy_type_low |= BIT_ULL(index);
2182 /* We then check with high part of phy_type */
2183 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2184 pt_high = BIT_ULL(index);
2185 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2187 if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}
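/* Example (illustrative sketch): building a PHY type mask that advertises
 * only 10G and 25G speeds. The function name is hypothetical.
 */
static void ice_example_mask_10g_25g(u64 *low, u64 *high)
{
	u16 speeds = ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB;

	*low = 0;
	*high = 0;
	ice_update_phy_type(low, high, speeds);
	/* *low now carries every 10G and 25G ICE_PHY_TYPE_LOW_* bit */
}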
2193 * ice_aq_set_phy_cfg
2194 * @hw: pointer to the HW struct
2195 * @lport: logical port number
2196 * @cfg: structure with PHY configuration data to be set
2197 * @cd: pointer to command details structure or NULL
2199 * Set the various PHY configuration parameters supported on the Port.
2200 * One or more of the Set PHY config parameters may be ignored in an MFP
2201 * mode as the PF may not have the privilege to set some of the PHY Config
2202 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
2206 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2208 struct ice_aq_desc desc;
	if (!cfg)
		return ICE_ERR_PARAM;
2213 /* Ensure that only valid bits of cfg->caps can be turned on. */
2214 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2215 ice_debug(hw, ICE_DBG_PHY,
2216 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2219 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2222 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2223 desc.params.set_phy.lport_num = lport;
2224 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2226 ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
2227 (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2228 ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
2229 (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2230 ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
2231 ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
2232 cfg->low_power_ctrl);
2233 ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
2234 ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
2235 ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
2237 return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2241 * ice_update_link_info - update status of the HW network link
2242 * @pi: port info structure of the interested logical port
2244 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2246 struct ice_link_status *li;
2247 enum ice_status status;
	if (!pi)
		return ICE_ERR_PARAM;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw = pi->hw;

		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
				     GFP_KERNEL);
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
					     pcaps, NULL);
		if (!status)
			memcpy(li->module_type, &pcaps->module_type,
			       sizeof(li->module_type));

		devm_kfree(ice_hw_to_dev(hw), pcaps);
	}

	return status;
}
 * ice_set_fc
 * @pi: port information structure
2283 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2284 * @ena_auto_link_update: enable automatic link update
2286 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
2291 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2292 struct ice_aqc_get_phy_caps_data *pcaps;
2293 enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi || !aq_failures)
		return ICE_ERR_PARAM;

	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	switch (pi->fc.req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* clear the old pause settings */
	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg.caps |= pause_mask;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
		/* Copy over all the old settings */
		cfg.phy_type_high = pcaps->phy_type_high;
		cfg.phy_type_low = pcaps->phy_type_low;
		cfg.low_power_ctrl = pcaps->low_power_ctrl;
		cfg.eee_cap = pcaps->eee_cap;
		cfg.eeer_value = pcaps->eeer_value;
		cfg.link_fec_opt = pcaps->link_fec_options;

		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}
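/* Example (illustrative sketch): requesting symmetric pause and letting
 * firmware restart the link so the change takes effect. The caller places
 * the requested mode in pi->fc.req_mode first. Hypothetical function.
 */
static enum ice_status ice_example_enable_pause(struct ice_port_info *pi)
{
	u8 aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
	enum ice_status status;

	pi->fc.req_mode = ICE_FC_FULL;
	status = ice_set_fc(pi, &aq_failures, true);
	if (status)
		ice_debug(pi->hw, ICE_DBG_LINK, "set fc failed, step %d\n",
			  aq_failures);
	return status;
}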
2381 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure.
 */
void
2389 ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
2390 struct ice_aqc_set_phy_cfg_data *cfg)
	if (!caps || !cfg)
		return;

	cfg->phy_type_low = caps->phy_type_low;
2396 cfg->phy_type_high = caps->phy_type_high;
2397 cfg->caps = caps->caps;
2398 cfg->low_power_ctrl = caps->low_power_ctrl;
2399 cfg->eee_cap = caps->eee_cap;
2400 cfg->eeer_value = caps->eeer_value;
2401 cfg->link_fec_opt = caps->link_fec_options;
2405 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2406 * @cfg: PHY configuration data to set FEC mode
2407 * @fec: FEC mode to configure
2409 * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
2410 * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
2411 * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
 */
void
ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
{
	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
				     ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
				     ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
				     ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		break;
	}
}
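/* Example (illustrative sketch): forcing RS-FEC using the copy-then-modify
 * pattern the kernel-doc above describes. Hypothetical function; assumes the
 * caller already fetched pcaps via ice_aq_get_phy_caps().
 */
static enum ice_status
ice_example_force_rs_fec(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *pcaps)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };

	ice_copy_phy_caps_to_cfg(pcaps, &cfg);
	ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	return ice_aq_set_phy_cfg(pi->hw, pi->lport, &cfg, NULL);
}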
2446 * ice_get_link_status - get status of the HW network link
2447 * @pi: port information structure
2448 * @link_up: pointer to bool (true/false = linkup/linkdown)
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
 */
2454 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2456 struct ice_phy_info *phy_info;
2457 enum ice_status status = 0;
2459 if (!pi || !link_up)
2460 return ICE_ERR_PARAM;
2462 phy_info = &pi->phy;
2464 if (phy_info->get_link_info) {
2465 status = ice_update_link_info(pi);
		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK,
				  "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}
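/* Example (illustrative sketch): a boolean polling helper built on
 * ice_get_link_status(). Hypothetical function.
 */
static bool ice_example_link_is_up(struct ice_port_info *pi)
{
	bool link_up = false;

	/* treat a failed query as link down */
	if (ice_get_link_status(pi, &link_up))
		return false;

	return link_up;
}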
2479 * ice_aq_set_link_restart_an
2480 * @pi: pointer to the port information structure
2481 * @ena_link: if true: enable link, if false: disable link
2482 * @cd: pointer to command details structure or NULL
2484 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2488 struct ice_sq_cd *cd)
2490 struct ice_aqc_restart_an *cmd;
2491 struct ice_aq_desc desc;
2493 cmd = &desc.params.restart_an;
2495 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2497 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2498 cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2504 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2508 * ice_aq_set_event_mask
2509 * @hw: pointer to the HW struct
2510 * @port_num: port number of the physical function
2511 * @mask: event mask to be set
2512 * @cd: pointer to command details structure or NULL
2514 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2518 struct ice_sq_cd *cd)
2520 struct ice_aqc_set_event_mask *cmd;
2521 struct ice_aq_desc desc;
2523 cmd = &desc.params.set_event_mask;
2525 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2527 cmd->lport_num = port_num;
2529 cmd->event_mask = cpu_to_le16(mask);
2530 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2534 * ice_aq_set_mac_loopback
2535 * @hw: pointer to the HW struct
2536 * @ena_lpbk: Enable or Disable loopback
2537 * @cd: pointer to command details structure or NULL
2539 * Enable/disable loopback on a given port
 */
enum ice_status
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2544 struct ice_aqc_set_mac_lb *cmd;
2545 struct ice_aq_desc desc;
2547 cmd = &desc.params.set_mac_lb;
2549 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2553 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2557 * ice_aq_set_port_id_led
2558 * @pi: pointer to the port information
2559 * @is_orig_mode: is this LED set to original mode (by the net-list)
2560 * @cd: pointer to command details structure or NULL
2562 * Set LED value for the given port (0x06e9)
 */
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2566 struct ice_sq_cd *cd)
2568 struct ice_aqc_set_port_id_led *cmd;
2569 struct ice_hw *hw = pi->hw;
2570 struct ice_aq_desc desc;
2572 cmd = &desc.params.set_port_id_led;
2574 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2581 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
2587 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
2588 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. Lower 8 bits for address, 8 upper bits zero padding.
 * @page: QSFP page
 * @set_page: set or ignore the page
2592 * @data: pointer to data buffer to be read/written to the I2C device.
2593 * @length: 1-16 for read, 1 for write.
 * @write: false for read, true for write
2595 * @cd: pointer to command details structure or NULL
2597 * Read/Write SFF EEPROM (0x06EE)
 */
enum ice_status
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
2601 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
2602 bool write, struct ice_sq_cd *cd)
2604 struct ice_aqc_sff_eeprom *cmd;
2605 struct ice_aq_desc desc;
2606 enum ice_status status;
2608 if (!data || (mem_addr & 0xff00))
2609 return ICE_ERR_PARAM;
2611 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
2612 cmd = &desc.params.read_write_sff_param;
2613 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
2614 cmd->lport_num = (u8)(lport & 0xff);
2615 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
					((set_page <<
					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
2621 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
2622 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
	if (write)
		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}
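/* Example (illustrative sketch): reading the first 16 identification bytes
 * of an SFP module EEPROM at the conventional 0xA0 address, page 0. Setting
 * bit 8 of the lport argument marks the logical port number as valid.
 * Hypothetical function.
 */
static enum ice_status
ice_example_read_sfp_id(struct ice_hw *hw, u8 lport, u8 *buf)
{
	return ice_aq_sff_eeprom(hw, BIT(8) | lport, 0xA0, 0x0, 0, 0,
				 buf, 16, false, NULL);
}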
2631 * __ice_aq_get_set_rss_lut
2632 * @hw: pointer to the hardware structure
2633 * @vsi_id: VSI FW index
2634 * @lut_type: LUT table type
2635 * @lut: pointer to the LUT buffer provided by the caller
2636 * @lut_size: size of the LUT buffer
2637 * @glob_lut_idx: global LUT index
2638 * @set: set true to set the table, false to get the table
2640 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
2642 static enum ice_status
2643 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2644 u16 lut_size, u8 glob_lut_idx, bool set)
2646 struct ice_aqc_get_set_rss_lut *cmd_resp;
2647 struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}
2660 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2661 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
2662 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
2663 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
2667 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
2668 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
2669 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
2670 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}
2677 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
2678 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
2679 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}
	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		/* fall-through */
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}
2712 ice_aq_get_set_rss_lut_send:
2713 cmd_resp->flags = cpu_to_le16(flags);
2714 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
ice_aq_get_set_rss_lut_exit:
	return status;
}
2721 * ice_aq_get_rss_lut
2722 * @hw: pointer to the hardware structure
2723 * @vsi_handle: software VSI handle
2724 * @lut_type: LUT table type
2725 * @lut: pointer to the LUT buffer provided by the caller
2726 * @lut_size: size of the LUT buffer
2728 * get the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2732 u8 *lut, u16 lut_size)
2734 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2735 return ICE_ERR_PARAM;
2737 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2738 lut_type, lut, lut_size, 0, false);
2742 * ice_aq_set_rss_lut
2743 * @hw: pointer to the hardware structure
2744 * @vsi_handle: software VSI handle
2745 * @lut_type: LUT table type
2746 * @lut: pointer to the LUT buffer provided by the caller
2747 * @lut_size: size of the LUT buffer
2749 * set the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2753 u8 *lut, u16 lut_size)
2755 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2756 return ICE_ERR_PARAM;
2758 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, true);
}
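/* Example (illustrative sketch): spreading RSS over the first four queues
 * with a VSI-type LUT. The 64-entry VSI LUT width is an assumption here.
 * Hypothetical function.
 */
static enum ice_status
ice_example_fill_vsi_lut(struct ice_hw *hw, u16 vsi_handle)
{
	u8 lut[64];
	u16 i;

	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % 4;	/* round-robin over queues 0..3 */

	return ice_aq_set_rss_lut(hw, vsi_handle,
				  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI,
				  lut, sizeof(lut));
}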
2763 * __ice_aq_get_set_rss_key
2764 * @hw: pointer to the HW struct
2765 * @vsi_id: VSI FW index
2766 * @key: pointer to key info struct
2767 * @set: set true to set the key, false to get the key
2769 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static enum ice_status
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key, bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
2777 u16 key_size = sizeof(*key);
2778 struct ice_aq_desc desc;
2780 cmd_resp = &desc.params.get_set_rss_key;
	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}
2789 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2790 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2791 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2792 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2794 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2798 * ice_aq_get_rss_key
2799 * @hw: pointer to the HW struct
2800 * @vsi_handle: software VSI handle
2801 * @key: pointer to key info struct
2803 * get the RSS key per VSI
 */
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2807 struct ice_aqc_get_set_rss_keys *key)
2809 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2810 return ICE_ERR_PARAM;
	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}
2817 * ice_aq_set_rss_key
2818 * @hw: pointer to the HW struct
2819 * @vsi_handle: software VSI handle
2820 * @keys: pointer to key info struct
2822 * set the RSS key per VSI
 */
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2826 struct ice_aqc_get_set_rss_keys *keys)
2828 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
2829 return ICE_ERR_PARAM;
	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}
2836 * ice_aq_add_lan_txq
2837 * @hw: pointer to the hardware structure
2838 * @num_qgrps: Number of added queue groups
2839 * @qg_list: list of queue groups to be added
2840 * @buf_size: size of buffer for indirect command
2841 * @cd: pointer to command details structure or NULL
2843 * Add Tx LAN queue (0x0C30)
2846 * Prior to calling add Tx LAN queue:
2847 * Initialize the following as part of the Tx queue context:
2848 * Completion queue ID if the queue uses Completion queue, Quanta profile,
2849 * Cache profile and Packet shaper profile.
2851 * After add Tx LAN queue AQ command is completed:
2852 * Interrupts should be associated with specific queues,
2853 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
2856 static enum ice_status
2857 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2858 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
2859 struct ice_sq_cd *cd)
2861 u16 i, sum_header_size, sum_q_size = 0;
2862 struct ice_aqc_add_tx_qgrp *list;
2863 struct ice_aqc_add_txqs *cmd;
2864 struct ice_aq_desc desc;
2866 cmd = &desc.params.add_txqs;
2868 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
	if (!qg_list)
		return ICE_ERR_PARAM;
2873 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2874 return ICE_ERR_PARAM;
2876 sum_header_size = num_qgrps *
2877 (sizeof(*qg_list) - sizeof(*qg_list->txqs));
	list = qg_list;
	for (i = 0; i < num_qgrps; i++) {
2881 struct ice_aqc_add_txqs_perq *q = list->txqs;
2883 sum_q_size += list->num_txqs * sizeof(*q);
2884 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
2887 if (buf_size != (sum_header_size + sum_q_size))
2888 return ICE_ERR_PARAM;
2890 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2892 cmd->num_qgrps = num_qgrps;
2894 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2898 * ice_aq_dis_lan_txq
2899 * @hw: pointer to the hardware structure
2900 * @num_qgrps: number of groups in the list
2901 * @qg_list: the list of groups to disable
2902 * @buf_size: the total size of the qg_list buffer in bytes
2903 * @rst_src: if called due to reset, specifies the reset source
2904 * @vmvf_num: the relative VM or VF number that is undergoing the reset
2905 * @cd: pointer to command details structure or NULL
2907 * Disable LAN Tx queue (0x0C31)
2909 static enum ice_status
2910 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2911 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
2912 enum ice_disq_rst_src rst_src, u16 vmvf_num,
2913 struct ice_sq_cd *cd)
2915 struct ice_aqc_dis_txqs *cmd;
2916 struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
2921 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
2923 /* qg_list can be NULL only in VM/VF reset flow */
2924 if (!qg_list && !rst_src)
2925 return ICE_ERR_PARAM;
2927 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2928 return ICE_ERR_PARAM;
2930 cmd->num_entries = num_qgrps;
2932 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
2933 ICE_AQC_Q_DIS_TIMEOUT_M);
	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
2938 cmd->vmvf_and_timeout |=
2939 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
2943 /* In this case, FW expects vmvf_num to be absolute VF ID */
2944 cmd->vmvf_and_timeout |=
2945 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
2946 ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on time out */
2954 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;
2959 /* set RD bit to indicate that command buffer is provided by the driver
2960 * and it needs to be read by the firmware
2962 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2964 for (i = 0; i < num_qgrps; ++i) {
2965 /* Calculate the size taken up by the queue IDs in this group */
2966 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
2968 /* Add the size of the group header */
2969 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
2971 /* If the num of queues is even, add 2 bytes of padding */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}
2993 /* End of FW Admin Queue command wrappers */
2996 * ice_write_byte - write a byte to a packed context structure
2997 * @src_ctx: the context structure to read from
2998 * @dest_ctx: the context to be written to
2999 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
3009 from = src_ctx + ce_info->offset;
3011 /* prepare the bits and mask */
3012 shift_width = ce_info->lsb % 8;
3013 mask = (u8)(BIT(ce_info->width) - 1);
	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
3019 mask <<= shift_width;
3020 src_byte <<= shift_width;
3022 /* get the current bits from the target bit string */
3023 dest = dest_ctx + (ce_info->lsb / 8);
3025 memcpy(&dest_byte, dest, sizeof(dest_byte));
3027 dest_byte &= ~mask; /* get the bits not changing */
3028 dest_byte |= src_byte; /* add in the new bits */
3030 /* put it all back */
3031 memcpy(dest, &dest_byte, sizeof(dest_byte));
3035 * ice_write_word - write a word to a packed context structure
3036 * @src_ctx: the context structure to read from
3037 * @dest_ctx: the context to be written to
3038 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
3049 from = src_ctx + ce_info->offset;
3051 /* prepare the bits and mask */
3052 shift_width = ce_info->lsb % 8;
3053 mask = BIT(ce_info->width) - 1;
3055 /* don't swizzle the bits until after the mask because the mask bits
3056 * will be in a different bit position on big endian machines
	src_word = *(u16 *)from;
	src_word &= mask;
3061 /* shift to correct alignment */
3062 mask <<= shift_width;
3063 src_word <<= shift_width;
3065 /* get the current bits from the target bit string */
3066 dest = dest_ctx + (ce_info->lsb / 8);
3068 memcpy(&dest_word, dest, sizeof(dest_word));
3070 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
3071 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
3073 /* put it all back */
3074 memcpy(dest, &dest_word, sizeof(dest_word));
3078 * ice_write_dword - write a dword to a packed context structure
3079 * @src_ctx: the context structure to read from
3080 * @dest_ctx: the context to be written to
3081 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;
3091 /* copy from the next struct field */
3092 from = src_ctx + ce_info->offset;
3094 /* prepare the bits and mask */
3095 shift_width = ce_info->lsb % 8;
3097 /* if the field width is exactly 32 on an x86 machine, then the shift
3098 * operation will not work because the SHL instructions count is masked
3099 * to 5 bits so the shift will do nothing
3101 if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;
3106 /* don't swizzle the bits until after the mask because the mask bits
3107 * will be in a different bit position on big endian machines
	src_dword = *(u32 *)from;
	src_dword &= mask;
3112 /* shift to correct alignment */
3113 mask <<= shift_width;
3114 src_dword <<= shift_width;
3116 /* get the current bits from the target bit string */
3117 dest = dest_ctx + (ce_info->lsb / 8);
3119 memcpy(&dest_dword, dest, sizeof(dest_dword));
3121 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
3122 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
3124 /* put it all back */
3125 memcpy(dest, &dest_dword, sizeof(dest_dword));
3129 * ice_write_qword - write a qword to a packed context structure
3130 * @src_ctx: the context structure to read from
3131 * @dest_ctx: the context to be written to
3132 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;
3142 /* copy from the next struct field */
3143 from = src_ctx + ce_info->offset;
3145 /* prepare the bits and mask */
3146 shift_width = ce_info->lsb % 8;
3148 /* if the field width is exactly 64 on an x86 machine, then the shift
3149 * operation will not work because the SHL instructions count is masked
3150 * to 6 bits so the shift will do nothing
3152 if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;
3157 /* don't swizzle the bits until after the mask because the mask bits
3158 * will be in a different bit position on big endian machines
	src_qword = *(u64 *)from;
	src_qword &= mask;
3163 /* shift to correct alignment */
3164 mask <<= shift_width;
3165 src_qword <<= shift_width;
3167 /* get the current bits from the target bit string */
3168 dest = dest_ctx + (ce_info->lsb / 8);
3170 memcpy(&dest_qword, dest, sizeof(dest_qword));
3172 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
3173 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
3175 /* put it all back */
3176 memcpy(dest, &dest_qword, sizeof(dest_qword));
3180 * ice_set_ctx - set context bits in packed structure
3181 * @src_ctx: pointer to a generic non-packed context structure
3182 * @dest_ctx: pointer to memory for the packed structure
3183 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
3191 /* We have to deal with each element of the FW response
3192 * using the correct size so that we are correct regardless
3193 * of the endianness of the machine.
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}
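/* Example (illustrative sketch): how a context-element table drives
 * ice_set_ctx(). The struct and table here are hypothetical; real tables
 * such as ice_tlan_ctx_info are built the same way with ICE_CTX_STORE()
 * from ice_lan_tx_rx.h.
 */
struct ice_example_ctx {
	u16 base;
	u8 ena;
};

static const struct ice_ctx_ele ice_example_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_example_ctx, base, 13, 0),
	ICE_CTX_STORE(ice_example_ctx, ena, 1, 13),
	{ 0 }
};

static void ice_example_pack(u8 *buf)
{
	struct ice_example_ctx ctx = { .base = 0x123, .ena = 1 };

	/* packs ctx into buf according to the width/LSB table above */
	ice_set_ctx((u8 *)&ctx, buf, ice_example_ctx_info);
}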
3217 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3218 * @hw: pointer to the HW struct
3219 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
static struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
3226 struct ice_vsi_ctx *vsi;
3227 struct ice_q_ctx *q_ctx;
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}
 * ice_ena_vsi_txq
 * @pi: port information structure
3243 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
3246 * @num_qgrps: Number of added queue groups
3247 * @buf: list of queue groups to be added
3248 * @buf_size: size of buffer for indirect command
3249 * @cd: pointer to command details structure or NULL
3251 * This function adds one LAN queue
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3255 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3256 struct ice_sq_cd *cd)
3258 struct ice_aqc_txsched_elem_data node = { 0 };
3259 struct ice_sched_node *parent;
3260 struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
3275 mutex_lock(&pi->sched_lock);
3277 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}
3285 /* find a parent node */
3286 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3287 ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}
3293 buf->parent_teid = parent->info.node_teid;
3294 node.parent_teid = parent->info.node_teid;
3295 /* Mark that the values in the "generic" section as valid. The default
3296 * value in the "generic" section is zero. This means that :
3297 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3298 * - 0 priority among siblings, indicated by Bit 1-3.
3299 * - WFQ, indicated by Bit 4.
3300 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 *   Bit 5-6.
	 * - Bit 7 is reserved.
3303 * Without setting the generic section as valid in valid_sections, the
3304 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3306 buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
3308 /* add the LAN queue */
3309 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}
3317 node.node_teid = buf->txqs[0].q_teid;
3318 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3319 q_ctx->q_handle = q_handle;
3320 q_ctx->q_teid = le32_to_cpu(node.node_teid);
3322 /* add a leaf node into scheduler tree queue layer */
3323 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}
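/* Example (illustrative sketch): enabling a single LAN Tx queue on TC 0,
 * queue handle 0. The caller is assumed to have packed the queue context
 * into the queue group buffer already (e.g. via ice_set_ctx()).
 * Hypothetical function.
 */
static enum ice_status
ice_example_ena_one_txq(struct ice_port_info *pi, u16 vsi_handle,
			struct ice_aqc_add_tx_qgrp *qg)
{
	qg->num_txqs = 1;

	return ice_ena_vsi_txq(pi, vsi_handle, 0, 0, 1, qg, sizeof(*qg),
			       NULL);
}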
 * ice_dis_vsi_txq
 * @pi: port information structure
3335 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
3338 * @q_handles: pointer to software queue handle array
3339 * @q_ids: pointer to the q_id array
3340 * @q_teids: pointer to queue node teids
3341 * @rst_src: if called due to reset, specifies the reset source
3342 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3343 * @cd: pointer to command details structure or NULL
3345 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3349 u16 *q_handles, u16 *q_ids, u32 *q_teids,
3350 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3351 struct ice_sq_cd *cd)
3353 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3354 struct ice_aqc_dis_txq_item qg_list;
	struct ice_q_ctx *q_ctx;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;
	/* if queue is disabled already yet the disable queue command
	 * has to be sent to complete the VF reset, then call
	 * ice_aq_dis_lan_txq without any queue information
	 */
	if (!num_queues && rst_src)
		return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
					  vmvf_num, NULL);
3372 mutex_lock(&pi->sched_lock);
3374 for (i = 0; i < num_queues; i++) {
3375 struct ice_sched_node *node;
3377 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;

		q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
3386 if (q_ctx->q_handle != q_handles[i]) {
3387 ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}

		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
3394 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), rst_src, vmvf_num,
					    cd);
		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}
3408 * ice_cfg_vsi_qs - configure the new/existing VSI queues
3409 * @pi: port information structure
3410 * @vsi_handle: software VSI handle
3411 * @tc_bitmap: TC bitmap
3412 * @maxqs: max queues array per TC
3413 * @owner: LAN or RDMA
3415 * This function adds/updates the VSI queues per TC.
3417 static enum ice_status
3418 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3419 u16 *maxqs, u8 owner)
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;
3427 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3428 return ICE_ERR_PARAM;
3430 mutex_lock(&pi->sched_lock);
3432 ice_for_each_traffic_class(i) {
3433 /* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;
3437 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}
3448 * ice_cfg_vsi_lan - configure VSI LAN queues
3449 * @pi: port information structure
3450 * @vsi_handle: software VSI handle
3451 * @tc_bitmap: TC bitmap
3452 * @max_lanqs: max LAN queues array per TC
3454 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
3460 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}
3465 * ice_replay_pre_init - replay pre initialization
3466 * @hw: pointer to the HW struct
3468 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
3470 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;
3475 /* Delete old entries from replay filter list head if there is any */
3476 ice_rm_all_sw_replay_rule_info(hw);
3477 /* In start of replay, move entries into replay_rules list, it
3478 * will allow adding rules entries back to filt_rules list,
3479 * which is operational list.
3481 for (i = 0; i < ICE_SW_LKUP_LAST; i++)
3482 list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);

	return 0;
}
3489 * ice_replay_vsi - replay VSI configuration
3490 * @hw: pointer to the HW struct
3491 * @vsi_handle: driver VSI handle
3493 * Restore all VSI configuration after reset. It is required to call this
3494 * function with main VSI first.
3496 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
3498 enum ice_status status;
3500 if (!ice_is_vsi_valid(hw, vsi_handle))
3501 return ICE_ERR_PARAM;
3503 /* Replay pre-initialization if there is any */
3504 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
3509 /* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
3513 /* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);

	return status;
}
3519 * ice_replay_post - post replay configuration cleanup
3520 * @hw: pointer to the HW struct
3522 * Post replay cleanup.
3524 void ice_replay_post(struct ice_hw *hw)
3526 /* Delete old entries from replay filter list head */
3527 ice_rm_all_sw_replay_rule_info(hw);
3531 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
3532 * @hw: ptr to the hardware info
3533 * @reg: offset of 64 bit HW register to read from
3534 * @prev_stat_loaded: bool to specify if previous stats are loaded
3535 * @prev_stat: ptr to previous loaded stat value
3536 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3540 u64 *prev_stat, u64 *cur_stat)
3542 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
3544 /* device stats are not reset at PFR, they likely will not be zeroed
3545 * when the driver starts. Thus, save the value from the first read
3546 * without adding to the statistic value so that we report stats which
3547 * count up from zero.
3549 if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}
3554 /* Calculate the difference between the new and old values, and then
3555 * add it to the software stat value.
3557 if (new_data >= *prev_stat)
3558 *cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
3563 /* Update the previously stored value to prepare for next read */
3564 *prev_stat = new_data;
3568 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
3569 * @hw: ptr to the hardware info
3570 * @reg: offset of HW register to read from
3571 * @prev_stat_loaded: bool to specify if previous stats are loaded
3572 * @prev_stat: ptr to previous loaded stat value
3573 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3577 u64 *prev_stat, u64 *cur_stat)
3581 new_data = rd32(hw, reg);
3583 /* device stats are not reset at PFR, they likely will not be zeroed
3584 * when the driver starts. Thus, save the value from the first read
3585 * without adding to the statistic value so that we report stats which
3586 * count up from zero.
3588 if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}
3593 /* Calculate the difference between the new and old values, and then
3594 * add it to the software stat value.
3596 if (new_data >= *prev_stat)
3597 *cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
3602 /* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
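/* Example (illustrative sketch): accumulating one 32 bit HW counter into a
 * 64 bit software statistic across reads; the caller owns the primed flag
 * and the two accumulators. Hypothetical function.
 */
static void ice_example_poll_stat(struct ice_hw *hw, u32 reg, bool *primed,
				  u64 *prev, u64 *cur)
{
	ice_stat_update32(hw, reg, *primed, prev, cur);
	*primed = true;	/* first call only snapshots the HW value */
}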
3607 * ice_sched_query_elem - query element information from HW
3608 * @hw: pointer to the HW struct
3609 * @node_teid: node TEID to be queried
3610 * @buf: buffer to element information
3612 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_get_elem *buf)
{
3618 u16 buf_size, num_elem_ret = 0;
3619 enum ice_status status;
3621 buf_size = sizeof(*buf);
3622 memset(buf, 0, buf_size);
3623 buf->generic[0].node_teid = cpu_to_le32(node_teid);
3624 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					   NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}