/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "sta.h"

static void
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
			  u16 tid, u16 ssn)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"BAR sent to %pM, tid %d, ssn %d",
				addr, tid, ssn);
}

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

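/*
 * Build the offload_assist field of the Tx command for HW checksum
 * offload. When HW csum can't be used (unsupported protocol, SW csum
 * offload forced), the checksum is computed in software via
 * skb_checksum_help() and offload_assist is returned carrying only the
 * bits the caller already set.
 */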
static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_hdr *hdr,
			   struct ieee80211_tx_info *info,
			   u16 offload_assist)
{
#if IS_ENABLED(CONFIG_INET)
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u8 protocol = 0;

	/*
	 * Do not compute checksum if already computed or if transport will
	 * compute it
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
		goto out;

	/* We do not expect to be requested to csum stuff we do not support */
	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
		      (skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_IPV6)),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		goto out;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			struct ipv6_opt_hdr *hp;

			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST) {
				skb_checksum_help(skb);
				goto out;
			}

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			protocol = hp->nexthdr;
			off += ipv6_optlen(hp);
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		WARN_ON_ONCE(1);
		skb_checksum_help(skb);
		goto out;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/*
	 * Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 * Size is in words.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);

	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
	if (skb->protocol == htons(ETH_P_IP) &&
	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

	/*
	 * The MAC header length should include the IV, whose size is in
	 * words, unless the IV is added by the firmware like in WEP.
	 * In the new Tx API, the IV is always added by the firmware.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
		mh_len += info->control.hw_key->iv_len;
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

out:
#endif
	return offload_assist;
}

/*
 * Sets most of the Tx cmd's fields
 */
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			struct iwl_tx_cmd *tx_cmd,
			struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;
	u16 offload_assist = 0;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
	} else if (ieee80211_is_back_req(fc)) {
		struct ieee80211_bar *bar = (void *)skb->data;
		u16 control = le16_to_cpu(bar->control);
		u16 ssn = le16_to_cpu(bar->start_seq_num);

		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
		tx_cmd->tid_tspec = (control &
				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
					  ssn);
	} else {
		if (ieee80211_is_data(fc))
			tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		else
			tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;

		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* Default to 0 (BE) when tid_spec is set to IWL_MAX_TID_COUNT */
	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
	else
		ac = tid_to_mac80211_ac[0];

	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
		TX_CMD_FLG_BT_PRIO_POS;

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
		else if (ieee80211_is_action(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);

		/* The spec allows Action frames in A-MPDU, we don't support
		 * it
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
	} else {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
	}

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
	    ieee80211_action_contains_tpc(skb))
		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;

	/* padding is inserted later in transport */
	if (ieee80211_hdrlen(fc) % 4 &&
	    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
		offload_assist |= BIT(TX_CMD_OFFLD_PAD);

	tx_cmd->offload_assist |=
		cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info,
					    offload_assist));
}

static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
			      struct ieee80211_tx_info *info,
			      struct ieee80211_sta *sta, __le16 fc)
{
	if (info->band == NL80211_BAND_2GHZ &&
	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
		return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;

	if (sta && ieee80211_is_data(fc)) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
	}

	return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
}

static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
			       struct ieee80211_tx_info *info,
			       struct ieee80211_sta *sta)
{
	int rate_idx;
	u8 rate_plcp;
	u32 rate_flags = 0;

	/* HT rate doesn't make sense for a non data frame */
	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
		  info->control.rates[0].flags,
		  info->control.rates[0].idx);

	rate_idx = info->control.rates[0].idx;
	/* if the rate isn't a well known legacy rate, take the lowest one */
	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
		rate_idx = rate_lowest_index(
				&mvm->nvm_data->bands[info->band], sta);

	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
	if (info->band == NL80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;

	/* For 2.4 GHz band, check that there is no need to remap */
	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	return (u32)rate_plcp | rate_flags;
}

static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
				       struct ieee80211_tx_info *info,
				       struct ieee80211_sta *sta, __le16 fc)
{
	return iwl_mvm_get_tx_rate(mvm, info, sta) |
		iwl_mvm_get_tx_ant(mvm, info, sta, fc);
}

/*
 * Sets the fields in the Tx cmd that are rate related
 */
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
			     struct ieee80211_tx_info *info,
			     struct ieee80211_sta *sta, __le16 fc)
{
	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc)) {
		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
		tx_cmd->rts_retry_limit =
			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
	} else {
		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
	}

	/*
	 * For data packets, rate info comes from the table inside the fw.
	 * This table is controlled by LINK_QUALITY commands.
	 */
	if (ieee80211_is_data(fc) && sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
			tx_cmd->initial_rate_index = 0;
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
			return;
		}
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->tx_flags |=
			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
	}

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags =
		cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
}

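/*
 * Write the 48-bit CCMP/GCMP packet number into the crypto header.
 * The PN comes from the per-key atomic counter; byte 3 carries the
 * Ext IV bit (0x20) and the key index, byte 2 is reserved and cleared.
 * Illustrative example: pn = 0x0000aabbccddeeff with keyidx 0 yields
 * crypto_hdr[] = { 0xff, 0xee, 0x00, 0x20, 0xdd, 0xcc, 0xbb, 0xaa }.
 */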
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
					 u8 *crypto_hdr)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u64 pn;

	pn = atomic64_inc_return(&keyconf->tx_pn);
	crypto_hdr[0] = pn;
	crypto_hdr[2] = 0;
	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
	crypto_hdr[1] = pn >> 8;
	crypto_hdr[4] = pn >> 16;
	crypto_hdr[5] = pn >> 24;
	crypto_hdr[6] = pn >> 32;
	crypto_hdr[7] = pn >> 40;
}

/*
 * Sets the fields in the Tx cmd that are crypto related
 */
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int hdrlen)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u8 *crypto_hdr = skb_frag->data + hdrlen;
	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
	u64 pn;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		pn = atomic64_inc_return(&keyconf->tx_pn);
		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
			  TX_CMD_SEC_WEP_KEY_IDX_MSK);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		type = TX_CMD_SEC_GCMP;
		/* fall through */
	case WLAN_CIPHER_SUITE_CCMP_256:
		/* TODO: Taking the key from the table might introduce a race
		 * when PTK rekeying is done: old packets carry a PN based on
		 * the old key while the message is encrypted with a new one.
		 * Need to handle this.
		 */
		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
		tx_cmd->key[0] = keyconf->hw_key_idx;
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;
	default:
		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
	}
}

/*
 * Allocates and sets the Tx cmd and the driver data pointers in the skb
 */
static struct iwl_device_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
		      struct ieee80211_tx_info *info, int hdrlen,
		      struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;

	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);

	if (unlikely(!dev_cmd))
		return NULL;

	/* Make sure we zero enough of dev_cmd */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));

	memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
	dev_cmd->hdr.cmd = TX_CMD;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		u16 offload_assist = 0;
		u32 rate_n_flags = 0;
		u16 flags = 0;
		struct iwl_mvm_sta *mvmsta = sta ?
			iwl_mvm_sta_from_mac80211(sta) : NULL;

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			u8 *qc = ieee80211_get_qos_ctl(hdr);

			if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
				offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
		}

		offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info,
						 offload_assist);

		/* padding is inserted later in transport */
		if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
		    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
			offload_assist |= BIT(TX_CMD_OFFLD_PAD);

		if (!info->control.hw_key)
			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;

		/*
		 * For data packets rate info comes from the fw. Only
		 * set rate/antenna during connection establishment.
		 */
		if (sta && (!ieee80211_is_data(hdr->frame_control) ||
			    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) {
			flags |= IWL_TX_FLAGS_CMD_RATE;
			rate_n_flags =
				iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
							    hdr->frame_control);
		}

		if (mvm->trans->cfg->device_family >=
		    IWL_DEVICE_FAMILY_22560) {
			struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;

			cmd->offload_assist |= cpu_to_le32(offload_assist);

			/* Total # bytes to be transmitted */
			cmd->len = cpu_to_le16((u16)skb->len);

			/* Copy MAC header from skb into command buffer */
			memcpy(cmd->hdr, hdr, hdrlen);

			cmd->flags = cpu_to_le16(flags);
			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
		} else {
			struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;

			cmd->offload_assist |= cpu_to_le16(offload_assist);

			/* Total # bytes to be transmitted */
			cmd->len = cpu_to_le16((u16)skb->len);

			/* Copy MAC header from skb into command buffer */
			memcpy(cmd->hdr, hdr, hdrlen);

			cmd->flags = cpu_to_le32(flags);
			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
		}
		goto out;
	}

	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);

	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdrlen);

out:
	return dev_cmd;
}

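/*
 * Prepare the skb's CB for Tx: clear the mac80211 status area and stash
 * the device command pointer in driver_data[1], where the Tx response
 * and reclaim paths expect to find it in order to free it.
 */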
static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
				       struct iwl_device_cmd *cmd)
{
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);

	memset(&skb_info->status, 0, sizeof(skb_info->status));
	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));

	skb_info->driver_data[1] = cmd;
}

static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_vif *mvmvif =
		iwl_mvm_vif_from_mac80211(info->control.vif);
	__le16 fc = hdr->frame_control;

	switch (info->control.vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		/*
		 * Non-bufferable frames use the broadcast station, thus they
		 * use the probe queue.
		 * Also take care of the case where we send a deauth to a
		 * station that we don't have, or similarly an association
		 * response (with non-success status) for a station we can't
		 * accept.
		 * Also, disassociate frames might happen, particular with
		 * reason 7 ("Class 3 frame received from nonassociated STA").
		 */
		if (ieee80211_is_mgmt(fc) &&
		    (!ieee80211_is_bufferable_mmpdu(fc) ||
		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
			return mvm->probe_queue;

		if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
		    is_multicast_ether_addr(hdr->addr1))
			return mvmvif->cab_queue;

		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
			  "fc=0x%02x", le16_to_cpu(fc));
		return mvm->probe_queue;
	case NL80211_IFTYPE_P2P_DEVICE:
		if (ieee80211_is_mgmt(fc))
			return mvm->p2p_dev_queue;

		WARN_ON_ONCE(1);
		return mvm->p2p_dev_queue;
	default:
		WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
		return -1;
	}
}

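/*
 * Append the P2P Notice of Absence attribute reported by the firmware
 * to a probe response, as a new WFA vendor-specific IE at the end of
 * the frame. The frame must already contain a P2P IE, otherwise the
 * NoA data is not added.
 */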
static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
				       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_mvm_vif *mvmvif =
		iwl_mvm_vif_from_mac80211(info->control.vif);
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
	struct iwl_probe_resp_data *resp_data;
	u8 *ie, *pos;
	u8 match[] = {
		(WLAN_OUI_WFA >> 16) & 0xff,
		(WLAN_OUI_WFA >> 8) & 0xff,
		WLAN_OUI_WFA & 0xff,
		WLAN_OUI_TYPE_WFA_P2P,
	};

	rcu_read_lock();

	resp_data = rcu_dereference(mvmvif->probe_resp_data);
	if (!resp_data)
		goto out;

	if (!resp_data->notif.noa_active)
		goto out;

	ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
					  mgmt->u.probe_resp.variable,
					  skb->len - base_len,
					  match, 4, 2);
	if (!ie) {
		IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
		goto out;
	}

	if (skb_tailroom(skb) < resp_data->noa_len) {
		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
			IWL_ERR(mvm,
				"Failed to reallocate probe resp\n");
			goto out;
		}
	}

	pos = skb_put(skb, resp_data->noa_len);

	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
	/* Set length of IE body (not including ID and length itself) */
	*pos++ = resp_data->noa_len - 2;
	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
	*pos++ = WLAN_OUI_WFA & 0xff;
	*pos++ = WLAN_OUI_TYPE_WFA_P2P;

	memcpy(pos, &resp_data->notif.noa_attr,
	       resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));

out:
	rcu_read_unlock();
}

int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info info;
	struct iwl_device_cmd *dev_cmd;
	u8 sta_id;
	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	__le16 fc = hdr->frame_control;
	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
		IEEE80211_TX_CTL_TX_OFFCHAN;
	int queue = -1;

	memcpy(&info, skb->cb, sizeof(info));

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
		return -1;

	if (info.control.vif) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info.control.vif);

		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
		    info.control.vif->type == NL80211_IFTYPE_AP ||
		    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
			if (!ieee80211_is_data(hdr->frame_control))
				sta_id = mvmvif->bcast_sta.sta_id;
			else
				sta_id = mvmvif->mcast_sta.sta_id;

			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
			queue = mvm->snif_queue;
			sta_id = mvm->snif_sta.sta_id;
		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
			   offchannel) {
			/*
			 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
			 * that can be used in 2 different types of vifs, P2P &
			 * STATION.
			 * P2P uses the offchannel queue.
			 * STATION (HS2.0) uses the auxiliary context of the FW,
			 * and hence needs to be sent on the aux queue.
			 */
			sta_id = mvm->aux_sta.sta_id;
			queue = mvm->aux_queue;
		}
	}

	if (queue < 0) {
		IWL_ERR(mvm, "No queue was found. Dropping TX\n");
		return -1;
	}

	if (unlikely(ieee80211_is_probe_resp(fc)))
		iwl_mvm_probe_resp_set_noa(mvm, skb);

	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
	if (!dev_cmd)
		return -1;

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		return -1;
	}

	return 0;
}

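/*
 * Compute the maximal A-MSDU size allowed for this station/TID, bounded
 * by the TX FIFO size. Illustrative example: with an 8192-byte TX FIFO,
 * the returned limit is min(mvmsta->max_amsdu_len, 8192 - 256), keeping
 * 256 bytes of margin for the TX command + headers and leaving room for
 * the start of the next packet inside the FIFO.
 */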
unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta, unsigned int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
	u8 ac = tid_to_mac80211_ac[tid];
	unsigned int txf;
	int lmac = IWL_LMAC_24G_INDEX;

	if (iwl_mvm_is_cdb_supported(mvm) &&
	    band == NL80211_BAND_5GHZ)
		lmac = IWL_LMAC_5G_INDEX;

	/* For HE redirect to trigger based fifos */
	if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
		ac += 4;

	txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);

	/*
	 * Don't send an AMSDU that will be longer than the TXF.
	 * Add a security margin of 256 for the TX command + headers.
	 * We also want to have the start of the next packet inside the
	 * fifo to be able to send bursts.
	 */
	return min_t(unsigned int, mvmsta->max_amsdu_len,
		     mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
}

/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
	unsigned long now = jiffies;
	int tid;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
			return true;
	}

	return false;
}

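/*
 * Traffic load (TCM) accounting: each MAC context accumulates airtime
 * and per-AC packet counts, which the TCM work periodically evaluates.
 */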
static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
			       struct iwl_mvm_sta *mvmsta,
			       int airtime)
{
	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
	struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

	if (mvm->tcm.paused)
		return;

	if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
		schedule_delayed_work(&mvm->tcm.work, 0);

	mdata->tx.airtime += airtime;
}

static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvmsta, int tid)
{
	u32 ac = tid_to_mac80211_ac[tid];
	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
	struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

	mdata->tx.pkts[ac]++;
}

/*
 * Prepares the Tx cmd for an MPDU and hands it to the transport layer
 */
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_tx_info *info,
			   struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_mvm_sta *mvmsta;
	struct iwl_device_cmd *dev_cmd;
	__le16 fc;
	u16 seq_number = 0;
	u8 tid = IWL_MAX_TID_COUNT;
	u16 txq_id;
	bool is_ampdu = false;
	int hdrlen;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;

	if (unlikely(ieee80211_is_probe_resp(fc)))
		iwl_mvm_probe_resp_set_noa(mvm, skb);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
					sta, mvmsta->sta_id);
	if (!dev_cmd)
		goto drop;

	/*
	 * we handle that entirely ourselves -- for uAPSD the firmware
	 * will always send a notification, and for PS-Poll responses
	 * we'll notify mac80211 when getting frame status
	 */
	info->flags &= ~IEEE80211_TX_STATUS_EOSP;

	spin_lock(&mvmsta->lock);

	/* nullfunc frames should go to the MGMT queue regardless of QOS,
	 * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default
	 * assignment of MGMT TID
	 */
	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		tid = ieee80211_get_tid(hdr);
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;

		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
		if (WARN_ON_ONCE(is_ampdu &&
				 mvmsta->tid_data[tid].state != IWL_AGG_ON))
			goto drop_unlock_sta;

		seq_number = mvmsta->tid_data[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

			hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
			hdr->seq_ctrl |= cpu_to_le16(seq_number);
			/* update the tx_cmd hdr as it was already copied */
			tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
		}
	} else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) {
		tid = IWL_TID_NON_QOS;
	}

	txq_id = mvmsta->tid_data[tid].txq_id;

	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

	if (WARN_ON_ONCE(txq_id == IWL_MVM_INVALID_QUEUE)) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		spin_unlock(&mvmsta->lock);
		return 0;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		/* Keep track of the time of the last frame for this RA/TID */
		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;

		/*
		 * If we have timed-out TIDs - schedule the worker that will
		 * reconfig the queues and update them
		 *
		 * Note that no lock is taken here in order to not serialize
		 * the TX flow. This isn't dangerous because scheduling
		 * mvm->add_stream_wk can't ruin the state, and if we DON'T
		 * schedule it due to some race condition then next TX we get
		 * here we will.
		 */
		if (unlikely(mvm->queue_info[txq_id].status ==
			     IWL_MVM_QUEUE_SHARED &&
			     iwl_mvm_txq_should_update(mvm, txq_id)))
			schedule_work(&mvm->add_stream_wk);
	}

	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;

	spin_unlock(&mvmsta->lock);

	iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);

	return 0;

drop_unlock_sta:
	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
	spin_unlock(&mvmsta->lock);
drop:
	return -1;
}

int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_tx_info info;

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;

	memcpy(&info, skb->cb, sizeof(info));

	return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
}

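/*
 * Called with the station lock held after frames were reclaimed for
 * this RA/TID: updates mac80211's buffered-frames state and, once the
 * queue has drained up to the SSN, resumes a pending ADDBA/DELBA flow.
 */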
static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta, u8 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct ieee80211_vif *vif = mvmsta->vif;
	u16 normalized_ssn;

	lockdep_assert_held(&mvmsta->lock);

	if ((tid_data->state == IWL_AGG_ON ||
	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
	    iwl_mvm_tid_queued(mvm, tid_data) == 0) {
		/*
		 * Now that this aggregation or DQA queue is empty tell
		 * mac80211 so it knows we no longer have frames buffered for
		 * the station on this TID (for the TIM bitmap calculation.)
		 */
		ieee80211_sta_set_buffered(sta, tid, false);
	}

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn != tid_data->next_reclaimed)
		return;

	switch (tid_data->state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue addBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IWL_EMPTYING_HW_QUEUE_DELBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue DELBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_OFF;
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	default:
		break;
	}
}

#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(SMALL_CF_POLL);
	TX_STATUS_FAIL(FW_DROP);
	TX_STATUS_FAIL(STA_COLOR_MISMATCH);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */

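/*
 * Translate a firmware rate_n_flags value into a mac80211
 * ieee80211_tx_rate: bandwidth, guard interval and HT/VHT MCS or
 * legacy rate index.
 */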
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
			       enum nl80211_band band,
			       struct ieee80211_tx_rate *r)
{
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	}
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		r->flags |= IEEE80211_TX_RC_MCS;
		r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
		ieee80211_rate_set_vht(
			r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
						RATE_VHT_MCS_NSS_POS) + 1);
		r->flags |= IEEE80211_TX_RC_VHT_MCS;
	} else {
		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
							     band);
	}
}

/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
					struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->status.rates[0];

	info->status.antenna =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
}

static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
					    u32 status)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_tx_status *status_trig;
	int i;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
				     FW_DBG_TRIGGER_TX_STATUS);
	if (!trig)
		return;

	status_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
		/* don't collect on status 0 */
		if (!status_trig->statuses[i].status)
			break;

		if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"Tx status %d was received",
					status & TX_STATUS_MSK);
		break;
	}
}

/**
 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
 * @mvm: the mvm object (used to tell the response format)
 * @tx_resp: the Tx response from the fw (agg or non-agg)
 *
 * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
 * it can't know that everything will go well until the end of the AMPDU, it
 * can't know in advance the number of MPDUs that will be sent in the current
 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
 * Hence, it can't know in advance what the SSN of the SCD will be at the end
 * of the batch. This is why the SSN of the SCD is written at the end of the
 * whole struct at a variable offset. This function knows how to cope with the
 * variable offset and returns the SSN of the SCD.
 */
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
				      struct iwl_mvm_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
			    tx_resp->frame_count) & 0xfff;
}

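/*
 * Handle a Tx response for a single frame (frame_count == 1): reclaim
 * everything up to the SCD SSN, fill in the mac80211 Tx status for each
 * freed skb and hand it back, then update the power-save/EOSP
 * bookkeeping for the station.
 */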
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
				     struct iwl_rx_packet *pkt)
{
	struct ieee80211_sta *sta;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	/* struct iwl_mvm_tx_resp_v3 is almost the same */
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	struct agg_tx_status *agg_status =
		iwl_mvm_get_agg_status(mvm, tx_resp);
	u32 status = le16_to_cpu(agg_status->status);
	u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
	struct sk_buff_head skbs;
	u8 skb_freed = 0;
	u8 lq_color;
	u16 next_reclaimed, seq_ctl;
	bool is_ndp = false;

	__skb_queue_head_init(&skbs);

	if (iwl_mvm_has_new_tx_api(mvm))
		txq_id = le16_to_cpu(tx_resp->tx_queue);

	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);

	/* we can free until ssn % q.n_bd not inclusive */
	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_hdr *hdr = (void *)skb->data;
		bool flushed = false;

		skb_freed++;

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));

		/* inform mac80211 about what happened with the frame */
		switch (status & TX_STATUS_MSK) {
		case TX_STATUS_SUCCESS:
		case TX_STATUS_DIRECT_DONE:
			info->flags |= IEEE80211_TX_STAT_ACK;
			break;
		case TX_STATUS_FAIL_FIFO_FLUSHED:
		case TX_STATUS_FAIL_DRAIN_FLOW:
			flushed = true;
			break;
		case TX_STATUS_FAIL_DEST_PS:
			/* the FW should have stopped the queue and not
			 * return this status
			 */
			WARN_ON(1);
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			break;
		default:
			break;
		}

		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
		    ieee80211_is_mgmt(hdr->frame_control))
			iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);

		/*
		 * If we are freeing multiple frames, mark all the frames
		 * but the first one as acked, since they were acknowledged
		 * before
		 */
		if (skb_freed > 1)
			info->flags |= IEEE80211_TX_STAT_ACK;

		iwl_mvm_tx_status_check_trigger(mvm, status);

		info->status.rates[0].count = tx_resp->failure_frame + 1;
		iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
					    info);
		info->status.status_driver_data[1] =
			(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);

		/* Single frame failure in an AMPDU queue => send BAR */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    !(info->flags & IEEE80211_TX_STAT_ACK) &&
		    !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		/* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
		if (ieee80211_is_back_req(hdr->frame_control))
			seq_ctl = 0;
		else if (status != TX_STATUS_SUCCESS)
			seq_ctl = le16_to_cpu(hdr->seq_ctrl);

		if (unlikely(!seq_ctl)) {
			struct ieee80211_hdr *hdr = (void *)skb->data;

			/*
			 * If it is an NDP, we can't update next_reclaim since
			 * its sequence control is 0. Note that for that same
			 * reason, NDPs are never sent to A-MPDU'able queues
			 * so that we can never have more than one freed frame
			 * for a single Tx response (see WARN_ON below).
			 */
			if (ieee80211_is_qos_nullfunc(hdr->frame_control))
				is_ndp = true;
		}

		/*
		 * TODO: this is not accurate if we are freeing more than one
		 * packet.
		 */
		info->status.tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
		BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
		lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
		info->status.status_driver_data[0] =
			RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);

		ieee80211_tx_status(mvm->hw, skb);
	}

	/* This is an aggregation queue or might become one, so we use
	 * the ssn since: ssn = wifi seq_num % 256.
	 * The seq_ctl is the sequence control of the packet to which
	 * this Tx response relates. But if there is a hole in the
	 * bitmap of the BA we received, this Tx response may allow to
	 * reclaim the hole and all the subsequent packets that were
	 * already acked. In that case, seq_ctl != ssn, and the next
	 * packet to be reclaimed will be ssn and not seq_ctl. In that
	 * case, several packets will be reclaimed even if
	 * frame_count = 1.
	 *
	 * The ssn is the index (% 256) of the latest packet that has
	 * been treated (acked / dropped) + 1.
	 */
	next_reclaimed = ssn;

	IWL_DEBUG_TX_REPLY(mvm,
			   "TXQ %d status %s (0x%08x)\n",
			   txq_id, iwl_mvm_get_tx_fail_reason(status), status);

	IWL_DEBUG_TX_REPLY(mvm,
			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
			   le32_to_cpu(tx_resp->initial_rate),
			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
			   ssn, next_reclaimed, seq_ctl);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	/*
	 * sta can't be NULL otherwise it'd mean that the sta has been freed in
	 * the firmware while we still have packets for it in the Tx queues.
	 */
	if (WARN_ON_ONCE(!sta))
		goto out;

	if (!IS_ERR(sta)) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le16_to_cpu(tx_resp->wireless_media_time));

		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
			iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);

		if (sta->wme && tid != IWL_MGMT_TID) {
			struct iwl_mvm_tid_data *tid_data =
				&mvmsta->tid_data[tid];
			bool send_eosp_ndp = false;

			spin_lock_bh(&mvmsta->lock);

			if (!is_ndp) {
				tid_data->next_reclaimed = next_reclaimed;
				IWL_DEBUG_TX_REPLY(mvm,
						   "Next reclaimed packet:%d\n",
						   next_reclaimed);
			} else {
				IWL_DEBUG_TX_REPLY(mvm,
						   "NDP - don't update next_reclaimed\n");
			}

			iwl_mvm_check_ratid_empty(mvm, sta, tid);

			if (mvmsta->sleep_tx_count) {
				mvmsta->sleep_tx_count--;
				if (mvmsta->sleep_tx_count &&
				    !iwl_mvm_tid_queued(mvm, tid_data)) {
					/*
					 * The number of frames in the queue
					 * dropped to 0 even if we sent less
					 * frames than we thought we had on the
					 * Tx queue.
					 * This means we had holes in the BA
					 * window that we just filled, ask
					 * mac80211 to send EOSP since the
					 * firmware won't know how to do that.
					 * Send NDP and the firmware will send
					 * EOSP notification that will trigger
					 * a call to ieee80211_sta_eosp().
					 */
					send_eosp_ndp = true;
				}
			}

			spin_unlock_bh(&mvmsta->lock);
			if (send_eosp_ndp) {
				iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
					IEEE80211_FRAME_RELEASE_UAPSD,
					1, tid, false, false);
				mvmsta->sleep_tx_count = 0;
				ieee80211_send_eosp_nullfunc(sta, tid);
			}
		}

		if (mvmsta->next_status_eosp) {
			mvmsta->next_status_eosp = false;
			ieee80211_sta_eosp(sta);
		}
	}
out:
	rcu_read_unlock();
}

#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
static const char *iwl_get_agg_tx_status(u16 status)
{
	switch (status & AGG_TX_STATE_STATUS_MSK) {
	AGG_TX_STATE_(TRANSMITTED);
	AGG_TX_STATE_(UNDERRUN);
	AGG_TX_STATE_(BT_PRIO);
	AGG_TX_STATE_(FEW_BYTES);
	AGG_TX_STATE_(ABORT);
	AGG_TX_STATE_(TX_ON_AIR_DROP);
	AGG_TX_STATE_(LAST_SENT_TRY_CNT);
	AGG_TX_STATE_(LAST_SENT_BT_KILL);
	AGG_TX_STATE_(SCD_QUERY);
	AGG_TX_STATE_(TEST_BAD_CRC32);
	AGG_TX_STATE_(RESPONSE);
	AGG_TX_STATE_(DUMP_TX);
	AGG_TX_STATE_(DELAY_TX);
	}

	return "UNKNOWN";
}

static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	struct agg_tx_status *frame_status =
		iwl_mvm_get_agg_status(mvm, tx_resp);
	int i;

	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);

		IWL_DEBUG_TX_REPLY(mvm,
				   "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
				   iwl_get_agg_tx_status(fstatus),
				   fstatus & AGG_TX_STATE_STATUS_MSK,
				   (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
					AGG_TX_STATE_TRY_CNT_POS,
				   le16_to_cpu(frame_status[i].sequence));
	}
}
#else
static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	struct iwl_mvm_sta *mvmsta;
	int queue = SEQ_TO_QUEUE(sequence);
	struct ieee80211_sta *sta;

	if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
			 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
		return;

	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (WARN_ON_ONCE(!sta || !sta->wme)) {
		rcu_read_unlock();
		return;
	}

	if (!WARN_ON_ONCE(!mvmsta)) {
		mvmsta->tid_data[tid].rate_n_flags =
			le32_to_cpu(tx_resp->initial_rate);
		mvmsta->tid_data[tid].tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
		mvmsta->tid_data[tid].lq_color =
			TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le16_to_cpu(tx_resp->wireless_media_time));
	}

	rcu_read_unlock();
}

void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;

	if (tx_resp->frame_count == 1)
		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
	else
		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
}

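/*
 * Reclaim all frames up to "index" on queue "txq" following a BA
 * notification, report their status to mac80211 and, when nothing could
 * be reclaimed (e.g. the first MPDU of the aggregation wasn't acked),
 * still update the rate scaling code with sent vs. acked counts.
 */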
static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
			       int txq, int index,
			       struct ieee80211_tx_info *ba_info, u32 rate)
{
	struct sk_buff_head reclaimed_skbs;
	struct iwl_mvm_tid_data *tid_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct sk_buff *skb;
	int freed;

	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
		      tid > IWL_MAX_TID_COUNT,
		      "sta_id %d tid %d", sta_id, tid))
		return;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	/* Reclaiming frames for a station that has been deleted ? */
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	tid_data = &mvmsta->tid_data[tid];

	if (tid_data->txq_id != txq) {
		IWL_ERR(mvm,
			"invalid BA notification: Q %d, tid %d\n",
			tid_data->txq_id, tid);
		rcu_read_unlock();
		return;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/*
	 * Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway).
	 */
	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);

	spin_lock_bh(&mvmsta->lock);

	tid_data->next_reclaimed = index;

	iwl_mvm_check_ratid_empty(mvm, sta, tid);

	freed = 0;

	/* pack lq color from tid_data along the reduced txp */
	ba_info->status.status_driver_data[0] =
		RS_DRV_DATA_PACK(tid_data->lq_color,
				 ba_info->status.status_driver_data[0]);
	ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_hdr *hdr = (void *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));
		/* Packet was transmitted successfully, failures come as single
		 * frames because before failing a frame the firmware transmits
		 * it without aggregation at least once.
		 */
		info->flags |= IEEE80211_TX_STAT_ACK;

		/* this is the first skb we deliver in this batch */
		/* put the rate scaling data there */
		if (freed == 1) {
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			memcpy(&info->status, &ba_info->status,
			       sizeof(ba_info->status));
			iwl_mvm_hwrate_to_tx_status(rate, info);
		}
	}

	spin_unlock_bh(&mvmsta->lock);

	/* We got a BA notif with 0 acked or scd_ssn didn't progress which is
	 * possible (i.e. first MPDU in the aggregation wasn't acked)
	 * Still it's important to update RS about sent vs. acked.
	 */
	if (skb_queue_empty(&reclaimed_skbs)) {
		struct ieee80211_chanctx_conf *chanctx_conf = NULL;

		if (mvmsta->vif)
			chanctx_conf =
				rcu_dereference(mvmsta->vif->chanctx_conf);

		if (WARN_ON_ONCE(!chanctx_conf))
			goto out;

		ba_info->band = chanctx_conf->def.chan->band;
		iwl_mvm_hwrate_to_tx_status(rate, ba_info);

		if (!iwl_mvm_has_tlc_offload(mvm)) {
			IWL_DEBUG_TX_REPLY(mvm,
					   "No reclaim. Update rs directly\n");
			iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
		}
	}

out:
	rcu_read_unlock();

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(mvm->hw, skb);
	}
}

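/*
 * BA notification handler. Two wire formats exist: the compressed BA
 * notification of the new Tx API (iwl_mvm_compressed_ba_notif, which
 * may cover several TFD queues) and the older iwl_mvm_ba_notif that
 * carries a single queue/SSN pair.
 */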
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	int sta_id, tid, txq, index;
	struct ieee80211_tx_info ba_info = {};
	struct iwl_mvm_ba_notif *ba_notif;
	struct iwl_mvm_tid_data *tid_data;
	struct iwl_mvm_sta *mvmsta;

	ba_info.flags = IEEE80211_TX_STAT_AMPDU;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		struct iwl_mvm_compressed_ba_notif *ba_res =
			(void *)pkt->data;
		u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
		int i;

		sta_id = ba_res->sta_id;
		ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
		ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
		ba_info.status.tx_time =
			(u16)le32_to_cpu(ba_res->wireless_time);
		ba_info.status.status_driver_data[0] =
			(void *)(uintptr_t)ba_res->reduced_txp;

		if (!le16_to_cpu(ba_res->tfd_cnt))
			goto out;

		rcu_read_lock();

		mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
		if (!mvmsta)
			goto out_unlock;

		/* Free per TID */
		for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
			struct iwl_mvm_compressed_ba_tfd *ba_tfd =
				&ba_res->tfd[i];

			tid = ba_tfd->tid;
			if (tid == IWL_MGMT_TID)
				tid = IWL_MAX_TID_COUNT;

			mvmsta->tid_data[i].lq_color = lq_color;
			iwl_mvm_tx_reclaim(mvm, sta_id, tid,
					   (int)(le16_to_cpu(ba_tfd->q_num)),
					   le16_to_cpu(ba_tfd->tfd_index),
					   &ba_info,
					   le32_to_cpu(ba_res->tx_rate));
		}

		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le32_to_cpu(ba_res->wireless_time));
out_unlock:
		rcu_read_unlock();
out:
		IWL_DEBUG_TX_REPLY(mvm,
				   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
				   sta_id, le32_to_cpu(ba_res->flags),
				   le16_to_cpu(ba_res->txed),
				   le16_to_cpu(ba_res->done));
		return;
	}

	ba_notif = (void *)pkt->data;
	sta_id = ba_notif->sta_id;
	tid = ba_notif->tid;
	/* "flow" corresponds to Tx queue */
	txq = le16_to_cpu(ba_notif->scd_flow);
	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	index = le16_to_cpu(ba_notif->scd_ssn);

	rcu_read_lock();
	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
	if (WARN_ON_ONCE(!mvmsta)) {
		rcu_read_unlock();
		return;
	}

	tid_data = &mvmsta->tid_data[tid];

	ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
	ba_info.status.ampdu_len = ba_notif->txed;
	ba_info.status.tx_time = tid_data->tx_time;
	ba_info.status.status_driver_data[0] =
		(void *)(uintptr_t)ba_notif->reduced_txp;

	rcu_read_unlock();

	iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
			   tid_data->rate_n_flags);

	IWL_DEBUG_TX_REPLY(mvm,
			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
			   ba_notif->sta_addr, ba_notif->sta_id);

	IWL_DEBUG_TX_REPLY(mvm,
			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
			   le64_to_cpu(ba_notif->bitmap), txq, index,
			   ba_notif->txed, ba_notif->txed_2_done);

	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
			   ba_notif->reduced_txp);
}

/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
{
	int ret;
	struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
		.queues_ctl = cpu_to_le32(tfd_msk),
		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
				   sizeof(flush_cmd), &flush_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
	return ret;
}

int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
			   u16 tids, u32 flags)
{
	int ret;
	struct iwl_tx_path_flush_cmd flush_cmd = {
		.sta_id = cpu_to_le32(sta_id),
		.tid_mask = cpu_to_le16(tids),
	};

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
				   sizeof(flush_cmd), &flush_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
	return ret;
}

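/*
 * iwl_mvm_flush_sta() accepts either an iwl_mvm_sta or an
 * iwl_mvm_int_sta (selected by @internal). The BUILD_BUG_ON below
 * guarantees that sta_id sits at the same offset in both structures,
 * so reading sta_id is valid for either type.
 */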
int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
{
	struct iwl_mvm_int_sta *int_sta = sta;
	struct iwl_mvm_sta *mvm_sta = sta;

	BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) !=
		     offsetof(struct iwl_mvm_sta, sta_id));

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
					      0xff | BIT(IWL_MGMT_TID), flags);

	if (internal)
		return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
					     flags);

	return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
}