1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
22 * The full GNU General Public License is included in this distribution
23 * in the file called COPYING.
25 * Contact Information:
26 * Intel Linux Wireless <linuxwifi@intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 Intel Corporation
35 * All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
41 * * Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * * Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in
45 * the documentation and/or other materials provided with the
47 * * Neither the name Intel Corporation nor the names of its
48 * contributors may be used to endorse or promote products derived
49 * from this software without specific prior written permission.
51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64 #include <net/mac80211.h>
70 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);
72 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
74 struct ieee80211_key_conf *key, bool mcast,
75 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
76 u8 key_offset, bool mfp);
79 * Newer versions of the ADD_STA command added new fields at the end of the
80 * structure, so sending the size of the relevant API's structure is enough to
81 * support both API versions.
83 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
85 if (iwl_mvm_has_new_rx_api(mvm) ||
86 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
87 return sizeof(struct iwl_mvm_add_sta_cmd);
89 return sizeof(struct iwl_mvm_add_sta_cmd_v7);
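/*
 * Pick a free station id for the given interface type; returns
 * IWL_MVM_INVALID_STA if every fw_id_to_mac_id slot is already taken.
 */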
92 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
93 enum nl80211_iftype iftype)
98 BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
99 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
101 lockdep_assert_held(&mvm->mutex);
103 /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
104 if (iftype != NL80211_IFTYPE_STATION)
105 reserved_ids = BIT(0);
107 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
108 for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
109 if (BIT(sta_id) & reserved_ids)
112 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
113 lockdep_is_held(&mvm->mutex)))
116 return IWL_MVM_INVALID_STA;
119 /* send station add/update command to firmware */
120 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
121 bool update, unsigned int flags)
123 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
124 struct iwl_mvm_add_sta_cmd add_sta_cmd = {
125 .sta_id = mvm_sta->sta_id,
126 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
127 .add_modify = update ? 1 : 0,
128 .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
129 STA_FLG_MIMO_EN_MSK |
130 STA_FLG_RTS_MIMO_PROT),
131 .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
135 u32 agg_size = 0, mpdu_dens = 0;
137 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
138 add_sta_cmd.station_type = mvm_sta->sta_type;
140 if (!update || (flags & STA_MODIFY_QUEUES)) {
141 memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
143 if (!iwl_mvm_has_new_tx_api(mvm)) {
144 add_sta_cmd.tfd_queue_msk =
145 cpu_to_le32(mvm_sta->tfd_queue_msk);
147 if (flags & STA_MODIFY_QUEUES)
148 add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
150 WARN_ON(flags & STA_MODIFY_QUEUES);
154 switch (sta->bandwidth) {
155 case IEEE80211_STA_RX_BW_160:
156 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
158 case IEEE80211_STA_RX_BW_80:
159 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
161 case IEEE80211_STA_RX_BW_40:
162 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
164 case IEEE80211_STA_RX_BW_20:
165 if (sta->ht_cap.ht_supported)
166 add_sta_cmd.station_flags |=
167 cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
171 switch (sta->rx_nss) {
173 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
176 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
179 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
183 switch (sta->smps_mode) {
184 case IEEE80211_SMPS_AUTOMATIC:
185 case IEEE80211_SMPS_NUM_MODES:
188 case IEEE80211_SMPS_STATIC:
190 add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
191 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
193 case IEEE80211_SMPS_DYNAMIC:
194 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
196 case IEEE80211_SMPS_OFF:
201 if (sta->ht_cap.ht_supported) {
202 add_sta_cmd.station_flags_msk |=
203 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
204 STA_FLG_AGG_MPDU_DENS_MSK);
206 mpdu_dens = sta->ht_cap.ampdu_density;
209 if (sta->vht_cap.vht_supported) {
210 agg_size = sta->vht_cap.cap &
211 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
213 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
214 } else if (sta->ht_cap.ht_supported) {
215 agg_size = sta->ht_cap.ampdu_factor;
218 add_sta_cmd.station_flags |=
219 cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
220 add_sta_cmd.station_flags |=
221 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
222 if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
223 add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
226 add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
228 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
229 add_sta_cmd.uapsd_acs |= BIT(AC_BK);
230 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
231 add_sta_cmd.uapsd_acs |= BIT(AC_BE);
232 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
233 add_sta_cmd.uapsd_acs |= BIT(AC_VI);
234 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
235 add_sta_cmd.uapsd_acs |= BIT(AC_VO);
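/*
 * Mirror the AC bitmap into the high nibble: the two nibbles of uapsd_acs
 * carry the trigger-enabled and delivery-enabled AC sets, and the same ACs
 * are used for both here.
 */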
236 add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
237 add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
240 status = ADD_STA_SUCCESS;
241 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
242 iwl_mvm_add_sta_cmd_size(mvm),
243 &add_sta_cmd, &status);
247 switch (status & IWL_ADD_STA_STATUS_MASK) {
248 case ADD_STA_SUCCESS:
249 IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
253 IWL_ERR(mvm, "ADD_STA failed\n");
260 static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
262 struct iwl_mvm_baid_data *data =
263 from_timer(data, t, session_timer);
264 struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
265 struct iwl_mvm_baid_data *ba_data;
266 struct ieee80211_sta *sta;
267 struct iwl_mvm_sta *mvm_sta;
268 unsigned long timeout;
272 ba_data = rcu_dereference(*rcu_ptr);
274 if (WARN_ON(!ba_data))
277 if (!ba_data->timeout)
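/*
 * Only expire the session if no frame has been received for twice the
 * negotiated BA timeout; otherwise re-arm the timer for the remaining time.
 */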
280 timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
281 if (time_is_after_jiffies(timeout)) {
282 mod_timer(&ba_data->session_timer, timeout);
287 sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
290 * sta should be valid unless the following happens:
291 * The firmware asserts which triggers a reconfig flow, but
292 * the reconfig fails before we set the pointer to sta into
293 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
294 * A-MPDU and hence the timer continues to run. Then, the
295 * timer expires and sta is NULL.
300 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
301 ieee80211_rx_ba_timer_expired(mvm_sta->vif,
302 sta->addr, ba_data->tid);
307 /* Disable aggregations for a bitmap of TIDs for a given station */
308 static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
309 unsigned long disable_agg_tids,
312 struct iwl_mvm_add_sta_cmd cmd = {};
313 struct ieee80211_sta *sta;
314 struct iwl_mvm_sta *mvmsta;
319 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
322 sta_id = mvm->queue_info[queue].ra_sta_id;
326 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
328 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
333 mvmsta = iwl_mvm_sta_from_mac80211(sta);
335 mvmsta->tid_disable_agg |= disable_agg_tids;
337 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
338 cmd.sta_id = mvmsta->sta_id;
339 cmd.add_modify = STA_MODE_MODIFY;
340 cmd.modify_mask = STA_MODIFY_QUEUES;
341 if (disable_agg_tids)
342 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
344 cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
345 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
346 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
350 /* Notify FW of queue removal from the STA queues */
351 status = ADD_STA_SUCCESS;
352 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
353 iwl_mvm_add_sta_cmd_size(mvm),
359 static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
360 int mac80211_queue, u8 tid, u8 flags)
362 struct iwl_scd_txq_cfg_cmd cmd = {
364 .action = SCD_CFG_DISABLE_QUEUE,
366 bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
369 if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
372 if (iwl_mvm_has_new_tx_api(mvm)) {
373 if (remove_mac_queue)
374 mvm->hw_queue_to_mac80211[queue] &=
375 ~BIT(mac80211_queue);
377 iwl_trans_txq_free(mvm->trans, queue);
382 if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
385 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
388 * If there is another TID with the same AC - don't remove the MAC queue
391 if (tid < IWL_MAX_TID_COUNT) {
392 unsigned long tid_bitmap =
393 mvm->queue_info[queue].tid_bitmap;
394 int ac = tid_to_mac80211_ac[tid];
397 for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
398 if (tid_to_mac80211_ac[i] == ac)
399 remove_mac_queue = false;
403 if (remove_mac_queue)
404 mvm->hw_queue_to_mac80211[queue] &=
405 ~BIT(mac80211_queue);
407 cmd.action = mvm->queue_info[queue].tid_bitmap ?
408 SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
409 if (cmd.action == SCD_CFG_DISABLE_QUEUE)
410 mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
412 IWL_DEBUG_TX_QUEUES(mvm,
413 "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
415 mvm->queue_info[queue].tid_bitmap,
416 mvm->hw_queue_to_mac80211[queue]);
418 /* If the queue is still enabled - nothing left to do in this func */
419 if (cmd.action == SCD_CFG_ENABLE_QUEUE)
422 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
423 cmd.tid = mvm->queue_info[queue].txq_tid;
425 /* Make sure queue info is correct even though we overwrite it */
426 WARN(mvm->queue_info[queue].tid_bitmap ||
427 mvm->hw_queue_to_mac80211[queue],
428 "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
429 queue, mvm->hw_queue_to_mac80211[queue],
430 mvm->queue_info[queue].tid_bitmap);
432 /* If we are here - the queue is freed and we can zero out these vals */
433 mvm->queue_info[queue].tid_bitmap = 0;
434 mvm->hw_queue_to_mac80211[queue] = 0;
436 /* Regardless of whether this is a reserved TXQ for a STA - mark it as not reserved */
437 mvm->queue_info[queue].reserved = false;
439 iwl_trans_txq_disable(mvm->trans, queue, false);
440 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
441 sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
444 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
449 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
451 struct ieee80211_sta *sta;
452 struct iwl_mvm_sta *mvmsta;
453 unsigned long tid_bitmap;
454 unsigned long agg_tids = 0;
458 lockdep_assert_held(&mvm->mutex);
460 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
463 sta_id = mvm->queue_info[queue].ra_sta_id;
464 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
466 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
467 lockdep_is_held(&mvm->mutex));
469 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
472 mvmsta = iwl_mvm_sta_from_mac80211(sta);
474 spin_lock_bh(&mvmsta->lock);
475 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
476 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
477 agg_tids |= BIT(tid);
479 spin_unlock_bh(&mvmsta->lock);
485 * Remove a queue from a station's resources.
486 * Note that this only marks the queue as free. It DOESN'T delete a BA agreement, and
487 * doesn't disable the queue
489 static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
491 struct ieee80211_sta *sta;
492 struct iwl_mvm_sta *mvmsta;
493 unsigned long tid_bitmap;
494 unsigned long disable_agg_tids = 0;
498 lockdep_assert_held(&mvm->mutex);
500 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
503 sta_id = mvm->queue_info[queue].ra_sta_id;
504 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
508 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
510 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
515 mvmsta = iwl_mvm_sta_from_mac80211(sta);
517 spin_lock_bh(&mvmsta->lock);
518 /* Unmap MAC queues and TIDs from this queue */
519 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
520 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
521 disable_agg_tids |= BIT(tid);
522 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
525 mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
526 spin_unlock_bh(&mvmsta->lock);
531 * The TX path may have been using this TXQ_ID from the tid_data,
532 * so make sure it's no longer running so that we can safely reuse
533 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
534 * above, but nothing guarantees we've stopped using them. Thus,
535 * without this, we could get to iwl_mvm_disable_txq() and remove
536 * the queue while still sending frames to it.
540 return disable_agg_tids;
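/*
 * Free an inactive queue so it can be reused (possibly by another station):
 * unmap it from its current station, tell the firmware to drop any remaining
 * aggregations on it, and disable the TXQ itself.
 */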
543 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
546 struct iwl_mvm_sta *mvmsta;
547 u8 txq_curr_ac, sta_id, tid;
548 unsigned long disable_agg_tids = 0;
552 lockdep_assert_held(&mvm->mutex);
554 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
557 txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
558 sta_id = mvm->queue_info[queue].ra_sta_id;
559 tid = mvm->queue_info[queue].txq_tid;
561 same_sta = sta_id == new_sta_id;
563 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
564 if (WARN_ON(!mvmsta))
567 disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
568 /* Disable the queue */
569 if (disable_agg_tids)
570 iwl_mvm_invalidate_sta_queue(mvm, queue,
571 disable_agg_tids, false);
573 ret = iwl_mvm_disable_txq(mvm, queue,
574 mvmsta->vif->hw_queue[txq_curr_ac],
578 "Failed to free inactive queue %d (ret=%d)\n",
584 /* If TXQ is allocated to another STA, update removal in FW */
586 iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
591 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
592 unsigned long tfd_queue_mask, u8 ac)
595 u8 ac_to_queue[IEEE80211_NUM_ACS];
599 * This protects us against grabbing a queue that's being reconfigured
600 * by the inactivity checker.
602 lockdep_assert_held(&mvm->mutex);
604 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
607 memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
609 /* See what ACs the existing queues for this STA have */
610 for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
611 /* Only DATA queues can be shared */
612 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
613 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
616 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
620 * The queue to share is chosen only from DATA queues as follows (in
621 * descending priority):
624 * 3. Highest AC queue that is lower than new AC
625 * 4. Any existing AC (there always is at least 1 DATA queue)
628 /* Priority 1: An AC_BE queue */
629 if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
630 queue = ac_to_queue[IEEE80211_AC_BE];
631 /* Priority 2: Same AC queue */
632 else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
633 queue = ac_to_queue[ac];
634 /* Priority 3a: If new AC is VO and VI exists - use VI */
635 else if (ac == IEEE80211_AC_VO &&
636 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
637 queue = ac_to_queue[IEEE80211_AC_VI];
638 /* Priority 3b: No BE so only AC less than the new one is BK */
639 else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
640 queue = ac_to_queue[IEEE80211_AC_BK];
641 /* Priority 4a: No BE nor BK - use VI if exists */
642 else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
643 queue = ac_to_queue[IEEE80211_AC_VI];
644 /* Priority 4b: No BE, BK nor VI - use VO if exists */
645 else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
646 queue = ac_to_queue[IEEE80211_AC_VO];
648 /* Make sure queue found (or not) is legal */
649 if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
650 !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
651 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
652 IWL_ERR(mvm, "No DATA queues available to share\n");
660 * If a given queue has a higher AC than the TID stream that is being compared
661 * to, the queue needs to be redirected to the lower AC. This function does that
662 * in such a case; otherwise - if no redirection is required - it does nothing,
663 * unless the %force param is true.
665 static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
666 int ac, int ssn, unsigned int wdg_timeout,
669 struct iwl_scd_txq_cfg_cmd cmd = {
671 .action = SCD_CFG_DISABLE_QUEUE,
677 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
681 * If the AC is lower than current one - FIFO needs to be redirected to
682 * the lowest one of the streams in the queue. Check if this is needed
684 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
685 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
686 * we need to check if the numerical value of X is LARGER than that of Y.
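 * For example, IEEE80211_AC_VO is 0 and IEEE80211_AC_BK is 3, so a
 * numerically larger value means a lower-priority AC.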
688 if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
689 IWL_DEBUG_TX_QUEUES(mvm,
690 "No redirection needed on TXQ #%d\n",
695 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
696 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
697 cmd.tid = mvm->queue_info[queue].txq_tid;
698 mq = mvm->hw_queue_to_mac80211[queue];
699 shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
701 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
702 queue, iwl_mvm_ac_to_tx_fifo[ac]);
704 /* Stop MAC queues and wait for this queue to empty */
705 iwl_mvm_stop_mac_queues(mvm, mq);
706 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
708 IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
714 /* Before redirecting the queue we need to de-activate it */
715 iwl_trans_txq_disable(mvm->trans, queue, false);
716 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
718 IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
721 /* Make sure the SCD wrptr is correctly set before reconfiguring */
722 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
724 /* Update the TID "owner" of the queue */
725 mvm->queue_info[queue].txq_tid = tid;
727 /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
729 /* Redirect to lower AC */
730 iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
731 cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
733 /* Update AC marking of the queue */
734 mvm->queue_info[queue].mac80211_ac = ac;
737 * Mark queue as shared in transport if shared
738 * Note this has to be done after queue enablement because enablement
739 * can also set this value, and there is no indication there of shared queues.
743 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
746 /* Continue using the MAC queues */
747 iwl_mvm_start_mac_queues(mvm, mq);
752 static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
757 lockdep_assert_held(&mvm->mutex);
759 /* This should not be hit with new TX path */
760 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
763 /* Start by looking for a free queue */
764 for (i = minq; i <= maxq; i++)
765 if (mvm->queue_info[i].tid_bitmap == 0 &&
766 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
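/*
 * Allocate a TXQ through the new (TVQM) transport API and record its mapping
 * to the given mac80211 queue. Returns the queue number or a negative error.
 */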
772 static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
773 u8 sta_id, u8 tid, unsigned int timeout)
775 int queue, size = IWL_DEFAULT_QUEUE_SIZE;
777 if (tid == IWL_MAX_TID_COUNT) {
779 size = IWL_MGMT_QUEUE_SIZE;
781 queue = iwl_trans_txq_alloc(mvm->trans,
782 cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
783 sta_id, tid, SCD_QUEUE_CFG, size, timeout);
786 IWL_DEBUG_TX_QUEUES(mvm,
787 "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
792 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
795 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
796 IWL_DEBUG_TX_QUEUES(mvm,
797 "Enabling TXQ #%d (mac80211 map:0x%x)\n",
798 queue, mvm->hw_queue_to_mac80211[queue]);
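/* Allocate a per-STA/TID TXQ on the new TX path and store it in tid_data */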
803 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
804 struct ieee80211_sta *sta, u8 ac,
807 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
808 unsigned int wdg_timeout =
809 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
810 u8 mac_queue = mvmsta->vif->hw_queue[ac];
813 lockdep_assert_held(&mvm->mutex);
815 IWL_DEBUG_TX_QUEUES(mvm,
816 "Allocating queue for sta %d on tid %d\n",
817 mvmsta->sta_id, tid);
818 queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
823 IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
825 spin_lock_bh(&mvmsta->lock);
826 mvmsta->tid_data[tid].txq_id = queue;
827 spin_unlock_bh(&mvmsta->lock);
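/*
 * Update the driver's queue bookkeeping (TID bitmap, RA/STA id, AC and
 * mac80211 queue mapping). Returns true if this is the first TID on the
 * queue, i.e. the queue still needs to be enabled in hardware.
 */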
832 static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
833 int mac80211_queue, u8 sta_id, u8 tid)
835 bool enable_queue = true;
837 /* Make sure this TID isn't already enabled */
838 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
839 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
844 /* Update mappings and refcounts */
845 if (mvm->queue_info[queue].tid_bitmap)
846 enable_queue = false;
848 if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
849 WARN(mac80211_queue >=
850 BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
851 "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
852 mac80211_queue, queue, sta_id, tid);
853 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
856 mvm->queue_info[queue].tid_bitmap |= BIT(tid);
857 mvm->queue_info[queue].ra_sta_id = sta_id;
860 if (tid != IWL_MAX_TID_COUNT)
861 mvm->queue_info[queue].mac80211_ac =
862 tid_to_mac80211_ac[tid];
864 mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
866 mvm->queue_info[queue].txq_tid = tid;
869 IWL_DEBUG_TX_QUEUES(mvm,
870 "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
871 queue, mvm->queue_info[queue].tid_bitmap,
872 mvm->hw_queue_to_mac80211[queue]);
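/*
 * Enable a TXQ on the legacy (SCD) TX path: update the mapping, configure the
 * queue in the transport and send the SCD_QUEUE_CFG command. Returns whether
 * the transport had to increment the SSN.
 */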
877 static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
878 int mac80211_queue, u16 ssn,
879 const struct iwl_trans_txq_scd_cfg *cfg,
880 unsigned int wdg_timeout)
882 struct iwl_scd_txq_cfg_cmd cmd = {
884 .action = SCD_CFG_ENABLE_QUEUE,
885 .window = cfg->frame_limit,
886 .sta_id = cfg->sta_id,
887 .ssn = cpu_to_le16(ssn),
888 .tx_fifo = cfg->fifo,
889 .aggregate = cfg->aggregate,
894 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
897 /* Send the enabling command if we need to */
898 if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
899 cfg->sta_id, cfg->tid))
902 inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
905 le16_add_cpu(&cmd.ssn, 1);
907 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
908 "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
913 static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
915 struct iwl_scd_txq_cfg_cmd cmd = {
917 .action = SCD_CFG_UPDATE_QUEUE_TID,
920 unsigned long tid_bitmap;
923 lockdep_assert_held(&mvm->mutex);
925 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
928 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
930 if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
933 /* Find any TID for queue */
934 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
936 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
938 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
940 IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
945 mvm->queue_info[queue].txq_tid = tid;
946 IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
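/*
 * Turn a formerly shared queue that is left with a single TID back into a
 * regular queue: redirect it to the AC of the remaining TID and re-enable
 * aggregation on that TID if it was active.
 */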
950 static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
952 struct ieee80211_sta *sta;
953 struct iwl_mvm_sta *mvmsta;
956 unsigned long tid_bitmap;
957 unsigned int wdg_timeout;
961 /* queue sharing is disabled on new TX path */
962 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
965 lockdep_assert_held(&mvm->mutex);
967 sta_id = mvm->queue_info[queue].ra_sta_id;
968 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
970 /* Find TID for queue, and make sure it is the only one on the queue */
971 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
972 if (tid_bitmap != BIT(tid)) {
973 IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
978 IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
981 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
982 lockdep_is_held(&mvm->mutex));
984 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
987 mvmsta = iwl_mvm_sta_from_mac80211(sta);
988 wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
990 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
992 ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
993 tid_to_mac80211_ac[tid], ssn,
996 IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
1000 /* If aggs should be turned back on - do it */
1001 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
1002 struct iwl_mvm_add_sta_cmd cmd = {0};
1004 mvmsta->tid_disable_agg &= ~BIT(tid);
1006 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1007 cmd.sta_id = mvmsta->sta_id;
1008 cmd.add_modify = STA_MODE_MODIFY;
1009 cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1010 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
1011 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
1013 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
1014 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
1016 IWL_DEBUG_TX_QUEUES(mvm,
1017 "TXQ #%d is now aggregated again\n",
1020 /* Mark queue internally as aggregating again */
1021 iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1025 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1029 * Remove inactive TIDs of a given queue.
1030 * If all queue TIDs are inactive - mark the queue as inactive
1031 * If only some of the queue TIDs are inactive - unmap them from the queue
1033 * Returns %true if all TIDs were removed and the queue could be reused.
1035 static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1036 struct iwl_mvm_sta *mvmsta, int queue,
1037 unsigned long tid_bitmap,
1038 unsigned long *unshare_queues,
1039 unsigned long *changetid_queues)
1043 lockdep_assert_held(&mvmsta->lock);
1044 lockdep_assert_held(&mvm->mutex);
1046 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1049 /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1050 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1051 /* If some TFDs are still queued - don't mark TID as inactive */
1052 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1053 tid_bitmap &= ~BIT(tid);
1055 /* Don't mark as inactive any TID that has an active BA */
1056 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1057 tid_bitmap &= ~BIT(tid);
1060 /* If all TIDs in the queue are inactive - return that it can be reused */
1061 if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1062 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
1067 * If we are here, this is a shared queue and not all TIDs timed-out.
1068 * Remove the ones that did.
1070 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1071 int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
1074 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1075 mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
1076 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1078 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1081 * We need to take into account a situation in which a TXQ was
1082 * allocated to TID x, and then turned shared by adding TIDs y
1083 * and z. If TID x becomes inactive and is removed from the TXQ,
1084 * ownership must be given to one of the remaining TIDs.
1085 * This is mainly because if TID x continues - a new queue can't
1086 * be allocated for it as long as it is an owner of another TXQ.
1088 * Mark this queue in the right bitmap, we'll send the command
1089 * to the firmware later.
1091 if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
1092 set_bit(queue, changetid_queues);
1094 IWL_DEBUG_TX_QUEUES(mvm,
1095 "Removing inactive TID %d from shared Q:%d\n",
1099 IWL_DEBUG_TX_QUEUES(mvm,
1100 "TXQ #%d left with tid bitmap 0x%x\n", queue,
1101 mvm->queue_info[queue].tid_bitmap);
1104 * There may be different TIDs with the same mac queues, so make
1105 * sure all TIDs have existing corresponding mac queues enabled
1107 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1108 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1109 mvm->hw_queue_to_mac80211[queue] |=
1110 BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
1113 /* If the queue is marked as shared - "unshare" it */
1114 if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
1115 mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1116 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1118 set_bit(queue, unshare_queues);
1125 * Check for inactivity - this includes checking if any queue
1126 * can be unshared and finding one (and only one) that can be reused.
1128 * This function is also invoked as a sort of clean-up task,
1129 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
1131 * Returns the queue number, or -ENOSPC.
1133 static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
1135 unsigned long now = jiffies;
1136 unsigned long unshare_queues = 0;
1137 unsigned long changetid_queues = 0;
1138 int i, ret, free_queue = -ENOSPC;
1140 lockdep_assert_held(&mvm->mutex);
1142 if (iwl_mvm_has_new_tx_api(mvm))
1147 /* we skip the CMD queue below by starting at 1 */
1148 BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
1150 for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
1151 struct ieee80211_sta *sta;
1152 struct iwl_mvm_sta *mvmsta;
1155 unsigned long inactive_tid_bitmap = 0;
1156 unsigned long queue_tid_bitmap;
1158 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1159 if (!queue_tid_bitmap)
1162 /* If TXQ isn't in active use anyway - nothing to do here... */
1163 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1164 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
1167 /* Check to see if there are inactive TIDs on this queue */
1168 for_each_set_bit(tid, &queue_tid_bitmap,
1169 IWL_MAX_TID_COUNT + 1) {
1170 if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1171 IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1174 inactive_tid_bitmap |= BIT(tid);
1177 /* If all TIDs are active - finish check on this queue */
1178 if (!inactive_tid_bitmap)
1182 * If we are here - the queue hadn't been served recently and is in use.
1186 sta_id = mvm->queue_info[i].ra_sta_id;
1187 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1190 * If the STA doesn't exist anymore, it isn't an error. It could
1191 * be that it was removed since getting the queues, and in this
1192 * case it should've inactivated its queues anyway.
1194 if (IS_ERR_OR_NULL(sta))
1197 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1199 spin_lock_bh(&mvmsta->lock);
1200 ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
1201 inactive_tid_bitmap,
1204 if (ret >= 0 && free_queue < 0)
1206 /* only unlock sta lock - we still need the queue info lock */
1207 spin_unlock_bh(&mvmsta->lock);
1212 /* Reconfigure queues requiring reconfiguration */
1213 for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
1214 iwl_mvm_unshare_queue(mvm, i);
1215 for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
1216 iwl_mvm_change_queue_tid(mvm, i);
1218 if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
1219 ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
1228 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
1229 struct ieee80211_sta *sta, u8 ac, int tid,
1230 struct ieee80211_hdr *hdr)
1232 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1233 struct iwl_trans_txq_scd_cfg cfg = {
1234 .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
1235 .sta_id = mvmsta->sta_id,
1237 .frame_limit = IWL_FRAME_LIMIT,
1239 unsigned int wdg_timeout =
1240 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1241 u8 mac_queue = mvmsta->vif->hw_queue[ac];
1243 unsigned long disable_agg_tids = 0;
1244 enum iwl_mvm_agg_state queue_state;
1245 bool shared_queue = false, inc_ssn;
1247 unsigned long tfd_queue_mask;
1250 lockdep_assert_held(&mvm->mutex);
1252 if (iwl_mvm_has_new_tx_api(mvm))
1253 return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
1255 spin_lock_bh(&mvmsta->lock);
1256 tfd_queue_mask = mvmsta->tfd_queue_msk;
1257 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1258 spin_unlock_bh(&mvmsta->lock);
1261 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one exists.
1264 if (!ieee80211_is_data_qos(hdr->frame_control) ||
1265 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1266 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1267 IWL_MVM_DQA_MIN_MGMT_QUEUE,
1268 IWL_MVM_DQA_MAX_MGMT_QUEUE);
1269 if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
1270 IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
1273 /* If no such queue is found, we'll use a DATA queue instead */
1276 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
1277 (mvm->queue_info[mvmsta->reserved_queue].status ==
1278 IWL_MVM_QUEUE_RESERVED)) {
1279 queue = mvmsta->reserved_queue;
1280 mvm->queue_info[queue].reserved = true;
1281 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
1285 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1286 IWL_MVM_DQA_MIN_DATA_QUEUE,
1287 IWL_MVM_DQA_MAX_DATA_QUEUE);
1289 /* try harder - perhaps kill an inactive queue */
1290 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1293 /* No free queue - we'll have to share */
1295 queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
1297 shared_queue = true;
1298 mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
1303 * Mark TXQ as ready, even though it hasn't been fully configured yet,
1304 * to make sure no one else takes it.
1305 * This will allow avoiding re-acquiring the lock at the end of the
1306 * configuration. On error we'll mark it back as free.
1308 if (queue > 0 && !shared_queue)
1309 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1311 /* This shouldn't happen - out of queues */
1312 if (WARN_ON(queue <= 0)) {
1313 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
1319 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
1320 * but for configuring the SCD to send A-MPDUs we need to mark the queue
1322 * Mark all DATA queues as allowing to be aggregated at some point
1324 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1325 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1327 IWL_DEBUG_TX_QUEUES(mvm,
1328 "Allocating %squeue #%d to sta %d on tid %d\n",
1329 shared_queue ? "shared " : "", queue,
1330 mvmsta->sta_id, tid);
1333 /* Disable any open aggs on this queue */
1334 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
1336 if (disable_agg_tids) {
1337 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
1339 iwl_mvm_invalidate_sta_queue(mvm, queue,
1340 disable_agg_tids, false);
1344 inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
1345 ssn, &cfg, wdg_timeout);
1348 * Mark queue as shared in transport if shared
1349 * Note this has to be done after queue enablement because enablement
1350 * can also set this value, and there is no indication there of shared queues.
1354 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
1356 spin_lock_bh(&mvmsta->lock);
1358 * This looks racy, but it is not. We have only one packet for
1359 * this ra/tid in our Tx path since we stop the Qdisc when we
1360 * need to allocate a new TFD queue.
1363 mvmsta->tid_data[tid].seq_number += 0x10;
1364 ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
1366 mvmsta->tid_data[tid].txq_id = queue;
1367 mvmsta->tfd_queue_msk |= BIT(queue);
1368 queue_state = mvmsta->tid_data[tid].state;
1370 if (mvmsta->reserved_queue == queue)
1371 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
1372 spin_unlock_bh(&mvmsta->lock);
1374 if (!shared_queue) {
1375 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
1379 /* If we need to re-enable aggregations... */
1380 if (queue_state == IWL_AGG_ON) {
1381 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1386 /* Redirect queue, if needed */
1387 ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
1388 wdg_timeout, false);
1396 iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
1401 static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1403 if (tid == IWL_MAX_TID_COUNT)
1404 return IEEE80211_AC_VO; /* MGMT */
1406 return tid_to_mac80211_ac[tid];
1409 static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
1410 struct ieee80211_sta *sta, int tid)
1412 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1413 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1414 struct sk_buff *skb;
1415 struct ieee80211_hdr *hdr;
1416 struct sk_buff_head deferred_tx;
1418 bool no_queue = false; /* Marks if there is a problem with the queue */
1421 lockdep_assert_held(&mvm->mutex);
1423 skb = skb_peek(&tid_data->deferred_tx_frames);
1426 hdr = (void *)skb->data;
1428 ac = iwl_mvm_tid_to_ac_queue(tid);
1429 mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
1431 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
1432 iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
1434 "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
1435 mvmsta->sta_id, tid);
1438 * Mark queue as problematic so later the deferred traffic is
1439 * freed, as we can do nothing with it
1444 __skb_queue_head_init(&deferred_tx);
1446 /* Disable bottom-halves when entering TX path */
1448 spin_lock(&mvmsta->lock);
1449 skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
1450 mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
1451 spin_unlock(&mvmsta->lock);
1453 while ((skb = __skb_dequeue(&deferred_tx)))
1454 if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
1455 ieee80211_free_txskb(mvm->hw, skb);
1459 iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
1462 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1464 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1466 struct ieee80211_sta *sta;
1467 struct iwl_mvm_sta *mvmsta;
1468 unsigned long deferred_tid_traffic;
1471 mutex_lock(&mvm->mutex);
1473 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1475 /* Go over all stations with deferred traffic */
1476 for_each_set_bit(sta_id, mvm->sta_deferred_frames,
1477 IWL_MVM_STATION_COUNT) {
1478 clear_bit(sta_id, mvm->sta_deferred_frames);
1479 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1480 lockdep_is_held(&mvm->mutex));
1481 if (IS_ERR_OR_NULL(sta))
1484 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1485 deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
1487 for_each_set_bit(tid, &deferred_tid_traffic,
1488 IWL_MAX_TID_COUNT + 1)
1489 iwl_mvm_tx_deferred_stream(mvm, sta, tid);
1492 mutex_unlock(&mvm->mutex);
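/*
 * Reserve a data queue for a new station on the pre-22000 (DQA) path. A
 * non-TDLS client station gets the BSS client queue if it is free; otherwise
 * any free (or freeable inactive) data queue is reserved.
 */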
1495 static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1496 struct ieee80211_sta *sta,
1497 enum nl80211_iftype vif_type)
1499 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1502 /* queue reserving is disabled on new TX path */
1503 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1506 /* run the general cleanup/unsharing of queues */
1507 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1509 /* Make sure we have free resources for this STA */
1510 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1511 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
1512 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1513 IWL_MVM_QUEUE_FREE))
1514 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1516 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1517 IWL_MVM_DQA_MIN_DATA_QUEUE,
1518 IWL_MVM_DQA_MAX_DATA_QUEUE);
1520 /* try again - this time kick out a queue if needed */
1521 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1523 IWL_ERR(mvm, "No available queues for new station\n");
1527 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
1529 mvmsta->reserved_queue = queue;
1531 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1532 queue, mvmsta->sta_id);
1538 * In DQA mode, after a HW restart the queues should be allocated as before, in
1539 * order to avoid race conditions when there are shared queues. This function
1540 * does the re-mapping and queue allocation.
1542 * Note that re-enabling aggregations isn't done in this function.
1544 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1545 struct iwl_mvm_sta *mvm_sta)
1547 unsigned int wdg_timeout =
1548 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1550 struct iwl_trans_txq_scd_cfg cfg = {
1551 .sta_id = mvm_sta->sta_id,
1552 .frame_limit = IWL_FRAME_LIMIT,
1555 /* Make sure reserved queue is still marked as such (if allocated) */
1556 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1557 mvm->queue_info[mvm_sta->reserved_queue].status =
1558 IWL_MVM_QUEUE_RESERVED;
1560 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1561 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1562 int txq_id = tid_data->txq_id;
1566 if (txq_id == IWL_MVM_INVALID_QUEUE)
1569 skb_queue_head_init(&tid_data->deferred_tx_frames);
1571 ac = tid_to_mac80211_ac[i];
1572 mac_queue = mvm_sta->vif->hw_queue[ac];
1574 if (iwl_mvm_has_new_tx_api(mvm)) {
1575 IWL_DEBUG_TX_QUEUES(mvm,
1576 "Re-mapping sta %d tid %d\n",
1577 mvm_sta->sta_id, i);
1578 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
1581 tid_data->txq_id = txq_id;
1584 * Since we don't set the seq number after reset, and HW
1585 * sets it now, FW reset will cause the seq num to start
1586 * at 0 again, so driver will need to update it
1587 * internally as well, so it keeps in sync with the real value.
1589 tid_data->seq_number = 0;
1591 u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1594 cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
1595 cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1597 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1599 IWL_DEBUG_TX_QUEUES(mvm,
1600 "Re-mapping sta %d tid %d to queue %d\n",
1601 mvm_sta->sta_id, i, txq_id);
1603 iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
1605 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
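/*
 * Send ADD_STA for an internal (driver-only) station such as the aux,
 * sniffer or broadcast station; @addr may be NULL for stations that have
 * no MAC address of their own.
 */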
1610 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1611 struct iwl_mvm_int_sta *sta,
1613 u16 mac_id, u16 color)
1615 struct iwl_mvm_add_sta_cmd cmd;
1617 u32 status = ADD_STA_SUCCESS;
1619 lockdep_assert_held(&mvm->mutex);
1621 memset(&cmd, 0, sizeof(cmd));
1622 cmd.sta_id = sta->sta_id;
1623 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1625 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1626 cmd.station_type = sta->type;
1628 if (!iwl_mvm_has_new_tx_api(mvm))
1629 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1630 cmd.tid_disable_tx = cpu_to_le16(0xffff);
1633 memcpy(cmd.addr, addr, ETH_ALEN);
1635 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1636 iwl_mvm_add_sta_cmd_size(mvm),
1641 switch (status & IWL_ADD_STA_STATUS_MASK) {
1642 case ADD_STA_SUCCESS:
1643 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1647 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1654 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1655 struct ieee80211_vif *vif,
1656 struct ieee80211_sta *sta)
1658 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1659 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1660 struct iwl_mvm_rxq_dup_data *dup_data;
1662 bool sta_update = false;
1663 unsigned int sta_flags = 0;
1665 lockdep_assert_held(&mvm->mutex);
1667 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1668 sta_id = iwl_mvm_find_free_sta_id(mvm,
1669 ieee80211_vif_type_p2p(vif));
1671 sta_id = mvm_sta->sta_id;
1673 if (sta_id == IWL_MVM_INVALID_STA)
1676 spin_lock_init(&mvm_sta->lock);
1678 /* if this is a HW restart re-alloc existing queues */
1679 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1680 struct iwl_mvm_int_sta tmp_sta = {
1682 .type = mvm_sta->sta_type,
1686 * First add an empty station since allocating
1687 * a queue requires a valid station
1689 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1690 mvmvif->id, mvmvif->color);
1694 iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
1696 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1700 mvm_sta->sta_id = sta_id;
1701 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1704 if (!mvm->trans->cfg->gen2)
1705 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1707 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1708 mvm_sta->tx_protection = 0;
1709 mvm_sta->tt_tx_protection = false;
1710 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
1712 /* HW restart, don't assume the memory has been zeroed */
1713 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
1714 mvm_sta->tfd_queue_msk = 0;
1716 /* for HW restart - reset everything but the sequence number */
1717 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1718 u16 seq = mvm_sta->tid_data[i].seq_number;
1719 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1720 mvm_sta->tid_data[i].seq_number = seq;
1723 * Mark all queues for this STA as unallocated and defer TX
1724 * frames until the queue is allocated
1726 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1727 skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
1729 mvm_sta->deferred_traffic_tid_map = 0;
1730 mvm_sta->agg_tids = 0;
1732 if (iwl_mvm_has_new_rx_api(mvm) &&
1733 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1736 dup_data = kcalloc(mvm->trans->num_rx_queues,
1737 sizeof(*dup_data), GFP_KERNEL);
1741 * Initialize all the last_seq values to 0xffff which can never
1742 * compare equal to the frame's seq_ctrl in the check in
1743 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1744 * number and fragmented packets don't reach that function.
1746 * This thus allows receiving a packet with seqno 0 and the
1747 * retry bit set as the very first packet on a new TID.
1749 for (q = 0; q < mvm->trans->num_rx_queues; q++)
1750 memset(dup_data[q].last_seq, 0xff,
1751 sizeof(dup_data[q].last_seq));
1752 mvm_sta->dup_data = dup_data;
1755 if (!iwl_mvm_has_new_tx_api(mvm)) {
1756 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1757 ieee80211_vif_type_p2p(vif));
1763 * if rs is registered with mac80211, then "add station" will be handled
1764 * via the corresponding ops, otherwise we need to notify rate scaling here.
1766 if (iwl_mvm_has_tlc_offload(mvm))
1767 iwl_mvm_rs_add_sta(mvm, mvm_sta);
1769 iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
1772 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
1776 if (vif->type == NL80211_IFTYPE_STATION) {
1778 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
1779 mvmvif->ap_sta_id = sta_id;
1781 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
1785 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1793 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1796 struct iwl_mvm_add_sta_cmd cmd = {};
1800 lockdep_assert_held(&mvm->mutex);
1802 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1803 cmd.sta_id = mvmsta->sta_id;
1804 cmd.add_modify = STA_MODE_MODIFY;
1805 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1806 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1808 status = ADD_STA_SUCCESS;
1809 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1810 iwl_mvm_add_sta_cmd_size(mvm),
1815 switch (status & IWL_ADD_STA_STATUS_MASK) {
1816 case ADD_STA_SUCCESS:
1817 IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1822 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1831 * Remove a station from the FW table. Before sending the command to remove
1832 * the station validate that the station is indeed known to the driver (sanity
1835 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1837 struct ieee80211_sta *sta;
1838 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1843 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1844 lockdep_is_held(&mvm->mutex));
1846 /* Note: internal stations are marked as error values */
1848 IWL_ERR(mvm, "Invalid station id\n");
1852 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1853 sizeof(rm_sta_cmd), &rm_sta_cmd);
1855 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1862 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1863 struct ieee80211_vif *vif,
1864 struct iwl_mvm_sta *mvm_sta)
1869 lockdep_assert_held(&mvm->mutex);
1871 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1872 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1875 ac = iwl_mvm_tid_to_ac_queue(i);
1876 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1877 vif->hw_queue[ac], i, 0);
1878 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1882 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1883 struct iwl_mvm_sta *mvm_sta)
1887 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1891 spin_lock_bh(&mvm_sta->lock);
1892 txq_id = mvm_sta->tid_data[i].txq_id;
1893 spin_unlock_bh(&mvm_sta->lock);
1895 if (txq_id == IWL_MVM_INVALID_QUEUE)
1898 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1906 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1907 struct ieee80211_vif *vif,
1908 struct ieee80211_sta *sta)
1910 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1911 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1912 u8 sta_id = mvm_sta->sta_id;
1915 lockdep_assert_held(&mvm->mutex);
1917 if (iwl_mvm_has_new_rx_api(mvm))
1918 kfree(mvm_sta->dup_data);
1920 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1924 /* flush its queues here since we are freeing mvm_sta */
1925 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
1928 if (iwl_mvm_has_new_tx_api(mvm)) {
1929 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1931 u32 q_mask = mvm_sta->tfd_queue_msk;
1933 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1939 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1941 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
1943 /* If there is a TXQ still marked as reserved - free it */
1944 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1945 u8 reserved_txq = mvm_sta->reserved_queue;
1946 enum iwl_mvm_queue_status *status;
1949 * If no traffic has gone through the reserved TXQ - it
1950 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1951 * should be manually marked as free again
1953 status = &mvm->queue_info[reserved_txq].status;
1954 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1955 (*status != IWL_MVM_QUEUE_FREE),
1956 "sta_id %d reserved txq %d status %d",
1957 sta_id, reserved_txq, *status))
1960 *status = IWL_MVM_QUEUE_FREE;
1963 if (vif->type == NL80211_IFTYPE_STATION &&
1964 mvmvif->ap_sta_id == sta_id) {
1965 /* if associated - we can't remove the AP STA now */
1966 if (vif->bss_conf.assoc)
1969 /* unassoc - go ahead - remove the AP STA now */
1970 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1972 /* clear d0i3_ap_sta_id if no longer relevant */
1973 if (mvm->d0i3_ap_sta_id == sta_id)
1974 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1978 * This shouldn't happen - the TDLS channel switch should be canceled
1979 * before the STA is removed.
1981 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
1982 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1983 cancel_delayed_work(&mvm->tdls_cs.dwork);
1987 * Make sure that the tx response code sees the station as -EBUSY and
1988 * calls the drain worker.
1990 spin_lock_bh(&mvm_sta->lock);
1991 spin_unlock_bh(&mvm_sta->lock);
1993 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1994 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1999 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
2000 struct ieee80211_vif *vif,
2003 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
2005 lockdep_assert_held(&mvm->mutex);
2007 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
2011 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
2012 struct iwl_mvm_int_sta *sta,
2013 u32 qmask, enum nl80211_iftype iftype,
2014 enum iwl_sta_type type)
2016 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
2017 sta->sta_id == IWL_MVM_INVALID_STA) {
2018 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
2019 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
2023 sta->tfd_queue_msk = qmask;
2026 /* put a non-NULL value so iterating over the stations won't stop */
2027 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
2031 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
2033 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
2034 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
2035 sta->sta_id = IWL_MVM_INVALID_STA;
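/*
 * Enable the aux/sniffer queue: on devices with the new TX API the queue is
 * allocated through TVQM, otherwise the fixed queue is configured on the SCD.
 */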
2038 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
2041 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
2042 mvm->cfg->base_params->wd_timeout :
2043 IWL_WATCHDOG_DISABLED;
2045 if (iwl_mvm_has_new_tx_api(mvm)) {
2047 iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
2050 *queue = tvqm_queue;
2052 struct iwl_trans_txq_scd_cfg cfg = {
2055 .tid = IWL_MAX_TID_COUNT,
2057 .frame_limit = IWL_FRAME_LIMIT,
2060 iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
2064 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
2068 lockdep_assert_held(&mvm->mutex);
2070 /* Allocate aux station and assign to it the aux queue */
2071 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
2072 NL80211_IFTYPE_UNSPECIFIED,
2073 IWL_STA_AUX_ACTIVITY);
2077 /* Map Aux queue to fifo - needs to happen before adding Aux station */
2078 if (!iwl_mvm_has_new_tx_api(mvm))
2079 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2080 mvm->aux_sta.sta_id,
2081 IWL_MVM_TX_FIFO_MCAST);
2083 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
2086 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2091 * For 22000 firmware and on we cannot add a queue to a station unknown
2092 * to the firmware, so enable the queue here - after the station was added
2094 if (iwl_mvm_has_new_tx_api(mvm))
2095 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2096 mvm->aux_sta.sta_id,
2097 IWL_MVM_TX_FIFO_MCAST);
2102 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2104 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2107 lockdep_assert_held(&mvm->mutex);
2109 /* Map snif queue to fifo - must happen before adding snif station */
2110 if (!iwl_mvm_has_new_tx_api(mvm))
2111 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2112 mvm->snif_sta.sta_id,
2113 IWL_MVM_TX_FIFO_BE);
2115 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
2121 * For 22000 firmware and on we cannot add a queue to a station unknown
2122 * to the firmware, so enable the queue here - after the station was added
2124 if (iwl_mvm_has_new_tx_api(mvm))
2125 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2126 mvm->snif_sta.sta_id,
2127 IWL_MVM_TX_FIFO_BE);
2132 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2136 lockdep_assert_held(&mvm->mutex);
2138 iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
2139 IWL_MAX_TID_COUNT, 0);
2140 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2142 IWL_WARN(mvm, "Failed sending remove station\n");
2147 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2149 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2152 void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
2154 lockdep_assert_held(&mvm->mutex);
2156 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2160 * Send the add station command for the vif's broadcast station.
2161 * Assumes that the station was already allocated.
2163 * @mvm: the mvm component
2164 * @vif: the interface to which the broadcast station is added
2167 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2169 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2170 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2171 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2172 const u8 *baddr = _baddr;
2175 unsigned int wdg_timeout =
2176 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2177 struct iwl_trans_txq_scd_cfg cfg = {
2178 .fifo = IWL_MVM_TX_FIFO_VO,
2179 .sta_id = mvmvif->bcast_sta.sta_id,
2180 .tid = IWL_MAX_TID_COUNT,
2182 .frame_limit = IWL_FRAME_LIMIT,
2185 lockdep_assert_held(&mvm->mutex);
2187 if (!iwl_mvm_has_new_tx_api(mvm)) {
2188 if (vif->type == NL80211_IFTYPE_AP ||
2189 vif->type == NL80211_IFTYPE_ADHOC)
2190 queue = mvm->probe_queue;
2191 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2192 queue = mvm->p2p_dev_queue;
2193 else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
2196 bsta->tfd_queue_msk |= BIT(queue);
2198 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
2202 if (vif->type == NL80211_IFTYPE_ADHOC)
2203 baddr = vif->bss_conf.bssid;
2205 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2208 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2209 mvmvif->id, mvmvif->color);
2214 * For 22000 firmware and on we cannot add a queue to a station unknown
2215 * to the firmware, so enable the queue here - after the station was added
2217 if (iwl_mvm_has_new_tx_api(mvm)) {
2218 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
2223 if (vif->type == NL80211_IFTYPE_AP ||
2224 vif->type == NL80211_IFTYPE_ADHOC)
2225 mvm->probe_queue = queue;
2226 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2227 mvm->p2p_dev_queue = queue;
2233 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2234 struct ieee80211_vif *vif)
2236 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2239 lockdep_assert_held(&mvm->mutex);
2241 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
2243 switch (vif->type) {
2244 case NL80211_IFTYPE_AP:
2245 case NL80211_IFTYPE_ADHOC:
2246 queue = mvm->probe_queue;
2248 case NL80211_IFTYPE_P2P_DEVICE:
2249 queue = mvm->p2p_dev_queue;
2252 WARN(1, "Can't free bcast queue on vif type %d\n",
2257 iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
2258 if (iwl_mvm_has_new_tx_api(mvm))
2261 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2262 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
2265 /* Send the FW a request to remove the station from its internal data
2266 * structures, but DO NOT remove the entry from the local data structures. */
2267 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2269 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2272 lockdep_assert_held(&mvm->mutex);
2274 iwl_mvm_free_bcast_sta_queues(mvm, vif);
2276 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2278 IWL_WARN(mvm, "Failed sending remove station\n");
2282 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2284 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2286 lockdep_assert_held(&mvm->mutex);
2288 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
2289 ieee80211_vif_type_p2p(vif),
2290 IWL_STA_GENERAL_PURPOSE);
2293 /* Allocate a new station entry for the broadcast station of the given vif,
2294 * and send it to the FW.
2295 * Note that each P2P mac should have its own broadcast station.
2297 * @mvm: the mvm component
2298 * @vif: the interface to which the broadcast station is added
2299 */
2300 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2302 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2303 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2306 lockdep_assert_held(&mvm->mutex);
2308 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2312 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2315 iwl_mvm_dealloc_int_sta(mvm, bsta);
2320 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2322 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2324 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2328 * Send the FW a request to remove the station from its internal data
2329 * structures, and in addition remove it from the local data structure.
2331 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2335 lockdep_assert_held(&mvm->mutex);
2337 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2339 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2345 * Allocate a new station entry for the multicast station of the given vif,
2346 * and send it to the FW.
2347 * Note that each AP/GO mac should have its own multicast station.
2349 * @mvm: the mvm component
2350 * @vif: the interface to which the multicast station is added
2352 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2354 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2355 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2356 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2357 const u8 *maddr = _maddr;
2358 struct iwl_trans_txq_scd_cfg cfg = {
2359 .fifo = IWL_MVM_TX_FIFO_MCAST,
2360 .sta_id = msta->sta_id,
2363 .frame_limit = IWL_FRAME_LIMIT,
2365 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2368 lockdep_assert_held(&mvm->mutex);
2370 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2371 vif->type != NL80211_IFTYPE_ADHOC))
2375 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2376 * invalid, so make sure we use the queue we want.
2377 * Note that this is done here as we want to avoid making DQA
2378 * changes in the mac80211 layer.
2380 if (vif->type == NL80211_IFTYPE_ADHOC) {
2381 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2382 mvmvif->cab_queue = vif->cab_queue;
2386 * While in previous FWs we had to exclude cab queue from TFD queue
2387 * mask, now it is needed like any other queue.
2389 if (!iwl_mvm_has_new_tx_api(mvm) &&
2390 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2391 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2393 msta->tfd_queue_msk |= BIT(vif->cab_queue);
2395 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2396 mvmvif->id, mvmvif->color);
2398 iwl_mvm_dealloc_int_sta(mvm, msta);
2403 * Enable cab queue after the ADD_STA command is sent.
2404 * This is needed for 22000 firmware which won't accept an SCD_QUEUE_CFG
2405 * command with an unknown station id, and for FW that doesn't support
2406 * station API since the cab queue is not included in the
2409 if (iwl_mvm_has_new_tx_api(mvm)) {
2410 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2414 mvmvif->cab_queue = queue;
2415 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2416 IWL_UCODE_TLV_API_STA_TYPE))
2417 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2420 if (mvmvif->ap_wep_key) {
2421 u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
2423 if (key_offset == STA_KEY_IDX_INVALID)
2426 ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
2427 mvmvif->ap_wep_key, 1, 0, NULL, 0,
2437 * Send the FW a request to remove the station from its internal data
2438 * structures, and in addition remove it from the local data structure.
2440 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2442 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2445 lockdep_assert_held(&mvm->mutex);
2447 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2449 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
2452 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2454 IWL_WARN(mvm, "Failed sending remove station\n");
2459 #define IWL_MAX_RX_BA_SESSIONS 16
2461 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2463 struct iwl_mvm_delba_notif notif = {
2464 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2468 iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif));
2471 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2472 struct iwl_mvm_baid_data *data)
2476 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2478 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2480 struct iwl_mvm_reorder_buffer *reorder_buf =
2481 &data->reorder_buf[i];
2482 struct iwl_mvm_reorder_buf_entry *entries =
2483 &data->entries[i * data->entries_per_queue];
2485 spin_lock_bh(&reorder_buf->lock);
2486 if (likely(!reorder_buf->num_stored)) {
2487 spin_unlock_bh(&reorder_buf->lock);
2492 * This shouldn't happen in regular DELBA since the internal
2493 * delBA notification should trigger a release of all frames in
2494 * the reorder buffer.
2498 for (j = 0; j < reorder_buf->buf_size; j++)
2499 __skb_queue_purge(&entries[j].e.frames);
2501 * Prevent timer re-arm. This prevents a very far-fetched case
2502 * where we timed out on the notification. There may be prior
2503 * RX frames pending in the RX queue before the notification
2504 * that might get processed between now and the actual deletion
2505 * and we would re-arm the timer although we are deleting the
2508 reorder_buf->removed = true;
2509 spin_unlock_bh(&reorder_buf->lock);
2510 del_timer_sync(&reorder_buf->reorder_timer);
2514 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2515 struct iwl_mvm_baid_data *data,
2516 u16 ssn, u16 buf_size)
2520 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2521 struct iwl_mvm_reorder_buffer *reorder_buf =
2522 &data->reorder_buf[i];
2523 struct iwl_mvm_reorder_buf_entry *entries =
2524 &data->entries[i * data->entries_per_queue];
2527 reorder_buf->num_stored = 0;
2528 reorder_buf->head_sn = ssn;
2529 reorder_buf->buf_size = buf_size;
2530 /* rx reorder timer */
2531 timer_setup(&reorder_buf->reorder_timer,
2532 iwl_mvm_reorder_timer_expired, 0);
2533 spin_lock_init(&reorder_buf->lock);
2534 reorder_buf->mvm = mvm;
2535 reorder_buf->queue = i;
2536 reorder_buf->valid = false;
2537 for (j = 0; j < reorder_buf->buf_size; j++)
2538 __skb_queue_head_init(&entries[j].e.frames);
2542 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2543 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2545 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2546 struct iwl_mvm_add_sta_cmd cmd = {};
2547 struct iwl_mvm_baid_data *baid_data = NULL;
2551 lockdep_assert_held(&mvm->mutex);
2553 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2554 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2558 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2559 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2561 /* sparse doesn't like the __align() so don't check */
2564 * The division below will be OK if either the cache line size
2565 * can be divided by the entry size (ALIGN will round up) or if
2566 * the entry size can be divided by the cache line size, in
2567 * which case the ALIGN() will do nothing.
2569 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2570 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2574 * Upward align the reorder buffer size to fill an entire cache
2575 * line for each queue, to avoid sharing cache lines between
2578 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2581 * Allocate here so if allocation fails we can bail out early
2582 * before starting the BA session in the firmware
2584 baid_data = kzalloc(sizeof(*baid_data) +
2585 mvm->trans->num_rx_queues *
2592 * This division is why we need the above BUILD_BUG_ON(),
2593 * if that doesn't hold then this will not be right.
2595 baid_data->entries_per_queue =
2596 reorder_buf_size / sizeof(baid_data->entries[0]);
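/*
 * Worked example (illustrative numbers, not from the original source):
 * with buf_size == 10, sizeof(entries[0]) == 16 and SMP_CACHE_BYTES == 64,
 * reorder_buf_size = ALIGN(10 * 16, 64) = 192, so entries_per_queue =
 * 192 / 16 = 12 and each queue's slice of the entries array starts on its
 * own cache line. The BUILD_BUG_ON() above guarantees this division is
 * exact.
 */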
2599 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2600 cmd.sta_id = mvm_sta->sta_id;
2601 cmd.add_modify = STA_MODE_MODIFY;
2603 cmd.add_immediate_ba_tid = (u8) tid;
2604 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2605 cmd.rx_ba_window = cpu_to_le16(buf_size);
2607 cmd.remove_immediate_ba_tid = (u8) tid;
2609 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2610 STA_MODIFY_REMOVE_BA_TID;
2612 status = ADD_STA_SUCCESS;
2613 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2614 iwl_mvm_add_sta_cmd_size(mvm),
2619 switch (status & IWL_ADD_STA_STATUS_MASK) {
2620 case ADD_STA_SUCCESS:
2621 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2622 start ? "start" : "stopp");
2624 case ADD_STA_IMMEDIATE_BA_FAILURE:
2625 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2630 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2631 start ? "start" : "stopp", status);
2641 mvm->rx_ba_sessions++;
2643 if (!iwl_mvm_has_new_rx_api(mvm))
2646 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2650 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2651 IWL_ADD_STA_BAID_SHIFT);
2652 baid_data->baid = baid;
2653 baid_data->timeout = timeout;
2654 baid_data->last_rx = jiffies;
2655 baid_data->rcu_ptr = &mvm->baid_map[baid];
2656 timer_setup(&baid_data->session_timer,
2657 iwl_mvm_rx_agg_session_expired, 0);
2658 baid_data->mvm = mvm;
2659 baid_data->tid = tid;
2660 baid_data->sta_id = mvm_sta->sta_id;
2662 mvm_sta->tid_to_baid[tid] = baid;
2664 mod_timer(&baid_data->session_timer,
2665 TU_TO_EXP_TIME(timeout * 2));
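/*
 * Illustrative note (assumptions of this note, not from the original
 * source): the BA timeout is expressed in TUs (1 TU = 1024 usec), so e.g.
 * timeout == 5000 arms the session timer roughly 2 * 5000 * 1024 usec,
 * i.e. ~10.24 s, from now; the factor of two is assumed to give the peer
 * some slack before the session is considered expired.
 */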
2667 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2669 * protect the BA data with RCU to cover a case where our
2670 * internal RX sync mechanism will time out (not that it's
2671 * supposed to happen) and we will free the session data while
2672 * RX is being processed in parallel
2674 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2675 mvm_sta->sta_id, tid, baid);
2676 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2677 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2679 u8 baid = mvm_sta->tid_to_baid[tid];
2681 if (mvm->rx_ba_sessions > 0)
2682 /* check that restart flow didn't zero the counter */
2683 mvm->rx_ba_sessions--;
2684 if (!iwl_mvm_has_new_rx_api(mvm))
2687 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2690 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2691 if (WARN_ON(!baid_data))
2694 /* synchronize all rx queues so we can safely delete */
2695 iwl_mvm_free_reorder(mvm, baid_data);
2696 del_timer_sync(&baid_data->session_timer);
2697 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2698 kfree_rcu(baid_data, rcu_head);
2699 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2708 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2709 int tid, u8 queue, bool start)
2711 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2712 struct iwl_mvm_add_sta_cmd cmd = {};
2716 lockdep_assert_held(&mvm->mutex);
2719 mvm_sta->tfd_queue_msk |= BIT(queue);
2720 mvm_sta->tid_disable_agg &= ~BIT(tid);
2722 /* In DQA-mode the queue isn't removed on agg termination */
2723 mvm_sta->tid_disable_agg |= BIT(tid);
2726 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2727 cmd.sta_id = mvm_sta->sta_id;
2728 cmd.add_modify = STA_MODE_MODIFY;
2729 if (!iwl_mvm_has_new_tx_api(mvm))
2730 cmd.modify_mask = STA_MODIFY_QUEUES;
2731 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2732 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2733 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2735 status = ADD_STA_SUCCESS;
2736 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2737 iwl_mvm_add_sta_cmd_size(mvm),
2742 switch (status & IWL_ADD_STA_STATUS_MASK) {
2743 case ADD_STA_SUCCESS:
2747 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2748 start ? "start" : "stopp", status);
2755 const u8 tid_to_mac80211_ac[] = {
2764 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2767 static const u8 tid_to_ucode_ac[] = {
2778 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2779 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2781 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2782 struct iwl_mvm_tid_data *tid_data;
2787 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2790 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2791 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2793 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2794 mvmsta->tid_data[tid].state);
2798 lockdep_assert_held(&mvm->mutex);
2800 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2801 iwl_mvm_has_new_tx_api(mvm)) {
2802 u8 ac = tid_to_mac80211_ac[tid];
2804 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2809 spin_lock_bh(&mvmsta->lock);
2811 /* possible race condition - we entered D0i3 while starting agg */
2812 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2813 spin_unlock_bh(&mvmsta->lock);
2814 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2819 * Note the possible cases:
2820 * 1. An enabled TXQ - TXQ needs to become agg'ed
2821 * 2. The TXQ hasn't yet been enabled, so find a free one and mark
2824 txq_id = mvmsta->tid_data[tid].txq_id;
2825 if (txq_id == IWL_MVM_INVALID_QUEUE) {
2826 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2827 IWL_MVM_DQA_MIN_DATA_QUEUE,
2828 IWL_MVM_DQA_MAX_DATA_QUEUE);
2831 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2835 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2836 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2837 } else if (unlikely(mvm->queue_info[txq_id].status ==
2838 IWL_MVM_QUEUE_SHARED)) {
2840 IWL_DEBUG_TX_QUEUES(mvm,
2841 "Can't start tid %d agg on shared queue!\n",
2846 IWL_DEBUG_TX_QUEUES(mvm,
2847 "AGG for tid %d will be on queue #%d\n",
2850 tid_data = &mvmsta->tid_data[tid];
2851 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2852 tid_data->txq_id = txq_id;
2853 *ssn = tid_data->ssn;
2855 IWL_DEBUG_TX_QUEUES(mvm,
2856 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2857 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2858 tid_data->next_reclaimed);
2861 * In 22000 HW, the next_reclaimed index is only 8 bits wide, so we need
2862 * to align the wrap-around of the ssn so that we compare the relevant values.
2864 normalized_ssn = tid_data->ssn;
2865 if (mvm->trans->cfg->gen2)
2866 normalized_ssn &= 0xff;
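/*
 * Worked example (illustrative, not from the original source): on gen2,
 * if tid_data->ssn == 0x1f05 then normalized_ssn == 0x05, which can be
 * compared directly against the 8-bit next_reclaimed counter even though
 * the full 12-bit sequence numbers have already wrapped.
 */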
2868 if (normalized_ssn == tid_data->next_reclaimed) {
2869 tid_data->state = IWL_AGG_STARTING;
2870 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2872 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2878 spin_unlock_bh(&mvmsta->lock);
2883 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2884 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
2887 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2888 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2889 unsigned int wdg_timeout =
2890 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
2892 bool alloc_queue = true;
2893 enum iwl_mvm_queue_status queue_status;
2896 struct iwl_trans_txq_scd_cfg cfg = {
2897 .sta_id = mvmsta->sta_id,
2899 .frame_limit = buf_size,
2904 * When the FW supports TLC_OFFLOAD, it also implements the Tx aggregation
2905 * manager, so this function should never be called in this case.
2907 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
2910 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2911 != IWL_MAX_TID_COUNT);
2913 spin_lock_bh(&mvmsta->lock);
2914 ssn = tid_data->ssn;
2915 queue = tid_data->txq_id;
2916 tid_data->state = IWL_AGG_ON;
2917 mvmsta->agg_tids |= BIT(tid);
2918 tid_data->ssn = 0xffff;
2919 tid_data->amsdu_in_ampdu_allowed = amsdu;
2920 spin_unlock_bh(&mvmsta->lock);
2922 if (iwl_mvm_has_new_tx_api(mvm)) {
2924 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
2925 * would have failed, so if we are here there is no need to
2927 * However, if the aggregation size is different from the default
2928 * size, the scheduler should be reconfigured.
2929 * We cannot do this with the new TX API, so return unsupported
2930 * for now, until it is offloaded to firmware.
2931 * Note that if the SCD default value changes, this condition
2932 * should be updated as well.
2934 if (buf_size < IWL_FRAME_LIMIT)
2937 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2943 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
2945 queue_status = mvm->queue_info[queue].status;
2947 /* Maybe there is no need to even alloc a queue... */
2948 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
2949 alloc_queue = false;
2952 * Only reconfigure the SCD for the queue if the window size has
2953 * changed from the current one (become smaller)
2955 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
2957 * If reconfiguring an existing queue, it first must be
2960 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2964 "Error draining queue before reconfig\n");
2968 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2969 mvmsta->sta_id, tid,
2973 "Error reconfiguring TXQ #%d\n", queue);
2979 iwl_mvm_enable_txq(mvm, queue,
2980 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
2983 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2984 if (queue_status != IWL_MVM_QUEUE_SHARED) {
2985 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2990 /* No need to mark as reserved */
2991 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
2995 * Even though in theory the peer could have different
2996 * aggregation reorder buffer sizes for different sessions,
2997 * our ucode doesn't allow for that and has a global limit
2998 * for each station. Therefore, use the minimum of all the
2999 * aggregation sessions and our default value.
3001 mvmsta->max_agg_bufsize =
3002 min(mvmsta->max_agg_bufsize, buf_size);
3003 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
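/*
 * Illustrative example (numbers are assumptions, not from the original
 * source): if the default max_agg_bufsize is 64 and the peer opens one
 * session with buf_size 32 and a later one with buf_size 16, the
 * per-station limit ends up as min(64, 32, 16) == 16, matching the
 * single per-station ucode limit described above.
 */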
3005 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3008 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
3011 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3012 struct iwl_mvm_sta *mvmsta,
3013 struct iwl_mvm_tid_data *tid_data)
3015 u16 txq_id = tid_data->txq_id;
3017 lockdep_assert_held(&mvm->mutex);
3019 if (iwl_mvm_has_new_tx_api(mvm))
3023 * The TXQ is marked as reserved only if no traffic came through yet.
3024 * This means no traffic has been sent on this TID (agg'd or not), so
3025 * we no longer have use for the queue. It hasn't even been
3026 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
3029 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3030 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3031 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3035 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3036 struct ieee80211_sta *sta, u16 tid)
3038 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3039 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3044 * If mac80211 is cleaning its state, then say that we finished since
3045 * our state has been cleared anyway.
3047 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3048 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3052 spin_lock_bh(&mvmsta->lock);
3054 txq_id = tid_data->txq_id;
3056 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3057 mvmsta->sta_id, tid, txq_id, tid_data->state);
3059 mvmsta->agg_tids &= ~BIT(tid);
3061 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3063 switch (tid_data->state) {
3065 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3067 IWL_DEBUG_TX_QUEUES(mvm,
3068 "ssn = %d, next_recl = %d\n",
3069 tid_data->ssn, tid_data->next_reclaimed);
3071 tid_data->ssn = 0xffff;
3072 tid_data->state = IWL_AGG_OFF;
3073 spin_unlock_bh(&mvmsta->lock);
3075 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3077 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3079 case IWL_AGG_STARTING:
3080 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3082 * The agg session has been stopped before it was set up. This
3083 * can happen when the AddBA timer times out for example.
3086 /* No barriers since we are under mutex */
3087 lockdep_assert_held(&mvm->mutex);
3089 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3090 tid_data->state = IWL_AGG_OFF;
3095 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3096 mvmsta->sta_id, tid, tid_data->state);
3098 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3102 spin_unlock_bh(&mvmsta->lock);
3107 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3108 struct ieee80211_sta *sta, u16 tid)
3110 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3111 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3113 enum iwl_mvm_agg_state old_state;
3116 * First set the agg state to OFF to avoid calling
3117 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3119 spin_lock_bh(&mvmsta->lock);
3120 txq_id = tid_data->txq_id;
3121 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3122 mvmsta->sta_id, tid, txq_id, tid_data->state);
3123 old_state = tid_data->state;
3124 tid_data->state = IWL_AGG_OFF;
3125 mvmsta->agg_tids &= ~BIT(tid);
3126 spin_unlock_bh(&mvmsta->lock);
3128 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3130 if (old_state >= IWL_AGG_ON) {
3131 iwl_mvm_drain_sta(mvm, mvmsta, true);
3133 if (iwl_mvm_has_new_tx_api(mvm)) {
3134 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3136 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3137 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3139 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
3140 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3141 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3144 iwl_mvm_drain_sta(mvm, mvmsta, false);
3146 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3152 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3154 int i, max = -1, max_offs = -1;
3156 lockdep_assert_held(&mvm->mutex);
3158 /* Pick the unused key offset with the highest 'deleted'
3159 * counter. Every time a key is deleted, all the counters
3160 * are incremented and the one that was just deleted is
3161 * reset to zero. Thus, the highest counter is the one
3162 * that was deleted longest ago. Pick that one.
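*
* Illustrative example (not from the original source): with key offsets
* {0, 1, 2}, fw_key_deleted == {3, 0, 7} and offset 1 still set in
* fw_key_table, the loop below picks offset 2, since it has been unused
* the longest.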
3164 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3165 if (test_bit(i, mvm->fw_key_table))
3167 if (mvm->fw_key_deleted[i] > max) {
3168 max = mvm->fw_key_deleted[i];
3174 return STA_KEY_IDX_INVALID;
3179 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3180 struct ieee80211_vif *vif,
3181 struct ieee80211_sta *sta)
3183 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3186 return iwl_mvm_sta_from_mac80211(sta);
3189 * The device expects GTKs for station interfaces to be
3190 * installed as GTKs for the AP station. If we have no
3191 * station ID, then use AP's station ID.
3193 if (vif->type == NL80211_IFTYPE_STATION &&
3194 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3195 u8 sta_id = mvmvif->ap_sta_id;
3197 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3198 lockdep_is_held(&mvm->mutex));
3201 * It is possible that the 'sta' parameter is NULL,
3202 * for example when a GTK is removed - the sta_id will then
3203 * be the AP ID, and no station was passed by mac80211.
3205 if (IS_ERR_OR_NULL(sta))
3208 return iwl_mvm_sta_from_mac80211(sta);
3214 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3216 struct ieee80211_key_conf *key, bool mcast,
3217 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3218 u8 key_offset, bool mfp)
3221 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3222 struct iwl_mvm_add_sta_key_cmd cmd;
3230 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3231 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3233 if (sta_id == IWL_MVM_INVALID_STA)
3236 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3237 STA_KEY_FLG_KEYID_MSK;
3238 key_flags = cpu_to_le16(keyidx);
3239 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3241 switch (key->cipher) {
3242 case WLAN_CIPHER_SUITE_TKIP:
3243 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3245 memcpy((void *)&u.cmd.tx_mic_key,
3246 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3249 memcpy((void *)&u.cmd.rx_mic_key,
3250 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3252 pn = atomic64_read(&key->tx_pn);
3255 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3256 for (i = 0; i < 5; i++)
3257 u.cmd_v1.tkip_rx_ttak[i] =
3258 cpu_to_le16(tkip_p1k[i]);
3260 memcpy(u.cmd.common.key, key->key, key->keylen);
3262 case WLAN_CIPHER_SUITE_CCMP:
3263 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3264 memcpy(u.cmd.common.key, key->key, key->keylen);
3266 pn = atomic64_read(&key->tx_pn);
3268 case WLAN_CIPHER_SUITE_WEP104:
3269 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3271 case WLAN_CIPHER_SUITE_WEP40:
3272 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3273 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3275 case WLAN_CIPHER_SUITE_GCMP_256:
3276 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3278 case WLAN_CIPHER_SUITE_GCMP:
3279 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3280 memcpy(u.cmd.common.key, key->key, key->keylen);
3282 pn = atomic64_read(&key->tx_pn);
3285 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3286 memcpy(u.cmd.common.key, key->key, key->keylen);
3290 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3292 key_flags |= cpu_to_le16(STA_KEY_MFP);
3294 u.cmd.common.key_offset = key_offset;
3295 u.cmd.common.key_flags = key_flags;
3296 u.cmd.common.sta_id = sta_id;
3299 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3300 size = sizeof(u.cmd);
3302 size = sizeof(u.cmd_v1);
3305 status = ADD_STA_SUCCESS;
3306 if (cmd_flags & CMD_ASYNC)
3307 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3310 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3314 case ADD_STA_SUCCESS:
3315 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3319 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3326 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3327 struct ieee80211_key_conf *keyconf,
3328 u8 sta_id, bool remove_key)
3330 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3332 /* verify the key details match the required command's expectations */
3333 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3334 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3335 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3336 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3337 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3340 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3341 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3344 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3345 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3348 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3350 struct ieee80211_key_seq seq;
3353 switch (keyconf->cipher) {
3354 case WLAN_CIPHER_SUITE_AES_CMAC:
3355 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3357 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3358 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3359 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3365 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3366 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3367 igtk_cmd.ctrl_flags |=
3368 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3369 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3370 pn = seq.aes_cmac.pn;
3371 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3372 ((u64) pn[4] << 8) |
3373 ((u64) pn[3] << 16) |
3374 ((u64) pn[2] << 24) |
3375 ((u64) pn[1] << 32) |
3376 ((u64) pn[0] << 40));
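/*
 * Worked example (illustrative, not from the original source): for
 * pn = {0x00, 0x00, 0x00, 0x00, 0x01, 0x02} the expression above builds
 * the CPU value 0x0102, i.e. IPN 258 (pn[5] is the least significant
 * byte), which cpu_to_le64() then stores into the command's little-endian
 * receive_seq_cnt field.
 */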
3379 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3380 remove_key ? "removing" : "installing",
3383 if (!iwl_mvm_has_new_rx_api(mvm)) {
3384 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3385 .ctrl_flags = igtk_cmd.ctrl_flags,
3386 .key_id = igtk_cmd.key_id,
3387 .sta_id = igtk_cmd.sta_id,
3388 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3391 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3392 ARRAY_SIZE(igtk_cmd_v1.igtk));
3393 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3394 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3396 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3397 sizeof(igtk_cmd), &igtk_cmd);
3401 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3402 struct ieee80211_vif *vif,
3403 struct ieee80211_sta *sta)
3405 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3410 if (vif->type == NL80211_IFTYPE_STATION &&
3411 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3412 u8 sta_id = mvmvif->ap_sta_id;
3413 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3414 lockdep_is_held(&mvm->mutex));
3422 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3423 struct ieee80211_vif *vif,
3424 struct ieee80211_sta *sta,
3425 struct ieee80211_key_conf *keyconf,
3431 struct ieee80211_key_seq seq;
3437 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3439 sta_id = mvm_sta->sta_id;
3441 } else if (vif->type == NL80211_IFTYPE_AP &&
3442 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3443 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3445 sta_id = mvmvif->mcast_sta.sta_id;
3447 IWL_ERR(mvm, "Failed to find station id\n");
3451 switch (keyconf->cipher) {
3452 case WLAN_CIPHER_SUITE_TKIP:
3453 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3454 /* get phase 1 key from mac80211 */
3455 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3456 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3457 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3458 seq.tkip.iv32, p1k, 0, key_offset,
3461 case WLAN_CIPHER_SUITE_CCMP:
3462 case WLAN_CIPHER_SUITE_WEP40:
3463 case WLAN_CIPHER_SUITE_WEP104:
3464 case WLAN_CIPHER_SUITE_GCMP:
3465 case WLAN_CIPHER_SUITE_GCMP_256:
3466 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3467 0, NULL, 0, key_offset, mfp);
3470 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3471 0, NULL, 0, key_offset, mfp);
3477 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
3478 struct ieee80211_key_conf *keyconf,
3482 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3483 struct iwl_mvm_add_sta_key_cmd cmd;
3485 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3486 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3491 /* This is a valid situation for GTK removal */
3492 if (sta_id == IWL_MVM_INVALID_STA)
3495 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3496 STA_KEY_FLG_KEYID_MSK);
3497 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
3498 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
3501 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3504 * The fields assigned here are in the same location at the start
3505 * of the command, so we can do this union trick.
3507 u.cmd.common.key_flags = key_flags;
3508 u.cmd.common.key_offset = keyconf->hw_key_idx;
3509 u.cmd.common.sta_id = sta_id;
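/*
 * Illustrative note (an assumption of this note, not stated in the
 * original source): both layouts in the union are expected to start with
 * the same common header (key_flags/key_offset/sta_id), so filling
 * u.cmd.common here is valid whether the v1 or the new command is sent,
 * with 'size' below selecting how much of the union actually goes out.
 */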
3511 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
3513 status = ADD_STA_SUCCESS;
3514 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
3518 case ADD_STA_SUCCESS:
3519 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
3523 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
3530 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3531 struct ieee80211_vif *vif,
3532 struct ieee80211_sta *sta,
3533 struct ieee80211_key_conf *keyconf,
3536 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3537 struct iwl_mvm_sta *mvm_sta;
3538 u8 sta_id = IWL_MVM_INVALID_STA;
3540 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3542 lockdep_assert_held(&mvm->mutex);
3544 if (vif->type != NL80211_IFTYPE_AP ||
3545 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3546 /* Get the station id from the mvm local station table */
3547 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3549 IWL_ERR(mvm, "Failed to find station\n");
3552 sta_id = mvm_sta->sta_id;
3555 * It is possible that the 'sta' parameter is NULL, and thus
3556 * there is a need to retrieve the sta from the local station
3560 sta = rcu_dereference_protected(
3561 mvm->fw_id_to_mac_id[sta_id],
3562 lockdep_is_held(&mvm->mutex));
3563 if (IS_ERR_OR_NULL(sta)) {
3564 IWL_ERR(mvm, "Invalid station id\n");
3569 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3572 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3574 sta_id = mvmvif->mcast_sta.sta_id;
3577 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3578 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3579 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3580 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3584 /* If the key_offset is not pre-assigned, we need to find a
3585 * new offset to use. In normal cases, the offset is not
3586 * pre-assigned, but during HW_RESTART we want to reuse the
3587 * same indices, so we pass them when this function is called.
3589 * In D3 entry, we need to hardcode the indices (because the
3590 * firmware hardcodes the PTK offset to 0). In this case, we
3591 * need to make sure we don't overwrite the hw_key_idx in the
3592 * keyconf structure, because otherwise we cannot configure
3593 * the original ones back when resuming.
3595 if (key_offset == STA_KEY_IDX_INVALID) {
3596 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3597 if (key_offset == STA_KEY_IDX_INVALID)
3599 keyconf->hw_key_idx = key_offset;
3602 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3607 * For WEP, the same key is used for multicast and unicast. Upload it
3608 * again, using the same key offset, and now pointing the other one
3609 * to the same key slot (offset).
3610 * If this fails, remove the original as well.
3612 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3613 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3615 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3616 key_offset, !mcast);
3618 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3623 __set_bit(key_offset, mvm->fw_key_table);
3626 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3627 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3628 sta ? sta->addr : zero_addr, ret);
3632 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3633 struct ieee80211_vif *vif,
3634 struct ieee80211_sta *sta,
3635 struct ieee80211_key_conf *keyconf)
3637 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3638 struct iwl_mvm_sta *mvm_sta;
3639 u8 sta_id = IWL_MVM_INVALID_STA;
3642 lockdep_assert_held(&mvm->mutex);
3644 /* Get the station from the mvm local station table */
3645 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3647 sta_id = mvm_sta->sta_id;
3648 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3649 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3652 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3653 keyconf->keyidx, sta_id);
3655 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3656 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3657 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
3658 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3660 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3661 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3662 keyconf->hw_key_idx);
3666 /* track which key was deleted last */
3667 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3668 if (mvm->fw_key_deleted[i] < U8_MAX)
3669 mvm->fw_key_deleted[i]++;
3671 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3673 if (sta && !mvm_sta) {
3674 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3678 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3682 /* delete WEP key twice to get rid of (now useless) offset */
3683 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3684 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3685 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3690 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3691 struct ieee80211_vif *vif,
3692 struct ieee80211_key_conf *keyconf,
3693 struct ieee80211_sta *sta, u32 iv32,
3696 struct iwl_mvm_sta *mvm_sta;
3697 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3698 bool mfp = sta ? sta->mfp : false;
3702 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3703 if (WARN_ON_ONCE(!mvm_sta))
3705 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3706 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3713 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3714 struct ieee80211_sta *sta)
3716 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3717 struct iwl_mvm_add_sta_cmd cmd = {
3718 .add_modify = STA_MODE_MODIFY,
3719 .sta_id = mvmsta->sta_id,
3720 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
3721 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3725 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3726 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3728 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3731 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3732 struct ieee80211_sta *sta,
3733 enum ieee80211_frame_release_type reason,
3734 u16 cnt, u16 tids, bool more_data,
3735 bool single_sta_queue)
3737 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3738 struct iwl_mvm_add_sta_cmd cmd = {
3739 .add_modify = STA_MODE_MODIFY,
3740 .sta_id = mvmsta->sta_id,
3741 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3742 .sleep_tx_count = cpu_to_le16(cnt),
3743 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3746 unsigned long _tids = tids;
3748 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3749 * Note that this field is reserved and unused by firmware not
3750 * supporting GO uAPSD, so it's safe to always do this.
3752 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3753 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3755 /* If we're releasing frames from aggregation or dqa queues then check
3756 * if all the queues that we're releasing frames from, combined, have:
3757 * - more frames than the service period, in which case more_data
3759 * - fewer than 'cnt' frames, in which case we need to adjust the
3760 * firmware command (but do that unconditionally)
3762 if (single_sta_queue) {
3763 int remaining = cnt;
3766 spin_lock_bh(&mvmsta->lock);
3767 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3768 struct iwl_mvm_tid_data *tid_data;
3771 tid_data = &mvmsta->tid_data[tid];
3773 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3774 if (n_queued > remaining) {
3779 remaining -= n_queued;
3781 sleep_tx_count = cnt - remaining;
3782 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3783 mvmsta->sleep_tx_count = sleep_tx_count;
3784 spin_unlock_bh(&mvmsta->lock);
3786 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
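/*
 * Illustrative example (not from the original source): if the peer's
 * service period allows cnt == 4 frames but only 3 frames are queued
 * across the released TIDs, 'remaining' ends up as 1 and the firmware is
 * told sleep_tx_count == 3, so it doesn't wait for a 4th frame that will
 * never arrive.
 */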
3787 if (WARN_ON(cnt - remaining == 0)) {
3788 ieee80211_sta_eosp(sta);
3793 /* Note: this is ignored by firmware not supporting GO uAPSD */
3795 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3797 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3798 mvmsta->next_status_eosp = true;
3799 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3801 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3804 /* block the Tx queues until the FW updated the sleep Tx count */
3805 iwl_trans_block_txq_ptrs(mvm->trans, true);
3807 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3808 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3809 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3811 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3814 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3815 struct iwl_rx_cmd_buffer *rxb)
3817 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3818 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3819 struct ieee80211_sta *sta;
3820 u32 sta_id = le32_to_cpu(notif->sta_id);
3822 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
3826 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3827 if (!IS_ERR_OR_NULL(sta))
3828 ieee80211_sta_eosp(sta);
3832 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3833 struct iwl_mvm_sta *mvmsta, bool disable)
3835 struct iwl_mvm_add_sta_cmd cmd = {
3836 .add_modify = STA_MODE_MODIFY,
3837 .sta_id = mvmsta->sta_id,
3838 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3839 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3840 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3844 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3845 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3847 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3850 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3851 struct ieee80211_sta *sta,
3854 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3856 spin_lock_bh(&mvm_sta->lock);
3858 if (mvm_sta->disable_tx == disable) {
3859 spin_unlock_bh(&mvm_sta->lock);
3863 mvm_sta->disable_tx = disable;
3865 /* Tell mac80211 to start/stop queuing tx for this station */
3866 ieee80211_sta_block_awake(mvm->hw, sta, disable);
3868 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
3870 spin_unlock_bh(&mvm_sta->lock);
3873 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
3874 struct iwl_mvm_vif *mvmvif,
3875 struct iwl_mvm_int_sta *sta,
3878 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
3879 struct iwl_mvm_add_sta_cmd cmd = {
3880 .add_modify = STA_MODE_MODIFY,
3881 .sta_id = sta->sta_id,
3882 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3883 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3884 .mac_id_n_color = cpu_to_le32(id),
3888 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
3889 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3891 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3894 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
3895 struct iwl_mvm_vif *mvmvif,
3898 struct ieee80211_sta *sta;
3899 struct iwl_mvm_sta *mvm_sta;
3902 lockdep_assert_held(&mvm->mutex);
3904 /* Block/unblock all the stations of the given mvmvif */
3905 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
3906 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3907 lockdep_is_held(&mvm->mutex));
3908 if (IS_ERR_OR_NULL(sta))
3911 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3912 if (mvm_sta->mac_id_n_color !=
3913 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
3916 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
3919 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
3922 /* Need to block/unblock also multicast station */
3923 if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
3924 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3925 &mvmvif->mcast_sta, disable);
3928 * Only unblock the broadcast station (FW blocks it for immediate
3929 * quiet, not the driver)
3931 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
3932 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3933 &mvmvif->bcast_sta, disable);
3936 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3938 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3939 struct iwl_mvm_sta *mvmsta;
3943 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
3945 if (!WARN_ON(!mvmsta))
3946 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
3951 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
3953 u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3956 * In 22000 HW, the next_reclaimed index is only 8 bits wide, so we need
3957 * to align the wrap-around of the ssn so that we compare the relevant values.
3959 if (mvm->trans->cfg->gen2)
3962 return ieee80211_sn_sub(sn, tid_data->next_reclaimed);