/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp);
/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);

	return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
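/*
 * Usage note (illustrative, not part of the upstream file): callers pass the
 * result of iwl_mvm_add_sta_cmd_size() as the payload length of the ADD_STA
 * host command, e.g.:
 *
 *	iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 *				    iwl_mvm_add_sta_cmd_size(mvm),
 *				    &cmd, &status);
 *
 * Older firmware therefore only ever receives the v7-sized prefix of the
 * structure, while newer firmware receives the full structure.
 */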
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0. Reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}
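/*
 * Locking note: fw_id_to_mac_id[] is only written under mvm->mutex and read
 * either under RCU or under the mutex, which is why the lookup above can use
 * rcu_dereference_protected() with lockdep_is_held(&mvm->mutex) instead of
 * taking rcu_read_lock().
 */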
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
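/*
 * Note on the bandwidth switch above: the cases intentionally fall through,
 * so a 160 MHz station also gets the 80/40/20 MHz FAT flags set. Similarly,
 * uapsd_acs is built in the low nibble and then duplicated into the high
 * nibble ("uapsd_acs |= uapsd_acs << 4"); assuming the firmware API keeps
 * trigger-enabled ACs in one nibble and delivery-enabled ACs in the other,
 * this marks the same ACs as both trigger- and delivery-enabled.
 */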
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
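/*
 * Timing sketch for the timer above: ba_data->timeout is in TUs
 * (1 TU = 1024 usec), and the BA session is only torn down once no frame has
 * been received for twice that value. E.g. with a timeout of 5000 TU (~5.1s)
 * and a frame received 3s ago, the timer is simply re-armed for the
 * remaining ~7.2s instead of expiring the session.
 */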
/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}
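/*
 * Note: the per-queue TID bitmap effectively acts as a reference count here.
 * The SCD queue is only truly disabled (and its queue_info reset) when the
 * last TID mapped to it is removed; otherwise the command merely shrinks the
 * TID set and the queue stays enabled.
 */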
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}
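/*
 * The synchronize_net() above is what provides the guarantee described in the
 * preceding comment: it waits for in-flight softirq-context TX processing to
 * finish, so no TX path can still be using the stale txq_id values once this
 * function returns.
 */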
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}
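/*
 * Worked example of the selection order above: if a station currently has
 * queues only for VI and BK and now needs one for VO, there is no BE queue
 * (rule 1) and no VO queue (rule 2), so rule 3a picks the existing VI queue
 * rather than falling back to BK.
 */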
/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication of shared
	 * queues there.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	txq->stopped = false;

	return ret;
}
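/*
 * Concrete values for the "flipped" enum mentioned above:
 * IEEE80211_AC_VO = 0, IEEE80211_AC_VI = 1, IEEE80211_AC_BE = 2,
 * IEEE80211_AC_BK = 3. So "ac <= mvm->queue_info[queue].mac80211_ac" asks
 * whether the new TID's AC has the same or higher priority than the queue's
 * current AC, in which case no redirection is needed.
 */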
static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = IWL_DEFAULT_QUEUE_SIZE;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	}
	queue = iwl_trans_txq_alloc(mvm->trans,
				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
				    sta_id, tid, SCD_QUEUE_CFG, size, timeout);

	if (queue < 0) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
				    sta_id, tid, queue);
		return queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);

	return queue;
}
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}
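/*
 * Return value note: %true means this was the first TID mapped to the queue,
 * so the caller still has to send the SCD enable command; %false means the
 * queue was already enabled (it is being shared) or the mapping failed
 * because the TID was already present on the queue.
 */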
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}
/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive.
 * If only some of the queue TIDs are inactive - unmap them from the queue.
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 q_tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}
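/*
 * Ownership example for the changetid logic above: a queue owned by TID 3
 * (txq_tid == 3) and shared with TIDs 0 and 5 loses TID 3 to inactivity. The
 * remaining bitmap no longer contains the owner, so the queue is marked in
 * *changetid_queues and ownership is later transferred to one of the
 * remaining TIDs via SCD_CFG_UPDATE_QUEUE_TID.
 */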
/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret >= 0 && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	rcu_read_unlock();

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}
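/*
 * Flow summary: one pass over all queues collects, per queue, the TIDs whose
 * last frame is older than IWL_MVM_DQA_QUEUE_TIMEOUT; fully idle queues
 * become candidates for reuse, partially idle shared queues get their stale
 * TIDs unmapped, and the deferred unshare/change-TID work runs only after
 * the scan, outside the station locks and the RCU read section.
 */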
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication of shared
	 * queues there.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);

	return ret;
}
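/*
 * Allocation order implemented above, for reference:
 *   1. a free queue from the MGMT pool (only for tid == IWL_MAX_TID_COUNT)
 *   2. the station's reserved queue, if still marked reserved
 *   3. a free queue from the DATA pool
 *   4. a queue reclaimed by the inactivity check
 *   5. sharing an existing DATA queue (iwl_mvm_get_shared_queue())
 */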
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
		list_del_init(&mvmtxq->list);
		local_bh_disable();
		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
							 i, wdg);
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		atomic_set(&mvmtxq->tx_request, 0);
	}

	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}
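/*
 * Note on the 0xffff initialization above: last_seq is compared against the
 * full seq_ctrl field in iwl_mvm_is_dup(), and a real seq_ctrl of a
 * non-fragmented frame can never be 0xffff (the low 4 bits are the fragment
 * number), so the sentinel guarantees the first packet of a new TID is never
 * misclassified as a duplicate.
 */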
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
				    0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status))
			return -EINVAL;

		*status = IWL_MVM_QUEUE_FREE;
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}
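/*
 * Teardown ordering above: set the drain flag, flush and wait for the TX
 * queues to empty, clear the drain flag, free the queues, and only then send
 * REMOVE_STA and clear the fw_id_to_mac_id entry - so the firmware never
 * sees frames for a station it no longer knows about.
 */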
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tvqm_queue =
			iwl_mvm_tvqm_enable_txq(mvm, sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		*queue = tvqm_queue;
	} else {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = fifo,
			.sta_id = sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
	}
}
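/*
 * Note: the aux/sniffer queues carry no QoS data, so they are configured
 * with tid == IWL_MAX_TID_COUNT (the pseudo-TID also used for management)
 * and aggregation disabled; the queue watchdog is only armed when the
 * tfd_q_hang_detect module parameter is set.
 */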
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	return 0;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map snif queue to fifo - must happen before adding snif station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	return 0;
}
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			queue = mvm->probe_queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			queue = mvm->p2p_dev_queue;
		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
			return -EINVAL;

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}
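/*
 * Ordering note, as the comments above spell out: on pre-22000 devices the
 * queue must exist before ADD_STA (the command carries tfd_queue_msk), while
 * on TVQM-based 22000 devices the firmware cannot allocate a queue for a
 * station it does not know yet, so the queue is only allocated after the
 * station was added.
 */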
static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}
2186 /* Send the FW a request to remove the station from it's internal data
2187 * structures, but DO NOT remove the entry from the local data structures. */
2188 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2190 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2193 lockdep_assert_held(&mvm->mutex);
2195 iwl_mvm_free_bcast_sta_queues(mvm, vif);
2197 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2199 IWL_WARN(mvm, "Failed sending remove station\n");
2203 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2205 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2207 lockdep_assert_held(&mvm->mutex);
2209 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
2210 ieee80211_vif_type_p2p(vif),
2211 IWL_STA_GENERAL_PURPOSE);
2214 /* Allocate a new station entry for the broadcast station to the given vif,
2215 * and send it to the FW.
2216 * Note that each P2P mac should have its own broadcast station.
2218 * @mvm: the mvm component
2219 * @vif: the interface to which the broadcast station is added
2220 * @bsta: the broadcast station to add. */
2221 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2223 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2224 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2227 lockdep_assert_held(&mvm->mutex);
2229 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2233 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2236 iwl_mvm_dealloc_int_sta(mvm, bsta);
2241 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2243 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2245 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2249 * Send the FW a request to remove the station from it's internal data
2250 * structures, and in addition remove it from the local data structure.
2252 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2256 lockdep_assert_held(&mvm->mutex);
2258 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2260 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2266 * Allocate a new station entry for the multicast station to the given vif,
2267 * and send it to the FW.
2268 * Note that each AP/GO mac should have its own multicast station.
2270 * @mvm: the mvm component
2271 * @vif: the interface to which the multicast station is added
2273 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2275 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2276 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2277 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2278 const u8 *maddr = _maddr;
2279 struct iwl_trans_txq_scd_cfg cfg = {
2280 .fifo = vif->type == NL80211_IFTYPE_AP ?
2281 IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2282 .sta_id = msta->sta_id,
2285 .frame_limit = IWL_FRAME_LIMIT,
2287 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2290 lockdep_assert_held(&mvm->mutex);
2292 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2293 vif->type != NL80211_IFTYPE_ADHOC))
2297 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2298 * invalid, so make sure we use the queue we want.
2299 * Note that this is done here as we want to avoid making DQA
2300 * changes in mac80211 layer.
2302 if (vif->type == NL80211_IFTYPE_ADHOC)
2303 mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2306 * While in previous FWs we had to exclude cab queue from TFD queue
2307 * mask, now it is needed as any other queue.
2309 if (!iwl_mvm_has_new_tx_api(mvm) &&
2310 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2311 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2313 msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
2315 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2316 mvmvif->id, mvmvif->color);
2318 iwl_mvm_dealloc_int_sta(mvm, msta);
2323 * Enable cab queue after the ADD_STA command is sent.
2324 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
2325 * command with unknown station id, and for FW that doesn't support
2326 * station API since the cab queue is not included in the
2329 if (iwl_mvm_has_new_tx_api(mvm)) {
2330 int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
2333 mvmvif->cab_queue = queue;
2334 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2335 IWL_UCODE_TLV_API_STA_TYPE))
2336 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2342 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2343 struct ieee80211_key_conf *keyconf,
2347 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2348 struct iwl_mvm_add_sta_key_cmd cmd;
2350 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2351 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2356 /* This is a valid situation for GTK removal */
2357 if (sta_id == IWL_MVM_INVALID_STA)
2360 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2361 STA_KEY_FLG_KEYID_MSK);
2362 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2363 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2366 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2369 * The fields assigned here are in the same location at the start
2370 * of the command, so we can do this union trick.
2372 u.cmd.common.key_flags = key_flags;
2373 u.cmd.common.key_offset = keyconf->hw_key_idx;
2374 u.cmd.common.sta_id = sta_id;
2376 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
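	/*
	 * Illustrative sketch (not driver code): the union trick above only
	 * works because both command versions are assumed to start with the
	 * same common header, roughly:
	 *
	 *	struct iwl_mvm_add_sta_key_common {
	 *		u8 sta_id;
	 *		u8 key_offset;
	 *		__le16 key_flags;
	 *		...
	 *	};
	 *
	 * so writing through u.cmd.common fills the identical leading bytes
	 * of u.cmd_v1 as well; only the trailing fields (and thus the size
	 * sent) differ. The exact field order shown here is an assumption
	 * for illustration, not the firmware ABI.
	 */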
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

	iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames
		 * in the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);
		/*
		 * Prevent timer re-arm. This covers a very far-fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual
		 * deletion, and we would re-arm the timer although we are
		 * deleting the reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}
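/*
 * Illustrative sketch (not driver code): the entries array is one flat
 * allocation holding num_rx_queues back-to-back reorder windows, so the
 * window of RX queue q starts at entries[q * entries_per_queue]. For
 * example, with 4 RX queues and entries_per_queue == 64, queue 2's slot
 * for a frame whose sequence number reduces to offset k would be:
 *
 *	struct iwl_mvm_reorder_buf_entry *e =
 *		&data->entries[2 * 64 + k];
 *
 * The concrete numbers are assumptions for illustration only.
 */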
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
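		/*
		 * Worked example (assumed sizes, for illustration only):
		 * with 64-byte cache lines and a 16-byte entry, a 54-entry
		 * window needs 54 * 16 = 864 bytes; ALIGN() rounds that up
		 * to 896, giving 896 / 16 = 56 entries per queue. The two
		 * extra entries are padding that keeps each queue's window
		 * on its own cache lines.
		 */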
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));
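		/*
		 * Worked example: the BA timeout is given in TUs (1 TU =
		 * 1024 usec) and the timer is armed at twice that value, so
		 * a negotiated timeout of 5000 TUs (~5.12 s) expires the
		 * session only after ~10.24 s of inactivity.
		 */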
		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will time out (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	u16 txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *	it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
					      IWL_MVM_DQA_MIN_DATA_QUEUE,
					      IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto out;
		}
		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
		ret = -ENXIO;
		IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
			tid, IWL_MAX_HW_QUEUES - 1);
		goto out;
	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto out;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;
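	/*
	 * Worked example: on gen2 HW, next_reclaimed only tracks 8 bits, so
	 * an ssn of 0x312 must be reduced to 0x12 before the comparison
	 * below; 0x312 == 0x12 would otherwise never match even though the
	 * queue has fully drained.
	 */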
	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;

out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate it.
		 * However, if aggregation size is different than the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to firmware.
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
}

static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}
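/*
 * Illustrative sketch (not driver code) of the selection policy above:
 * suppose offsets 0 and 2 are marked used in fw_key_table, while the free
 * offsets have fw_key_deleted = {1: 3, 3: 7, 4: 0}. Offset 3 wins because
 * it is free and has the highest counter, i.e. it was deleted longest ago.
 * The concrete numbers are assumptions for illustration only.
 */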
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);
		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
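		/*
		 * Illustrative note: seq.aes_cmac.pn holds the 6-byte BIP
		 * packet number with pn[0] as the most significant byte, so
		 * the shifts above repack it into a single integer for the
		 * firmware. E.g. pn = {0x00, 0x00, 0x00, 0x00, 0x01, 0x02}
		 * becomes the 48-bit value 0x000000000102 (258).
		 */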
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset,
					   mfp);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

	ret = 0;
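	/*
	 * Usage sketch (illustrative, not driver code): callers normally
	 * pass STA_KEY_IDX_INVALID and let this function pick an offset:
	 *
	 *	ret = iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
	 *				  STA_KEY_IDX_INVALID);
	 *
	 * while the HW_RESTART/D3 paths pass a previously used offset
	 * (such as keyconf->hw_key_idx) to reinstall the same slots.
	 */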
end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
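	/*
	 * Worked example: tids = BIT(6) | BIT(0) releases TID 6 (AC_VO per
	 * tid_to_ucode_ac) and TID 0 (AC_BE), so awake_acs ends up as
	 * BIT(AC_VO) | BIT(AC_BE).
	 */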
	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}
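	/*
	 * Worked example: with cnt = 4 frames in the service period, 1 frame
	 * queued on TID 0 and 2 on TID 6, remaining ends up as 1 and
	 * sleep_tx_count as 3. Had TID 6 held 5 frames instead, remaining
	 * would hit 0 with frames left over, so more_data is set and the
	 * firmware releases exactly cnt frames.
	 */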
	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}