/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp);
/*
 * The new version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);

	return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0. Reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
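		/*
		 * The low nibble of uapsd_acs holds the trigger-enabled ACs;
		 * the firmware expects the delivery-enabled ACs in the high
		 * nibble, and for U-APSD both sets are the same here.
		 */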
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
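		/* max_sp counts units of 2 frames; 0 means "all buffered frames" (128) */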
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;
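	/*
	 * Give the session two timeout intervals of grace: if a frame was
	 * received recently enough, re-arm the timer instead of tearing the
	 * BA session down.
	 */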
	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}

static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;
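	/*
	 * Tear the queue down in the transport first, then tell the
	 * firmware scheduler about it via SCD_QUEUE_CFG.
	 */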
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);
	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
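	/*
	 * IEEE80211_INVAL_HW_QUEUE (0xff) fits in a single byte, so memset()
	 * can seed the whole per-AC table with "no queue".
	 */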
	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there for shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	txq->stopped = false;

	return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

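/*
 * On the new (TVQM) TX path the transport allocates queues dynamically,
 * so instead of picking a queue from a fixed pool we only ask it for a
 * queue of an appropriate size.
 */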
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
				mvm->trans->cfg->min_256_ba_txq_size);

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	}
	queue = iwl_trans_txq_alloc(mvm->trans,
				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
				    sta_id, tid, SCD_QUEUE_CFG, size, timeout);

	if (queue < 0) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
				    sta_id, tid, queue);
		return queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);

	return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret >= 0 && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	rcu_read_unlock();

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregated.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there for shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
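	/*
	 * If the transport had to bump the SSN, mirror it in our copy: the
	 * SN occupies bits 4-15 of seq_number (the low 4 bits are the
	 * fragment number), hence the 0x10 step.
	 */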
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);

	return ret;
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
		list_del_init(&mvmtxq->list);
		local_bh_disable();
		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
							 i, wdg);
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		atomic_set(&mvmtxq->tx_request, 0);
	}

	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);
	else
		spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
				    0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status))
			return -EINVAL;

		*status = IWL_MVM_QUEUE_FREE;
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}

static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
		mvm->cfg->base_params->wd_timeout :
		IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tvqm_queue =
			iwl_mvm_tvqm_enable_txq(mvm, sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		*queue = tvqm_queue;
	} else {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = fifo,
			.sta_id = sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
	}
}

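/*
 * The aux and sniffer stations are internal stations that are not tied to
 * any real peer; their queues carry driver/firmware-internal traffic.
 */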
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map snif queue to fifo - must happen before adding snif station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	return 0;
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			queue = mvm->probe_queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			queue = mvm->p2p_dev_queue;
		} else {
			WARN(1, "Missing required TXQ for adding bcast STA\n");
			return -EINVAL;
		}

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}

static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = vif->type == NL80211_IFTYPE_AP ?
			IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
				   timeout);
		msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
						    0, timeout);
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
				   timeout);

	return 0;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_MVM_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

2401 * Send the FW a request to remove the station from it's internal data
2402 * structures, and in addition remove it from the local data structure.
2404 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2406 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2409 lockdep_assert_held(&mvm->mutex);
2411 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2413 iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
2415 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2417 IWL_WARN(mvm, "Failed sending remove station\n");
#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_rss_sync_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

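/*
 * With .metadata.sync set, iwl_mvm_sync_rx_queues_internal() sends the
 * notification to every RX queue and waits for each queue to echo it back,
 * so by the time it returns no RX queue can still be processing frames
 * that reference this BAID.
 */
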
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);
		/*
		 * Prevent timer re-arm. This prevents a very far-fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}

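/*
 * The per-queue reorder buffers live in a single flat allocation: queue i
 * owns entries[i * entries_per_queue] up to (but excluding)
 * entries[(i + 1) * entries_per_queue]. For illustration, with 4 RX queues
 * and entries_per_queue == 64, queue 2 keeps a frame slot at
 * entries[2 * 64 + (sn % buf_size)].
 */
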
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

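/*
 * On a successful start, the firmware returns the allocated BAID inside the
 * ADD_STA status word, relying on the layout checked above: the low bits
 * hold the plain status (IWL_ADD_STA_STATUS_MASK), IWL_ADD_STA_BAID_VALID_MASK
 * flags that the bits under IWL_ADD_STA_BAID_MASK (shifted down by
 * IWL_ADD_STA_BAID_SHIFT) carry a valid BAID, which then indexes
 * mvm->baid_map[].
 */
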
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

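/*
 * Both tables follow the 802.11 UP-to-AC mapping: TIDs 1 and 2 map to
 * background, 0 and 3 to best effort, 4 and 5 to video, and 6 and 7 to
 * voice. For example, a QoS data frame with TID 5 is queued on the video
 * AC, i.e. tid_to_mac80211_ac[5] == IEEE80211_AC_VI.
 */
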
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	u16 txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *	it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
					      IWL_MVM_DQA_MIN_DATA_QUEUE,
					      IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto out;
		}

		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
		ret = -ENXIO;
		IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
			tid, IWL_MAX_HW_QUEUES - 1);
		goto out;

	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto out;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;

out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

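/*
 * This function only implements the driver half of the ADDBA handshake: it
 * reserves (or reuses) a TXQ and reports the starting SSN back to mac80211,
 * which then transmits the ADDBA request to the peer. Only once the peer
 * responds does mac80211 call back into iwl_mvm_sta_tx_agg_oper() below to
 * actually turn aggregation on for the queue.
 */
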
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if aggregation size is different than the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to firmware.
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
}

static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

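/*
 * Illustration of the policy above, with three offsets and offset 1 still
 * in use: if fw_key_deleted reads {2, -, 0}, then offset 0 (deleted two
 * removals ago) is picked over offset 2 (just deleted), giving the firmware
 * the longest possible grace period before an offset is recycled.
 */
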
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

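/*
 * The IPN delivered by mac80211 in seq.aes_cmac.pn is in big-endian byte
 * order (pn[0] is the most significant byte), while the firmware expects a
 * little-endian 64-bit counter; the explicit shift-and-or above performs
 * the conversion. For example, pn = {0, 0, 0, 0, 0x12, 0x34} becomes
 * cpu_to_le64(0x1234).
 */
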
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset,
					   mfp);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hard-code the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

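/*
 * The WEP double-delete above mirrors the double-install in
 * iwl_mvm_set_sta_key(): a WEP key occupies the same offset once as a
 * unicast and once as a multicast entry, so both mappings must be torn
 * down before the offset is truly free.
 */
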
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}
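
/*
 * Example of the gen2 normalization above: suppose the full driver sequence
 * number is 0x115 while next_reclaimed, being an 8-bit index, has wrapped
 * to 0x10 (0x110 truncated). Comparing them raw would yield 0x105; masking
 * sn to 8 bits first gives 0x15, and ieee80211_sn_sub(0x15, 0x10) == 5, the
 * real number of queued frames, as long as fewer than 256 are in flight.
 */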