]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/net/wireless/intel/iwlwifi/mvm/sta.c
iwlwifi: mvm: support mac80211 TXQs model
[linux.git] / drivers / net / wireless / intel / iwlwifi / mvm / sta.c
1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11  * Copyright(c) 2018 Intel Corporation
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of version 2 of the GNU General Public License as
15  * published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but
18  * WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * General Public License for more details.
21  *
22  * The full GNU General Public License is included in this distribution
23  * in the file called COPYING.
24  *
25  * Contact Information:
26  *  Intel Linux Wireless <linuxwifi@intel.com>
27  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28  *
29  * BSD LICENSE
30  *
31  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
32  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018 Intel Corporation
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *  * Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  *  * Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in
45  *    the documentation and/or other materials provided with the
46  *    distribution.
47  *  * Neither the name Intel Corporation nor the names of its
48  *    contributors may be used to endorse or promote products derived
49  *    from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  *
63  *****************************************************************************/
64 #include <net/mac80211.h>
65
66 #include "mvm.h"
67 #include "sta.h"
68 #include "rs.h"
69
70 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);
71
72 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
73                                 u32 sta_id,
74                                 struct ieee80211_key_conf *key, bool mcast,
75                                 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
76                                 u8 key_offset, bool mfp);
77
78 /*
79  * New version of ADD_STA_sta command added new fields at the end of the
80  * structure, so sending the size of the relevant API's structure is enough to
81  * support both API versions.
82  */
83 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
84 {
85         if (iwl_mvm_has_new_rx_api(mvm) ||
86             fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
87                 return sizeof(struct iwl_mvm_add_sta_cmd);
88         else
89                 return sizeof(struct iwl_mvm_add_sta_cmd_v7);
90 }
91
92 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
93                                     enum nl80211_iftype iftype)
94 {
95         int sta_id;
96         u32 reserved_ids = 0;
97
98         BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
99         WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
100
101         lockdep_assert_held(&mvm->mutex);
102
103         /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
104         if (iftype != NL80211_IFTYPE_STATION)
105                 reserved_ids = BIT(0);
106
107         /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
108         for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
109                 if (BIT(sta_id) & reserved_ids)
110                         continue;
111
112                 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
113                                                lockdep_is_held(&mvm->mutex)))
114                         return sta_id;
115         }
116         return IWL_MVM_INVALID_STA;
117 }
118
/*
 * Send a station add/update (ADD_STA) command to the firmware.
 *
 * Builds the command from the mac80211 station state (bandwidth, NSS,
 * SMPS mode, HT/VHT aggregation parameters, U-APSD configuration) and
 * sends it synchronously.  @update selects modify vs. add; @flags may
 * carry STA_MODIFY_QUEUES to also refresh the TFD queue mask (only
 * meaningful on the old TX API).
 *
 * Returns 0 on success, -EIO if the firmware rejected the command, or
 * the error from sending the host command.
 */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		/* tfd_queue_msk only exists on the old TX API */
		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	/*
	 * Each case enables its own width and then falls through so all
	 * narrower widths are enabled too; 20MHz is set only for HT STAs.
	 */
	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	/* the VHT A-MPDU length exponent takes precedence over the HT one */
	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		/*
		 * mirror the AC bits into the high nibble - presumably the
		 * FW wants trigger- and delivery-enabled ACs to be the same
		 * set here; TODO confirm against the FW API definition
		 */
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
259
/*
 * Timer callback for an RX BA (block-ack) session.
 *
 * If frames were received recently enough, the timer is simply re-armed
 * for the remaining time; otherwise the session is considered expired
 * and mac80211 is notified so it can tear the BA agreement down.
 * Runs entirely under rcu_read_lock() since it only peeks at RCU state.
 */
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	/* no timeout configured for this session - nothing to do */
	if (!ba_data->timeout)
		goto unlock;

	/* grace period is twice the negotiated BA timeout */
	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		/* traffic seen recently - re-arm for the remaining time */
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MDPU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
306
/*
 * Disable aggregations for a bitmap of TIDs for a given station.
 *
 * Sends a modify ADD_STA command that disables TX aggregation on the
 * TIDs in @disable_agg_tids and, when @remove_queue is set, also asks
 * the FW to remove @queue from the station's queue set.  Only valid on
 * the old TX API.
 *
 * Returns the result of sending the command, or -EINVAL if the station
 * behind the queue is gone or the new TX API is in use.
 */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	/* the command is fully built from mvmsta - safe to drop RCU now */
	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}
358
/*
 * Remove @tid from @queue and disable the queue if it becomes unused.
 *
 * On the new TX API the queue is simply freed in the transport.
 * Otherwise the TID is cleared from the queue's bitmap; if other TIDs
 * still use the queue nothing more happens, else the queue is disabled
 * in the transport and the FW is told via SCD_QUEUE_CFG.
 *
 * Returns 0 on success (or when nothing needed doing), or the error
 * from sending the SCD_QUEUE_CFG command.
 */
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	/* queue already has no TIDs mapped - nothing to disable */
	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	/* keep the queue enabled if other TIDs still map to it */
	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		/* detach the mac80211 TXQ from the now-freed HW queue */
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}
423
424 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
425 {
426         struct ieee80211_sta *sta;
427         struct iwl_mvm_sta *mvmsta;
428         unsigned long tid_bitmap;
429         unsigned long agg_tids = 0;
430         u8 sta_id;
431         int tid;
432
433         lockdep_assert_held(&mvm->mutex);
434
435         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
436                 return -EINVAL;
437
438         sta_id = mvm->queue_info[queue].ra_sta_id;
439         tid_bitmap = mvm->queue_info[queue].tid_bitmap;
440
441         sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
442                                         lockdep_is_held(&mvm->mutex));
443
444         if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
445                 return -EINVAL;
446
447         mvmsta = iwl_mvm_sta_from_mac80211(sta);
448
449         spin_lock_bh(&mvmsta->lock);
450         for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
451                 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
452                         agg_tids |= BIT(tid);
453         }
454         spin_unlock_bh(&mvmsta->lock);
455
456         return agg_tids;
457 }
458
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 *
 * Returns the bitmap of TIDs that had aggregation active on this queue
 * (so the caller can disable them in FW), 0 if the owning station is
 * already gone, or -EINVAL on the new TX API.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}
522
/*
 * Free an inactive queue so it can be reused (by @new_sta_id).
 *
 * Unmaps the queue from its current station, disables any aggregations
 * that were active on it, disables the queue itself, and - if the queue
 * belonged to a different station than @new_sta_id - tells the FW to
 * remove it from that station's queue set.
 *
 * Returns 0 on success or a negative error code.
 */
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	/* queue already belongs to the new owner - no FW removal needed */
	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}
568
569 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
570                                     unsigned long tfd_queue_mask, u8 ac)
571 {
572         int queue = 0;
573         u8 ac_to_queue[IEEE80211_NUM_ACS];
574         int i;
575
576         /*
577          * This protects us against grabbing a queue that's being reconfigured
578          * by the inactivity checker.
579          */
580         lockdep_assert_held(&mvm->mutex);
581
582         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
583                 return -EINVAL;
584
585         memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
586
587         /* See what ACs the existing queues for this STA have */
588         for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
589                 /* Only DATA queues can be shared */
590                 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
591                     i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
592                         continue;
593
594                 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
595         }
596
597         /*
598          * The queue to share is chosen only from DATA queues as follows (in
599          * descending priority):
600          * 1. An AC_BE queue
601          * 2. Same AC queue
602          * 3. Highest AC queue that is lower than new AC
603          * 4. Any existing AC (there always is at least 1 DATA queue)
604          */
605
606         /* Priority 1: An AC_BE queue */
607         if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
608                 queue = ac_to_queue[IEEE80211_AC_BE];
609         /* Priority 2: Same AC queue */
610         else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
611                 queue = ac_to_queue[ac];
612         /* Priority 3a: If new AC is VO and VI exists - use VI */
613         else if (ac == IEEE80211_AC_VO &&
614                  ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
615                 queue = ac_to_queue[IEEE80211_AC_VI];
616         /* Priority 3b: No BE so only AC less than the new one is BK */
617         else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
618                 queue = ac_to_queue[IEEE80211_AC_BK];
619         /* Priority 4a: No BE nor BK - use VI if exists */
620         else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
621                 queue = ac_to_queue[IEEE80211_AC_VI];
622         /* Priority 4b: No BE, BK nor VI - use VO if exists */
623         else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
624                 queue = ac_to_queue[IEEE80211_AC_VO];
625
626         /* Make sure queue found (or not) is legal */
627         if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
628             !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
629             (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
630                 IWL_ERR(mvm, "No DATA queues available to share\n");
631                 return -ENOSPC;
632         }
633
634         return queue;
635 }
636
/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 *
 * The queue is stopped and drained, disabled in the SCD, re-enabled at
 * @ssn, and reconfigured to the FIFO matching @ac.  Returns 0 on
 * success or a negative error code (e.g. -EIO if draining failed).
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	txq->stopped = false;

	return ret;
}
728
729 static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
730                                    u8 minq, u8 maxq)
731 {
732         int i;
733
734         lockdep_assert_held(&mvm->mutex);
735
736         /* This should not be hit with new TX path */
737         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
738                 return -ENOSPC;
739
740         /* Start by looking for a free queue */
741         for (i = minq; i <= maxq; i++)
742                 if (mvm->queue_info[i].tid_bitmap == 0 &&
743                     mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
744                         return i;
745
746         return -ENOSPC;
747 }
748
749 static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
750                                    u8 sta_id, u8 tid, unsigned int timeout)
751 {
752         int queue, size = IWL_DEFAULT_QUEUE_SIZE;
753
754         if (tid == IWL_MAX_TID_COUNT) {
755                 tid = IWL_MGMT_TID;
756                 size = IWL_MGMT_QUEUE_SIZE;
757         }
758         queue = iwl_trans_txq_alloc(mvm->trans,
759                                     cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
760                                     sta_id, tid, SCD_QUEUE_CFG, size, timeout);
761
762         if (queue < 0) {
763                 IWL_DEBUG_TX_QUEUES(mvm,
764                                     "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
765                                     sta_id, tid, queue);
766                 return queue;
767         }
768
769         IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
770                             queue, sta_id, tid);
771
772         IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
773
774         return queue;
775 }
776
777 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
778                                         struct ieee80211_sta *sta, u8 ac,
779                                         int tid)
780 {
781         struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
782         struct iwl_mvm_txq *mvmtxq =
783                 iwl_mvm_txq_from_tid(sta, tid);
784         unsigned int wdg_timeout =
785                 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
786         int queue = -1;
787
788         lockdep_assert_held(&mvm->mutex);
789
790         IWL_DEBUG_TX_QUEUES(mvm,
791                             "Allocating queue for sta %d on tid %d\n",
792                             mvmsta->sta_id, tid);
793         queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
794         if (queue < 0)
795                 return queue;
796
797         if (sta) {
798                 mvmtxq->txq_id = queue;
799                 mvm->tvqm_info[queue].txq_tid = tid;
800                 mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
801         }
802
803         IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
804
805         spin_lock_bh(&mvmsta->lock);
806         mvmsta->tid_data[tid].txq_id = queue;
807         spin_unlock_bh(&mvmsta->lock);
808
809         return 0;
810 }
811
812 static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
813                                        struct ieee80211_sta *sta,
814                                        int queue, u8 sta_id, u8 tid)
815 {
816         bool enable_queue = true;
817
818         /* Make sure this TID isn't already enabled */
819         if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
820                 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
821                         queue, tid);
822                 return false;
823         }
824
825         /* Update mappings and refcounts */
826         if (mvm->queue_info[queue].tid_bitmap)
827                 enable_queue = false;
828
829         mvm->queue_info[queue].tid_bitmap |= BIT(tid);
830         mvm->queue_info[queue].ra_sta_id = sta_id;
831
832         if (enable_queue) {
833                 if (tid != IWL_MAX_TID_COUNT)
834                         mvm->queue_info[queue].mac80211_ac =
835                                 tid_to_mac80211_ac[tid];
836                 else
837                         mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
838
839                 mvm->queue_info[queue].txq_tid = tid;
840         }
841
842         if (sta) {
843                 struct iwl_mvm_txq *mvmtxq =
844                         iwl_mvm_txq_from_tid(sta, tid);
845
846                 mvmtxq->txq_id = queue;
847         }
848
849         IWL_DEBUG_TX_QUEUES(mvm,
850                             "Enabling TXQ #%d tids=0x%x\n",
851                             queue, mvm->queue_info[queue].tid_bitmap);
852
853         return enable_queue;
854 }
855
/*
 * Enable @queue for @sta/@cfg->tid on the old (SCD-based) TX path:
 * update the driver mapping, enable the queue in the transport, and
 * configure it in the firmware via SCD_QUEUE_CFG.
 *
 * Returns %true when the transport had to bump the SSN, in which case
 * the caller must advance the TID's sequence number accordingly.
 */
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	/* SCD configuration does not exist on the new (TVQM) TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	/* keep the SSN sent to the firmware in sync with the transport's */
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}
890
/*
 * Transfer firmware-side "ownership" of @queue to one of the TIDs that
 * is still mapped to it, via an SCD_QUEUE_CFG update command.
 * Old (non-TVQM) TX path only; caller holds mvm->mutex.
 */
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	/* only update local bookkeeping once the firmware accepted the cmd */
	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}
927
928 static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
929 {
930         struct ieee80211_sta *sta;
931         struct iwl_mvm_sta *mvmsta;
932         u8 sta_id;
933         int tid = -1;
934         unsigned long tid_bitmap;
935         unsigned int wdg_timeout;
936         int ssn;
937         int ret = true;
938
939         /* queue sharing is disabled on new TX path */
940         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
941                 return;
942
943         lockdep_assert_held(&mvm->mutex);
944
945         sta_id = mvm->queue_info[queue].ra_sta_id;
946         tid_bitmap = mvm->queue_info[queue].tid_bitmap;
947
948         /* Find TID for queue, and make sure it is the only one on the queue */
949         tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
950         if (tid_bitmap != BIT(tid)) {
951                 IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
952                         queue, tid_bitmap);
953                 return;
954         }
955
956         IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
957                             tid);
958
959         sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
960                                         lockdep_is_held(&mvm->mutex));
961
962         if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
963                 return;
964
965         mvmsta = iwl_mvm_sta_from_mac80211(sta);
966         wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
967
968         ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
969
970         ret = iwl_mvm_redirect_queue(mvm, queue, tid,
971                                      tid_to_mac80211_ac[tid], ssn,
972                                      wdg_timeout, true,
973                                      iwl_mvm_txq_from_tid(sta, tid));
974         if (ret) {
975                 IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
976                 return;
977         }
978
979         /* If aggs should be turned back on - do it */
980         if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
981                 struct iwl_mvm_add_sta_cmd cmd = {0};
982
983                 mvmsta->tid_disable_agg &= ~BIT(tid);
984
985                 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
986                 cmd.sta_id = mvmsta->sta_id;
987                 cmd.add_modify = STA_MODE_MODIFY;
988                 cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
989                 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
990                 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
991
992                 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
993                                            iwl_mvm_add_sta_cmd_size(mvm), &cmd);
994                 if (!ret) {
995                         IWL_DEBUG_TX_QUEUES(mvm,
996                                             "TXQ #%d is now aggregated again\n",
997                                             queue);
998
999                         /* Mark queue intenally as aggregating again */
1000                         iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1001                 }
1002         }
1003
1004         mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1005 }
1006
1007 /*
1008  * Remove inactive TIDs of a given queue.
1009  * If all queue TIDs are inactive - mark the queue as inactive
1010  * If only some the queue TIDs are inactive - unmap them from the queue
1011  *
1012  * Returns %true if all TIDs were removed and the queue could be reused.
1013  */
1014 static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1015                                          struct iwl_mvm_sta *mvmsta, int queue,
1016                                          unsigned long tid_bitmap,
1017                                          unsigned long *unshare_queues,
1018                                          unsigned long *changetid_queues)
1019 {
1020         int tid;
1021
1022         lockdep_assert_held(&mvmsta->lock);
1023         lockdep_assert_held(&mvm->mutex);
1024
1025         if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1026                 return false;
1027
1028         /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1029         for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1030                 /* If some TFDs are still queued - don't mark TID as inactive */
1031                 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1032                         tid_bitmap &= ~BIT(tid);
1033
1034                 /* Don't mark as inactive any TID that has an active BA */
1035                 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1036                         tid_bitmap &= ~BIT(tid);
1037         }
1038
1039         /* If all TIDs in the queue are inactive - return it can be reused */
1040         if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1041                 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
1042                 return true;
1043         }
1044
1045         /*
1046          * If we are here, this is a shared queue and not all TIDs timed-out.
1047          * Remove the ones that did.
1048          */
1049         for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1050                 u16 tid_bitmap;
1051
1052                 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1053                 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1054
1055                 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1056
1057                 /*
1058                  * We need to take into account a situation in which a TXQ was
1059                  * allocated to TID x, and then turned shared by adding TIDs y
1060                  * and z. If TID x becomes inactive and is removed from the TXQ,
1061                  * ownership must be given to one of the remaining TIDs.
1062                  * This is mainly because if TID x continues - a new queue can't
1063                  * be allocated for it as long as it is an owner of another TXQ.
1064                  *
1065                  * Mark this queue in the right bitmap, we'll send the command
1066                  * to the firmware later.
1067                  */
1068                 if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
1069                         set_bit(queue, changetid_queues);
1070
1071                 IWL_DEBUG_TX_QUEUES(mvm,
1072                                     "Removing inactive TID %d from shared Q:%d\n",
1073                                     tid, queue);
1074         }
1075
1076         IWL_DEBUG_TX_QUEUES(mvm,
1077                             "TXQ #%d left with tid bitmap 0x%x\n", queue,
1078                             mvm->queue_info[queue].tid_bitmap);
1079
1080         /*
1081          * There may be different TIDs with the same mac queues, so make
1082          * sure all TIDs have existing corresponding mac queues enabled
1083          */
1084         tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1085
1086         /* If the queue is marked as shared - "unshare" it */
1087         if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
1088             mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1089                 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1090                                     queue);
1091                 set_bit(queue, unshare_queues);
1092         }
1093
1094         return false;
1095 }
1096
/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner  = NULL;

	lockdep_assert_held(&mvm->mutex);

	/* inactivity/sharing logic only exists on the old TX path */
	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	/* RCU protects the fw_id_to_mac_id lookups below */
	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		/* remember the first fully-inactive (reusable) queue found */
		if (ret >= 0 && free_queue < 0) {
			queue_owner = sta;
			free_queue = ret;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}


	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	/* if this was a real allocation request - free the reusable queue */
	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret) {
			rcu_read_unlock();
			return ret;
		}
	}

	rcu_read_unlock();

	return free_queue;
}
1206
/*
 * Allocate (or share) a TX queue for @sta/@tid and configure it.
 * On the new TX path this simply delegates to the TVQM allocator;
 * on the old path it tries, in order: a free MGMT queue (for the mgmt
 * pseudo-TID), the station's reserved queue, a free DATA queue, an
 * inactive queue that can be freed, and finally sharing an existing
 * queue.
 *
 * Returns 0 on success, or a negative error.
 */
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	/* snapshot queue mask and SSN under the station lock */
	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		/*
		 * NOTE(review): if queue == 0 here this returns 0, which the
		 * caller would read as success - confirm whether this should
		 * return -ENOSPC instead.
		 */
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	/* roll back: disable the queue we just enabled */
	iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);

	return ret;
}
1372
1373 static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1374 {
1375         if (tid == IWL_MAX_TID_COUNT)
1376                 return IEEE80211_AC_VO; /* MGMT */
1377
1378         return tid_to_mac80211_ac[tid];
1379 }
1380
/*
 * Worker that allocates HW queues for mac80211 TXQs whose first frame
 * arrived before a queue was assigned (queued on mvm->add_stream_txqs),
 * then kicks transmission on each of them.
 */
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	/* general cleanup: unshare/free queues that went inactive */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		/* mac80211 uses NUM_TIDS for management; map to mgmt TID */
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		/*
		 * NOTE(review): the return value of iwl_mvm_sta_alloc_queue()
		 * is ignored, so on allocation failure we still attempt to
		 * xmit on the TXQ below - confirm this is intentional.
		 */
		iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
		list_del_init(&mvmtxq->list);
		local_bh_disable();
		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}
1413
/*
 * Reserve a data queue for a newly-added station on the old TX path,
 * so a queue is guaranteed to exist when traffic starts. Prefers the
 * dedicated BSS-client queue for a non-TDLS station on a STA interface.
 *
 * Returns 0 on success, -ENOSPC if no queue could be reserved.
 */
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
1455
/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	/* re-map every TID that had a queue before the restart */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			/*
			 * NOTE(review): iwl_mvm_tvqm_enable_txq() can return a
			 * negative error, which is stored unchecked here -
			 * confirm whether the TID should be invalidated
			 * instead in that case.
			 */
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
							 i, wdg);
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}
1523
1524 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1525                                       struct iwl_mvm_int_sta *sta,
1526                                       const u8 *addr,
1527                                       u16 mac_id, u16 color)
1528 {
1529         struct iwl_mvm_add_sta_cmd cmd;
1530         int ret;
1531         u32 status = ADD_STA_SUCCESS;
1532
1533         lockdep_assert_held(&mvm->mutex);
1534
1535         memset(&cmd, 0, sizeof(cmd));
1536         cmd.sta_id = sta->sta_id;
1537         cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1538                                                              color));
1539         if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1540                 cmd.station_type = sta->type;
1541
1542         if (!iwl_mvm_has_new_tx_api(mvm))
1543                 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1544         cmd.tid_disable_tx = cpu_to_le16(0xffff);
1545
1546         if (addr)
1547                 memcpy(cmd.addr, addr, ETH_ALEN);
1548
1549         ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1550                                           iwl_mvm_add_sta_cmd_size(mvm),
1551                                           &cmd, &status);
1552         if (ret)
1553                 return ret;
1554
1555         switch (status & IWL_ADD_STA_STATUS_MASK) {
1556         case ADD_STA_SUCCESS:
1557                 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1558                 return 0;
1559         default:
1560                 ret = -EIO;
1561                 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1562                         status);
1563                 break;
1564         }
1565         return ret;
1566 }
1567
/*
 * Add a mac80211 station to the firmware.
 *
 * Normal path: allocate a free station id, initialize the per-STA
 * driver state (TID data, mac80211 TXQ shadow state, RX duplicate
 * detection data), optionally reserve a TX queue, then send the
 * station to the FW.  HW-restart path: keep the old station id, add
 * an empty internal station first (queue allocation requires a
 * station known to the FW), re-allocate the queues and jump straight
 * to the FW update.
 *
 * Returns 0 on success, -ENOSPC when no station id is free, -ENOMEM
 * on allocation failure, or the error from the FW commands.
 */
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	/* On HW restart the station keeps its previous id */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	/* gen2 devices use a larger aggregation frame limit */
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Reset the per-STA mac80211 TXQ shadow state as well */
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		spin_lock_init(&mvmtxq->tx_path_lock);
	}

	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		/* per-RX-queue duplicate-detection state */
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	/* Old TX API needs a queue reserved for this station up front */
	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	/* publish the mapping so RX/TX paths can look the station up */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}
1714
1715 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1716                       bool drain)
1717 {
1718         struct iwl_mvm_add_sta_cmd cmd = {};
1719         int ret;
1720         u32 status;
1721
1722         lockdep_assert_held(&mvm->mutex);
1723
1724         cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1725         cmd.sta_id = mvmsta->sta_id;
1726         cmd.add_modify = STA_MODE_MODIFY;
1727         cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1728         cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1729
1730         status = ADD_STA_SUCCESS;
1731         ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1732                                           iwl_mvm_add_sta_cmd_size(mvm),
1733                                           &cmd, &status);
1734         if (ret)
1735                 return ret;
1736
1737         switch (status & IWL_ADD_STA_STATUS_MASK) {
1738         case ADD_STA_SUCCESS:
1739                 IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
1740                                mvmsta->sta_id);
1741                 break;
1742         default:
1743                 ret = -EIO;
1744                 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1745                         mvmsta->sta_id);
1746                 break;
1747         }
1748
1749         return ret;
1750 }
1751
1752 /*
1753  * Remove a station from the FW table. Before sending the command to remove
1754  * the station validate that the station is indeed known to the driver (sanity
1755  * only).
1756  */
1757 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1758 {
1759         struct ieee80211_sta *sta;
1760         struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1761                 .sta_id = sta_id,
1762         };
1763         int ret;
1764
1765         sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1766                                         lockdep_is_held(&mvm->mutex));
1767
1768         /* Note: internal stations are marked as error values */
1769         if (!sta) {
1770                 IWL_ERR(mvm, "Invalid station id\n");
1771                 return -EINVAL;
1772         }
1773
1774         ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1775                                    sizeof(rm_sta_cmd), &rm_sta_cmd);
1776         if (ret) {
1777                 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1778                 return ret;
1779         }
1780
1781         return 0;
1782 }
1783
1784 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1785                                        struct ieee80211_vif *vif,
1786                                        struct ieee80211_sta *sta)
1787 {
1788         struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1789         int i;
1790
1791         lockdep_assert_held(&mvm->mutex);
1792
1793         for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1794                 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1795                         continue;
1796
1797                 iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
1798                                     0);
1799                 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1800         }
1801
1802         for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1803                 struct iwl_mvm_txq *mvmtxq =
1804                         iwl_mvm_txq_from_mac80211(sta->txq[i]);
1805
1806                 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1807         }
1808 }
1809
1810 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1811                                   struct iwl_mvm_sta *mvm_sta)
1812 {
1813         int i;
1814
1815         for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1816                 u16 txq_id;
1817                 int ret;
1818
1819                 spin_lock_bh(&mvm_sta->lock);
1820                 txq_id = mvm_sta->tid_data[i].txq_id;
1821                 spin_unlock_bh(&mvm_sta->lock);
1822
1823                 if (txq_id == IWL_MVM_INVALID_QUEUE)
1824                         continue;
1825
1826                 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1827                 if (ret)
1828                         return ret;
1829         }
1830
1831         return 0;
1832 }
1833
/*
 * Remove a mac80211 station from the firmware and the local tables.
 *
 * Sequence: free RX duplicate-detection data, drain the station's
 * frames in FW, flush its queues, wait for them to empty, stop
 * draining, disable the queues, release a still-reserved queue if
 * any, update per-vif bookkeeping (AP station id, d0i3, TDLS), then
 * send REMOVE_STA and clear the fw_id_to_mac_id entry.
 */
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/*
	 * NOTE(review): dup_data is freed here but not NULLed; assumes
	 * removal is not re-entered for the same station on the early
	 * error returns below -- confirm with callers.
	 */
	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	/* queues are empty - stop draining */
	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status))
			return -EINVAL;

		*status = IWL_MVM_QUEUE_FREE;
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}
1926
1927 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1928                       struct ieee80211_vif *vif,
1929                       u8 sta_id)
1930 {
1931         int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1932
1933         lockdep_assert_held(&mvm->mutex);
1934
1935         RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1936         return ret;
1937 }
1938
1939 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1940                              struct iwl_mvm_int_sta *sta,
1941                              u32 qmask, enum nl80211_iftype iftype,
1942                              enum iwl_sta_type type)
1943 {
1944         if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
1945             sta->sta_id == IWL_MVM_INVALID_STA) {
1946                 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
1947                 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
1948                         return -ENOSPC;
1949         }
1950
1951         sta->tfd_queue_msk = qmask;
1952         sta->type = type;
1953
1954         /* put a non-NULL value so iterating over the stations won't stop */
1955         rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1956         return 0;
1957 }
1958
1959 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
1960 {
1961         RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
1962         memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1963         sta->sta_id = IWL_MVM_INVALID_STA;
1964 }
1965
1966 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
1967                                           u8 sta_id, u8 fifo)
1968 {
1969         unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1970                                         mvm->cfg->base_params->wd_timeout :
1971                                         IWL_WATCHDOG_DISABLED;
1972
1973         if (iwl_mvm_has_new_tx_api(mvm)) {
1974                 int tvqm_queue =
1975                         iwl_mvm_tvqm_enable_txq(mvm, sta_id,
1976                                                 IWL_MAX_TID_COUNT,
1977                                                 wdg_timeout);
1978                 *queue = tvqm_queue;
1979         } else {
1980                 struct iwl_trans_txq_scd_cfg cfg = {
1981                         .fifo = fifo,
1982                         .sta_id = sta_id,
1983                         .tid = IWL_MAX_TID_COUNT,
1984                         .aggregate = false,
1985                         .frame_limit = IWL_FRAME_LIMIT,
1986                 };
1987
1988                 iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
1989         }
1990 }
1991
/*
 * Allocate and add the auxiliary station and hook it up to the aux
 * queue.  The queue-vs-station ordering depends on the FW generation:
 * pre-22000 needs the queue mapped before ADD_STA, while 22000+ only
 * accepts queues for stations the FW already knows about.
 */
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	return 0;
}
2029
/*
 * Add the sniffer (monitor) station and hook it up to the snif queue.
 * Pre-22000 FW requires the queue mapped before ADD_STA; 22000+ only
 * accepts queues for stations already known to the FW.  The station
 * entry itself (mvm->snif_sta) is allocated by the caller.
 */
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map snif queue to fifo - must happen before adding snif station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	return 0;
}
2059
/* Disable the sniffer queue, then remove the sniffer station from FW. */
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
2073
/* Free the local bookkeeping for the sniffer station. */
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
2078
/* Free the local bookkeeping for the auxiliary station. */
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
2085
2086 /*
2087  * Send the add station command for the vif's broadcast station.
2088  * Assumes that the station was already allocated.
2089  *
2090  * @mvm: the mvm component
2091  * @vif: the interface to which the broadcast station is added
 * The broadcast station itself is the vif's mvmvif->bcast_sta (the
 * function takes no separate bsta parameter).
2093  */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	/* On old TX API the queue must be enabled before ADD_STA */
	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			queue = mvm->probe_queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			queue = mvm->p2p_dev_queue;
		/* WARN(1, ...) always fires, so this branch always returns */
		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
			return -EINVAL;

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
	}

	/* In IBSS the "broadcast" station uses the BSSID address */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		/* remember the TVQM-assigned queue for this vif type */
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}
2157
2158 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2159                                           struct ieee80211_vif *vif)
2160 {
2161         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2162         int queue;
2163
2164         lockdep_assert_held(&mvm->mutex);
2165
2166         iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
2167
2168         switch (vif->type) {
2169         case NL80211_IFTYPE_AP:
2170         case NL80211_IFTYPE_ADHOC:
2171                 queue = mvm->probe_queue;
2172                 break;
2173         case NL80211_IFTYPE_P2P_DEVICE:
2174                 queue = mvm->p2p_dev_queue;
2175                 break;
2176         default:
2177                 WARN(1, "Can't free bcast queue on vif type %d\n",
2178                      vif->type);
2179                 return;
2180         }
2181
2182         iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
2183         if (iwl_mvm_has_new_tx_api(mvm))
2184                 return;
2185
2186         WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2187         mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
2188 }
2189
/* Send the FW a request to remove the station from its internal data
2191  * structures, but DO NOT remove the entry from the local data structures. */
2192 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2193 {
2194         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2195         int ret;
2196
2197         lockdep_assert_held(&mvm->mutex);
2198
2199         iwl_mvm_free_bcast_sta_queues(mvm, vif);
2200
2201         ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2202         if (ret)
2203                 IWL_WARN(mvm, "Failed sending remove station\n");
2204         return ret;
2205 }
2206
/*
 * Allocate the internal station entry for the vif's broadcast station.
 * No queues are assigned here (qmask 0); they are hooked up when the
 * station is sent to the FW.
 */
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}
2217
2218 /* Allocate a new station entry for the broadcast station to the given vif,
2219  * and send it to the FW.
2220  * Note that each P2P mac should have its own broadcast station.
2221  *
2222  * @mvm: the mvm component
2223  * @vif: the interface to which the broadcast station is added
 * The broadcast station itself is the vif's mvmvif->bcast_sta. */
2225 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2226 {
2227         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2228         struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2229         int ret;
2230
2231         lockdep_assert_held(&mvm->mutex);
2232
2233         ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2234         if (ret)
2235                 return ret;
2236
2237         ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2238
2239         if (ret)
2240                 iwl_mvm_dealloc_int_sta(mvm, bsta);
2241
2242         return ret;
2243 }
2244
/* Free the local bookkeeping for the vif's broadcast station. */
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}
2251
2252 /*
 * Send the FW a request to remove the station from its internal data
2254  * structures, and in addition remove it from the local data structure.
2255  */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	/* Local state is freed even if the FW removal failed */
	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}
2268
2269 /*
2270  * Allocate a new station entry for the multicast station to the given vif,
2271  * and send it to the FW.
2272  * Note that each AP/GO mac should have its own multicast station.
2273  *
2274  * @mvm: the mvm component
2275  * @vif: the interface to which the multicast station is added
2276  */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_MCAST,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Multicast stations only exist for AP/GO and IBSS interfaces */
	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
				   timeout);
		msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
						    0,
						    timeout);
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
				   timeout);

	/* Install the group WEP key for this vif, if one is configured */
	if (mvmvif->ap_wep_key) {
		u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);

		/*
		 * NOTE(review): on failure below the mcast station stays
		 * added in FW -- presumably the caller tears it down on
		 * error; confirm the error paths.
		 */
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;

		ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
					   mvmvif->ap_wep_key, 1, 0, NULL, 0,
					   key_offset, 0);
		if (ret)
			return ret;
	}

	return 0;
}
2357
2358 /*
2359  * Send the FW a request to remove the station from it's internal data
2360  * structures, and in addition remove it from the local data structure.
2361  */
2362 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2363 {
2364         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2365         int ret;
2366
2367         lockdep_assert_held(&mvm->mutex);
2368
2369         iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2370
2371         iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
2372
2373         ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2374         if (ret)
2375                 IWL_WARN(mvm, "Failed sending remove station\n");
2376
2377         return ret;
2378 }
2379
2380 #define IWL_MAX_RX_BA_SESSIONS 16
2381
2382 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2383 {
2384         struct iwl_mvm_delba_notif notif = {
2385                 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2386                 .metadata.sync = 1,
2387                 .delba.baid = baid,
2388         };
2389         iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2390 };
2391
/*
 * Tear down the per-RX-queue reorder buffers of a BA session: first
 * sync all RX queues (so none still uses this BAID), then purge any
 * frames unexpectedly left in the buffers and disarm the per-queue
 * reorder timers.
 */
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
                                 struct iwl_mvm_baid_data *data)
{
        int i;

        /* wait until no RX queue references this BAID anymore */
        iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

        for (i = 0; i < mvm->trans->num_rx_queues; i++) {
                int j;
                struct iwl_mvm_reorder_buffer *reorder_buf =
                        &data->reorder_buf[i];
                struct iwl_mvm_reorder_buf_entry *entries =
                        &data->entries[i * data->entries_per_queue];

                spin_lock_bh(&reorder_buf->lock);
                /* common case: the delBA already released all frames */
                if (likely(!reorder_buf->num_stored)) {
                        spin_unlock_bh(&reorder_buf->lock);
                        continue;
                }

                /*
                 * This shouldn't happen in regular DELBA since the internal
                 * delBA notification should trigger a release of all frames in
                 * the reorder buffer.
                 */
                WARN_ON(1);

                for (j = 0; j < reorder_buf->buf_size; j++)
                        __skb_queue_purge(&entries[j].e.frames);
                /*
                 * Prevent timer re-arm. This prevents a very far fetched case
                 * where we timed out on the notification. There may be prior
                 * RX frames pending in the RX queue before the notification
                 * that might get processed between now and the actual deletion
                 * and we would re-arm the timer although we are deleting the
                 * reorder buffer.
                 */
                reorder_buf->removed = true;
                spin_unlock_bh(&reorder_buf->lock);
                del_timer_sync(&reorder_buf->reorder_timer);
        }
}
2434
2435 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2436                                         struct iwl_mvm_baid_data *data,
2437                                         u16 ssn, u16 buf_size)
2438 {
2439         int i;
2440
2441         for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2442                 struct iwl_mvm_reorder_buffer *reorder_buf =
2443                         &data->reorder_buf[i];
2444                 struct iwl_mvm_reorder_buf_entry *entries =
2445                         &data->entries[i * data->entries_per_queue];
2446                 int j;
2447
2448                 reorder_buf->num_stored = 0;
2449                 reorder_buf->head_sn = ssn;
2450                 reorder_buf->buf_size = buf_size;
2451                 /* rx reorder timer */
2452                 timer_setup(&reorder_buf->reorder_timer,
2453                             iwl_mvm_reorder_timer_expired, 0);
2454                 spin_lock_init(&reorder_buf->lock);
2455                 reorder_buf->mvm = mvm;
2456                 reorder_buf->queue = i;
2457                 reorder_buf->valid = false;
2458                 for (j = 0; j < reorder_buf->buf_size; j++)
2459                         __skb_queue_head_init(&entries[j].e.frames);
2460         }
2461 }
2462
/*
 * Start or stop an RX BA (block-ack) session for @sta/@tid via the
 * ADD_STA firmware command. On devices with the new RX API this also
 * allocates (start) or frees (stop) the per-queue reorder buffer data
 * published through mvm->baid_map.
 *
 * Returns 0 on success; -ENOSPC when no BA session slot is available or
 * the fw refused the session; -ENOMEM, -EINVAL or -EIO on other errors.
 */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd cmd = {};
        struct iwl_mvm_baid_data *baid_data = NULL;
        int ret;
        u32 status;

        lockdep_assert_held(&mvm->mutex);

        if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
                IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
                return -ENOSPC;
        }

        if (iwl_mvm_has_new_rx_api(mvm) && start) {
                u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

                /* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
                /*
                 * The division below will be OK if either the cache line size
                 * can be divided by the entry size (ALIGN will round up) or if
                 * if the entry size can be divided by the cache line size, in
                 * which case the ALIGN() will do nothing.
                 */
                BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
                             sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

                /*
                 * Upward align the reorder buffer size to fill an entire cache
                 * line for each queue, to avoid sharing cache lines between
                 * different queues.
                 */
                reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

                /*
                 * Allocate here so if allocation fails we can bail out early
                 * before starting the BA session in the firmware
                 */
                baid_data = kzalloc(sizeof(*baid_data) +
                                    mvm->trans->num_rx_queues *
                                    reorder_buf_size,
                                    GFP_KERNEL);
                if (!baid_data)
                        return -ENOMEM;

                /*
                 * This division is why we need the above BUILD_BUG_ON(),
                 * if that doesn't hold then this will not be right.
                 */
                baid_data->entries_per_queue =
                        reorder_buf_size / sizeof(baid_data->entries[0]);
        }

        /* build the ADD_STA command that (de)registers the BA session */
        cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
        cmd.sta_id = mvm_sta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        if (start) {
                cmd.add_immediate_ba_tid = (u8) tid;
                cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
                cmd.rx_ba_window = cpu_to_le16(buf_size);
        } else {
                cmd.remove_immediate_ba_tid = (u8) tid;
        }
        cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
                                  STA_MODIFY_REMOVE_BA_TID;

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                goto out_free;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
                             start ? "start" : "stopp");
                break;
        case ADD_STA_IMMEDIATE_BA_FAILURE:
                IWL_WARN(mvm, "RX BA Session refused by fw\n");
                ret = -ENOSPC;
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
                        start ? "start" : "stopp", status);
                break;
        }

        if (ret)
                goto out_free;

        if (start) {
                u8 baid;

                mvm->rx_ba_sessions++;

                /* without the new RX API there is no reorder data to set up */
                if (!iwl_mvm_has_new_rx_api(mvm))
                        return 0;

                if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
                        ret = -EINVAL;
                        goto out_free;
                }
                /* the fw chose the BAID - extract it from the status word */
                baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
                            IWL_ADD_STA_BAID_SHIFT);
                baid_data->baid = baid;
                baid_data->timeout = timeout;
                baid_data->last_rx = jiffies;
                baid_data->rcu_ptr = &mvm->baid_map[baid];
                timer_setup(&baid_data->session_timer,
                            iwl_mvm_rx_agg_session_expired, 0);
                baid_data->mvm = mvm;
                baid_data->tid = tid;
                baid_data->sta_id = mvm_sta->sta_id;

                mvm_sta->tid_to_baid[tid] = baid;
                if (timeout)
                        mod_timer(&baid_data->session_timer,
                                  TU_TO_EXP_TIME(timeout * 2));

                iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
                /*
                 * protect the BA data with RCU to cover a case where our
                 * internal RX sync mechanism will timeout (not that it's
                 * supposed to happen) and we will free the session data while
                 * RX is being processed in parallel
                 */
                IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
                             mvm_sta->sta_id, tid, baid);
                WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
                rcu_assign_pointer(mvm->baid_map[baid], baid_data);
        } else  {
                u8 baid = mvm_sta->tid_to_baid[tid];

                if (mvm->rx_ba_sessions > 0)
                        /* check that restart flow didn't zero the counter */
                        mvm->rx_ba_sessions--;
                if (!iwl_mvm_has_new_rx_api(mvm))
                        return 0;

                if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
                        return -EINVAL;

                baid_data = rcu_access_pointer(mvm->baid_map[baid]);
                if (WARN_ON(!baid_data))
                        return -EINVAL;

                /* synchronize all rx queues so we can safely delete */
                iwl_mvm_free_reorder(mvm, baid_data);
                del_timer_sync(&baid_data->session_timer);
                RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
                kfree_rcu(baid_data, rcu_head);
                IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
        }
        return 0;

out_free:
        kfree(baid_data);
        return ret;
}
2628
/*
 * Send ADD_STA to update TX aggregation state for @tid on @queue: on
 * start, add the queue to the station's TFD queue mask and clear the
 * TID's disable-agg bit; on stop, only disable aggregation for the TID
 * (in DQA mode the queue itself stays allocated).
 *
 * Returns 0 on success, -EIO if the fw rejected the command, or the
 * error from sending the command.
 */
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                       int tid, u8 queue, bool start)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd cmd = {};
        int ret;
        u32 status;

        lockdep_assert_held(&mvm->mutex);

        if (start) {
                mvm_sta->tfd_queue_msk |= BIT(queue);
                mvm_sta->tid_disable_agg &= ~BIT(tid);
        } else {
                /* In DQA-mode the queue isn't removed on agg termination */
                mvm_sta->tid_disable_agg |= BIT(tid);
        }

        cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
        cmd.sta_id = mvm_sta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        /* with the new TX API the fw owns the queues; don't modify them */
        if (!iwl_mvm_has_new_tx_api(mvm))
                cmd.modify_mask = STA_MODIFY_QUEUES;
        cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
        cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
                        start ? "start" : "stopp", status);
                break;
        }

        return ret;
}
2675
/* Map a TID (index) to its mac80211 access category */
const u8 tid_to_mac80211_ac[] = {
        IEEE80211_AC_BE, /* TID 0 */
        IEEE80211_AC_BK, /* TID 1 */
        IEEE80211_AC_BK, /* TID 2 */
        IEEE80211_AC_BE, /* TID 3 */
        IEEE80211_AC_VI, /* TID 4 */
        IEEE80211_AC_VI, /* TID 5 */
        IEEE80211_AC_VO, /* TID 6 */
        IEEE80211_AC_VO, /* TID 7 */
        IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};
2687
/* Map a TID (index, 0..7) to the firmware's access category encoding */
static const u8 tid_to_ucode_ac[] = {
        AC_BE,
        AC_BK,
        AC_BK,
        AC_BE,
        AC_VI,
        AC_VI,
        AC_VO,
        AC_VO,
};
2698
/*
 * Handle mac80211's AMPDU TX start request: pick (or reserve) a TX
 * queue for @tid, record the starting sequence number in *ssn and move
 * the TID state machine to IWL_AGG_STARTING - or to
 * IWL_EMPTYING_HW_QUEUE_ADDBA if frames are still pending reclaim so
 * the session must wait for the queue to drain first.
 *
 * Returns 0 on success (and fills *ssn); negative errno otherwise.
 */
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_tid_data *tid_data;
        u16 normalized_ssn;
        int txq_id;
        int ret;

        if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
                return -EINVAL;

        if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
            mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
                IWL_ERR(mvm,
                        "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
                        mvmsta->tid_data[tid].state);
                return -ENXIO;
        }

        lockdep_assert_held(&mvm->mutex);

        /* with the new TX API, allocate the TVQM queue on demand */
        if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
            iwl_mvm_has_new_tx_api(mvm)) {
                u8 ac = tid_to_mac80211_ac[tid];

                ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
                if (ret)
                        return ret;
        }

        spin_lock_bh(&mvmsta->lock);

        /* possible race condition - we entered D0i3 while starting agg */
        if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
                spin_unlock_bh(&mvmsta->lock);
                IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
                return -EIO;
        }

        /*
         * Note the possible cases:
         *  1. An enabled TXQ - TXQ needs to become agg'ed
         *  2. The TXQ hasn't yet been enabled, so find a free one and mark
         *      it as reserved
         */
        txq_id = mvmsta->tid_data[tid].txq_id;
        if (txq_id == IWL_MVM_INVALID_QUEUE) {
                txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                 IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                 IWL_MVM_DQA_MAX_DATA_QUEUE);
                if (txq_id < 0) {
                        ret = txq_id;
                        IWL_ERR(mvm, "Failed to allocate agg queue\n");
                        goto out;
                }

                /* TXQ hasn't yet been enabled, so mark it only as reserved */
                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
        } else if (unlikely(mvm->queue_info[txq_id].status ==
                            IWL_MVM_QUEUE_SHARED)) {
                ret = -ENXIO;
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Can't start tid %d agg on shared queue!\n",
                                    tid);
                goto out;
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "AGG for tid %d will be on queue #%d\n",
                            tid, txq_id);

        tid_data = &mvmsta->tid_data[tid];
        tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
        tid_data->txq_id = txq_id;
        *ssn = tid_data->ssn;

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
                            mvmsta->sta_id, tid, txq_id, tid_data->ssn,
                            tid_data->next_reclaimed);

        /*
         * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
         * to align the wrap around of ssn so we compare relevant values.
         */
        normalized_ssn = tid_data->ssn;
        if (mvm->trans->cfg->gen2)
                normalized_ssn &= 0xff;

        if (normalized_ssn == tid_data->next_reclaimed) {
                /* queue already drained up to ssn - agg can start now */
                tid_data->state = IWL_AGG_STARTING;
                ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
        } else {
                /* wait for outstanding frames to be reclaimed first */
                tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
        }

        ret = 0;

out:
        spin_unlock_bh(&mvmsta->lock);

        return ret;
}
2803
2804 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2805                             struct ieee80211_sta *sta, u16 tid, u16 buf_size,
2806                             bool amsdu)
2807 {
2808         struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2809         struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2810         unsigned int wdg_timeout =
2811                 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
2812         int queue, ret;
2813         bool alloc_queue = true;
2814         enum iwl_mvm_queue_status queue_status;
2815         u16 ssn;
2816
2817         struct iwl_trans_txq_scd_cfg cfg = {
2818                 .sta_id = mvmsta->sta_id,
2819                 .tid = tid,
2820                 .frame_limit = buf_size,
2821                 .aggregate = true,
2822         };
2823
2824         /*
2825          * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
2826          * manager, so this function should never be called in this case.
2827          */
2828         if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
2829                 return -EINVAL;
2830
2831         BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2832                      != IWL_MAX_TID_COUNT);
2833
2834         spin_lock_bh(&mvmsta->lock);
2835         ssn = tid_data->ssn;
2836         queue = tid_data->txq_id;
2837         tid_data->state = IWL_AGG_ON;
2838         mvmsta->agg_tids |= BIT(tid);
2839         tid_data->ssn = 0xffff;
2840         tid_data->amsdu_in_ampdu_allowed = amsdu;
2841         spin_unlock_bh(&mvmsta->lock);
2842
2843         if (iwl_mvm_has_new_tx_api(mvm)) {
2844                 /*
2845                  * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
2846                  * would have failed, so if we are here there is no need to
2847                  * allocate a queue.
2848                  * However, if aggregation size is different than the default
2849                  * size, the scheduler should be reconfigured.
2850                  * We cannot do this with the new TX API, so return unsupported
2851                  * for now, until it will be offloaded to firmware..
2852                  * Note that if SCD default value changes - this condition
2853                  * should be updated as well.
2854                  */
2855                 if (buf_size < IWL_FRAME_LIMIT)
2856                         return -ENOTSUPP;
2857
2858                 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2859                 if (ret)
2860                         return -EIO;
2861                 goto out;
2862         }
2863
2864         cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
2865
2866         queue_status = mvm->queue_info[queue].status;
2867
2868         /* Maybe there is no need to even alloc a queue... */
2869         if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
2870                 alloc_queue = false;
2871
2872         /*
2873          * Only reconfig the SCD for the queue if the window size has
2874          * changed from current (become smaller)
2875          */
2876         if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
2877                 /*
2878                  * If reconfiguring an existing queue, it first must be
2879                  * drained
2880                  */
2881                 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2882                                                      BIT(queue));
2883                 if (ret) {
2884                         IWL_ERR(mvm,
2885                                 "Error draining queue before reconfig\n");
2886                         return ret;
2887                 }
2888
2889                 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2890                                            mvmsta->sta_id, tid,
2891                                            buf_size, ssn);
2892                 if (ret) {
2893                         IWL_ERR(mvm,
2894                                 "Error reconfiguring TXQ #%d\n", queue);
2895                         return ret;
2896                 }
2897         }
2898
2899         if (alloc_queue)
2900                 iwl_mvm_enable_txq(mvm, sta, queue, ssn,
2901                                    &cfg, wdg_timeout);
2902
2903         /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2904         if (queue_status != IWL_MVM_QUEUE_SHARED) {
2905                 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2906                 if (ret)
2907                         return -EIO;
2908         }
2909
2910         /* No need to mark as reserved */
2911         mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
2912
2913 out:
2914         /*
2915          * Even though in theory the peer could have different
2916          * aggregation reorder buffer sizes for different sessions,
2917          * our ucode doesn't allow for that and has a global limit
2918          * for each station. Therefore, use the minimum of all the
2919          * aggregation sessions and our default value.
2920          */
2921         mvmsta->max_agg_bufsize =
2922                 min(mvmsta->max_agg_bufsize, buf_size);
2923         mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2924
2925         IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2926                      sta->addr, tid);
2927
2928         return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
2929 }
2930
2931 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
2932                                         struct iwl_mvm_sta *mvmsta,
2933                                         struct iwl_mvm_tid_data *tid_data)
2934 {
2935         u16 txq_id = tid_data->txq_id;
2936
2937         lockdep_assert_held(&mvm->mutex);
2938
2939         if (iwl_mvm_has_new_tx_api(mvm))
2940                 return;
2941
2942         /*
2943          * The TXQ is marked as reserved only if no traffic came through yet
2944          * This means no traffic has been sent on this TID (agg'd or not), so
2945          * we no longer have use for the queue. Since it hasn't even been
2946          * allocated through iwl_mvm_enable_txq, so we can just mark it back as
2947          * free.
2948          */
2949         if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
2950                 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
2951                 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
2952         }
2953 }
2954
/*
 * Handle mac80211's graceful AMPDU TX stop: tear down the aggregation
 * state for @tid according to the TID state machine and notify mac80211
 * via ieee80211_stop_tx_ba_cb_irqsafe() when done.
 *
 * Returns 0 on success, -EINVAL if the TID was in an unexpected state.
 */
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta, u16 tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
        u16 txq_id;
        int err;

        /*
         * If mac80211 is cleaning its state, then say that we finished since
         * our state has been cleared anyway.
         */
        if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                return 0;
        }

        spin_lock_bh(&mvmsta->lock);

        txq_id = tid_data->txq_id;

        IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
                            mvmsta->sta_id, tid, txq_id, tid_data->state);

        mvmsta->agg_tids &= ~BIT(tid);

        iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

        switch (tid_data->state) {
        case IWL_AGG_ON:
                tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

                IWL_DEBUG_TX_QUEUES(mvm,
                                    "ssn = %d, next_recl = %d\n",
                                    tid_data->ssn, tid_data->next_reclaimed);

                tid_data->ssn = 0xffff;
                tid_data->state = IWL_AGG_OFF;
                spin_unlock_bh(&mvmsta->lock);

                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

                /* tell the fw to stop aggregating on this TID */
                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
                return 0;
        case IWL_AGG_STARTING:
        case IWL_EMPTYING_HW_QUEUE_ADDBA:
                /*
                 * The agg session has been stopped before it was set up. This
                 * can happen when the AddBA timer times out for example.
                 */

                /* No barriers since we are under mutex */
                lockdep_assert_held(&mvm->mutex);

                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                tid_data->state = IWL_AGG_OFF;
                err = 0;
                break;
        default:
                IWL_ERR(mvm,
                        "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
                        mvmsta->sta_id, tid, tid_data->state);
                IWL_ERR(mvm,
                        "\ttid_data->txq_id = %d\n", tid_data->txq_id);
                err = -EINVAL;
        }

        spin_unlock_bh(&mvmsta->lock);

        return err;
}
3026
/*
 * Handle mac80211's flushing AMPDU TX stop: immediately mark the TID as
 * not aggregating, then - if a session was actually running - drain and
 * flush the TX queue before disabling aggregation in the fw.
 *
 * Always returns 0.
 */
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta, u16 tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
        u16 txq_id;
        enum iwl_mvm_agg_state old_state;

        /*
         * First set the agg state to OFF to avoid calling
         * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
         */
        spin_lock_bh(&mvmsta->lock);
        txq_id = tid_data->txq_id;
        IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
                            mvmsta->sta_id, tid, txq_id, tid_data->state);
        old_state = tid_data->state;
        tid_data->state = IWL_AGG_OFF;
        mvmsta->agg_tids &= ~BIT(tid);
        spin_unlock_bh(&mvmsta->lock);

        iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

        if (old_state >= IWL_AGG_ON) {
                /* block new TX for the station while flushing */
                iwl_mvm_drain_sta(mvm, mvmsta, true);

                if (iwl_mvm_has_new_tx_api(mvm)) {
                        if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
                                                   BIT(tid), 0))
                                IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
                        iwl_trans_wait_txq_empty(mvm->trans, txq_id);
                } else {
                        if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
                                IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
                        iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
                }

                iwl_mvm_drain_sta(mvm, mvmsta, false);

                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
        }

        return 0;
}
3071
3072 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3073 {
3074         int i, max = -1, max_offs = -1;
3075
3076         lockdep_assert_held(&mvm->mutex);
3077
3078         /* Pick the unused key offset with the highest 'deleted'
3079          * counter. Every time a key is deleted, all the counters
3080          * are incremented and the one that was just deleted is
3081          * reset to zero. Thus, the highest counter is the one
3082          * that was deleted longest ago. Pick that one.
3083          */
3084         for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3085                 if (test_bit(i, mvm->fw_key_table))
3086                         continue;
3087                 if (mvm->fw_key_deleted[i] > max) {
3088                         max = mvm->fw_key_deleted[i];
3089                         max_offs = i;
3090                 }
3091         }
3092
3093         if (max_offs < 0)
3094                 return STA_KEY_IDX_INVALID;
3095
3096         return max_offs;
3097 }
3098
3099 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3100                                                struct ieee80211_vif *vif,
3101                                                struct ieee80211_sta *sta)
3102 {
3103         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3104
3105         if (sta)
3106                 return iwl_mvm_sta_from_mac80211(sta);
3107
3108         /*
3109          * The device expects GTKs for station interfaces to be
3110          * installed as GTKs for the AP station. If we have no
3111          * station ID, then use AP's station ID.
3112          */
3113         if (vif->type == NL80211_IFTYPE_STATION &&
3114             mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3115                 u8 sta_id = mvmvif->ap_sta_id;
3116
3117                 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3118                                             lockdep_is_held(&mvm->mutex));
3119
3120                 /*
3121                  * It is possible that the 'sta' parameter is NULL,
3122                  * for example when a GTK is removed - the sta_id will then
3123                  * be the AP ID, and no station was passed by mac80211.
3124                  */
3125                 if (IS_ERR_OR_NULL(sta))
3126                         return NULL;
3127
3128                 return iwl_mvm_sta_from_mac80211(sta);
3129         }
3130
3131         return NULL;
3132 }
3133
/*
 * iwl_mvm_send_sta_key - program one cipher key into the firmware
 *
 * Builds an ADD_STA_KEY command for @sta_id and sends it, either
 * synchronously (waiting for the status) or with CMD_ASYNC when
 * requested via @cmd_flags.  @tkip_iv32/@tkip_p1k are only consumed
 * on the pre-TKIP_MIC_KEYS firmware path.
 *
 * Returns 0 on success, -EINVAL for an invalid station, -EIO when the
 * firmware rejected the key, or a send error.
 */
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
                                u32 sta_id,
                                struct ieee80211_key_conf *key, bool mcast,
                                u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
                                u8 key_offset, bool mfp)
{
        /*
         * The v1 and current command layouts share a common prefix
         * (see the u.cmd.common assignments below), so one buffer
         * serves both firmware APIs.
         */
        union {
                struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
                struct iwl_mvm_add_sta_key_cmd cmd;
        } u = {};
        __le16 key_flags;
        int ret;
        u32 status;
        u16 keyidx;
        u64 pn = 0;
        int i, size;
        /* newer firmware takes TKIP MIC keys and the TX PN in the command */
        bool new_api = fw_has_api(&mvm->fw->ucode_capa,
                                  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

        if (sta_id == IWL_MVM_INVALID_STA)
                return -EINVAL;

        keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
                 STA_KEY_FLG_KEYID_MSK;
        key_flags = cpu_to_le16(keyidx);
        key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
                key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
                if (new_api) {
                        memcpy((void *)&u.cmd.tx_mic_key,
                               &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
                               IWL_MIC_KEY_SIZE);

                        memcpy((void *)&u.cmd.rx_mic_key,
                               &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
                               IWL_MIC_KEY_SIZE);
                        pn = atomic64_read(&key->tx_pn);

                } else {
                        /* old firmware: hand over the current phase-1 key */
                        u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
                        for (i = 0; i < 5; i++)
                                u.cmd_v1.tkip_rx_ttak[i] =
                                        cpu_to_le16(tkip_p1k[i]);
                }
                memcpy(u.cmd.common.key, key->key, key->keylen);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
                memcpy(u.cmd.common.key, key->key, key->keylen);
                if (new_api)
                        pn = atomic64_read(&key->tx_pn);
                break;
        case WLAN_CIPHER_SUITE_WEP104:
                key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
                /* fall through */
        case WLAN_CIPHER_SUITE_WEP40:
                key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
                /* WEP key material goes 3 bytes into the key buffer */
                memcpy(u.cmd.common.key + 3, key->key, key->keylen);
                break;
        case WLAN_CIPHER_SUITE_GCMP_256:
                key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
                /* fall through */
        case WLAN_CIPHER_SUITE_GCMP:
                key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
                memcpy(u.cmd.common.key, key->key, key->keylen);
                if (new_api)
                        pn = atomic64_read(&key->tx_pn);
                break;
        default:
                /* unknown ciphers are installed as "extended" keys */
                key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
                memcpy(u.cmd.common.key, key->key, key->keylen);
        }

        if (mcast)
                key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
        if (mfp)
                key_flags |= cpu_to_le16(STA_KEY_MFP);

        u.cmd.common.key_offset = key_offset;
        u.cmd.common.key_flags = key_flags;
        u.cmd.common.sta_id = sta_id;

        if (new_api) {
                u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
                size = sizeof(u.cmd);
        } else {
                size = sizeof(u.cmd_v1);
        }

        status = ADD_STA_SUCCESS;
        if (cmd_flags & CMD_ASYNC)
                ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
                                           &u.cmd);
        else
                ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
                                                  &u.cmd, &status);

        /* note: status is only updated on the synchronous path above */
        switch (status) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
                break;
        }

        return ret;
}
3245
3246 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3247                                  struct ieee80211_key_conf *keyconf,
3248                                  u8 sta_id, bool remove_key)
3249 {
3250         struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3251
3252         /* verify the key details match the required command's expectations */
3253         if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3254                     (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3255                     (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3256                      keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3257                      keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3258                 return -EINVAL;
3259
3260         if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3261                     keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3262                 return -EINVAL;
3263
3264         igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3265         igtk_cmd.sta_id = cpu_to_le32(sta_id);
3266
3267         if (remove_key) {
3268                 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3269         } else {
3270                 struct ieee80211_key_seq seq;
3271                 const u8 *pn;
3272
3273                 switch (keyconf->cipher) {
3274                 case WLAN_CIPHER_SUITE_AES_CMAC:
3275                         igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3276                         break;
3277                 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3278                 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3279                         igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3280                         break;
3281                 default:
3282                         return -EINVAL;
3283                 }
3284
3285                 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3286                 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3287                         igtk_cmd.ctrl_flags |=
3288                                 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3289                 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3290                 pn = seq.aes_cmac.pn;
3291                 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3292                                                        ((u64) pn[4] << 8) |
3293                                                        ((u64) pn[3] << 16) |
3294                                                        ((u64) pn[2] << 24) |
3295                                                        ((u64) pn[1] << 32) |
3296                                                        ((u64) pn[0] << 40));
3297         }
3298
3299         IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3300                        remove_key ? "removing" : "installing",
3301                        igtk_cmd.sta_id);
3302
3303         if (!iwl_mvm_has_new_rx_api(mvm)) {
3304                 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3305                         .ctrl_flags = igtk_cmd.ctrl_flags,
3306                         .key_id = igtk_cmd.key_id,
3307                         .sta_id = igtk_cmd.sta_id,
3308                         .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3309                 };
3310
3311                 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3312                        ARRAY_SIZE(igtk_cmd_v1.igtk));
3313                 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3314                                             sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3315         }
3316         return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3317                                     sizeof(igtk_cmd), &igtk_cmd);
3318 }
3319
3320
3321 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3322                                        struct ieee80211_vif *vif,
3323                                        struct ieee80211_sta *sta)
3324 {
3325         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3326
3327         if (sta)
3328                 return sta->addr;
3329
3330         if (vif->type == NL80211_IFTYPE_STATION &&
3331             mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3332                 u8 sta_id = mvmvif->ap_sta_id;
3333                 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3334                                                 lockdep_is_held(&mvm->mutex));
3335                 return sta->addr;
3336         }
3337
3338
3339         return NULL;
3340 }
3341
3342 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3343                                  struct ieee80211_vif *vif,
3344                                  struct ieee80211_sta *sta,
3345                                  struct ieee80211_key_conf *keyconf,
3346                                  u8 key_offset,
3347                                  bool mcast)
3348 {
3349         int ret;
3350         const u8 *addr;
3351         struct ieee80211_key_seq seq;
3352         u16 p1k[5];
3353         u32 sta_id;
3354         bool mfp = false;
3355
3356         if (sta) {
3357                 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3358
3359                 sta_id = mvm_sta->sta_id;
3360                 mfp = sta->mfp;
3361         } else if (vif->type == NL80211_IFTYPE_AP &&
3362                    !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3363                 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3364
3365                 sta_id = mvmvif->mcast_sta.sta_id;
3366         } else {
3367                 IWL_ERR(mvm, "Failed to find station id\n");
3368                 return -EINVAL;
3369         }
3370
3371         switch (keyconf->cipher) {
3372         case WLAN_CIPHER_SUITE_TKIP:
3373                 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3374                 /* get phase 1 key from mac80211 */
3375                 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3376                 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3377                 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3378                                            seq.tkip.iv32, p1k, 0, key_offset,
3379                                            mfp);
3380                 break;
3381         case WLAN_CIPHER_SUITE_CCMP:
3382         case WLAN_CIPHER_SUITE_WEP40:
3383         case WLAN_CIPHER_SUITE_WEP104:
3384         case WLAN_CIPHER_SUITE_GCMP:
3385         case WLAN_CIPHER_SUITE_GCMP_256:
3386                 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3387                                            0, NULL, 0, key_offset, mfp);
3388                 break;
3389         default:
3390                 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3391                                            0, NULL, 0, key_offset, mfp);
3392         }
3393
3394         return ret;
3395 }
3396
/*
 * __iwl_mvm_remove_sta_key - invalidate a key slot in the firmware
 *
 * Sends an ADD_STA_KEY command with STA_KEY_NOT_VALID set so the
 * firmware drops the key at keyconf->hw_key_idx for @sta_id.
 * Returns 0 on success (or for an already-invalid station, which is
 * legal on GTK removal), -EIO on firmware rejection, or a send error.
 */
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
                                    struct ieee80211_key_conf *keyconf,
                                    bool mcast)
{
        /* v1 and current command layouts share the 'common' prefix */
        union {
                struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
                struct iwl_mvm_add_sta_key_cmd cmd;
        } u = {};
        bool new_api = fw_has_api(&mvm->fw->ucode_capa,
                                  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
        __le16 key_flags;
        int ret, size;
        u32 status;

        /* This is a valid situation for GTK removal */
        if (sta_id == IWL_MVM_INVALID_STA)
                return 0;

        key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
                                 STA_KEY_FLG_KEYID_MSK);
        key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
        /* STA_KEY_NOT_VALID marks the key for removal */
        key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

        if (mcast)
                key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

        /*
         * The fields assigned here are in the same location at the start
         * of the command, so we can do this union trick.
         */
        u.cmd.common.key_flags = key_flags;
        u.cmd.common.key_offset = keyconf->hw_key_idx;
        u.cmd.common.sta_id = sta_id;

        /* only the size differs between the two firmware APIs */
        size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
                                          &status);

        switch (status) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
                break;
        }

        return ret;
}
3449
/*
 * iwl_mvm_set_sta_key - top-level key installation entry point
 *
 * Resolves the target firmware station, allocates a key offset if the
 * caller did not pre-assign one, and installs the key (twice for WEP,
 * which shares one slot between unicast and multicast).  Must be called
 * with mvm->mutex held.  Returns 0 on success, -EINVAL for an unknown
 * station, -ENOSPC when the key table is full, or a send error.
 */
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
                        struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta,
                        struct ieee80211_key_conf *keyconf,
                        u8 key_offset)
{
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        struct iwl_mvm_sta *mvm_sta;
        u8 sta_id = IWL_MVM_INVALID_STA;
        int ret;
        /* placeholder address for the debug print when sta is NULL */
        static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

        lockdep_assert_held(&mvm->mutex);

        if (vif->type != NL80211_IFTYPE_AP ||
            keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
                /* Get the station id from the mvm local station table */
                mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
                if (!mvm_sta) {
                        IWL_ERR(mvm, "Failed to find station\n");
                        return -EINVAL;
                }
                sta_id = mvm_sta->sta_id;

                /*
                 * It is possible that the 'sta' parameter is NULL, and thus
                 * there is a need to retrieve the sta from the local station
                 * table.
                 */
                if (!sta) {
                        sta = rcu_dereference_protected(
                                mvm->fw_id_to_mac_id[sta_id],
                                lockdep_is_held(&mvm->mutex));
                        if (IS_ERR_OR_NULL(sta)) {
                                IWL_ERR(mvm, "Invalid station id\n");
                                return -EINVAL;
                        }
                }

                /* refuse keys for stations belonging to another vif */
                if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
                        return -EINVAL;
        } else {
                /* AP group keys go to the vif's multicast station */
                struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

                sta_id = mvmvif->mcast_sta.sta_id;
        }

        /* management group keys (IGTK) use a dedicated firmware command */
        if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
            keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
            keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
                ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
                goto end;
        }

        /* If the key_offset is not pre-assigned, we need to find a
         * new offset to use.  In normal cases, the offset is not
         * pre-assigned, but during HW_RESTART we want to reuse the
         * same indices, so we pass them when this function is called.
         *
         * In D3 entry, we need to hardcoded the indices (because the
         * firmware hardcodes the PTK offset to 0).  In this case, we
         * need to make sure we don't overwrite the hw_key_idx in the
         * keyconf structure, because otherwise we cannot configure
         * the original ones back when resuming.
         */
        if (key_offset == STA_KEY_IDX_INVALID) {
                key_offset  = iwl_mvm_set_fw_key_idx(mvm);
                if (key_offset == STA_KEY_IDX_INVALID)
                        return -ENOSPC;
                keyconf->hw_key_idx = key_offset;
        }

        ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
        if (ret)
                goto end;

        /*
         * For WEP, the same key is used for multicast and unicast. Upload it
         * again, using the same key offset, and now pointing the other one
         * to the same key slot (offset).
         * If this fails, remove the original as well.
         */
        if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
             keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
            sta) {
                ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
                                            key_offset, !mcast);
                if (ret) {
                        __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
                        goto end;
                }
        }

        /* mark the slot used so iwl_mvm_set_fw_key_idx() won't reuse it */
        __set_bit(key_offset, mvm->fw_key_table);

end:
        IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
                      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
                      sta ? sta->addr : zero_addr, ret);
        return ret;
}
3551
/*
 * iwl_mvm_remove_sta_key - top-level key removal entry point
 *
 * Resolves the firmware station, frees the key offset in the local
 * bookkeeping, and invalidates the key in the firmware (twice for WEP,
 * which occupies both the unicast and multicast mappings).  Must be
 * called with mvm->mutex held.  Returns 0 on success, -ENOENT when the
 * offset was not marked used, or a send error.
 */
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
                           struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta,
                           struct ieee80211_key_conf *keyconf)
{
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        struct iwl_mvm_sta *mvm_sta;
        u8 sta_id = IWL_MVM_INVALID_STA;
        int ret, i;

        lockdep_assert_held(&mvm->mutex);

        /* Get the station from the mvm local station table */
        mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
        if (mvm_sta)
                sta_id = mvm_sta->sta_id;
        else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
                /* AP group keys live on the vif's multicast station */
                sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;


        IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
                      keyconf->keyidx, sta_id);

        /* IGTK ciphers are removed through their dedicated command */
        if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
                        keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
                        keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
                return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

        if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
                IWL_ERR(mvm, "offset %d not used in fw key table.\n",
                        keyconf->hw_key_idx);
                return -ENOENT;
        }

        /* track which key was deleted last */
        for (i = 0; i < STA_KEY_MAX_NUM; i++) {
                if (mvm->fw_key_deleted[i] < U8_MAX)
                        mvm->fw_key_deleted[i]++;
        }
        /* the just-freed slot restarts at zero (see iwl_mvm_set_fw_key_idx) */
        mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

        /* station already gone: local bookkeeping above is all we can do */
        if (sta && !mvm_sta) {
                IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
                return 0;
        }

        ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
        if (ret)
                return ret;

        /* delete WEP key twice to get rid of (now useless) offset */
        if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
            keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
                ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

        return ret;
}
3609
3610 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3611                              struct ieee80211_vif *vif,
3612                              struct ieee80211_key_conf *keyconf,
3613                              struct ieee80211_sta *sta, u32 iv32,
3614                              u16 *phase1key)
3615 {
3616         struct iwl_mvm_sta *mvm_sta;
3617         bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3618         bool mfp = sta ? sta->mfp : false;
3619
3620         rcu_read_lock();
3621
3622         mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3623         if (WARN_ON_ONCE(!mvm_sta))
3624                 goto unlock;
3625         iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3626                              iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3627                              mfp);
3628
3629  unlock:
3630         rcu_read_unlock();
3631 }
3632
3633 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3634                                 struct ieee80211_sta *sta)
3635 {
3636         struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3637         struct iwl_mvm_add_sta_cmd cmd = {
3638                 .add_modify = STA_MODE_MODIFY,
3639                 .sta_id = mvmsta->sta_id,
3640                 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
3641                 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3642         };
3643         int ret;
3644
3645         ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3646                                    iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3647         if (ret)
3648                 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3649 }
3650
/*
 * iwl_mvm_sta_modify_sleep_tx_count - release frames to a sleeping station
 *
 * Programs the firmware to transmit up to @cnt frames for the TIDs in
 * @tids during a U-APSD/PS-Poll service period.  With @single_sta_queue
 * the per-TID queue depths are consulted to cap the count and to decide
 * whether the more-data indication must be set.
 */
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta,
                                       enum ieee80211_frame_release_type reason,
                                       u16 cnt, u16 tids, bool more_data,
                                       bool single_sta_queue)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
                .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
                .sleep_tx_count = cpu_to_le16(cnt),
                .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
        };
        int tid, ret;
        /* unsigned long copy so for_each_set_bit() can iterate it */
        unsigned long _tids = tids;

        /* convert TIDs to ACs - we don't support TSPEC so that's OK
         * Note that this field is reserved and unused by firmware not
         * supporting GO uAPSD, so it's safe to always do this.
         */
        for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
                cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

        /* If we're releasing frames from aggregation or dqa queues then check
         * if all the queues that we're releasing frames from, combined, have:
         *  - more frames than the service period, in which case more_data
         *    needs to be set
         *  - fewer than 'cnt' frames, in which case we need to adjust the
         *    firmware command (but do that unconditionally)
         */
        if (single_sta_queue) {
                int remaining = cnt;
                int sleep_tx_count;

                spin_lock_bh(&mvmsta->lock);
                for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
                        struct iwl_mvm_tid_data *tid_data;
                        u16 n_queued;

                        tid_data = &mvmsta->tid_data[tid];

                        n_queued = iwl_mvm_tid_queued(mvm, tid_data);
                        if (n_queued > remaining) {
                                /* budget exhausted: more frames remain */
                                more_data = true;
                                remaining = 0;
                                break;
                        }
                        remaining -= n_queued;
                }
                /* only release as many frames as are actually queued */
                sleep_tx_count = cnt - remaining;
                if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
                        mvmsta->sleep_tx_count = sleep_tx_count;
                spin_unlock_bh(&mvmsta->lock);

                cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
                if (WARN_ON(cnt - remaining == 0)) {
                        /* nothing queued at all: end the service period now */
                        ieee80211_sta_eosp(sta);
                        return;
                }
        }

        /* Note: this is ignored by firmware not supporting GO uAPSD */
        if (more_data)
                cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

        if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
                mvmsta->next_status_eosp = true;
                cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
        } else {
                cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
        }

        /* block the Tx queues until the FW updated the sleep Tx count */
        iwl_trans_block_txq_ptrs(mvm->trans, true);

        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
                                   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
                                   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
3733
3734 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3735                            struct iwl_rx_cmd_buffer *rxb)
3736 {
3737         struct iwl_rx_packet *pkt = rxb_addr(rxb);
3738         struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3739         struct ieee80211_sta *sta;
3740         u32 sta_id = le32_to_cpu(notif->sta_id);
3741
3742         if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
3743                 return;
3744
3745         rcu_read_lock();
3746         sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3747         if (!IS_ERR_OR_NULL(sta))
3748                 ieee80211_sta_eosp(sta);
3749         rcu_read_unlock();
3750 }
3751
3752 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3753                                    struct iwl_mvm_sta *mvmsta, bool disable)
3754 {
3755         struct iwl_mvm_add_sta_cmd cmd = {
3756                 .add_modify = STA_MODE_MODIFY,
3757                 .sta_id = mvmsta->sta_id,
3758                 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3759                 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3760                 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3761         };
3762         int ret;
3763
3764         ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3765                                    iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3766         if (ret)
3767                 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3768 }
3769
3770 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3771                                       struct ieee80211_sta *sta,
3772                                       bool disable)
3773 {
3774         struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3775
3776         spin_lock_bh(&mvm_sta->lock);
3777
3778         if (mvm_sta->disable_tx == disable) {
3779                 spin_unlock_bh(&mvm_sta->lock);
3780                 return;
3781         }
3782
3783         mvm_sta->disable_tx = disable;
3784
3785         /* Tell mac80211 to start/stop queuing tx for this station */
3786         ieee80211_sta_block_awake(mvm->hw, sta, disable);
3787
3788         iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
3789
3790         spin_unlock_bh(&mvm_sta->lock);
3791 }
3792
3793 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
3794                                               struct iwl_mvm_vif *mvmvif,
3795                                               struct iwl_mvm_int_sta *sta,
3796                                               bool disable)
3797 {
3798         u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
3799         struct iwl_mvm_add_sta_cmd cmd = {
3800                 .add_modify = STA_MODE_MODIFY,
3801                 .sta_id = sta->sta_id,
3802                 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3803                 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3804                 .mac_id_n_color = cpu_to_le32(id),
3805         };
3806         int ret;
3807
3808         ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
3809                                    iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3810         if (ret)
3811                 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3812 }
3813
3814 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
3815                                        struct iwl_mvm_vif *mvmvif,
3816                                        bool disable)
3817 {
3818         struct ieee80211_sta *sta;
3819         struct iwl_mvm_sta *mvm_sta;
3820         int i;
3821
3822         lockdep_assert_held(&mvm->mutex);
3823
3824         /* Block/unblock all the stations of the given mvmvif */
3825         for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
3826                 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3827                                                 lockdep_is_held(&mvm->mutex));
3828                 if (IS_ERR_OR_NULL(sta))
3829                         continue;
3830
3831                 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3832                 if (mvm_sta->mac_id_n_color !=
3833                     FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
3834                         continue;
3835
3836                 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
3837         }
3838
3839         if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
3840                 return;
3841
3842         /* Need to block/unblock also multicast station */
3843         if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
3844                 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3845                                                   &mvmvif->mcast_sta, disable);
3846
3847         /*
3848          * Only unblock the broadcast station (FW blocks it for immediate
3849          * quiet, not the driver)
3850          */
3851         if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
3852                 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3853                                                   &mvmvif->bcast_sta, disable);
3854 }
3855
3856 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3857 {
3858         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3859         struct iwl_mvm_sta *mvmsta;
3860
3861         rcu_read_lock();
3862
3863         mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
3864
3865         if (!WARN_ON(!mvmsta))
3866                 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
3867
3868         rcu_read_unlock();
3869 }
3870
3871 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
3872 {
3873         u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3874
3875         /*
3876          * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
3877          * to align the wrap around of ssn so we compare relevant values.
3878          */
3879         if (mvm->trans->cfg->gen2)
3880                 sn &= 0xff;
3881
3882         return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
3883 }