/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"
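/*
 * txwi (TX descriptor) cache: descriptors are allocated once with
 * devm_kzalloc(), DMA-mapped once, and then recycled through
 * dev->txwi_cache so the hot path never reallocates or remaps them.
 */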
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	int size;

	/* Round the allocation up to a full cache line */
	size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!t)
		return NULL;

	addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
			      DMA_TO_DEVICE);
	t->dma_addr = addr;

	return t;
}
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}
struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}
void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}
void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
				 DMA_TO_DEVICE);
}
static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}
static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	/* Remember the SSN just past this frame's sequence number */
	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
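/*
 * TX status tracking: frames that need a status report stay on
 * dev->status_list until both the DMA completion and the TX status event
 * have been seen. Completed frames are moved to a caller-provided list and
 * reported to mac80211 only after the lock is dropped.
 *
 * Typical driver usage (a sketch, not copied from a specific caller):
 *
 *	mt76_tx_status_lock(dev, &list);
 *	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
 *	if (skb)
 *		mt76_tx_status_skb_done(dev, skb, &list);
 *	mt76_tx_status_unlock(dev, &list);
 */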
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
	__acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
	__acquire(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
	__releases(&dev->status_list.lock)
{
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_list.lock);
	__release(&dev->status_list.lock);

	while ((skb = __skb_dequeue(list)) != NULL)
		ieee80211_tx_status(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	__skb_unlink(skb, &dev->status_list);

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}
void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
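/*
 * Assign a packet ID for status matching. IDs increment per wcid and wrap
 * within MT_PACKET_ID_MASK, skipping the reserved MT_PACKET_ID_NO_ACK and
 * MT_PACKET_ID_NO_SKB values, which indicate that no status skb is tracked.
 */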
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
	if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
	    wcid->packet_id == MT_PACKET_ID_NO_SKB)
		wcid->packet_id = MT_PACKET_ID_FIRST;

	pid = wcid->packet_id;
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		/* Expire stale entries; a negative pktid flushes everything */
		if (pktid >= 0 &&
		    !time_after(jiffies, cb->jiffies +
					 MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
					  MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct sk_buff_head list;

	/* Frames not linked on the status list can be freed immediately */
	if (!skb->prev) {
		ieee80211_free_txskb(dev->hw, skb);
		return;
	}

	mt76_tx_status_lock(dev, &list);
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
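/*
 * Direct transmit path: fall back to the BE queue on an invalid mapping,
 * fill in default rates if the wcid has none set, track the aggregation SSN
 * for QoS data frames, and stop the mac80211 queue when fewer than 8
 * hardware descriptors remain.
 */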
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
		struct ieee80211_txq *txq;
		struct mt76_txq *mtxq;
		u8 tid;

		tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
		txq = sta->txq[tid];
		mtxq = (struct mt76_txq *) txq->drv_priv;

		if (mtxq->aggr)
			mt76_check_agg_ssn(mtxq, skb);
	}

	q = &dev->q_tx[qid];

	spin_lock_bh(&q->lock);
	dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	if (q->queued > q->ndesc - 8)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct sk_buff *skb;

	/* Retried frames take precedence over new ones from mac80211 */
	skb = skb_dequeue(&mtxq->retry_q);
	if (skb) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (ps && skb_queue_empty(&mtxq->retry_q))
			ieee80211_sta_set_buffered(txq->sta, tid, false);

		return skb;
	}

	skb = ieee80211_tx_dequeue(dev->hw, txq);
	if (!skb)
		return NULL;

	return skb;
}
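/*
 * Power-save delivery: each released frame is flagged as a PS response,
 * with the more-data bit set on all but the last frame. The last frame
 * requests TX status and marks the end of the service period (EOSP).
 */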
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
}
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
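/*
 * Dequeue a burst of frames from one txq: up to 16 frames when aggregation
 * is active, 3 otherwise. The first frame's rate is reused for the whole
 * burst; rate-probing frames are always sent on their own.
 */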
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
		    struct mt76_txq *mtxq, bool *empty)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct mt76_wcid *wcid = mtxq->wcid;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) {
		*empty = true;
		return 0;
	}

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb) {
		*empty = true;
		return 0;
	}

	info = IEEE80211_SKB_CB(skb);
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb) {
			*empty = true;
			break;
		}

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		/* Defer frames that do not fit the current burst */
		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
						   txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		hwq->swq_queued++;
		hwq->entry[idx].schedule = true;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}
static int
mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	struct mt76_txq *mtxq, *mtxq_last;
	int len = 0;

restart:
	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
	while (!list_empty(&hwq->swq)) {
		bool empty = false;
		int cur;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			/* Drop the queue lock to send the BAR, then rescan */
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
			goto restart;
		}

		list_del_init(&mtxq->list);

		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
		if (!empty)
			list_add_tail(&mtxq->list, &hwq->swq);

		if (cur < 0)
			return cur;

		len += cur;

		if (mtxq == mtxq_last)
			break;
	}

	return len;
}
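/*
 * Scheduler entry point: keep servicing the software queue list until it is
 * empty or four bursts are already outstanding on the hardware queue.
 */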
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	int len;

	rcu_read_lock();
	do {
		if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
			break;

		len = mt76_txq_schedule_list(dev, hwq);
	} while (len > 0);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
void mt76_txq_schedule_all(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++) {
		struct mt76_queue *q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		mt76_txq_schedule(dev, q);
		spin_unlock_bh(&q->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&mtxq->hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		if (!list_empty(&mtxq->list))
			list_del_init(&mtxq->list);
		spin_unlock_bh(&mtxq->hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_dev *dev = hw->priv;
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
	struct mt76_queue *hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (list_empty(&mtxq->list))
		list_add_tail(&mtxq->list, &hwq->swq);
	mt76_txq_schedule(dev, hwq);
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;
	struct mt76_queue *hwq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *) txq->drv_priv;
	hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (!list_empty(&mtxq->list))
		list_del_init(&mtxq->list);
	spin_unlock_bh(&hwq->lock);

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
		ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

	INIT_LIST_HEAD(&mtxq->list);
	skb_queue_head_init(&mtxq->retry_q);

	mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);
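/*
 * Map mac80211 access categories (numbered VO=0 .. BK=3) onto the hardware
 * WMM queue numbering (BE=0, BK=1, VI=2, VO=3), e.g.
 * mt76_ac_to_hwq(IEEE80211_AC_VO) returns 3.
 */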
u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);