/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>
#include <linux/bitfield.h>
/* When under memory pressure the rx ring refill may fail and needs a retry. */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
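
/* Look up the sk_buff posted to the rx ring at the given DMA address. The
 * skb_table hash is keyed by the buffer's physical address and is only
 * populated when the firmware uses the in-order (full rx reorder) delivery
 * path.
 */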
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	return NULL;
}
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
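
/* The rx ring stores buffer DMA addresses either as 32-bit or as 64-bit
 * words depending on the target. The *_32/*_64 helpers below implement the
 * same operations for both layouts; the driver is expected to dispatch to
 * the right variant based on the target's addressing width (e.g. via
 * ar->hw_params.target_64bit, as done elsewhere in this file).
 */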
static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}
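
/* Example of the wrap-around arithmetic above: the ring size is a power of
 * two (enforced in ath10k_htt_rx_alloc), so with e.g. a 2048-entry ring
 * size_mask is 0x7ff and "idx++; idx &= size_mask;" steps 2046, 2047, 0, 1,
 * ... without a conditional. The same masking is used when popping buffers.
 */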
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring the
	 * firmware will not report RX until the ring is refilled with enough
	 * buffers. This automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	return ret;
}
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	ath10k_htt_rx_ring_free(htt);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}
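
/* For reference, the lengths returned by this helper and the two below map
 * to the usual 802.11 cipher layouts (values per include/linux/ieee80211.h):
 * WEP IV 4 / ICV 4, TKIP IV 8 / ICV 4 (plus an 8-byte Michael MIC), CCMP-128
 * header 8 / MIC 8, CCMP-256 header 8 / MIC 16, GCMP and GCMP-256 header 8 /
 * MIC 16. WEP-128 and WAPI are not supported by this driver.
 */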
#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}
static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}
struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
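
/* In the VHT-SIG-A Group ID field (IEEE 802.11ac), values 0 and 63 denote a
 * single-user (SU) transmission; everything in between is MU-MIMO, where the
 * per-user MCS/Nss cannot be recovered from the rx descriptor (see
 * ath10k_htt_rx_h_rates below).
 */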
static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    __le32_to_cpu(rxd->ppdu_start.info0),
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
			return;
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}
static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}
static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}
static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	__skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}
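
/* Example of the rounding above: a 4-address data header is 30 bytes long;
 * unless the firmware advertises NO_NWIFI_DECAP_4ADDR_PADDING, the decapped
 * nwifi header is assumed to be padded to a 4-byte boundary, so 30 is
 * rounded up to 32 before being pulled off the msdu.
 */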
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}
static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
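
/* Setting skb->ip_summed to CHECKSUM_UNNECESSARY above tells the network
 * stack that the hardware already verified the IP and TCP/UDP checksums, so
 * the software check can be skipped; CHECKSUM_NONE leaves verification to
 * the stack.
 */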
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}
static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}
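
/* All subframes of an A-MSDU share the same PN (the MPDU is encrypted as a
 * whole), so mac80211's replay detection would otherwise drop every subframe
 * after the first one; RX_FLAG_ALLOW_SAME_PN is therefore set above on all
 * but the first subframe.
 */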
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: Might be able to optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skbs?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu.
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long *drop_cnt,
				    unsigned long *unchain_cnt)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long *drop_cnt)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	if (drop_cnt)
		*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
}
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	unsigned long drop_cnt = 0;
	unsigned long unchain_cnt = 0;
	unsigned long drop_cnt_filter = 0;
	unsigned long msdus_to_queue, num_msdus;
	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* ret == 1 indicates chained MSDUs */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
	msdus_to_queue = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);

	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
				       unchain_cnt, drop_cnt, drop_cnt_filter,
				       msdus_to_queue);

	return 0;
}
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
				      struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;
	u16 peer_id;
	u8 tid;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);

	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
					     num_mpdu_ranges);
}
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 *  Note that with only one concurrent reader and one concurrent
		 *  writer, you don't need extra locking to use these macros.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}
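
/* Return values above: 0 on a complete A-MSDU, -ENOBUFS if the list was
 * empty, -EINVAL if amsdu wasn't empty on entry, and -EAGAIN when the list
 * ran out before the LAST_MSDU marker (the partial A-MSDU is spliced back
 * onto the list).
 */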
static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return -EIO;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return -EINVAL;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	if (ar->hw_params.target_64bit)
		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
						     &list);
	else
		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
						     &list);

	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return -EIO;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
					     NULL);
			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}

	return ret;
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}
static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */
		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}
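/* The pull-mode flow above, roughly: firmware names (peer_id, tid) pairs and
 * per-record msdu/byte quotas, the host pushes frames via
 * ath10k_mac_tx_push_txq() (which returns the pushed frame length in bytes,
 * or a negative error), writes the actually-pushed counts back into the
 * records and hands them back to firmware through ath10k_htt_tx_fetch_resp().
 */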
static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}
static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0, info1, threshold, peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */
		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}
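/* Design note: in push mode the host transmits on its own; in push-pull mode
 * each txq may only be pushed up to its num_push_allowed quota and the rest
 * is driven by tx-fetch indications, which is why the records above refresh
 * the per-queue allowance before kicking any pending tx.
 */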
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
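	/* ath10k_htt_t2h_msg_handler() returns false when it has kept the
	 * skb, e.g. an in-order rx indication queued to rx_in_ord_compl_q
	 * for NAPI processing, in which case it must not be freed here.
	 */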
	if (release)
		dev_kfree_skb_any(skb);
}
static inline bool is_valid_legacy_rate(u8 rate)
{
	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
					  18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return true;
	}

	return false;
}
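/* The table above is in Mbps as encoded by firmware; the 5 entry stands for
 * CCK 5.5 Mbps, though firmware may also report 5.5 Mbps as 6 (corrected in
 * ath10k_update_per_peer_tx_stats() below).
 */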
static void
ath10k_update_per_peer_tx_stats(struct ath10k *ar,
				struct ieee80211_sta *sta,
				struct ath10k_per_peer_tx_stats *peer_stats)
{
	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
	u8 rate = 0, sgi;
	struct rate_info txrate;

	lockdep_assert_held(&ar->data_lock);

	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
	sgi = ATH10K_HW_GI(peer_stats->flags);

	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
		ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
		return;
	}

	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
	    (txrate.mcs > 7 || txrate.nss < 1)) {
		ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
			    txrate.mcs, txrate.nss);
		return;
	}

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
		if (!is_valid_legacy_rate(rate)) {
			ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
				    rate);
			return;
		}

		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
		rate *= 10;
		if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
			rate = rate - 5;
		arsta->txrate.legacy = rate;
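		/* rate_info.legacy is in units of 100 kbit/s, hence the
		 * multiply by 10 above: e.g. firmware CCK "6" becomes
		 * 60 - 5 = 55, i.e. 5.5 Mbps.
		 */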
	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
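		/* HT MCS indices span spatial streams: 0-7 for one stream,
		 * 8-15 for two, etc., so e.g. nss 2 / per-stream mcs 7 maps
		 * to the mac80211 HT index 7 + 8 * (2 - 1) = 15.
		 */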
	} else {
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->txrate.mcs = txrate.mcs;
	}

	if (sgi)
		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	arsta->txrate.nss = txrate.nss;
	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
}
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
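	/* ppdu_len is reported by firmware in 32-bit words; e.g. a reported
	 * length of 10 words means each of the num_ppdu records occupies
	 * 40 bytes of payload, which the check below validates against the
	 * buffer length.
	 */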
	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			   (resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	return;

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}

	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
					  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
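/* Deliver up to (budget - quota) queued rx msdus to mac80211 and return the
 * updated quota; the caller treats quota == budget as "more work pending".
 */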
static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = __skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

	/* From NAPI documentation:
	 * The napi poll() function may also process TX completions, in which
	 * case if it processes the entire TX ring then it should count that
	 * work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;
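	/* e.g. with budget 64 and quota 50, pending tx completions bump the
	 * reported work up to the full 64, so NAPI keeps polling instead of
	 * re-enabling interrupts while the txdone fifo is non-empty.
	 */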
	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 * Note that with only one concurrent reader and one concurrent writer,
	 * you don't need extra locking to use these macros.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};
static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}
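/* Callers are expected to go through the rx_ops indirection rather than pick
 * a ring width themselves; a minimal sketch of such a wrapper (an illustrative
 * name, assuming only the ops tables above, not any particular helper in
 * htt.h):
 *
 *	static inline size_t ath10k_htt_rx_ring_bytes(struct ath10k_htt *htt)
 *	{
 *		return htt->rx_ops->htt_get_rx_ring_size(htt);
 *	}
 *
 * This way 32-bit and 64-bit targets share one fill/refill path and differ
 * only in descriptor width.
 */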