/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
struct mt76_dev;
struct mt76_wcid;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};
#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)
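/*
 * Note: these helpers (and the mt76_* register macros further down) expect
 * the caller's driver-private structure to embed struct mt76_dev in a member
 * named "mt76". Illustrative sketch only; the structure name is a stand-in
 * for whatever per-chip structure a driver defines:
 *
 *	struct my_chip_dev {
 *		struct mt76_dev mt76;
 *		// chip-specific state follows
 *	};
 */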
enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};
struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct mt76u_buf ubuf;
	};
	bool schedule;
};
struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct list_head swq;
	/* ... */

	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
	spinlock_t rx_page_lock;
};
struct mt76_mcu_ops {
	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};
enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

#define MT76_N_WCIDS 128
DECLARE_EWMA(signal, 10, 8);

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	struct work_struct aggr_work;

	unsigned long flags;

	struct ewma_signal rssi;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
	/* ... */
};
struct mt76_txq {
	struct list_head list;
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};
struct mt76_txwi_cache {
	/* ... */
	struct list_head list;
};
struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u8 size;
	u8 nframes;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};
#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(7, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2

#define MT_TX_STATUS_SKB_TIMEOUT	HZ
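/*
 * Packet IDs below MT_PACKET_ID_FIRST are reserved (frame sent with no ACK
 * requested, or status tracked without an skb); IDs from MT_PACKET_ID_FIRST
 * upward are used to match hardware TX status reports against skbs queued
 * on the status list, which is aged out after MT_TX_STATUS_SKB_TIMEOUT by
 * mt76_tx_status_check().
 */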
struct mt76_tx_cb {
	unsigned long jiffies;
	u8 wcid;
	u8 pktid;
	u8 flags;
};
enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	/* ... */
};
struct mt76_driver_ops {
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};
struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_rate_power {
	/* ... */
};
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))

enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
};
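/*
 * The values above are USB control request codes; they are passed as the
 * "req" argument to mt76u_vendor_request()/mt76u_single_wr() declared
 * further down.
 */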
enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};
#define MT_SG_MAX_SIZE		8
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024

struct mt76_usb {
	struct mutex usb_ctrl_mtx;

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	struct delayed_work stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	/* ... */

	struct mt76u_mcu {
		/* ... */
		struct mt76_reg_pair *rp;
		/* ... */
	} mcu;
};

struct mt76_mmio {
	struct mt76e_mcu {
		/* ... */
		wait_queue_head_t wait;
		struct sk_buff_head res_q;
		/* ... */
	} mcu;
	/* ... */
};
struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;

	struct net_device napi_dev;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	wait_queue_head_t tx_wait;
	struct sk_buff_head status_list;

	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

	u8 macaddr[ETH_ALEN];
	u32 rev;
	unsigned long state;
	/* ... */

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct mt76_rate_power rate_power;

	struct led_classdev led_cdev;
	/* ... */

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
	};
};
struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;
	/* ... */
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};
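/*
 * struct mt76_rx_status is stashed in skb->cb by the per-chip RX path and
 * later converted into mac80211's ieee80211_rx_status (see mt76_rx_convert()
 * below) before frames are handed to the stack.
 */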
#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)
#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
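/*
 * Typical register access from a driver, sketched with a made-up register
 * and field definition (MT_EXAMPLE_CFG/MT_EXAMPLE_CFG_MODE are illustrative
 * only, not part of this header):
 *
 *	#define MT_EXAMPLE_CFG		0x1234
 *	#define MT_EXAMPLE_CFG_MODE	GENMASK(3, 0)
 *
 *	mt76_set(dev, MT_EXAMPLE_CFG, BIT(31));
 *	mt76_rmw_field(dev, MT_EXAMPLE_CFG, MT_EXAMPLE_CFG_MODE, 2);
 *	val = mt76_get_field(dev, MT_EXAMPLE_CFG, MT_EXAMPLE_CFG_MODE);
 */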
#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
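/*
 * These wrappers dispatch to whatever queue backend the bus layer registered
 * in dev->mt76.queue_ops (the DMA ring implementation for MMIO devices, the
 * URB-based one for USB), so driver code does not touch the ops table
 * directly.
 */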
static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}
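/*
 * Both helpers assume size is a power of two, so the bitwise AND performs
 * the wrap-around, e.g. mt76_incr(255, 256) == 0 and mt76_decr(0, 256) == 255.
 */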
u8 mt76_ac_to_hwq(u8 ac);
static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}
static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}
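/*
 * The per-packet TX state (struct mt76_tx_cb) lives inside mac80211's
 * ieee80211_tx_info status area; the BUILD_BUG_ON above guards against it
 * outgrowing status_driver_data.
 */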
int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);
void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
			 __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
			   __releases(&dev->status_list.lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
			  bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);

int mt76_get_min_avg_rssi(struct mt76_dev *dev);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);
void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}
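/*
 * -ECONNRESET, -ESHUTDOWN and -ENOENT are the statuses seen when an URB is
 * killed or the device is going away, so they are not reported as errors
 * here.
 */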
/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_submit_rx_buffers(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_queues(struct mt76_dev *dev);
void mt76u_stop_stat_wk(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
		   int data_len, int tail_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);

#endif