2 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
3 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 #ifndef __MT76X02_UTIL_H
19 #define __MT76X02_UTIL_H
21 #include <linux/kfifo.h>
23 #include "mt76x02_mac.h"
24 #include "mt76x02_dfs.h"
/*
 * NOTE(review): this chunk is a garbled extraction — the original file's
 * line numbers are baked into each line, and several lines are missing:
 * the entire body of mt76x02_mac_stats and the tail of
 * mt76x02_rx_freq_cal (remaining members and closing brace). Recover the
 * missing lines from the pristine source before building.
 */
26 struct mt76x02_mac_stats {
/* Maximum number of RX/TX chains supported by mt76x02 hardware. */
34 #define MT_MAX_CHAINS 2
/* Per-frequency RX calibration data, one entry per chain. */
35 struct mt76x02_rx_freq_cal {
36 s8 high_gain[MT_MAX_CHAINS];
37 s8 rssi_offset[MT_MAX_CHAINS];
/*
 * NOTE(review): mt76x02_calibration is truncated in this chunk — members
 * between the visible fields (and the closing brace) are missing; restore
 * them from the pristine source.
 */
44 struct mt76x02_calibration {
45 struct mt76x02_rx_freq_cal rx;
/* AGC gain per RX chain: initial values and currently programmed values. */
47 u8 agc_gain_init[MT_MAX_CHAINS];
48 u8 agc_gain_cur[MT_MAX_CHAINS];
/* State flags for in-progress / completed calibration steps. */
59 bool tssi_comp_pending;
61 bool channel_cal_done;
/*
 * NOTE(review): these are interior members of a device structure whose
 * opening `struct ... {` line, many members, and closing brace are missing
 * from this garbled chunk — recover the full definition from the pristine
 * source.
 */
65 struct mt76_dev mt76; /* must be first */
67 struct mac_address macaddr_list[8];
69 struct mutex phy_mutex;
/* FIFO buffering hardware TX status reports for deferred processing. */
73 DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
/* Head of an RX frame being reassembled across buffers. */
75 struct sk_buff *rx_head;
/* Deferred work: TX scheduling, pre-TBTT (beacon) handling, and periodic
 * calibration / MAC maintenance. */
77 struct tasklet_struct tx_tasklet;
78 struct tasklet_struct pre_tbtt_tasklet;
79 struct delayed_work cal_work;
80 struct delayed_work mac_work;
82 struct mt76x02_mac_stats stats;
83 atomic_t avg_ampdu_len;
/* One pending beacon template per beacon slot. */
86 struct sk_buff *beacons[8];
93 struct mt76x02_calibration cal;
96 s8 target_power_delta[2];
106 struct mt76x02_dfs_pattern_detector dfs_pd;
109 extern struct ieee80211_rate mt76x02_rates[12];
111 void mt76x02_configure_filter(struct ieee80211_hw *hw,
112 unsigned int changed_flags,
113 unsigned int *total_flags, u64 multicast);
114 int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
115 struct ieee80211_sta *sta);
116 int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
117 struct ieee80211_sta *sta);
119 void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif,
121 int mt76x02_add_interface(struct ieee80211_hw *hw,
122 struct ieee80211_vif *vif);
123 void mt76x02_remove_interface(struct ieee80211_hw *hw,
124 struct ieee80211_vif *vif);
126 int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
127 struct ieee80211_ampdu_params *params);
128 int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
129 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
130 struct ieee80211_key_conf *key);
131 int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
132 u16 queue, const struct ieee80211_tx_queue_params *params);
133 void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
134 struct ieee80211_vif *vif,
135 struct ieee80211_sta *sta);
136 s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
137 const struct ieee80211_tx_rate *rate);
138 int mt76x02_insert_hdr_pad(struct sk_buff *skb);
139 void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
140 void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb);
141 void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
142 struct mt76_queue_entry *e, bool flush);
143 bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update);
144 void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
145 struct sk_buff *skb);
147 extern const u16 mt76x02_beacon_offsets[16];
148 void mt76x02_set_beacon_offsets(struct mt76_dev *dev);
149 void mt76x02_set_irq_mask(struct mt76_dev *dev, u32 clear, u32 set);
150 void mt76x02_mac_start(struct mt76_dev *dev);
152 static inline void mt76x02_irq_enable(struct mt76_dev *dev, u32 mask)
154 mt76x02_set_irq_mask(dev, 0, mask);
157 static inline void mt76x02_irq_disable(struct mt76_dev *dev, u32 mask)
159 mt76x02_set_irq_mask(dev, mask, 0);
/*
 * Poll MT_MAC_STATUS until the TX and RX busy bits clear.
 * NOTE(review): garbled — the `static inline <type>` return-type line, the
 * braces, and the final arguments of the __mt76_poll_msec() call (expected
 * value and timeout) are missing from this chunk; restore them from the
 * pristine source.
 */
163 mt76x02_wait_for_txrx_idle(struct mt76_dev *dev)
165 return __mt76_poll_msec(dev, MT_MAC_STATUS,
166 MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
/*
 * Look up the mt76x02_sta for a hardware WCID index.
 * Uses rcu_dereference(), so the caller must be in an RCU read-side
 * critical section.
 * NOTE(review): garbled — the braces and the bodies of the guard branches
 * (presumably `return NULL;`, and likely a NULL check on `wcid` before the
 * final return) are missing from this chunk; restore from the pristine
 * source.
 */
170 static inline struct mt76x02_sta *
171 mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx)
173 struct mt76_wcid *wcid;
/* Reject out-of-range hardware indices (branch body missing here). */
175 if (idx >= ARRAY_SIZE(dev->wcid))
178 wcid = rcu_dereference(dev->wcid[idx]);
/* wcid is embedded in struct mt76x02_sta; recover the container. */
182 return container_of(wcid, struct mt76x02_sta, wcid);
/*
 * Select the wcid to attribute an RX frame to.
 * NOTE(review): garbled — the function body between the signature and this
 * return (braces, NULL check, and the `unicast` branch) is missing from
 * this chunk; the visible return presumably covers the non-unicast path,
 * mapping to the per-vif group wcid — confirm against the pristine source.
 */
185 static inline struct mt76_wcid *
186 mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast)
194 return &sta->vif->group_wcid;