/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
18 #include <linux/kernel.h>
21 #include "mt76x02_phy.h"
23 void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
27 val = mt76_rr(dev, MT_BBP(AGC, 0));
30 switch (dev->mt76.chainmask & 0xf) {
39 mt76_wr(dev, MT_BBP(AGC, 0), val);
41 val = mt76_rr(dev, MT_BBP(AGC, 0));
43 EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath);
45 void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
49 txpath = (dev->mt76.chainmask >> 8) & 0xf;
52 mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
55 mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
59 EXPORT_SYMBOL_GPL(mt76x02_phy_set_txdac);
62 mt76x02_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
66 val |= (v1 & (BIT(6) - 1)) << 0;
67 val |= (v2 & (BIT(6) - 1)) << 8;
68 val |= (v3 & (BIT(6) - 1)) << 16;
69 val |= (v4 & (BIT(6) - 1)) << 24;
73 int mt76x02_get_max_rate_power(struct mt76_rate_power *r)
78 for (i = 0; i < sizeof(r->all); i++)
79 ret = max(ret, r->all[i]);
83 EXPORT_SYMBOL_GPL(mt76x02_get_max_rate_power);
85 void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit)
89 for (i = 0; i < sizeof(r->all); i++)
90 if (r->all[i] > limit)
93 EXPORT_SYMBOL_GPL(mt76x02_limit_rate_power);
95 void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
99 for (i = 0; i < sizeof(r->all); i++)
102 EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);
104 void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
106 struct mt76_rate_power *t = &dev->mt76.rate_power;
108 mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
109 mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
111 mt76_wr(dev, MT_TX_PWR_CFG_0,
112 mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
114 mt76_wr(dev, MT_TX_PWR_CFG_1,
115 mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
117 mt76_wr(dev, MT_TX_PWR_CFG_2,
118 mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
120 mt76_wr(dev, MT_TX_PWR_CFG_3,
121 mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
123 mt76_wr(dev, MT_TX_PWR_CFG_4,
124 mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
125 mt76_wr(dev, MT_TX_PWR_CFG_7,
126 mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
128 mt76_wr(dev, MT_TX_PWR_CFG_8,
129 mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
130 mt76_wr(dev, MT_TX_PWR_CFG_9,
131 mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
133 EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
135 int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev)
137 struct mt76x02_sta *sta;
138 struct mt76_wcid *wcid;
139 int i, j, min_rssi = 0;
145 for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) {
146 unsigned long mask = dev->mt76.wcid_mask[i];
151 for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
155 wcid = rcu_dereference(dev->mt76.wcid[j]);
159 sta = container_of(wcid, struct mt76x02_sta, wcid);
160 spin_lock(&dev->mt76.rx_lock);
161 if (sta->inactive_count++ < 5)
162 cur_rssi = ewma_signal_read(&sta->rssi);
165 spin_unlock(&dev->mt76.rx_lock);
167 if (cur_rssi < min_rssi)
180 EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi);
182 void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
184 int core_val, agc_val;
187 case NL80211_CHAN_WIDTH_80:
191 case NL80211_CHAN_WIDTH_40:
201 mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
202 mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
203 mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
204 mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
206 EXPORT_SYMBOL_GPL(mt76x02_phy_set_bw);
208 void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
212 case NL80211_BAND_2GHZ:
213 mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
214 mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
216 case NL80211_BAND_5GHZ:
217 mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
218 mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
222 mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
225 EXPORT_SYMBOL_GPL(mt76x02_phy_set_band);
227 bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
229 u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
233 false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
234 dev->cal.false_cca = false_cca;
235 if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) {
236 dev->cal.agc_gain_adjust += 2;
238 } else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
239 (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) {
240 dev->cal.agc_gain_adjust -= 2;
246 EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);