/**
 * Copyright (c) 2014 Redpine Signals Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "rsi_mgmt.h"
#include "rsi_common.h"
#include "rsi_hal.h"
#include "rsi_coex.h"

/**
 * rsi_determine_min_weight_queue() - This function determines the queue with
 *                                    the min weight.
 * @common: Pointer to the driver private structure.
 *
 * Return: q_num: Corresponding queue number.
 */
static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
{
        struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
        u32 q_len = 0;
        u8 ii = 0;

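        /*
         * Scan the queues in priority order: the first queue that both holds
         * packets and is contending for the medium defines the current
         * minimum weight. If no queue qualifies, ii is returned as
         * NUM_EDCA_QUEUES.
         */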
        for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
                q_len = skb_queue_len(&common->tx_queue[ii]);
                if ((tx_qinfo[ii].pkt_contended) && q_len) {
                        common->min_weight = tx_qinfo[ii].weight;
                        break;
                }
        }
        return ii;
}

/**
 * rsi_recalculate_weights() - This function recalculates the weights
 *                             corresponding to each queue.
 * @common: Pointer to the driver private structure.
 *
 * Return: true if any queue was freshly marked for contention, false
 *         otherwise.
 */
static bool rsi_recalculate_weights(struct rsi_common *common)
{
        struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
        bool recontend_queue = false;
        u8 ii = 0;
        u32 q_len = 0;

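        /*
         * Queues that already contended and still hold packets have the
         * current minimum weight subtracted from their weight; queues with
         * packets that were not yet contending are marked as contending and
         * reloaded with their WME parameter, which makes the caller re-run
         * queue selection (recontend_queue = true).
         */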
        for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
                q_len = skb_queue_len(&common->tx_queue[ii]);
                /* Check for the need of contention */
                if (q_len) {
                        if (tx_qinfo[ii].pkt_contended) {
                                tx_qinfo[ii].weight =
                                ((tx_qinfo[ii].weight > common->min_weight) ?
                                 tx_qinfo[ii].weight - common->min_weight : 0);
                        } else {
                                tx_qinfo[ii].pkt_contended = 1;
                                tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
                                recontend_queue = true;
                        }
                } else { /* No packets so no contention */
                        tx_qinfo[ii].weight = 0;
                        tx_qinfo[ii].pkt_contended = 0;
                }
        }

        return recontend_queue;
}

/**
 * rsi_get_num_pkts_dequeue() - This function determines the number of
 *                              packets to be dequeued based on the number
 *                              of bytes calculated using txop.
 *
 * @common: Pointer to the driver private structure.
 * @q_num: the queue from which pkts have to be dequeued
 *
 * Return: pkt_num: Number of pkts to be dequeued.
 */
static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
{
        struct rsi_hw *adapter = common->priv;
        struct sk_buff *skb;
        u32 pkt_cnt = 0;
        s16 txop = common->tx_qinfo[q_num].txop * 32;
        __le16 r_txop;
        struct ieee80211_rate rate;
        struct ieee80211_hdr *wh;
        struct ieee80211_vif *vif;

        rate.bitrate = RSI_RATE_MCS0 * 5 * 10; /* Convert to Kbps */
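        /*
         * The EDCA TXOP limit is configured in units of 32 us, so txop above
         * is the budget in microseconds. The video queue's budget is scaled
         * down further (multiplied by 32/80), presumably as a driver tuning
         * heuristic.
         */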
        if (q_num == VI_Q)
                txop = ((txop << 5) / 80);

        if (skb_queue_len(&common->tx_queue[q_num]))
                skb = skb_peek(&common->tx_queue[q_num]);
        else
                return 0;

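        /*
         * Walk the queued frames, subtracting each frame's estimated on-air
         * duration (as reported by ieee80211_generic_frame_duration()) from
         * the TXOP budget. The number of frames that fit is returned so the
         * caller can burst that many packets from this queue.
         */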
        do {
                wh = (struct ieee80211_hdr *)skb->data;
                vif = rsi_get_vif(adapter, wh->addr2);
                r_txop = ieee80211_generic_frame_duration(adapter->hw,
                                                          vif,
                                                          common->band,
                                                          skb->len, &rate);
                txop -= le16_to_cpu(r_txop);
                pkt_cnt += 1;
                /* checking if pkts are still there */
                if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)
                        skb = skb->next;
                else
                        break;

        } while (txop > 0);

        return pkt_cnt;
}

/**
 * rsi_core_determine_hal_queue() - This function determines the queue from
 *                                  which packet has to be dequeued.
 * @common: Pointer to the driver private structure.
 *
 * Return: q_num: Corresponding queue number on success.
 */
static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
{
        bool recontend_queue = false;
        u32 q_len = 0;
        u8 q_num = INVALID_QUEUE;
        u8 ii = 0;

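        /*
         * Selection order: pending beacons first, then the management soft
         * queue (unless management traffic is blocked), then the EDCA data
         * queues via the weighted backoff below. If a TXOP burst count from
         * a previous pass is still outstanding, keep draining the queue that
         * was selected last time.
         */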
        if (skb_queue_len(&common->tx_queue[MGMT_BEACON_Q])) {
                q_num = MGMT_BEACON_Q;
                return q_num;
        }
        if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
                if (!common->mgmt_q_block)
                        q_num = MGMT_SOFT_Q;
                return q_num;
        }

        if (common->hw_data_qs_blocked)
                return q_num;

        if (common->pkt_cnt != 0) {
                --common->pkt_cnt;
                return common->selected_qnum;
        }

get_queue_num:
        recontend_queue = false;

        q_num = rsi_determine_min_weight_queue(common);

        ii = q_num;

        /* Selecting the queue with least back off */
        for (; ii < NUM_EDCA_QUEUES; ii++) {
                q_len = skb_queue_len(&common->tx_queue[ii]);
                if (((common->tx_qinfo[ii].pkt_contended) &&
                     (common->tx_qinfo[ii].weight < common->min_weight)) &&
                      q_len) {
                        common->min_weight = common->tx_qinfo[ii].weight;
                        q_num = ii;
                }
        }

        if (q_num < NUM_EDCA_QUEUES)
                common->tx_qinfo[q_num].pkt_contended = 0;

        /* Adjust the back off values for all queues again */
        recontend_queue = rsi_recalculate_weights(common);

        q_len = skb_queue_len(&common->tx_queue[q_num]);
        if (!q_len) {
                /* If any queue is freshly contended and the selected queue
                 * doesn't have any packets, then get the queue number again
                 * with fresh values.
                 */
                if (recontend_queue)
                        goto get_queue_num;

                q_num = INVALID_QUEUE;
                return q_num;
        }

        common->selected_qnum = q_num;
        q_len = skb_queue_len(&common->tx_queue[q_num]);

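        /*
         * For the voice and video queues, compute how many frames fit into
         * one TXOP and burst them back to back; the count is decremented by
         * one because the current call already hands out the first frame.
         */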
        if (q_num == VO_Q || q_num == VI_Q) {
                common->pkt_cnt = rsi_get_num_pkts_dequeue(common, q_num);
                common->pkt_cnt -= 1;
        }

        return q_num;
}

/**
 * rsi_core_queue_pkt() - This function enqueues the packet to the queue
 *                        specified by the queue number.
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
static void rsi_core_queue_pkt(struct rsi_common *common,
                               struct sk_buff *skb)
{
        u8 q_num = skb->priority;

        if (q_num >= NUM_SOFT_QUEUES) {
                rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
                        __func__, q_num);
                dev_kfree_skb(skb);
                return;
        }

        skb_queue_tail(&common->tx_queue[q_num], skb);
}

/**
 * rsi_core_dequeue_pkt() - This function dequeues the packet from the queue
 *                          specified by the queue number.
 * @common: Pointer to the driver private structure.
 * @q_num: Queue number.
 *
 * Return: Pointer to sk_buff structure.
 */
static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
                                            u8 q_num)
{
        if (q_num >= NUM_SOFT_QUEUES) {
                rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
                        __func__, q_num);
                return NULL;
        }

        return skb_dequeue(&common->tx_queue[q_num]);
}

/**
 * rsi_core_qos_processor() - This function is used to determine the wmm queue
 *                            based on the backoff procedure. Data packets are
 *                            dequeued from the selected hal queue and sent to
 *                            the lower layers.
 * @common: Pointer to the driver private structure.
 *
 * Return: None.
 */
void rsi_core_qos_processor(struct rsi_common *common)
{
        struct rsi_hw *adapter = common->priv;
        struct sk_buff *skb;
        unsigned long tstamp_1, tstamp_2;
        u8 q_num;
        int status;

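        /*
         * Drain the software queues until nothing is left to send or the
         * hardware reports no room. tstamp_1 marks when the loop started;
         * after roughly 300 ms of continuous draining the thread yields the
         * CPU via schedule() (see the time_after() check at the bottom).
         */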
        tstamp_1 = jiffies;
        while (1) {
                q_num = rsi_core_determine_hal_queue(common);
                rsi_dbg(DATA_TX_ZONE,
                        "%s: Queue number = %d\n", __func__, q_num);

                if (q_num == INVALID_QUEUE) {
                        rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
                        break;
                }
                if (common->hibernate_resume)
                        break;

                mutex_lock(&common->tx_lock);

                status = adapter->check_hw_queue_status(adapter, q_num);
                if (status <= 0) {
                        mutex_unlock(&common->tx_lock);
                        break;
                }

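                /*
                 * Once a data soft queue has drained below
                 * MIN_DATA_QUEUE_WATER_MARK, wake the corresponding mac80211
                 * AC queue so new frames can flow in again.
                 */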
                if ((q_num < MGMT_SOFT_Q) &&
                    ((skb_queue_len(&common->tx_queue[q_num])) <=
                      MIN_DATA_QUEUE_WATER_MARK)) {
                        if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
                                ieee80211_wake_queue(adapter->hw,
                                                     WME_AC(q_num));
                }

                skb = rsi_core_dequeue_pkt(common, q_num);
                if (skb == NULL) {
                        rsi_dbg(ERR_ZONE, "skb null\n");
                        mutex_unlock(&common->tx_lock);
                        break;
                }
                if (q_num == MGMT_BEACON_Q) {
                        status = rsi_send_pkt_to_bus(common, skb);
                        dev_kfree_skb(skb);
                } else {
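                        /*
                         * When coexistence is enabled (coex_mode > 1), the
                         * frame is routed through the coex arbiter on the
                         * WLAN queue; otherwise it is sent directly as a
                         * management or data packet.
                         */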
#ifdef CONFIG_RSI_COEX
                        if (common->coex_mode > 1) {
                                status = rsi_coex_send_pkt(common, skb,
                                                           RSI_WLAN_Q);
                        } else {
#endif
                                if (q_num == MGMT_SOFT_Q)
                                        status = rsi_send_mgmt_pkt(common, skb);
                                else
                                        status = rsi_send_data_pkt(common, skb);
#ifdef CONFIG_RSI_COEX
                        }
#endif
                }

                if (status) {
                        mutex_unlock(&common->tx_lock);
                        break;
                }

                common->tx_stats.total_tx_pkt_send[q_num]++;

                tstamp_2 = jiffies;
                mutex_unlock(&common->tx_lock);

                if (time_after(tstamp_2, tstamp_1 + (300 * HZ) / 1000))
                        schedule();
        }
}

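/**
 * rsi_find_sta() - This function looks up the connected station entry that
 *                  matches the given MAC address.
 * @common: Pointer to the driver private structure.
 * @mac_addr: MAC address to search for.
 *
 * Return: Pointer to the matching rsi_sta entry, or NULL if none is found.
 */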
struct rsi_sta *rsi_find_sta(struct rsi_common *common, u8 *mac_addr)
{
        int i;

        for (i = 0; i < common->max_stations; i++) {
                if (!common->stations[i].sta)
                        continue;
                if (!(memcmp(common->stations[i].sta->addr,
                             mac_addr, ETH_ALEN)))
                        return &common->stations[i];
        }
        return NULL;
}

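/**
 * rsi_get_vif() - This function returns the virtual interface whose address
 *                 matches the given MAC address.
 * @adapter: Pointer to the hardware structure.
 * @mac: MAC address to match against the configured interfaces.
 *
 * Return: Pointer to the matching ieee80211_vif, or NULL if none is found.
 */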
struct ieee80211_vif *rsi_get_vif(struct rsi_hw *adapter, u8 *mac)
{
        struct ieee80211_vif *vif;
        int i;

        for (i = 0; i < RSI_MAX_VIFS; i++) {
                vif = adapter->vifs[i];
                if (!vif)
                        continue;
                if (!memcmp(vif->addr, mac, ETH_ALEN))
                        return vif;
        }
        return NULL;
}

/**
 * rsi_core_xmit() - This function transmits the packets received from mac80211
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
{
        struct rsi_hw *adapter = common->priv;
        struct ieee80211_tx_info *info;
        struct skb_info *tx_params;
        struct ieee80211_hdr *wh = NULL;
        struct ieee80211_vif *vif;
        u8 q_num, tid = 0;
        struct rsi_sta *rsta = NULL;

        if ((!skb) || (!skb->len)) {
                rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
                        __func__);
                goto xmit_fail;
        }
        if (common->fsm_state != FSM_MAC_INIT_DONE) {
                rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
                goto xmit_fail;
        }
        if (common->wow_flags & RSI_WOW_ENABLED) {
                rsi_dbg(ERR_ZONE,
                        "%s: Blocking Tx_packets when WOWLAN is enabled\n",
                        __func__);
                goto xmit_fail;
        }

        info = IEEE80211_SKB_CB(skb);
        tx_params = (struct skb_info *)info->driver_data;
        wh = (struct ieee80211_hdr *)&skb->data[0];
        tx_params->sta_id = 0;

        vif = rsi_get_vif(adapter, wh->addr2);
        if (!vif)
                goto xmit_fail;
        tx_params->vif = vif;
        tx_params->vap_id = ((struct vif_priv *)vif->drv_priv)->vap_id;
        if ((ieee80211_is_mgmt(wh->frame_control)) ||
            (ieee80211_is_ctl(wh->frame_control)) ||
            (ieee80211_is_qos_nullfunc(wh->frame_control))) {
                if (ieee80211_is_assoc_req(wh->frame_control) ||
                    ieee80211_is_reassoc_req(wh->frame_control)) {
                        struct ieee80211_bss_conf *bss = &vif->bss_conf;

                        common->eapol4_confirm = false;
                        rsi_hal_send_sta_notify_frame(common,
                                                      RSI_IFTYPE_STATION,
                                                      STA_CONNECTED, bss->bssid,
                                                      bss->qos, bss->aid, 0,
                                                      vif);
                }

                q_num = MGMT_SOFT_Q;
                skb->priority = q_num;

                if (rsi_prepare_mgmt_desc(common, skb)) {
                        rsi_dbg(ERR_ZONE, "Failed to prepare desc\n");
                        goto xmit_fail;
                }
        } else {
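                /*
                 * Data path: derive the WME access category from the QoS TID
                 * for QoS data frames; non-QoS data frames go on the
                 * best-effort queue.
                 */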
                if (ieee80211_is_data_qos(wh->frame_control)) {
                        u8 *qos = ieee80211_get_qos_ctl(wh);

                        tid = *qos & IEEE80211_QOS_CTL_TID_MASK;
                        skb->priority = TID_TO_WME_AC(tid);
                } else {
                        tid = IEEE80211_NONQOS_TID;
                        skb->priority = BE_Q;
                }

                q_num = skb->priority;
                tx_params->tid = tid;

                if (((vif->type == NL80211_IFTYPE_AP) ||
                     (vif->type == NL80211_IFTYPE_P2P_GO)) &&
                    (!is_broadcast_ether_addr(wh->addr1)) &&
                    (!is_multicast_ether_addr(wh->addr1))) {
                        rsta = rsi_find_sta(common, wh->addr1);
                        if (!rsta)
                                goto xmit_fail;
                        tx_params->sta_id = rsta->sta_id;
                } else {
                        tx_params->sta_id = 0;
                }

                if (rsta) {
                        /* Start aggregation if not done for this tid */
                        if (!rsta->start_tx_aggr[tid]) {
                                rsta->start_tx_aggr[tid] = true;
                                ieee80211_start_tx_ba_session(rsta->sta,
                                                              tid, 0);
                        }
                }
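                /*
                 * EAPOL frames are sent on the management soft queue so that
                 * the 802.1X handshake does not wait behind queued bulk data.
                 */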
                if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
                        q_num = MGMT_SOFT_Q;
                        skb->priority = q_num;
                }
                if (rsi_prepare_data_desc(common, skb)) {
                        rsi_dbg(ERR_ZONE, "Failed to prepare data desc\n");
                        goto xmit_fail;
                }
        }

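        /*
         * If adding this frame would reach the software queue water mark,
         * stop the corresponding mac80211 AC queue, kick the TX thread to
         * drain the backlog, and drop the frame.
         */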
        if ((q_num < MGMT_SOFT_Q) &&
            ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
             DATA_QUEUE_WATER_MARK)) {
                rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
                if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
                        ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
                rsi_set_event(&common->tx_thread.event);
                goto xmit_fail;
        }

        rsi_core_queue_pkt(common, skb);
        rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
        rsi_set_event(&common->tx_thread.event);

        return;

xmit_fail:
        rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
        /* Dropping pkt here */
        ieee80211_free_txskb(common->priv->hw, skb);
}