[linux.git] drivers/net/wireless/ath/wcn36xx/dxe.c (blob 5ab3e31c9ffadab87a57617701650117bc1fd28d)
1 /*
2  * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11  * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13  * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16
17 /* DXE - DMA transfer engine
18  * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
19  * Data packets are transferred through the low-priority channels,
20  * management packets through the high-priority channels.
21  */
22
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
25 #include <linux/interrupt.h>
26 #include <linux/soc/qcom/smem_state.h>
27 #include "wcn36xx.h"
28 #include "txrx.h"
29
30 static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
31 {
32         wcn36xx_dbg(WCN36XX_DBG_DXE,
33                     "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
34                     addr, data);
35
36         writel(data, wcn->ccu_base + addr);
37 }
38
39 static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
40 {
41         wcn36xx_dbg(WCN36XX_DBG_DXE,
42                     "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
43                     addr, data);
44
45         writel(data, wcn->dxe_base + addr);
46 }
47
48 static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
49 {
50         *data = readl(wcn->dxe_base + addr);
51
52         wcn36xx_dbg(WCN36XX_DBG_DXE,
53                     "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
54                     addr, *data);
55 }
56
57 static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
58 {
59         struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
60         int i;
61
62         for (i = 0; i < ch->desc_num && ctl; i++) {
63                 next = ctl->next;
64                 kfree(ctl);
65                 ctl = next;
66         }
67 }
68
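/*
 * wcn36xx_dxe_allocate_ctl_block - allocate the control block ring of a channel
 *
 * Allocates ch->desc_num control blocks and links them into a circular
 * singly linked list: the last block points back to head_blk_ctl.  Both
 * head_blk_ctl and tail_blk_ctl start at the first block.  Returns 0 on
 * success or -ENOMEM if an allocation fails (blocks allocated so far are
 * freed again).
 */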
69 static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
70 {
71         struct wcn36xx_dxe_ctl *prev_ctl = NULL;
72         struct wcn36xx_dxe_ctl *cur_ctl = NULL;
73         int i;
74
75         spin_lock_init(&ch->lock);
76         for (i = 0; i < ch->desc_num; i++) {
77                 cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
78                 if (!cur_ctl)
79                         goto out_fail;
80
81                 cur_ctl->ctl_blk_order = i;
82                 if (i == 0) {
83                         ch->head_blk_ctl = cur_ctl;
84                         ch->tail_blk_ctl = cur_ctl;
85                 } else if (ch->desc_num - 1 == i) {
86                         prev_ctl->next = cur_ctl;
87                         cur_ctl->next = ch->head_blk_ctl;
88                 } else {
89                         prev_ctl->next = cur_ctl;
90                 }
91                 prev_ctl = cur_ctl;
92         }
93
94         return 0;
95
96 out_fail:
97         wcn36xx_dxe_free_ctl_block(ch);
98         return -ENOMEM;
99 }
100
101 int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
102 {
103         int ret;
104
105         wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
106         wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
107         wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
108         wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
109
110         wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
111         wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
112         wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
113         wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
114
115         wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
116         wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;
117
118         wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
119         wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
120
121         wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
122         wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
123
124         wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
125         wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
126
127         wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
128         wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
129
130         /* DXE control block allocation */
131         ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
132         if (ret)
133                 goto out_err;
134         ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
135         if (ret)
136                 goto out_err;
137         ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
138         if (ret)
139                 goto out_err;
140         ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
141         if (ret)
142                 goto out_err;
143
144         /* Initialize SMSM state: clear TX Enable, set TX Rings Empty */
145         ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
146                                           WCN36XX_SMSM_WLAN_TX_ENABLE |
147                                           WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
148                                           WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
149         if (ret)
150                 goto out_err;
151
152         return 0;
153
154 out_err:
155         wcn36xx_err("Failed to allocate DXE control blocks\n");
156         wcn36xx_dxe_free_ctl_blks(wcn);
157         return -ENOMEM;
158 }
159
160 void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
161 {
162         wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
163         wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
164         wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
165         wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
166 }
167
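/*
 * wcn36xx_dxe_init_descs - allocate and chain the hardware descriptor ring
 *
 * Allocates one coherent DMA buffer holding desc_num descriptors, attaches
 * each descriptor and its bus address to the matching control block and
 * chains the descriptors through phy_next_l so that the last one points
 * back to the head of the ring.  TX descriptors get the channel work queue
 * as destination address, RX descriptors get it as source address.
 */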
168 static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
169 {
170         struct wcn36xx_dxe_desc *cur_dxe = NULL;
171         struct wcn36xx_dxe_desc *prev_dxe = NULL;
172         struct wcn36xx_dxe_ctl *cur_ctl = NULL;
173         size_t size;
174         int i;
175
176         size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
177         wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size,
178                                                &wcn_ch->dma_addr,
179                                                GFP_KERNEL);
180         if (!wcn_ch->cpu_addr)
181                 return -ENOMEM;
182
183         cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
184         cur_ctl = wcn_ch->head_blk_ctl;
185
186         for (i = 0; i < wcn_ch->desc_num; i++) {
187                 cur_ctl->desc = cur_dxe;
188                 cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
189                         i * sizeof(struct wcn36xx_dxe_desc);
190
191                 switch (wcn_ch->ch_type) {
192                 case WCN36XX_DXE_CH_TX_L:
193                         cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
194                         cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
195                         break;
196                 case WCN36XX_DXE_CH_TX_H:
197                         cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
198                         cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
199                         break;
200                 case WCN36XX_DXE_CH_RX_L:
201                         cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
202                         cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
203                         break;
204                 case WCN36XX_DXE_CH_RX_H:
205                         cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
206                         cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
207                         break;
208                 }
209                 if (0 == i) {
210                         cur_dxe->phy_next_l = 0;
211                 } else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
212                         prev_dxe->phy_next_l =
213                                 cur_ctl->desc_phy_addr;
214                 } else if (i == (wcn_ch->desc_num - 1)) {
215                         prev_dxe->phy_next_l =
216                                 cur_ctl->desc_phy_addr;
217                         cur_dxe->phy_next_l =
218                                 wcn_ch->head_blk_ctl->desc_phy_addr;
219                 }
220                 cur_ctl = cur_ctl->next;
221                 prev_dxe = cur_dxe;
222                 cur_dxe++;
223         }
224
225         return 0;
226 }
227
228 static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
229 {
230         size_t size;
231
232         size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
233         dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
234 }
235
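/*
 * wcn36xx_dxe_init_tx_bd - hand out BD buffers from a preallocated pool
 *
 * TX descriptors are used in pairs: the even ones carry a buffer
 * descriptor (BD) chunk taken from the memory pool, the odd ones carry
 * the skb payload and therefore get no BD pointer.
 */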
236 static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
237                                    struct wcn36xx_dxe_mem_pool *pool)
238 {
239         int i, chunk_size = pool->chunk_size;
240         dma_addr_t bd_phy_addr = pool->phy_addr;
241         void *bd_cpu_addr = pool->virt_addr;
242         struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
243
244         for (i = 0; i < ch->desc_num; i++) {
245                 /* Only every second dxe needs a bd pointer,
246                  * the others will point to the skb data */
247                 if (!(i & 1)) {
248                         cur->bd_phy_addr = bd_phy_addr;
249                         cur->bd_cpu_addr = bd_cpu_addr;
250                         bd_phy_addr += chunk_size;
251                         bd_cpu_addr += chunk_size;
252                 } else {
253                         cur->bd_phy_addr = 0;
254                         cur->bd_cpu_addr = NULL;
255                 }
256                 cur = cur->next;
257         }
258 }
259
260 static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
261 {
262         int reg_data = 0;
263
264         wcn36xx_dxe_read_register(wcn,
265                                   WCN36XX_DXE_INT_MASK_REG,
266                                   &reg_data);
267
268         reg_data |= wcn_ch;
269
270         wcn36xx_dxe_write_register(wcn,
271                                    WCN36XX_DXE_INT_MASK_REG,
272                                    (int)reg_data);
273         return 0;
274 }
275
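/*
 * wcn36xx_dxe_fill_skb - attach a fresh receive buffer to a descriptor
 *
 * Allocates a WCN36XX_PKT_SIZE skb, maps it for DMA_FROM_DEVICE and stores
 * the mapping in the descriptor's dst_addr_l so the hardware can DMA a
 * received frame straight into the buffer.
 */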
276 static int wcn36xx_dxe_fill_skb(struct device *dev,
277                                 struct wcn36xx_dxe_ctl *ctl,
278                                 gfp_t gfp)
279 {
280         struct wcn36xx_dxe_desc *dxe = ctl->desc;
281         struct sk_buff *skb;
282
283         skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
284         if (skb == NULL)
285                 return -ENOMEM;
286
287         dxe->dst_addr_l = dma_map_single(dev,
288                                          skb_tail_pointer(skb),
289                                          WCN36XX_PKT_SIZE,
290                                          DMA_FROM_DEVICE);
291         if (dma_mapping_error(dev, dxe->dst_addr_l)) {
292                 dev_err(dev, "unable to map skb\n");
293                 kfree_skb(skb);
294                 return -ENOMEM;
295         }
296         ctl->skb = skb;
297
298         return 0;
299 }
300
301 static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
302                                     struct wcn36xx_dxe_ch *wcn_ch)
303 {
304         int i;
305         struct wcn36xx_dxe_ctl *cur_ctl = NULL;
306
307         cur_ctl = wcn_ch->head_blk_ctl;
308
309         for (i = 0; i < wcn_ch->desc_num; i++) {
310                 wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
311                 cur_ctl = cur_ctl->next;
312         }
313
314         return 0;
315 }
316
317 static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
318                                      struct wcn36xx_dxe_ch *wcn_ch)
319 {
320         struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
321         int i;
322
323         for (i = 0; i < wcn_ch->desc_num; i++) {
324                 kfree_skb(cur->skb);
325                 cur = cur->next;
326         }
327 }
328
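/*
 * wcn36xx_dxe_tx_ack_ind - complete a frame that requested a TX status report
 *
 * Hands the stored tx_ack_skb back to mac80211, setting
 * IEEE80211_TX_STAT_ACK when status == 1, and wakes the transmit queues.
 * Warns about a spurious indication if no skb is pending.
 */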
329 void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
330 {
331         struct ieee80211_tx_info *info;
332         struct sk_buff *skb;
333         unsigned long flags;
334
335         spin_lock_irqsave(&wcn->dxe_lock, flags);
336         skb = wcn->tx_ack_skb;
337         wcn->tx_ack_skb = NULL;
338         spin_unlock_irqrestore(&wcn->dxe_lock, flags);
339
340         if (!skb) {
341                 wcn36xx_warn("Spurious TX complete indication\n");
342                 return;
343         }
344
345         info = IEEE80211_SKB_CB(skb);
346
347         if (status == 1)
348                 info->flags |= IEEE80211_TX_STAT_ACK;
349
350         wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
351
352         ieee80211_tx_status_irqsafe(wcn->hw, skb);
353         ieee80211_wake_queues(wcn->hw);
354 }
355
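/*
 * reap_tx_dxes - reclaim TX descriptors the hardware has finished with
 *
 * Walks the ring from tail_blk_ctl up to the first descriptor that is
 * still marked valid, unmaps and frees the transmitted skbs (unless a TX
 * status report was requested for them) and wakes the mac80211 queues if
 * they had been stopped because the ring was full.
 */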
356 static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
357 {
358         struct wcn36xx_dxe_ctl *ctl;
359         struct ieee80211_tx_info *info;
360         unsigned long flags;
361
362         /*
363          * Use a do-while loop so at least one iteration runs: when the
364          * ring is completely full, head and tail point to the same element
365          * and a plain while loop would not iterate at all.
366          */
367         spin_lock_irqsave(&ch->lock, flags);
368         ctl = ch->tail_blk_ctl;
369         do {
370                 if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
371                         break;
372
373                 if (ctl->skb &&
374                     READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
375                         dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
376                                          ctl->skb->len, DMA_TO_DEVICE);
377                         info = IEEE80211_SKB_CB(ctl->skb);
378                         if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
379                                 /* No TX status requested, free the frame now */
380                                 ieee80211_free_txskb(wcn->hw, ctl->skb);
381                         }
382
383                         if (wcn->queues_stopped) {
384                                 wcn->queues_stopped = false;
385                                 ieee80211_wake_queues(wcn->hw);
386                         }
387
388                         ctl->skb = NULL;
389                 }
390                 ctl = ctl->next;
391         } while (ctl != ch->head_blk_ctl);
392
393         ch->tail_blk_ctl = ctl;
394         spin_unlock_irqrestore(&ch->lock, flags);
395 }
396
397 static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
398 {
399         struct wcn36xx *wcn = (struct wcn36xx *)dev;
400         int int_src, int_reason;
401
402         wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
403
404         if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
405                 wcn36xx_dxe_read_register(wcn,
406                                           WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
407                                           &int_reason);
408
409                 wcn36xx_dxe_write_register(wcn,
410                                            WCN36XX_DXE_0_INT_CLR,
411                                            WCN36XX_INT_MASK_CHAN_TX_H);
412
413                 if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
414                         wcn36xx_dxe_write_register(wcn,
415                                                    WCN36XX_DXE_0_INT_ERR_CLR,
416                                                    WCN36XX_INT_MASK_CHAN_TX_H);
417
418                         wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
419                                         int_src);
420                 }
421
422                 if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
423                         wcn36xx_dxe_write_register(wcn,
424                                                    WCN36XX_DXE_0_INT_DONE_CLR,
425                                                    WCN36XX_INT_MASK_CHAN_TX_H);
426                 }
427
428                 if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
429                         wcn36xx_dxe_write_register(wcn,
430                                                    WCN36XX_DXE_0_INT_ED_CLR,
431                                                    WCN36XX_INT_MASK_CHAN_TX_H);
432                 }
433
434                 wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
435                             int_reason);
436
437                 if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
438                                   WCN36XX_CH_STAT_INT_ED_MASK))
439                         reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
440         }
441
442         if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
443                 wcn36xx_dxe_read_register(wcn,
444                                           WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
445                                           &int_reason);
446
447                 wcn36xx_dxe_write_register(wcn,
448                                            WCN36XX_DXE_0_INT_CLR,
449                                            WCN36XX_INT_MASK_CHAN_TX_L);
450
451
452                 if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
453                         wcn36xx_dxe_write_register(wcn,
454                                                    WCN36XX_DXE_0_INT_ERR_CLR,
455                                                    WCN36XX_INT_MASK_CHAN_TX_L);
456
457                         wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
458                                         int_src);
459                 }
460
461                 if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
462                         wcn36xx_dxe_write_register(wcn,
463                                                    WCN36XX_DXE_0_INT_DONE_CLR,
464                                                    WCN36XX_INT_MASK_CHAN_TX_L);
465                 }
466
467                 if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
468                         wcn36xx_dxe_write_register(wcn,
469                                                    WCN36XX_DXE_0_INT_ED_CLR,
470                                                    WCN36XX_INT_MASK_CHAN_TX_L);
471                 }
472
473                 wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
474                             int_reason);
475
476                 if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
477                                   WCN36XX_CH_STAT_INT_ED_MASK))
478                         reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
479         }
480
481         return IRQ_HANDLED;
482 }
483
484 static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
485 {
486         struct wcn36xx *wcn = (struct wcn36xx *)dev;
487
488         wcn36xx_dxe_rx_frame(wcn);
489
490         return IRQ_HANDLED;
491 }
492
493 static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
494 {
495         int ret;
496
497         ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
498                           IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
499         if (ret) {
500                 wcn36xx_err("failed to alloc tx irq\n");
501                 goto out_err;
502         }
503
504         ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
505                           "wcn36xx_rx", wcn);
506         if (ret) {
507                 wcn36xx_err("failed to alloc rx irq\n");
508                 goto out_txirq;
509         }
510
511         enable_irq_wake(wcn->rx_irq);
512
513         return 0;
514
515 out_txirq:
516         free_irq(wcn->tx_irq, wcn);
517 out_err:
518         return ret;
519
520 }
521
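/*
 * wcn36xx_rx_handle_packets - process completed RX descriptors on one channel
 *
 * Acknowledges the channel interrupt, then walks the ring from
 * head_blk_ctl and hands every filled buffer to mac80211.  Each consumed
 * descriptor is immediately re-armed with a fresh skb; if that atomic
 * allocation fails the old buffer is kept and reused, so the frame is
 * effectively dropped.
 */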
522 static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
523                                      struct wcn36xx_dxe_ch *ch,
524                                      u32 ctrl,
525                                      u32 en_mask,
526                                      u32 int_mask,
527                                      u32 status_reg)
528 {
529         struct wcn36xx_dxe_desc *dxe;
530         struct wcn36xx_dxe_ctl *ctl;
531         dma_addr_t  dma_addr;
532         struct sk_buff *skb;
533         u32 int_reason;
534         int ret;
535
536         wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
537         wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);
538
539         if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
540                 wcn36xx_dxe_write_register(wcn,
541                                            WCN36XX_DXE_0_INT_ERR_CLR,
542                                            int_mask);
543
544                 wcn36xx_err("DXE IRQ reported error on RX channel\n");
545         }
546
547         if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
548                 wcn36xx_dxe_write_register(wcn,
549                                            WCN36XX_DXE_0_INT_DONE_CLR,
550                                            int_mask);
551
552         if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
553                 wcn36xx_dxe_write_register(wcn,
554                                            WCN36XX_DXE_0_INT_ED_CLR,
555                                            int_mask);
556
557         if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
558                             WCN36XX_CH_STAT_INT_ED_MASK)))
559                 return 0;
560
561         spin_lock(&ch->lock);
562
563         ctl = ch->head_blk_ctl;
564         dxe = ctl->desc;
565
566         while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
567                 skb = ctl->skb;
568                 dma_addr = dxe->dst_addr_l;
569                 ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
570                 if (0 == ret) {
571                         /* New skb allocation ok. Use the new one and hand
572                          * the old one over to the network stack.
573                          */
574                         dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
575                                         DMA_FROM_DEVICE);
576                         wcn36xx_rx_skb(wcn, skb);
577                 } /* else keep the old, unsubmitted skb and reuse it for RX DMA */
578
579                 dxe->ctrl = ctrl;
580                 ctl = ctl->next;
581                 dxe = ctl->desc;
582         }
583         wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);
584
585         ch->head_blk_ctl = ctl;
586
587         spin_unlock(&ch->lock);
588
589         return 0;
590 }
591
592 void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
593 {
594         int int_src;
595
596         wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
597
598         /* RX_LOW_PRI */
599         if (int_src & WCN36XX_DXE_INT_CH1_MASK)
600                 wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
601                                           WCN36XX_DXE_CTRL_RX_L,
602                                           WCN36XX_DXE_INT_CH1_MASK,
603                                           WCN36XX_INT_MASK_CHAN_RX_L,
604                                           WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);
605
606         /* RX_HIGH_PRI */
607         if (int_src & WCN36XX_DXE_INT_CH3_MASK)
608                 wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
609                                           WCN36XX_DXE_CTRL_RX_H,
610                                           WCN36XX_DXE_INT_CH3_MASK,
611                                           WCN36XX_INT_MASK_CHAN_RX_H,
612                                           WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);
613
614         if (!int_src)
615                 wcn36xx_warn("No DXE interrupt pending\n");
616 }
617
618 int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
619 {
620         size_t s;
621         void *cpu_addr;
622
623         /* Allocate BD headers for MGMT frames */
624
625         /* Where this comes from, ask QC */
626         wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
627                 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
628
629         s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
630         cpu_addr = dma_zalloc_coherent(wcn->dev, s,
631                                        &wcn->mgmt_mem_pool.phy_addr,
632                                        GFP_KERNEL);
633         if (!cpu_addr)
634                 goto out_err;
635
636         wcn->mgmt_mem_pool.virt_addr = cpu_addr;
637
638         /* Allocate BD headers for DATA frames */
639
640         /* Where this comes from, ask QC */
641         wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
642                 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
643
644         s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
645         cpu_addr = dma_zalloc_coherent(wcn->dev, s,
646                                        &wcn->data_mem_pool.phy_addr,
647                                        GFP_KERNEL);
648         if (!cpu_addr)
649                 goto out_err;
650
651         wcn->data_mem_pool.virt_addr = cpu_addr;
652
653         return 0;
654
655 out_err:
656         wcn36xx_dxe_free_mem_pools(wcn);
657         wcn36xx_err("Failed to allocate BD mempool\n");
658         return -ENOMEM;
659 }
660
661 void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
662 {
663         if (wcn->mgmt_mem_pool.virt_addr)
664                 dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
665                                   WCN36XX_DXE_CH_DESC_NUMB_TX_H,
666                                   wcn->mgmt_mem_pool.virt_addr,
667                                   wcn->mgmt_mem_pool.phy_addr);
668
669         if (wcn->data_mem_pool.virt_addr) {
670                 dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
671                                   WCN36XX_DXE_CH_DESC_NUMB_TX_L,
672                                   wcn->data_mem_pool.virt_addr,
673                                   wcn->data_mem_pool.phy_addr);
674         }
675 }
676
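/*
 * wcn36xx_dxe_tx_frame - queue one frame on a TX DXE channel
 *
 * Consumes a pair of descriptors: the first carries the buffer descriptor
 * (BD) copied into its preallocated pool chunk, the second carries the skb
 * payload.  Both are marked valid only after a write barrier, and the
 * channel is then kicked either through its DXE control register or, for
 * data frames while in BMPS power save, through the SMSM state bits.
 * Returns -EBUSY and stops the mac80211 queues when the ring is full.
 */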
677 int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
678                          struct wcn36xx_vif *vif_priv,
679                          struct wcn36xx_tx_bd *bd,
680                          struct sk_buff *skb,
681                          bool is_low)
682 {
683         struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
684         struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
685         struct wcn36xx_dxe_ch *ch = NULL;
686         unsigned long flags;
687         int ret;
688
689         ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
690
691         spin_lock_irqsave(&ch->lock, flags);
692         ctl_bd = ch->head_blk_ctl;
693         ctl_skb = ctl_bd->next;
694
695         /*
696          * If the skb is not NULL we have reached the tail of the ring, hence
697          * the ring is full. Stop the queues to let mac80211 back off until
698          * the ring has an empty slot again.
699          */
700         if (NULL != ctl_skb->skb) {
701                 ieee80211_stop_queues(wcn->hw);
702                 wcn->queues_stopped = true;
703                 spin_unlock_irqrestore(&ch->lock, flags);
704                 return -EBUSY;
705         }
706
707         if (unlikely(ctl_skb->bd_cpu_addr)) {
708                 wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
709                 ret = -EINVAL;
710                 goto unlock;
711         }
712
713         desc_bd = ctl_bd->desc;
714         desc_skb = ctl_skb->desc;
715
716         ctl_bd->skb = NULL;
717
718         /* write buffer descriptor */
719         memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));
720
721         /* Set source address of the BD we send */
722         desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
723         desc_bd->dst_addr_l = ch->dxe_wq;
724         desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);
725
726         wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
727
728         wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
729                          (char *)desc_bd, sizeof(*desc_bd));
730         wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
731                          "BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
732                          sizeof(struct wcn36xx_tx_bd));
733
734         desc_skb->src_addr_l = dma_map_single(wcn->dev,
735                                               skb->data,
736                                               skb->len,
737                                               DMA_TO_DEVICE);
738         if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
739                 dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
740                 ret = -ENOMEM;
741                 goto unlock;
742         }
743
744         ctl_skb->skb = skb;
745         desc_skb->dst_addr_l = ch->dxe_wq;
746         desc_skb->fr_len = ctl_skb->skb->len;
747
748         wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
749                          (char *)desc_skb, sizeof(*desc_skb));
750         wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
751                          (char *)ctl_skb->skb->data, ctl_skb->skb->len);
752
753         /* Move the head of the ring to the next empty descriptor */
754         ch->head_blk_ctl = ctl_skb->next;
755
756         /* Commit all previous writes and set descriptors to VALID */
757         wmb();
758         desc_skb->ctrl = ch->ctrl_skb;
759         wmb();
760         desc_bd->ctrl = ch->ctrl_bd;
761
762         /*
763          * When connected and trying to send a data frame, the chip can be in
764          * sleep mode and writing to the register will not wake it up. Instead,
765          * notify the chip about the new frame through the SMSM bus.
766          */
767         if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
768                 qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
769                                             WCN36XX_SMSM_WLAN_TX_ENABLE,
770                                             WCN36XX_SMSM_WLAN_TX_ENABLE);
771         } else {
772                 /* indicate End Of Packet and generate interrupt on descriptor
773                  * done.
774                  */
775                 wcn36xx_dxe_write_register(wcn,
776                         ch->reg_ctrl, ch->def_ctrl);
777         }
778
779         ret = 0;
780 unlock:
781         spin_unlock_irqrestore(&ch->lock, flags);
782         return ret;
783 }
784
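/*
 * wcn36xx_dxe_init - reset the DXE engine and bring up all four channels
 *
 * Selects the interrupt routing (Pronto or Riva), sets up descriptor rings
 * and BD pools for the two TX channels, preallocates receive buffers for
 * the two RX channels, enables the per-channel interrupts and finally
 * requests the TX and RX IRQ lines.
 */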
785 int wcn36xx_dxe_init(struct wcn36xx *wcn)
786 {
787         int reg_data = 0, ret;
788
789         reg_data = WCN36XX_DXE_REG_RESET;
790         wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
791
792         /* Select channels for rx avail and xfer done interrupts... */
793         reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
794                     WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
795         if (wcn->is_pronto)
796                 wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
797         else
798                 wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);
799
800         /***************************************/
801         /* Init descriptors for TX LOW channel */
802         /***************************************/
803         ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
804         if (ret) {
805                 dev_err(wcn->dev, "Error allocating descriptor\n");
806                 return ret;
807         }
808         wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
809
810         /* Write channel head to a NEXT register */
811         wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
812                 wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
813
814         /* Program DMA destination addr for TX LOW */
815         wcn36xx_dxe_write_register(wcn,
816                 WCN36XX_DXE_CH_DEST_ADDR_TX_L,
817                 WCN36XX_DXE_WQ_TX_L);
818
819         wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
820         wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
821
822         /***************************************/
823         /* Init descriptors for TX HIGH channel */
824         /***************************************/
825         ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
826         if (ret) {
827                 dev_err(wcn->dev, "Error allocating descriptor\n");
828                 goto out_err_txh_ch;
829         }
830
831         wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
832
833         /* Write channel head to a NEXT register */
834         wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
835                 wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
836
837         /* Program DMA destination addr for TX HIGH */
838         wcn36xx_dxe_write_register(wcn,
839                 WCN36XX_DXE_CH_DEST_ADDR_TX_H,
840                 WCN36XX_DXE_WQ_TX_H);
841
842         wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
843
844         /* Enable channel interrupts */
845         wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
846
847         /***************************************/
848         /* Init descriptors for RX LOW channel */
849         /***************************************/
850         ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
851         if (ret) {
852                 dev_err(wcn->dev, "Error allocating descriptor\n");
853                 goto out_err_rxl_ch;
854         }
855
856
857         /* For RX we need to preallocate buffers */
858         wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
859
860         /* Write channel head to a NEXT register */
861         wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
862                 wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
863
864         /* Write DMA source address */
865         wcn36xx_dxe_write_register(wcn,
866                 WCN36XX_DXE_CH_SRC_ADDR_RX_L,
867                 WCN36XX_DXE_WQ_RX_L);
868
869         /* Program preallocated destination address */
870         wcn36xx_dxe_write_register(wcn,
871                 WCN36XX_DXE_CH_DEST_ADDR_RX_L,
872                 wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
873
874         /* Enable default control registers */
875         wcn36xx_dxe_write_register(wcn,
876                 WCN36XX_DXE_REG_CTL_RX_L,
877                 WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
878
879         /* Enable channel interrupts */
880         wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
881
882         /***************************************/
883         /* Init descriptors for RX HIGH channel */
884         /***************************************/
885         ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
886         if (ret) {
887                 dev_err(wcn->dev, "Error allocating descriptor\n");
888                 goto out_err_rxh_ch;
889         }
890
891         /* For RX we need to preallocate buffers */
892         wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
893
894         /* Write channel head to a NEXT register */
895         wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
896                 wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
897
898         /* Write DMA source address */
899         wcn36xx_dxe_write_register(wcn,
900                 WCN36XX_DXE_CH_SRC_ADDR_RX_H,
901                 WCN36XX_DXE_WQ_RX_H);
902
903         /* Program preallocated destination address */
904         wcn36xx_dxe_write_register(wcn,
905                 WCN36XX_DXE_CH_DEST_ADDR_RX_H,
906                 wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
907
908         /* Enable default control registers */
909         wcn36xx_dxe_write_register(wcn,
910                 WCN36XX_DXE_REG_CTL_RX_H,
911                 WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
912
913         /* Enable channel interrupts */
914         wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
915
916         ret = wcn36xx_dxe_request_irqs(wcn);
917         if (ret < 0)
918                 goto out_err_irq;
919
920         return 0;
921
922 out_err_irq:
923         wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
924 out_err_rxh_ch:
925         wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
926 out_err_rxl_ch:
927         wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
928 out_err_txh_ch:
929         wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
930
931         return ret;
932 }
933
934 void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
935 {
936         free_irq(wcn->tx_irq, wcn);
937         free_irq(wcn->rx_irq, wcn);
938
939         if (wcn->tx_ack_skb) {
940                 ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
941                 wcn->tx_ack_skb = NULL;
942         }
943
944         wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
945         wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
946 }