2 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <linux/module.h>
18 #include <linux/kernel.h>
25 #include <linux/of_device.h>
26 #include <linux/platform_device.h>
27 #include <linux/regulator/consumer.h>
28 #include <linux/clk.h>
29 #define WCN3990_CE_ATTR_FLAGS 0
30 #define ATH10K_SNOC_RX_POST_RETRY_MS 50
31 #define CE_POLL_PIPE 4
/* IRQ names handed to request_irq(), indexed by CE id.
 * NOTE(review): the entries were lost in extraction; reconstructed from the
 * upstream ath10k snoc driver — verify against the canonical source.
 */
static char *const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
48 static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
49 {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
50 {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
51 {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
52 {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
55 static struct ath10k_wcn3990_clk_info clk_cfg[] = {
56 {NULL, "cxo_ref_clk_pin", 0, false},
/* Forward declarations for the CE send/recv callbacks referenced by the
 * host_ce_config_wlan[] table below.
 */
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
65 static const struct ath10k_snoc_drv_priv drv_priv = {
66 .hw_rev = ATH10K_HW_WCN3990,
67 .dma_mask = DMA_BIT_MASK(37),
70 static struct ce_attr host_ce_config_wlan[] = {
71 /* CE0: host->target HTC control streams */
73 .flags = CE_ATTR_FLAGS,
77 .send_cb = ath10k_snoc_htc_tx_cb,
80 /* CE1: target->host HTT + HTC control */
82 .flags = CE_ATTR_FLAGS,
86 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
89 /* CE2: target->host WMI */
91 .flags = CE_ATTR_FLAGS,
95 .recv_cb = ath10k_snoc_htc_rx_cb,
98 /* CE3: host->target WMI */
100 .flags = CE_ATTR_FLAGS,
104 .send_cb = ath10k_snoc_htc_tx_cb,
107 /* CE4: host->target HTT */
109 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
113 .send_cb = ath10k_snoc_htt_tx_cb,
116 /* CE5: target->host HTT (ipa_uc->target ) */
118 .flags = CE_ATTR_FLAGS,
121 .dest_nentries = 512,
122 .recv_cb = ath10k_snoc_htt_rx_cb,
125 /* CE6: target autonomous hif_memcpy */
127 .flags = CE_ATTR_FLAGS,
133 /* CE7: ce_diag, the Diagnostic Window */
135 .flags = CE_ATTR_FLAGS,
141 /* CE8: Target to uMC */
143 .flags = CE_ATTR_FLAGS,
146 .dest_nentries = 128,
149 /* CE9 target->host HTT */
151 .flags = CE_ATTR_FLAGS,
154 .dest_nentries = 512,
155 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
158 /* CE10: target->host HTT */
160 .flags = CE_ATTR_FLAGS,
163 .dest_nentries = 512,
164 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
167 /* CE11: target -> host PKTLOG */
169 .flags = CE_ATTR_FLAGS,
172 .dest_nentries = 512,
173 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
177 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
179 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
180 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
184 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
185 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
189 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
190 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
194 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
195 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
199 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
200 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
204 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
205 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
209 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
210 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
214 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
215 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
219 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
220 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
224 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
225 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
229 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
230 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
234 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
235 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
239 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
240 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
244 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
245 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
249 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
250 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
254 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
255 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
259 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
260 __cpu_to_le32(PIPEDIR_OUT),
263 { /* in = DL = target -> host */
264 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
265 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
268 { /* in = DL = target -> host */
269 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
270 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
273 { /* in = DL = target -> host pktlog */
274 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
275 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
278 /* (Additions here) */
287 void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
289 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
291 iowrite32(value, ar_snoc->mem + offset);
294 u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
296 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
299 val = ioread32(ar_snoc->mem + offset);
304 static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
306 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
307 struct ath10k *ar = pipe->hif_ce_state;
308 struct ath10k_ce *ce = ath10k_ce_priv(ar);
313 skb = dev_alloc_skb(pipe->buf_sz);
317 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
319 paddr = dma_map_single(ar->dev, skb->data,
320 skb->len + skb_tailroom(skb),
322 if (unlikely(dma_mapping_error(ar->dev, paddr))) {
323 ath10k_warn(ar, "failed to dma map snoc rx buf\n");
324 dev_kfree_skb_any(skb);
328 ATH10K_SKB_RXCB(skb)->paddr = paddr;
330 spin_lock_bh(&ce->ce_lock);
331 ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
332 spin_unlock_bh(&ce->ce_lock);
334 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
336 dev_kfree_skb_any(skb);
343 static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
345 struct ath10k *ar = pipe->hif_ce_state;
346 struct ath10k_ce *ce = ath10k_ce_priv(ar);
347 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
348 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
351 if (pipe->buf_sz == 0)
354 if (!ce_pipe->dest_ring)
357 spin_lock_bh(&ce->ce_lock);
358 num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
359 spin_unlock_bh(&ce->ce_lock);
361 ret = __ath10k_snoc_rx_post_buf(pipe);
365 ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
366 mod_timer(&ar_snoc->rx_post_retry, jiffies +
367 ATH10K_SNOC_RX_POST_RETRY_MS);
373 static void ath10k_snoc_rx_post(struct ath10k *ar)
375 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
378 for (i = 0; i < CE_COUNT; i++)
379 ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
382 static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
383 void (*callback)(struct ath10k *ar,
384 struct sk_buff *skb))
386 struct ath10k *ar = ce_state->ar;
387 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
388 struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
390 struct sk_buff_head list;
391 void *transfer_context;
392 unsigned int nbytes, max_nbytes;
394 __skb_queue_head_init(&list);
395 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
397 skb = transfer_context;
398 max_nbytes = skb->len + skb_tailroom(skb);
399 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
400 max_nbytes, DMA_FROM_DEVICE);
402 if (unlikely(max_nbytes < nbytes)) {
403 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
405 dev_kfree_skb_any(skb);
409 skb_put(skb, nbytes);
410 __skb_queue_tail(&list, skb);
413 while ((skb = __skb_dequeue(&list))) {
414 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
415 ce_state->id, skb->len);
420 ath10k_snoc_rx_post_pipe(pipe_info);
423 static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
425 ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
428 static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
430 /* CE4 polling needs to be done whenever CE pipe which transports
431 * HTT Rx (target->host) is processed.
433 ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
435 ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
438 static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
440 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
441 ath10k_htt_t2h_msg_handler(ar, skb);
444 static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
446 ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
447 ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
450 static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
452 struct ath10k_pci *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
453 struct ath10k *ar = ar_snoc->ar;
455 ath10k_snoc_rx_post(ar);
458 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
460 struct ath10k *ar = ce_state->ar;
461 struct sk_buff_head list;
464 __skb_queue_head_init(&list);
465 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
469 __skb_queue_tail(&list, skb);
472 while ((skb = __skb_dequeue(&list)))
473 ath10k_htc_tx_completion_handler(ar, skb);
476 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
478 struct ath10k *ar = ce_state->ar;
481 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
485 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
486 skb->len, DMA_TO_DEVICE);
487 ath10k_htt_hif_tx_complete(ar, skb);
491 static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
492 struct ath10k_hif_sg_item *items, int n_items)
494 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
495 struct ath10k_ce *ce = ath10k_ce_priv(ar);
496 struct ath10k_snoc_pipe *snoc_pipe;
497 struct ath10k_ce_pipe *ce_pipe;
500 snoc_pipe = &ar_snoc->pipe_info[pipe_id];
501 ce_pipe = snoc_pipe->ce_hdl;
502 spin_lock_bh(&ce->ce_lock);
504 for (i = 0; i < n_items - 1; i++) {
505 ath10k_dbg(ar, ATH10K_DBG_SNOC,
506 "snoc tx item %d paddr %pad len %d n_items %d\n",
507 i, &items[i].paddr, items[i].len, n_items);
509 err = ath10k_ce_send_nolock(ce_pipe,
510 items[i].transfer_context,
513 items[i].transfer_id,
514 CE_SEND_FLAG_GATHER);
519 ath10k_dbg(ar, ATH10K_DBG_SNOC,
520 "snoc tx item %d paddr %pad len %d n_items %d\n",
521 i, &items[i].paddr, items[i].len, n_items);
523 err = ath10k_ce_send_nolock(ce_pipe,
524 items[i].transfer_context,
527 items[i].transfer_id,
532 spin_unlock_bh(&ce->ce_lock);
538 __ath10k_ce_send_revert(ce_pipe);
540 spin_unlock_bh(&ce->ce_lock);
544 static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
545 struct bmi_target_info *target_info)
547 target_info->version = ATH10K_HW_WCN3990;
548 target_info->type = ATH10K_HW_WCN3990;
553 static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
555 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
557 ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");
559 return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
562 static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
567 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");
570 resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);
572 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
575 ath10k_ce_per_engine_service(ar, pipe);
578 static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
580 u8 *ul_pipe, u8 *dl_pipe)
582 const struct service_to_pipe *entry;
583 bool ul_set = false, dl_set = false;
586 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");
588 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
589 entry = &target_service_to_ce_map_wlan[i];
591 if (__le32_to_cpu(entry->service_id) != service_id)
594 switch (__le32_to_cpu(entry->pipedir)) {
599 *dl_pipe = __le32_to_cpu(entry->pipenum);
604 *ul_pipe = __le32_to_cpu(entry->pipenum);
610 *dl_pipe = __le32_to_cpu(entry->pipenum);
611 *ul_pipe = __le32_to_cpu(entry->pipenum);
618 if (WARN_ON(!ul_set || !dl_set))
624 static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
625 u8 *ul_pipe, u8 *dl_pipe)
627 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");
629 (void)ath10k_snoc_hif_map_service_to_pipe(ar,
630 ATH10K_HTC_SVC_ID_RSVD_CTRL,
/* Mask all copy-engine interrupts. */
static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
}
/* Unmask all copy-engine interrupts. */
static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
}
644 static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
646 struct ath10k_ce_pipe *ce_pipe;
647 struct ath10k_ce_ring *ce_ring;
652 ar = snoc_pipe->hif_ce_state;
653 ce_pipe = snoc_pipe->ce_hdl;
654 ce_ring = ce_pipe->dest_ring;
659 if (!snoc_pipe->buf_sz)
662 for (i = 0; i < ce_ring->nentries; i++) {
663 skb = ce_ring->per_transfer_context[i];
667 ce_ring->per_transfer_context[i] = NULL;
669 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
670 skb->len + skb_tailroom(skb),
672 dev_kfree_skb_any(skb);
676 static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
678 struct ath10k_ce_pipe *ce_pipe;
679 struct ath10k_ce_ring *ce_ring;
680 struct ath10k_snoc *ar_snoc;
685 ar = snoc_pipe->hif_ce_state;
686 ar_snoc = ath10k_snoc_priv(ar);
687 ce_pipe = snoc_pipe->ce_hdl;
688 ce_ring = ce_pipe->src_ring;
693 if (!snoc_pipe->buf_sz)
696 for (i = 0; i < ce_ring->nentries; i++) {
697 skb = ce_ring->per_transfer_context[i];
701 ce_ring->per_transfer_context[i] = NULL;
703 ath10k_htc_tx_completion_handler(ar, skb);
707 static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
709 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
710 struct ath10k_snoc_pipe *pipe_info;
713 del_timer_sync(&ar_snoc->rx_post_retry);
714 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
715 pipe_info = &ar_snoc->pipe_info[pipe_num];
716 ath10k_snoc_rx_pipe_cleanup(pipe_info);
717 ath10k_snoc_tx_pipe_cleanup(pipe_info);
721 static void ath10k_snoc_hif_stop(struct ath10k *ar)
723 ath10k_snoc_irq_disable(ar);
724 ath10k_snoc_buffer_cleanup(ar);
725 napi_synchronize(&ar->napi);
726 napi_disable(&ar->napi);
727 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
730 static int ath10k_snoc_hif_start(struct ath10k *ar)
732 ath10k_snoc_irq_enable(ar);
733 ath10k_snoc_rx_post(ar);
735 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
740 static int ath10k_snoc_init_pipes(struct ath10k *ar)
744 for (i = 0; i < CE_COUNT; i++) {
745 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
747 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
/* Placeholder: firmware/WLAN enable handshake is not implemented yet in
 * this WIP driver.
 * NOTE(review): body was lost in extraction; assumed to be an empty stub
 * returning success, as in the initial upstream version — verify.
 */
static int ath10k_snoc_wlan_enable(struct ath10k *ar)
{
	return 0;
}
/* Placeholder: WLAN disable handshake is not implemented yet.
 * NOTE(review): body lost in extraction; assumed empty stub — verify.
 */
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
}
765 static void ath10k_snoc_hif_power_down(struct ath10k *ar)
767 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
769 ath10k_snoc_wlan_disable(ar);
770 ath10k_ce_free_rri(ar);
773 static int ath10k_snoc_hif_power_up(struct ath10k *ar)
777 ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
778 __func__, ar->state);
780 ret = ath10k_snoc_wlan_enable(ar);
782 ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
786 ath10k_ce_alloc_rri(ar);
788 ret = ath10k_snoc_init_pipes(ar);
790 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
791 goto err_wlan_enable;
794 napi_enable(&ar->napi);
798 ath10k_snoc_wlan_disable(ar);
803 static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
804 .read32 = ath10k_snoc_read32,
805 .write32 = ath10k_snoc_write32,
806 .start = ath10k_snoc_hif_start,
807 .stop = ath10k_snoc_hif_stop,
808 .map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe,
809 .get_default_pipe = ath10k_snoc_hif_get_default_pipe,
810 .power_up = ath10k_snoc_hif_power_up,
811 .power_down = ath10k_snoc_hif_power_down,
812 .tx_sg = ath10k_snoc_hif_tx_sg,
813 .send_complete_check = ath10k_snoc_hif_send_complete_check,
814 .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number,
815 .get_target_info = ath10k_snoc_hif_get_target_info,
818 static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
819 .read32 = ath10k_snoc_read32,
820 .write32 = ath10k_snoc_write32,
823 int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
825 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
828 for (i = 0; i < CE_COUNT_MAX; i++) {
829 if (ar_snoc->ce_irqs[i].irq_line == irq)
832 ath10k_err(ar, "No matching CE id for irq %d\n", irq);
837 static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
839 struct ath10k *ar = arg;
840 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
841 int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);
843 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
844 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
849 ath10k_snoc_irq_disable(ar);
850 napi_schedule(&ar->napi);
855 static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
857 struct ath10k *ar = container_of(ctx, struct ath10k, napi);
860 ath10k_ce_per_engine_service_any(ar);
861 done = ath10k_htt_txrx_compl_task(ar, budget);
865 ath10k_snoc_irq_enable(ar);
871 void ath10k_snoc_init_napi(struct ath10k *ar)
873 netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
877 static int ath10k_snoc_request_irq(struct ath10k *ar)
879 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
880 int irqflags = IRQF_TRIGGER_RISING;
883 for (id = 0; id < CE_COUNT_MAX; id++) {
884 ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
885 ath10k_snoc_per_engine_handler,
886 irqflags, ce_name[id], ar);
889 "failed to register IRQ handler for CE %d: %d",
898 for (id -= 1; id >= 0; id--)
899 free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
904 static void ath10k_snoc_free_irq(struct ath10k *ar)
906 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
909 for (id = 0; id < CE_COUNT_MAX; id++)
910 free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
913 static int ath10k_snoc_resource_init(struct ath10k *ar)
915 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
916 struct platform_device *pdev;
917 struct resource *res;
921 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
923 ath10k_err(ar, "Memory base not found in DT\n");
927 ar_snoc->mem_pa = res->start;
928 ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
931 ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
936 for (i = 0; i < CE_COUNT; i++) {
937 res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
939 ath10k_err(ar, "failed to get IRQ%d\n", i);
943 ar_snoc->ce_irqs[i].irq_line = res->start;
950 static int ath10k_snoc_setup_resource(struct ath10k *ar)
952 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
953 struct ath10k_ce *ce = ath10k_ce_priv(ar);
954 struct ath10k_snoc_pipe *pipe;
957 timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
958 spin_lock_init(&ce->ce_lock);
959 for (i = 0; i < CE_COUNT; i++) {
960 pipe = &ar_snoc->pipe_info[i];
961 pipe->ce_hdl = &ce->ce_states[i];
963 pipe->hif_ce_state = ar;
965 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
967 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
972 pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
974 ath10k_snoc_init_napi(ar);
979 static void ath10k_snoc_release_resource(struct ath10k *ar)
983 netif_napi_del(&ar->napi);
984 for (i = 0; i < CE_COUNT; i++)
985 ath10k_ce_free_pipe(ar, i);
988 static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
989 struct ath10k_wcn3990_vreg_info *vreg_info)
991 struct regulator *reg;
994 reg = devm_regulator_get_optional(dev, vreg_info->name);
999 if (ret == -EPROBE_DEFER) {
1000 ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
1004 if (vreg_info->required) {
1005 ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
1006 vreg_info->name, ret);
1009 ath10k_dbg(ar, ATH10K_DBG_SNOC,
1010 "Optional regulator %s doesn't exist: %d\n",
1011 vreg_info->name, ret);
1015 vreg_info->reg = reg;
1018 ath10k_dbg(ar, ATH10K_DBG_SNOC,
1019 "snog vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
1020 vreg_info->name, vreg_info->min_v, vreg_info->max_v,
1021 vreg_info->load_ua, vreg_info->settle_delay);
1026 static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
1027 struct ath10k_wcn3990_clk_info *clk_info)
1032 handle = devm_clk_get(dev, clk_info->name);
1033 if (IS_ERR(handle)) {
1034 ret = PTR_ERR(handle);
1035 if (clk_info->required) {
1036 ath10k_err(ar, "snoc clock %s isn't available: %d\n",
1037 clk_info->name, ret);
1040 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
1046 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
1047 clk_info->name, clk_info->freq);
1049 clk_info->handle = handle;
1054 static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
1056 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1057 struct ath10k_wcn3990_vreg_info *vreg_info;
1061 for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
1062 vreg_info = &ar_snoc->vreg[i];
1064 if (!vreg_info->reg)
1067 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
1070 ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
1074 "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
1075 vreg_info->name, vreg_info->min_v, vreg_info->max_v);
1076 goto err_reg_config;
1079 if (vreg_info->load_ua) {
1080 ret = regulator_set_load(vreg_info->reg,
1081 vreg_info->load_ua);
1084 "failed to set regulator %s load: %d\n",
1086 vreg_info->load_ua);
1087 goto err_reg_config;
1091 ret = regulator_enable(vreg_info->reg);
1093 ath10k_err(ar, "failed to enable regulator %s\n",
1095 goto err_reg_config;
1098 if (vreg_info->settle_delay)
1099 udelay(vreg_info->settle_delay);
1105 for (; i >= 0; i--) {
1106 vreg_info = &ar_snoc->vreg[i];
1108 if (!vreg_info->reg)
1111 regulator_disable(vreg_info->reg);
1112 regulator_set_load(vreg_info->reg, 0);
1113 regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
1119 static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
1121 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1122 struct ath10k_wcn3990_vreg_info *vreg_info;
1126 for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
1127 vreg_info = &ar_snoc->vreg[i];
1129 if (!vreg_info->reg)
1132 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
1135 ret = regulator_disable(vreg_info->reg);
1137 ath10k_err(ar, "failed to disable regulator %s\n",
1140 ret = regulator_set_load(vreg_info->reg, 0);
1142 ath10k_err(ar, "failed to set load %s\n",
1145 ret = regulator_set_voltage(vreg_info->reg, 0,
1148 ath10k_err(ar, "failed to set voltage %s\n",
1155 static int ath10k_wcn3990_clk_init(struct ath10k *ar)
1157 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1158 struct ath10k_wcn3990_clk_info *clk_info;
1162 for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1163 clk_info = &ar_snoc->clk[i];
1165 if (!clk_info->handle)
1168 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
1171 if (clk_info->freq) {
1172 ret = clk_set_rate(clk_info->handle, clk_info->freq);
1175 ath10k_err(ar, "failed to set clock %s freq %u\n",
1176 clk_info->name, clk_info->freq);
1177 goto err_clock_config;
1181 ret = clk_prepare_enable(clk_info->handle);
1183 ath10k_err(ar, "failed to enable clock %s\n",
1185 goto err_clock_config;
1192 for (; i >= 0; i--) {
1193 clk_info = &ar_snoc->clk[i];
1195 if (!clk_info->handle)
1198 clk_disable_unprepare(clk_info->handle);
1204 static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
1206 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1207 struct ath10k_wcn3990_clk_info *clk_info;
1210 for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1211 clk_info = &ar_snoc->clk[i];
1213 if (!clk_info->handle)
1216 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
1219 clk_disable_unprepare(clk_info->handle);
1225 static int ath10k_hw_power_on(struct ath10k *ar)
1229 ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
1231 ret = ath10k_wcn3990_vreg_on(ar);
1235 ret = ath10k_wcn3990_clk_init(ar);
1242 ath10k_wcn3990_vreg_off(ar);
1246 static int ath10k_hw_power_off(struct ath10k *ar)
1250 ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
1252 ath10k_wcn3990_clk_deinit(ar);
1254 ret = ath10k_wcn3990_vreg_off(ar);
1259 static const struct of_device_id ath10k_snoc_dt_match[] = {
1260 { .compatible = "qcom,wcn3990-wifi",
1265 MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
1267 static int ath10k_snoc_probe(struct platform_device *pdev)
1269 const struct ath10k_snoc_drv_priv *drv_data;
1270 const struct of_device_id *of_id;
1271 struct ath10k_snoc *ar_snoc;
1277 of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
1279 dev_err(&pdev->dev, "failed to find matching device tree id\n");
1283 drv_data = of_id->data;
1286 ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
1288 dev_err(dev, "failed to set dma mask: %d", ret);
1292 ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
1293 drv_data->hw_rev, &ath10k_snoc_hif_ops);
1295 dev_err(dev, "failed to allocate core\n");
1299 ar_snoc = ath10k_snoc_priv(ar);
1300 ar_snoc->dev = pdev;
1301 platform_set_drvdata(pdev, ar);
1303 ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
1304 ar->ce_priv = &ar_snoc->ce;
1306 ath10k_snoc_resource_init(ar);
1308 ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
1309 goto err_core_destroy;
1312 ath10k_snoc_setup_resource(ar);
1314 ath10k_warn(ar, "failed to setup resource: %d\n", ret);
1315 goto err_core_destroy;
1317 ret = ath10k_snoc_request_irq(ar);
1319 ath10k_warn(ar, "failed to request irqs: %d\n", ret);
1320 goto err_release_resource;
1323 ar_snoc->vreg = vreg_cfg;
1324 for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
1325 ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
1330 ar_snoc->clk = clk_cfg;
1331 for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1332 ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
1337 ret = ath10k_hw_power_on(ar);
1339 ath10k_err(ar, "failed to power on device: %d\n", ret);
1343 ret = ath10k_core_register(ar, drv_data->hw_rev);
1345 ath10k_err(ar, "failed to register driver core: %d\n", ret);
1346 goto err_hw_power_off;
1349 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
1350 ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!");
1355 ath10k_hw_power_off(ar);
1358 ath10k_snoc_free_irq(ar);
1360 err_release_resource:
1361 ath10k_snoc_release_resource(ar);
1364 ath10k_core_destroy(ar);
1369 static int ath10k_snoc_remove(struct platform_device *pdev)
1371 struct ath10k *ar = platform_get_drvdata(pdev);
1373 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
1374 ath10k_core_unregister(ar);
1375 ath10k_hw_power_off(ar);
1376 ath10k_snoc_free_irq(ar);
1377 ath10k_snoc_release_resource(ar);
1378 ath10k_core_destroy(ar);
1383 static struct platform_driver ath10k_snoc_driver = {
1384 .probe = ath10k_snoc_probe,
1385 .remove = ath10k_snoc_remove,
1387 .name = "ath10k_snoc",
1388 .owner = THIS_MODULE,
1389 .of_match_table = ath10k_snoc_dt_match,
1393 static int __init ath10k_snoc_init(void)
1397 ret = platform_driver_register(&ath10k_snoc_driver);
1399 pr_err("failed to register ath10k snoc driver: %d\n",
1404 module_init(ath10k_snoc_init);
1406 static void __exit ath10k_snoc_exit(void)
1408 platform_driver_unregister(&ath10k_snoc_driver);
1410 module_exit(ath10k_snoc_exit);
1412 MODULE_AUTHOR("Qualcomm");
1413 MODULE_LICENSE("Dual BSD/GPL");
1414 MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");