/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used to map the SKB's frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
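
/*
 * Usage sketch (illustrative only, not a declaration from this header):
 * a TX path can use IWL_PCIE_MAX_FRAGS() to decide whether an skb's
 * page fragments all fit into a single TFD, or whether the skb has to
 * be linearized first:
 *
 *	if (skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
 *	    __skb_linearize(skb))
 *		return -ENOMEM;
 */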

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

/* This file includes the declarations that are internal to the
 * trans_pcie layer
 */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	struct iwl_rx_completion_desc *cd;
	dma_addr_t used_bd_dma;
	dma_addr_t tr_tail_dma;
	dma_addr_t cr_tail_dma;
	struct list_head rx_free;
	struct list_head rx_used;
	dma_addr_t rb_stts_dma;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
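
/*
 * Example (illustrative): max_tfd_queue_size is a power of two, so the
 * "& (size - 1)" above is a cheap modulo. With a size of 256,
 * iwl_queue_inc_wrap() of index 255 yields 0, and iwl_queue_dec_wrap()
 * of index 0 yields 255.
 */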

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes at offset 12, the first TB must be 20 bytes (12 + 8).
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
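
/*
 * Sanity sketch (illustrative): the 20 bytes are the 12-byte scratch
 * area plus the 8-byte PN, and the aligned size rounds up to a 64-byte
 * boundary, e.g.:
 *
 *	BUILD_BUG_ON(IWL_FIRST_TB_SIZE < 12 + 8);
 *	BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != 64);
 */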

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index), the host write pointer
 * @read_ptr: last used entry (index), the host read pointer
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (barring future HW changes). For the normal TX queues, n_window, which is
 * the size of the software queue data, is also 256; however, for the
 * command queue, n_window is only 32 since we don't need so many commands
 * pending. Since the HW still uses 256 BDs for DMA though,
 * TFD_QUEUE_SIZE_MAX stays 256. This means that we end up with
 * the following:
 *	HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *	SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;
};

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
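
/*
 * Example (illustrative): since each entry is IWL_FIRST_TB_SIZE_ALIGN
 * (64) bytes, entry idx 3 lives at txq->first_tb_dma + 192.
 */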

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 *	debugfs file
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct cont_rec - continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8  state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frames
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;

	struct iwl_context_info *ctxt_info;
	struct iwl_context_info_gen3 *ctxt_info_gen3;
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	dma_addr_t ict_tbl_dma;

	bool is_down, opmode_down;

	struct isr_statistics isr_stats;

	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;

	bool pcie_dbg_dumped_once;

	/* protect hw register access */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that the bit is cleared
	 * by writing 1 to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}
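
/*
 * Typical flow (illustrative): an MSI-X handler runs with its cause
 * auto-masked by the HW, services it, and re-arms it as the last step:
 *
 *	irqreturn_t some_msix_handler(int irq, void *dev_id)
 *	{
 *		struct msix_entry *entry = dev_id;
 *		...
 *		iwl_pcie_clear_irq(trans, entry);
 *		return IRQ_HANDLED;
 *	}
 *
 * (some_msix_handler is a hypothetical name, and "trans" would have to
 * be derived from the entry in a real handler.)
 */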

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
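
/*
 * Note (illustrative): this is the inverse of IWL_TRANS_GET_PCIE_TRANS(),
 * so the two round-trip:
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *
 *	WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
 */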

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
 * RX
 ******************************************************/
int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);
int iwl_pcie_rx_alloc(struct iwl_trans *trans);

/*****************************************************
 * ICT - interrupt handling
 ******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
 * TX / HCMD
 ******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
				   struct iwl_txq *txq, u16 byte_cnt,
				   int num_tbs);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
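
/*
 * Note (illustrative): in the legacy TFD format, hi_n_len packs the top
 * 4 bits of the DMA address into bits 0-3 and the 12-bit length into
 * bits 4-15, hence the ">> 4" above. A writer packs it the other way,
 * roughly:
 *
 *	hi_n_len = iwl_get_dma_hi_addr(addr) | (len << 4);
 *	tb->hi_n_len = cpu_to_le16(hi_n_len);
 */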

/*****************************************************
 * Error handling
 ******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
 * Helpers
 ******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}
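
/*
 * Usage sketch (illustrative): the context-info setup path walks the
 * firmware sections and copies each into DMA-coherent memory, e.g.:
 *
 *	for (i = 0; i < num_sections; i++) {
 *		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
 *						   &dram->fw[dram->fw_cnt]);
 *		if (ret)
 *			return ret;
 *		dram->fw_cnt++;
 *	}
 */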

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike MSI, in MSI-X a cause is enabled when its mask
		 * bit is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}
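
/*
 * Note (illustrative): the MSI-X mask registers are inverted, i.e. a set
 * bit masks a cause. Enabling only the RF-kill HW cause therefore writes
 * ~MSIX_HW_INT_CAUSES_REG_RF_KILL, leaving every other cause masked.
 */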

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
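
/*
 * Example (illustrative): for the command queue n_window is 32 while the
 * HW ring has 256 entries, so HW indices 0, 32, 64, ... all map to SW
 * entry 0; iwl_pcie_get_cmd_index(q, 100) == (100 & 31) == 4.
 */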

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit
		 * to allow it to wake up the PCI-E bus for RF-kill
		 * interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
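
/*
 * Example (illustrative): with r == 250 and w == 5 on a 256-entry ring,
 * the used region wraps: indices 250..255 and 0..4 are used while
 * 5..249 are free. The "w >= r" test above selects the non-wrapped or
 * wrapped formula accordingly.
 */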

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg_dest_tlv || trans->ini_valid);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);

struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);

#endif /* __iwl_trans_int_pcie_h__ */