/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
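
/*
 * Illustrative arithmetic (max_tbs value assumed): on a device with
 * max_tbs == 25, 2 TBs are reserved for the command/header and 1 for the
 * SKB head, so IWL_PCIE_MAX_FRAGS yields 25 - 3 == 22 TBs for fragments.
 */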
/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
/* This file includes the declarations that are internal to the
 * trans_pcie layer */
/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
};
/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};
/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors
 *	(rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @lock: protects the queue
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @napi: NAPI context for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	__le32 *used_bd;
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	spinlock_t lock;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}
/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
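
/*
 * Illustrative usage: TFD_QUEUE_SIZE_MAX is 256 (a power of two), so the
 * mask above is 0xff and the index wraps cleanly:
 *
 *	iwl_queue_inc_wrap(255) == 0
 *	iwl_queue_dec_wrap(0) == 255
 */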
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes at offset 12, the first TB is 20 bytes now.
 * Making it bigger would make allocations bigger and copies slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
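
/*
 * Worked arithmetic (from the comment above): the PN occupies bytes
 * 12..19, so the first TB must cover 12 + 8 = 20 bytes; ALIGN(20, 64)
 * then pads each per-entry buffer up to 64 bytes.
 */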
struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};
struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};
/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: index of the first empty entry (host_w)
 * @read_ptr: index of the last used entry (host_r)
 * @dma_addr: physical addr for BDs
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and the required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there are HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *	HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *	SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;
};
static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};
/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};
/**
 * struct iwl_dram_data
 * @physical: bus (DMA) address of the block/page
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};
/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in the fw array
 * @paging: paging dram data
 * @paging_cnt: total number of items in the paging array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frames
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if we managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @scheduled_for_removal: true if we have scheduled a device removal
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	struct iwl_context_info *ctxt_info;
	dma_addr_t ctxt_info_dma_addr;
	struct iwl_self_init_dram init_dram;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	bool debug_rfkill;
	struct isr_statistics isr_stats;
	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;
	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;
	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;
	bool scheduled_for_removal;
};
static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
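
/*
 * These two helpers are inverses: trans_specific is embedded in struct
 * iwl_trans (as the container_of above implies), so
 * iwl_trans_pcie_get_trans(IWL_TRANS_GET_PCIE_TRANS(trans)) yields the
 * original trans pointer.
 */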
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);
/*****************************************************
 * RX
 ******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
/*****************************************************
 * ICT - interrupt handling
 ******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
 * TX / HCMD
 ******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
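
/*
 * Note on the two TFD layouts read below: the TFH format carries a plain
 * 16-bit tb_len, while the legacy format packs the high DMA address bits
 * into the low nibble of hi_n_len and the 12-bit length above them --
 * hence the ">> 4".
 */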
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
/*****************************************************
 * Error handling
 ******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
 * Helpers
 ******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike MSI, in MSI-X a cause is enabled when its mask bit
		 * is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}
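
/*
 * Example (illustrative): iwl_enable_fh_int_msk_msix(trans,
 * MSIX_FH_INT_CAUSES_D2S_CH0_NUM) writes ~msk to the mask register,
 * i.e. every bit except the requested cause stays masked, so only that
 * FH cause can raise the interrupt -- exactly how iwl_enable_fw_load_int()
 * below narrows the causes during firmware load.
 */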
static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}
static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
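
/*
 * Example (illustrative): for the command queue n_window is TFD_CMD_SLOTS
 * (32), so HW indices 5, 37, 69, ... all map to SW entry 5 -- the "window
 * overlaid over the HW queue" described at struct iwl_txq.
 */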
static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}
static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}
static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}
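
/*
 * Example (illustrative): with read_ptr == 250 and write_ptr == 4 the used
 * region wraps, so indices 250..255 and 0..3 are "used" while 4..249 are
 * free -- hence the inverted test in the wrapped case above.
 */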
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif
int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */