/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};

enum {
	/* One minute, for the sake of bringup. In general, commands must
	 * always complete, and we may need to increase this timeout value.
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};
enum {
	CMD_STATUS_SUCCESS = 0,
};

enum {
	MLX5_SQP_IEEE_1588 = 2,
	MLX5_SQP_SYNC_UMR  = 4,
};
enum {
	MLX5_ATOMIC_MODE_OFFSET  = 16,
	MLX5_ATOMIC_MODE_IB_COMP = 1,
	MLX5_ATOMIC_MODE_CX      = 2,
	MLX5_ATOMIC_MODE_8B      = 3,
	MLX5_ATOMIC_MODE_16B     = 4,
	MLX5_ATOMIC_MODE_32B     = 5,
	MLX5_ATOMIC_MODE_64B     = 6,
	MLX5_ATOMIC_MODE_128B    = 7,
	MLX5_ATOMIC_MODE_256B    = 8,
};
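
/* Editor's sketch, not part of the original header: the MLX5_ATOMIC_MODE_*
 * values above are raw field values, and MLX5_ATOMIC_MODE_OFFSET is the bit
 * position they occupy at the use site. A hypothetical composition helper:
 */
static inline u32 mlx5_example_atomic_mode_bits(u32 atomic_mode)
{
	/* e.g. MLX5_ATOMIC_MODE_8B << 16 == 0x30000 */
	return atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
}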
enum {
	MLX5_REG_QPTS		 = 0x4002,
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_QPDPM		 = 0x4013,
	MLX5_REG_QCAM		 = 0x4019,
	MLX5_REG_DCBX_PARAM	 = 0x4020,
	MLX5_REG_DCBX_APP	 = 0x4021,
	MLX5_REG_FPGA_CAP	 = 0x4022,
	MLX5_REG_FPGA_CTRL	 = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC		 = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PPTB		 = 0x500b,
	MLX5_REG_PBMC		 = 0x500c,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PCMR		 = 0x5041,
	MLX5_REG_PMLP		 = 0x5002,
	MLX5_REG_PPLM		 = 0x5023,
	MLX5_REG_PCAM		 = 0x507f,
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		 = 0x9014,
	MLX5_REG_MLCR		 = 0x902b,
	MLX5_REG_MTRC_CAP	 = 0x9040,
	MLX5_REG_MTRC_CONF	 = 0x9041,
	MLX5_REG_MTRC_STDB	 = 0x9042,
	MLX5_REG_MTRC_CTRL	 = 0x9043,
	MLX5_REG_MPCNT		 = 0x9051,
	MLX5_REG_MTPPS		 = 0x9053,
	MLX5_REG_MTPPSE		 = 0x9054,
	MLX5_REG_MPEGC		 = 0x9056,
	MLX5_REG_MCQI		 = 0x9061,
	MLX5_REG_MCC		 = 0x9062,
	MLX5_REG_MCDA		 = 0x9063,
	MLX5_REG_MCAM		 = 0x907f,
};
enum mlx5_qpts_trust_state {
	MLX5_QPTS_TRUST_PCP  = 1,
	MLX5_QPTS_TRUST_DSCP = 2,
};

enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
enum {
	MLX5_ATOMIC_OPS_CMP_SWAP	   = 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD	   = 1 << 1,
	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP  = 1 << 2,
	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
};

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};
enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_UP		= 1,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};
struct mlx5_field_desc {
	struct dentry	       *dent;
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[0];
};
enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
};

enum mlx5_port_status {
	MLX5_PORT_UP	= 1,
	MLX5_PORT_DOWN	= 2,
};
struct mlx5_bfreg_info {
	int			num_low_latency_bfregs;
	/* protect bfreg allocation data structs */
	struct mutex		lock;
	u32			num_static_sys_pages;
	u32			total_num_bfregs;
};
struct mlx5_cmd_first {
	__be32			data[4];
};

struct mlx5_cmd_msg {
	struct list_head	 list;
	struct cmd_msg_cache	*parent;
	struct mlx5_cmd_first	 first;
	struct mlx5_cmd_mailbox *next;
};
struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	struct dentry	       *dbg_in;
	struct dentry	       *dbg_out;
	struct dentry	       *dbg_outlen;
	struct dentry	       *dbg_status;
	struct dentry	       *dbg_run;
};
struct cmd_msg_cache {
	/* protect block chain allocations */
	spinlock_t		lock;
	struct list_head	head;
	unsigned int		max_inbox_size;
	unsigned int		num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};
struct mlx5_cmd_stats {
	struct dentry	       *count;
	/* protect command average calculations */
	spinlock_t		lock;
};
struct mlx5_cmd {
	dma_addr_t		alloc_dma;
	/* protect command queue allocations */
	spinlock_t		alloc_lock;
	/* protect token allocations */
	spinlock_t		token_lock;
	unsigned long		bitmask;
	char			wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore	sem;
	struct semaphore	pages_sem;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool	       *pool;
	struct mlx5_cmd_debug	dbg;
	struct cmd_msg_cache	cache[MLX5_NUM_COMMAND_CACHES];
	int			checksum_disabled;
	struct mlx5_cmd_stats	stats[MLX5_CMD_OP_MAX];
};
struct mlx5_port_caps {
	int			gid_table_len;
	int			pkey_table_len;
	u8			ext_port_cap;
	bool			has_smi;
};

struct mlx5_cmd_mailbox {
	void		       *buf;
	dma_addr_t		dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list   *frags;
	int			npages;
	int			size;
	u8			page_shift;
};

struct mlx5_frag_buf_ctrl {
	struct mlx5_buf_list   *frags;
	u32			sz_m1;
	u16			frag_sz_m1;
	u16			strides_offset;
	u8			log_sz;
	u8			log_stride;
	u8			log_frag_strides;
};
struct mlx5_core_psv {
	u32			psv_idx;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err	err_item;
	bool			sig_status_checked;
};

struct mlx5_core_mkey {
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
};
#define MLX5_24BIT_MASK ((1 << 24) - 1)

enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_DCT	= MLX5_EVENT_QUEUE_TYPE_DCT,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	atomic_t		refcount;
	struct completion	free;
};
struct mlx5_uars_page {
	void __iomem	       *map;
	bool			wc;
	u32			index;
	struct list_head	list;
	unsigned int		bfregs;
	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
	unsigned long	       *fp_bitmap;
	unsigned int		reg_avail;
	unsigned int		fp_avail;
	struct kref		ref_count;
	struct mlx5_core_dev   *mdev;
};

struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex		lock;
	struct list_head	list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head	reg_head;
	struct mlx5_bfreg_head	wc_head;
};

struct mlx5_sq_bfreg {
	void __iomem	       *map;
	struct mlx5_uars_page  *up;
	bool			wc;
	u32			index;
	unsigned int		offset;
};
struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	/* wq spinlock to synchronize draining */
	spinlock_t			wq_lock;
	struct workqueue_struct	       *wq;
	struct work_struct		work;
	struct delayed_work		recover_work;
};
struct mlx5_qp_table {
	struct notifier_block	nb;

	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_mkey_table {
	/* protect radix tree */
	rwlock_t		lock;
	struct radix_tree_root	tree;
};
struct mlx5_vf_context {
	int			enabled;
	u64			port_guid;
	u64			node_guid;
	enum port_state_policy	policy;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context *vfs_ctx;
	int			num_vfs;
	int			enabled_vfs;
};
struct mlx5_fc_stats {
	spinlock_t counters_idr_lock; /* protects counters_idr */
	struct idr counters_idr;
	struct list_head counters;
	struct llist_head addlist;
	struct llist_head dellist;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
};
struct mlx5_eq_table;

struct mlx5_rate_limit {
	u32			rate;
	u32			max_burst_sz;
	u16			typical_pkt_sz;
};

struct mlx5_rl_entry {
	struct mlx5_rate_limit	rl;
	u16			index;
	u16			refcount;
};

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex		rl_lock;
	u16			max_size;
	u32			max_rate;
	u32			min_rate;
	struct mlx5_rl_entry   *rl_entry;
};
struct mlx5_priv {
	char			name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table   *eq_table;

	/* pages stuff */
	struct mlx5_nb		pg_nb;
	struct workqueue_struct *pg_wq;
	struct rb_root		page_root;
	struct list_head	free_list;

	struct mlx5_core_health health;

	/* start: qp stuff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: mkey stuff */
	struct mlx5_mkey_table	mkey_table;
	/* end: mkey stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex		alloc_mutex;

	struct mutex		pgdir_mutex;
	struct list_head	pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;

	struct list_head	dev_list;
	struct list_head	ctx_list;

	struct mlx5_events     *events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs       *mpfs;
	struct mlx5_eswitch    *eswitch;
	struct mlx5_core_sriov	sriov;
	struct mlx5_lag	       *lag;
	struct mlx5_devcom     *devcom;
	unsigned long		pci_dev_data;
	struct mlx5_fc_stats	fc_stats;
	struct mlx5_rl_table	rl_table;

	struct mlx5_bfreg_data	bfregs;
	struct mlx5_uars_page  *uar;
};
enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};
struct mlx5_td {
	struct list_head	tirs_list;
	u32			tdn;
};

struct mlx5e_resources {
	u32			pdn;
	struct mlx5_td		td;
	struct mlx5_core_mkey	mkey;
	struct mlx5_sq_bfreg	bfreg;
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int		start;
	unsigned int		count;
	struct ida		ida;
};
#define MAX_PIN_NUM 8
struct mlx5_pps {
	u8			pin_caps[MAX_PIN_NUM];
	struct work_struct	out_work;
	u64			start[MAX_PIN_NUM];
	u8			enabled;
};

struct mlx5_clock {
	struct mlx5_core_dev   *mdev;
	struct mlx5_nb		pps_nb;
	seqlock_t		lock;
	struct cyclecounter	cycles;
	struct timecounter	tc;
	struct hwtstamp_config	hwtstamp_config;
	unsigned long		overflow_period;
	struct delayed_work	overflow_work;
	struct ptp_clock       *ptp;
	struct ptp_clock_info	ptp_info;
	struct mlx5_pps		pps_info;
};
struct mlx5_fw_tracer;

struct mlx5_core_dev {
	struct pci_dev	       *pdev;
	/* sync pci state */
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
	struct {
		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
		u8  embedded_cpu;
	} caps;
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	phys_addr_t		bar_addr;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	struct mlx5_priv	priv;
	struct mlx5_profile    *profile;
	struct mlx5e_resources	mlx5e_res;
	struct mlx5_vxlan      *vxlan;
	struct mlx5_rsvd_gids	reserved_gids;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
	struct mlx5_clock	clock;
	struct mlx5_ib_clock_info *clock_info;
	struct page	       *clock_info_page;
	struct mlx5_fw_tracer  *tracer;
};

struct mlx5_db {
	__be32		       *db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	} u;
	dma_addr_t		dma;
	int			index;
};
enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};
struct mlx5_cmd_work_ent {
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	mlx5_cmd_cbk_t		callback;
	struct delayed_work	cb_timeout_work;
	struct completion	done;
	struct mlx5_cmd	       *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
};
enum phy_port_state {
	MLX5_AAA_111
};

struct mlx5_hca_vport_context {
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	u8			init_type_reply; /* bitmask: see IB spec 14.2.5.6 InitTypeReply */
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
};
static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
{
	return buf->frags->buf + offset;
}

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
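
/* Editor's sketch, not part of the original header: STRUCT_FIELD is meant
 * for ib_pack()-style field tables, pairing a wire-format field with its
 * location inside an unpacked struct. Assuming struct ib_unpacked_lrh and
 * struct ib_field from <rdma/ib_pack.h>, an entry might look like:
 *
 *	static const struct ib_field lrh_table[] = {
 *		{ STRUCT_FIELD(lrh, virtual_lane),
 *		  .offset_words = 0,
 *		  .offset_bits  = 0,
 *		  .size_bits    = 4 },
 *	};
 */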
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
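
/* Editor's sketch, not part of the original header: the three helpers above
 * decode the 16-bit fields of the firmware revision, conventionally printed
 * as "major.minor.subminor". A hypothetical formatter:
 */
static inline void mlx5_example_fw_ver_str(struct mlx5_core_dev *dev,
					   char *buf, size_t len)
{
	/* e.g. "16.25.1020" */
	snprintf(buf, len, "%u.%u.%u",
		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
}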
static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}
static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
					u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{
	fbc->frags		= frags;
	fbc->log_stride		= log_stride;
	fbc->log_sz		= log_sz;
	fbc->sz_m1		= (1 << fbc->log_sz) - 1;
	fbc->log_frag_strides	= PAGE_SHIFT - fbc->log_stride;
	fbc->frag_sz_m1		= (1 << fbc->log_frag_strides) - 1;
	fbc->strides_offset	= strides_offset;
}

static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
				 u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{
	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}

static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{
	unsigned int frag;

	ix  += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

	return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}

static inline u32
mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{
	u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;

	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}
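
/* Editor's sketch, not part of the original header: the frag-buf control
 * helpers above describe a queue of 2^log_sz strides of 2^log_stride bytes
 * laid out across page-sized fragments, then resolve a stride index to a
 * kernel virtual address. mlx5_example_get_stride is a hypothetical name.
 */
static inline void *mlx5_example_get_stride(struct mlx5_frag_buf *buf,
					    struct mlx5_frag_buf_ctrl *fbc,
					    u32 ix)
{
	/* e.g. 256 strides (log_sz = 8) of 64 bytes each (log_stride = 6) */
	mlx5_init_fbc(buf->frags, 6, 8, fbc);

	return mlx5_frag_buf_get_wqe(fbc, ix);
}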
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

struct mlx5_async_ctx {
	struct mlx5_core_dev   *dev;
	atomic_t		num_inflight;
	struct wait_queue_head	wait;
};
struct mlx5_async_work;

typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);

struct mlx5_async_work {
	struct mlx5_async_ctx *ctx;
	mlx5_async_cbk_t user_callback;
};

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx);
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
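
/* Editor's sketch, not part of the original header: the usual calling
 * convention for mlx5_cmd_exec() builds the command with the MLX5_SET/
 * MLX5_GET accessors over the mlx5_ifc layouts pulled in through
 * <linux/mlx5/device.h>. This mirrors how a PD gets allocated;
 * mlx5_example_alloc_pd is a hypothetical name.
 */
static inline int mlx5_example_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
	int err;

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*pdn = MLX5_GET(alloc_pd_out, out, pd);
	return err;
}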
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_frag_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
		   int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
			     struct mlx5_core_mkey *mkey,
			     struct mlx5_async_ctx *async_ctx, u32 *in,
			     int inlen, u32 *out, int outlen,
			     mlx5_async_cbk_t callback,
			     struct mlx5_async_work *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);

void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
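
/* Editor's sketch, not part of the original header: access registers from
 * the MLX5_REG_* table above travel through mlx5_core_access_reg(). Reading
 * the per-port MTU register could look like this, assuming the pmtu_reg
 * layout and its local_port/max_mtu fields from mlx5_ifc.
 */
static inline int mlx5_example_query_max_mtu(struct mlx5_core_dev *dev,
					     u16 *max_mtu, u8 port)
{
	u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
	int err;

	MLX5_SET(pmtu_reg, in, local_port, port);
	/* arg = 0, write = 0: a plain read access */
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_PMTU, 0, 0);
	if (!err)
		*max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
	return err;
}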
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error);
#endif
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
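
/* Editor's sketch, not part of the original header: a send-queue blue flame
 * register is typically acquired once at setup and released with
 * mlx5_free_bfreg() on teardown. mlx5_example_bfreg_setup is a hypothetical
 * name.
 */
static inline int mlx5_example_bfreg_setup(struct mlx5_core_dev *mdev,
					   struct mlx5_sq_bfreg *bfreg)
{
	/* non write-combining, regular (not fast path) register */
	return mlx5_alloc_bfreg(mdev, bfreg, false, false);
}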
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
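
/* Editor's note, a sketch not in the original header: an mkey packs the
 * 24-bit index into bits [31:8] and an 8-bit variant into bits [7:0], so:
 *
 *	mlx5_idx_to_mkey(mlx5_mkey_to_idx(mkey)) == mlx5_base_mkey(mkey)
 *	mlx5_base_mkey(mkey) | mlx5_mkey_variant(mkey) == mkey
 */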
enum {
	MLX5_PROF_MASK_QP_SIZE	= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE	= (u64)1 << 1,
};

enum {
	MR_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};

enum {
	MLX5_INTERFACE_PROTOCOL_IB  = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
};
struct mlx5_interface {
	void *			(*add)(struct mlx5_core_dev *dev);
	void			(*remove)(struct mlx5_core_dev *dev, void *context);
	int			(*attach)(struct mlx5_core_dev *dev, void *context);
	void			(*detach)(struct mlx5_core_dev *dev, void *context);
	int			protocol;
	struct list_head	list;
};

int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
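
/* Editor's sketch with hypothetical names, not part of the original header:
 * a sub-driver binds to every mlx5 device by registering an mlx5_interface.
 * ->add() runs once per device, and whatever it returns is handed back as
 * "context" to the remaining callbacks.
 *
 *	static void *example_add(struct mlx5_core_dev *dev)
 *	{
 *		return kzalloc(sizeof(struct example_priv), GFP_KERNEL);
 *	}
 *
 *	static void example_remove(struct mlx5_core_dev *dev, void *context)
 *	{
 *		kfree(context);
 *	}
 *
 *	static struct mlx5_interface example_intf = {
 *		.add	  = example_add,
 *		.remove	  = example_remove,
 *		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
 *	};
 *
 *	mlx5_register_interface(&example_intf);
 */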
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params);

struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};
enum {
	MLX5_PCI_DEV_IS_VF = 1 << 0,
};

static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}

static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu;
}

static inline bool mlx5_core_is_ecpf_esw_manager(struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}

#define MLX5_HOST_PF_MAX_VFS (127u)
static inline u16 mlx5_core_max_vfs(struct mlx5_core_dev *dev)
{
	if (mlx5_core_is_ecpf_esw_manager(dev))
		return MLX5_HOST_PF_MAX_VFS;

	return pci_sriov_get_totalvfs(dev->pdev);
}
static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}
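
/* Editor's note, not part of the original header: param encodes the table
 * length as 8 * 2^param, so e.g. param == 3 yields a 64-entry GID table.
 */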
static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
	       MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_mp_slave(dev) ||
	       mlx5_core_is_mp_master(dev);
}

static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_mp_enabled(dev))
		return 1;

	return MLX5_CAP_GEN(dev, native_port_num);
}
enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

#endif /* MLX5_DRIVER_H */