/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
14 #include <linux/etherdevice.h>
15 #include <linux/sizes.h>
17 #include "rvu_struct.h"
#define MBOX_SIZE		SZ_64K

/* AF/PF: PF initiated, PF/VF VF initiated */
#define MBOX_DOWN_RX_START	0
#define MBOX_DOWN_RX_SIZE	(46 * SZ_1K)
#define MBOX_DOWN_TX_START	(MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE)
#define MBOX_DOWN_TX_SIZE	(16 * SZ_1K)
/* AF/PF: AF initiated, PF/VF PF initiated */
#define MBOX_UP_RX_START	(MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE)
#define MBOX_UP_RX_SIZE		SZ_1K
#define MBOX_UP_TX_START	(MBOX_UP_RX_START + MBOX_UP_RX_SIZE)
#define MBOX_UP_TX_SIZE		SZ_1K

/* The four regions above must exactly tile the 64KB mailbox window */
#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE
# error "incorrect mailbox area sizes"
#endif
/* Build a 'pfvfs'-wide interrupt bitmask (all-ones when pfvfs >= 64).
 * The argument is fully parenthesized so expression arguments
 * (e.g. INTR_MASK(a + b)) bind correctly inside the comparison.
 */
#define INTR_MASK(pfvfs) (((pfvfs) < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))

#define MBOX_RSP_TIMEOUT	1000 /* in ms, Time to wait for mbox response */

#define MBOX_MSG_ALIGN		16  /* Align mbox msg start to 16bytes */
/* Mailbox directions */
#define MBOX_DIR_AFPF		0  /* AF replies to PF */
#define MBOX_DIR_PFAF		1  /* PF sends messages to AF */
#define MBOX_DIR_PFVF		2  /* PF replies to VF */
#define MBOX_DIR_VFPF		3  /* VF sends messages to PF */
#define MBOX_DIR_AFPF_UP	4  /* AF sends messages to PF */
#define MBOX_DIR_PFAF_UP	5  /* PF replies to AF */
#define MBOX_DIR_PFVF_UP	6  /* PF sends messages to VF */
#define MBOX_DIR_VFPF_UP	7  /* VF replies to PF */
53 struct otx2_mbox_dev {
54 void *mbase; /* This dev's mbox region */
56 u16 msg_size; /* Total msg size to be sent */
57 u16 rsp_size; /* Total rsp size to be sure the reply is ok */
58 u16 num_msgs; /* No of msgs sent or waiting for response */
59 u16 msgs_acked; /* No of msgs for which response is received */
64 void *hwbase; /* Mbox region advertised by HW */
65 void *reg_base;/* CSR base for this dev */
66 u64 trigger; /* Trigger mbox notification */
67 u16 tr_shift; /* Mbox trigger shift */
68 u64 rx_start; /* Offset of Rx region in mbox memory */
69 u64 tx_start; /* Offset of Tx region in mbox memory */
70 u16 rx_size; /* Size of Rx region */
71 u16 tx_size; /* Size of Tx region */
72 u16 ndevs; /* The number of peers */
73 struct otx2_mbox_dev *dev;
76 /* Header which preceeds all mbox messages */
78 u16 num_msgs; /* No of msgs embedded */
81 /* Header which preceeds every msg and is also part of it */
83 u16 pcifunc; /* Who's sending this msg */
84 u16 id; /* Mbox message ID */
85 #define OTX2_MBOX_REQ_SIG (0xdead)
86 #define OTX2_MBOX_RSP_SIG (0xbeef)
87 u16 sig; /* Signature, for validating corrupted msgs */
88 #define OTX2_MBOX_VERSION (0x0001)
89 u16 ver; /* Version of msg's structure for this ID */
90 u16 next_msgoff; /* Offset of next msg within mailbox region */
91 int rc; /* Msg process'ed response code */
94 void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
95 void otx2_mbox_destroy(struct otx2_mbox *mbox);
96 int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
97 struct pci_dev *pdev, void __force *reg_base,
98 int direction, int ndevs);
99 void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
100 int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
101 int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
102 struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
103 int size, int size_rsp);
104 struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
105 struct mbox_msghdr *msg);
106 int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
107 u16 pcifunc, u16 id);
108 bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
109 const char *otx2_mbox_id2name(u16 id);
/* Allocate a request message with no response payload expected
 * (thin wrapper around otx2_mbox_alloc_msg_rsp with size_rsp = 0).
 */
static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
						      int devid, int size)
{
	return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
}
/* Mailbox message types */
#define MBOX_MSG_MASK		0xFFFF	/* ID field width in mbox_msghdr::id */
#define MBOX_MSG_INVALID	0xFFFE
#define MBOX_MSG_MAX		0xFFFF
/* X-macro listing every AF mbox message as M(name, id, req_type, rsp_type);
 * expanded elsewhere to build the message-ID enum and dispatch tables.
 */
#define MBOX_MESSAGES							\
/* Generic mbox IDs (range 0x000 - 0x1FF) */				\
M(READY,		0x001, msg_req, ready_msg_rsp)			\
M(ATTACH_RESOURCES,	0x002, rsrc_attach, msg_rsp)			\
M(DETACH_RESOURCES,	0x003, rsrc_detach, msg_rsp)			\
M(MSIX_OFFSET,		0x004, msg_req, msix_offset_rsp)		\
/* CGX mbox IDs (range 0x200 - 0x3FF) */				\
M(CGX_START_RXTX,	0x200, msg_req, msg_rsp)			\
M(CGX_STOP_RXTX,	0x201, msg_req, msg_rsp)			\
M(CGX_STATS,		0x202, msg_req, cgx_stats_rsp)			\
M(CGX_MAC_ADDR_SET,	0x203, cgx_mac_addr_set_or_get,			\
				cgx_mac_addr_set_or_get)		\
M(CGX_MAC_ADDR_GET,	0x204, cgx_mac_addr_set_or_get,			\
				cgx_mac_addr_set_or_get)		\
M(CGX_PROMISC_ENABLE,	0x205, msg_req, msg_rsp)			\
M(CGX_PROMISC_DISABLE,	0x206, msg_req, msg_rsp)			\
M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp)			\
M(CGX_STOP_LINKEVENTS,	0x208, msg_req, msg_rsp)			\
M(CGX_GET_LINKINFO,	0x209, msg_req, cgx_link_info_msg)		\
M(CGX_INTLBK_ENABLE,	0x20A, msg_req, msg_rsp)			\
M(CGX_INTLBK_DISABLE,	0x20B, msg_req, msg_rsp)			\
/* NPA mbox IDs (range 0x400 - 0x5FF) */				\
M(NPA_LF_ALLOC,		0x400, npa_lf_alloc_req, npa_lf_alloc_rsp)	\
M(NPA_LF_FREE,		0x401, msg_req, msg_rsp)			\
M(NPA_AQ_ENQ,		0x402, npa_aq_enq_req, npa_aq_enq_rsp)		\
M(NPA_HWCTX_DISABLE,	0x403, hwctx_disable_req, msg_rsp)		\
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */				\
/* TIM mbox IDs (range 0x800 - 0x9FF) */				\
/* CPT mbox IDs (range 0xA00 - 0xBFF) */				\
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */				\
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */				\
M(NIX_LF_ALLOC,		0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp)	\
M(NIX_LF_FREE,		0x8001, msg_req, msg_rsp)			\
M(NIX_AQ_ENQ,		0x8002, nix_aq_enq_req, nix_aq_enq_rsp)		\
M(NIX_HWCTX_DISABLE,	0x8003, hwctx_disable_req, msg_rsp)		\
M(NIX_TXSCH_ALLOC,	0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
M(NIX_TXSCH_FREE,	0x8005, nix_txsch_free_req, msg_rsp)		\
M(NIX_TXSCHQ_CFG,	0x8006, nix_txschq_config, msg_rsp)		\
M(NIX_STATS_RST,	0x8007, msg_req, msg_rsp)			\
M(NIX_VTAG_CFG,		0x8008, nix_vtag_config, msg_rsp)		\
M(NIX_RSS_FLOWKEY_CFG,	0x8009, nix_rss_flowkey_cfg, msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES						\
M(CGX_LINK_EVENT,	0xC00, cgx_link_info_msg, msg_rsp)
168 #define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
/* Mailbox message formats */

#define RVU_DEFAULT_PF_FUNC	0xFFFF
178 /* Generic request msg used for those mbox messages which
179 * don't send any data in the request.
182 struct mbox_msghdr hdr;
185 /* Generic rsponse msg used a ack or response for those mbox
186 * messages which doesn't have a specific rsp msg format.
189 struct mbox_msghdr hdr;
192 struct ready_msg_rsp {
193 struct mbox_msghdr hdr;
194 u16 sclk_feq; /* SCLK frequency */
197 /* Structure for requesting resource provisioning.
198 * 'modify' flag to be used when either requesting more
199 * or to detach partial of a cetain resource type.
200 * Rest of the fields specify how many of what type to
204 struct mbox_msghdr hdr;
214 /* Structure for relinquishing resources.
215 * 'partial' flag to be used when relinquishing all resources
216 * but only of a certain type. If not set, all resources of all
217 * types provisioned to the RVU function will be detached.
220 struct mbox_msghdr hdr;
#define MSIX_VECTOR_INVALID	0xFFFF	/* "no MSI-X vector assigned" */
#define MAX_RVU_BLKLF_CNT	256	/* Max LFs per RVU block */
233 struct msix_offset_rsp {
234 struct mbox_msghdr hdr;
241 u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
242 u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
243 u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
244 u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
247 /* CGX mbox message formats */
249 struct cgx_stats_rsp {
250 struct mbox_msghdr hdr;
251 #define CGX_RX_STATS_COUNT 13
252 #define CGX_TX_STATS_COUNT 18
253 u64 rx_stats[CGX_RX_STATS_COUNT];
254 u64 tx_stats[CGX_TX_STATS_COUNT];
257 /* Structure for requesting the operation for
258 * setting/getting mac address in the CGX interface
260 struct cgx_mac_addr_set_or_get {
261 struct mbox_msghdr hdr;
262 u8 mac_addr[ETH_ALEN];
265 struct cgx_link_user_info {
267 uint64_t full_duplex:1;
268 uint64_t lmac_type_id:4;
269 uint64_t speed:20; /* speed in Mbps */
270 #define LMACTYPE_STR_LEN 16
271 char lmac_type[LMACTYPE_STR_LEN];
274 struct cgx_link_info_msg {
275 struct mbox_msghdr hdr;
276 struct cgx_link_user_info link_info;
/* NPA mbox message formats */
/* NPA mailbox error codes */
enum npa_af_status {
	NPA_AF_ERR_PARAM	    = -301,
	NPA_AF_ERR_AQ_FULL	    = -302,
	NPA_AF_ERR_AQ_ENQUEUE	    = -303,
	NPA_AF_ERR_AF_LF_INVALID    = -304,
	NPA_AF_ERR_AF_LF_ALLOC	    = -305,
	NPA_AF_ERR_LF_RESET	    = -306,
};
293 /* For NPA LF context alloc and init */
294 struct npa_lf_alloc_req {
295 struct mbox_msghdr hdr;
297 int aura_sz; /* No of auras */
298 u32 nr_pools; /* No of pools */
301 struct npa_lf_alloc_rsp {
302 struct mbox_msghdr hdr;
303 u32 stack_pg_ptrs; /* No of ptrs per stack page */
304 u32 stack_pg_bytes; /* Size of stack page */
305 u16 qints; /* NPA_AF_CONST::QINTS */
308 /* NPA AQ enqueue msg */
309 struct npa_aq_enq_req {
310 struct mbox_msghdr hdr;
315 /* Valid when op == WRITE/INIT and ctype == AURA.
316 * LF fills the pool_id in aura.pool_addr. AF will translate
317 * the pool_id to pool context pointer.
319 struct npa_aura_s aura;
320 /* Valid when op == WRITE/INIT and ctype == POOL */
321 struct npa_pool_s pool;
323 /* Mask data when op == WRITE (1=write, 0=don't write) */
325 /* Valid when op == WRITE and ctype == AURA */
326 struct npa_aura_s aura_mask;
327 /* Valid when op == WRITE and ctype == POOL */
328 struct npa_pool_s pool_mask;
332 struct npa_aq_enq_rsp {
333 struct mbox_msghdr hdr;
335 /* Valid when op == READ and ctype == AURA */
336 struct npa_aura_s aura;
337 /* Valid when op == READ and ctype == POOL */
338 struct npa_pool_s pool;
342 /* Disable all contexts of type 'ctype' */
343 struct hwctx_disable_req {
344 struct mbox_msghdr hdr;
/* NIX mailbox error codes */
enum nix_af_status {
	NIX_AF_ERR_PARAM	    = -401,
	NIX_AF_ERR_AQ_FULL	    = -402,
	NIX_AF_ERR_AQ_ENQUEUE	    = -403,
	NIX_AF_ERR_AF_LF_INVALID    = -404,
	NIX_AF_ERR_AF_LF_ALLOC	    = -405,
	NIX_AF_ERR_TLX_ALLOC_FAIL   = -406,
	NIX_AF_ERR_TLX_INVALID	    = -407,
	NIX_AF_ERR_RSS_SIZE_INVALID = -408,
	NIX_AF_ERR_RSS_GRPS_INVALID = -409,
	NIX_AF_ERR_FRS_INVALID	    = -410,
	NIX_AF_ERR_RX_LINK_INVALID  = -411,
	NIX_AF_INVAL_TXSCHQ_CFG	    = -412,
	NIX_AF_SMQ_FLUSH_FAILED	    = -413,
	NIX_AF_ERR_LF_RESET	    = -414,
};
368 /* For NIX LF context alloc and init */
369 struct nix_lf_alloc_req {
370 struct mbox_msghdr hdr;
372 u32 rq_cnt; /* No of receive queues */
373 u32 sq_cnt; /* No of send queues */
374 u32 cq_cnt; /* No of completion queues */
380 u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
383 struct nix_lf_alloc_rsp {
384 struct mbox_msghdr hdr;
388 u8 rx_chan_cnt; /* total number of RX channels */
389 u8 tx_chan_cnt; /* total number of TX channels */
392 u8 mac_addr[ETH_ALEN];
395 /* NIX AQ enqueue msg */
396 struct nix_aq_enq_req {
397 struct mbox_msghdr hdr;
402 struct nix_rq_ctx_s rq;
403 struct nix_sq_ctx_s sq;
404 struct nix_cq_ctx_s cq;
405 struct nix_rsse_s rss;
406 struct nix_rx_mce_s mce;
409 struct nix_rq_ctx_s rq_mask;
410 struct nix_sq_ctx_s sq_mask;
411 struct nix_cq_ctx_s cq_mask;
412 struct nix_rsse_s rss_mask;
413 struct nix_rx_mce_s mce_mask;
417 struct nix_aq_enq_rsp {
418 struct mbox_msghdr hdr;
420 struct nix_rq_ctx_s rq;
421 struct nix_sq_ctx_s sq;
422 struct nix_cq_ctx_s cq;
423 struct nix_rsse_s rss;
424 struct nix_rx_mce_s mce;
/* Tx scheduler/shaper mailbox messages */

#define MAX_TXSCHQ_PER_FUNC	128
432 struct nix_txsch_alloc_req {
433 struct mbox_msghdr hdr;
434 /* Scheduler queue count request at each level */
435 u16 schq_contig[NIX_TXSCH_LVL_CNT]; /* No of contiguous queues */
436 u16 schq[NIX_TXSCH_LVL_CNT]; /* No of non-contiguous queues */
439 struct nix_txsch_alloc_rsp {
440 struct mbox_msghdr hdr;
441 /* Scheduler queue count allocated at each level */
442 u16 schq_contig[NIX_TXSCH_LVL_CNT];
443 u16 schq[NIX_TXSCH_LVL_CNT];
444 /* Scheduler queue list allocated at each level */
445 u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
446 u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
449 struct nix_txsch_free_req {
450 struct mbox_msghdr hdr;
451 #define TXSCHQ_FREE_ALL BIT_ULL(0)
453 /* Scheduler queue level to be freed */
455 /* List of scheduler queues to be freed */
459 struct nix_txschq_config {
460 struct mbox_msghdr hdr;
461 u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
462 #define TXSCHQ_IDX_SHIFT 16
463 #define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
464 #define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
466 #define MAX_REGS_PER_MBOX_MSG 20
467 u64 reg[MAX_REGS_PER_MBOX_MSG];
468 u64 regval[MAX_REGS_PER_MBOX_MSG];
471 struct nix_vtag_config {
472 struct mbox_msghdr hdr;
474 /* cfg_type is '0' for tx vlan cfg
475 * cfg_type is '1' for rx vlan cfg
479 /* valid when cfg_type is '0' */
481 /* tx vlan0 tag(C-VLAN) */
483 /* tx vlan1 tag(S-VLAN) */
485 /* insert tx vlan tag */
487 /* insert tx double vlan tag */
491 /* valid when cfg_type is '1' */
493 /* rx vtag type index */
497 /* rx vtag capture */
503 struct nix_rss_flowkey_cfg {
504 struct mbox_msghdr hdr;
505 int mcam_index; /* MCAM entry index to modify */
506 u32 flowkey_cfg; /* Flowkey types selected */
507 u8 group; /* RSS context or group */