/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/pci.h>
#include <linux/iommu.h>

#include <mbox.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF		0xA063

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF		0xB200

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM			2
#define PCI_MBOX_BAR_NUM			4

#define NAME_SIZE				32

enum aura_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START			0x00
#define NIX_LF_CINT_VEC_START			0x40
#define NIX_LF_GINT_VEC				0x80
#define NIX_LF_ERR_VEC				0x81
#define NIX_LF_POISON_VEC			0x82

/* RSS configuration */
struct otx2_rss_info {
	u8 enable;
	u32 flowkey_cfg;
	u16 rss_size;
	u8  ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
#define RSS_HASH_KEY_SIZE	44   /* 352 bit key */
	u8  key[RSS_HASH_KEY_SIZE];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_LID_LA,
	NPC_ERRLVL_LID_LB,
	NPC_ERRLVL_LID_LC,
	NPC_ERRLVL_LID_LD,
	NPC_ERRLVL_LID_LE,
	NPC_ERRLVL_LID_LF,
	NPC_ERRLVL_LID_LG,
	NPC_ERRLVL_LID_LH,
	NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};
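
/* Illustrative sketch (not in the original header): folding a received
 * (errlvl, errcode) pair from the enums above into these counters.  The
 * helper name is hypothetical; the real accounting lives in the RX path.
 * Note that errcode values repeat across levels (e.g. 0x10 is
 * ERRCODE_UNDERSIZE at NPC_ERRLVL_RE but ERRCODE_OL3_LEN at
 * NPC_ERRLVL_NIX), so the level must be checked first.
 */
static inline void __maybe_unused
otx2_example_count_rx_err(struct otx2_drv_stats *stats, u8 errlvl, u8 errcode)
{
	if (errlvl == NPC_ERRLVL_RE && errcode == ERRCODE_UNDERSIZE)
		atomic_inc(&stats->rx_undersize_errs);
	else if (errlvl == NPC_ERRLVL_RE && errcode == ERRCODE_OVERSIZE)
		atomic_inc(&stats->rx_oversize_errs);
	else if (errlvl == NPC_ERRLVL_NIX && errcode == ERRCODE_OL4_CSUM)
		atomic_inc(&stats->rx_csum_errs);
	else
		atomic_inc(&stats->rx_other_errs);
}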

struct mbox {
	struct otx2_mbox	mbox;
	struct work_struct	mbox_wrk;
	struct otx2_mbox	mbox_up;
	struct work_struct	mbox_up_wrk;
	struct otx2_nic		*pfvf;
	void			*bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex		lock;	/* serialize mailbox access */
	int			num_msgs; /* mbox number of messages */
	int			up_num_msgs; /* mbox_up number of messages */
};

struct otx2_hw {
	struct pci_dev		*pdev;
	struct otx2_rss_info	rss_info;
	u16			rx_queues;
	u16			tx_queues;
	u16			max_queues;
	u16			pool_cnt;
	u16			rqpool_cnt;
	u16			sqpool_cnt;

	/* NPA */
	u32			stack_pg_ptrs;  /* No of ptrs per stack page */
	u32			stack_pg_bytes; /* Size of stack page */
	u16			sqb_size;

	/* NIX */
	u16		txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];

	/* HW settings, coalescing etc */
	u16			rx_chan_base;
	u16			tx_chan_base;
	u16			cq_time_wait;
	u16			cq_ecount_wait;
	u16			cq_qcount_wait;
	u32			rq_skid;

	/* MSI-X */
	u8			cint_cnt; /* CQ interrupt count */
	u16			npa_msixoff; /* Offset of NPA vectors */
	u16			nix_msixoff; /* Offset of NIX vectors */
	char			*irq_name;
	cpumask_var_t		*affinity_mask;

	/* Stats */
	struct otx2_dev_stats	dev_stats;
	struct otx2_drv_stats	drv_stats;
};

struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
};

struct otx2_nic {
	void __iomem		*reg_base;
	struct net_device	*netdev;
	void			*iommu_domain;
	u16			max_frs;
	u16			rbsize; /* Receive buffer size */

#define OTX2_FLAG_INTF_DOWN			BIT_ULL(2)
	u64			flags;

	struct otx2_qset	qset;
	struct otx2_hw		hw;
	struct pci_dev		*pdev;
	struct device		*dev;

	/* Mbox */
	struct mbox		mbox;
	struct workqueue_struct *mbox_wq;

	u16			pcifunc; /* RVU PF_FUNC */
	struct cgx_link_user_info linfo;

	u64			reset_count;
	struct work_struct	reset_task;
	struct refill_work	*refill_wrk;

	/* Block address of NIX, either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int			nix_blkaddr;
};

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	if (is_96xx_A0(pfvf->pdev)) {
		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue, previous silicons required a minimum
		 * of 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
}

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u8 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}
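
/* Illustrative sketch (not part of the original header): a read-modify-write
 * built on the accessors above.  Any NIX/NPA LF CSR offset from otx2_reg.h
 * can be passed directly; otx2_get_regaddr() patches in the right block
 * address.  The helper name is hypothetical.
 */
static inline u64 __maybe_unused
otx2_example_csr_set_bits(struct otx2_nic *nic, u64 offset, u64 set_bits)
{
	u64 regval = otx2_read64(nic, offset);	/* read current value */

	otx2_write64(nic, offset, regval | set_bits); /* write back with bits set */
	return regval;	/* return the previous value */
}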

/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
	 * prepare all mbox messages in bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}

static inline void otx2_mbox_lock_init(struct mbox *mbox)
{
	mutex_init(&mbox->lock);
}

static inline void otx2_mbox_lock(struct mbox *mbox)
{
	mutex_lock(&mbox->lock);
}

static inline void otx2_mbox_unlock(struct mbox *mbox)
{
	mutex_unlock(&mbox->lock);
}

/* Since arm64 lacks an API for 128-bit IO memory access, implement the
 * required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu  generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

static inline u64 otx2_lmt_flush(uint64_t addr)
{
	u64 result = 0;

	__asm__ volatile(".cpu  generic+lse\n"
			 "ldeor xzr,%x[rf],[%[rs]]"
			 : [rf]"=r"(result)
			 : [rs]"r"(addr));
	return result;
}
#else
#define otx2_write128(lo, hi, addr)
#define otx2_atomic64_add(incr, ptr)		({ *ptr += incr; })
#define otx2_lmt_flush(addr)			({ 0; })
#endif

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
					   NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
				     int aura, s64 buf)
{
	otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
		      otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
}
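
/* Illustrative sketch (not in the original header): pop one pointer from an
 * aura and push it straight back.  Assumes the NPA ALLOC op returns 0 when
 * the aura is empty; the helper name is hypothetical.
 */
static inline bool __maybe_unused
otx2_example_aura_recycle(struct otx2_nic *pfvf, int aura)
{
	u64 buf = otx2_aura_allocptr(pfvf, aura); /* pop a buffer pointer */

	if (!buf)	/* aura had no free pointers */
		return false;
	otx2_aura_freeptr(pfvf, aura, (s64)buf);  /* push it back */
	return true;
}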

/* Update page ref count */
static inline void otx2_get_page(struct otx2_pool *pool)
{
	if (!pool->page)
		return;

	if (pool->pageref)
		page_ref_add(pool->page, pool->pageref);
	pool->pageref = 0;
	pool->page = NULL;
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}

static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
									\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	return req;							\
}

MBOX_MESSAGES
#undef M
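
/* Illustrative sketch (not in the original header) of the canonical request
 * pattern: take the mbox mutex, allocate a request with one of the
 * otx2_mbox_alloc_msg_*() helpers generated by the M() macro above, then
 * send it synchronously.  The MSIX_OFFSET message (struct msg_req) from
 * mbox.h is assumed here; the wrapper name is hypothetical.
 */
static inline int __maybe_unused
otx2_example_query_msix(struct otx2_nic *pfvf)
{
	struct msg_req *req;
	int err;

	otx2_mbox_lock(&pfvf->mbox);
	req = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox); /* generated by M() */
	if (!req) {
		otx2_mbox_unlock(&pfvf->mbox);
		return -ENOMEM;
	}
	/* Response is consumed by mbox_handler_msix_offset(), declared below */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	otx2_mbox_unlock(&pfvf->mbox);
	return err;
}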

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);

MBOX_UP_CGX_MESSAGES
#undef M

/* Time to wait before the TX watchdog kicks in */
#define OTX2_TX_TIMEOUT		(100 * HZ)

#define	RVU_PFVF_PF_SHIFT	10
#define	RVU_PFVF_PF_MASK	0x3F
#define	RVU_PFVF_FUNC_SHIFT	0
#define	RVU_PFVF_FUNC_MASK	0x3FF

static inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
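
/* Illustrative counterpart (not in the original header): the low
 * RVU_PFVF_FUNC_MASK bits of pcifunc select the function/VF within the PF.
 * The helper name is hypothetical.
 */
static inline int __maybe_unused rvu_example_get_func(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
}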

static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}
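
/* Illustrative sketch (not in the original header): map a page for receive
 * DMA and unmap it again.  The wrappers above pass DMA_ATTR_SKIP_CPU_SYNC
 * because buffers are synced explicitly where they are used.  The helper
 * name is hypothetical.
 */
static inline bool __maybe_unused
otx2_example_map_rx_page(struct otx2_nic *pfvf, struct page *page, size_t len)
{
	dma_addr_t iova = otx2_dma_map_page(pfvf, page, 0, len,
					    DMA_FROM_DEVICE);

	if (!iova)	/* mapping failed */
		return false;
	otx2_dma_unmap_page(pfvf, iova, len, DMA_FROM_DEVICE);
	return true;
}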

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			   gfp_t gfp);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);

#endif /* OTX2_COMMON_H */