/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>

#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu_trace.h>

/* IPv4 'more fragments' flag bit */
#define IPV4_FLAG_MORE				0x20

#define PCI_DEVID_OCTEONTX2_RVU_PF		0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF		0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF		0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF		0xB200
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF	0xBD00

#define PCI_CFG_REG_BAR_NUM			2
#define PCI_MBOX_BAR_NUM			4

/* Max priority supported for PFC */
#define NIX_PF_PFC_PRIO_MAX			8

enum arua_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START			0x00
#define NIX_LF_CINT_VEC_START			0x40
#define NIX_LF_GINT_VEC				0x80
#define NIX_LF_ERR_VEC				0x81
#define NIX_LF_POISON_VEC			0x82

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID				2000

#define OTX2_GET_RX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
#define OTX2_GET_TX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
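
/* Usage sketch (illustrative): both macros expect an 'otx2_nic *pfvf' in
 * scope, as in the driver's stats-refresh path; the stat index names come
 * from the nix_stat_lf_* enums below:
 *
 *	pfvf->hw.dev_stats.rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
 *	pfvf->hw.dev_stats.tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
 */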

struct otx2_lmt_info {
	u64 lmt_addr;
	u16 lmt_id;
};

/* RSS configuration */
struct otx2_rss_ctx {
	u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};

struct otx2_rss_info {
#define RSS_HASH_KEY_SIZE	44 /* 352-bit key */
	u8 key[RSS_HASH_KEY_SIZE];
	struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};

enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,
};

enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,
};

struct otx2_dev_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};

struct mbox {
	struct otx2_mbox mbox;
	struct work_struct mbox_wrk;
	struct otx2_mbox mbox_up;
	struct work_struct mbox_up_wrk;
	struct otx2_nic *pfvf;
	void *bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex lock; /* serialize mailbox access */
	int num_msgs; /* mbox number of messages */
	int up_num_msgs; /* mbox_up number of messages */
};

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT		0x0FULL
#define MAX_BURST_MANTISSA		0xFFULL
#define MAX_BURST_SIZE			130816ULL
#define MAX_RATE_DIVIDER_EXPONENT	12ULL
#define MAX_RATE_EXPONENT		0x0FULL
#define MAX_RATE_MANTISSA		0xFFULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)
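
/* Packing sketch (illustrative, assumes <linux/bitfield.h>): a rate/burst
 * pair is composed into one PIR register value from the fields above:
 *
 *	u64 regval = FIELD_PREP(TLX_RATE_EXPONENT, exp) |
 *		     FIELD_PREP(TLX_RATE_MANTISSA, mantissa) |
 *		     FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
 *		     FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
 *		     FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa);
 */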

struct otx2_hw {
	struct pci_dev *pdev;
	struct otx2_rss_info rss_info;

	u16 non_qos_queues; /* tx queues plus xdp queues */

#define OTX2_DEFAULT_RBUF_LEN	2048

	u32 stack_pg_ptrs;  /* No of ptrs per stack page */
	u32 stack_pg_bytes; /* Size of stack page */

	u8 txschq_link_cfg_lvl;
	u8 txschq_aggr_lvl_rr_prio;
	u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	u16 matchall_ipolicer;

	/* HW settings, coalescing etc. */
	u8 cint_cnt;     /* CQ interrupt count */
	u16 npa_msixoff; /* Offset of NPA vectors */
	u16 nix_msixoff; /* Offset of NIX vectors */

	cpumask_var_t *affinity_mask;

	struct otx2_dev_stats dev_stats;
	struct otx2_drv_stats drv_stats;
	u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
	u64 cgx_fec_corr_blks;
	u64 cgx_fec_uncorr_blks;
	u8 cgx_links;  /* No. of CGX links present in HW */
	u8 lbk_links;  /* No. of LBK links present in HW */
	u8 tx_link;    /* Transmit channel link number */

#define CN10K_LMTST		2
#define CN10K_PTP_ONESTEP	4
#define CN10K_HW_MACSEC		5
#define QOS_CIR_PIR_SUPPORT	6
	unsigned long cap_flag;

#define LMT_LINE_SIZE		128
#define LMT_BURST_SIZE		32 /* 32 LMTST lines for burst SQE flush */

	struct otx2_lmt_info __percpu *lmt_info;
};

struct otx2_vf_config {
	struct otx2_nic *pf;
	struct delayed_work link_event_work;
	bool intf_down; /* true when the VF interface is down */
	u8 mac[ETH_ALEN];
	u16 vlan;
	bool trusted;
};

struct flr_work {
	struct work_struct work;
	struct otx2_nic *pf;
};

struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
	struct napi_struct *napi;
};

/* PTPv2 originTimestamp structure */
struct ptpv2_tstamp {
	__be16 seconds_msb; /* 16 bits + */
	__be32 seconds_lsb; /* 32 bits = 48 bits */
	__be32 nanoseconds;
} __packed;

struct otx2_ptp {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct otx2_nic *nic;

	struct cyclecounter cycle_counter;
	struct timecounter time_counter;

	struct delayed_work extts_work;
	u64 last_extts;
	u64 thresh;

	struct ptp_pin_desc extts_config;
	u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
	u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
	u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp);
	struct delayed_work synctstamp_work;
	u64 tstamp;
	u32 base_ns;
};

#define OTX2_HW_TIMESTAMP_LEN	8

struct otx2_mac_table {
	u8 addr[ETH_ALEN];
	u16 mcam_entry;
	bool inuse;
};

struct otx2_flow_config {
	u16 *flow_ent;
	u16 *def_ent;
	u16 nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT		16
#define OTX2_MAX_UNICAST_FLOWS		8
#define OTX2_MAX_VLAN_FLOWS		1
#define OTX2_MAX_TC_FLOWS		OTX2_DEFAULT_FLOWCOUNT
#define OTX2_MCAM_COUNT			(OTX2_DEFAULT_FLOWCOUNT + \
					 OTX2_MAX_UNICAST_FLOWS + \
					 OTX2_MAX_VLAN_FLOWS)
#define OTX2_PER_VF_VLAN_FLOWS		2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX		0
#define OTX2_VF_VLAN_TX_INDEX		1
	u32 *bmap_to_dmacindex;
	unsigned long *dmacflt_bmap;
	struct list_head flow_list;
	u32 dmacflt_max_flows;
	u16 max_flows;
	struct list_head flow_list_tc;
};

struct dev_hw_ops {
	int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
	void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
			  int size, int qidx);
	int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
	void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
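
/* Dispatch sketch (illustrative; 'otx2_hw_ops'/'cn10k_hw_ops' are table
 * names in the style of this driver's per-silicon sources): probe-time
 * code points hw_ops at the table matching the detected capabilities:
 *
 *	pfvf->hw_ops = test_bit(CN10K_LMTST, &pfvf->hw.cap_flag) ?
 *		       &cn10k_hw_ops : &otx2_hw_ops;
 */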

#define CN10K_MCS_SA_PER_SC	4

/* Stats which need to be accumulated in software because
 * of shared counters in hardware.
 */
struct cn10k_txsc_stats {
	u64 InPktsUntagged;
	u64 InPktsNoTag;
	u64 InPktsBadTag;
	u64 InPktsUnknownSCI;
	u64 InPktsNoSCI;
	u64 InPktsOverrun;
};

struct cn10k_rxsc_stats {
	u64 InOctetsValidated;
	u64 InOctetsDecrypted;
	u64 InPktsUnchecked;
	u64 InPktsDelayed;
	u64 InPktsOK;
	u64 InPktsInvalid;
	u64 InPktsLate;
	u64 InPktsNotValid;
	u64 InPktsNotUsingSA;
	u64 InPktsUnusedSA;
};

struct cn10k_mcs_txsc {
	struct macsec_secy *sw_secy;
	struct cn10k_txsc_stats stats;
	struct list_head entry;
	enum macsec_validation_type last_validate_frames;
	bool last_replay_protect;
	u16 hw_secy_id_tx;
	u16 hw_secy_id_rx;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 encoding_sa;
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
	bool vlan_dev; /* macsec running on a VLAN? */
};

struct cn10k_mcs_rxsc {
	struct macsec_secy *sw_secy;
	struct macsec_rx_sc *sw_rxsc;
	struct cn10k_rxsc_stats stats;
	struct list_head entry;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
};

struct cn10k_mcs_cfg {
	struct list_head txsc_list;
	struct list_head rxsc_list;
};

struct otx2_nic {
	void __iomem *reg_base;
	struct net_device *netdev;
	struct dev_hw_ops *hw_ops;

	u16 rbsize; /* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED		BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED		BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN			BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC		BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT		BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT		BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT		BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT		BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN			BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED		BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED		BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT		BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED	BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED	BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT		BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC		BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED	BIT_ULL(16)
	u64 flags;

	struct bpf_prog *xdp_prog;
	struct otx2_qset qset;
	struct pci_dev *pdev;

	struct mbox *mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;

	u16 pcifunc; /* RVU PF_FUNC */
	u16 bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config *vf_configs;
	struct cgx_link_user_info linfo;

	struct otx2_flow_config *flow_cfg;
	struct otx2_mac_table *mac_table;

	struct work_struct reset_task;
	struct workqueue_struct *flr_wq;
	struct flr_work *flr_wrk;
	struct refill_work *refill_wrk;
	struct workqueue_struct *otx2_wq;
	struct work_struct rx_mode_work;

	/* Block address of NIX: either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int nix_blkaddr;
	/* LMTST Lines info */
	struct qmem *dync_lmt;

	struct otx2_ptp *ptp;
	struct hwtstamp_config tstamp;

	unsigned long rq_bmap;

	struct otx2_devlink *dl;

	u8 *queue_to_pfc_map;
	u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];

	/* napi event count. It is needed for adaptive irq coalescing. */
	u32 napi_events;

#if IS_ENABLED(CONFIG_MACSEC)
	struct cn10k_mcs_cfg *macsec_cfg;
#endif
};

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bits 2..3: major pass,
 * bits 4..7: midr id.
 */
#define PCI_REVISION_ID_96XX		0x00
#define PCI_REVISION_ID_95XX		0x10
#define PCI_REVISION_ID_95XXN		0x20
#define PCI_REVISION_ID_98XX		0x30
#define PCI_REVISION_ID_95XXMM		0x40
#define PCI_REVISION_ID_95XXO		0xE0

static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	u8 midr = pdev->revision & 0xF0;

	return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
		midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
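
/* Decoding sketch (illustrative): with the layout above, the pass and
 * silicon fields fall out of the revision byte as:
 *
 *	u8 minor_pass = pdev->revision & 0x3;		// bits 1:0
 *	u8 major_pass = (pdev->revision >> 2) & 0x3;	// bits 3:2
 *	u8 midr       = pdev->revision & 0xF0;		// e.g. PCI_REVISION_ID_96XX
 */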

static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{
	return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}

static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	__set_bit(HW_TSO, &hw->cap_flag);

	if (is_96xx_A0(pfvf->pdev)) {
		__clear_bit(HW_TSO, &hw->cap_flag);

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue, this silicon requires a minimum of
		 * 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
	if (is_96xx_B0(pfvf->pdev))
		__clear_bit(HW_TSO, &hw->cap_flag);

	if (!is_dev_otx2(pfvf->pdev)) {
		__set_bit(CN10K_MBOX, &hw->cap_flag);
		__set_bit(CN10K_LMTST, &hw->cap_flag);
		__set_bit(CN10K_RPM, &hw->cap_flag);
		__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
		__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
	}

	if (is_dev_cn10kb(pfvf->pdev))
		__set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
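
/* Consumers then test these capability bits at runtime, e.g. (sketch):
 *
 *	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
 *		...	// take the LMTST-based fast path
 */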

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}

/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
	 * prepare all mbox messages in bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}
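
/* Note: this sync is typically run on the mbox response path, before any
 * header parsing, so that messages are walked from the cacheable bounce
 * copy rather than directly from device mbox memory.
 */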

/* In the absence of an API for 128-bit IO memory access on arm64,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu		generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}
#else
#define otx2_write128(lo, hi, addr)	writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr)	({ *ptr += incr; })
#endif

static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
					u64 *ptrs, u64 num_ptrs)
{
	struct otx2_lmt_info *lmt_info;
	u64 size = 0, count_eot = 0;
	u64 tar_addr, val = 0;

	lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
	tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
	/* LMTID is same as AURA Id */
	val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
	/* Set if [127:64] of last 128bit word has a valid pointer */
	count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
	/* Set AURA ID to free pointer */
	ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
	/* Target address for LMTST flush tells HW how many 128bit
	 * words are valid from NPA_LF_AURA_BATCH_FREE0.
	 *
	 * tar_addr[6:4] is LMTST size-1 in units of 128b.
	 */
	size = (sizeof(u64) * num_ptrs) / 16;
	if (num_ptrs % 2)
		size++;
	tar_addr |= ((size - 1) & 0x7) << 4;

	dma_wmb();
	memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
	/* Perform LMTST flush */
	cn10k_lmt_flush(val, tar_addr);
}
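
/* Worked example: num_ptrs = 2 gives size = (8 * 2) / 16 = 1, so
 * tar_addr[6:4] = 0 (one valid 128-bit word) and count_eot = 1, i.e.
 * ptrs[1] in bits [127:64] of that word holds a valid pointer.
 */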

static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	u64 ptrs[2];

	ptrs[1] = buf;
	get_cpu();
	/* Free only one buffer at a time during init and teardown */
	__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
	put_cpu();
}

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);

	otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}

static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed.
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
									\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req));	\
	return req;							\
}

MBOX_MESSAGES
#undef M

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);

MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
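
/* Request flow sketch (illustrative, error handling trimmed): the M()
 * expansion above provides one allocator per mbox message, e.g. for the
 * NIX LF alloc message:
 *
 *	mutex_lock(&pfvf->mbox.lock);
 *	req = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
 *	if (req) {
 *		// fill request fields here, then send and wait
 *		err = otx2_sync_mbox_msg(&pfvf->mbox);
 *	}
 *	mutex_unlock(&pfvf->mbox.lock);
 */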

/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT		(100 * HZ)

#define RVU_PFVF_PF_SHIFT	10
#define RVU_PFVF_PF_MASK	0x3F
#define RVU_PFVF_FUNC_SHIFT	0
#define RVU_PFVF_FUNC_MASK	0x3FF

static inline bool is_otx2_vf(u16 pcifunc)
{
	return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}

static inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
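
/* Worked example: pcifunc 0x0801 decodes as FUNC = 0x0801 & 0x3FF = 1
 * and PF = (0x0801 >> 10) & 0x3F = 2; a nonzero FUNC denotes a VF, so
 * is_otx2_vf(0x0801) is true and rvu_get_pf(0x0801) returns 2.
 */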

static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
	u16 smq;

	if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
		return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];

	/* check if qidx falls under QOS queues */
	if (qidx >= pfvf->hw.non_qos_queues)
		smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
	else
		smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];

	return smq;
}

static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
{
	return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
}
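
/* Queue index layout implied by the helpers above (sketch): indexes
 * [0, tx_queues) are regular SQs, [tx_queues, non_qos_queues) are XDP
 * SQs, and [non_qos_queues, non_qos_queues + tc_tx_queues) are QoS SQs
 * resolved through qos.qid_to_sqmap[].
 */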

static inline u64 otx2_convert_rate(u64 rate)
{
	u64 converted_rate;

	/* Convert bytes per second to Mbps */
	converted_rate = rate * 8;
	converted_rate = max_t(u64, converted_rate / 1000000, 1);

	return converted_rate;
}
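
/* Worked example: 125,000,000 bytes/s * 8 = 1,000,000,000 bits/s, and
 * 1,000,000,000 / 1,000,000 = 1000 Mbps; rates below 125,000 bytes/s
 * clamp to the 1 Mbps floor.
 */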

static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
{
	/* return here if MCAM entries are not allocated */
	if (!pfvf->flow_cfg)
		return 0;

	return pfvf->flow_cfg->nr_flows;
}

void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
void otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
		   int stack_pages, int numptrs, int buf_size, int type);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);

/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
		       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
				   netdev_features_t features);
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    u64 iova, int size);

/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data);
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);

/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);

void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);

#if IS_ENABLED(CONFIG_MACSEC)
/* MACSEC offload support */
int cn10k_mcs_init(struct otx2_nic *pfvf);
void cn10k_mcs_free(struct otx2_nic *pfvf);
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
#else
static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
					  struct mcs_intr_info *event)
{}
#endif /* CONFIG_MACSEC */

static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
{
	struct otx2_hw *hw = &pfvf->hw;

	hw->tc_tx_queues = qos_txqs;
	INIT_LIST_HEAD(&pfvf->qos.qos_tree);
	mutex_init(&pfvf->qos.qos_lock);
}

static inline void otx2_shutdown_qos(struct otx2_nic *pfvf)
{
	mutex_destroy(&pfvf->qos.qos_lock);
}

u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
		      struct net_device *sb_dev);
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */