1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
11 #include <linux/module.h>
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <net/page_pool/helpers.h>
56 #include <linux/align.h>
57 #include <net/netdev_queues.h>
61 #include "bnxt_hwrm.h"
63 #include "bnxt_sriov.h"
64 #include "bnxt_ethtool.h"
70 #include "bnxt_devlink.h"
71 #include "bnxt_debugfs.h"
72 #include "bnxt_hwmon.h"
74 #define BNXT_TX_TIMEOUT (5 * HZ)
75 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
78 MODULE_LICENSE("GPL");
79 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
81 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
82 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
83 #define BNXT_RX_COPY_THRESH 256
85 #define BNXT_TX_PUSH_THRESH 164
87 /* indexed by enum board_idx */
91 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
92 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
93 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
94 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
95 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
96 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
97 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
98 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
99 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
100 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
101 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
102 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
103 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
104 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
105 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
106 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
107 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
108 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
109 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
110 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
111 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
112 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
113 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
114 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
115 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
116 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
117 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
118 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
119 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
120 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
121 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
122 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
123 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
124 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
125 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
126 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
127 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
128 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
129 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
130 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
131 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
132 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
133 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
134 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
135 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
136 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
137 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
138 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
139 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
142 static const struct pci_device_id bnxt_pci_tbl[] = {
143 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
144 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
145 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
146 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
147 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
148 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
149 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
150 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
151 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
152 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
153 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
154 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
155 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
156 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
157 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
158 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
159 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
160 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
161 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
162 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
163 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
164 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
165 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
166 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
167 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
168 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
169 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
170 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
171 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
172 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
173 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
174 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
175 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
176 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
177 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
178 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
179 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
180 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
181 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
182 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
183 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
184 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
185 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
186 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
187 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
188 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
189 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
190 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
191 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
192 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
193 #ifdef CONFIG_BNXT_SRIOV
194 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
195 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
196 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
197 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
198 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
199 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
200 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
201 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
202 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
203 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
204 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
205 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
206 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
207 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
208 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
209 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
210 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
211 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
212 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
213 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
214 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
219 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
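/* HWRM request types sent by VFs that are forwarded to the PF driver
 * for handling (a sketch of intent; only part of the table is shown here).
 */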
221 static const u16 bnxt_vf_req_snif[] = {
225 HWRM_CFA_L2_FILTER_ALLOC,
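/* Async firmware events that the driver handles. */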
228 static const u16 bnxt_async_events_arr[] = {
229 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
230 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
231 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
232 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
233 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
234 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
235 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
236 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
237 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
238 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
239 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
240 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
241 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
242 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
243 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
244 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
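/* Workqueue (used by PFs) for slow-path and firmware reset work. */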
247 static struct workqueue_struct *bnxt_pf_wq;
249 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
250 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
251 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
253 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
260 .src = BNXT_IPV6_MASK_NONE,
261 .dst = BNXT_IPV6_MASK_NONE,
266 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
268 .src = cpu_to_be16(0xffff),
269 .dst = cpu_to_be16(0xffff),
273 .src = BNXT_IPV6_MASK_ALL,
274 .dst = BNXT_IPV6_MASK_ALL,
279 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
281 .src = cpu_to_be16(0xffff),
282 .dst = cpu_to_be16(0xffff),
286 .src = cpu_to_be32(0xffffffff),
287 .dst = cpu_to_be32(0xffffffff),
292 static bool bnxt_vf_pciid(enum board_idx idx)
294 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
295 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
296 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
297 idx == NETXTREME_E_P5_VF_HV);
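/* Doorbell helpers: older chips use 32-bit completion ring doorbells
 * written with writel(), P5+ chips use 64-bit NQ/CQ doorbells written
 * with bnxt_writeq().
 */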
300 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
301 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
302 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
304 #define BNXT_CP_DB_IRQ_DIS(db) \
305 writel(DB_CP_IRQ_DIS_FLAGS, db)
307 #define BNXT_DB_CQ(db, idx) \
308 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
310 #define BNXT_DB_NQ_P5(db, idx) \
311 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
314 #define BNXT_DB_NQ_P7(db, idx) \
315 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
316 DB_RING_IDX(db, idx), (db)->doorbell)
318 #define BNXT_DB_CQ_ARM(db, idx) \
319 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
321 #define BNXT_DB_NQ_ARM_P5(db, idx) \
322 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
323 DB_RING_IDX(db, idx), (db)->doorbell)
325 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
327 if (bp->flags & BNXT_FLAG_CHIP_P7)
328 BNXT_DB_NQ_P7(db, idx);
329 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
330 BNXT_DB_NQ_P5(db, idx);
335 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
337 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
338 BNXT_DB_NQ_ARM_P5(db, idx);
340 BNXT_DB_CQ_ARM(db, idx);
343 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
345 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
346 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
347 DB_RING_IDX(db, idx), db->doorbell);
352 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
354 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
358 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
360 schedule_delayed_work(&bp->fw_reset_task, delay);
363 static void __bnxt_queue_sp_work(struct bnxt *bp)
366 queue_work(bnxt_pf_wq, &bp->sp_task);
368 schedule_work(&bp->sp_task);
371 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
373 set_bit(event, &bp->sp_event);
374 __bnxt_queue_sp_work(bp);
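/* Schedule a reset when an RX ring gets out of sync: a full reset task
 * on P5+ chips, a single ring reset otherwise.
 */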
377 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
379 if (!rxr->bnapi->in_reset) {
380 rxr->bnapi->in_reset = true;
381 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
382 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
384 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
385 __bnxt_queue_sp_work(bp);
387 rxr->rx_next_cons = 0xffff;
390 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
393 struct bnxt_napi *bnapi = txr->bnapi;
398 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
399 txr->txq_index, txr->tx_hw_cons,
400 txr->tx_cons, txr->tx_prod, curr);
403 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
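/* Length hint flags for TX BDs, from the smallest (512 bytes and under)
 * to the largest (2048 bytes and over) packet sizes.
 */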
406 const u16 bnxt_lhint_arr[] = {
407 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
408 TX_BD_FLAGS_LHINT_512_TO_1023,
409 TX_BD_FLAGS_LHINT_1024_TO_2047,
410 TX_BD_FLAGS_LHINT_1024_TO_2047,
411 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
412 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
413 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
414 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
415 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
416 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
417 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
418 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
419 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
420 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
421 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
422 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
423 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
424 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
425 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
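/* Return the CFA action (hardware port ID) carried in the skb's
 * metadata dst, if any.
 */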
428 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
430 struct metadata_dst *md_dst = skb_metadata_dst(skb);
432 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
435 return md_dst->u.port_info.port_id;
438 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
441 /* Sync BD data before updating doorbell */
443 bnxt_db_write(bp, &txr->tx_db, prod);
444 txr->kick_pending = 0;
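/* Main transmit routine (.ndo_start_xmit). */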
447 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
449 struct bnxt *bp = netdev_priv(dev);
450 struct tx_bd *txbd, *txbd0;
451 struct tx_bd_ext *txbd1;
452 struct netdev_queue *txq;
455 unsigned int length, pad = 0;
456 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
458 struct pci_dev *pdev = bp->pdev;
459 struct bnxt_tx_ring_info *txr;
460 struct bnxt_sw_tx_bd *tx_buf;
463 i = skb_get_queue_mapping(skb);
464 if (unlikely(i >= bp->tx_nr_rings)) {
465 dev_kfree_skb_any(skb);
466 dev_core_stats_tx_dropped_inc(dev);
470 txq = netdev_get_tx_queue(dev, i);
471 txr = &bp->tx_ring[bp->tx_ring_map[i]];
474 free_size = bnxt_tx_avail(bp, txr);
475 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
476 /* We must have raced with NAPI cleanup */
477 if (net_ratelimit() && txr->kick_pending)
478 netif_warn(bp, tx_err, dev,
479 "bnxt: ring busy w/ flush pending!\n");
480 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
482 return NETDEV_TX_BUSY;
485 if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
489 len = skb_headlen(skb);
490 last_frag = skb_shinfo(skb)->nr_frags;
492 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
494 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
496 tx_buf->nr_frags = last_frag;
499 cfa_action = bnxt_xmit_get_cfa_action(skb);
500 if (skb_vlan_tag_present(skb)) {
501 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
502 skb_vlan_tag_get(skb);
503 /* Currently supports 8021Q, 8021AD VLAN offloads;
504 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
506 if (skb->vlan_proto == htons(ETH_P_8021Q))
507 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
510 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
511 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
513 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
514 atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
515 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
518 ptp->tx_hdr_off += VLAN_HLEN;
519 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
520 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
522 atomic_inc(&bp->ptp_cfg->tx_avail);
527 if (unlikely(skb->no_fcs))
528 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
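/* Small packets on an otherwise empty ring can use TX push mode: the
 * BDs and packet data are written directly through the doorbell BAR.
 */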
530 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
532 struct tx_push_buffer *tx_push_buf = txr->tx_push;
533 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
534 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
535 void __iomem *db = txr->tx_db.doorbell;
536 void *pdata = tx_push_buf->data;
540 /* Set COAL_NOW to be ready quickly for the next push */
541 tx_push->tx_bd_len_flags_type =
542 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
543 TX_BD_TYPE_LONG_TX_BD |
544 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
545 TX_BD_FLAGS_COAL_NOW |
546 TX_BD_FLAGS_PACKET_END |
547 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
549 if (skb->ip_summed == CHECKSUM_PARTIAL)
550 tx_push1->tx_bd_hsize_lflags =
551 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
553 tx_push1->tx_bd_hsize_lflags = 0;
555 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
556 tx_push1->tx_bd_cfa_action =
557 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
559 end = pdata + length;
560 end = PTR_ALIGN(end, 8) - 1;
563 skb_copy_from_linear_data(skb, pdata, len);
565 for (j = 0; j < last_frag; j++) {
566 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
569 fptr = skb_frag_address_safe(frag);
573 memcpy(pdata, fptr, skb_frag_size(frag));
574 pdata += skb_frag_size(frag);
577 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
578 txbd->tx_bd_haddr = txr->data_mapping;
579 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
580 prod = NEXT_TX(prod);
581 tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
582 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
583 memcpy(txbd, tx_push1, sizeof(*txbd));
584 prod = NEXT_TX(prod);
586 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
587 DB_RING_IDX(&txr->tx_db, prod));
588 WRITE_ONCE(txr->tx_prod, prod);
591 netdev_tx_sent_queue(txq, skb->len);
592 wmb(); /* Sync is_push and byte queue before pushing data */
594 push_len = (length + sizeof(*tx_push) + 7) / 8;
596 __iowrite64_copy(db, tx_push_buf, 16);
597 __iowrite32_copy(db + 4, tx_push_buf + 1,
598 (push_len - 16) << 1);
600 __iowrite64_copy(db, tx_push_buf, push_len);
607 if (length < BNXT_MIN_PKT_SIZE) {
608 pad = BNXT_MIN_PKT_SIZE - length;
609 if (skb_pad(skb, pad))
610 /* SKB already freed. */
611 goto tx_kick_pending;
612 length = BNXT_MIN_PKT_SIZE;
615 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
617 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
620 dma_unmap_addr_set(tx_buf, mapping, mapping);
621 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
622 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
624 txbd->tx_bd_haddr = cpu_to_le64(mapping);
625 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
627 prod = NEXT_TX(prod);
628 txbd1 = (struct tx_bd_ext *)
629 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
631 txbd1->tx_bd_hsize_lflags = lflags;
632 if (skb_is_gso(skb)) {
633 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
636 if (skb->encapsulation) {
638 hdr_len = skb_inner_transport_offset(skb) +
639 sizeof(struct udphdr);
641 hdr_len = skb_inner_tcp_all_headers(skb);
642 } else if (udp_gso) {
643 hdr_len = skb_transport_offset(skb) +
644 sizeof(struct udphdr);
646 hdr_len = skb_tcp_all_headers(skb);
649 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
651 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
652 length = skb_shinfo(skb)->gso_size;
653 txbd1->tx_bd_mss = cpu_to_le32(length);
655 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
656 txbd1->tx_bd_hsize_lflags |=
657 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
658 txbd1->tx_bd_mss = 0;
662 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
663 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
668 flags |= bnxt_lhint_arr[length];
669 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
671 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
672 txbd1->tx_bd_cfa_action =
673 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
675 for (i = 0; i < last_frag; i++) {
676 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
678 prod = NEXT_TX(prod);
679 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
681 len = skb_frag_size(frag);
682 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
685 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
688 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
689 dma_unmap_addr_set(tx_buf, mapping, mapping);
691 txbd->tx_bd_haddr = cpu_to_le64(mapping);
693 flags = len << TX_BD_LEN_SHIFT;
694 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
698 txbd->tx_bd_len_flags_type =
699 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
700 TX_BD_FLAGS_PACKET_END);
702 netdev_tx_sent_queue(txq, skb->len);
704 skb_tx_timestamp(skb);
706 prod = NEXT_TX(prod);
707 WRITE_ONCE(txr->tx_prod, prod);
709 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
710 bnxt_txr_db_kick(bp, txr, prod);
712 if (free_size >= bp->tx_wake_thresh)
713 txbd0->tx_bd_len_flags_type |=
714 cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
715 txr->kick_pending = 1;
720 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
721 if (netdev_xmit_more() && !tx_buf->is_push) {
722 txbd0->tx_bd_len_flags_type &=
723 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
724 bnxt_txr_db_kick(bp, txr, prod);
727 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
733 if (BNXT_TX_PTP_IS_SET(lflags))
734 atomic_inc(&bp->ptp_cfg->tx_avail);
738 /* start back at beginning and unmap skb */
740 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
741 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
742 skb_headlen(skb), DMA_TO_DEVICE);
743 prod = NEXT_TX(prod);
745 /* unmap remaining mapped pages */
746 for (i = 0; i < last_frag; i++) {
747 prod = NEXT_TX(prod);
748 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
749 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
750 skb_frag_size(&skb_shinfo(skb)->frags[i]),
755 dev_kfree_skb_any(skb);
757 if (txr->kick_pending)
758 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
759 txr->tx_buf_ring[txr->tx_prod].skb = NULL;
760 dev_core_stats_tx_dropped_inc(dev);
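/* Reclaim TX descriptors up to tx_hw_cons: unmap the buffers, free the
 * skbs and wake the queue if it was stopped.
 */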
764 static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
767 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
768 struct pci_dev *pdev = bp->pdev;
769 u16 hw_cons = txr->tx_hw_cons;
770 unsigned int tx_bytes = 0;
771 u16 cons = txr->tx_cons;
774 while (RING_TX(bp, cons) != hw_cons) {
775 struct bnxt_sw_tx_bd *tx_buf;
779 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
780 cons = NEXT_TX(cons);
784 if (unlikely(!skb)) {
785 bnxt_sched_reset_txr(bp, txr, cons);
790 tx_bytes += skb->len;
792 if (tx_buf->is_push) {
797 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
798 skb_headlen(skb), DMA_TO_DEVICE);
799 last = tx_buf->nr_frags;
801 for (j = 0; j < last; j++) {
802 cons = NEXT_TX(cons);
803 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
806 dma_unmap_addr(tx_buf, mapping),
807 skb_frag_size(&skb_shinfo(skb)->frags[j]),
810 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
811 if (BNXT_CHIP_P5(bp)) {
812 /* PTP worker takes ownership of the skb */
813 if (!bnxt_get_tx_ts_p5(bp, skb))
816 atomic_inc(&bp->ptp_cfg->tx_avail);
821 cons = NEXT_TX(cons);
823 dev_consume_skb_any(skb);
826 WRITE_ONCE(txr->tx_cons, cons);
828 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
829 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
830 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
833 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
835 struct bnxt_tx_ring_info *txr;
838 bnxt_for_each_napi_tx(i, bnapi, txr) {
839 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
840 __bnxt_tx_int(bp, txr, budget);
842 bnapi->events &= ~BNXT_TX_CMP_EVENT;
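/* Allocate an RX page (or page fragment) from the ring's page pool and
 * return its DMA address.
 */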
845 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
846 struct bnxt_rx_ring_info *rxr,
847 unsigned int *offset,
852 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
853 page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
856 page = page_pool_dev_alloc_pages(rxr->page_pool);
862 *mapping = page_pool_get_dma_addr(page) + *offset;
866 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
870 struct pci_dev *pdev = bp->pdev;
872 if (gfp == GFP_ATOMIC)
873 data = napi_alloc_frag(bp->rx_buf_size);
875 data = netdev_alloc_frag(bp->rx_buf_size);
879 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
880 bp->rx_buf_use_size, bp->rx_dir,
881 DMA_ATTR_WEAK_ORDERING);
883 if (dma_mapping_error(&pdev->dev, *mapping)) {
890 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
893 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
894 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
897 if (BNXT_RX_PAGE_MODE(bp)) {
900 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
905 mapping += bp->rx_dma_offset;
907 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
909 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
915 rx_buf->data_ptr = data + bp->rx_offset;
917 rx_buf->mapping = mapping;
919 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
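/* Recycle the RX buffer at the consumer index back into the current
 * producer slot.
 */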
923 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
925 u16 prod = rxr->rx_prod;
926 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
927 struct bnxt *bp = rxr->bnapi->bp;
928 struct rx_bd *cons_bd, *prod_bd;
930 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
931 cons_rx_buf = &rxr->rx_buf_ring[cons];
933 prod_rx_buf->data = data;
934 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
936 prod_rx_buf->mapping = cons_rx_buf->mapping;
938 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
939 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
941 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
944 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
946 u16 next, max = rxr->rx_agg_bmap_size;
948 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
950 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
954 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
955 struct bnxt_rx_ring_info *rxr,
959 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
960 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
963 u16 sw_prod = rxr->rx_sw_agg_prod;
964 unsigned int offset = 0;
966 page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
971 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
972 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
974 __set_bit(sw_prod, rxr->rx_agg_bmap);
975 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
976 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
978 rx_agg_buf->page = page;
979 rx_agg_buf->offset = offset;
980 rx_agg_buf->mapping = mapping;
981 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
982 rxbd->rx_bd_opaque = sw_prod;
986 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
987 struct bnxt_cp_ring_info *cpr,
988 u16 cp_cons, u16 curr)
990 struct rx_agg_cmp *agg;
992 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
993 agg = (struct rx_agg_cmp *)
994 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
998 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
999 struct bnxt_rx_ring_info *rxr,
1000 u16 agg_id, u16 curr)
1002 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
1004 return &tpa_info->agg_arr[curr];
1007 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
1008 u16 start, u32 agg_bufs, bool tpa)
1010 struct bnxt_napi *bnapi = cpr->bnapi;
1011 struct bnxt *bp = bnapi->bp;
1012 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1013 u16 prod = rxr->rx_agg_prod;
1014 u16 sw_prod = rxr->rx_sw_agg_prod;
1015 bool p5_tpa = false;
1018 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1021 for (i = 0; i < agg_bufs; i++) {
1023 struct rx_agg_cmp *agg;
1024 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
1025 struct rx_bd *prod_bd;
1029 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
1031 agg = bnxt_get_agg(bp, cpr, idx, start + i);
1032 cons = agg->rx_agg_cmp_opaque;
1033 __clear_bit(cons, rxr->rx_agg_bmap);
1035 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
1036 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
1038 __set_bit(sw_prod, rxr->rx_agg_bmap);
1039 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
1040 cons_rx_buf = &rxr->rx_agg_ring[cons];
1042 /* It is possible for sw_prod to be equal to cons, so
1043 * set cons_rx_buf->page to NULL first.
1045 page = cons_rx_buf->page;
1046 cons_rx_buf->page = NULL;
1047 prod_rx_buf->page = page;
1048 prod_rx_buf->offset = cons_rx_buf->offset;
1050 prod_rx_buf->mapping = cons_rx_buf->mapping;
1052 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
1054 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
1055 prod_bd->rx_bd_opaque = sw_prod;
1057 prod = NEXT_RX_AGG(prod);
1058 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
1060 rxr->rx_agg_prod = prod;
1061 rxr->rx_sw_agg_prod = sw_prod;
1064 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
1065 struct bnxt_rx_ring_info *rxr,
1066 u16 cons, void *data, u8 *data_ptr,
1067 dma_addr_t dma_addr,
1068 unsigned int offset_and_len)
1070 unsigned int len = offset_and_len & 0xffff;
1071 struct page *page = data;
1072 u16 prod = rxr->rx_prod;
1073 struct sk_buff *skb;
1076 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1077 if (unlikely(err)) {
1078 bnxt_reuse_rx_data(rxr, cons, data);
1081 dma_addr -= bp->rx_dma_offset;
1082 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1084 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
1086 page_pool_recycle_direct(rxr->page_pool, page);
1089 skb_mark_for_recycle(skb);
1090 skb_reserve(skb, bp->rx_offset);
1091 __skb_put(skb, len);
1096 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1097 struct bnxt_rx_ring_info *rxr,
1098 u16 cons, void *data, u8 *data_ptr,
1099 dma_addr_t dma_addr,
1100 unsigned int offset_and_len)
1102 unsigned int payload = offset_and_len >> 16;
1103 unsigned int len = offset_and_len & 0xffff;
1105 struct page *page = data;
1106 u16 prod = rxr->rx_prod;
1107 struct sk_buff *skb;
1110 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1111 if (unlikely(err)) {
1112 bnxt_reuse_rx_data(rxr, cons, data);
1115 dma_addr -= bp->rx_dma_offset;
1116 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1119 if (unlikely(!payload))
1120 payload = eth_get_headlen(bp->dev, data_ptr, len);
1122 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1124 page_pool_recycle_direct(rxr->page_pool, page);
1128 skb_mark_for_recycle(skb);
1129 off = (void *)data_ptr - page_address(page);
1130 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
1131 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1132 payload + NET_IP_ALIGN);
1134 frag = &skb_shinfo(skb)->frags[0];
1135 skb_frag_size_sub(frag, payload);
1136 skb_frag_off_add(frag, payload);
1137 skb->data_len -= payload;
1138 skb->tail += payload;
1143 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1144 struct bnxt_rx_ring_info *rxr, u16 cons,
1145 void *data, u8 *data_ptr,
1146 dma_addr_t dma_addr,
1147 unsigned int offset_and_len)
1149 u16 prod = rxr->rx_prod;
1150 struct sk_buff *skb;
1153 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1154 if (unlikely(err)) {
1155 bnxt_reuse_rx_data(rxr, cons, data);
1159 skb = napi_build_skb(data, bp->rx_buf_size);
1160 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1161 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1163 skb_free_frag(data);
1167 skb_reserve(skb, bp->rx_offset);
1168 skb_put(skb, offset_and_len & 0xffff);
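/* Attach the aggregation buffers of one packet to the skb or xdp_buff
 * shared info as page frags, replenishing the aggregation ring as we go.
 */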
1172 static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
1173 struct bnxt_cp_ring_info *cpr,
1174 struct skb_shared_info *shinfo,
1175 u16 idx, u32 agg_bufs, bool tpa,
1176 struct xdp_buff *xdp)
1178 struct bnxt_napi *bnapi = cpr->bnapi;
1179 struct pci_dev *pdev = bp->pdev;
1180 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1181 u16 prod = rxr->rx_agg_prod;
1182 u32 i, total_frag_len = 0;
1183 bool p5_tpa = false;
1185 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
1188 for (i = 0; i < agg_bufs; i++) {
1189 skb_frag_t *frag = &shinfo->frags[i];
1191 struct rx_agg_cmp *agg;
1192 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1197 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1199 agg = bnxt_get_agg(bp, cpr, idx, i);
1200 cons = agg->rx_agg_cmp_opaque;
1201 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1202 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1204 cons_rx_buf = &rxr->rx_agg_ring[cons];
1205 skb_frag_fill_page_desc(frag, cons_rx_buf->page,
1206 cons_rx_buf->offset, frag_len);
1207 shinfo->nr_frags = i + 1;
1208 __clear_bit(cons, rxr->rx_agg_bmap);
1210 /* It is possible for bnxt_alloc_rx_page() to allocate
1211 * a sw_prod index that equals the cons index, so we
1212 * need to clear the cons entry now.
1214 mapping = cons_rx_buf->mapping;
1215 page = cons_rx_buf->page;
1216 cons_rx_buf->page = NULL;
1218 if (xdp && page_is_pfmemalloc(page))
1219 xdp_buff_set_frag_pfmemalloc(xdp);
1221 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1223 cons_rx_buf->page = page;
1225 /* Update prod since possibly some pages have been
1226 * allocated already.
1228 rxr->rx_agg_prod = prod;
1229 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1233 dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1236 total_frag_len += frag_len;
1237 prod = NEXT_RX_AGG(prod);
1239 rxr->rx_agg_prod = prod;
1240 return total_frag_len;
1243 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
1244 struct bnxt_cp_ring_info *cpr,
1245 struct sk_buff *skb, u16 idx,
1246 u32 agg_bufs, bool tpa)
1248 struct skb_shared_info *shinfo = skb_shinfo(skb);
1249 u32 total_frag_len = 0;
1251 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
1252 agg_bufs, tpa, NULL);
1253 if (!total_frag_len) {
1254 skb_mark_for_recycle(skb);
1259 skb->data_len += total_frag_len;
1260 skb->len += total_frag_len;
1261 skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
1265 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
1266 struct bnxt_cp_ring_info *cpr,
1267 struct xdp_buff *xdp, u16 idx,
1268 u32 agg_bufs, bool tpa)
1270 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1271 u32 total_frag_len = 0;
1273 if (!xdp_buff_has_frags(xdp))
1274 shinfo->nr_frags = 0;
1276 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
1277 idx, agg_bufs, tpa, xdp);
1278 if (total_frag_len) {
1279 xdp_buff_set_frags_flag(xdp);
1280 shinfo->nr_frags = agg_bufs;
1281 shinfo->xdp_frags_size = total_frag_len;
1283 return total_frag_len;
1286 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1287 u8 agg_bufs, u32 *raw_cons)
1290 struct rx_agg_cmp *agg;
1292 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1293 last = RING_CMP(*raw_cons);
1294 agg = (struct rx_agg_cmp *)
1295 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1296 return RX_AGG_CMP_VALID(agg, *raw_cons);
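/* Copy a small received packet into a freshly allocated skb so that the
 * original RX buffer can be reused in place.
 */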
1299 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1303 struct bnxt *bp = bnapi->bp;
1304 struct pci_dev *pdev = bp->pdev;
1305 struct sk_buff *skb;
1307 skb = napi_alloc_skb(&bnapi->napi, len);
1311 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1314 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1315 len + NET_IP_ALIGN);
1317 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1324 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1325 u32 *raw_cons, void *cmp)
1327 struct rx_cmp *rxcmp = cmp;
1328 u32 tmp_raw_cons = *raw_cons;
1329 u8 cmp_type, agg_bufs = 0;
1331 cmp_type = RX_CMP_TYPE(rxcmp);
1333 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1334 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1336 RX_CMP_AGG_BUFS_SHIFT;
1337 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1338 struct rx_tpa_end_cmp *tpa_end = cmp;
1340 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1343 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1347 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1350 *raw_cons = tmp_raw_cons;
1354 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1356 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1357 u16 idx = agg_id & MAX_TPA_P5_MASK;
1359 if (test_bit(idx, map->agg_idx_bmap))
1360 idx = find_first_zero_bit(map->agg_idx_bmap,
1361 BNXT_AGG_IDX_BMAP_SIZE);
1362 __set_bit(idx, map->agg_idx_bmap);
1363 map->agg_id_tbl[agg_id] = idx;
1367 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1369 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1371 __clear_bit(idx, map->agg_idx_bmap);
1374 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1376 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1378 return map->agg_id_tbl[agg_id];
1381 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
1382 struct rx_tpa_start_cmp *tpa_start,
1383 struct rx_tpa_start_cmp_ext *tpa_start1)
1385 tpa_info->cfa_code_valid = 1;
1386 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1387 tpa_info->vlan_valid = 0;
1388 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1389 tpa_info->vlan_valid = 1;
1390 tpa_info->metadata =
1391 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1395 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
1396 struct rx_tpa_start_cmp *tpa_start,
1397 struct rx_tpa_start_cmp_ext *tpa_start1)
1399 tpa_info->vlan_valid = 0;
1400 if (TPA_START_VLAN_VALID(tpa_start)) {
1401 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
1402 u32 vlan_proto = ETH_P_8021Q;
1404 tpa_info->vlan_valid = 1;
1405 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
1406 vlan_proto = ETH_P_8021AD;
1407 tpa_info->metadata = vlan_proto << 16 |
1408 TPA_START_METADATA0_TCI(tpa_start1);
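/* Handle a TPA_START completion: move the current RX buffer into the
 * TPA info slot and record the flow metadata for the aggregation.
 */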
1412 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1413 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
1414 struct rx_tpa_start_cmp_ext *tpa_start1)
1416 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1417 struct bnxt_tpa_info *tpa_info;
1418 u16 cons, prod, agg_id;
1419 struct rx_bd *prod_bd;
1422 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1423 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1424 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1426 agg_id = TPA_START_AGG_ID(tpa_start);
1428 cons = tpa_start->rx_tpa_start_cmp_opaque;
1429 prod = rxr->rx_prod;
1430 cons_rx_buf = &rxr->rx_buf_ring[cons];
1431 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
1432 tpa_info = &rxr->rx_tpa[agg_id];
1434 if (unlikely(cons != rxr->rx_next_cons ||
1435 TPA_START_ERROR(tpa_start))) {
1436 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1437 cons, rxr->rx_next_cons,
1438 TPA_START_ERROR_CODE(tpa_start1));
1439 bnxt_sched_reset_rxr(bp, rxr);
1442 prod_rx_buf->data = tpa_info->data;
1443 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1445 mapping = tpa_info->mapping;
1446 prod_rx_buf->mapping = mapping;
1448 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
1450 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1452 tpa_info->data = cons_rx_buf->data;
1453 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1454 cons_rx_buf->data = NULL;
1455 tpa_info->mapping = cons_rx_buf->mapping;
1458 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1459 RX_TPA_START_CMP_LEN_SHIFT;
1460 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1461 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1462 tpa_info->gso_type = SKB_GSO_TCPV4;
1463 if (TPA_START_IS_IPV6(tpa_start1))
1464 tpa_info->gso_type = SKB_GSO_TCPV6;
1465 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1466 else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
1467 TPA_START_HASH_TYPE(tpa_start) == 3)
1468 tpa_info->gso_type = SKB_GSO_TCPV6;
1469 tpa_info->rss_hash =
1470 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1472 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1473 tpa_info->gso_type = 0;
1474 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1476 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1477 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1478 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
1479 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
1481 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
1482 tpa_info->agg_count = 0;
1484 rxr->rx_prod = NEXT_RX(prod);
1485 cons = RING_RX(bp, NEXT_RX(cons));
1486 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
1487 cons_rx_buf = &rxr->rx_buf_ring[cons];
1489 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1490 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1491 cons_rx_buf->data = NULL;
1494 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1497 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1501 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1503 struct udphdr *uh = NULL;
1505 if (ip_proto == htons(ETH_P_IP)) {
1506 struct iphdr *iph = (struct iphdr *)skb->data;
1508 if (iph->protocol == IPPROTO_UDP)
1509 uh = (struct udphdr *)(iph + 1);
1511 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1513 if (iph->nexthdr == IPPROTO_UDP)
1514 uh = (struct udphdr *)(iph + 1);
1518 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1520 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1525 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1526 int payload_off, int tcp_ts,
1527 struct sk_buff *skb)
1532 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1533 u32 hdr_info = tpa_info->hdr_info;
1534 bool loopback = false;
1536 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1537 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1538 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1540 /* If the packet is an internal loopback packet, the offsets will
1541 * have an extra 4 bytes.
1543 if (inner_mac_off == 4) {
1545 } else if (inner_mac_off > 4) {
1546 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1549 /* We only support inner IPv4/IPv6. If we don't see the
1550 * correct protocol ID, it must be a loopback packet where
1551 * the offsets are off by 4.
1553 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1557 /* internal loopback packet, subtract 4 from all offsets */
1563 nw_off = inner_ip_off - ETH_HLEN;
1564 skb_set_network_header(skb, nw_off);
1565 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1566 struct ipv6hdr *iph = ipv6_hdr(skb);
1568 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1569 len = skb->len - skb_transport_offset(skb);
1571 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1573 struct iphdr *iph = ip_hdr(skb);
1575 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1576 len = skb->len - skb_transport_offset(skb);
1578 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1581 if (inner_mac_off) { /* tunnel */
1582 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1585 bnxt_gro_tunnel(skb, proto);
1591 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1592 int payload_off, int tcp_ts,
1593 struct sk_buff *skb)
1596 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1597 u32 hdr_info = tpa_info->hdr_info;
1598 int iphdr_len, nw_off;
1600 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1601 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1602 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1604 nw_off = inner_ip_off - ETH_HLEN;
1605 skb_set_network_header(skb, nw_off);
1606 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1607 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1608 skb_set_transport_header(skb, nw_off + iphdr_len);
1610 if (inner_mac_off) { /* tunnel */
1611 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1614 bnxt_gro_tunnel(skb, proto);
1620 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1621 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1623 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1624 int payload_off, int tcp_ts,
1625 struct sk_buff *skb)
1629 int len, nw_off, tcp_opt_len = 0;
1634 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1637 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1639 skb_set_network_header(skb, nw_off);
1641 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1642 len = skb->len - skb_transport_offset(skb);
1644 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1645 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1646 struct ipv6hdr *iph;
1648 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1650 skb_set_network_header(skb, nw_off);
1651 iph = ipv6_hdr(skb);
1652 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1653 len = skb->len - skb_transport_offset(skb);
1655 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1657 dev_kfree_skb_any(skb);
1661 if (nw_off) /* tunnel */
1662 bnxt_gro_tunnel(skb, skb->protocol);
1667 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1668 struct bnxt_tpa_info *tpa_info,
1669 struct rx_tpa_end_cmp *tpa_end,
1670 struct rx_tpa_end_cmp_ext *tpa_end1,
1671 struct sk_buff *skb)
1677 segs = TPA_END_TPA_SEGS(tpa_end);
1681 NAPI_GRO_CB(skb)->count = segs;
1682 skb_shinfo(skb)->gso_size =
1683 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1684 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1685 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1686 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1688 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1689 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1691 tcp_gro_complete(skb);
1696 /* Given the cfa_code of a received packet, determine which
1697 * netdev (vf-rep or PF) the packet is destined to.
1699 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1701 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1703 /* if the vf-rep dev is NULL, the packet must belong to the PF */
1704 return dev ? dev : bp->dev;
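/* Handle a TPA_END completion: build the coalesced skb from the TPA
 * buffer and any aggregation buffers, and finish GRO processing if enabled.
 */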
1707 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1708 struct bnxt_cp_ring_info *cpr,
1710 struct rx_tpa_end_cmp *tpa_end,
1711 struct rx_tpa_end_cmp_ext *tpa_end1,
1714 struct bnxt_napi *bnapi = cpr->bnapi;
1715 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1716 struct net_device *dev = bp->dev;
1717 u8 *data_ptr, agg_bufs;
1719 struct bnxt_tpa_info *tpa_info;
1721 struct sk_buff *skb;
1722 u16 idx = 0, agg_id;
1726 if (unlikely(bnapi->in_reset)) {
1727 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1730 return ERR_PTR(-EBUSY);
1734 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
1735 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1736 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1737 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1738 tpa_info = &rxr->rx_tpa[agg_id];
1739 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1740 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1741 agg_bufs, tpa_info->agg_count);
1742 agg_bufs = tpa_info->agg_count;
1744 tpa_info->agg_count = 0;
1745 *event |= BNXT_AGG_EVENT;
1746 bnxt_free_agg_idx(rxr, agg_id);
1748 gro = !!(bp->flags & BNXT_FLAG_GRO);
1750 agg_id = TPA_END_AGG_ID(tpa_end);
1751 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1752 tpa_info = &rxr->rx_tpa[agg_id];
1753 idx = RING_CMP(*raw_cons);
1755 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1756 return ERR_PTR(-EBUSY);
1758 *event |= BNXT_AGG_EVENT;
1759 idx = NEXT_CMP(idx);
1761 gro = !!TPA_END_GRO(tpa_end);
1763 data = tpa_info->data;
1764 data_ptr = tpa_info->data_ptr;
1766 len = tpa_info->len;
1767 mapping = tpa_info->mapping;
1769 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1770 bnxt_abort_tpa(cpr, idx, agg_bufs);
1771 if (agg_bufs > MAX_SKB_FRAGS)
1772 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1773 agg_bufs, (int)MAX_SKB_FRAGS);
1777 if (len <= bp->rx_copy_thresh) {
1778 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1780 bnxt_abort_tpa(cpr, idx, agg_bufs);
1781 cpr->sw_stats.rx.rx_oom_discards += 1;
1786 dma_addr_t new_mapping;
1788 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1790 bnxt_abort_tpa(cpr, idx, agg_bufs);
1791 cpr->sw_stats.rx.rx_oom_discards += 1;
1795 tpa_info->data = new_data;
1796 tpa_info->data_ptr = new_data + bp->rx_offset;
1797 tpa_info->mapping = new_mapping;
1799 skb = napi_build_skb(data, bp->rx_buf_size);
1800 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1801 bp->rx_buf_use_size, bp->rx_dir,
1802 DMA_ATTR_WEAK_ORDERING);
1805 skb_free_frag(data);
1806 bnxt_abort_tpa(cpr, idx, agg_bufs);
1807 cpr->sw_stats.rx.rx_oom_discards += 1;
1810 skb_reserve(skb, bp->rx_offset);
1815 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1817 /* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
1818 cpr->sw_stats.rx.rx_oom_discards += 1;
1823 if (tpa_info->cfa_code_valid)
1824 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1825 skb->protocol = eth_type_trans(skb, dev);
1827 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1828 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1830 if (tpa_info->vlan_valid &&
1831 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1832 __be16 vlan_proto = htons(tpa_info->metadata >>
1833 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1834 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1836 if (eth_type_vlan(vlan_proto)) {
1837 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1844 skb_checksum_none_assert(skb);
1845 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1846 skb->ip_summed = CHECKSUM_UNNECESSARY;
1848 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1852 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1857 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1858 struct rx_agg_cmp *rx_agg)
1860 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1861 struct bnxt_tpa_info *tpa_info;
1863 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1864 tpa_info = &rxr->rx_tpa[agg_id];
1865 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1866 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1869 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1870 struct sk_buff *skb)
1872 skb_mark_for_recycle(skb);
1874 if (skb->dev != bp->dev) {
1875 /* this packet belongs to a vf-rep */
1876 bnxt_vf_rep_rx(bp, skb);
1879 skb_record_rx_queue(skb, bnapi->index);
1880 napi_gro_receive(&bnapi->napi, skb);
1883 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
1884 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
1886 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1888 if (BNXT_PTP_RX_TS_VALID(flags))
1890 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
1898 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
1899 struct rx_cmp *rxcmp,
1900 struct rx_cmp_ext *rxcmp1)
1905 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1906 __le32 flags2 = rxcmp1->rx_cmp_flags2;
1909 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
1912 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1913 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1914 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
1915 if (eth_type_vlan(vlan_proto))
1916 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1919 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
1920 if (RX_CMP_VLAN_VALID(rxcmp)) {
1921 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
1923 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
1924 vlan_proto = htons(ETH_P_8021Q);
1925 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
1926 vlan_proto = htons(ETH_P_8021AD);
1929 vtag = RX_CMP_METADATA0_TCI(rxcmp1);
1930 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1939 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
1940 struct rx_cmp *rxcmp)
1944 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
1946 case EXT_OP_INNER_4:
1947 case EXT_OP_OUTER_4:
1948 case EXT_OP_INNFL_3:
1949 case EXT_OP_OUTFL_3:
1950 return PKT_HASH_TYPE_L4;
1952 return PKT_HASH_TYPE_L3;
1956 /* returns the following:
1957 * 1 - 1 packet successfully received
1958 * 0 - successful TPA_START, packet not completed yet
1959 * -EBUSY - completion ring does not have all the agg buffers yet
1960 * -ENOMEM - packet aborted due to out of memory
1961 * -EIO - packet aborted due to hw error indicated in BD
1963 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1964 u32 *raw_cons, u8 *event)
1966 struct bnxt_napi *bnapi = cpr->bnapi;
1967 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1968 struct net_device *dev = bp->dev;
1969 struct rx_cmp *rxcmp;
1970 struct rx_cmp_ext *rxcmp1;
1971 u32 tmp_raw_cons = *raw_cons;
1972 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1973 struct bnxt_sw_rx_bd *rx_buf;
1975 u8 *data_ptr, agg_bufs, cmp_type;
1976 bool xdp_active = false;
1977 dma_addr_t dma_addr;
1978 struct sk_buff *skb;
1979 struct xdp_buff xdp;
1985 rxcmp = (struct rx_cmp *)
1986 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1988 cmp_type = RX_CMP_TYPE(rxcmp);
1990 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1991 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1992 goto next_rx_no_prod_no_len;
1995 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1996 cp_cons = RING_CMP(tmp_raw_cons);
1997 rxcmp1 = (struct rx_cmp_ext *)
1998 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2000 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2003 /* The validity of the entry must be checked first, before
2004 * reading any further.
2007 prod = rxr->rx_prod;
2009 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
2010 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2011 bnxt_tpa_start(bp, rxr, cmp_type,
2012 (struct rx_tpa_start_cmp *)rxcmp,
2013 (struct rx_tpa_start_cmp_ext *)rxcmp1);
2015 *event |= BNXT_RX_EVENT;
2016 goto next_rx_no_prod_no_len;
2018 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2019 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
2020 (struct rx_tpa_end_cmp *)rxcmp,
2021 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
2028 bnxt_deliver_skb(bp, bnapi, skb);
2031 *event |= BNXT_RX_EVENT;
2032 goto next_rx_no_prod_no_len;
2035 cons = rxcmp->rx_cmp_opaque;
2036 if (unlikely(cons != rxr->rx_next_cons)) {
2037 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2039 /* 0xffff is a forced error, don't print it */
2040 if (rxr->rx_next_cons != 0xffff)
2041 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2042 cons, rxr->rx_next_cons);
2043 bnxt_sched_reset_rxr(bp, rxr);
2046 goto next_rx_no_prod_no_len;
2048 rx_buf = &rxr->rx_buf_ring[cons];
2049 data = rx_buf->data;
2050 data_ptr = rx_buf->data_ptr;
2053 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
2054 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
2057 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2060 cp_cons = NEXT_CMP(cp_cons);
2061 *event |= BNXT_AGG_EVENT;
2063 *event |= BNXT_RX_EVENT;
2065 rx_buf->data = NULL;
2066 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2067 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2069 bnxt_reuse_rx_data(rxr, cons, data);
2071 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
2075 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2076 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
2077 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
2078 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
2079 netdev_warn_once(bp->dev, "RX buffer error %x\n",
2081 bnxt_sched_reset_rxr(bp, rxr);
2084 goto next_rx_no_len;
2087 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
2088 len = flags >> RX_CMP_LEN_SHIFT;
2089 dma_addr = rx_buf->mapping;
2091 if (bnxt_xdp_attached(bp, rxr)) {
2092 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
2094 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
2098 cpr->sw_stats.rx.rx_oom_discards += 1;
2107 if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
2113 if (len <= bp->rx_copy_thresh) {
2114 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2115 bnxt_reuse_rx_data(rxr, cons, data);
2119 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
2122 bnxt_xdp_buff_frags_free(rxr, &xdp);
2124 cpr->sw_stats.rx.rx_oom_discards += 1;
2131 if (rx_buf->data_ptr == data_ptr)
2132 payload = misc & RX_CMP_PAYLOAD_OFFSET;
2135 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
2138 cpr->sw_stats.rx.rx_oom_discards += 1;
2146 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
2148 cpr->sw_stats.rx.rx_oom_discards += 1;
2153 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
2155 /* we should be able to free the old skb here */
2156 bnxt_xdp_buff_frags_free(rxr, &xdp);
2157 cpr->sw_stats.rx.rx_oom_discards += 1;
2164 if (RX_CMP_HASH_VALID(rxcmp)) {
2165 enum pkt_hash_types type;
2167 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2168 type = bnxt_rss_ext_op(bp, rxcmp);
2170 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
2172 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
2175 if (hash_type != 1 && hash_type != 3)
2176 type = PKT_HASH_TYPE_L3;
2178 type = PKT_HASH_TYPE_L4;
2180 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2183 if (cmp_type == CMP_TYPE_RX_L2_CMP)
2184 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
2185 skb->protocol = eth_type_trans(skb, dev);
2187 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
2188 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
2193 skb_checksum_none_assert(skb);
2194 if (RX_CMP_L4_CS_OK(rxcmp1)) {
2195 if (dev->features & NETIF_F_RXCSUM) {
2196 skb->ip_summed = CHECKSUM_UNNECESSARY;
2197 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2200 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2201 if (dev->features & NETIF_F_RXCSUM)
2202 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
2206 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2207 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
2210 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2211 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2213 spin_lock_bh(&ptp->ptp_lock);
2214 ns = timecounter_cyc2time(&ptp->tc, ts);
2215 spin_unlock_bh(&ptp->ptp_lock);
2216 memset(skb_hwtstamps(skb), 0,
2217 sizeof(*skb_hwtstamps(skb)));
2218 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2222 bnxt_deliver_skb(bp, bnapi, skb);
2226 cpr->rx_packets += 1;
2227 cpr->rx_bytes += len;
2230 rxr->rx_prod = NEXT_RX(prod);
2231 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
2233 next_rx_no_prod_no_len:
2234 *raw_cons = tmp_raw_cons;
2239 /* In netpoll mode, if we are using a combined completion ring, we need to
2240 * discard the rx packets and recycle the buffers.
2242 static int bnxt_force_rx_discard(struct bnxt *bp,
2243 struct bnxt_cp_ring_info *cpr,
2244 u32 *raw_cons, u8 *event)
2246 u32 tmp_raw_cons = *raw_cons;
2247 struct rx_cmp_ext *rxcmp1;
2248 struct rx_cmp *rxcmp;
2253 cp_cons = RING_CMP(tmp_raw_cons);
2254 rxcmp = (struct rx_cmp *)
2255 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2257 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2258 cp_cons = RING_CMP(tmp_raw_cons);
2259 rxcmp1 = (struct rx_cmp_ext *)
2260 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2262 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2265 /* The validity of the entry must be checked before reading any further. */
2269 cmp_type = RX_CMP_TYPE(rxcmp);
2270 if (cmp_type == CMP_TYPE_RX_L2_CMP ||
2271 cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
2272 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2273 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2274 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2275 struct rx_tpa_end_cmp_ext *tpa_end1;
2277 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2278 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2279 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2281 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2282 if (rc && rc != -EBUSY)
2283 cpr->sw_stats.rx.rx_netpoll_discards += 1;
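/* Read one firmware health register.  The register descriptor encodes
 * the access method (PCI config space, GRC window, BAR0 or BAR1) and the
 * offset; the reset-in-progress register is additionally masked with the
 * firmware-provided mask.
 */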
2287 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2289 struct bnxt_fw_health *fw_health = bp->fw_health;
2290 u32 reg = fw_health->regs[reg_idx];
2291 u32 reg_type, reg_off, val = 0;
2293 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2294 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2296 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2297 pci_read_config_dword(bp->pdev, reg_off, &val);
2299 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2300 reg_off = fw_health->mapped_regs[reg_idx];
2302 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2303 val = readl(bp->bar0 + reg_off);
2305 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2306 val = readl(bp->bar1 + reg_off);
2309 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2310 val &= fw_health->fw_reset_inprog_reg_mask;
2314 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2318 for (i = 0; i < bp->rx_nr_rings; i++) {
2319 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2320 struct bnxt_ring_grp_info *grp_info;
2322 grp_info = &bp->grp_info[grp_idx];
2323 if (grp_info->agg_fw_ring_id == ring_id)
2326 return INVALID_HW_RING_ID;
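/* The helpers below reconcile the requested link speed and signal mode
 * with the firmware speed fields.  Devices with the SPEEDS2 PHY
 * capability use the combined *_speeds2 fields; older devices keep
 * separate NRZ and PAM4 forced/advertised speeds.
 */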
2329 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
2331 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2333 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
2334 return link_info->force_link_speed2;
2335 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
2336 return link_info->force_pam4_link_speed;
2337 return link_info->force_link_speed;
2340 static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
2342 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2344 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2345 link_info->req_link_speed = link_info->force_link_speed2;
2346 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2347 switch (link_info->req_link_speed) {
2348 case BNXT_LINK_SPEED_50GB_PAM4:
2349 case BNXT_LINK_SPEED_100GB_PAM4:
2350 case BNXT_LINK_SPEED_200GB_PAM4:
2351 case BNXT_LINK_SPEED_400GB_PAM4:
2352 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2354 case BNXT_LINK_SPEED_100GB_PAM4_112:
2355 case BNXT_LINK_SPEED_200GB_PAM4_112:
2356 case BNXT_LINK_SPEED_400GB_PAM4_112:
2357 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
2360 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2364 link_info->req_link_speed = link_info->force_link_speed;
2365 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
2366 if (link_info->force_pam4_link_speed) {
2367 link_info->req_link_speed = link_info->force_pam4_link_speed;
2368 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
2372 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
2374 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2376 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2377 link_info->advertising = link_info->auto_link_speeds2;
2380 link_info->advertising = link_info->auto_link_speeds;
2381 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
2384 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
2386 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2388 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2389 if (link_info->req_link_speed != link_info->force_link_speed2)
2393 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
2394 link_info->req_link_speed != link_info->force_link_speed)
2396 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
2397 link_info->req_link_speed != link_info->force_pam4_link_speed)
2402 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
2404 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2406 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2407 if (link_info->advertising != link_info->auto_link_speeds2)
2411 if (link_info->advertising != link_info->auto_link_speeds ||
2412 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
2417 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
2419 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
2421 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
2423 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
2424 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
2426 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
2428 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
2430 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
2432 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
2433 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
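/* Decode and log firmware error-report async events: invalid 1PPS
 * signal, pause storms, dropped doorbells and thermal threshold
 * crossings.
 */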
2435 /* Return true if the workqueue has to be scheduled */
2436 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2438 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2441 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2442 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2443 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2445 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2446 netdev_warn(bp->dev, "Pause Storm detected!\n");
2448 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2449 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2451 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
2452 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
2453 char *threshold_type;
2454 bool notify = false;
2458 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
2459 threshold_type = "warning";
2461 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
2462 threshold_type = "critical";
2464 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
2465 threshold_type = "fatal";
2467 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
2468 threshold_type = "shutdown";
2471 netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2474 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
2480 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
2481 dir_str, threshold_type);
2482 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
2483 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
2484 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2486 bp->thermal_threshold_type = type;
2487 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2493 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2500 #define BNXT_GET_EVENT_PORT(data) \
2502 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2504 #define BNXT_EVENT_RING_TYPE(data2) \
2506 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2508 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2509 (BNXT_EVENT_RING_TYPE(data2) == \
2510 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2512 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2513 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2514 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2516 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2517 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2518 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2520 #define BNXT_PHC_BITS 48
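/* Handle one asynchronous event completion from firmware.  Most events
 * only set a flag in bp->sp_event and leave through
 * async_event_process_exit; events that fall through to the end also
 * kick the service task via __bnxt_queue_sp_work().
 */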
2522 static int bnxt_async_event_process(struct bnxt *bp,
2523 struct hwrm_async_event_cmpl *cmpl)
2525 u16 event_id = le16_to_cpu(cmpl->event_id);
2526 u32 data1 = le32_to_cpu(cmpl->event_data1);
2527 u32 data2 = le32_to_cpu(cmpl->event_data2);
2529 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2530 event_id, data1, data2);
2532 /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2534 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2535 struct bnxt_link_info *link_info = &bp->link_info;
2538 goto async_event_process_exit;
2540 /* print unsupported speed warning in forced speed mode only */
2541 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2542 (data1 & 0x20000)) {
2543 u16 fw_speed = bnxt_get_force_speed(link_info);
2544 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2546 if (speed != SPEED_UNKNOWN)
2547 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2550 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2553 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2554 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2555 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2557 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2558 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2560 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2561 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2563 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2564 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2569 if (bp->pf.port_id != port_id)
2572 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2575 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2577 goto async_event_process_exit;
2578 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2580 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2581 char *type_str = "Solicited";
2584 goto async_event_process_exit;
2586 bp->fw_reset_timestamp = jiffies;
2587 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2588 if (!bp->fw_reset_min_dsecs)
2589 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2590 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2591 if (!bp->fw_reset_max_dsecs)
2592 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2593 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2594 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2595 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2597 bp->fw_health->fatalities++;
2598 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2599 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2600 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2601 type_str = "Non-fatal";
2602 bp->fw_health->survivals++;
2603 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2605 netif_warn(bp, hw, bp->dev,
2606 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2607 type_str, data1, data2,
2608 bp->fw_reset_min_dsecs * 100,
2609 bp->fw_reset_max_dsecs * 100);
2610 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2613 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2614 struct bnxt_fw_health *fw_health = bp->fw_health;
2615 char *status_desc = "healthy";
2619 goto async_event_process_exit;
2621 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2622 fw_health->enabled = false;
2623 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2626 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2627 fw_health->tmr_multiplier =
2628 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2629 bp->current_interval * 10);
2630 fw_health->tmr_counter = fw_health->tmr_multiplier;
2631 if (!fw_health->enabled)
2632 fw_health->last_fw_heartbeat =
2633 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2634 fw_health->last_fw_reset_cnt =
2635 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2636 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2637 if (status != BNXT_FW_STATUS_HEALTHY)
2638 status_desc = "unhealthy";
2639 netif_info(bp, drv, bp->dev,
2640 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2641 fw_health->primary ? "primary" : "backup", status,
2642 status_desc, fw_health->last_fw_reset_cnt);
2643 if (!fw_health->enabled) {
2644 /* Make sure tmr_counter is set and visible to
2645 * bnxt_health_check() before setting enabled to true.
2648 fw_health->enabled = true;
2650 goto async_event_process_exit;
2652 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2653 netif_notice(bp, hw, bp->dev,
2654 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2656 goto async_event_process_exit;
2657 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2658 struct bnxt_rx_ring_info *rxr;
2661 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
2662 goto async_event_process_exit;
2664 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2665 BNXT_EVENT_RING_TYPE(data2), data1);
2666 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2667 goto async_event_process_exit;
2669 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2670 if (grp_idx == INVALID_HW_RING_ID) {
2671 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2673 goto async_event_process_exit;
2675 rxr = bp->bnapi[grp_idx]->rx_ring;
2676 bnxt_sched_reset_rxr(bp, rxr);
2677 goto async_event_process_exit;
2679 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2680 struct bnxt_fw_health *fw_health = bp->fw_health;
2682 netif_notice(bp, hw, bp->dev,
2683 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2686 fw_health->echo_req_data1 = data1;
2687 fw_health->echo_req_data2 = data2;
2688 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2691 goto async_event_process_exit;
2693 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2694 bnxt_ptp_pps_event(bp, data1, data2);
2695 goto async_event_process_exit;
2697 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2698 if (bnxt_event_error_report(bp, data1, data2))
2700 goto async_event_process_exit;
2702 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2703 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2704 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2705 if (BNXT_PTP_USE_RTC(bp)) {
2706 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2710 goto async_event_process_exit;
2712 spin_lock_bh(&ptp->ptp_lock);
2713 bnxt_ptp_update_current_time(bp);
2714 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2715 BNXT_PHC_BITS) | ptp->current_time);
2716 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2717 spin_unlock_bh(&ptp->ptp_lock);
2721 goto async_event_process_exit;
2723 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2724 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2726 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2727 goto async_event_process_exit;
2730 goto async_event_process_exit;
2732 __bnxt_queue_sp_work(bp);
2733 async_event_process_exit:
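/* Dispatch a control-path completion: HWRM_DONE wakes the waiting
 * requester, forwarded VF requests are queued to the service task, and
 * async events are handed to bnxt_async_event_process().
 */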
2737 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2739 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2740 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2741 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2742 (struct hwrm_fwd_req_cmpl *)txcmp;
2744 switch (cmpl_type) {
2745 case CMPL_BASE_TYPE_HWRM_DONE:
2746 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2747 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2750 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2751 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2753 if ((vf_id < bp->pf.first_vf_id) ||
2754 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2755 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2760 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2761 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2764 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2765 bnxt_async_event_process(bp,
2766 (struct hwrm_async_event_cmpl *)txcmp);
2776 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2778 struct bnxt_napi *bnapi = dev_instance;
2779 struct bnxt *bp = bnapi->bp;
2780 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2781 u32 cons = RING_CMP(cpr->cp_raw_cons);
2784 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2785 napi_schedule(&bnapi->napi);
2789 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2791 u32 raw_cons = cpr->cp_raw_cons;
2792 u16 cons = RING_CMP(raw_cons);
2793 struct tx_cmp *txcmp;
2795 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2797 return TX_CMP_VALID(txcmp, raw_cons);
2800 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2802 struct bnxt_napi *bnapi = dev_instance;
2803 struct bnxt *bp = bnapi->bp;
2804 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2805 u32 cons = RING_CMP(cpr->cp_raw_cons);
2808 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2810 if (!bnxt_has_work(bp, cpr)) {
2811 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2812 /* return if erroneous interrupt */
2813 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2817 /* disable ring IRQ */
2818 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2820 /* Return here if interrupt is shared and is disabled. */
2821 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2824 napi_schedule(&bnapi->napi);
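/* Core poll loop for one completion ring: consume TX, RX and HWRM
 * completions until the ring is empty or the NAPI budget is exhausted,
 * recording events in bnapi->events so the RX/AGG doorbells can be
 * written later in __bnxt_poll_work_done().
 */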
2828 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2831 struct bnxt_napi *bnapi = cpr->bnapi;
2832 u32 raw_cons = cpr->cp_raw_cons;
2836 struct tx_cmp *txcmp;
2838 cpr->has_more_work = 0;
2839 cpr->had_work_done = 1;
2844 cons = RING_CMP(raw_cons);
2845 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2847 if (!TX_CMP_VALID(txcmp, raw_cons))
2850 /* The validity of the entry must be checked before reading any further. */
2854 cmp_type = TX_CMP_TYPE(txcmp);
2855 if (cmp_type == CMP_TYPE_TX_L2_CMP ||
2856 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
2857 u32 opaque = txcmp->tx_cmp_opaque;
2858 struct bnxt_tx_ring_info *txr;
2861 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
2862 event |= BNXT_TX_CMP_EVENT;
2863 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
2864 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
2866 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
2867 tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
2869 /* return full budget so NAPI will complete. */
2870 if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
2872 raw_cons = NEXT_RAW_CMP(raw_cons);
2874 cpr->has_more_work = 1;
2877 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
2878 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
2880 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2882 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2884 if (likely(rc >= 0))
2886 /* Increment rx_pkts when rc is -ENOMEM to count towards
2887 * the NAPI budget. Otherwise, we may potentially loop
2888 * here forever if we consistently cannot allocate buffers. */
2891 else if (rc == -ENOMEM && budget)
2893 else if (rc == -EBUSY) /* partial completion */
2895 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
2896 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
2897 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
2898 bnxt_hwrm_handler(bp, txcmp);
2900 raw_cons = NEXT_RAW_CMP(raw_cons);
2902 if (rx_pkts && rx_pkts == budget) {
2903 cpr->has_more_work = 1;
2908 if (event & BNXT_REDIRECT_EVENT)
2911 if (event & BNXT_TX_EVENT) {
2912 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
2913 u16 prod = txr->tx_prod;
2915 /* Sync BD data before updating doorbell */
2918 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2921 cpr->cp_raw_cons = raw_cons;
2922 bnapi->events |= event;
2926 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2929 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
2930 bnapi->tx_int(bp, bnapi, budget);
2932 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2933 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2935 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2937 if (bnapi->events & BNXT_AGG_EVENT) {
2938 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2940 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2942 bnapi->events &= BNXT_TX_CMP_EVENT;
2945 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2948 struct bnxt_napi *bnapi = cpr->bnapi;
2951 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2953 /* ACK completion ring before freeing tx ring and producing new
2954 * buffers in rx/agg rings to prevent overflowing the completion ring. */
2957 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2959 __bnxt_poll_work_done(bp, bnapi, budget);
2963 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2965 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2966 struct bnxt *bp = bnapi->bp;
2967 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2968 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2969 struct tx_cmp *txcmp;
2970 struct rx_cmp_ext *rxcmp1;
2971 u32 cp_cons, tmp_raw_cons;
2972 u32 raw_cons = cpr->cp_raw_cons;
2973 bool flush_xdp = false;
2980 cp_cons = RING_CMP(raw_cons);
2981 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2983 if (!TX_CMP_VALID(txcmp, raw_cons))
2986 /* The validity of the entry must be checked before reading any further. */
2990 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2991 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2992 cp_cons = RING_CMP(tmp_raw_cons);
2993 rxcmp1 = (struct rx_cmp_ext *)
2994 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2996 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2999 /* force an error to recycle the buffer */
3000 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
3001 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
3003 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3004 if (likely(rc == -EIO) && budget)
3006 else if (rc == -EBUSY) /* partial completion */
3008 if (event & BNXT_REDIRECT_EVENT)
3010 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
3011 CMPL_BASE_TYPE_HWRM_DONE)) {
3012 bnxt_hwrm_handler(bp, txcmp);
3015 "Invalid completion received on special ring\n");
3017 raw_cons = NEXT_RAW_CMP(raw_cons);
3019 if (rx_pkts == budget)
3023 cpr->cp_raw_cons = raw_cons;
3024 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
3025 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3027 if (event & BNXT_AGG_EVENT)
3028 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3032 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
3033 napi_complete_done(napi, rx_pkts);
3034 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
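/* NAPI poll handler for chips with a single completion ring per vector
 * (see bnxt_poll_p5() below for the NQ-based chips).  The CQ doorbell
 * is re-armed once no work remains, and a DIM sample is recorded when
 * adaptive coalescing (BNXT_FLAG_DIM) is enabled.
 */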
3039 static int bnxt_poll(struct napi_struct *napi, int budget)
3041 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3042 struct bnxt *bp = bnapi->bp;
3043 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3046 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3047 napi_complete(napi);
3051 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3053 if (work_done >= budget) {
3055 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3059 if (!bnxt_has_work(bp, cpr)) {
3060 if (napi_complete_done(napi, work_done))
3061 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
3065 if (bp->flags & BNXT_FLAG_DIM) {
3066 struct dim_sample dim_sample = {};
3068 dim_update_sample(cpr->event_ctr,
3072 net_dim(&cpr->dim, dim_sample);
3077 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
3079 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3080 int i, work_done = 0;
3082 for (i = 0; i < cpr->cp_ring_count; i++) {
3083 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3085 if (cpr2->had_nqe_notify) {
3086 work_done += __bnxt_poll_work(bp, cpr2,
3087 budget - work_done);
3088 cpr->has_more_work |= cpr2->has_more_work;
3094 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
3095 u64 dbr_type, int budget)
3097 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3100 for (i = 0; i < cpr->cp_ring_count; i++) {
3101 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
3102 struct bnxt_db_info *db;
3104 if (cpr2->had_work_done) {
3107 if (dbr_type == DBR_TYPE_CQ_ARMALL) {
3108 cpr2->had_nqe_notify = 0;
3113 db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
3114 DB_RING_IDX(db, cpr2->cp_raw_cons),
3116 cpr2->had_work_done = 0;
3119 __bnxt_poll_work_done(bp, bnapi, budget);
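/* NAPI poll handler for P5_PLUS chips.  The vector services a
 * notification queue (NQ); each NQE points at one of the completion
 * sub-rings in cpr->cp_ring_arr, which is then drained with
 * __bnxt_poll_work().  CQ and NQ doorbells are written separately.
 */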
3122 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
3124 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
3125 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3126 struct bnxt_cp_ring_info *cpr_rx;
3127 u32 raw_cons = cpr->cp_raw_cons;
3128 struct bnxt *bp = bnapi->bp;
3129 struct nqe_cn *nqcmp;
3133 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
3134 napi_complete(napi);
3137 if (cpr->has_more_work) {
3138 cpr->has_more_work = 0;
3139 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
3144 cons = RING_CMP(raw_cons);
3145 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3147 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
3148 if (cpr->has_more_work)
3151 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
3153 cpr->cp_raw_cons = raw_cons;
3154 if (napi_complete_done(napi, work_done))
3155 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
3160 /* The validity of the entry must be checked before reading any further. */
3165 type = le16_to_cpu(nqcmp->type);
3166 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
3167 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
3168 u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
3169 struct bnxt_cp_ring_info *cpr2;
3171 /* No more budget for RX work */
3172 if (budget && work_done >= budget &&
3173 cq_type == BNXT_NQ_HDL_TYPE_RX)
3176 idx = BNXT_NQ_HDL_IDX(idx);
3177 cpr2 = &cpr->cp_ring_arr[idx];
3178 cpr2->had_nqe_notify = 1;
3179 cpr2->toggle = NQE_CN_TOGGLE(type);
3180 work_done += __bnxt_poll_work(bp, cpr2,
3181 budget - work_done);
3182 cpr->has_more_work |= cpr2->has_more_work;
3184 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3186 raw_cons = NEXT_RAW_CMP(raw_cons);
3188 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
3189 if (raw_cons != cpr->cp_raw_cons) {
3190 cpr->cp_raw_cons = raw_cons;
3191 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
3194 cpr_rx = &cpr->cp_ring_arr[0];
3195 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
3196 (bp->flags & BNXT_FLAG_DIM)) {
3197 struct dim_sample dim_sample = {};
3199 dim_update_sample(cpr->event_ctr,
3203 net_dim(&cpr->dim, dim_sample);
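/* Free all SKBs and XDP frames still attached to the TX rings and
 * unmap their DMA buffers; used when the rings are being torn down.
 */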
3208 static void bnxt_free_tx_skbs(struct bnxt *bp)
3211 struct pci_dev *pdev = bp->pdev;
3216 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
3217 for (i = 0; i < bp->tx_nr_rings; i++) {
3218 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3221 if (!txr->tx_buf_ring)
3224 for (j = 0; j < max_idx;) {
3225 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
3226 struct sk_buff *skb;
3229 if (i < bp->tx_nr_rings_xdp &&
3230 tx_buf->action == XDP_REDIRECT) {
3231 dma_unmap_single(&pdev->dev,
3232 dma_unmap_addr(tx_buf, mapping),
3233 dma_unmap_len(tx_buf, len),
3235 xdp_return_frame(tx_buf->xdpf);
3237 tx_buf->xdpf = NULL;
3250 if (tx_buf->is_push) {
3256 dma_unmap_single(&pdev->dev,
3257 dma_unmap_addr(tx_buf, mapping),
3261 last = tx_buf->nr_frags;
3263 for (k = 0; k < last; k++, j++) {
3264 int ring_idx = j & bp->tx_ring_mask;
3265 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
3267 tx_buf = &txr->tx_buf_ring[ring_idx];
3270 dma_unmap_addr(tx_buf, mapping),
3271 skb_frag_size(frag), DMA_TO_DEVICE);
3275 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
3279 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
3281 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3282 struct pci_dev *pdev = bp->pdev;
3283 struct bnxt_tpa_idx_map *map;
3284 int i, max_idx, max_agg_idx;
3286 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
3287 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
3289 goto skip_rx_tpa_free;
3291 for (i = 0; i < bp->max_tpa; i++) {
3292 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
3293 u8 *data = tpa_info->data;
3298 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
3299 bp->rx_buf_use_size, bp->rx_dir,
3300 DMA_ATTR_WEAK_ORDERING);
3302 tpa_info->data = NULL;
3304 skb_free_frag(data);
3308 if (!rxr->rx_buf_ring)
3309 goto skip_rx_buf_free;
3311 for (i = 0; i < max_idx; i++) {
3312 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
3313 dma_addr_t mapping = rx_buf->mapping;
3314 void *data = rx_buf->data;
3319 rx_buf->data = NULL;
3320 if (BNXT_RX_PAGE_MODE(bp)) {
3321 page_pool_recycle_direct(rxr->page_pool, data);
3323 dma_unmap_single_attrs(&pdev->dev, mapping,
3324 bp->rx_buf_use_size, bp->rx_dir,
3325 DMA_ATTR_WEAK_ORDERING);
3326 skb_free_frag(data);
3331 if (!rxr->rx_agg_ring)
3332 goto skip_rx_agg_free;
3334 for (i = 0; i < max_agg_idx; i++) {
3335 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
3336 struct page *page = rx_agg_buf->page;
3341 rx_agg_buf->page = NULL;
3342 __clear_bit(i, rxr->rx_agg_bmap);
3344 page_pool_recycle_direct(rxr->page_pool, page);
3348 map = rxr->rx_tpa_idx_map;
3350 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3353 static void bnxt_free_rx_skbs(struct bnxt *bp)
3360 for (i = 0; i < bp->rx_nr_rings; i++)
3361 bnxt_free_one_rx_ring_skbs(bp, i);
3364 static void bnxt_free_skbs(struct bnxt *bp)
3366 bnxt_free_tx_skbs(bp);
3367 bnxt_free_rx_skbs(bp);
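/* Initialize a block of firmware context memory: if the context type
 * specifies an init offset, only that byte of each entry is written
 * with the init value, otherwise the whole block is memset.
 */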
3370 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
3372 u8 init_val = ctxm->init_value;
3373 u16 offset = ctxm->init_offset;
3379 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
3380 memset(p, init_val, len);
3383 for (i = 0; i < len; i += ctxm->entry_size)
3384 *(p2 + i + offset) = init_val;
3387 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3389 struct pci_dev *pdev = bp->pdev;
3395 for (i = 0; i < rmem->nr_pages; i++) {
3396 if (!rmem->pg_arr[i])
3399 dma_free_coherent(&pdev->dev, rmem->page_size,
3400 rmem->pg_arr[i], rmem->dma_arr[i]);
3402 rmem->pg_arr[i] = NULL;
3406 size_t pg_tbl_size = rmem->nr_pages * 8;
3408 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3409 pg_tbl_size = rmem->page_size;
3410 dma_free_coherent(&pdev->dev, pg_tbl_size,
3411 rmem->pg_tbl, rmem->pg_tbl_map);
3412 rmem->pg_tbl = NULL;
3414 if (rmem->vmem_size && *rmem->vmem) {
3420 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3422 struct pci_dev *pdev = bp->pdev;
3426 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3427 valid_bit = PTU_PTE_VALID;
3428 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3429 size_t pg_tbl_size = rmem->nr_pages * 8;
3431 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3432 pg_tbl_size = rmem->page_size;
3433 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3440 for (i = 0; i < rmem->nr_pages; i++) {
3441 u64 extra_bits = valid_bit;
3443 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3447 if (!rmem->pg_arr[i])
3451 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
3453 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3454 if (i == rmem->nr_pages - 2 &&
3455 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3456 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3457 else if (i == rmem->nr_pages - 1 &&
3458 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3459 extra_bits |= PTU_PTE_LAST;
3461 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3465 if (rmem->vmem_size) {
3466 *rmem->vmem = vzalloc(rmem->vmem_size);
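/* Per-ring TPA (hardware GRO/LRO) state: an array of bnxt_tpa_info
 * entries plus, on P5_PLUS chips, per-entry aggregation arrays and an
 * agg-ID map used to track in-flight aggregations.
 */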
3473 static void bnxt_free_tpa_info(struct bnxt *bp)
3477 for (i = 0; i < bp->rx_nr_rings; i++) {
3478 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3480 kfree(rxr->rx_tpa_idx_map);
3481 rxr->rx_tpa_idx_map = NULL;
3483 for (j = 0; j < bp->max_tpa; j++) {
3484 kfree(rxr->rx_tpa[j].agg_arr);
3485 rxr->rx_tpa[j].agg_arr = NULL;
3493 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3497 bp->max_tpa = MAX_TPA;
3498 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
3499 if (!bp->max_tpa_v2)
3501 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3504 for (i = 0; i < bp->rx_nr_rings; i++) {
3505 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3506 struct rx_agg_cmp *agg;
3508 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3513 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3515 for (j = 0; j < bp->max_tpa; j++) {
3516 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3519 rxr->rx_tpa[j].agg_arr = agg;
3521 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3523 if (!rxr->rx_tpa_idx_map)
3529 static void bnxt_free_rx_rings(struct bnxt *bp)
3536 bnxt_free_tpa_info(bp);
3537 for (i = 0; i < bp->rx_nr_rings; i++) {
3538 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3539 struct bnxt_ring_struct *ring;
3542 bpf_prog_put(rxr->xdp_prog);
3544 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3545 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3547 page_pool_destroy(rxr->page_pool);
3548 rxr->page_pool = NULL;
3550 kfree(rxr->rx_agg_bmap);
3551 rxr->rx_agg_bmap = NULL;
3553 ring = &rxr->rx_ring_struct;
3554 bnxt_free_ring(bp, &ring->ring_mem);
3556 ring = &rxr->rx_agg_ring_struct;
3557 bnxt_free_ring(bp, &ring->ring_mem);
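/* Each RX ring owns a page_pool sized for the aggregation ring (plus
 * the RX ring itself in page mode); the pool takes care of DMA mapping
 * and device syncing of the pages it hands out.
 */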
3561 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3562 struct bnxt_rx_ring_info *rxr)
3564 struct page_pool_params pp = { 0 };
3566 pp.pool_size = bp->rx_agg_ring_size;
3567 if (BNXT_RX_PAGE_MODE(bp))
3568 pp.pool_size += bp->rx_ring_size;
3569 pp.nid = dev_to_node(&bp->pdev->dev);
3570 pp.napi = &rxr->bnapi->napi;
3571 pp.netdev = bp->dev;
3572 pp.dev = &bp->pdev->dev;
3573 pp.dma_dir = bp->rx_dir;
3574 pp.max_len = PAGE_SIZE;
3575 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3577 rxr->page_pool = page_pool_create(&pp);
3578 if (IS_ERR(rxr->page_pool)) {
3579 int err = PTR_ERR(rxr->page_pool);
3581 rxr->page_pool = NULL;
3587 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3589 int i, rc = 0, agg_rings = 0;
3594 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3597 for (i = 0; i < bp->rx_nr_rings; i++) {
3598 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3599 struct bnxt_ring_struct *ring;
3601 ring = &rxr->rx_ring_struct;
3603 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3607 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3611 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3615 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3619 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3627 ring = &rxr->rx_agg_ring_struct;
3628 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3633 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3634 mem_size = rxr->rx_agg_bmap_size / 8;
3635 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3636 if (!rxr->rx_agg_bmap)
3640 if (bp->flags & BNXT_FLAG_TPA)
3641 rc = bnxt_alloc_tpa_info(bp);
3645 static void bnxt_free_tx_rings(struct bnxt *bp)
3648 struct pci_dev *pdev = bp->pdev;
3653 for (i = 0; i < bp->tx_nr_rings; i++) {
3654 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3655 struct bnxt_ring_struct *ring;
3658 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3659 txr->tx_push, txr->tx_push_mapping);
3660 txr->tx_push = NULL;
3663 ring = &txr->tx_ring_struct;
3665 bnxt_free_ring(bp, &ring->ring_mem);
3669 #define BNXT_TC_TO_RING_BASE(bp, tc) \
3670 ((tc) * (bp)->tx_nr_rings_per_tc)
3672 #define BNXT_RING_TO_TC_OFF(bp, tx) \
3673 ((tx) % (bp)->tx_nr_rings_per_tc)
3675 #define BNXT_RING_TO_TC(bp, tx) \
3676 ((tx) / (bp)->tx_nr_rings_per_tc)
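/* Allocate the TX rings.  When TX push is enabled, each ring also gets
 * a small coherent buffer holding the push BD plus packet headers;
 * push is disabled if that buffer would exceed 256 bytes.
 */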
3678 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3681 struct pci_dev *pdev = bp->pdev;
3683 bp->tx_push_size = 0;
3684 if (bp->tx_push_thresh) {
3687 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3688 bp->tx_push_thresh);
3690 if (push_size > 256) {
3692 bp->tx_push_thresh = 0;
3695 bp->tx_push_size = push_size;
3698 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3699 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3700 struct bnxt_ring_struct *ring;
3703 ring = &txr->tx_ring_struct;
3705 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3709 ring->grp_idx = txr->bnapi->index;
3710 if (bp->tx_push_size) {
3713 /* One pre-allocated DMA buffer to back up the TX push BD and packet header data */
3716 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3718 &txr->tx_push_mapping,
3724 mapping = txr->tx_push_mapping +
3725 sizeof(struct tx_push_bd);
3726 txr->data_mapping = cpu_to_le64(mapping);
3728 qidx = bp->tc_to_qidx[j];
3729 ring->queue_id = bp->q_info[qidx].queue_id;
3730 spin_lock_init(&txr->xdp_tx_lock);
3731 if (i < bp->tx_nr_rings_xdp)
3733 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
3739 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3741 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3743 kfree(cpr->cp_desc_ring);
3744 cpr->cp_desc_ring = NULL;
3745 ring->ring_mem.pg_arr = NULL;
3746 kfree(cpr->cp_desc_mapping);
3747 cpr->cp_desc_mapping = NULL;
3748 ring->ring_mem.dma_arr = NULL;
3751 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3753 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3754 if (!cpr->cp_desc_ring)
3756 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3758 if (!cpr->cp_desc_mapping)
3763 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3769 for (i = 0; i < bp->cp_nr_rings; i++) {
3770 struct bnxt_napi *bnapi = bp->bnapi[i];
3774 bnxt_free_cp_arrays(&bnapi->cp_ring);
3778 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3780 int i, n = bp->cp_nr_pages;
3782 for (i = 0; i < bp->cp_nr_rings; i++) {
3783 struct bnxt_napi *bnapi = bp->bnapi[i];
3788 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3795 static void bnxt_free_cp_rings(struct bnxt *bp)
3802 for (i = 0; i < bp->cp_nr_rings; i++) {
3803 struct bnxt_napi *bnapi = bp->bnapi[i];
3804 struct bnxt_cp_ring_info *cpr;
3805 struct bnxt_ring_struct *ring;
3811 cpr = &bnapi->cp_ring;
3812 ring = &cpr->cp_ring_struct;
3814 bnxt_free_ring(bp, &ring->ring_mem);
3816 if (!cpr->cp_ring_arr)
3819 for (j = 0; j < cpr->cp_ring_count; j++) {
3820 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
3822 ring = &cpr2->cp_ring_struct;
3823 bnxt_free_ring(bp, &ring->ring_mem);
3824 bnxt_free_cp_arrays(cpr2);
3826 kfree(cpr->cp_ring_arr);
3827 cpr->cp_ring_arr = NULL;
3828 cpr->cp_ring_count = 0;
3832 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
3833 struct bnxt_cp_ring_info *cpr)
3835 struct bnxt_ring_mem_info *rmem;
3836 struct bnxt_ring_struct *ring;
3839 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3841 bnxt_free_cp_arrays(cpr);
3844 ring = &cpr->cp_ring_struct;
3845 rmem = &ring->ring_mem;
3846 rmem->nr_pages = bp->cp_nr_pages;
3847 rmem->page_size = HW_CMPD_RING_SIZE;
3848 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3849 rmem->dma_arr = cpr->cp_desc_mapping;
3850 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3851 rc = bnxt_alloc_ring(bp, rmem);
3853 bnxt_free_ring(bp, rmem);
3854 bnxt_free_cp_arrays(cpr);
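/* On P5_PLUS chips each NAPI instance owns an NQ plus an array of
 * completion sub-rings: one for RX and one per TC for TX.  The
 * sub-rings are allocated here and linked back to the matching
 * rx_ring/tx_ring.
 */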
3859 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3861 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3862 int i, j, rc, ulp_base_vec, ulp_msix;
3863 int tcs = bp->num_tc;
3867 ulp_msix = bnxt_get_ulp_msix_num(bp);
3868 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3869 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
3870 struct bnxt_napi *bnapi = bp->bnapi[i];
3871 struct bnxt_cp_ring_info *cpr, *cpr2;
3872 struct bnxt_ring_struct *ring;
3873 int cp_count = 0, k;
3879 cpr = &bnapi->cp_ring;
3881 ring = &cpr->cp_ring_struct;
3883 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3887 if (ulp_msix && i >= ulp_base_vec)
3888 ring->map_idx = i + ulp_msix;
3892 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
3895 if (i < bp->rx_nr_rings) {
3899 if (i < bp->tx_nr_rings_xdp) {
3902 } else if ((sh && i < bp->tx_nr_rings) ||
3903 (!sh && i >= bp->rx_nr_rings)) {
3908 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
3910 if (!cpr->cp_ring_arr)
3912 cpr->cp_ring_count = cp_count;
3914 for (k = 0; k < cp_count; k++) {
3915 cpr2 = &cpr->cp_ring_arr[k];
3916 rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
3919 cpr2->bnapi = bnapi;
3922 bp->rx_ring[i].rx_cpr = cpr2;
3923 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
3927 n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
3928 bp->tx_ring[n].tx_cpr = cpr2;
3929 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
3938 static void bnxt_init_ring_struct(struct bnxt *bp)
3942 for (i = 0; i < bp->cp_nr_rings; i++) {
3943 struct bnxt_napi *bnapi = bp->bnapi[i];
3944 struct bnxt_ring_mem_info *rmem;
3945 struct bnxt_cp_ring_info *cpr;
3946 struct bnxt_rx_ring_info *rxr;
3947 struct bnxt_tx_ring_info *txr;
3948 struct bnxt_ring_struct *ring;
3953 cpr = &bnapi->cp_ring;
3954 ring = &cpr->cp_ring_struct;
3955 rmem = &ring->ring_mem;
3956 rmem->nr_pages = bp->cp_nr_pages;
3957 rmem->page_size = HW_CMPD_RING_SIZE;
3958 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3959 rmem->dma_arr = cpr->cp_desc_mapping;
3960 rmem->vmem_size = 0;
3962 rxr = bnapi->rx_ring;
3966 ring = &rxr->rx_ring_struct;
3967 rmem = &ring->ring_mem;
3968 rmem->nr_pages = bp->rx_nr_pages;
3969 rmem->page_size = HW_RXBD_RING_SIZE;
3970 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3971 rmem->dma_arr = rxr->rx_desc_mapping;
3972 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3973 rmem->vmem = (void **)&rxr->rx_buf_ring;
3975 ring = &rxr->rx_agg_ring_struct;
3976 rmem = &ring->ring_mem;
3977 rmem->nr_pages = bp->rx_agg_nr_pages;
3978 rmem->page_size = HW_RXBD_RING_SIZE;
3979 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3980 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3981 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3982 rmem->vmem = (void **)&rxr->rx_agg_ring;
3985 bnxt_for_each_napi_tx(j, bnapi, txr) {
3986 ring = &txr->tx_ring_struct;
3987 rmem = &ring->ring_mem;
3988 rmem->nr_pages = bp->tx_nr_pages;
3989 rmem->page_size = HW_TXBD_RING_SIZE;
3990 rmem->pg_arr = (void **)txr->tx_desc_ring;
3991 rmem->dma_arr = txr->tx_desc_mapping;
3992 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3993 rmem->vmem = (void **)&txr->tx_buf_ring;
3998 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
4002 struct rx_bd **rx_buf_ring;
4004 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
4005 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
4009 rxbd = rx_buf_ring[i];
4013 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
4014 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
4015 rxbd->rx_bd_opaque = prod;
4020 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
4022 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
4023 struct net_device *dev = bp->dev;
4027 prod = rxr->rx_prod;
4028 for (i = 0; i < bp->rx_ring_size; i++) {
4029 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
4030 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
4031 ring_nr, i, bp->rx_ring_size);
4034 prod = NEXT_RX(prod);
4036 rxr->rx_prod = prod;
4038 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
4041 prod = rxr->rx_agg_prod;
4042 for (i = 0; i < bp->rx_agg_ring_size; i++) {
4043 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
4044 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
4045 ring_nr, i, bp->rx_agg_ring_size);
4048 prod = NEXT_RX_AGG(prod);
4050 rxr->rx_agg_prod = prod;
4056 for (i = 0; i < bp->max_tpa; i++) {
4057 data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
4061 rxr->rx_tpa[i].data = data;
4062 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
4063 rxr->rx_tpa[i].mapping = mapping;
4069 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
4071 struct bnxt_rx_ring_info *rxr;
4072 struct bnxt_ring_struct *ring;
4075 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
4076 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
4078 if (NET_IP_ALIGN == 2)
4079 type |= RX_BD_FLAGS_SOP;
4081 rxr = &bp->rx_ring[ring_nr];
4082 ring = &rxr->rx_ring_struct;
4083 bnxt_init_rxbd_pages(ring, type);
4085 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
4088 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
4089 bpf_prog_add(bp->xdp_prog, 1);
4090 rxr->xdp_prog = bp->xdp_prog;
4092 ring->fw_ring_id = INVALID_HW_RING_ID;
4094 ring = &rxr->rx_agg_ring_struct;
4095 ring->fw_ring_id = INVALID_HW_RING_ID;
4097 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
4098 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
4099 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
4101 bnxt_init_rxbd_pages(ring, type);
4104 return bnxt_alloc_one_rx_ring(bp, ring_nr);
4107 static void bnxt_init_cp_rings(struct bnxt *bp)
4111 for (i = 0; i < bp->cp_nr_rings; i++) {
4112 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
4113 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4115 ring->fw_ring_id = INVALID_HW_RING_ID;
4116 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4117 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4118 if (!cpr->cp_ring_arr)
4120 for (j = 0; j < cpr->cp_ring_count; j++) {
4121 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
4123 ring = &cpr2->cp_ring_struct;
4124 ring->fw_ring_id = INVALID_HW_RING_ID;
4125 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
4126 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
4131 static int bnxt_init_rx_rings(struct bnxt *bp)
4135 if (BNXT_RX_PAGE_MODE(bp)) {
4136 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
4137 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
4139 bp->rx_offset = BNXT_RX_OFFSET;
4140 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
4143 for (i = 0; i < bp->rx_nr_rings; i++) {
4144 rc = bnxt_init_one_rx_ring(bp, i);
4152 static int bnxt_init_tx_rings(struct bnxt *bp)
4156 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
4157 BNXT_MIN_TX_DESC_CNT);
4159 for (i = 0; i < bp->tx_nr_rings; i++) {
4160 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4161 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4163 ring->fw_ring_id = INVALID_HW_RING_ID;
4165 if (i >= bp->tx_nr_rings_xdp)
4166 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
4167 NETDEV_QUEUE_TYPE_TX,
4174 static void bnxt_free_ring_grps(struct bnxt *bp)
4176 kfree(bp->grp_info);
4177 bp->grp_info = NULL;
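/* Ring group bookkeeping: every group starts with all firmware ring
 * and stats-context IDs marked invalid until the rings are registered
 * with the firmware.
 */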
4180 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
4185 bp->grp_info = kcalloc(bp->cp_nr_rings,
4186 sizeof(struct bnxt_ring_grp_info),
4191 for (i = 0; i < bp->cp_nr_rings; i++) {
4193 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
4194 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4195 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
4196 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
4197 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4202 static void bnxt_free_vnics(struct bnxt *bp)
4204 kfree(bp->vnic_info);
4205 bp->vnic_info = NULL;
4209 static int bnxt_alloc_vnics(struct bnxt *bp)
4213 #ifdef CONFIG_RFS_ACCEL
4214 if (bp->flags & BNXT_FLAG_RFS) {
4215 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
4217 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4218 num_vnics += bp->rx_nr_rings;
4222 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4225 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4230 bp->nr_vnics = num_vnics;
4234 static void bnxt_init_vnics(struct bnxt *bp)
4236 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
4239 for (i = 0; i < bp->nr_vnics; i++) {
4240 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4243 vnic->fw_vnic_id = INVALID_HW_RING_ID;
4244 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
4245 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
4247 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
4249 if (bp->vnic_info[i].rss_hash_key) {
4250 if (i == BNXT_VNIC_DEFAULT) {
4251 u8 *key = (void *)vnic->rss_hash_key;
4254 if (!bp->rss_hash_key_valid &&
4255 !bp->rss_hash_key_updated) {
4256 get_random_bytes(bp->rss_hash_key,
4258 bp->rss_hash_key_updated = true;
4261 memcpy(vnic->rss_hash_key, bp->rss_hash_key,
4264 if (!bp->rss_hash_key_updated)
4267 bp->rss_hash_key_updated = false;
4268 bp->rss_hash_key_valid = true;
4270 bp->toeplitz_prefix = 0;
4271 for (k = 0; k < 8; k++) {
4272 bp->toeplitz_prefix <<= 8;
4273 bp->toeplitz_prefix |= key[k];
4276 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
4283 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
4287 pages = ring_size / desc_per_pg;
4294 while (pages & (pages - 1))
4300 void bnxt_set_tpa_flags(struct bnxt *bp)
4302 bp->flags &= ~BNXT_FLAG_TPA;
4303 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
4305 if (bp->dev->features & NETIF_F_LRO)
4306 bp->flags |= BNXT_FLAG_LRO;
4307 else if (bp->dev->features & NETIF_F_GRO_HW)
4308 bp->flags |= BNXT_FLAG_GRO;
4311 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must be set before calling this function. */
4314 void bnxt_set_ring_params(struct bnxt *bp)
4316 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
4317 u32 agg_factor = 0, agg_ring_size = 0;
4319 /* 8 for CRC and VLAN */
4320 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
4322 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
4323 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4325 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
4326 ring_size = bp->rx_ring_size;
4327 bp->rx_agg_ring_size = 0;
4328 bp->rx_agg_nr_pages = 0;
4330 if (bp->flags & BNXT_FLAG_TPA)
4331 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
4333 bp->flags &= ~BNXT_FLAG_JUMBO;
4334 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
4337 bp->flags |= BNXT_FLAG_JUMBO;
4338 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4339 if (jumbo_factor > agg_factor)
4340 agg_factor = jumbo_factor;
4343 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
4344 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
4345 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4346 bp->rx_ring_size, ring_size);
4347 bp->rx_ring_size = ring_size;
4349 agg_ring_size = ring_size * agg_factor;
4351 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
4353 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
4354 u32 tmp = agg_ring_size;
4356 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
4357 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
4358 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4359 tmp, agg_ring_size);
4361 bp->rx_agg_ring_size = agg_ring_size;
4362 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
4364 if (BNXT_RX_PAGE_MODE(bp)) {
4365 rx_space = PAGE_SIZE;
4366 rx_size = PAGE_SIZE -
4367 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
4368 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4370 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
4371 rx_space = rx_size + NET_SKB_PAD +
4372 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4376 bp->rx_buf_use_size = rx_size;
4377 bp->rx_buf_size = rx_space;
4379 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
4380 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
4382 ring_size = bp->tx_ring_size;
4383 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
4384 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
4386 max_rx_cmpl = bp->rx_ring_size;
4387 /* MAX TPA needs to be added because TPA_START completions are
4388 * immediately recycled, so the TPA completions are not bound by the RX ring size. */
4391 if (bp->flags & BNXT_FLAG_TPA)
4392 max_rx_cmpl += bp->max_tpa;
4393 /* RX and TPA completions are 32-byte, all others are 16-byte */
4394 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
4395 bp->cp_ring_size = ring_size;
4397 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4398 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4399 bp->cp_nr_pages = MAX_CP_PAGES;
4400 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4401 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4402 ring_size, bp->cp_ring_size);
4404 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4405 bp->cp_ring_mask = bp->cp_bit - 1;
4408 /* Changing allocation mode of RX rings.
4409 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4411 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4413 struct net_device *dev = bp->dev;
4416 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
4417 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4419 if (bp->xdp_prog->aux->xdp_has_frags)
4420 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4423 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4424 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4425 bp->flags |= BNXT_FLAG_JUMBO;
4426 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4428 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4429 bp->rx_skb_func = bnxt_rx_page_skb;
4431 bp->rx_dir = DMA_BIDIRECTIONAL;
4432 /* Disable LRO or GRO_HW */
4433 netdev_update_features(dev);
4435 dev->max_mtu = bp->max_mtu;
4436 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4437 bp->rx_dir = DMA_FROM_DEVICE;
4438 bp->rx_skb_func = bnxt_rx_skb;
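/* VNIC attribute buffers: unicast/multicast address lists, the RSS
 * indirection table and the RSS hash key.  The hash key is carved out
 * of the end of the coherent RSS table allocation.
 */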
4443 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4446 struct bnxt_vnic_info *vnic;
4447 struct pci_dev *pdev = bp->pdev;
4452 for (i = 0; i < bp->nr_vnics; i++) {
4453 vnic = &bp->vnic_info[i];
4455 kfree(vnic->fw_grp_ids);
4456 vnic->fw_grp_ids = NULL;
4458 kfree(vnic->uc_list);
4459 vnic->uc_list = NULL;
4461 if (vnic->mc_list) {
4462 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4463 vnic->mc_list, vnic->mc_list_mapping);
4464 vnic->mc_list = NULL;
4467 if (vnic->rss_table) {
4468 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4469 vnic->rss_table,
4470 vnic->rss_table_dma_addr);
4471 vnic->rss_table = NULL;
4474 vnic->rss_hash_key = NULL;
4479 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4481 int i, rc = 0, size;
4482 struct bnxt_vnic_info *vnic;
4483 struct pci_dev *pdev = bp->pdev;
4486 for (i = 0; i < bp->nr_vnics; i++) {
4487 vnic = &bp->vnic_info[i];
4489 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4490 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4493 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4494 if (!vnic->uc_list) {
4501 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4502 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4503 vnic->mc_list =
4504 dma_alloc_coherent(&pdev->dev,
4505 vnic->mc_list_size,
4506 &vnic->mc_list_mapping,
4507 GFP_KERNEL);
4508 if (!vnic->mc_list) {
4514 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4515 goto vnic_skip_grps;
4517 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4518 max_rings = bp->rx_nr_rings;
4522 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4523 if (!vnic->fw_grp_ids) {
4528 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
4529 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4532 /* Allocate rss table and hash key */
4533 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4534 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4535 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4537 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4538 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4539 vnic->rss_table_size,
4540 &vnic->rss_table_dma_addr,
4542 if (!vnic->rss_table) {
4547 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4548 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
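/* Note on the allocation above: the RSS indirection table and the RSS
 * hash key share one coherent DMA buffer.  The table occupies the first
 * "size" bytes and the hash key immediately follows it, so
 * rss_hash_key and rss_hash_key_dma_addr are simply the same buffer
 * offset by "size" bytes.
 */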
4556 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4558 struct bnxt_hwrm_wait_token *token;
4560 dma_pool_destroy(bp->hwrm_dma_pool);
4561 bp->hwrm_dma_pool = NULL;
4564 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4565 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4569 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4571 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4573 BNXT_HWRM_DMA_ALIGN, 0);
4574 if (!bp->hwrm_dma_pool)
4577 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4582 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4584 kfree(stats->hw_masks);
4585 stats->hw_masks = NULL;
4586 kfree(stats->sw_stats);
4587 stats->sw_stats = NULL;
4588 if (stats->hw_stats) {
4589 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4590 stats->hw_stats_map);
4591 stats->hw_stats = NULL;
4595 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4598 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4599 &stats->hw_stats_map, GFP_KERNEL);
4600 if (!stats->hw_stats)
4603 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4604 if (!stats->sw_stats)
4608 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4609 if (!stats->hw_masks)
4615 bnxt_free_stats_mem(bp, stats);
4619 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4623 for (i = 0; i < count; i++)
4627 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4631 for (i = 0; i < count; i++)
4632 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
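/* The mask helpers above exist because the hardware counters are
 * narrower than 64 bits (typically 40 or 48 bits depending on chip and
 * counter).  bnxt_init_stats() records the valid width of each counter,
 * either as queried from firmware or as a fixed default, so roll-overs
 * can be handled when accumulating into 64-bit software counters.
 */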
4635 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4636 struct bnxt_stats_mem *stats)
4638 struct hwrm_func_qstats_ext_output *resp;
4639 struct hwrm_func_qstats_ext_input *req;
4643 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4644 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
4647 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4651 req->fid = cpu_to_le16(0xffff);
4652 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4654 resp = hwrm_req_hold(bp, req);
4655 rc = hwrm_req_send(bp, req);
4657 hw_masks = &resp->rx_ucast_pkts;
4658 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4660 hwrm_req_drop(bp, req);
4664 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4665 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4667 static void bnxt_init_stats(struct bnxt *bp)
4669 struct bnxt_napi *bnapi = bp->bnapi[0];
4670 struct bnxt_cp_ring_info *cpr;
4671 struct bnxt_stats_mem *stats;
4672 __le64 *rx_stats, *tx_stats;
4673 int rc, rx_count, tx_count;
4674 u64 *rx_masks, *tx_masks;
4678 cpr = &bnapi->cp_ring;
4679 stats = &cpr->stats;
4680 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4682 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4683 mask = (1ULL << 48) - 1;
4686 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4688 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4689 stats = &bp->port_stats;
4690 rx_stats = stats->hw_stats;
4691 rx_masks = stats->hw_masks;
4692 rx_count = sizeof(struct rx_port_stats) / 8;
4693 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4694 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4695 tx_count = sizeof(struct tx_port_stats) / 8;
4697 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4698 rc = bnxt_hwrm_port_qstats(bp, flags);
4700 mask = (1ULL << 40) - 1;
4702 bnxt_fill_masks(rx_masks, mask, rx_count);
4703 bnxt_fill_masks(tx_masks, mask, tx_count);
4705 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4706 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4707 bnxt_hwrm_port_qstats(bp, 0);
4710 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4711 stats = &bp->rx_port_stats_ext;
4712 rx_stats = stats->hw_stats;
4713 rx_masks = stats->hw_masks;
4714 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4715 stats = &bp->tx_port_stats_ext;
4716 tx_stats = stats->hw_stats;
4717 tx_masks = stats->hw_masks;
4718 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4720 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4721 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4723 mask = (1ULL << 40) - 1;
4725 bnxt_fill_masks(rx_masks, mask, rx_count);
4727 bnxt_fill_masks(tx_masks, mask, tx_count);
4729 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4731 bnxt_copy_hw_masks(tx_masks, tx_stats,
4732 tx_count);
4733 bnxt_hwrm_port_qstats_ext(bp, 0);
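/* For the legacy port stats handled above, the RX and TX counters share
 * a single hw_stats buffer: RX counters start at offset 0 and TX
 * counters start at BNXT_TX_PORT_STATS_BYTE_OFFSET, which is why the
 * tx_stats/tx_masks pointers are derived from the rx pointers.
 */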
4738 static void bnxt_free_port_stats(struct bnxt *bp)
4740 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4741 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4743 bnxt_free_stats_mem(bp, &bp->port_stats);
4744 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4745 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4748 static void bnxt_free_ring_stats(struct bnxt *bp)
4755 for (i = 0; i < bp->cp_nr_rings; i++) {
4756 struct bnxt_napi *bnapi = bp->bnapi[i];
4757 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4759 bnxt_free_stats_mem(bp, &cpr->stats);
4763 static int bnxt_alloc_stats(struct bnxt *bp)
4768 size = bp->hw_ring_stats_size;
4770 for (i = 0; i < bp->cp_nr_rings; i++) {
4771 struct bnxt_napi *bnapi = bp->bnapi[i];
4772 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4774 cpr->stats.len = size;
4775 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4779 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4782 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4785 if (bp->port_stats.hw_stats)
4786 goto alloc_ext_stats;
4788 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4789 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4793 bp->flags |= BNXT_FLAG_PORT_STATS;
4796 /* Display extended statistics only if FW supports it */
4797 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4798 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4801 if (bp->rx_port_stats_ext.hw_stats)
4802 goto alloc_tx_ext_stats;
4804 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4805 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4806 /* Extended stats are optional */
4811 if (bp->tx_port_stats_ext.hw_stats)
4814 if (bp->hwrm_spec_code >= 0x10902 ||
4815 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4816 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4817 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4818 /* Extended stats are optional */
4822 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4826 static void bnxt_clear_ring_indices(struct bnxt *bp)
4833 for (i = 0; i < bp->cp_nr_rings; i++) {
4834 struct bnxt_napi *bnapi = bp->bnapi[i];
4835 struct bnxt_cp_ring_info *cpr;
4836 struct bnxt_rx_ring_info *rxr;
4837 struct bnxt_tx_ring_info *txr;
4842 cpr = &bnapi->cp_ring;
4843 cpr->cp_raw_cons = 0;
4845 bnxt_for_each_napi_tx(j, bnapi, txr) {
4848 txr->tx_hw_cons = 0;
4851 rxr = bnapi->rx_ring;
4854 rxr->rx_agg_prod = 0;
4855 rxr->rx_sw_agg_prod = 0;
4856 rxr->rx_next_cons = 0;
4862 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
4864 u8 type = fltr->type, flags = fltr->flags;
4866 INIT_LIST_HEAD(&fltr->list);
4867 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
4868 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
4869 list_add_tail(&fltr->list, &bp->usr_fltr_list);
4872 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
4874 if (!list_empty(&fltr->list))
4875 list_del_init(&fltr->list);
4878 void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
4880 struct bnxt_filter_base *usr_fltr, *tmp;
4882 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
4883 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
4885 bnxt_del_one_usr_fltr(bp, usr_fltr);
4889 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
4891 hlist_del(&fltr->hash);
4892 bnxt_del_one_usr_fltr(bp, fltr);
4894 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
4895 bp->ntp_fltr_count--;
4900 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
4904 /* Under rtnl_lock and all our NAPIs have been disabled. It's
4905 * safe to delete the hash table.
4906 */
4907 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4908 struct hlist_head *head;
4909 struct hlist_node *tmp;
4910 struct bnxt_ntuple_filter *fltr;
4912 head = &bp->ntp_fltr_hash_tbl[i];
4913 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
4914 bnxt_del_l2_filter(bp, fltr->l2_fltr);
4915 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
4916 !list_empty(&fltr->base.list)))
4918 bnxt_del_fltr(bp, &fltr->base);
4924 bitmap_free(bp->ntp_fltr_bmap);
4925 bp->ntp_fltr_bmap = NULL;
4926 bp->ntp_fltr_count = 0;
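/* Besides emptying the hash table, the function above also frees the
 * sw_id bitmap.  That bitmap, allocated in bnxt_alloc_ntp_fltrs() below
 * with bp->max_fltr bits, tracks which filter ids are in use so a free
 * id can be handed out quickly when a new n-tuple filter is created.
 */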
4929 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4933 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
4936 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4937 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4939 bp->ntp_fltr_count = 0;
4940 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
4942 if (!bp->ntp_fltr_bmap)
4948 static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
4952 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
4953 struct hlist_head *head;
4954 struct hlist_node *tmp;
4955 struct bnxt_l2_filter *fltr;
4957 head = &bp->l2_fltr_hash_tbl[i];
4958 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
4959 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
4960 !list_empty(&fltr->base.list)))
4962 bnxt_del_fltr(bp, &fltr->base);
4967 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
4971 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
4972 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
4973 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
4976 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4978 bnxt_free_vnic_attributes(bp);
4979 bnxt_free_tx_rings(bp);
4980 bnxt_free_rx_rings(bp);
4981 bnxt_free_cp_rings(bp);
4982 bnxt_free_all_cp_arrays(bp);
4983 bnxt_free_ntp_fltrs(bp, false);
4984 bnxt_free_l2_filters(bp, false);
4986 bnxt_free_ring_stats(bp);
4987 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4988 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4989 bnxt_free_port_stats(bp);
4990 bnxt_free_ring_grps(bp);
4991 bnxt_free_vnics(bp);
4992 kfree(bp->tx_ring_map);
4993 bp->tx_ring_map = NULL;
5001 bnxt_clear_ring_indices(bp);
5005 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
5007 int i, j, rc, size, arr_size;
5011 /* Allocate bnapi mem pointer array and mem block for
5012 * all queues
5013 */
5014 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
5016 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
5017 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5023 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
5024 bp->bnapi[i] = bnapi;
5025 bp->bnapi[i]->index = i;
5026 bp->bnapi[i]->bp = bp;
5027 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5028 struct bnxt_cp_ring_info *cpr =
5029 &bp->bnapi[i]->cp_ring;
5031 cpr->cp_ring_struct.ring_mem.flags =
5032 BNXT_RMEM_RING_PTE_FLAG;
5036 bp->rx_ring = kcalloc(bp->rx_nr_rings,
5037 sizeof(struct bnxt_rx_ring_info),
5042 for (i = 0; i < bp->rx_nr_rings; i++) {
5043 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5045 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5046 rxr->rx_ring_struct.ring_mem.flags =
5047 BNXT_RMEM_RING_PTE_FLAG;
5048 rxr->rx_agg_ring_struct.ring_mem.flags =
5049 BNXT_RMEM_RING_PTE_FLAG;
5051 rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
5053 rxr->bnapi = bp->bnapi[i];
5054 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
5057 bp->tx_ring = kcalloc(bp->tx_nr_rings,
5058 sizeof(struct bnxt_tx_ring_info),
5063 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
5066 if (!bp->tx_ring_map)
5069 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5072 j = bp->rx_nr_rings;
5074 for (i = 0; i < bp->tx_nr_rings; i++) {
5075 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5076 struct bnxt_napi *bnapi2;
5078 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
5079 txr->tx_ring_struct.ring_mem.flags =
5080 BNXT_RMEM_RING_PTE_FLAG;
5081 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
5082 if (i >= bp->tx_nr_rings_xdp) {
5083 int k = j + BNXT_RING_TO_TC_OFF(bp, i);
5085 bnapi2 = bp->bnapi[k];
5086 txr->txq_index = i - bp->tx_nr_rings_xdp;
5088 BNXT_RING_TO_TC(bp, txr->txq_index);
5089 bnapi2->tx_ring[txr->tx_napi_idx] = txr;
5090 bnapi2->tx_int = bnxt_tx_int;
5092 bnapi2 = bp->bnapi[j];
5093 bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
5094 bnapi2->tx_ring[0] = txr;
5095 bnapi2->tx_int = bnxt_tx_int_xdp;
5098 txr->bnapi = bnapi2;
5099 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
5100 txr->tx_cpr = &bnapi2->cp_ring;
5103 rc = bnxt_alloc_stats(bp);
5106 bnxt_init_stats(bp);
5108 rc = bnxt_alloc_ntp_fltrs(bp);
5112 rc = bnxt_alloc_vnics(bp);
5117 rc = bnxt_alloc_all_cp_arrays(bp);
5121 bnxt_init_ring_struct(bp);
5123 rc = bnxt_alloc_rx_rings(bp);
5127 rc = bnxt_alloc_tx_rings(bp);
5131 rc = bnxt_alloc_cp_rings(bp);
5135 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
5136 BNXT_VNIC_MCAST_FLAG |
5137 BNXT_VNIC_UCAST_FLAG;
5138 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
5139 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
5140 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
5142 rc = bnxt_alloc_vnic_attributes(bp);
5148 bnxt_free_mem(bp, true);
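/* bnxt_disable_int() below quiesces interrupts by writing the current
 * consumer index to each NQ/completion ring doorbell without the re-arm
 * bit, so the hardware stops generating further notifications for those
 * rings; bnxt_disable_int_sync() additionally bumps intr_sem and waits
 * for any in-flight handlers with synchronize_irq().
 */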
5152 static void bnxt_disable_int(struct bnxt *bp)
5159 for (i = 0; i < bp->cp_nr_rings; i++) {
5160 struct bnxt_napi *bnapi = bp->bnapi[i];
5161 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5162 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5164 if (ring->fw_ring_id != INVALID_HW_RING_ID)
5165 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5169 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
5171 struct bnxt_napi *bnapi = bp->bnapi[n];
5172 struct bnxt_cp_ring_info *cpr;
5174 cpr = &bnapi->cp_ring;
5175 return cpr->cp_ring_struct.map_idx;
5178 static void bnxt_disable_int_sync(struct bnxt *bp)
5185 atomic_inc(&bp->intr_sem);
5187 bnxt_disable_int(bp);
5188 for (i = 0; i < bp->cp_nr_rings; i++) {
5189 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
5191 synchronize_irq(bp->irq_tbl[map_idx].vector);
5195 static void bnxt_enable_int(struct bnxt *bp)
5199 atomic_set(&bp->intr_sem, 0);
5200 for (i = 0; i < bp->cp_nr_rings; i++) {
5201 struct bnxt_napi *bnapi = bp->bnapi[i];
5202 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5204 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5208 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
5211 DECLARE_BITMAP(async_events_bmap, 256);
5212 u32 *events = (u32 *)async_events_bmap;
5213 struct hwrm_func_drv_rgtr_output *resp;
5214 struct hwrm_func_drv_rgtr_input *req;
5218 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
5222 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
5223 FUNC_DRV_RGTR_REQ_ENABLES_VER |
5224 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5226 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
5227 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
5228 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
5229 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
5230 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
5231 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
5232 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
5233 req->flags = cpu_to_le32(flags);
5234 req->ver_maj_8b = DRV_VER_MAJ;
5235 req->ver_min_8b = DRV_VER_MIN;
5236 req->ver_upd_8b = DRV_VER_UPD;
5237 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
5238 req->ver_min = cpu_to_le16(DRV_VER_MIN);
5239 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
5245 memset(data, 0, sizeof(data));
5246 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
5247 u16 cmd = bnxt_vf_req_snif[i];
5248 unsigned int bit, idx;
5252 data[idx] |= 1 << bit;
5255 for (i = 0; i < 8; i++)
5256 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
5258 req->enables |=
5259 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
5262 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
5263 req->flags |= cpu_to_le32(
5264 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
5266 memset(async_events_bmap, 0, sizeof(async_events_bmap));
5267 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
5268 u16 event_id = bnxt_async_events_arr[i];
5270 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
5271 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5273 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
5276 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
5278 if (bmap && bmap_size) {
5279 for (i = 0; i < bmap_size; i++) {
5280 if (test_bit(i, bmap))
5281 __set_bit(i, async_events_bmap);
5284 for (i = 0; i < 8; i++)
5285 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
5288 req->enables |=
5289 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
5291 resp = hwrm_req_hold(bp, req);
5292 rc = hwrm_req_send(bp, req);
5294 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5295 if (resp->flags &
5296 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
5297 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
5299 hwrm_req_drop(bp, req);
5303 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
5305 struct hwrm_func_drv_unrgtr_input *req;
5308 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5311 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
5314 return hwrm_req_send(bp, req);
5317 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5319 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
5321 struct hwrm_tunnel_dst_port_free_input *req;
5324 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
5325 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
5327 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
5328 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
5331 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
5335 req->tunnel_type = tunnel_type;
5337 switch (tunnel_type) {
5338 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
5339 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
5341 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
5343 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
5344 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
5346 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
5348 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5349 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5350 bp->vxlan_gpe_port = 0;
5351 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5357 rc = hwrm_req_send(bp, req);
5359 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
5361 if (bp->flags & BNXT_FLAG_TPA)
5362 bnxt_set_tpa(bp, true);
5366 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
5369 struct hwrm_tunnel_dst_port_alloc_output *resp;
5370 struct hwrm_tunnel_dst_port_alloc_input *req;
5373 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
5377 req->tunnel_type = tunnel_type;
5378 req->tunnel_dst_port_val = port;
5380 resp = hwrm_req_hold(bp, req);
5381 rc = hwrm_req_send(bp, req);
5383 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
5388 switch (tunnel_type) {
5389 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
5390 bp->vxlan_port = port;
5391 bp->vxlan_fw_dst_port_id =
5392 le16_to_cpu(resp->tunnel_dst_port_id);
5394 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
5395 bp->nge_port = port;
5396 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
5398 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5399 bp->vxlan_gpe_port = port;
5400 bp->vxlan_gpe_fw_dst_port_id =
5401 le16_to_cpu(resp->tunnel_dst_port_id);
5406 if (bp->flags & BNXT_FLAG_TPA)
5407 bnxt_set_tpa(bp, true);
5410 hwrm_req_drop(bp, req);
5414 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
5416 struct hwrm_cfa_l2_set_rx_mask_input *req;
5417 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5420 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
5424 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5425 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
5426 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5427 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5429 req->mask = cpu_to_le32(vnic->rx_mask);
5430 return hwrm_req_send_silent(bp, req);
5433 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5435 if (!atomic_dec_and_test(&fltr->refcnt))
5437 spin_lock_bh(&bp->ntp_fltr_lock);
5438 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
5439 spin_unlock_bh(&bp->ntp_fltr_lock);
5442 hlist_del_rcu(&fltr->base.hash);
5443 bnxt_del_one_usr_fltr(bp, &fltr->base);
5444 if (fltr->base.flags) {
5445 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
5446 bp->ntp_fltr_count--;
5448 spin_unlock_bh(&bp->ntp_fltr_lock);
5449 kfree_rcu(fltr, base.rcu);
5452 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
5453 struct bnxt_l2_key *key,
5456 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
5457 struct bnxt_l2_filter *fltr;
5459 hlist_for_each_entry_rcu(fltr, head, base.hash) {
5460 struct bnxt_l2_key *l2_key = &fltr->l2_key;
5462 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5463 l2_key->vlan == key->vlan)
5469 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
5470 struct bnxt_l2_key *key,
5473 struct bnxt_l2_filter *fltr = NULL;
5476 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5478 atomic_inc(&fltr->refcnt);
5483 #define BNXT_IPV4_4TUPLE(bp, fkeys) \
5484 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5485 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
5486 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5487 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
5489 #define BNXT_IPV6_4TUPLE(bp, fkeys) \
5490 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
5491 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
5492 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
5493 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
5495 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5497 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5498 if (BNXT_IPV4_4TUPLE(bp, fkeys))
5499 return sizeof(fkeys->addrs.v4addrs) +
5500 sizeof(fkeys->ports);
5502 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
5503 return sizeof(fkeys->addrs.v4addrs);
5506 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
5507 if (BNXT_IPV6_4TUPLE(bp, fkeys))
5508 return sizeof(fkeys->addrs.v6addrs) +
5509 sizeof(fkeys->ports);
5511 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
5512 return sizeof(fkeys->addrs.v6addrs);
5518 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
5519 const unsigned char *key)
5521 u64 prefix = bp->toeplitz_prefix, hash = 0;
5522 struct bnxt_ipv4_tuple tuple4;
5523 struct bnxt_ipv6_tuple tuple6;
5527 len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
5531 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5532 tuple4.v4addrs = fkeys->addrs.v4addrs;
5533 tuple4.ports = fkeys->ports;
5534 four_tuple = (unsigned char *)&tuple4;
5536 tuple6.v6addrs = fkeys->addrs.v6addrs;
5537 tuple6.ports = fkeys->ports;
5538 four_tuple = (unsigned char *)&tuple6;
5541 for (i = 0, j = 8; i < len; i++, j++) {
5542 u8 byte = four_tuple[i];
5545 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
5546 if (byte & 0x80)
5547 hash ^= prefix;
5548 }
5549 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
5552 /* The valid part of the hash is in the upper 32 bits. */
5553 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
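/* The loop above is a straightforward software Toeplitz hash: "prefix"
 * holds a sliding 64-bit window of the RSS key, each set bit of the
 * input tuple XORs the current window into the accumulator, and after
 * every input byte the next key byte is shifted into the low bits of
 * the window.  Only the upper 32 bits of the accumulator form the
 * valid hash.
 */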
5556 #ifdef CONFIG_RFS_ACCEL
5557 static struct bnxt_l2_filter *
5558 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
5560 struct bnxt_l2_filter *fltr;
5563 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5564 BNXT_L2_FLTR_HASH_MASK;
5565 fltr = bnxt_lookup_l2_filter(bp, key, idx);
5570 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
5571 struct bnxt_l2_key *key, u32 idx)
5573 struct hlist_head *head;
5575 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
5576 fltr->l2_key.vlan = key->vlan;
5577 fltr->base.type = BNXT_FLTR_TYPE_L2;
5578 if (fltr->base.flags) {
5581 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5585 fltr->base.sw_id = (u16)bit_id;
5586 bp->ntp_fltr_count++;
5588 head = &bp->l2_fltr_hash_tbl[idx];
5589 hlist_add_head_rcu(&fltr->base.hash, head);
5590 bnxt_insert_usr_fltr(bp, &fltr->base);
5591 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
5592 atomic_set(&fltr->refcnt, 1);
5596 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
5597 struct bnxt_l2_key *key,
5600 struct bnxt_l2_filter *fltr;
5604 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5605 BNXT_L2_FLTR_HASH_MASK;
5606 fltr = bnxt_lookup_l2_filter(bp, key, idx);
5610 fltr = kzalloc(sizeof(*fltr), gfp);
5612 return ERR_PTR(-ENOMEM);
5613 spin_lock_bh(&bp->ntp_fltr_lock);
5614 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5615 spin_unlock_bh(&bp->ntp_fltr_lock);
5617 bnxt_del_l2_filter(bp, fltr);
5623 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
5624 struct bnxt_l2_key *key,
5627 struct bnxt_l2_filter *fltr;
5631 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5632 BNXT_L2_FLTR_HASH_MASK;
5633 spin_lock_bh(&bp->ntp_fltr_lock);
5634 fltr = __bnxt_lookup_l2_filter(bp, key, idx);
5636 fltr = ERR_PTR(-EEXIST);
5637 goto l2_filter_exit;
5639 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
5641 fltr = ERR_PTR(-ENOMEM);
5642 goto l2_filter_exit;
5644 fltr->base.flags = flags;
5645 rc = bnxt_init_l2_filter(bp, fltr, key, idx);
5647 spin_unlock_bh(&bp->ntp_fltr_lock);
5648 bnxt_del_l2_filter(bp, fltr);
5653 spin_unlock_bh(&bp->ntp_fltr_lock);
5657 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
5659 #ifdef CONFIG_BNXT_SRIOV
5660 struct bnxt_vf_info *vf = &pf->vf[vf_idx];
5664 return INVALID_HW_RING_ID;
5668 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5670 struct hwrm_cfa_l2_filter_free_input *req;
5671 u16 target_id = 0xffff;
5674 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
5675 struct bnxt_pf_info *pf = &bp->pf;
5677 if (fltr->base.vf_idx >= pf->active_vfs)
5680 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
5681 if (target_id == INVALID_HW_RING_ID)
5685 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5689 req->target_id = cpu_to_le16(target_id);
5690 req->l2_filter_id = fltr->base.filter_id;
5691 return hwrm_req_send(bp, req);
5694 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
5696 struct hwrm_cfa_l2_filter_alloc_output *resp;
5697 struct hwrm_cfa_l2_filter_alloc_input *req;
5698 u16 target_id = 0xffff;
5701 if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
5702 struct bnxt_pf_info *pf = &bp->pf;
5704 if (fltr->base.vf_idx >= pf->active_vfs)
5707 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
5709 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5713 req->target_id = cpu_to_le16(target_id);
5714 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5716 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5717 req->flags |=
5718 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
5719 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
5720 req->enables =
5721 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5722 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5723 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5724 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
5725 eth_broadcast_addr(req->l2_addr_mask);
5727 if (fltr->l2_key.vlan) {
5728 req->enables |=
5729 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
5730 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
5731 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
5733 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
5734 req->l2_ivlan_mask = cpu_to_le16(0xfff);
5737 resp = hwrm_req_hold(bp, req);
5738 rc = hwrm_req_send(bp, req);
5740 fltr->base.filter_id = resp->l2_filter_id;
5741 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
5743 hwrm_req_drop(bp, req);
5747 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
5748 struct bnxt_ntuple_filter *fltr)
5750 struct hwrm_cfa_ntuple_filter_free_input *req;
5753 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
5754 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
5758 req->ntuple_filter_id = fltr->base.filter_id;
5759 return hwrm_req_send(bp, req);
5762 #define BNXT_NTP_FLTR_FLAGS \
5763 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
5764 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
5765 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
5766 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
5767 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
5768 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
5769 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
5770 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
5771 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
5772 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
5773 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
5774 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
5775 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
5777 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
5778 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
5780 void bnxt_fill_ipv6_mask(__be32 mask[4])
5784 for (i = 0; i < 4; i++)
5785 mask[i] = cpu_to_be32(~0);
5789 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
5790 struct hwrm_cfa_ntuple_filter_alloc_input *req,
5793 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
5794 struct bnxt_vnic_info *vnic;
5797 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
5798 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
5799 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
5800 req->enables |= cpu_to_le32(enables);
5801 req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
5805 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
5806 req->flags |= cpu_to_le32(flags);
5807 req->dst_id = cpu_to_le16(rxq);
5811 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
5812 struct bnxt_ntuple_filter *fltr)
5814 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
5815 struct hwrm_cfa_ntuple_filter_alloc_input *req;
5816 struct bnxt_flow_masks *masks = &fltr->fmasks;
5817 struct flow_keys *keys = &fltr->fkeys;
5818 struct bnxt_l2_filter *l2_fltr;
5819 struct bnxt_vnic_info *vnic;
5822 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
5826 l2_fltr = fltr->l2_fltr;
5827 req->l2_filter_id = l2_fltr->base.filter_id;
5829 if (fltr->base.flags & BNXT_ACT_DROP) {
5830 req->flags =
5831 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
5832 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
5833 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq);
5835 vnic = &bp->vnic_info[fltr->base.rxq + 1];
5836 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
5838 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
5840 req->ethertype = htons(ETH_P_IP);
5841 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
5842 req->ip_protocol = keys->basic.ip_proto;
5844 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
5845 req->ethertype = htons(ETH_P_IPV6);
5846 req->ip_addr_type =
5847 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
5848 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
5849 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
5850 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
5851 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
5853 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
5854 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
5855 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
5856 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
5858 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
5859 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
5860 req->tunnel_type =
5861 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
5864 req->src_port = keys->ports.src;
5865 req->src_port_mask = masks->ports.src;
5866 req->dst_port = keys->ports.dst;
5867 req->dst_port_mask = masks->ports.dst;
5869 resp = hwrm_req_hold(bp, req);
5870 rc = hwrm_req_send(bp, req);
5872 fltr->base.filter_id = resp->ntuple_filter_id;
5873 hwrm_req_drop(bp, req);
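/* In the allocation above the filter destination is chosen in priority
 * order: an explicit drop action first, then the RFS ring table index
 * (either the dedicated ntuple VNIC on newer devices or the raw ring
 * index), and otherwise the per-ring VNIC that corresponds to the
 * target RX queue.
 */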
5877 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5880 struct bnxt_l2_filter *fltr;
5881 struct bnxt_l2_key key;
5884 ether_addr_copy(key.dst_mac_addr, mac_addr);
5886 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
5888 return PTR_ERR(fltr);
5890 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
5891 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
5893 bnxt_del_l2_filter(bp, fltr);
5895 bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
5899 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5901 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5903 /* Any associated ntuple filters will also be cleared by firmware. */
5904 for (i = 0; i < num_of_vnics; i++) {
5905 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5907 for (j = 0; j < vnic->uc_filter_count; j++) {
5908 struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
5910 bnxt_hwrm_l2_filter_free(bp, fltr);
5911 bnxt_del_l2_filter(bp, fltr);
5913 vnic->uc_filter_count = 0;
5917 #define BNXT_DFLT_TUNL_TPA_BMAP \
5918 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
5919 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
5920 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
5922 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
5923 struct hwrm_vnic_tpa_cfg_input *req)
5925 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
5927 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
5928 return;
5930 if (bp->vxlan_port)
5931 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
5932 if (bp->vxlan_gpe_port)
5933 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
5934 if (bp->nge_port)
5935 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
5937 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
5938 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
5941 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5943 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5944 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
5945 struct hwrm_vnic_tpa_cfg_input *req;
5948 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5951 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
5956 u16 mss = bp->dev->mtu - 40;
5957 u32 nsegs, n, segs = 0, flags;
5959 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5960 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5961 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5962 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5963 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5964 if (tpa_flags & BNXT_FLAG_GRO)
5965 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5967 req->flags = cpu_to_le32(flags);
5969 req->enables =
5970 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5971 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5972 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5974 /* Number of segs is in log2 units, and the first packet is not
5975 * included as part of this count.
5976 */
5977 if (mss <= BNXT_RX_PAGE_SIZE) {
5978 n = BNXT_RX_PAGE_SIZE / mss;
5979 nsegs = (MAX_SKB_FRAGS - 1) * n;
5981 n = mss / BNXT_RX_PAGE_SIZE;
5982 if (mss & (BNXT_RX_PAGE_SIZE - 1))
5983 n++;
5984 nsegs = (MAX_SKB_FRAGS - n) / n;
5987 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
5988 segs = MAX_TPA_SEGS_P5;
5989 max_aggs = bp->max_tpa;
5991 segs = ilog2(nsegs);
5993 req->max_agg_segs = cpu_to_le16(segs);
5994 req->max_aggs = cpu_to_le16(max_aggs);
5996 req->min_agg_len = cpu_to_le32(512);
5997 bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
5999 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6001 return hwrm_req_send(bp, req);
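/* Worked example of the aggregation sizing above (illustrative, assumed
 * values only): with an MTU of 1500, mss = 1460; with 4K RX pages,
 * mss <= BNXT_RX_PAGE_SIZE, so n = 4096 / 1460 = 2 and, assuming
 * MAX_SKB_FRAGS == 17, nsegs = (17 - 1) * 2 = 32.  On pre-P5 chips
 * max_agg_segs is then ilog2(32) = 5 (i.e. up to 2^5 segments per TPA
 * aggregation); P5 and later use MAX_TPA_SEGS_P5 and bp->max_tpa
 * instead.
 */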
6004 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
6006 struct bnxt_ring_grp_info *grp_info;
6008 grp_info = &bp->grp_info[ring->grp_idx];
6009 return grp_info->cp_fw_ring_id;
6012 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
6014 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6015 return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
6017 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6020 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
6022 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6023 return txr->tx_cpr->cp_ring_struct.fw_ring_id;
6025 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6028 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6032 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6033 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
6035 entries = HW_HASH_INDEX_SIZE;
6037 bp->rss_indir_tbl_entries = entries;
6038 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
6040 if (!bp->rss_indir_tbl)
6045 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
6047 u16 max_rings, max_entries, pad, i;
6049 if (!bp->rx_nr_rings)
6052 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6053 max_rings = bp->rx_nr_rings - 1;
6055 max_rings = bp->rx_nr_rings;
6057 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6059 for (i = 0; i < max_entries; i++)
6060 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6062 pad = bp->rss_indir_tbl_entries - max_entries;
6063 if (pad)
6064 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
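/* The default indirection table above simply spreads RSS buckets
 * round-robin across the RX rings: ethtool_rxfh_indir_default(i, n)
 * is i % n, so with 8 rings entry 0 maps to ring 0, entry 1 to ring 1,
 * ..., entry 8 back to ring 0, and any padding entries beyond the
 * usable table size are zeroed.
 */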
6067 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6069 u16 i, tbl_size, max_ring = 0;
6071 if (!bp->rss_indir_tbl)
6074 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6075 for (i = 0; i < tbl_size; i++)
6076 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
6080 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6082 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6085 return bnxt_calc_nr_ring_pages(rx_rings - 1,
6086 BNXT_RSS_TABLE_ENTRIES_P5);
6088 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6093 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
6095 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6098 /* Fill the RSS indirection table with ring group ids */
6099 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
6100 if (!no_rss)
6101 j = bp->rss_indir_tbl[i];
6102 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
6106 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
6107 struct bnxt_vnic_info *vnic)
6109 __le16 *ring_tbl = vnic->rss_table;
6110 struct bnxt_rx_ring_info *rxr;
6113 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6115 for (i = 0; i < tbl_size; i++) {
6118 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
6119 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6121 j = bp->rss_indir_tbl[i];
6122 rxr = &bp->rx_ring[j];
6124 ring_id = rxr->rx_ring_struct.fw_ring_id;
6125 *ring_tbl++ = cpu_to_le16(ring_id);
6126 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6127 *ring_tbl++ = cpu_to_le16(ring_id);
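/* On P5 and later chips each RSS indirection entry written above is a
 * pair: the RX ring's firmware ring id followed by the id of the
 * completion ring servicing it, which is why two table slots are
 * consumed per hash bucket.
 */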
6132 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
6133 struct bnxt_vnic_info *vnic)
6135 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6136 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
6137 if (bp->flags & BNXT_FLAG_CHIP_P7)
6138 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
6140 bnxt_fill_hw_rss_tbl(bp, vnic);
6143 if (bp->rss_hash_delta) {
6144 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
6145 if (bp->rss_hash_cfg & bp->rss_hash_delta)
6146 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
6148 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
6150 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
6152 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
6153 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
6154 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
6157 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
6159 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6160 struct hwrm_vnic_rss_cfg_input *req;
6163 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
6164 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
6167 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6172 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6173 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6174 return hwrm_req_send(bp, req);
6177 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
6179 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6180 struct hwrm_vnic_rss_cfg_input *req;
6181 dma_addr_t ring_tbl_map;
6185 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
6189 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6191 return hwrm_req_send(bp, req);
6193 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
6194 ring_tbl_map = vnic->rss_table_dma_addr;
6195 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6197 hwrm_req_hold(bp, req);
6198 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
6199 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
6200 req->ring_table_pair_index = i;
6201 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
6202 rc = hwrm_req_send(bp, req);
6208 hwrm_req_drop(bp, req);
6212 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6214 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6215 struct hwrm_vnic_rss_qcfg_output *resp;
6216 struct hwrm_vnic_rss_qcfg_input *req;
6218 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
6221 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6222 /* All contexts are configured with the same hash_type; context 0 always exists */
6223 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6224 resp = hwrm_req_hold(bp, req);
6225 if (!hwrm_req_send(bp, req)) {
6226 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
6227 bp->rss_hash_delta = 0;
6229 hwrm_req_drop(bp, req);
6232 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
6234 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6235 struct hwrm_vnic_plcmodes_cfg_input *req;
6238 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
6242 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
6243 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
6245 if (BNXT_RX_PAGE_MODE(bp)) {
6246 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
6248 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
6249 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
6250 req->enables |=
6251 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
6252 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
6253 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
6255 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
6256 return hwrm_req_send(bp, req);
6259 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
6262 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
6264 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
6267 req->rss_cos_lb_ctx_id =
6268 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
6270 hwrm_req_send(bp, req);
6271 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
6274 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
6278 for (i = 0; i < bp->nr_vnics; i++) {
6279 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
6281 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
6282 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
6283 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
6286 bp->rsscos_nr_ctxs = 0;
6289 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
6291 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
6292 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
6295 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
6299 resp = hwrm_req_hold(bp, req);
6300 rc = hwrm_req_send(bp, req);
6302 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
6303 le16_to_cpu(resp->rss_cos_lb_ctx_id);
6304 hwrm_req_drop(bp, req);
6309 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
6311 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
6312 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
6313 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
6316 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
6318 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
6319 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6320 struct hwrm_vnic_cfg_input *req;
6321 unsigned int ring = 0, grp_idx;
6325 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
6329 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6330 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
6332 req->default_rx_ring_id =
6333 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
6334 req->default_cmpl_ring_id =
6335 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
6336 req->enables =
6337 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
6338 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
6341 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
6342 /* Only RSS is supported for now; TBD: COS & LB */
6343 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
6344 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
6345 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6346 VNIC_CFG_REQ_ENABLES_MRU);
6347 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
6348 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
6349 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
6350 VNIC_CFG_REQ_ENABLES_MRU);
6351 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
6353 req->rss_rule = cpu_to_le16(0xffff);
6356 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
6357 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
6358 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
6359 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
6361 req->cos_rule = cpu_to_le16(0xffff);
6364 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
6366 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
6368 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
6369 ring = bp->rx_nr_rings - 1;
6371 grp_idx = bp->rx_ring[ring].bnapi->index;
6372 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
6373 req->lb_rule = cpu_to_le16(0xffff);
6375 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
6377 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
6378 #ifdef CONFIG_BNXT_SRIOV
6380 def_vlan = bp->vf.vlan;
6382 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
6383 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
6384 if (!vnic_id && bnxt_ulp_registered(bp->edev))
6385 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
6387 return hwrm_req_send(bp, req);
6390 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
6392 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
6393 struct hwrm_vnic_free_input *req;
6395 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
6396 return;
6398 req->vnic_id =
6399 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
6401 hwrm_req_send(bp, req);
6402 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
6406 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
6410 for (i = 0; i < bp->nr_vnics; i++)
6411 bnxt_hwrm_vnic_free_one(bp, i);
6414 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
6415 unsigned int start_rx_ring_idx,
6416 unsigned int nr_rings)
6418 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
6419 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6420 struct hwrm_vnic_alloc_output *resp;
6421 struct hwrm_vnic_alloc_input *req;
6424 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
6428 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6429 goto vnic_no_ring_grps;
6431 /* map ring groups to this vnic */
6432 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
6433 grp_idx = bp->rx_ring[i].bnapi->index;
6434 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
6435 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
6439 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
6443 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
6444 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
6445 if (vnic_id == BNXT_VNIC_DEFAULT)
6446 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
6448 resp = hwrm_req_hold(bp, req);
6449 rc = hwrm_req_send(bp, req);
6451 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
6452 hwrm_req_drop(bp, req);
6456 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
6458 struct hwrm_vnic_qcaps_output *resp;
6459 struct hwrm_vnic_qcaps_input *req;
6462 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
6463 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
6464 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
6465 if (bp->hwrm_spec_code < 0x10600)
6468 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
6472 resp = hwrm_req_hold(bp, req);
6473 rc = hwrm_req_send(bp, req);
6475 u32 flags = le32_to_cpu(resp->flags);
6477 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
6478 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
6479 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
6481 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
6482 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
6484 /* Older P5 fw before EXT_HW_STATS support did not set
6485 * VLAN_STRIP_CAP properly.
6486 */
6487 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
6488 (BNXT_CHIP_P5(bp) &&
6489 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
6490 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
6491 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
6492 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
6493 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
6494 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
6495 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
6496 if (bp->max_tpa_v2) {
6497 if (BNXT_CHIP_P5(bp))
6498 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
6500 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
6502 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
6503 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
6504 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
6505 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
6506 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
6507 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
6508 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
6509 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
6510 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
6511 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
6513 hwrm_req_drop(bp, req);
6517 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
6519 struct hwrm_ring_grp_alloc_output *resp;
6520 struct hwrm_ring_grp_alloc_input *req;
6524 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6527 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
6531 resp = hwrm_req_hold(bp, req);
6532 for (i = 0; i < bp->rx_nr_rings; i++) {
6533 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
6535 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
6536 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
6537 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
6538 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
6540 rc = hwrm_req_send(bp, req);
6545 bp->grp_info[grp_idx].fw_grp_id =
6546 le32_to_cpu(resp->ring_group_id);
6548 hwrm_req_drop(bp, req);
6552 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
6554 struct hwrm_ring_grp_free_input *req;
6557 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
6560 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
6563 hwrm_req_hold(bp, req);
6564 for (i = 0; i < bp->cp_nr_rings; i++) {
6565 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
6567 req->ring_group_id =
6568 cpu_to_le32(bp->grp_info[i].fw_grp_id);
6570 hwrm_req_send(bp, req);
6571 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
6573 hwrm_req_drop(bp, req);
6576 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
6577 struct bnxt_ring_struct *ring,
6578 u32 ring_type, u32 map_index)
6580 struct hwrm_ring_alloc_output *resp;
6581 struct hwrm_ring_alloc_input *req;
6582 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
6583 struct bnxt_ring_grp_info *grp_info;
6587 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
6592 if (rmem->nr_pages > 1) {
6593 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
6594 /* Page size is in log2 units */
6595 req->page_size = BNXT_PAGE_SHIFT;
6596 req->page_tbl_depth = 1;
6598 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
6601 /* Association of ring index with doorbell index and MSIX number */
6602 req->logical_id = cpu_to_le16(map_index);
6604 switch (ring_type) {
6605 case HWRM_RING_ALLOC_TX: {
6606 struct bnxt_tx_ring_info *txr;
6608 txr = container_of(ring, struct bnxt_tx_ring_info,
6610 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
6611 /* Association of transmit ring with completion ring */
6612 grp_info = &bp->grp_info[ring->grp_idx];
6613 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
6614 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
6615 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6616 req->queue_id = cpu_to_le16(ring->queue_id);
6617 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
6618 req->cmpl_coal_cnt =
6619 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
6622 case HWRM_RING_ALLOC_RX:
6623 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
6624 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
6625 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6628 /* Association of rx ring with stats context */
6629 grp_info = &bp->grp_info[ring->grp_idx];
6630 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
6631 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6632 req->enables |= cpu_to_le32(
6633 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
6634 if (NET_IP_ALIGN == 2)
6635 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
6636 req->flags = cpu_to_le16(flags);
6639 case HWRM_RING_ALLOC_AGG:
6640 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6641 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
6642 /* Association of agg ring with rx ring */
6643 grp_info = &bp->grp_info[ring->grp_idx];
6644 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
6645 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
6646 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
6647 req->enables |= cpu_to_le32(
6648 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
6649 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
6651 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
6653 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
6655 case HWRM_RING_ALLOC_CMPL:
6656 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
6657 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
6658 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6659 /* Association of cp ring with nq */
6660 grp_info = &bp->grp_info[map_index];
6661 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
6662 req->cq_handle = cpu_to_le64(ring->handle);
6663 req->enables |= cpu_to_le32(
6664 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
6665 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
6666 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
6669 case HWRM_RING_ALLOC_NQ:
6670 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
6671 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
6672 if (bp->flags & BNXT_FLAG_USING_MSIX)
6673 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
6676 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
6681 resp = hwrm_req_hold(bp, req);
6682 rc = hwrm_req_send(bp, req);
6683 err = le16_to_cpu(resp->error_code);
6684 ring_id = le16_to_cpu(resp->ring_id);
6685 hwrm_req_drop(bp, req);
6689 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
6690 ring_type, rc, err);
6693 ring->fw_ring_id = ring_id;
6697 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
6702 struct hwrm_func_cfg_input *req;
6704 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
6708 req->fid = cpu_to_le16(0xffff);
6709 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
6710 req->async_event_cr = cpu_to_le16(idx);
6711 return hwrm_req_send(bp, req);
6713 struct hwrm_func_vf_cfg_input *req;
6715 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
6720 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
6721 req->async_event_cr = cpu_to_le16(idx);
6722 return hwrm_req_send(bp, req);
6726 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
6729 switch (ring_type) {
6730 case HWRM_RING_ALLOC_TX:
6731 db->db_ring_mask = bp->tx_ring_mask;
6733 case HWRM_RING_ALLOC_RX:
6734 db->db_ring_mask = bp->rx_ring_mask;
6736 case HWRM_RING_ALLOC_AGG:
6737 db->db_ring_mask = bp->rx_agg_ring_mask;
6739 case HWRM_RING_ALLOC_CMPL:
6740 case HWRM_RING_ALLOC_NQ:
6741 db->db_ring_mask = bp->cp_ring_mask;
6744 if (bp->flags & BNXT_FLAG_CHIP_P7) {
6745 db->db_epoch_mask = db->db_ring_mask + 1;
6746 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
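/* Record the doorbell address and key for a ring. P5+ chips use 64-bit
 * doorbells at a fixed BAR offset with the firmware ring ID (xid) encoded in
 * the key (plus a valid bit on P7); older chips use 32-bit doorbells spaced
 * 0x80 apart, indexed by map_idx.
 */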
6750 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
6751 u32 map_idx, u32 xid)
6753 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6754 switch (ring_type) {
6755 case HWRM_RING_ALLOC_TX:
6756 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
6758 case HWRM_RING_ALLOC_RX:
6759 case HWRM_RING_ALLOC_AGG:
6760 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
6762 case HWRM_RING_ALLOC_CMPL:
6763 db->db_key64 = DBR_PATH_L2;
6765 case HWRM_RING_ALLOC_NQ:
6766 db->db_key64 = DBR_PATH_L2;
6769 db->db_key64 |= (u64)xid << DBR_XID_SFT;
6771 if (bp->flags & BNXT_FLAG_CHIP_P7)
6772 db->db_key64 |= DBR_VALID;
6774 db->doorbell = bp->bar1 + bp->db_offset;
6776 db->doorbell = bp->bar1 + map_idx * 0x80;
6777 switch (ring_type) {
6778 case HWRM_RING_ALLOC_TX:
6779 db->db_key32 = DB_KEY_TX;
6781 case HWRM_RING_ALLOC_RX:
6782 case HWRM_RING_ALLOC_AGG:
6783 db->db_key32 = DB_KEY_RX;
6785 case HWRM_RING_ALLOC_CMPL:
6786 db->db_key32 = DB_KEY_CP;
6790 bnxt_set_db_mask(bp, db, ring_type);
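/* Allocate all firmware rings for the device: NQs (or legacy completion
 * rings) first so interrupts and the async event ring can be set up, then
 * TX, RX and aggregation rings, programming each ring's doorbell as soon as
 * its firmware ring ID is known.
 */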
6793 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
6795 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
6799 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6800 type = HWRM_RING_ALLOC_NQ;
6802 type = HWRM_RING_ALLOC_CMPL;
6803 for (i = 0; i < bp->cp_nr_rings; i++) {
6804 struct bnxt_napi *bnapi = bp->bnapi[i];
6805 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6806 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
6807 u32 map_idx = ring->map_idx;
6808 unsigned int vector;
6810 vector = bp->irq_tbl[map_idx].vector;
6811 disable_irq_nosync(vector);
6812 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6817 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
6818 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
6820 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
6823 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
6825 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
6829 type = HWRM_RING_ALLOC_TX;
6830 for (i = 0; i < bp->tx_nr_rings; i++) {
6831 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6832 struct bnxt_ring_struct *ring;
6835 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6836 struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
6837 struct bnxt_napi *bnapi = txr->bnapi;
6838 u32 type2 = HWRM_RING_ALLOC_CMPL;
6840 ring = &cpr2->cp_ring_struct;
6841 ring->handle = BNXT_SET_NQ_HDL(cpr2);
6842 map_idx = bnapi->index;
6843 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6846 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
6848 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6850 ring = &txr->tx_ring_struct;
6852 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6855 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
6858 type = HWRM_RING_ALLOC_RX;
6859 for (i = 0; i < bp->rx_nr_rings; i++) {
6860 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6861 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6862 struct bnxt_napi *bnapi = rxr->bnapi;
6863 u32 map_idx = bnapi->index;
6865 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6868 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
6869 /* If we have agg rings, post agg buffers first. */
6871 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6872 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
6873 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
6874 struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
6875 u32 type2 = HWRM_RING_ALLOC_CMPL;
6877 ring = &cpr2->cp_ring_struct;
6878 ring->handle = BNXT_SET_NQ_HDL(cpr2);
6879 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6882 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
6884 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6889 type = HWRM_RING_ALLOC_AGG;
6890 for (i = 0; i < bp->rx_nr_rings; i++) {
6891 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6892 struct bnxt_ring_struct *ring =
6893 &rxr->rx_agg_ring_struct;
6894 u32 grp_idx = ring->grp_idx;
6895 u32 map_idx = grp_idx + bp->rx_nr_rings;
6897 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6901 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
6903 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
6904 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6905 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
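/* Send HWRM_RING_FREE for one ring. cmpl_ring_id identifies the completion
 * ring associated with the ring being freed, or INVALID_HW_RING_ID when the
 * caller does not need the free synchronized with the completion path.
 */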
6912 static int hwrm_ring_free_send_msg(struct bnxt *bp,
6913 struct bnxt_ring_struct *ring,
6914 u32 ring_type, int cmpl_ring_id)
6916 struct hwrm_ring_free_output *resp;
6917 struct hwrm_ring_free_input *req;
6921 if (BNXT_NO_FW_ACCESS(bp))
6924 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6928 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
6929 req->ring_type = ring_type;
6930 req->ring_id = cpu_to_le16(ring->fw_ring_id);
6932 resp = hwrm_req_hold(bp, req);
6933 rc = hwrm_req_send(bp, req);
6934 error_code = le16_to_cpu(resp->error_code);
6935 hwrm_req_drop(bp, req);
6937 if (rc || error_code) {
6938 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
6939 ring_type, rc, error_code);
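/* Free all firmware rings: TX, RX and aggregation rings first, then disable
 * interrupts and free the completion rings and NQs last, because the IRQ
 * doorbells depend on them.
 */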
6945 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
6953 for (i = 0; i < bp->tx_nr_rings; i++) {
6954 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6955 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
6957 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6958 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
6960 hwrm_ring_free_send_msg(bp, ring,
6961 RING_FREE_REQ_RING_TYPE_TX,
6962 close_path ? cmpl_ring_id :
6963 INVALID_HW_RING_ID);
6964 ring->fw_ring_id = INVALID_HW_RING_ID;
6968 for (i = 0; i < bp->rx_nr_rings; i++) {
6969 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6970 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6971 u32 grp_idx = rxr->bnapi->index;
6973 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6974 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6976 hwrm_ring_free_send_msg(bp, ring,
6977 RING_FREE_REQ_RING_TYPE_RX,
6978 close_path ? cmpl_ring_id :
6979 INVALID_HW_RING_ID);
6980 ring->fw_ring_id = INVALID_HW_RING_ID;
6981 bp->grp_info[grp_idx].rx_fw_ring_id =
6986 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
6987 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
6989 type = RING_FREE_REQ_RING_TYPE_RX;
6990 for (i = 0; i < bp->rx_nr_rings; i++) {
6991 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6992 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
6993 u32 grp_idx = rxr->bnapi->index;
6995 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6996 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6998 hwrm_ring_free_send_msg(bp, ring, type,
6999 close_path ? cmpl_ring_id :
7000 INVALID_HW_RING_ID);
7001 ring->fw_ring_id = INVALID_HW_RING_ID;
7002 bp->grp_info[grp_idx].agg_fw_ring_id =
7007 /* The completion rings are about to be freed. After that the
7008 * IRQ doorbell will not work anymore. So we need to disable it first.
 */
7011 bnxt_disable_int_sync(bp);
7013 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7014 type = RING_FREE_REQ_RING_TYPE_NQ;
7016 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
7017 for (i = 0; i < bp->cp_nr_rings; i++) {
7018 struct bnxt_napi *bnapi = bp->bnapi[i];
7019 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7020 struct bnxt_ring_struct *ring;
7023 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
7024 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
7026 ring = &cpr2->cp_ring_struct;
7027 if (ring->fw_ring_id == INVALID_HW_RING_ID)
7029 hwrm_ring_free_send_msg(bp, ring,
7030 RING_FREE_REQ_RING_TYPE_L2_CMPL,
7031 INVALID_HW_RING_ID);
7032 ring->fw_ring_id = INVALID_HW_RING_ID;
7034 ring = &cpr->cp_ring_struct;
7035 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
7036 hwrm_ring_free_send_msg(bp, ring, type,
7037 INVALID_HW_RING_ID);
7038 ring->fw_ring_id = INVALID_HW_RING_ID;
7039 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
7044 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
 bool shared);
7046 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
 bool shared);
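/* Query firmware (HWRM_FUNC_QCFG) for the resources currently reserved for
 * this function and cache them in bp->hw_resc. On P5+ chips the reserved
 * completion ring count caps RX + TX, so those values may be trimmed to fit.
 */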
7049 static int bnxt_hwrm_get_rings(struct bnxt *bp)
7051 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7052 struct hwrm_func_qcfg_output *resp;
7053 struct hwrm_func_qcfg_input *req;
7056 if (bp->hwrm_spec_code < 0x10601)
7059 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7063 req->fid = cpu_to_le16(0xffff);
7064 resp = hwrm_req_hold(bp, req);
7065 rc = hwrm_req_send(bp, req);
7067 hwrm_req_drop(bp, req);
7071 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7072 if (BNXT_NEW_RM(bp)) {
7075 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
7076 hw_resc->resv_hw_ring_grps =
7077 le32_to_cpu(resp->alloc_hw_ring_grps);
7078 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
7079 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
7080 cp = le16_to_cpu(resp->alloc_cmpl_rings);
7081 stats = le16_to_cpu(resp->alloc_stat_ctx);
7082 hw_resc->resv_irqs = cp;
7083 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7084 int rx = hw_resc->resv_rx_rings;
7085 int tx = hw_resc->resv_tx_rings;
7087 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7089 if (cp < (rx + tx)) {
7090 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
7092 goto get_rings_exit;
7093 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7095 hw_resc->resv_rx_rings = rx;
7096 hw_resc->resv_tx_rings = tx;
7098 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
7099 hw_resc->resv_hw_ring_grps = rx;
7101 hw_resc->resv_cp_rings = cp;
7102 hw_resc->resv_stat_ctxs = stats;
7105 hwrm_req_drop(bp, req);
7109 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
7111 struct hwrm_func_qcfg_output *resp;
7112 struct hwrm_func_qcfg_input *req;
7115 if (bp->hwrm_spec_code < 0x10601)
7118 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7122 req->fid = cpu_to_le16(fid);
7123 resp = hwrm_req_hold(bp, req);
7124 rc = hwrm_req_send(bp, req);
7126 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
7128 hwrm_req_drop(bp, req);
7132 static bool bnxt_rfs_supported(struct bnxt *bp);
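/* Build (but do not send) a FUNC_CFG request describing the ring, VNIC,
 * stat context and RSS context counts in *hwr that the PF wants reserved.
 * Firmware only acts on fields whose enable bits are set.
 */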
7134 static struct hwrm_func_cfg_input *
7135 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7137 struct hwrm_func_cfg_input *req;
7140 if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
7143 req->fid = cpu_to_le16(0xffff);
7144 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7145 req->num_tx_rings = cpu_to_le16(hwr->tx);
7146 if (BNXT_NEW_RM(bp)) {
7147 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
7148 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7149 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7150 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
7151 enables |= hwr->cp_p5 ?
7152 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7154 enables |= hwr->cp ?
7155 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7156 enables |= hwr->grp ?
7157 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7159 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
7160 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
7162 req->num_rx_rings = cpu_to_le16(hwr->rx);
7163 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7164 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7165 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7166 req->num_msix = cpu_to_le16(hwr->cp);
7168 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7169 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7171 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7172 req->num_vnics = cpu_to_le16(hwr->vnic);
7174 req->enables = cpu_to_le32(enables);
7178 static struct hwrm_func_vf_cfg_input *
7179 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7181 struct hwrm_func_vf_cfg_input *req;
7184 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
7187 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
7188 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
7189 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7190 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
7191 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
7192 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7193 enables |= hwr->cp_p5 ?
7194 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7196 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
7197 enables |= hwr->grp ?
7198 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
7200 enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
7201 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
7203 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
7204 req->num_tx_rings = cpu_to_le16(hwr->tx);
7205 req->num_rx_rings = cpu_to_le16(hwr->rx);
7206 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
7207 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7208 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
7210 req->num_cmpl_rings = cpu_to_le16(hwr->cp);
7211 req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
7213 req->num_stat_ctxs = cpu_to_le16(hwr->stat);
7214 req->num_vnics = cpu_to_le16(hwr->vnic);
7216 req->enables = cpu_to_le32(enables);
7221 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7223 struct hwrm_func_cfg_input *req;
7226 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7230 if (!req->enables) {
7231 hwrm_req_drop(bp, req);
7235 rc = hwrm_req_send(bp, req);
7239 if (bp->hwrm_spec_code < 0x10601)
7240 bp->hw_resc.resv_tx_rings = hwr->tx;
7242 return bnxt_hwrm_get_rings(bp);
7246 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7248 struct hwrm_func_vf_cfg_input *req;
7251 if (!BNXT_NEW_RM(bp)) {
7252 bp->hw_resc.resv_tx_rings = hwr->tx;
7256 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7260 rc = hwrm_req_send(bp, req);
7264 return bnxt_hwrm_get_rings(bp);
7267 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7270 return bnxt_hwrm_reserve_pf_rings(bp, hwr);
7272 return bnxt_hwrm_reserve_vf_rings(bp, hwr);
7275 int bnxt_nq_rings_in_use(struct bnxt *bp)
7277 int cp = bp->cp_nr_rings;
7278 int ulp_msix, ulp_base;
7280 ulp_msix = bnxt_get_ulp_msix_num(bp);
7282 ulp_base = bnxt_get_ulp_msix_base(bp);
7284 if ((ulp_base + ulp_msix) > cp)
7285 cp = ulp_base + ulp_msix;
7290 static int bnxt_cp_rings_in_use(struct bnxt *bp)
7294 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7295 return bnxt_nq_rings_in_use(bp);
7297 cp = bp->tx_nr_rings + bp->rx_nr_rings;
7301 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
7303 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
7304 int cp = bp->cp_nr_rings;
7309 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
7310 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
7312 return cp + ulp_stat;
7315 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7319 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
7320 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
7322 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7323 rss_ctx *= hwr->vnic;
7327 return BNXT_VF_MAX_RSS_CTX;
7328 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
7329 return hwr->grp + 1;
7333 /* Check if a default RSS map needs to be set up. This function is only
7334 * used on older firmware that does not require reserving RX rings.
 */
7336 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
7338 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7340 /* The RSS map is valid for RX rings set to resv_rx_rings */
7341 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
7342 hw_resc->resv_rx_rings = bp->rx_nr_rings;
7343 if (!netif_is_rxfh_configured(bp->dev))
7344 bnxt_set_dflt_rss_indir_tbl(bp);
7348 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
7350 if (bp->flags & BNXT_FLAG_RFS) {
7351 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
7353 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7354 return rx_rings + 1;
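/* Return true if the resources currently reserved in firmware no longer
 * match what the driver needs (ring counts, VNICs, stat contexts), i.e. a
 * new reservation must be made before the rings are (re)opened.
 */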
7359 static bool bnxt_need_reserve_rings(struct bnxt *bp)
7361 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7362 int cp = bnxt_cp_rings_in_use(bp);
7363 int nq = bnxt_nq_rings_in_use(bp);
7364 int rx = bp->rx_nr_rings, stat;
7367 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
7368 bp->hwrm_spec_code >= 0x10601)
7371 /* Old firmware does not need RX ring reservations but we still
7372 * need to set up a default RSS map when needed. With new firmware
7373 * we go through RX ring reservations first and then set up the
7374 * RSS map for the successfully reserved RX rings when needed.
 */
7376 if (!BNXT_NEW_RM(bp)) {
7377 bnxt_check_rss_tbl_no_rmgr(bp);
7381 vnic = bnxt_get_total_vnics(bp, rx);
7383 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7385 stat = bnxt_get_func_stat_ctxs(bp);
7386 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
7387 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
7388 (hw_resc->resv_hw_ring_grps != grp &&
7389 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
7391 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
7392 hw_resc->resv_irqs != nq)
7397 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7399 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7401 hwr->tx = hw_resc->resv_tx_rings;
7402 if (BNXT_NEW_RM(bp)) {
7403 hwr->rx = hw_resc->resv_rx_rings;
7404 hwr->cp = hw_resc->resv_irqs;
7405 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7406 hwr->cp_p5 = hw_resc->resv_cp_rings;
7407 hwr->grp = hw_resc->resv_hw_ring_grps;
7408 hwr->vnic = hw_resc->resv_vnics;
7409 hwr->stat = hw_resc->resv_stat_ctxs;
7410 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
7414 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7416 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
7417 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
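/* Reserve rings and related resources with firmware, then trim the driver's
 * ring counts to what was actually granted. The RSS indirection table is
 * reset to the default only if the reduced RX ring count invalidates a
 * user-configured table.
 */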
7420 static int __bnxt_reserve_rings(struct bnxt *bp)
7422 struct bnxt_hw_rings hwr = {0};
7427 if (!bnxt_need_reserve_rings(bp))
7430 hwr.cp = bnxt_nq_rings_in_use(bp);
7431 hwr.tx = bp->tx_nr_rings;
7432 hwr.rx = bp->rx_nr_rings;
7433 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7435 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7436 hwr.cp_p5 = hwr.rx + hwr.tx;
7438 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
7440 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7442 hwr.grp = bp->rx_nr_rings;
7443 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
7444 hwr.stat = bnxt_get_func_stat_ctxs(bp);
7446 rc = bnxt_hwrm_reserve_rings(bp, &hwr);
7450 bnxt_copy_reserved_rings(bp, &hwr);
7453 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7455 rx_rings = hwr.rx >> 1;
7457 if (netif_running(bp->dev))
7460 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7461 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7462 bp->dev->hw_features &= ~NETIF_F_LRO;
7463 bp->dev->features &= ~NETIF_F_LRO;
7464 bnxt_set_ring_params(bp);
7467 rx_rings = min_t(int, rx_rings, hwr.grp);
7468 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
7469 if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
7470 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
7471 hwr.cp = min_t(int, hwr.cp, hwr.stat);
7472 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
7473 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7474 hwr.rx = rx_rings << 1;
7475 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
7476 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
7477 bp->tx_nr_rings = hwr.tx;
7479 /* If we cannot reserve all the RX rings, reset the RSS map only
7480 * if absolutely necessary.
 */
7482 if (rx_rings != bp->rx_nr_rings) {
7483 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
7484 rx_rings, bp->rx_nr_rings);
7485 if (netif_is_rxfh_configured(bp->dev) &&
7486 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
7487 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
7488 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
7489 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
7490 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
7493 bp->rx_nr_rings = rx_rings;
7494 bp->cp_nr_rings = hwr.cp;
7496 if (!bnxt_rings_ok(bp, &hwr))
7499 if (!netif_is_rxfh_configured(bp->dev))
7500 bnxt_set_dflt_rss_indir_tbl(bp);
7505 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7507 struct hwrm_func_vf_cfg_input *req;
7510 if (!BNXT_NEW_RM(bp))
7513 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
7514 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
7515 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7516 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7517 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7518 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
7519 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
7520 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7521 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7523 req->flags = cpu_to_le32(flags);
7524 return hwrm_req_send_silent(bp, req);
7527 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7529 struct hwrm_func_cfg_input *req;
7532 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
7533 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
7534 if (BNXT_NEW_RM(bp)) {
7535 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
7536 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
7537 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
7538 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
7539 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
7540 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
7541 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
7543 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
7546 req->flags = cpu_to_le32(flags);
7547 return hwrm_req_send_silent(bp, req);
7550 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
7552 if (bp->hwrm_spec_code < 0x10801)
7556 return bnxt_hwrm_check_pf_rings(bp, hwr);
7558 return bnxt_hwrm_check_vf_rings(bp, hwr);
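/* Query interrupt coalescing capabilities. Legacy defaults are filled in
 * first so that older firmware (HWRM spec before 1.9.2) without
 * RING_AGGINT_QCAPS still gets sane limits.
 */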
7561 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
7563 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7564 struct hwrm_ring_aggint_qcaps_output *resp;
7565 struct hwrm_ring_aggint_qcaps_input *req;
7568 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
7569 coal_cap->num_cmpl_dma_aggr_max = 63;
7570 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
7571 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
7572 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
7573 coal_cap->int_lat_tmr_min_max = 65535;
7574 coal_cap->int_lat_tmr_max_max = 65535;
7575 coal_cap->num_cmpl_aggr_int_max = 65535;
7576 coal_cap->timer_units = 80;
7578 if (bp->hwrm_spec_code < 0x10902)
7581 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
7584 resp = hwrm_req_hold(bp, req);
7585 rc = hwrm_req_send_silent(bp, req);
7587 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
7588 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
7589 coal_cap->num_cmpl_dma_aggr_max =
7590 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
7591 coal_cap->num_cmpl_dma_aggr_during_int_max =
7592 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
7593 coal_cap->cmpl_aggr_dma_tmr_max =
7594 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
7595 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
7596 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
7597 coal_cap->int_lat_tmr_min_max =
7598 le16_to_cpu(resp->int_lat_tmr_min_max);
7599 coal_cap->int_lat_tmr_max_max =
7600 le16_to_cpu(resp->int_lat_tmr_max_max);
7601 coal_cap->num_cmpl_aggr_int_max =
7602 le16_to_cpu(resp->num_cmpl_aggr_int_max);
7603 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
7605 hwrm_req_drop(bp, req);
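/* Convert a coalescing time in microseconds to device timer ticks. With the
 * default 80 ns timer unit, for example, 100 usec becomes
 * 100 * 1000 / 80 = 1250 ticks.
 */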
7608 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
7610 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7612 return usec * 1000 / coal_cap->timer_units;
7615 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
7616 struct bnxt_coal *hw_coal,
7617 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7619 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7620 u16 val, tmr, max, flags = hw_coal->flags;
7621 u32 cmpl_params = coal_cap->cmpl_params;
7623 max = hw_coal->bufs_per_record * 128;
7624 if (hw_coal->budget)
7625 max = hw_coal->bufs_per_record * hw_coal->budget;
7626 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
7628 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
7629 req->num_cmpl_aggr_int = cpu_to_le16(val);
7631 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
7632 req->num_cmpl_dma_aggr = cpu_to_le16(val);
7634 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
7635 coal_cap->num_cmpl_dma_aggr_during_int_max);
7636 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
7638 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
7639 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
7640 req->int_lat_tmr_max = cpu_to_le16(tmr);
7642 /* min timer set to 1/2 of interrupt timer */
7643 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
7645 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
7646 req->int_lat_tmr_min = cpu_to_le16(val);
7647 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
7650 /* buf timer set to 1/4 of interrupt timer */
7651 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
7652 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
7655 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
7656 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
7657 val = clamp_t(u16, tmr, 1,
7658 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
7659 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
7661 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
7664 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
7665 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
7666 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
7667 req->flags = cpu_to_le16(flags);
7668 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
7671 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
7672 struct bnxt_coal *hw_coal)
7674 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
7675 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7676 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
7677 u32 nq_params = coal_cap->nq_params;
7681 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
7684 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7688 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
7690 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
7692 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
7693 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
7694 req->int_lat_tmr_min = cpu_to_le16(tmr);
7695 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
7696 return hwrm_req_send(bp, req);
7699 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
7701 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
7702 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7703 struct bnxt_coal coal;
7706 /* Tick values in microseconds.
7707 * 1 coal_buf x bufs_per_record = 1 completion record.
 */
7709 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
7711 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
7712 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
7714 if (!bnapi->rx_ring)
7717 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7721 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
7723 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
7725 return hwrm_req_send(bp, req_rx);
7729 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
7730 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7732 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
7734 req->ring_id = cpu_to_le16(ring_id);
7735 return hwrm_req_send(bp, req);
7739 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
7740 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
7742 struct bnxt_tx_ring_info *txr;
7745 bnxt_for_each_napi_tx(i, bnapi, txr) {
7748 ring_id = bnxt_cp_ring_for_tx(bp, txr);
7749 req->ring_id = cpu_to_le16(ring_id);
7750 rc = hwrm_req_send(bp, req);
7753 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
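/* Apply the RX and TX coalescing settings to every completion ring. On P5+
 * chips a NAPI instance may have both an RX and a TX completion ring, in
 * which case both are programmed and the NQ's minimum interrupt latency
 * timer is set as well.
 */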
7759 int bnxt_hwrm_set_coal(struct bnxt *bp)
7761 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
7764 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7768 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
7770 hwrm_req_drop(bp, req_rx);
7774 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
7775 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
7777 hwrm_req_hold(bp, req_rx);
7778 hwrm_req_hold(bp, req_tx);
7779 for (i = 0; i < bp->cp_nr_rings; i++) {
7780 struct bnxt_napi *bnapi = bp->bnapi[i];
7781 struct bnxt_coal *hw_coal;
7783 if (!bnapi->rx_ring)
7784 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
7786 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
7790 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
7793 if (bnapi->rx_ring && bnapi->tx_ring[0]) {
7794 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
7799 hw_coal = &bp->rx_coal;
7801 hw_coal = &bp->tx_coal;
7802 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
7804 hwrm_req_drop(bp, req_rx);
7805 hwrm_req_drop(bp, req_tx);
7809 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
7811 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
7812 struct hwrm_stat_ctx_free_input *req;
7818 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7821 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
7823 if (BNXT_FW_MAJ(bp) <= 20) {
7824 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
7825 hwrm_req_drop(bp, req);
7828 hwrm_req_hold(bp, req0);
7830 hwrm_req_hold(bp, req);
7831 for (i = 0; i < bp->cp_nr_rings; i++) {
7832 struct bnxt_napi *bnapi = bp->bnapi[i];
7833 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7835 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
7836 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
7838 req0->stat_ctx_id = req->stat_ctx_id;
7839 hwrm_req_send(bp, req0);
7841 hwrm_req_send(bp, req);
7843 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
7846 hwrm_req_drop(bp, req);
7848 hwrm_req_drop(bp, req0);
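/* Allocate one firmware statistics context per completion ring and record
 * the returned context IDs. Each request carries the DMA address of that
 * ring's stats buffer and the stats update period.
 */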
7851 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
7853 struct hwrm_stat_ctx_alloc_output *resp;
7854 struct hwrm_stat_ctx_alloc_input *req;
7857 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7860 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
7864 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
7865 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
7867 resp = hwrm_req_hold(bp, req);
7868 for (i = 0; i < bp->cp_nr_rings; i++) {
7869 struct bnxt_napi *bnapi = bp->bnapi[i];
7870 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
7872 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
7874 rc = hwrm_req_send(bp, req);
7878 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
7880 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
7882 hwrm_req_drop(bp, req);
7886 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
7888 struct hwrm_func_qcfg_output *resp;
7889 struct hwrm_func_qcfg_input *req;
7893 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
7897 req->fid = cpu_to_le16(0xffff);
7898 resp = hwrm_req_hold(bp, req);
7899 rc = hwrm_req_send(bp, req);
7901 goto func_qcfg_exit;
7903 #ifdef CONFIG_BNXT_SRIOV
7905 struct bnxt_vf_info *vf = &bp->vf;
7907 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
7909 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
7912 flags = le16_to_cpu(resp->flags);
7913 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
7914 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
7915 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
7916 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
7917 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
7919 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
7920 bp->flags |= BNXT_FLAG_MULTI_HOST;
7922 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
7923 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
7925 switch (resp->port_partition_type) {
7926 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
7927 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
7928 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
7929 bp->port_partition_type = resp->port_partition_type;
7932 if (bp->hwrm_spec_code < 0x10707 ||
7933 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
7934 bp->br_mode = BRIDGE_MODE_VEB;
7935 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
7936 bp->br_mode = BRIDGE_MODE_VEPA;
7938 bp->br_mode = BRIDGE_MODE_UNDEF;
7940 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
7942 bp->max_mtu = BNXT_MAX_MTU;
7945 goto func_qcfg_exit;
7947 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
7948 if (BNXT_CHIP_P5(bp)) {
7950 bp->db_offset = DB_PF_OFFSET_P5;
7952 bp->db_offset = DB_VF_OFFSET_P5;
7954 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
7956 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
7957 bp->db_size <= bp->db_offset)
7958 bp->db_size = pci_resource_len(bp->pdev, 2);
7961 hwrm_req_drop(bp, req);
7965 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
7966 u8 init_val, u8 init_offset,
7969 ctxm->init_value = init_val;
7970 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
7972 ctxm->init_offset = init_offset * 4;
7974 ctxm->init_value = 0;
7977 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
7979 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7982 for (type = 0; type < ctx_max; type++) {
7983 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
7986 if (!ctxm->max_entries)
7989 if (ctxm->instance_bmap)
7990 n = hweight32(ctxm->instance_bmap);
7991 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
7998 #define BNXT_CTX_INIT_VALID(flags) \
 (!!((flags) & \
8000 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
8002 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
8004 struct hwrm_func_backing_store_qcaps_v2_output *resp;
8005 struct hwrm_func_backing_store_qcaps_v2_input *req;
8006 struct bnxt_ctx_mem_info *ctx;
8010 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
8014 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8019 resp = hwrm_req_hold(bp, req);
8021 for (type = 0; type < BNXT_CTX_V2_MAX; ) {
8022 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8023 u8 init_val, init_off, i;
8027 req->type = cpu_to_le16(type);
8028 rc = hwrm_req_send(bp, req);
8031 flags = le32_to_cpu(resp->flags);
8032 type = le16_to_cpu(resp->next_valid_type);
8033 if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
8036 ctxm->type = le16_to_cpu(resp->type);
8037 ctxm->entry_size = le16_to_cpu(resp->entry_size);
8038 ctxm->flags = flags;
8039 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
8040 ctxm->entry_multiple = resp->entry_multiple;
8041 ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
8042 ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
8043 init_val = resp->ctx_init_value;
8044 init_off = resp->ctx_init_offset;
8045 bnxt_init_ctx_initializer(ctxm, init_val, init_off,
8046 BNXT_CTX_INIT_VALID(flags));
8047 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
8048 BNXT_MAX_SPLIT_ENTRY);
8049 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
8051 ctxm->split[i] = le32_to_cpu(*p);
8053 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
8056 hwrm_req_drop(bp, req);
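/* Query backing store (context memory) requirements from firmware. The V2
 * interface walks each context type individually using next_valid_type; the
 * legacy interface returns all types (QP, SRQ, CQ, VNIC, STAT, TQM, MRAV,
 * TIM) in a single response.
 */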
8060 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
8062 struct hwrm_func_backing_store_qcaps_output *resp;
8063 struct hwrm_func_backing_store_qcaps_input *req;
8066 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
8069 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8070 return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
8072 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
8076 resp = hwrm_req_hold(bp, req);
8077 rc = hwrm_req_send_silent(bp, req);
8079 struct bnxt_ctx_mem_type *ctxm;
8080 struct bnxt_ctx_mem_info *ctx;
8081 u8 init_val, init_idx = 0;
8086 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8093 init_val = resp->ctx_kind_initializer;
8094 init_mask = le16_to_cpu(resp->ctx_init_mask);
8096 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8097 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
8098 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
8099 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
8100 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
8101 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
8102 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
8103 (init_mask & (1 << init_idx++)) != 0);
8105 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8106 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
8107 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
8108 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
8109 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
8110 (init_mask & (1 << init_idx++)) != 0);
8112 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8113 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
8114 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
8115 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
8116 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
8117 (init_mask & (1 << init_idx++)) != 0);
8119 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8120 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
8121 ctxm->max_entries = ctxm->vnic_entries +
8122 le16_to_cpu(resp->vnic_max_ring_table_entries);
8123 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
8124 bnxt_init_ctx_initializer(ctxm, init_val,
8125 resp->vnic_init_offset,
8126 (init_mask & (1 << init_idx++)) != 0);
8128 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8129 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
8130 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
8131 bnxt_init_ctx_initializer(ctxm, init_val,
8132 resp->stat_init_offset,
8133 (init_mask & (1 << init_idx++)) != 0);
8135 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8136 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
8137 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
8138 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
8139 ctxm->entry_multiple = resp->tqm_entries_multiple;
8140 if (!ctxm->entry_multiple)
8141 ctxm->entry_multiple = 1;
8143 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
8145 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8146 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
8147 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
8148 ctxm->mrav_num_entries_units =
8149 le16_to_cpu(resp->mrav_num_entries_units);
8150 bnxt_init_ctx_initializer(ctxm, init_val,
8151 resp->mrav_init_offset,
8152 (init_mask & (1 << init_idx++)) != 0);
8154 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8155 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
8156 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
8158 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
8159 if (!ctx->tqm_fp_rings_count)
8160 ctx->tqm_fp_rings_count = bp->max_q;
8161 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
8162 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
8164 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8165 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
8166 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
8168 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
8173 hwrm_req_drop(bp, req);
8177 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
8180 if (!rmem->nr_pages)
8183 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
8184 if (rmem->depth >= 1) {
8185 if (rmem->depth == 2)
8189 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
8191 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
8195 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
8196 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
8197 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
8198 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
8199 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
8200 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
8202 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
8204 struct hwrm_func_backing_store_cfg_input *req;
8205 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8206 struct bnxt_ctx_pg_info *ctx_pg;
8207 struct bnxt_ctx_mem_type *ctxm;
8208 void **__req = (void **)&req;
8209 u32 req_len = sizeof(*req);
8210 __le32 *num_entries;
8221 if (req_len > bp->hwrm_max_ext_req_len)
8222 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
8223 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8227 req->enables = cpu_to_le32(enables);
8228 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
8229 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8230 ctx_pg = ctxm->pg_info;
8231 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
8232 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
8233 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
8234 req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
8235 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8236 &req->qpc_pg_size_qpc_lvl,
8237 &req->qpc_page_dir);
8239 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
8240 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
8242 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
8243 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8244 ctx_pg = ctxm->pg_info;
8245 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
8246 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
8247 req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
8248 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8249 &req->srq_pg_size_srq_lvl,
8250 &req->srq_page_dir);
8252 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
8253 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8254 ctx_pg = ctxm->pg_info;
8255 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
8256 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
8257 req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
8258 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8259 &req->cq_pg_size_cq_lvl,
8262 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
8263 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8264 ctx_pg = ctxm->pg_info;
8265 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
8266 req->vnic_num_ring_table_entries =
8267 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
8268 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
8269 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8270 &req->vnic_pg_size_vnic_lvl,
8271 &req->vnic_page_dir);
8273 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
8274 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8275 ctx_pg = ctxm->pg_info;
8276 req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
8277 req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
8278 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8279 &req->stat_pg_size_stat_lvl,
8280 &req->stat_page_dir);
8282 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
8285 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8286 ctx_pg = ctxm->pg_info;
8287 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
8288 units = ctxm->mrav_num_entries_units;
8290 u32 num_mr, num_ah = ctxm->mrav_av_entries;
8293 num_mr = ctx_pg->entries - num_ah;
8294 entries = ((num_mr / units) << 16) | (num_ah / units);
8295 req->mrav_num_entries = cpu_to_le32(entries);
8296 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
8298 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
8299 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8300 &req->mrav_pg_size_mrav_lvl,
8301 &req->mrav_page_dir);
8303 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
8304 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8305 ctx_pg = ctxm->pg_info;
8306 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
8307 req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
8308 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8309 &req->tim_pg_size_tim_lvl,
8310 &req->tim_page_dir);
8312 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8313 for (i = 0, num_entries = &req->tqm_sp_num_entries,
8314 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
8315 pg_dir = &req->tqm_sp_page_dir,
8316 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
8317 ctx_pg = ctxm->pg_info;
8318 i < BNXT_MAX_TQM_RINGS;
8319 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
8320 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
8321 if (!(enables & ena))
8324 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
8325 *num_entries = cpu_to_le32(ctx_pg->entries);
8326 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8328 req->flags = cpu_to_le32(flags);
8329 return hwrm_req_send(bp, req);
8332 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
8333 struct bnxt_ctx_pg_info *ctx_pg)
8335 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8337 rmem->page_size = BNXT_PAGE_SIZE;
8338 rmem->pg_arr = ctx_pg->ctx_pg_arr;
8339 rmem->dma_arr = ctx_pg->ctx_dma_arr;
8340 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
8341 if (rmem->depth >= 1)
8342 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
8343 return bnxt_alloc_ring(bp, rmem);
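/* Allocate host memory for one context region. Small regions use a single
 * level of pages; larger regions (or depth > 1) use an indirect page table
 * in which each entry points to a block of up to MAX_CTX_PAGES pages.
 */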
8346 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
8347 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
8348 u8 depth, struct bnxt_ctx_mem_type *ctxm)
8350 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8356 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8357 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
8358 ctx_pg->nr_pages = 0;
8361 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
8365 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
8367 if (!ctx_pg->ctx_pg_tbl)
8369 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
8370 rmem->nr_pages = nr_tbls;
8371 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8374 for (i = 0; i < nr_tbls; i++) {
8375 struct bnxt_ctx_pg_info *pg_tbl;
8377 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
8380 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
8381 rmem = &pg_tbl->ring_mem;
8382 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
8383 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
8385 rmem->nr_pages = MAX_CTX_PAGES;
8386 rmem->ctx_mem = ctxm;
8387 if (i == (nr_tbls - 1)) {
8388 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
8391 rmem->nr_pages = rem;
8393 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8398 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
8399 if (rmem->nr_pages > 1 || depth)
8401 rmem->ctx_mem = ctxm;
8402 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
8407 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
8408 struct bnxt_ctx_pg_info *ctx_pg)
8410 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
8412 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
8413 ctx_pg->ctx_pg_tbl) {
8414 int i, nr_tbls = rmem->nr_pages;
8416 for (i = 0; i < nr_tbls; i++) {
8417 struct bnxt_ctx_pg_info *pg_tbl;
8418 struct bnxt_ring_mem_info *rmem2;
8420 pg_tbl = ctx_pg->ctx_pg_tbl[i];
8423 rmem2 = &pg_tbl->ring_mem;
8424 bnxt_free_ring(bp, rmem2);
8425 ctx_pg->ctx_pg_arr[i] = NULL;
8427 ctx_pg->ctx_pg_tbl[i] = NULL;
8429 kfree(ctx_pg->ctx_pg_tbl);
8430 ctx_pg->ctx_pg_tbl = NULL;
8432 bnxt_free_ring(bp, rmem);
8433 ctx_pg->nr_pages = 0;
8436 static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
8437 struct bnxt_ctx_mem_type *ctxm, u32 entries,
8440 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
8441 int i, rc = 0, n = 1;
8444 if (!ctxm->entry_size || !ctx_pg)
8446 if (ctxm->instance_bmap)
8447 n = hweight32(ctxm->instance_bmap);
8448 if (ctxm->entry_multiple)
8449 entries = roundup(entries, ctxm->entry_multiple);
8450 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
8451 mem_size = entries * ctxm->entry_size;
8452 for (i = 0; i < n && !rc; i++) {
8453 ctx_pg[i].entries = entries;
8454 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
8455 ctxm->init_value ? ctxm : NULL);
8460 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
8461 struct bnxt_ctx_mem_type *ctxm,
8464 struct hwrm_func_backing_store_cfg_v2_input *req;
8465 u32 instance_bmap = ctxm->instance_bmap;
8466 int i, j, rc = 0, n = 1;
8469 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
8473 n = hweight32(ctxm->instance_bmap);
8477 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
8480 hwrm_req_hold(bp, req);
8481 req->type = cpu_to_le16(ctxm->type);
8482 req->entry_size = cpu_to_le16(ctxm->entry_size);
8483 req->subtype_valid_cnt = ctxm->split_entry_cnt;
8484 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
8485 p[i] = cpu_to_le32(ctxm->split[i]);
8486 for (i = 0, j = 0; j < n && !rc; i++) {
8487 struct bnxt_ctx_pg_info *ctx_pg;
8489 if (!(instance_bmap & (1 << i)))
8491 req->instance = cpu_to_le16(i);
8492 ctx_pg = &ctxm->pg_info[j++];
8493 if (!ctx_pg->entries)
8495 req->num_entries = cpu_to_le32(ctx_pg->entries);
8496 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
8497 &req->page_size_pbl_level,
8501 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
8502 rc = hwrm_req_send(bp, req);
8504 hwrm_req_drop(bp, req);
8508 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
8510 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8511 struct bnxt_ctx_mem_type *ctxm;
8518 else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
8519 last_type = BNXT_CTX_MAX - 1;
8521 last_type = BNXT_CTX_L2_MAX - 1;
8522 ctx->ctx_arr[last_type].last = 1;
8524 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
8525 ctxm = &ctx->ctx_arr[type];
8527 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
8534 void bnxt_free_ctx_mem(struct bnxt *bp)
8536 struct bnxt_ctx_mem_info *ctx = bp->ctx;
8542 for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
8543 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
8544 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
8549 if (ctxm->instance_bmap)
8550 n = hweight32(ctxm->instance_bmap);
8551 for (i = 0; i < n; i++)
8552 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
8555 ctxm->pg_info = NULL;
8558 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
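/* Size and allocate all context memory regions and hand them to firmware.
 * L2-only sizes come from the queried l2/qp1 entry counts; when RoCE is
 * supported, extra QP/SRQ/CQ entries plus the MRAV and TIM regions are added
 * before the backing store is configured.
 */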
8563 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
8565 struct bnxt_ctx_mem_type *ctxm;
8566 struct bnxt_ctx_mem_info *ctx;
8567 u32 l2_qps, qp1_qps, max_qps;
8568 u32 ena, entries_sp, entries;
8569 u32 srqs, max_srqs, min;
8577 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
8579 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
8584 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
8587 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8588 l2_qps = ctxm->qp_l2_entries;
8589 qp1_qps = ctxm->qp_qp1_entries;
8590 fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
8591 max_qps = ctxm->max_entries;
8592 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8593 srqs = ctxm->srq_l2_entries;
8594 max_srqs = ctxm->max_entries;
8596 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
8598 extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
8599 /* allocate extra qps if fw supports RoCE fast qp destroy feature */
8600 extra_qps += fast_qpmd_qps;
8601 extra_srqs = min_t(u32, 8192, max_srqs - srqs);
8603 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
8606 ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
8607 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
8612 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
8613 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
8617 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
8618 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
8619 extra_qps * 2, pg_lvl);
8623 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
8624 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8628 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
8629 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8633 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
8636 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
8637 /* 128K extra is needed to accommodate static AH context
8638 * allocation by f/w.
 */
8640 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
8641 num_ah = min_t(u32, num_mr, 1024 * 128);
8642 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
8643 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
8644 ctxm->mrav_av_entries = num_ah;
8646 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
8649 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
8651 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
8652 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
8655 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
8658 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
8659 min = ctxm->min_entries;
8660 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
8661 2 * (extra_qps + qp1_qps) + min;
8662 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
8666 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
8667 entries = l2_qps + 2 * (extra_qps + qp1_qps);
8668 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
8671 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
8672 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
8673 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
8675 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
8676 rc = bnxt_backing_store_cfg_v2(bp, ena);
8678 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
8680 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
8684 ctx->flags |= BNXT_CTX_FLAG_INITED;
8688 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
8690 struct hwrm_func_resource_qcaps_output *resp;
8691 struct hwrm_func_resource_qcaps_input *req;
8692 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8695 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
8699 req->fid = cpu_to_le16(0xffff);
8700 resp = hwrm_req_hold(bp, req);
8701 rc = hwrm_req_send_silent(bp, req);
8703 goto hwrm_func_resc_qcaps_exit;
8705 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
8707 goto hwrm_func_resc_qcaps_exit;
8709 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
8710 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
8711 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
8712 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
8713 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
8714 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
8715 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
8716 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
8717 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
8718 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
8719 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
8720 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
8721 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
8722 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
8723 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
8724 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
8726 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8727 u16 max_msix = le16_to_cpu(resp->max_msix);
8729 hw_resc->max_nqs = max_msix;
8730 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
8734 struct bnxt_pf_info *pf = &bp->pf;
8736 pf->vf_resv_strategy =
8737 le16_to_cpu(resp->vf_reservation_strategy);
8738 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
8739 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
8741 hwrm_func_resc_qcaps_exit:
8742 hwrm_req_drop(bp, req);
8746 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
8748 struct hwrm_port_mac_ptp_qcfg_output *resp;
8749 struct hwrm_port_mac_ptp_qcfg_input *req;
8750 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
8755 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
8760 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
8764 req->port_id = cpu_to_le16(bp->pf.port_id);
8765 resp = hwrm_req_hold(bp, req);
8766 rc = hwrm_req_send(bp, req);
8770 flags = resp->flags;
8771 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
8776 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
8784 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
8785 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
8786 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
8787 } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
8788 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
8789 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
8794 phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
8795 rc = bnxt_ptp_init(bp, phc_cfg);
8797 netdev_warn(bp->dev, "PTP initialization failed.\n");
8799 hwrm_req_drop(bp, req);
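/* Query basic function capabilities and resource maximums from firmware and
 * translate the returned flag bits into driver fw_cap/flags bits. PF-only
 * fields such as port ID, MAC address and max VFs are captured when running
 * on the PF.
 */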
8810 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
8812 struct hwrm_func_qcaps_output *resp;
8813 struct hwrm_func_qcaps_input *req;
8814 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8815 u32 flags, flags_ext, flags_ext2;
8818 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
8822 req->fid = cpu_to_le16(0xffff);
8823 resp = hwrm_req_hold(bp, req);
8824 rc = hwrm_req_send(bp, req);
8826 goto hwrm_func_qcaps_exit;
8828 flags = le32_to_cpu(resp->flags);
8829 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
8830 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
8831 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
8832 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
8833 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
8834 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
8835 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
8836 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
8837 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
8838 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
8839 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
8840 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
8841 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
8842 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
8843 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
8844 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
8845 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
8846 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
8848 flags_ext = le32_to_cpu(resp->flags_ext);
8849 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
8850 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
8851 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
8852 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
8853 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
8854 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
8855 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
8856 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
8857 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
8858 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
8859 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
8860 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
8861 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
8862 bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
8864 flags_ext2 = le32_to_cpu(resp->flags_ext2);
8865 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
8866 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
8867 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
8868 bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
8870 bp->tx_push_thresh = 0;
8871 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
8872 BNXT_FW_MAJ(bp) > 217)
8873 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
8875 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
8876 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
8877 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
8878 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
8879 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
8880 if (!hw_resc->max_hw_ring_grps)
8881 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
8882 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
8883 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
8884 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
8886 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
8887 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
8888 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
8889 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
8890 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
8891 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
8893 if (BNXT_PF(bp)) {
8894 struct bnxt_pf_info *pf = &bp->pf;
8896 pf->fw_fid = le16_to_cpu(resp->fid);
8897 pf->port_id = le16_to_cpu(resp->port_id);
8898 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
8899 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
8900 pf->max_vfs = le16_to_cpu(resp->max_vfs);
8901 bp->flags &= ~BNXT_FLAG_WOL_CAP;
8902 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
8903 bp->flags |= BNXT_FLAG_WOL_CAP;
8904 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
8905 bp->fw_cap |= BNXT_FW_CAP_PTP;
8912 #ifdef CONFIG_BNXT_SRIOV
8913 struct bnxt_vf_info *vf = &bp->vf;
8915 vf->fw_fid = le16_to_cpu(resp->fid);
8916 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
8920 hwrm_func_qcaps_exit:
8921 hwrm_req_drop(bp, req);
8925 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
8927 struct hwrm_dbg_qcaps_output *resp;
8928 struct hwrm_dbg_qcaps_input *req;
8932 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
8935 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
8939 req->fid = cpu_to_le16(0xffff);
8940 resp = hwrm_req_hold(bp, req);
8941 rc = hwrm_req_send(bp, req);
8942 if (rc)
8943 goto hwrm_dbg_qcaps_exit;
8945 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
8947 hwrm_dbg_qcaps_exit:
8948 hwrm_req_drop(bp, req);
8951 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
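/* Top-level capability discovery: query function and debug capabilities,
 * then the queue/port configuration, and on newer HWRM specs allocate
 * context memory and query resource limits.  Firmware that answers the
 * resource qcaps request is marked with BNXT_FW_CAP_NEW_RM so the driver
 * uses the new resource manager interface.
 */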
8953 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
8957 rc = __bnxt_hwrm_func_qcaps(bp);
8961 bnxt_hwrm_dbg_qcaps(bp);
8963 rc = bnxt_hwrm_queue_qportcfg(bp);
8965 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
8968 if (bp->hwrm_spec_code >= 0x10803) {
8969 rc = bnxt_alloc_ctx_mem(bp);
8972 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8974 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
8979 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
8981 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
8982 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
8986 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
8989 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
8993 resp = hwrm_req_hold(bp, req);
8994 rc = hwrm_req_send(bp, req);
8995 if (rc)
8996 goto hwrm_cfa_adv_qcaps_exit;
8998 flags = le32_to_cpu(resp->flags);
8999 if (flags &
9000 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
9001 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
9003 if (flags &
9004 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
9005 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
9007 if (flags &
9008 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
9009 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
9011 hwrm_cfa_adv_qcaps_exit:
9012 hwrm_req_drop(bp, req);
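/* Firmware health monitoring: allocate bp->fw_health on demand.  The
 * wrapper below only allocates it when the firmware advertises hot reset
 * or error recovery support, and clears those capabilities if the
 * allocation fails.
 */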
9016 static int __bnxt_alloc_fw_health(struct bnxt *bp)
9021 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9025 mutex_init(&bp->fw_health->lock);
9029 static int bnxt_alloc_fw_health(struct bnxt *bp)
9033 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
9034 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9037 rc = __bnxt_alloc_fw_health(bp);
9039 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
9040 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
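/* Program the BAR0 GRC window used for firmware health polling so that the
 * health registers can be read through a fixed window offset.
 */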
9047 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
9049 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9050 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
9051 BNXT_FW_HEALTH_WIN_MAP_OFF);
9054 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
9056 struct bnxt_fw_health *fw_health = bp->fw_health;
9062 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
9063 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9064 fw_health->status_reliable = false;
9066 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
9067 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
9068 fw_health->resets_reliable = false;
9071 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
9079 bp->fw_health->status_reliable = false;
9081 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
9082 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
9084 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
9085 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
9086 if (!bp->chip_num) {
9087 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
9088 bp->chip_num = readl(bp->bar0 +
9089 BNXT_FW_HEALTH_WIN_BASE +
9090 BNXT_GRC_REG_CHIP_NUM);
9092 if (!BNXT_CHIP_P5(bp))
9093 return;
9095 status_loc = BNXT_GRC_REG_STATUS_P5 |
9096 BNXT_FW_HEALTH_REG_TYPE_BAR0;
9097 } else {
9098 status_loc = readl(hs + offsetof(struct hcomm_status,
9099 fw_status_loc));
9100 }
9102 if (__bnxt_alloc_fw_health(bp)) {
9103 netdev_warn(bp->dev, "no memory for firmware status checks\n");
9104 return;
9105 }
9107 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
9108 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
9109 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
9110 __bnxt_map_fw_health_reg(bp, status_loc);
9111 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
9112 BNXT_FW_HEALTH_WIN_OFF(status_loc);
9115 bp->fw_health->status_reliable = true;
9118 static int bnxt_map_fw_health_regs(struct bnxt *bp)
9120 struct bnxt_fw_health *fw_health = bp->fw_health;
9121 u32 reg_base = 0xffffffff;
9124 bp->fw_health->status_reliable = false;
9125 bp->fw_health->resets_reliable = false;
9126 /* Only pre-map the monitoring GRC registers using window 3 */
9127 for (i = 0; i < 4; i++) {
9128 u32 reg = fw_health->regs[i];
9130 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
9131 continue;
9132 if (reg_base == 0xffffffff)
9133 reg_base = reg & BNXT_GRC_BASE_MASK;
9134 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
9135 return -ERANGE;
9136 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9138 bp->fw_health->status_reliable = true;
9139 bp->fw_health->resets_reliable = true;
9140 if (reg_base == 0xffffffff)
9141 return 0;
9143 __bnxt_map_fw_health_reg(bp, reg_base);
9147 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
9152 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
9153 bp->fw_health->status_reliable = true;
9154 bp->fw_health->resets_reliable = true;
9156 bnxt_try_map_fw_health_reg(bp);
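/* Query the error recovery configuration (HWRM_ERROR_RECOVERY_QCFG):
 * polling and wait intervals, the health/heartbeat/reset-count register
 * locations, and the register write sequence used to reset the chip.
 * The GRC registers are then pre-mapped for polling; on failure the error
 * recovery capability is cleared.
 */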
9160 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
9162 struct bnxt_fw_health *fw_health = bp->fw_health;
9163 struct hwrm_error_recovery_qcfg_output *resp;
9164 struct hwrm_error_recovery_qcfg_input *req;
9167 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
9170 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
9174 resp = hwrm_req_hold(bp, req);
9175 rc = hwrm_req_send(bp, req);
9176 if (rc)
9177 goto err_recovery_out;
9178 fw_health->flags = le32_to_cpu(resp->flags);
9179 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
9180 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
9181 rc = -EINVAL;
9182 goto err_recovery_out;
9184 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
9185 fw_health->master_func_wait_dsecs =
9186 le32_to_cpu(resp->master_func_wait_period);
9187 fw_health->normal_func_wait_dsecs =
9188 le32_to_cpu(resp->normal_func_wait_period);
9189 fw_health->post_reset_wait_dsecs =
9190 le32_to_cpu(resp->master_func_wait_period_after_reset);
9191 fw_health->post_reset_max_wait_dsecs =
9192 le32_to_cpu(resp->max_bailout_time_after_reset);
9193 fw_health->regs[BNXT_FW_HEALTH_REG] =
9194 le32_to_cpu(resp->fw_health_status_reg);
9195 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
9196 le32_to_cpu(resp->fw_heartbeat_reg);
9197 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
9198 le32_to_cpu(resp->fw_reset_cnt_reg);
9199 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
9200 le32_to_cpu(resp->reset_inprogress_reg);
9201 fw_health->fw_reset_inprog_reg_mask =
9202 le32_to_cpu(resp->reset_inprogress_reg_mask);
9203 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
9204 if (fw_health->fw_reset_seq_cnt >= 16) {
9205 rc = -EINVAL;
9206 goto err_recovery_out;
9208 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
9209 fw_health->fw_reset_seq_regs[i] =
9210 le32_to_cpu(resp->reset_reg[i]);
9211 fw_health->fw_reset_seq_vals[i] =
9212 le32_to_cpu(resp->reset_reg_val[i]);
9213 fw_health->fw_reset_seq_delay_msec[i] =
9214 resp->delay_after_reset[i];
9217 hwrm_req_drop(bp, req);
9219 rc = bnxt_map_fw_health_regs(bp);
9221 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
9225 static int bnxt_hwrm_func_reset(struct bnxt *bp)
9227 struct hwrm_func_reset_input *req;
9230 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
9235 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
9236 return hwrm_req_send(bp, req);
9239 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
9241 struct hwrm_nvm_get_dev_info_output nvm_info;
9243 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
9244 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
9245 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
9246 nvm_info.nvm_cfg_ver_upd);
9249 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
9251 struct hwrm_queue_qportcfg_output *resp;
9252 struct hwrm_queue_qportcfg_input *req;
9257 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
9261 resp = hwrm_req_hold(bp, req);
9262 rc = hwrm_req_send(bp, req);
9266 if (!resp->max_configurable_queues) {
9270 bp->max_tc = resp->max_configurable_queues;
9271 bp->max_lltc = resp->max_configurable_lossless_queues;
9272 if (bp->max_tc > BNXT_MAX_QUEUE)
9273 bp->max_tc = BNXT_MAX_QUEUE;
9275 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
9276 qptr = &resp->queue_id0;
9277 for (i = 0, j = 0; i < bp->max_tc; i++) {
9278 bp->q_info[j].queue_id = *qptr;
9279 bp->q_ids[i] = *qptr++;
9280 bp->q_info[j].queue_profile = *qptr++;
9281 bp->tc_to_qidx[j] = j;
9282 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
9283 (no_rdma && BNXT_PF(bp)))
9286 bp->max_q = bp->max_tc;
9287 bp->max_tc = max_t(u8, j, 1);
9289 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
9292 if (bp->max_lltc > bp->max_tc)
9293 bp->max_lltc = bp->max_tc;
9296 hwrm_req_drop(bp, req);
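/* Lightweight firmware liveness check: send HWRM_VER_GET silently with a
 * full wait, used to poll for firmware readiness.
 */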
9300 static int bnxt_hwrm_poll(struct bnxt *bp)
9302 struct hwrm_ver_get_input *req;
9305 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9309 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
9310 req->hwrm_intf_min = HWRM_VERSION_MINOR;
9311 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
9313 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
9314 rc = hwrm_req_send(bp, req);
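/* Query and cache the HWRM interface and firmware versions, the default
 * and maximum command timeouts, maximum request lengths, chip number and
 * revision, and the device capability flags (short command, Kong channel,
 * 64-bit flow handles, trusted VF, advanced flow management).
 */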
9318 static int bnxt_hwrm_ver_get(struct bnxt *bp)
9320 struct hwrm_ver_get_output *resp;
9321 struct hwrm_ver_get_input *req;
9322 u16 fw_maj, fw_min, fw_bld, fw_rsv;
9323 u32 dev_caps_cfg, hwrm_ver;
9326 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
9330 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9331 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
9332 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
9333 req->hwrm_intf_min = HWRM_VERSION_MINOR;
9334 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
9336 resp = hwrm_req_hold(bp, req);
9337 rc = hwrm_req_send(bp, req);
9338 if (rc)
9339 goto hwrm_ver_get_exit;
9341 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
9343 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
9344 resp->hwrm_intf_min_8b << 8 |
9345 resp->hwrm_intf_upd_8b;
9346 if (resp->hwrm_intf_maj_8b < 1) {
9347 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
9348 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
9349 resp->hwrm_intf_upd_8b);
9350 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
9353 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
9354 HWRM_VERSION_UPDATE;
9356 if (bp->hwrm_spec_code > hwrm_ver)
9357 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9358 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
9359 HWRM_VERSION_UPDATE);
9360 else
9361 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9362 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
9363 resp->hwrm_intf_upd_8b);
9365 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
9366 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
9367 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
9368 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
9369 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
9370 len = FW_VER_STR_LEN;
9371 } else {
9372 fw_maj = resp->hwrm_fw_maj_8b;
9373 fw_min = resp->hwrm_fw_min_8b;
9374 fw_bld = resp->hwrm_fw_bld_8b;
9375 fw_rsv = resp->hwrm_fw_rsvd_8b;
9376 len = BC_HWRM_STR_LEN;
9377 }
9378 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
9379 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
9382 if (strlen(resp->active_pkg_name)) {
9383 int fw_ver_len = strlen(bp->fw_ver_str);
9385 snprintf(bp->fw_ver_str + fw_ver_len,
9386 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
9387 resp->active_pkg_name);
9388 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
9391 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
9392 if (!bp->hwrm_cmd_timeout)
9393 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
9394 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
9395 if (!bp->hwrm_cmd_max_timeout)
9396 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
9397 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
9398 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
9399 bp->hwrm_cmd_max_timeout / 1000);
9401 if (resp->hwrm_intf_maj_8b >= 1) {
9402 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
9403 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
9405 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
9406 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
9408 bp->chip_num = le16_to_cpu(resp->chip_num);
9409 bp->chip_rev = resp->chip_rev;
9410 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
9412 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
9414 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
9415 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
9416 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
9417 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
9419 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
9420 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
9422 if (dev_caps_cfg &
9423 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
9424 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
9426 if (dev_caps_cfg &
9427 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
9428 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
9430 if (dev_caps_cfg &
9431 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
9432 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
9435 hwrm_req_drop(bp, req);
9439 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
9441 struct hwrm_fw_set_time_input *req;
9443 time64_t now = ktime_get_real_seconds();
9446 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
9447 bp->hwrm_spec_code < 0x10400)
9450 time64_to_tm(now, 0, &tm);
9451 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
9455 req->year = cpu_to_le16(1900 + tm.tm_year);
9456 req->month = 1 + tm.tm_mon;
9457 req->day = tm.tm_mday;
9458 req->hour = tm.tm_hour;
9459 req->minute = tm.tm_min;
9460 req->second = tm.tm_sec;
9461 return hwrm_req_send(bp, req);
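/* Accumulate one hardware counter into its 64-bit software mirror.  The
 * hardware counter is only @mask wide and can wrap; the software copy keeps
 * the accumulated high-order bits.  For example, with a 48-bit counter
 * (mask = 0xffffffffffff), if the new hardware value is smaller than the
 * masked software value, the counter has wrapped and one full period
 * (mask + 1) is added to the software total.
 */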
9464 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
9469 sw_tmp = (*sw & ~mask) | hw;
9470 if (hw < (*sw & mask))
9471 sw_tmp += mask + 1;
9472 WRITE_ONCE(*sw, sw_tmp);
9475 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
9476 int count, bool ignore_zero)
9480 for (i = 0; i < count; i++) {
9481 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
9483 if (ignore_zero && !hw)
9486 if (masks[i] == -1ULL)
9489 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
9493 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
9495 if (!stats->hw_stats)
9498 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
9499 stats->hw_masks, stats->len / 8, false);
9502 static void bnxt_accumulate_all_stats(struct bnxt *bp)
9504 struct bnxt_stats_mem *ring0_stats;
9505 bool ignore_zero = false;
9508 /* Chip bug. Counter intermittently becomes 0. */
9509 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9512 for (i = 0; i < bp->cp_nr_rings; i++) {
9513 struct bnxt_napi *bnapi = bp->bnapi[i];
9514 struct bnxt_cp_ring_info *cpr;
9515 struct bnxt_stats_mem *stats;
9517 cpr = &bnapi->cp_ring;
9518 stats = &cpr->stats;
9520 ring0_stats = stats;
9521 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
9522 ring0_stats->hw_masks,
9523 ring0_stats->len / 8, ignore_zero);
9525 if (bp->flags & BNXT_FLAG_PORT_STATS) {
9526 struct bnxt_stats_mem *stats = &bp->port_stats;
9527 __le64 *hw_stats = stats->hw_stats;
9528 u64 *sw_stats = stats->sw_stats;
9529 u64 *masks = stats->hw_masks;
9532 cnt = sizeof(struct rx_port_stats) / 8;
9533 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
9535 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9536 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9537 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
9538 cnt = sizeof(struct tx_port_stats) / 8;
9539 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
9541 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
9542 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
9543 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
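/* DMA the port statistics from firmware into the driver's stats buffers.
 * RX stats land at the start of the buffer and TX stats at
 * BNXT_TX_PORT_STATS_BYTE_OFFSET, matching the layout walked by
 * bnxt_accumulate_all_stats() above.
 */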
9547 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
9549 struct hwrm_port_qstats_input *req;
9550 struct bnxt_pf_info *pf = &bp->pf;
9553 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
9556 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
9559 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
9564 req->port_id = cpu_to_le16(pf->port_id);
9565 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
9566 BNXT_TX_PORT_STATS_BYTE_OFFSET);
9567 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
9568 return hwrm_req_send(bp, req);
9571 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
9573 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
9574 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
9575 struct hwrm_port_qstats_ext_output *resp_qs;
9576 struct hwrm_port_qstats_ext_input *req_qs;
9577 struct bnxt_pf_info *pf = &bp->pf;
9581 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
9584 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
9587 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
9591 req_qs->flags = flags;
9592 req_qs->port_id = cpu_to_le16(pf->port_id);
9593 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
9594 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
9595 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
9596 sizeof(struct tx_port_stats_ext) : 0;
9597 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
9598 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
9599 resp_qs = hwrm_req_hold(bp, req_qs);
9600 rc = hwrm_req_send(bp, req_qs);
9602 bp->fw_rx_stats_ext_size =
9603 le16_to_cpu(resp_qs->rx_stat_size) / 8;
9604 if (BNXT_FW_MAJ(bp) < 220 &&
9605 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
9606 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
9608 bp->fw_tx_stats_ext_size = tx_stat_size ?
9609 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
9611 bp->fw_rx_stats_ext_size = 0;
9612 bp->fw_tx_stats_ext_size = 0;
9614 hwrm_req_drop(bp, req_qs);
9619 if (bp->fw_tx_stats_ext_size <=
9620 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
9621 bp->pri2cos_valid = 0;
9625 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
9629 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
9631 resp_qc = hwrm_req_hold(bp, req_qc);
9632 rc = hwrm_req_send(bp, req_qc);
9637 pri2cos = &resp_qc->pri0_cos_queue_id;
9638 for (i = 0; i < 8; i++) {
9639 u8 queue_id = pri2cos[i];
9642 /* Per port queue IDs start from 0, 10, 20, etc */
9643 queue_idx = queue_id % 10;
9644 if (queue_idx > BNXT_MAX_QUEUE) {
9645 bp->pri2cos_valid = false;
9646 hwrm_req_drop(bp, req_qc);
9649 for (j = 0; j < bp->max_q; j++) {
9650 if (bp->q_ids[j] == queue_id)
9651 bp->pri2cos_idx[i] = queue_idx;
9654 bp->pri2cos_valid = true;
9656 hwrm_req_drop(bp, req_qc);
9661 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
9663 bnxt_hwrm_tunnel_dst_port_free(bp,
9664 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9665 bnxt_hwrm_tunnel_dst_port_free(bp,
9666 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9669 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
9675 tpa_flags = bp->flags & BNXT_FLAG_TPA;
9676 else if (BNXT_NO_FW_ACCESS(bp))
9678 for (i = 0; i < bp->nr_vnics; i++) {
9679 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
9681 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
9689 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
9693 for (i = 0; i < bp->nr_vnics; i++)
9694 bnxt_hwrm_vnic_set_rss(bp, i, false);
9697 static void bnxt_clear_vnic(struct bnxt *bp)
9702 bnxt_hwrm_clear_vnic_filter(bp);
9703 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
9704 /* clear all RSS settings before freeing the vnic ctx */
9705 bnxt_hwrm_clear_vnic_rss(bp);
9706 bnxt_hwrm_vnic_ctx_free(bp);
9708 /* before freeing the vnic, undo the vnic tpa settings */
9709 if (bp->flags & BNXT_FLAG_TPA)
9710 bnxt_set_tpa(bp, false);
9711 bnxt_hwrm_vnic_free(bp);
9712 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9713 bnxt_hwrm_vnic_ctx_free(bp);
9716 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
9719 bnxt_clear_vnic(bp);
9720 bnxt_hwrm_ring_free(bp, close_path);
9721 bnxt_hwrm_ring_grp_free(bp);
9723 bnxt_hwrm_stat_ctx_free(bp);
9724 bnxt_hwrm_free_tunnel_ports(bp);
9728 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
9730 struct hwrm_func_cfg_input *req;
9734 if (br_mode == BRIDGE_MODE_VEB)
9735 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
9736 else if (br_mode == BRIDGE_MODE_VEPA)
9737 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
9741 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9745 req->fid = cpu_to_le16(0xffff);
9746 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
9747 req->evb_mode = evb_mode;
9748 return hwrm_req_send(bp, req);
9751 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
9753 struct hwrm_func_cfg_input *req;
9756 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
9759 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9763 req->fid = cpu_to_le16(0xffff);
9764 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
9765 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
9767 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
9769 return hwrm_req_send(bp, req);
9772 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
9774 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
9777 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
9780 /* allocate context for vnic */
9781 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
9783 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
9785 goto vnic_setup_err;
9787 bp->rsscos_nr_ctxs++;
9789 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9790 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
9792 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
9794 goto vnic_setup_err;
9796 bp->rsscos_nr_ctxs++;
9800 /* configure default vnic, ring grp */
9801 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
9803 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
9805 goto vnic_setup_err;
9808 /* Enable RSS hashing on vnic */
9809 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
9811 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
9813 goto vnic_setup_err;
9816 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
9817 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
9819 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
9828 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
9832 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
9833 for (i = 0; i < nr_ctxs; i++) {
9834 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
9836 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
9840 bp->rsscos_nr_ctxs++;
9845 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
9847 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
9851 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
9853 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
9857 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
9858 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
9860 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
9867 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
9869 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9870 return __bnxt_setup_vnic_p5(bp, vnic_id);
9872 return __bnxt_setup_vnic(bp, vnic_id);
9875 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id,
9876 u16 start_rx_ring_idx, int rx_rings)
9880 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings);
9882 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
9886 return bnxt_setup_vnic(bp, vnic_id);
9889 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
9893 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
9894 return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0,
9897 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
9900 for (i = 0; i < bp->rx_nr_rings; i++) {
9901 struct bnxt_vnic_info *vnic;
9902 u16 vnic_id = i + 1;
9903 u16 ring_id = i;
9905 if (vnic_id >= bp->nr_vnics)
9906 break;
9908 vnic = &bp->vnic_info[vnic_id];
9909 vnic->flags |= BNXT_VNIC_RFS_FLAG;
9910 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
9911 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
9912 if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1))
9918 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
9919 static bool bnxt_promisc_ok(struct bnxt *bp)
9921 #ifdef CONFIG_BNXT_SRIOV
9922 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
9928 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
9930 unsigned int rc = 0;
9932 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
9934 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
9939 rc = bnxt_hwrm_vnic_cfg(bp, 1);
9941 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
9948 static int bnxt_cfg_rx_mode(struct bnxt *);
9949 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
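/* Bring up the datapath resources in firmware: allocate stat contexts,
 * rings and ring groups, create and configure the default VNIC (RSS, HDS,
 * TPA), program the unicast L2 filter and RX mode, and set interrupt
 * coalescing.  On failure, everything allocated so far is released via
 * bnxt_hwrm_resource_free().
 */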
9951 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
9953 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
9955 unsigned int rx_nr_rings = bp->rx_nr_rings;
9958 rc = bnxt_hwrm_stat_ctx_alloc(bp);
9960 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
9966 rc = bnxt_hwrm_ring_alloc(bp);
9968 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
9972 rc = bnxt_hwrm_ring_grp_alloc(bp);
9974 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
9978 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9981 /* default vnic 0 */
9982 rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings);
9984 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
9989 bnxt_hwrm_func_qcfg(bp);
9991 rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT);
9994 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
9995 bnxt_hwrm_update_rss_hash_cfg(bp);
9997 if (bp->flags & BNXT_FLAG_RFS) {
9998 rc = bnxt_alloc_rfs_vnics(bp);
10003 if (bp->flags & BNXT_FLAG_TPA) {
10004 rc = bnxt_set_tpa(bp, true);
10010 bnxt_update_vf_mac(bp);
10012 /* Filter for default vnic 0 */
10013 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
10015 if (BNXT_VF(bp) && rc == -ENODEV)
10016 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
10018 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
10021 vnic->uc_filter_count = 1;
10024 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
10027 if (bp->dev->flags & IFF_BROADCAST)
10028 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10030 if (bp->dev->flags & IFF_PROMISC)
10031 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10033 if (bp->dev->flags & IFF_ALLMULTI) {
10034 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10035 vnic->mc_list_count = 0;
10036 } else if (bp->dev->flags & IFF_MULTICAST) {
10039 bnxt_mc_list_updated(bp, &mask);
10040 vnic->rx_mask |= mask;
10043 rc = bnxt_cfg_rx_mode(bp);
10048 rc = bnxt_hwrm_set_coal(bp);
10050 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
10053 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10054 rc = bnxt_setup_nitroa0_vnic(bp);
10056 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
10061 bnxt_hwrm_func_qcfg(bp);
10062 netdev_update_features(bp->dev);
10068 bnxt_hwrm_resource_free(bp, 0, true);
10073 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
10075 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
10079 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
10081 bnxt_init_cp_rings(bp);
10082 bnxt_init_rx_rings(bp);
10083 bnxt_init_tx_rings(bp);
10084 bnxt_init_ring_grps(bp, irq_re_init);
10085 bnxt_init_vnics(bp);
10087 return bnxt_init_chip(bp, irq_re_init);
10090 static int bnxt_set_real_num_queues(struct bnxt *bp)
10093 struct net_device *dev = bp->dev;
10095 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
10096 bp->tx_nr_rings_xdp);
10100 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
10104 #ifdef CONFIG_RFS_ACCEL
10105 if (bp->flags & BNXT_FLAG_RFS)
10106 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
10112 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10115 int _rx = *rx, _tx = *tx;
10118 *rx = min_t(int, _rx, max);
10119 *tx = min_t(int, _tx, max);
10124 while (_rx + _tx > max) {
10125 if (_rx > _tx && _rx > 1)
10136 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
10138 return (tx - tx_xdp) / tx_sets + tx_xdp;
10141 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
10143 int tcs = bp->num_tc;
10147 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
10150 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
10152 int tcs = bp->num_tc;
10154 return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
10155 bp->tx_nr_rings_xdp;
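/* Trim the RX/TX ring counts to fit @max completion rings.  The TX ring
 * count is first converted to completion rings (one per TC set plus the
 * XDP rings), trimmed together with RX, and then converted back.
 */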
10158 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
10161 int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
10163 if (tx_cp != *tx) {
10164 int tx_saved = tx_cp, rc;
10166 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
10169 if (tx_cp != tx_saved)
10170 *tx = bnxt_num_cp_to_tx(bp, tx_cp);
10173 return __bnxt_trim_rings(bp, rx, tx, max, sh);
10176 static void bnxt_setup_msix(struct bnxt *bp)
10178 const int len = sizeof(bp->irq_tbl[0].name);
10179 struct net_device *dev = bp->dev;
10186 for (i = 0; i < tcs; i++) {
10187 count = bp->tx_nr_rings_per_tc;
10188 off = BNXT_TC_TO_RING_BASE(bp, i);
10189 netdev_set_tc_queue(dev, i, count, off);
10193 for (i = 0; i < bp->cp_nr_rings; i++) {
10194 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10197 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10199 else if (i < bp->rx_nr_rings)
10204 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
10206 bp->irq_tbl[map_idx].handler = bnxt_msix;
10210 static void bnxt_setup_inta(struct bnxt *bp)
10212 const int len = sizeof(bp->irq_tbl[0].name);
10215 netdev_reset_tc(bp->dev);
10219 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
10221 bp->irq_tbl[0].handler = bnxt_inta;
10224 static int bnxt_init_int_mode(struct bnxt *bp);
10226 static int bnxt_setup_int_mode(struct bnxt *bp)
10230 if (!bp->irq_tbl) {
10231 rc = bnxt_init_int_mode(bp);
10232 if (rc || !bp->irq_tbl)
10233 return rc ?: -ENODEV;
10236 if (bp->flags & BNXT_FLAG_USING_MSIX)
10237 bnxt_setup_msix(bp);
10239 bnxt_setup_inta(bp);
10241 rc = bnxt_set_real_num_queues(bp);
10245 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
10247 return bp->hw_resc.max_rsscos_ctxs;
10250 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
10252 return bp->hw_resc.max_vnics;
10255 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
10257 return bp->hw_resc.max_stat_ctxs;
10260 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
10262 return bp->hw_resc.max_cp_rings;
10265 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
10267 unsigned int cp = bp->hw_resc.max_cp_rings;
10269 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
10270 cp -= bnxt_get_ulp_msix_num(bp);
10275 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
10277 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10279 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10280 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
10282 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
10285 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
10287 bp->hw_resc.max_irqs = max_irqs;
10290 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
10294 cp = bnxt_get_max_func_cp_rings_for_en(bp);
10295 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10296 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
10298 return cp - bp->cp_nr_rings;
10301 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
10303 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
10306 int bnxt_get_avail_msix(struct bnxt *bp, int num)
10308 int max_cp = bnxt_get_max_func_cp_rings(bp);
10309 int max_irq = bnxt_get_max_func_irqs(bp);
10310 int total_req = bp->cp_nr_rings + num;
10311 int max_idx, avail_msix;
10313 max_idx = bp->total_irqs;
10314 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
10315 max_idx = min_t(int, bp->total_irqs, max_cp);
10316 avail_msix = max_idx - bp->cp_nr_rings;
10317 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
10320 if (max_irq < total_req) {
10321 num = max_irq - bp->cp_nr_rings;
10328 static int bnxt_get_num_msix(struct bnxt *bp)
10330 if (!BNXT_NEW_RM(bp))
10331 return bnxt_get_max_func_irqs(bp);
10333 return bnxt_nq_rings_in_use(bp);
10336 static int bnxt_init_msix(struct bnxt *bp)
10338 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
10339 struct msix_entry *msix_ent;
10341 total_vecs = bnxt_get_num_msix(bp);
10342 max = bnxt_get_max_func_irqs(bp);
10343 if (total_vecs > max)
10349 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
10353 for (i = 0; i < total_vecs; i++) {
10354 msix_ent[i].entry = i;
10355 msix_ent[i].vector = 0;
10358 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
10361 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
10362 ulp_msix = bnxt_get_ulp_msix_num(bp);
10363 if (total_vecs < 0 || total_vecs < ulp_msix) {
10364 rc = -ENODEV;
10365 goto msix_setup_exit;
10368 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
10370 for (i = 0; i < total_vecs; i++)
10371 bp->irq_tbl[i].vector = msix_ent[i].vector;
10373 bp->total_irqs = total_vecs;
10374 /* Trim rings based upon num of vectors allocated */
10375 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
10376 total_vecs - ulp_msix, min == 1);
10377 if (rc)
10378 goto msix_setup_exit;
10380 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
10381 bp->cp_nr_rings = (min == 1) ?
10382 max_t(int, tx_cp, bp->rx_nr_rings) :
10383 tx_cp + bp->rx_nr_rings;
10385 rc = bnxt_set_real_num_queues(bp);
10386 if (rc)
10387 goto msix_setup_exit;
10389 bp->flags |= BNXT_FLAG_USING_MSIX;
10394 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
10395 kfree(bp->irq_tbl);
10396 bp->irq_tbl = NULL;
10397 pci_disable_msix(bp->pdev);
10402 static int bnxt_init_inta(struct bnxt *bp)
10404 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
10408 bp->total_irqs = 1;
10409 bp->rx_nr_rings = 1;
10410 bp->tx_nr_rings = 1;
10411 bp->cp_nr_rings = 1;
10412 bp->flags |= BNXT_FLAG_SHARED_RINGS;
10413 bp->irq_tbl[0].vector = bp->pdev->irq;
10417 static int bnxt_init_int_mode(struct bnxt *bp)
10421 if (bp->flags & BNXT_FLAG_MSIX_CAP)
10422 rc = bnxt_init_msix(bp);
10424 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
10425 /* fallback to INTA */
10426 rc = bnxt_init_inta(bp);
10431 static void bnxt_clear_int_mode(struct bnxt *bp)
10433 if (bp->flags & BNXT_FLAG_USING_MSIX)
10434 pci_disable_msix(bp->pdev);
10436 kfree(bp->irq_tbl);
10437 bp->irq_tbl = NULL;
10438 bp->flags &= ~BNXT_FLAG_USING_MSIX;
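/* Re-reserve rings with the firmware.  If the number of MSI-X vectors
 * needs to change under the new resource manager, interrupts are torn down
 * and re-initialized around the reservation, with ULP interrupts stopped
 * and restarted accordingly.
 */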
10441 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
10443 bool irq_cleared = false;
10444 int tcs = bp->num_tc;
10447 if (!bnxt_need_reserve_rings(bp))
10450 if (irq_re_init && BNXT_NEW_RM(bp) &&
10451 bnxt_get_num_msix(bp) != bp->total_irqs) {
10452 bnxt_ulp_irq_stop(bp);
10453 bnxt_clear_int_mode(bp);
10454 irq_cleared = true;
10456 rc = __bnxt_reserve_rings(bp);
10459 rc = bnxt_init_int_mode(bp);
10460 bnxt_ulp_irq_restart(bp, rc);
10463 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
10466 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
10467 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
10468 netdev_err(bp->dev, "tx ring reservation failure\n");
10469 netdev_reset_tc(bp->dev);
10471 if (bp->tx_nr_rings_xdp)
10472 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
10474 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10480 static void bnxt_free_irq(struct bnxt *bp)
10482 struct bnxt_irq *irq;
10485 #ifdef CONFIG_RFS_ACCEL
10486 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
10487 bp->dev->rx_cpu_rmap = NULL;
10489 if (!bp->irq_tbl || !bp->bnapi)
10492 for (i = 0; i < bp->cp_nr_rings; i++) {
10493 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10495 irq = &bp->irq_tbl[map_idx];
10496 if (irq->requested) {
10497 if (irq->have_cpumask) {
10498 irq_set_affinity_hint(irq->vector, NULL);
10499 free_cpumask_var(irq->cpu_mask);
10500 irq->have_cpumask = 0;
10502 free_irq(irq->vector, bp->bnapi[i]);
10505 irq->requested = 0;
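/* Request one IRQ per completion ring, add RX ring vectors to the aRFS CPU
 * rmap when enabled, and spread IRQ affinity hints across the CPUs of the
 * local NUMA node.
 */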
10509 static int bnxt_request_irq(struct bnxt *bp)
10512 unsigned long flags = 0;
10513 #ifdef CONFIG_RFS_ACCEL
10514 struct cpu_rmap *rmap;
10517 rc = bnxt_setup_int_mode(bp);
10519 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
10523 #ifdef CONFIG_RFS_ACCEL
10524 rmap = bp->dev->rx_cpu_rmap;
10526 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
10527 flags = IRQF_SHARED;
10529 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
10530 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10531 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
10533 #ifdef CONFIG_RFS_ACCEL
10534 if (rmap && bp->bnapi[i]->rx_ring) {
10535 rc = irq_cpu_rmap_add(rmap, irq->vector);
10537 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
10542 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
10547 netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
10548 irq->requested = 1;
10550 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
10551 int numa_node = dev_to_node(&bp->pdev->dev);
10553 irq->have_cpumask = 1;
10554 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
10556 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
10558 netdev_warn(bp->dev,
10559 "Set affinity failed, IRQ = %d\n",
10568 static void bnxt_del_napi(struct bnxt *bp)
10575 for (i = 0; i < bp->rx_nr_rings; i++)
10576 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
10577 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
10578 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
10580 for (i = 0; i < bp->cp_nr_rings; i++) {
10581 struct bnxt_napi *bnapi = bp->bnapi[i];
10583 __netif_napi_del(&bnapi->napi);
10585 /* We called __netif_napi_del(), we need
10586 * to respect an RCU grace period before freeing napi structures.
10587 */
10588 synchronize_net();
10589 }
10591 static void bnxt_init_napi(struct bnxt *bp)
10594 unsigned int cp_nr_rings = bp->cp_nr_rings;
10595 struct bnxt_napi *bnapi;
10597 if (bp->flags & BNXT_FLAG_USING_MSIX) {
10598 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
10600 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
10601 poll_fn = bnxt_poll_p5;
10602 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10604 for (i = 0; i < cp_nr_rings; i++) {
10605 bnapi = bp->bnapi[i];
10606 netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
10608 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10609 bnapi = bp->bnapi[cp_nr_rings];
10610 netif_napi_add(bp->dev, &bnapi->napi,
10611 bnxt_poll_nitroa0);
10614 bnapi = bp->bnapi[0];
10615 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
10619 static void bnxt_disable_napi(struct bnxt *bp)
10623 if (!bp->bnapi ||
10624 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
10625 return;
10627 for (i = 0; i < bp->cp_nr_rings; i++) {
10628 struct bnxt_napi *bnapi = bp->bnapi[i];
10629 struct bnxt_cp_ring_info *cpr;
10631 cpr = &bnapi->cp_ring;
10632 if (bnapi->tx_fault)
10633 cpr->sw_stats.tx.tx_resets++;
10634 if (bnapi->in_reset)
10635 cpr->sw_stats.rx.rx_resets++;
10636 napi_disable(&bnapi->napi);
10637 if (bnapi->rx_ring)
10638 cancel_work_sync(&cpr->dim.work);
10642 static void bnxt_enable_napi(struct bnxt *bp)
10646 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
10647 for (i = 0; i < bp->cp_nr_rings; i++) {
10648 struct bnxt_napi *bnapi = bp->bnapi[i];
10649 struct bnxt_cp_ring_info *cpr;
10651 bnapi->tx_fault = 0;
10653 cpr = &bnapi->cp_ring;
10654 bnapi->in_reset = false;
10656 if (bnapi->rx_ring) {
10657 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
10658 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
10660 napi_enable(&bnapi->napi);
10664 void bnxt_tx_disable(struct bnxt *bp)
10667 struct bnxt_tx_ring_info *txr;
10670 for (i = 0; i < bp->tx_nr_rings; i++) {
10671 txr = &bp->tx_ring[i];
10672 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
10675 /* Make sure napi polls see @dev_state change */
10676 synchronize_net();
10677 /* Drop carrier first to prevent TX timeout */
10678 netif_carrier_off(bp->dev);
10679 /* Stop all TX queues */
10680 netif_tx_disable(bp->dev);
10683 void bnxt_tx_enable(struct bnxt *bp)
10686 struct bnxt_tx_ring_info *txr;
10688 for (i = 0; i < bp->tx_nr_rings; i++) {
10689 txr = &bp->tx_ring[i];
10690 WRITE_ONCE(txr->dev_state, 0);
10692 /* Make sure napi polls see @dev_state change */
10693 synchronize_net();
10694 netif_tx_wake_all_queues(bp->dev);
10695 if (BNXT_LINK_IS_UP(bp))
10696 netif_carrier_on(bp->dev);
10699 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
10701 u8 active_fec = link_info->active_fec_sig_mode &
10702 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
10704 switch (active_fec) {
10706 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
10708 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
10709 return "Clause 74 BaseR";
10710 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
10711 return "Clause 91 RS(528,514)";
10712 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
10713 return "Clause 91 RS544_1XN";
10714 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
10715 return "Clause 91 RS(544,514)";
10716 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
10717 return "Clause 91 RS272_1XN";
10718 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
10719 return "Clause 91 RS(272,257)";
10723 void bnxt_report_link(struct bnxt *bp)
10725 if (BNXT_LINK_IS_UP(bp)) {
10726 const char *signal = "";
10727 const char *flow_ctrl;
10728 const char *duplex;
10732 netif_carrier_on(bp->dev);
10733 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
10734 if (speed == SPEED_UNKNOWN) {
10735 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
10736 return;
10737 }
10738 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
10739 duplex = "full";
10740 else
10741 duplex = "half";
10742 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
10743 flow_ctrl = "ON - receive & transmit";
10744 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
10745 flow_ctrl = "ON - transmit";
10746 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
10747 flow_ctrl = "ON - receive";
10749 flow_ctrl = "none";
10750 if (bp->link_info.phy_qcfg_resp.option_flags &
10751 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
10752 u8 sig_mode = bp->link_info.active_fec_sig_mode &
10753 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
10754 switch (sig_mode) {
10755 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
10758 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
10759 signal = "(PAM4 56Gbps) ";
10761 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
10762 signal = "(PAM4 112Gbps) ";
10768 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
10769 speed, signal, duplex, flow_ctrl);
10770 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
10771 netdev_info(bp->dev, "EEE is %s\n",
10772 bp->eee.eee_active ? "active" :
10773 "not active");
10774 fec = bp->link_info.fec_cfg;
10775 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
10776 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
10777 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
10778 bnxt_report_fec(&bp->link_info));
10780 netif_carrier_off(bp->dev);
10781 netdev_err(bp->dev, "NIC Link is Down\n");
10785 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
10787 if (!resp->supported_speeds_auto_mode &&
10788 !resp->supported_speeds_force_mode &&
10789 !resp->supported_pam4_speeds_auto_mode &&
10790 !resp->supported_pam4_speeds_force_mode &&
10791 !resp->supported_speeds2_auto_mode &&
10792 !resp->supported_speeds2_force_mode)
10797 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
10799 struct bnxt_link_info *link_info = &bp->link_info;
10800 struct hwrm_port_phy_qcaps_output *resp;
10801 struct hwrm_port_phy_qcaps_input *req;
10804 if (bp->hwrm_spec_code < 0x10201)
10807 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
10811 resp = hwrm_req_hold(bp, req);
10812 rc = hwrm_req_send(bp, req);
10813 if (rc)
10814 goto hwrm_phy_qcaps_exit;
10816 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
10817 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
10818 struct ethtool_keee *eee = &bp->eee;
10819 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
10821 _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
10822 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
10823 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
10824 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
10825 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
10828 if (bp->hwrm_spec_code >= 0x10a01) {
10829 if (bnxt_phy_qcaps_no_speed(resp)) {
10830 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
10831 netdev_warn(bp->dev, "Ethernet link disabled\n");
10832 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
10833 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
10834 netdev_info(bp->dev, "Ethernet link enabled\n");
10835 /* Phy re-enabled, reprobe the speeds */
10836 link_info->support_auto_speeds = 0;
10837 link_info->support_pam4_auto_speeds = 0;
10838 link_info->support_auto_speeds2 = 0;
10841 if (resp->supported_speeds_auto_mode)
10842 link_info->support_auto_speeds =
10843 le16_to_cpu(resp->supported_speeds_auto_mode);
10844 if (resp->supported_pam4_speeds_auto_mode)
10845 link_info->support_pam4_auto_speeds =
10846 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
10847 if (resp->supported_speeds2_auto_mode)
10848 link_info->support_auto_speeds2 =
10849 le16_to_cpu(resp->supported_speeds2_auto_mode);
10851 bp->port_count = resp->port_cnt;
10853 hwrm_phy_qcaps_exit:
10854 hwrm_req_drop(bp, req);
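/* Return true if @advertising contains any speed bit that is no longer
 * present in @supported.
 */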
10858 static bool bnxt_support_dropped(u16 advertising, u16 supported)
10860 u16 diff = advertising ^ supported;
10862 return ((supported | diff) != supported);
10865 static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
10867 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
10869 /* Check if any advertised speeds are no longer supported. The caller
10870 * holds the link_lock mutex, so we can modify link_info settings.
10871 */
10872 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
10873 if (bnxt_support_dropped(link_info->advertising,
10874 link_info->support_auto_speeds2)) {
10875 link_info->advertising = link_info->support_auto_speeds2;
10880 if (bnxt_support_dropped(link_info->advertising,
10881 link_info->support_auto_speeds)) {
10882 link_info->advertising = link_info->support_auto_speeds;
10885 if (bnxt_support_dropped(link_info->advertising_pam4,
10886 link_info->support_pam4_auto_speeds)) {
10887 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
10893 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
10895 struct bnxt_link_info *link_info = &bp->link_info;
10896 struct hwrm_port_phy_qcfg_output *resp;
10897 struct hwrm_port_phy_qcfg_input *req;
10898 u8 link_state = link_info->link_state;
10899 bool support_changed;
10902 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
10906 resp = hwrm_req_hold(bp, req);
10907 rc = hwrm_req_send(bp, req);
10909 hwrm_req_drop(bp, req);
10910 if (BNXT_VF(bp) && rc == -ENODEV) {
10911 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
10917 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
10918 link_info->phy_link_status = resp->link;
10919 link_info->duplex = resp->duplex_cfg;
10920 if (bp->hwrm_spec_code >= 0x10800)
10921 link_info->duplex = resp->duplex_state;
10922 link_info->pause = resp->pause;
10923 link_info->auto_mode = resp->auto_mode;
10924 link_info->auto_pause_setting = resp->auto_pause;
10925 link_info->lp_pause = resp->link_partner_adv_pause;
10926 link_info->force_pause_setting = resp->force_pause;
10927 link_info->duplex_setting = resp->duplex_cfg;
10928 if (link_info->phy_link_status == BNXT_LINK_LINK) {
10929 link_info->link_speed = le16_to_cpu(resp->link_speed);
10930 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
10931 link_info->active_lanes = resp->active_lanes;
10933 link_info->link_speed = 0;
10934 link_info->active_lanes = 0;
10936 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
10937 link_info->force_pam4_link_speed =
10938 le16_to_cpu(resp->force_pam4_link_speed);
10939 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
10940 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
10941 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
10942 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
10943 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
10944 link_info->auto_pam4_link_speeds =
10945 le16_to_cpu(resp->auto_pam4_link_speed_mask);
10946 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
10947 link_info->lp_auto_link_speeds =
10948 le16_to_cpu(resp->link_partner_adv_speeds);
10949 link_info->lp_auto_pam4_link_speeds =
10950 resp->link_partner_pam4_adv_speeds;
10951 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
10952 link_info->phy_ver[0] = resp->phy_maj;
10953 link_info->phy_ver[1] = resp->phy_min;
10954 link_info->phy_ver[2] = resp->phy_bld;
10955 link_info->media_type = resp->media_type;
10956 link_info->phy_type = resp->phy_type;
10957 link_info->transceiver = resp->xcvr_pkg_type;
10958 link_info->phy_addr = resp->eee_config_phy_addr &
10959 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
10960 link_info->module_status = resp->module_status;
10962 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
10963 struct ethtool_keee *eee = &bp->eee;
10966 eee->eee_active = 0;
10967 if (resp->eee_config_phy_addr &
10968 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
10969 eee->eee_active = 1;
10970 fw_speeds = le16_to_cpu(
10971 resp->link_partner_adv_eee_link_speed_mask);
10972 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
10975 /* Pull initial EEE config */
10976 if (!chng_link_state) {
10977 if (resp->eee_config_phy_addr &
10978 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
10979 eee->eee_enabled = 1;
10981 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
10982 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
10984 if (resp->eee_config_phy_addr &
10985 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
10988 eee->tx_lpi_enabled = 1;
10989 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
10990 eee->tx_lpi_timer = le32_to_cpu(tmr) &
10991 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
10996 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
10997 if (bp->hwrm_spec_code >= 0x10504) {
10998 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
10999 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
11001 /* TODO: need to add more logic to report VF link */
11002 if (chng_link_state) {
11003 if (link_info->phy_link_status == BNXT_LINK_LINK)
11004 link_info->link_state = BNXT_LINK_STATE_UP;
11006 link_info->link_state = BNXT_LINK_STATE_DOWN;
11007 if (link_state != link_info->link_state)
11008 bnxt_report_link(bp);
11009 } else {
11010 /* always report link down if not asked to update link state */
11011 link_info->link_state = BNXT_LINK_STATE_DOWN;
11013 hwrm_req_drop(bp, req);
11015 if (!BNXT_PHY_CFG_ABLE(bp))
11018 support_changed = bnxt_support_speed_dropped(link_info);
11019 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
11020 bnxt_hwrm_set_link_setting(bp, true, false);
11024 static void bnxt_get_port_module_status(struct bnxt *bp)
11026 struct bnxt_link_info *link_info = &bp->link_info;
11027 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
11030 if (bnxt_update_link(bp, true))
11033 module_status = link_info->module_status;
11034 switch (module_status) {
11035 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
11036 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
11037 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
11038 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
11040 if (bp->hwrm_spec_code >= 0x10201) {
11041 netdev_warn(bp->dev, "Module part number %s\n",
11042 resp->phy_vendor_partnumber);
11044 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
11045 netdev_warn(bp->dev, "TX is disabled\n");
11046 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
11047 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
11052 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
11054 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
11055 if (bp->hwrm_spec_code >= 0x10201)
11056 req->auto_pause =
11057 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
11058 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11059 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
11060 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
11061 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
11062 req->enables |=
11063 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
11065 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
11066 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
11067 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
11068 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
11069 req->enables |=
11070 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
11071 if (bp->hwrm_spec_code >= 0x10201) {
11072 req->auto_pause = req->force_pause;
11073 req->enables |= cpu_to_le32(
11074 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
11079 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
11081 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
11082 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
11083 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11084 req->enables |=
11085 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
11086 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
11087 } else if (bp->link_info.advertising) {
11088 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
11089 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
11091 if (bp->link_info.advertising_pam4) {
11092 req->enables |=
11093 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
11094 req->auto_link_pam4_speed_mask =
11095 cpu_to_le16(bp->link_info.advertising_pam4);
11097 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
11098 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
11099 } else {
11100 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
11101 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
11102 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
11103 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
11104 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
11105 (u32)bp->link_info.req_link_speed);
11106 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
11107 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
11108 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
11110 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
11114 /* tell chimp that the setting takes effect immediately */
11115 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
11118 int bnxt_hwrm_set_pause(struct bnxt *bp)
11120 struct hwrm_port_phy_cfg_input *req;
11123 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11127 bnxt_hwrm_set_pause_common(bp, req);
11129 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
11130 bp->link_info.force_link_chng)
11131 bnxt_hwrm_set_link_common(bp, req);
11133 rc = hwrm_req_send(bp, req);
11134 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
11135 /* since changing the pause setting doesn't trigger any link
11136 * change event, the driver needs to update the current pause
11137 * result upon successful return of the phy_cfg command
11139 bp->link_info.pause =
11140 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
11141 bp->link_info.auto_pause_setting = 0;
11142 if (!bp->link_info.force_link_chng)
11143 bnxt_report_link(bp);
11145 bp->link_info.force_link_chng = false;
11149 static void bnxt_hwrm_set_eee(struct bnxt *bp,
11150 struct hwrm_port_phy_cfg_input *req)
11152 struct ethtool_keee *eee = &bp->eee;
11154 if (eee->eee_enabled) {
11156 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
11158 if (eee->tx_lpi_enabled)
11159 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
11161 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
11163 req->flags |= cpu_to_le32(flags);
11164 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
11165 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
11166 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
11168 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
11172 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
11174 struct hwrm_port_phy_cfg_input *req;
11177 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11182 bnxt_hwrm_set_pause_common(bp, req);
11184 bnxt_hwrm_set_link_common(bp, req);
11187 bnxt_hwrm_set_eee(bp, req);
11188 return hwrm_req_send(bp, req);
11191 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
11193 struct hwrm_port_phy_cfg_input *req;
11196 if (!BNXT_SINGLE_PF(bp))
11199 if (pci_num_vf(bp->pdev) &&
11200 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
11203 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
11207 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
11208 rc = hwrm_req_send(bp, req);
11210 mutex_lock(&bp->link_lock);
11211 /* The device is not obliged to bring the link down in certain scenarios,
11212 * even when forced. Setting the state to unknown is consistent with
11213 * driver startup and forces the link state to be reported
11214 * during a subsequent open based on PORT_PHY_QCFG.
11216 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
11217 mutex_unlock(&bp->link_lock);
11222 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
11224 #ifdef CONFIG_TEE_BNXT_FW
11225 int rc = tee_bnxt_fw_load();
11228 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
11232 netdev_err(bp->dev, "OP-TEE not supported\n");
11237 static int bnxt_try_recover_fw(struct bnxt *bp)
11239 if (bp->fw_health && bp->fw_health->status_reliable) {
11244 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11245 rc = bnxt_hwrm_poll(bp);
11246 if (!BNXT_FW_IS_BOOTING(sts) &&
11247 !BNXT_FW_IS_RECOVERING(sts))
11250 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
11252 if (!BNXT_FW_IS_HEALTHY(sts)) {
11253 netdev_err(bp->dev,
11254 "Firmware not responding, status: 0x%x\n",
11258 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
11259 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
11260 return bnxt_fw_reset_via_optee(bp);
11268 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
11270 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11272 if (!BNXT_NEW_RM(bp))
11273 return; /* no resource reservations required */
11275 hw_resc->resv_cp_rings = 0;
11276 hw_resc->resv_stat_ctxs = 0;
11277 hw_resc->resv_irqs = 0;
11278 hw_resc->resv_tx_rings = 0;
11279 hw_resc->resv_rx_rings = 0;
11280 hw_resc->resv_hw_ring_grps = 0;
11281 hw_resc->resv_vnics = 0;
11282 hw_resc->resv_rsscos_ctxs = 0;
11284 bp->tx_nr_rings = 0;
11285 bp->rx_nr_rings = 0;
11289 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
11293 if (!BNXT_NEW_RM(bp))
11294 return 0; /* no resource reservations required */
11296 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
11298 netdev_err(bp->dev, "resc_qcaps failed\n");
11300 bnxt_clear_reservations(bp, fw_reset);
11305 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
11307 struct hwrm_func_drv_if_change_output *resp;
11308 struct hwrm_func_drv_if_change_input *req;
11309 bool fw_reset = !bp->irq_tbl;
11310 bool resc_reinit = false;
11314 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
11317 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
11322 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
11323 resp = hwrm_req_hold(bp, req);
11325 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
11326 while (retry < BNXT_FW_IF_RETRY) {
11327 rc = hwrm_req_send(bp, req);
11335 if (rc == -EAGAIN) {
11336 hwrm_req_drop(bp, req);
11339 flags = le32_to_cpu(resp->flags);
11341 rc = bnxt_try_recover_fw(bp);
11344 hwrm_req_drop(bp, req);
11349 bnxt_inv_fw_health_reg(bp);
11353 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
11354 resc_reinit = true;
11355 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
11356 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
11359 bnxt_remap_fw_health_regs(bp);
11361 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
11362 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
11363 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11366 if (resc_reinit || fw_reset) {
11368 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11369 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11371 bnxt_free_ctx_mem(bp);
11373 rc = bnxt_fw_init_one(bp);
11375 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11376 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11379 bnxt_clear_int_mode(bp);
11380 rc = bnxt_init_int_mode(bp);
11382 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11383 netdev_err(bp->dev, "init int mode failed\n");
11387 rc = bnxt_cancel_reservations(bp, fw_reset);
11392 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
11394 struct hwrm_port_led_qcaps_output *resp;
11395 struct hwrm_port_led_qcaps_input *req;
11396 struct bnxt_pf_info *pf = &bp->pf;
11400 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
11403 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
11407 req->port_id = cpu_to_le16(pf->port_id);
11408 resp = hwrm_req_hold(bp, req);
11409 rc = hwrm_req_send(bp, req);
11411 hwrm_req_drop(bp, req);
11414 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
11417 bp->num_leds = resp->num_leds;
11418 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
11420 for (i = 0; i < bp->num_leds; i++) {
11421 struct bnxt_led_info *led = &bp->leds[i];
11422 __le16 caps = led->led_state_caps;
11424 if (!led->led_group_id ||
11425 !BNXT_LED_ALT_BLINK_CAP(caps)) {
11431 hwrm_req_drop(bp, req);
11435 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
11437 struct hwrm_wol_filter_alloc_output *resp;
11438 struct hwrm_wol_filter_alloc_input *req;
11441 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
11445 req->port_id = cpu_to_le16(bp->pf.port_id);
11446 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
11447 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
11448 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
11450 resp = hwrm_req_hold(bp, req);
11451 rc = hwrm_req_send(bp, req);
11453 bp->wol_filter_id = resp->wol_filter_id;
11454 hwrm_req_drop(bp, req);
11458 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
11460 struct hwrm_wol_filter_free_input *req;
11463 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
11467 req->port_id = cpu_to_le16(bp->pf.port_id);
11468 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
11469 req->wol_filter_id = bp->wol_filter_id;
11471 return hwrm_req_send(bp, req);
11474 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
11476 struct hwrm_wol_filter_qcfg_output *resp;
11477 struct hwrm_wol_filter_qcfg_input *req;
11478 u16 next_handle = 0;
11481 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
11485 req->port_id = cpu_to_le16(bp->pf.port_id);
11486 req->handle = cpu_to_le16(handle);
11487 resp = hwrm_req_hold(bp, req);
11488 rc = hwrm_req_send(bp, req);
11490 next_handle = le16_to_cpu(resp->next_handle);
11491 if (next_handle != 0) {
11492 if (resp->wol_type ==
11493 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
11495 bp->wol_filter_id = resp->wol_filter_id;
11499 hwrm_req_drop(bp, req);
11500 return next_handle;
11503 static void bnxt_get_wol_settings(struct bnxt *bp)
11508 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
11512 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
11513 } while (handle && handle != 0xffff);
11516 static bool bnxt_eee_config_ok(struct bnxt *bp)
11518 struct ethtool_keee *eee = &bp->eee;
11519 struct bnxt_link_info *link_info = &bp->link_info;
11521 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
11524 if (eee->eee_enabled) {
11525 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
11526 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
11528 _bnxt_fw_to_linkmode(advertising, link_info->advertising);
11530 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
11531 eee->eee_enabled = 0;
11534 if (linkmode_andnot(tmp, eee->advertised, advertising)) {
11535 linkmode_and(eee->advertised, advertising,
11543 static int bnxt_update_phy_setting(struct bnxt *bp)
11546 bool update_link = false;
11547 bool update_pause = false;
11548 bool update_eee = false;
11549 struct bnxt_link_info *link_info = &bp->link_info;
11551 rc = bnxt_update_link(bp, true);
11553 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
11557 if (!BNXT_SINGLE_PF(bp))
11560 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
11561 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
11562 link_info->req_flow_ctrl)
11563 update_pause = true;
11564 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
11565 link_info->force_pause_setting != link_info->req_flow_ctrl)
11566 update_pause = true;
11567 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
11568 if (BNXT_AUTO_MODE(link_info->auto_mode))
11569 update_link = true;
11570 if (bnxt_force_speed_updated(link_info))
11571 update_link = true;
11572 if (link_info->req_duplex != link_info->duplex_setting)
11573 update_link = true;
11575 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
11576 update_link = true;
11577 if (bnxt_auto_speed_updated(link_info))
11578 update_link = true;
11581 /* The last close may have shut down the link, so we need to call
11582 * PHY_CFG to bring it back up.
11584 if (!BNXT_LINK_IS_UP(bp))
11585 update_link = true;
11587 if (!bnxt_eee_config_ok(bp))
11591 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
11592 else if (update_pause)
11593 rc = bnxt_hwrm_set_pause(bp);
11595 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
11603 /* Common routine to pre-map certain register blocks to different GRC windows.
11604 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
11605 * in a PF and 3 windows in a VF can be customized to map different register blocks.
11608 static void bnxt_preset_reg_win(struct bnxt *bp)
11611 /* CAG registers map to GRC window #4 */
11612 writel(BNXT_CAG_REG_BASE,
11613 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
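/* Editor's illustrative sketch (assumption, not part of the upstream driver):
 * once bnxt_preset_reg_win() above has pointed GRC window #4 at
 * BNXT_CAG_REG_BASE, a CAG register can be read through BAR0 at the window's
 * fixed 4K slot (window #4 covers BAR0 offsets 0x4000..0x4fff), with the low
 * 12 bits of the GRC address selecting the register within the window.
 * bnxt_example_read_cag_reg() is a hypothetical helper name.
 */
static u32 bnxt_example_read_cag_reg(struct bnxt *bp, u32 cag_grc_addr)
{
	u32 win_off = 0x4000 + (cag_grc_addr & 0xfff);	/* window #4 slot in BAR0 */

	return readl(bp->bar0 + win_off);
}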
11617 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
11619 static int bnxt_reinit_after_abort(struct bnxt *bp)
11623 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11626 if (bp->dev->reg_state == NETREG_UNREGISTERED)
11629 rc = bnxt_fw_init_one(bp);
11631 bnxt_clear_int_mode(bp);
11632 rc = bnxt_init_int_mode(bp);
11634 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11635 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
11641 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
11643 struct bnxt_ntuple_filter *ntp_fltr;
11644 struct bnxt_l2_filter *l2_fltr;
11646 if (list_empty(&fltr->list))
11649 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
11650 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
11651 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
11652 atomic_inc(&l2_fltr->refcnt);
11653 ntp_fltr->l2_fltr = l2_fltr;
11654 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
11655 bnxt_del_ntp_filter(bp, ntp_fltr);
11656 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
11659 } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
11660 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
11661 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
11662 bnxt_del_l2_filter(bp, l2_fltr);
11663 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
11669 static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
11671 struct bnxt_filter_base *usr_fltr, *tmp;
11673 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
11674 bnxt_cfg_one_usr_fltr(bp, usr_fltr);
11677 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
11681 bnxt_preset_reg_win(bp);
11682 netif_carrier_off(bp->dev);
11684 /* Reserve rings now if none were reserved at driver probe. */
11685 rc = bnxt_init_dflt_ring_mode(bp);
11687 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
11691 rc = bnxt_reserve_rings(bp, irq_re_init);
11694 if ((bp->flags & BNXT_FLAG_RFS) &&
11695 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
11696 /* disable RFS if falling back to INTA */
11697 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
11698 bp->flags &= ~BNXT_FLAG_RFS;
11701 rc = bnxt_alloc_mem(bp, irq_re_init);
11703 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
11704 goto open_err_free_mem;
11708 bnxt_init_napi(bp);
11709 rc = bnxt_request_irq(bp);
11711 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
11716 rc = bnxt_init_nic(bp, irq_re_init);
11718 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
11722 bnxt_enable_napi(bp);
11723 bnxt_debug_dev_init(bp);
11725 if (link_re_init) {
11726 mutex_lock(&bp->link_lock);
11727 rc = bnxt_update_phy_setting(bp);
11728 mutex_unlock(&bp->link_lock);
11730 netdev_warn(bp->dev, "failed to update phy settings\n");
11731 if (BNXT_SINGLE_PF(bp)) {
11732 bp->link_info.phy_retry = true;
11733 bp->link_info.phy_retry_expires =
11740 udp_tunnel_nic_reset_ntf(bp->dev);
11742 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
11743 if (!static_key_enabled(&bnxt_xdp_locking_key))
11744 static_branch_enable(&bnxt_xdp_locking_key);
11745 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
11746 static_branch_disable(&bnxt_xdp_locking_key);
11748 set_bit(BNXT_STATE_OPEN, &bp->state);
11749 bnxt_enable_int(bp);
11750 /* Enable TX queues */
11751 bnxt_tx_enable(bp);
11752 mod_timer(&bp->timer, jiffies + bp->current_interval);
11753 /* Poll link status and check for SFP+ module status */
11754 mutex_lock(&bp->link_lock);
11755 bnxt_get_port_module_status(bp);
11756 mutex_unlock(&bp->link_lock);
11758 /* VF-reps may need to be re-opened after the PF is re-opened */
11760 bnxt_vf_reps_open(bp);
11762 atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
11763 bnxt_ptp_init_rtc(bp, true);
11764 bnxt_ptp_cfg_tstamp_filters(bp);
11765 bnxt_cfg_usr_fltrs(bp);
11772 bnxt_free_skbs(bp);
11774 bnxt_free_mem(bp, true);
11778 /* rtnl_lock held */
11779 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
11783 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
11786 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
11788 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
11789 dev_close(bp->dev);
11794 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
11795 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline self tests.
11798 int bnxt_half_open_nic(struct bnxt *bp)
11802 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
11803 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
11805 goto half_open_err;
11808 rc = bnxt_alloc_mem(bp, true);
11810 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
11811 goto half_open_err;
11813 bnxt_init_napi(bp);
11814 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
11815 rc = bnxt_init_nic(bp, true);
11817 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
11819 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
11820 goto half_open_err;
11825 bnxt_free_skbs(bp);
11826 bnxt_free_mem(bp, true);
11827 dev_close(bp->dev);
11831 /* rtnl_lock held, this call can only be made after a previous successful
11832 * call to bnxt_half_open_nic().
11834 void bnxt_half_close_nic(struct bnxt *bp)
11836 bnxt_hwrm_resource_free(bp, false, true);
11838 bnxt_free_skbs(bp);
11839 bnxt_free_mem(bp, true);
11840 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
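/* Editor's illustrative usage sketch (assumption, not part of the upstream
 * driver): the half open/close pair above is intended for offline self tests
 * under rtnl_lock, where rings and firmware resources are needed but NAPI,
 * IRQ and TX stay disabled.  bnxt_example_run_offline_test() is a
 * hypothetical caller.
 */
static int bnxt_example_run_offline_test(struct bnxt *bp)
{
	int rc;

	rc = bnxt_half_open_nic(bp);
	if (rc)
		return rc;

	/* ... run loopback or other offline diagnostics here ... */

	bnxt_half_close_nic(bp);
	return 0;
}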
11843 void bnxt_reenable_sriov(struct bnxt *bp)
11846 struct bnxt_pf_info *pf = &bp->pf;
11847 int n = pf->active_vfs;
11850 bnxt_cfg_hw_sriov(bp, &n, true);
11854 static int bnxt_open(struct net_device *dev)
11856 struct bnxt *bp = netdev_priv(dev);
11859 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
11860 rc = bnxt_reinit_after_abort(bp);
11863 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
11865 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
11870 rc = bnxt_hwrm_if_change(bp, true);
11874 rc = __bnxt_open_nic(bp, true, true);
11876 bnxt_hwrm_if_change(bp, false);
11878 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
11879 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11880 bnxt_ulp_start(bp, 0);
11881 bnxt_reenable_sriov(bp);
11889 static bool bnxt_drv_busy(struct bnxt *bp)
11891 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
11892 test_bit(BNXT_STATE_READ_STATS, &bp->state));
11895 static void bnxt_get_ring_stats(struct bnxt *bp,
11896 struct rtnl_link_stats64 *stats);
11898 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
11901 /* Close the VF-reps before closing PF */
11903 bnxt_vf_reps_close(bp);
11905 /* Change device state to avoid TX queue wake-ups */
11906 bnxt_tx_disable(bp);
11908 clear_bit(BNXT_STATE_OPEN, &bp->state);
11909 smp_mb__after_atomic();
11910 while (bnxt_drv_busy(bp))
11913 /* Flush rings and disable interrupts */
11914 bnxt_shutdown_nic(bp, irq_re_init);
11916 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
11918 bnxt_debug_dev_exit(bp);
11919 bnxt_disable_napi(bp);
11920 del_timer_sync(&bp->timer);
11921 bnxt_free_skbs(bp);
11923 /* Save ring stats before shutdown */
11924 if (bp->bnapi && irq_re_init) {
11925 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
11926 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
11932 bnxt_free_mem(bp, irq_re_init);
11935 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
11937 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11938 /* If we get here, it means firmware reset is in progress
11939 * while we are trying to close. We can safely proceed with
11940 * the close because we are holding rtnl_lock(). Some firmware
11941 * messages may fail as we proceed to close. We set the
11942 * ABORT_ERR flag here so that the FW reset thread will later
11943 * abort when it gets the rtnl_lock() and sees the flag.
11945 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
11946 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
11949 #ifdef CONFIG_BNXT_SRIOV
11950 if (bp->sriov_cfg) {
11953 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
11955 BNXT_SRIOV_CFG_WAIT_TMO);
11957 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
11959 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
11962 __bnxt_close_nic(bp, irq_re_init, link_re_init);
11965 static int bnxt_close(struct net_device *dev)
11967 struct bnxt *bp = netdev_priv(dev);
11969 bnxt_close_nic(bp, true, true);
11970 bnxt_hwrm_shutdown_link(bp);
11971 bnxt_hwrm_if_change(bp, false);
11975 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
11978 struct hwrm_port_phy_mdio_read_output *resp;
11979 struct hwrm_port_phy_mdio_read_input *req;
11982 if (bp->hwrm_spec_code < 0x10a00)
11983 return -EOPNOTSUPP;
11985 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
11989 req->port_id = cpu_to_le16(bp->pf.port_id);
11990 req->phy_addr = phy_addr;
11991 req->reg_addr = cpu_to_le16(reg & 0x1f);
11992 if (mdio_phy_id_is_c45(phy_addr)) {
11993 req->cl45_mdio = 1;
11994 req->phy_addr = mdio_phy_id_prtad(phy_addr);
11995 req->dev_addr = mdio_phy_id_devad(phy_addr);
11996 req->reg_addr = cpu_to_le16(reg);
11999 resp = hwrm_req_hold(bp, req);
12000 rc = hwrm_req_send(bp, req);
12002 *val = le16_to_cpu(resp->reg_data);
12003 hwrm_req_drop(bp, req);
12007 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
12010 struct hwrm_port_phy_mdio_write_input *req;
12013 if (bp->hwrm_spec_code < 0x10a00)
12014 return -EOPNOTSUPP;
12016 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
12020 req->port_id = cpu_to_le16(bp->pf.port_id);
12021 req->phy_addr = phy_addr;
12022 req->reg_addr = cpu_to_le16(reg & 0x1f);
12023 if (mdio_phy_id_is_c45(phy_addr)) {
12024 req->cl45_mdio = 1;
12025 req->phy_addr = mdio_phy_id_prtad(phy_addr);
12026 req->dev_addr = mdio_phy_id_devad(phy_addr);
12027 req->reg_addr = cpu_to_le16(reg);
12029 req->reg_data = cpu_to_le16(val);
12031 return hwrm_req_send(bp, req);
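/* Editor's illustrative sketch (assumption, not part of the upstream driver):
 * reading a clause-45 register through bnxt_hwrm_port_phy_read() above, using
 * the phy_id encoding from <linux/mdio.h> that the helper decodes with
 * mdio_phy_id_is_c45()/mdio_phy_id_prtad()/mdio_phy_id_devad().  This reads
 * the PMA/PMD control register of the PHY at port address 0;
 * bnxt_example_read_c45_pma_ctrl() is a hypothetical helper name.
 */
static int bnxt_example_read_c45_pma_ctrl(struct bnxt *bp, u16 *val)
{
	u16 phy_id = mdio_phy_id_c45(0, MDIO_MMD_PMAPMD);

	return bnxt_hwrm_port_phy_read(bp, phy_id, MDIO_CTRL1, val);
}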
12034 /* rtnl_lock held */
12035 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12037 struct mii_ioctl_data *mdio = if_mii(ifr);
12038 struct bnxt *bp = netdev_priv(dev);
12043 mdio->phy_id = bp->link_info.phy_addr;
12046 case SIOCGMIIREG: {
12047 u16 mii_regval = 0;
12049 if (!netif_running(dev))
12052 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
12054 mdio->val_out = mii_regval;
12059 if (!netif_running(dev))
12062 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
12065 case SIOCSHWTSTAMP:
12066 return bnxt_hwtstamp_set(dev, ifr);
12068 case SIOCGHWTSTAMP:
12069 return bnxt_hwtstamp_get(dev, ifr);
12075 return -EOPNOTSUPP;
12078 static void bnxt_get_ring_stats(struct bnxt *bp,
12079 struct rtnl_link_stats64 *stats)
12083 for (i = 0; i < bp->cp_nr_rings; i++) {
12084 struct bnxt_napi *bnapi = bp->bnapi[i];
12085 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
12086 u64 *sw = cpr->stats.sw_stats;
12088 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
12089 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
12090 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
12092 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
12093 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
12094 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
12096 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
12097 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
12098 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
12100 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
12101 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
12102 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
12104 stats->rx_missed_errors +=
12105 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
12107 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
12109 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
12111 stats->rx_dropped +=
12112 cpr->sw_stats.rx.rx_netpoll_discards +
12113 cpr->sw_stats.rx.rx_oom_discards;
12117 static void bnxt_add_prev_stats(struct bnxt *bp,
12118 struct rtnl_link_stats64 *stats)
12120 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
12122 stats->rx_packets += prev_stats->rx_packets;
12123 stats->tx_packets += prev_stats->tx_packets;
12124 stats->rx_bytes += prev_stats->rx_bytes;
12125 stats->tx_bytes += prev_stats->tx_bytes;
12126 stats->rx_missed_errors += prev_stats->rx_missed_errors;
12127 stats->multicast += prev_stats->multicast;
12128 stats->rx_dropped += prev_stats->rx_dropped;
12129 stats->tx_dropped += prev_stats->tx_dropped;
12133 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
12135 struct bnxt *bp = netdev_priv(dev);
12137 set_bit(BNXT_STATE_READ_STATS, &bp->state);
12138 /* Make sure bnxt_close_nic() sees that we are reading stats before
12139 * we check the BNXT_STATE_OPEN flag.
12141 smp_mb__after_atomic();
12142 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12143 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
12144 *stats = bp->net_stats_prev;
12148 bnxt_get_ring_stats(bp, stats);
12149 bnxt_add_prev_stats(bp, stats);
12151 if (bp->flags & BNXT_FLAG_PORT_STATS) {
12152 u64 *rx = bp->port_stats.sw_stats;
12153 u64 *tx = bp->port_stats.sw_stats +
12154 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
12156 stats->rx_crc_errors =
12157 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
12158 stats->rx_frame_errors =
12159 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
12160 stats->rx_length_errors =
12161 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
12162 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
12163 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
12165 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
12166 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
12167 stats->collisions =
12168 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
12169 stats->tx_fifo_errors =
12170 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
12171 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
12173 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
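/* Editor's note (illustrative, derived from the code above and from
 * __bnxt_close_nic()): the READ_STATS/OPEN flag ordering forms a simple
 * handshake.  The reader sets BNXT_STATE_READ_STATS, issues a full barrier,
 * then checks BNXT_STATE_OPEN; the closer clears BNXT_STATE_OPEN, issues a
 * full barrier, then waits in bnxt_drv_busy() until BNXT_STATE_READ_STATS is
 * clear.  A reader that observed OPEN set is therefore guaranteed to finish
 * reading the ring stats before the rings are freed.
 *
 *	bnxt_get_stats64()			__bnxt_close_nic()
 *	------------------			------------------
 *	set_bit(READ_STATS)			clear_bit(OPEN)
 *	smp_mb__after_atomic()			smp_mb__after_atomic()
 *	if (!test_bit(OPEN)) return prev	while (bnxt_drv_busy()) wait
 */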
12176 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
12177 struct bnxt_total_ring_err_stats *stats,
12178 struct bnxt_cp_ring_info *cpr)
12180 struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
12181 u64 *hw_stats = cpr->stats.sw_stats;
12183 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
12184 stats->rx_total_resets += sw_stats->rx.rx_resets;
12185 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
12186 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
12187 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
12188 stats->rx_total_ring_discards +=
12189 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
12190 stats->tx_total_resets += sw_stats->tx.tx_resets;
12191 stats->tx_total_ring_discards +=
12192 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
12193 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
12196 void bnxt_get_ring_err_stats(struct bnxt *bp,
12197 struct bnxt_total_ring_err_stats *stats)
12201 for (i = 0; i < bp->cp_nr_rings; i++)
12202 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
12205 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
12207 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12208 struct net_device *dev = bp->dev;
12209 struct netdev_hw_addr *ha;
12212 bool update = false;
12215 netdev_for_each_mc_addr(ha, dev) {
12216 if (mc_count >= BNXT_MAX_MC_ADDRS) {
12217 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12218 vnic->mc_list_count = 0;
12222 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
12223 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
12230 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
12232 if (mc_count != vnic->mc_list_count) {
12233 vnic->mc_list_count = mc_count;
12239 static bool bnxt_uc_list_updated(struct bnxt *bp)
12241 struct net_device *dev = bp->dev;
12242 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12243 struct netdev_hw_addr *ha;
12246 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
12249 netdev_for_each_uc_addr(ha, dev) {
12250 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
12258 static void bnxt_set_rx_mode(struct net_device *dev)
12260 struct bnxt *bp = netdev_priv(dev);
12261 struct bnxt_vnic_info *vnic;
12262 bool mc_update = false;
12266 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
12269 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12270 mask = vnic->rx_mask;
12271 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
12272 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
12273 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
12274 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
12276 if (dev->flags & IFF_PROMISC)
12277 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12279 uc_update = bnxt_uc_list_updated(bp);
12281 if (dev->flags & IFF_BROADCAST)
12282 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
12283 if (dev->flags & IFF_ALLMULTI) {
12284 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12285 vnic->mc_list_count = 0;
12286 } else if (dev->flags & IFF_MULTICAST) {
12287 mc_update = bnxt_mc_list_updated(bp, &mask);
12290 if (mask != vnic->rx_mask || uc_update || mc_update) {
12291 vnic->rx_mask = mask;
12293 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
12297 static int bnxt_cfg_rx_mode(struct bnxt *bp)
12299 struct net_device *dev = bp->dev;
12300 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
12301 struct netdev_hw_addr *ha;
12302 int i, off = 0, rc;
12305 netif_addr_lock_bh(dev);
12306 uc_update = bnxt_uc_list_updated(bp);
12307 netif_addr_unlock_bh(dev);
12312 for (i = 1; i < vnic->uc_filter_count; i++) {
12313 struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
12315 bnxt_hwrm_l2_filter_free(bp, fltr);
12316 bnxt_del_l2_filter(bp, fltr);
12319 vnic->uc_filter_count = 1;
12321 netif_addr_lock_bh(dev);
12322 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
12323 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12325 netdev_for_each_uc_addr(ha, dev) {
12326 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
12328 vnic->uc_filter_count++;
12331 netif_addr_unlock_bh(dev);
12333 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
12334 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
12336 if (BNXT_VF(bp) && rc == -ENODEV) {
12337 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12338 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
12340 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
12343 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
12345 vnic->uc_filter_count = i;
12349 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12350 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
12353 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
12354 !bnxt_promisc_ok(bp))
12355 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
12356 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12357 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
12358 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
12360 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
12361 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
12362 vnic->mc_list_count = 0;
12363 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
12366 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
12372 static bool bnxt_can_reserve_rings(struct bnxt *bp)
12374 #ifdef CONFIG_BNXT_SRIOV
12375 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
12376 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12378 /* No minimum rings were provisioned by the PF. Don't
12379 * reserve rings by default when device is down.
12381 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
12384 if (!netif_running(bp->dev))
12391 /* If the chip and firmware support RFS */
12392 static bool bnxt_rfs_supported(struct bnxt *bp)
12394 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
12395 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
12399 /* 212 firmware is broken for aRFS */
12400 if (BNXT_FW_MAJ(bp) == 212)
12402 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
12404 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
12409 /* If runtime conditions support RFS */
12410 static bool bnxt_rfs_capable(struct bnxt *bp)
12412 struct bnxt_hw_rings hwr = {0};
12413 int max_vnics, max_rss_ctxs;
12416 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
12417 /* 2 VNICs: default + ntuple */
12419 hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
12421 goto check_reserve_vnic;
12423 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
12424 return bnxt_rfs_supported(bp);
12425 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
12428 hwr.vnic = 1 + bp->rx_nr_rings;
12429 check_reserve_vnic:
12430 max_vnics = bnxt_get_max_func_vnics(bp);
12431 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
12433 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
12434 !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP))
12435 hwr.rss_ctx = hwr.vnic;
12437 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
12438 if (bp->rx_nr_rings > 1)
12439 netdev_warn(bp->dev,
12440 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
12441 min(max_rss_ctxs - 1, max_vnics - 1));
12445 if (!BNXT_NEW_RM(bp))
12448 if (hwr.vnic == bp->hw_resc.resv_vnics &&
12449 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
12452 bnxt_hwrm_reserve_rings(bp, &hwr);
12453 if (hwr.vnic <= bp->hw_resc.resv_vnics &&
12454 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
12457 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
12460 bnxt_hwrm_reserve_rings(bp, &hwr);
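/* Editor's worked example (illustrative, derived from the checks above):
 * on a non-P5 chip without BNXT_RSS_CAP_NEW_RSS_CAP and with 8 RX rings,
 * aRFS needs hwr.vnic = 1 + 8 = 9 VNICs and the same number of RSS contexts.
 * If the function or firmware cannot provide 9 of each, bnxt_rfs_capable()
 * returns false (with a warning when more than one RX ring is configured)
 * and bnxt_fix_features() below clears NETIF_F_NTUPLE, disabling NTUPLE
 * filtering for that ring count.
 */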
12464 static netdev_features_t bnxt_fix_features(struct net_device *dev,
12465 netdev_features_t features)
12467 struct bnxt *bp = netdev_priv(dev);
12468 netdev_features_t vlan_features;
12470 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
12471 features &= ~NETIF_F_NTUPLE;
12473 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
12474 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12476 if (!(features & NETIF_F_GRO))
12477 features &= ~NETIF_F_GRO_HW;
12479 if (features & NETIF_F_GRO_HW)
12480 features &= ~NETIF_F_LRO;
12482 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
12483 * turned on or off together.
12485 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
12486 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
12487 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12488 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
12489 else if (vlan_features)
12490 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
12492 #ifdef CONFIG_BNXT_SRIOV
12493 if (BNXT_VF(bp) && bp->vf.vlan)
12494 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
12499 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
12500 bool link_re_init, u32 flags, bool update_tpa)
12502 bnxt_close_nic(bp, irq_re_init, link_re_init);
12505 bnxt_set_ring_params(bp);
12506 return bnxt_open_nic(bp, irq_re_init, link_re_init);
12509 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
12511 bool update_tpa = false, update_ntuple = false;
12512 struct bnxt *bp = netdev_priv(dev);
12513 u32 flags = bp->flags;
12516 bool re_init = false;
12518 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
12519 if (features & NETIF_F_GRO_HW)
12520 flags |= BNXT_FLAG_GRO;
12521 else if (features & NETIF_F_LRO)
12522 flags |= BNXT_FLAG_LRO;
12524 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
12525 flags &= ~BNXT_FLAG_TPA;
12527 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
12528 flags |= BNXT_FLAG_STRIP_VLAN;
12530 if (features & NETIF_F_NTUPLE)
12531 flags |= BNXT_FLAG_RFS;
12533 bnxt_clear_usr_fltrs(bp, true);
12535 changes = flags ^ bp->flags;
12536 if (changes & BNXT_FLAG_TPA) {
12538 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
12539 (flags & BNXT_FLAG_TPA) == 0 ||
12540 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
12544 if (changes & ~BNXT_FLAG_TPA)
12547 if (changes & BNXT_FLAG_RFS)
12548 update_ntuple = true;
12550 if (flags != bp->flags) {
12551 u32 old_flags = bp->flags;
12553 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12556 bnxt_set_ring_params(bp);
12561 return bnxt_reinit_features(bp, true, false, flags, update_tpa);
12564 return bnxt_reinit_features(bp, false, false, flags, update_tpa);
12568 rc = bnxt_set_tpa(bp,
12569 (flags & BNXT_FLAG_TPA) ?
12572 bp->flags = old_flags;
12578 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
12581 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
12582 struct hop_jumbo_hdr *jhdr;
12587 /* Check that there are at most 2 IPv6 extension headers, no
12588 * fragment header, and each is <= 64 bytes.
12590 start = nw_off + sizeof(*ip6h);
12591 nexthdr = &ip6h->nexthdr;
12592 while (ipv6_ext_hdr(*nexthdr)) {
12593 struct ipv6_opt_hdr *hp;
12596 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
12597 *nexthdr == NEXTHDR_FRAGMENT)
12599 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
12600 skb_headlen(skb), NULL);
12603 if (*nexthdr == NEXTHDR_AUTH)
12604 hdrlen = ipv6_authlen(hp);
12606 hdrlen = ipv6_optlen(hp);
12611 /* The ext header may be a hop-by-hop header inserted for
12612 * big TCP purposes. This will be removed before sending
12613 * from the NIC, so do not count it.
12615 if (*nexthdr == NEXTHDR_HOP) {
12616 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
12617 goto increment_hdr;
12619 jhdr = (struct hop_jumbo_hdr *)hp;
12620 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
12621 jhdr->nexthdr != IPPROTO_TCP)
12622 goto increment_hdr;
12629 nexthdr = &hp->nexthdr;
12633 /* Caller will check inner protocol */
12634 if (skb->encapsulation) {
12640 /* Only support TCP/UDP for non-tunneled IPv6 and inner IPv6 */
12641 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
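/* Editor's illustrative example (derived from the checks above): an IPv6 TSO
 * skb carrying a hop-by-hop header (8 bytes) plus a destination-options
 * header (16 bytes) followed by TCP passes bnxt_exthdr_check() (two
 * extension headers, each <= 64 bytes, no fragment header).  A packet with a
 * fragment header, or with three or more extension headers, fails the check
 * and bnxt_features_check() strips the checksum/GSO features so the stack
 * falls back to software segmentation and checksumming.
 */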
12644 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
12645 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
12647 struct udphdr *uh = udp_hdr(skb);
12648 __be16 udp_port = uh->dest;
12650 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
12651 udp_port != bp->vxlan_gpe_port)
12653 if (skb->inner_protocol == htons(ETH_P_TEB)) {
12654 struct ethhdr *eh = inner_eth_hdr(skb);
12656 switch (eh->h_proto) {
12657 case htons(ETH_P_IP):
12659 case htons(ETH_P_IPV6):
12660 return bnxt_exthdr_check(bp, skb,
12661 skb_inner_network_offset(skb),
12664 } else if (skb->inner_protocol == htons(ETH_P_IP)) {
12666 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
12667 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
12673 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
12675 switch (l4_proto) {
12677 return bnxt_udp_tunl_check(bp, skb);
12680 case IPPROTO_GRE: {
12681 switch (skb->inner_protocol) {
12684 case htons(ETH_P_IP):
12686 case htons(ETH_P_IPV6):
12691 /* Check ext headers of inner ipv6 */
12692 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
12698 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
12699 struct net_device *dev,
12700 netdev_features_t features)
12702 struct bnxt *bp = netdev_priv(dev);
12705 features = vlan_features_check(skb, features);
12706 switch (vlan_get_protocol(skb)) {
12707 case htons(ETH_P_IP):
12708 if (!skb->encapsulation)
12710 l4_proto = &ip_hdr(skb)->protocol;
12711 if (bnxt_tunl_check(bp, skb, *l4_proto))
12714 case htons(ETH_P_IPV6):
12715 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
12718 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
12722 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
12725 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
12728 struct hwrm_dbg_read_direct_output *resp;
12729 struct hwrm_dbg_read_direct_input *req;
12730 __le32 *dbg_reg_buf;
12731 dma_addr_t mapping;
12734 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
12738 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
12740 if (!dbg_reg_buf) {
12742 goto dbg_rd_reg_exit;
12745 req->host_dest_addr = cpu_to_le64(mapping);
12747 resp = hwrm_req_hold(bp, req);
12748 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
12749 req->read_len32 = cpu_to_le32(num_words);
12751 rc = hwrm_req_send(bp, req);
12752 if (rc || resp->error_code) {
12754 goto dbg_rd_reg_exit;
12756 for (i = 0; i < num_words; i++)
12757 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
12760 hwrm_req_drop(bp, req);
12764 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
12765 u32 ring_id, u32 *prod, u32 *cons)
12767 struct hwrm_dbg_ring_info_get_output *resp;
12768 struct hwrm_dbg_ring_info_get_input *req;
12771 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
12775 req->ring_type = ring_type;
12776 req->fw_ring_id = cpu_to_le32(ring_id);
12777 resp = hwrm_req_hold(bp, req);
12778 rc = hwrm_req_send(bp, req);
12780 *prod = le32_to_cpu(resp->producer_index);
12781 *cons = le32_to_cpu(resp->consumer_index);
12783 hwrm_req_drop(bp, req);
12787 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
12789 struct bnxt_tx_ring_info *txr;
12790 int i = bnapi->index, j;
12792 bnxt_for_each_napi_tx(j, bnapi, txr)
12793 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
12794 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
12798 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
12800 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
12801 int i = bnapi->index;
12806 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
12807 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
12808 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
12809 rxr->rx_sw_agg_prod);
12812 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
12814 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
12815 int i = bnapi->index;
12817 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
12818 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
12821 static void bnxt_dbg_dump_states(struct bnxt *bp)
12824 struct bnxt_napi *bnapi;
12826 for (i = 0; i < bp->cp_nr_rings; i++) {
12827 bnapi = bp->bnapi[i];
12828 if (netif_msg_drv(bp)) {
12829 bnxt_dump_tx_sw_state(bnapi);
12830 bnxt_dump_rx_sw_state(bnapi);
12831 bnxt_dump_cp_sw_state(bnapi);
12836 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
12838 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
12839 struct hwrm_ring_reset_input *req;
12840 struct bnxt_napi *bnapi = rxr->bnapi;
12841 struct bnxt_cp_ring_info *cpr;
12845 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
12849 cpr = &bnapi->cp_ring;
12850 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
12851 req->cmpl_ring = cpu_to_le16(cp_ring_id);
12852 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
12853 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
12854 return hwrm_req_send_silent(bp, req);
12857 static void bnxt_reset_task(struct bnxt *bp, bool silent)
12860 bnxt_dbg_dump_states(bp);
12861 if (netif_running(bp->dev)) {
12865 bnxt_close_nic(bp, false, false);
12866 bnxt_open_nic(bp, false, false);
12869 bnxt_close_nic(bp, true, false);
12870 rc = bnxt_open_nic(bp, true, false);
12871 bnxt_ulp_start(bp, rc);
12876 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
12878 struct bnxt *bp = netdev_priv(dev);
12880 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
12881 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
12884 static void bnxt_fw_health_check(struct bnxt *bp)
12886 struct bnxt_fw_health *fw_health = bp->fw_health;
12887 struct pci_dev *pdev = bp->pdev;
12890 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
12893 /* Make sure it is enabled before checking the tmr_counter. */
12895 if (fw_health->tmr_counter) {
12896 fw_health->tmr_counter--;
12900 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
12901 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
12902 fw_health->arrests++;
12906 fw_health->last_fw_heartbeat = val;
12908 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12909 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
12910 fw_health->discoveries++;
12914 fw_health->tmr_counter = fw_health->tmr_multiplier;
12918 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
12921 static void bnxt_timer(struct timer_list *t)
12923 struct bnxt *bp = from_timer(bp, t, timer);
12924 struct net_device *dev = bp->dev;
12926 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
12929 if (atomic_read(&bp->intr_sem) != 0)
12930 goto bnxt_restart_timer;
12932 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
12933 bnxt_fw_health_check(bp);
12935 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
12936 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
12938 if (bnxt_tc_flower_enabled(bp))
12939 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
12941 #ifdef CONFIG_RFS_ACCEL
12942 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
12943 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
12944 #endif /*CONFIG_RFS_ACCEL*/
12946 if (bp->link_info.phy_retry) {
12947 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
12948 bp->link_info.phy_retry = false;
12949 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
12951 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
12955 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
12956 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
12958 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
12959 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
12961 bnxt_restart_timer:
12962 mod_timer(&bp->timer, jiffies + bp->current_interval);
12965 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
12967 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
12968 * set. If the device is being closed, bnxt_close() may be holding
12969 * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
12970 * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
12972 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12976 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
12978 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12982 /* Only called from bnxt_sp_task() */
12983 static void bnxt_reset(struct bnxt *bp, bool silent)
12985 bnxt_rtnl_lock_sp(bp);
12986 if (test_bit(BNXT_STATE_OPEN, &bp->state))
12987 bnxt_reset_task(bp, silent);
12988 bnxt_rtnl_unlock_sp(bp);
12991 /* Only called from bnxt_sp_task() */
12992 static void bnxt_rx_ring_reset(struct bnxt *bp)
12996 bnxt_rtnl_lock_sp(bp);
12997 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12998 bnxt_rtnl_unlock_sp(bp);
13001 /* Disable and flush TPA before resetting the RX ring */
13002 if (bp->flags & BNXT_FLAG_TPA)
13003 bnxt_set_tpa(bp, false);
13004 for (i = 0; i < bp->rx_nr_rings; i++) {
13005 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
13006 struct bnxt_cp_ring_info *cpr;
13009 if (!rxr->bnapi->in_reset)
13012 rc = bnxt_hwrm_rx_ring_reset(bp, i);
13014 if (rc == -EINVAL || rc == -EOPNOTSUPP)
13015 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
13017 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
13019 bnxt_reset_task(bp, true);
13022 bnxt_free_one_rx_ring_skbs(bp, i);
13024 rxr->rx_agg_prod = 0;
13025 rxr->rx_sw_agg_prod = 0;
13026 rxr->rx_next_cons = 0;
13027 rxr->bnapi->in_reset = false;
13028 bnxt_alloc_one_rx_ring(bp, i);
13029 cpr = &rxr->bnapi->cp_ring;
13030 cpr->sw_stats.rx.rx_resets++;
13031 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13032 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
13033 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
13035 if (bp->flags & BNXT_FLAG_TPA)
13036 bnxt_set_tpa(bp, true);
13037 bnxt_rtnl_unlock_sp(bp);
13040 static void bnxt_fw_reset_close(struct bnxt *bp)
13043 /* When firmware is in a fatal state, quiesce the device and disable
13044 * bus mastering to prevent any potential bad DMAs before freeing kernel memory.
13047 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
13050 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
13052 bp->fw_reset_min_dsecs = 0;
13053 bnxt_tx_disable(bp);
13054 bnxt_disable_napi(bp);
13055 bnxt_disable_int_sync(bp);
13057 bnxt_clear_int_mode(bp);
13058 pci_disable_device(bp->pdev);
13060 __bnxt_close_nic(bp, true, false);
13061 bnxt_vf_reps_free(bp);
13062 bnxt_clear_int_mode(bp);
13063 bnxt_hwrm_func_drv_unrgtr(bp);
13064 if (pci_is_enabled(bp->pdev))
13065 pci_disable_device(bp->pdev);
13066 bnxt_free_ctx_mem(bp);
13069 static bool is_bnxt_fw_ok(struct bnxt *bp)
13071 struct bnxt_fw_health *fw_health = bp->fw_health;
13072 bool no_heartbeat = false, has_reset = false;
13075 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
13076 if (val == fw_health->last_fw_heartbeat)
13077 no_heartbeat = true;
13079 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13080 if (val != fw_health->last_fw_reset_cnt)
13083 if (!no_heartbeat && has_reset)
13089 /* rtnl_lock is acquired before calling this function */
13090 static void bnxt_force_fw_reset(struct bnxt *bp)
13092 struct bnxt_fw_health *fw_health = bp->fw_health;
13093 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13096 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
13097 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
13101 spin_lock_bh(&ptp->ptp_lock);
13102 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13103 spin_unlock_bh(&ptp->ptp_lock);
13105 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13107 bnxt_fw_reset_close(bp);
13108 wait_dsecs = fw_health->master_func_wait_dsecs;
13109 if (fw_health->primary) {
13110 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
13112 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
13114 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
13115 wait_dsecs = fw_health->normal_func_wait_dsecs;
13116 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13119 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
13120 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
13121 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
13124 void bnxt_fw_exception(struct bnxt *bp)
13126 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
13127 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
13128 bnxt_rtnl_lock_sp(bp);
13129 bnxt_force_fw_reset(bp);
13130 bnxt_rtnl_unlock_sp(bp);
13133 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
13136 static int bnxt_get_registered_vfs(struct bnxt *bp)
13138 #ifdef CONFIG_BNXT_SRIOV
13144 rc = bnxt_hwrm_func_qcfg(bp);
13146 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
13149 if (bp->pf.registered_vfs)
13150 return bp->pf.registered_vfs;
13157 void bnxt_fw_reset(struct bnxt *bp)
13159 bnxt_rtnl_lock_sp(bp);
13160 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
13161 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13162 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
13166 spin_lock_bh(&ptp->ptp_lock);
13167 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13168 spin_unlock_bh(&ptp->ptp_lock);
13170 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13172 if (bp->pf.active_vfs &&
13173 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
13174 n = bnxt_get_registered_vfs(bp);
13176 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
13178 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13179 dev_close(bp->dev);
13180 goto fw_reset_exit;
13181 } else if (n > 0) {
13182 u16 vf_tmo_dsecs = n * 10;
13184 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
13185 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
13186 bp->fw_reset_state =
13187 BNXT_FW_RESET_STATE_POLL_VF;
13188 bnxt_queue_fw_reset_work(bp, HZ / 10);
13189 goto fw_reset_exit;
13191 bnxt_fw_reset_close(bp);
13192 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13193 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
13196 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13197 tmo = bp->fw_reset_min_dsecs * HZ / 10;
13199 bnxt_queue_fw_reset_work(bp, tmo);
13202 bnxt_rtnl_unlock_sp(bp);
13205 static void bnxt_chk_missed_irq(struct bnxt *bp)
13209 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
13212 for (i = 0; i < bp->cp_nr_rings; i++) {
13213 struct bnxt_napi *bnapi = bp->bnapi[i];
13214 struct bnxt_cp_ring_info *cpr;
13221 cpr = &bnapi->cp_ring;
13222 for (j = 0; j < cpr->cp_ring_count; j++) {
13223 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
13226 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
13229 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
13230 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
13233 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
13234 bnxt_dbg_hwrm_ring_info_get(bp,
13235 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
13236 fw_ring_id, &val[0], &val[1]);
13237 cpr->sw_stats.cmn.missed_irqs++;
13242 static void bnxt_cfg_ntp_filters(struct bnxt *);
13244 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
13246 struct bnxt_link_info *link_info = &bp->link_info;
13248 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
13249 link_info->autoneg = BNXT_AUTONEG_SPEED;
13250 if (bp->hwrm_spec_code >= 0x10201) {
13251 if (link_info->auto_pause_setting &
13252 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
13253 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
13255 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
13257 bnxt_set_auto_speed(link_info);
13259 bnxt_set_force_speed(link_info);
13260 link_info->req_duplex = link_info->duplex_setting;
13262 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
13263 link_info->req_flow_ctrl =
13264 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
13266 link_info->req_flow_ctrl = link_info->force_pause_setting;
13269 static void bnxt_fw_echo_reply(struct bnxt *bp)
13271 struct bnxt_fw_health *fw_health = bp->fw_health;
13272 struct hwrm_func_echo_response_input *req;
13275 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
13278 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
13279 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
13280 hwrm_req_send(bp, req);
13283 static void bnxt_sp_task(struct work_struct *work)
13285 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
13287 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13288 smp_mb__after_atomic();
13289 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
13290 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13294 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
13295 bnxt_cfg_rx_mode(bp);
13297 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
13298 bnxt_cfg_ntp_filters(bp);
13299 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
13300 bnxt_hwrm_exec_fwd_req(bp);
13301 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
13302 netdev_info(bp->dev, "Receive PF driver unload event!\n");
13303 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
13304 bnxt_hwrm_port_qstats(bp, 0);
13305 bnxt_hwrm_port_qstats_ext(bp, 0);
13306 bnxt_accumulate_all_stats(bp);
13309 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
13312 mutex_lock(&bp->link_lock);
13313 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
13315 bnxt_hwrm_phy_qcaps(bp);
13317 rc = bnxt_update_link(bp, true);
13319 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
13322 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
13324 bnxt_init_ethtool_link_settings(bp);
13325 mutex_unlock(&bp->link_lock);
13327 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
13330 mutex_lock(&bp->link_lock);
13331 rc = bnxt_update_phy_setting(bp);
13332 mutex_unlock(&bp->link_lock);
13334 netdev_warn(bp->dev, "update phy settings retry failed\n");
13336 bp->link_info.phy_retry = false;
13337 netdev_info(bp->dev, "update phy settings retry succeeded\n");
13340 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
13341 mutex_lock(&bp->link_lock);
13342 bnxt_get_port_module_status(bp);
13343 mutex_unlock(&bp->link_lock);
13346 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
13347 bnxt_tc_flow_stats_work(bp);
13349 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
13350 bnxt_chk_missed_irq(bp);
13352 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
13353 bnxt_fw_echo_reply(bp);
13355 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
13356 bnxt_hwmon_notify_event(bp);
13358 /* The functions below will clear BNXT_STATE_IN_SP_TASK. They
13359 * must be the last functions called before exiting.
13361 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
13362 bnxt_reset(bp, false);
13364 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
13365 bnxt_reset(bp, true);
13367 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
13368 bnxt_rx_ring_reset(bp);
13370 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
13371 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
13372 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
13373 bnxt_devlink_health_fw_report(bp);
13378 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
13379 if (!is_bnxt_fw_ok(bp))
13380 bnxt_devlink_health_fw_report(bp);
13383 smp_mb__before_atomic();
13384 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
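/* Note: the set_bit()/clear_bit() of BNXT_STATE_IN_SP_TASK above, together
 * with the smp_mb__after_atomic()/smp_mb__before_atomic() barriers, bracket
 * the whole handler so that other contexts (presumably the close path) can
 * reliably observe whether the slow-path task is still running relative to
 * BNXT_STATE_OPEN.
 */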
13387 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13390 /* Under rtnl_lock */
13391 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
13394 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
13395 struct bnxt_hw_rings hwr = {0};
13401 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
13403 if (max_rx < rx_rings)
13406 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13410 hwr.tx = tx * tx_sets + tx_xdp;
13411 if (max_tx < hwr.tx)
13414 hwr.vnic = bnxt_get_total_vnics(bp, rx);
13416 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
13417 hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
13418 if (max_cp < hwr.cp)
13421 if (BNXT_NEW_RM(bp)) {
13422 hwr.cp += bnxt_get_ulp_msix_num(bp);
13423 hwr.stat += bnxt_get_ulp_stat_ctxs(bp);
13425 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13427 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
13428 hwr.cp_p5 = hwr.tx + rx;
13429 return bnxt_hwrm_check_rings(bp, &hwr);
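/* Summary of the accounting above: hwr.tx counts one TX ring per traffic
 * class set plus any XDP TX rings; hwr.cp is max(tx_cp, rx) when TX and RX
 * share completion rings, otherwise their sum; and with the new resource
 * manager (BNXT_NEW_RM) the ULP MSI-X vectors and stat contexts are added
 * before handing the totals to firmware via bnxt_hwrm_check_rings().
 */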
13432 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
13435 pci_iounmap(pdev, bp->bar2);
13440 pci_iounmap(pdev, bp->bar1);
13445 pci_iounmap(pdev, bp->bar0);
13450 static void bnxt_cleanup_pci(struct bnxt *bp)
13452 bnxt_unmap_bars(bp, bp->pdev);
13453 pci_release_regions(bp->pdev);
13454 if (pci_is_enabled(bp->pdev))
13455 pci_disable_device(bp->pdev);
13458 static void bnxt_init_dflt_coal(struct bnxt *bp)
13460 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
13461 struct bnxt_coal *coal;
13464 if (coal_cap->cmpl_params &
13465 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
13466 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
13468 /* Tick values in microseconds.
13469 * 1 coal_buf x bufs_per_record = 1 completion record.
13471 coal = &bp->rx_coal;
13472 coal->coal_ticks = 10;
13473 coal->coal_bufs = 30;
13474 coal->coal_ticks_irq = 1;
13475 coal->coal_bufs_irq = 2;
13476 coal->idle_thresh = 50;
13477 coal->bufs_per_record = 2;
13478 coal->budget = 64; /* NAPI budget */
13479 coal->flags = flags;
13481 coal = &bp->tx_coal;
13482 coal->coal_ticks = 28;
13483 coal->coal_bufs = 30;
13484 coal->coal_ticks_irq = 2;
13485 coal->coal_bufs_irq = 2;
13486 coal->bufs_per_record = 1;
13487 coal->flags = flags;
13489 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
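/* Example: with bufs_per_record = 2, the RX default of coal_bufs = 30
 * amounts to roughly 15 completion records, and coal_ticks = 10 to a
 * 10 usec coalescing timer (per the tick/record units noted in the
 * comment above).
 */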
13492 /* FW that pre-reserves 1 VNIC per function */
13493 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
13495 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
13497 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13498 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
13500 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
13501 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
13506 static int bnxt_fw_init_one_p1(struct bnxt *bp)
13511 rc = bnxt_hwrm_ver_get(bp);
13512 /* FW may be unresponsive after FLR. FLR must complete within 100 msec
13513 * so wait before continuing with recovery.
13517 bnxt_try_map_fw_health_reg(bp);
13519 rc = bnxt_try_recover_fw(bp);
13522 rc = bnxt_hwrm_ver_get(bp);
13527 bnxt_nvm_cfg_ver_get(bp);
13529 rc = bnxt_hwrm_func_reset(bp);
13533 bnxt_hwrm_fw_set_time(bp);
13537 static int bnxt_fw_init_one_p2(struct bnxt *bp)
13541 /* Get the MAX capabilities for this function */
13542 rc = bnxt_hwrm_func_qcaps(bp);
13544 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
13549 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
13551 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
13554 if (bnxt_alloc_fw_health(bp)) {
13555 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
13557 rc = bnxt_hwrm_error_recovery_qcfg(bp);
13559 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
13563 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
13567 if (bnxt_fw_pre_resv_vnics(bp))
13568 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
13570 bnxt_hwrm_func_qcfg(bp);
13571 bnxt_hwrm_vnic_qcaps(bp);
13572 bnxt_hwrm_port_led_qcaps(bp);
13573 bnxt_ethtool_init(bp);
13574 if (bp->fw_cap & BNXT_FW_CAP_PTP)
13575 __bnxt_hwrm_ptp_qcfg(bp);
13577 bnxt_hwmon_init(bp);
13581 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
13583 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
13584 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
13585 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
13586 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
13587 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
13588 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
13589 bp->rss_hash_delta = bp->rss_hash_cfg;
13590 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
13591 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
13592 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
13593 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
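/* The resulting default is 4-tuple RSS over IPv4/IPv6 TCP plus the plain
 * IPv4/IPv6 2-tuple hash; UDP 4-tuple hashing is only enabled on P4 and
 * newer chips with HWRM spec 1.5.1 or later, matching the
 * BNXT_RSS_CAP_UDP_RSS_CAP capability set above.
 */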
13597 static void bnxt_set_dflt_rfs(struct bnxt *bp)
13599 struct net_device *dev = bp->dev;
13601 dev->hw_features &= ~NETIF_F_NTUPLE;
13602 dev->features &= ~NETIF_F_NTUPLE;
13603 bp->flags &= ~BNXT_FLAG_RFS;
13604 if (bnxt_rfs_supported(bp)) {
13605 dev->hw_features |= NETIF_F_NTUPLE;
13606 if (bnxt_rfs_capable(bp)) {
13607 bp->flags |= BNXT_FLAG_RFS;
13608 dev->features |= NETIF_F_NTUPLE;
13613 static void bnxt_fw_init_one_p3(struct bnxt *bp)
13615 struct pci_dev *pdev = bp->pdev;
13617 bnxt_set_dflt_rss_hash_type(bp);
13618 bnxt_set_dflt_rfs(bp);
13620 bnxt_get_wol_settings(bp);
13621 if (bp->flags & BNXT_FLAG_WOL_CAP)
13622 device_set_wakeup_enable(&pdev->dev, bp->wol);
13624 device_set_wakeup_capable(&pdev->dev, false);
13626 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
13627 bnxt_hwrm_coal_params_qcaps(bp);
13630 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
13632 int bnxt_fw_init_one(struct bnxt *bp)
13636 rc = bnxt_fw_init_one_p1(bp);
13638 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
13641 rc = bnxt_fw_init_one_p2(bp);
13643 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
13646 rc = bnxt_probe_phy(bp, false);
13649 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
13653 bnxt_fw_init_one_p3(bp);
13657 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
13659 struct bnxt_fw_health *fw_health = bp->fw_health;
13660 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
13661 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
13662 u32 reg_type, reg_off, delay_msecs;
13664 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
13665 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
13666 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
13667 switch (reg_type) {
13668 case BNXT_FW_HEALTH_REG_TYPE_CFG:
13669 pci_write_config_dword(bp->pdev, reg_off, val);
13671 case BNXT_FW_HEALTH_REG_TYPE_GRC:
13672 writel(reg_off & BNXT_GRC_BASE_MASK,
13673 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
13674 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
13676 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
13677 writel(val, bp->bar0 + reg_off);
13679 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
13680 writel(val, bp->bar1 + reg_off);
13684 pci_read_config_dword(bp->pdev, 0, &val);
13685 msleep(delay_msecs);
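/* For GRC-type registers the sequence above first programs the register's
 * upper bits into the PF window register (BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4)
 * and then accesses the register through the window at offset 0x2000 in
 * BAR0. The trailing pci_read_config_dword() appears to act as a flush of
 * the posted writes before the per-step msleep() delay.
 */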
13689 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
13691 struct hwrm_func_qcfg_output *resp;
13692 struct hwrm_func_qcfg_input *req;
13693 bool result = true; /* firmware will enforce if unknown */
13695 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
13698 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
13701 req->fid = cpu_to_le16(0xffff);
13702 resp = hwrm_req_hold(bp, req);
13703 if (!hwrm_req_send(bp, req))
13704 result = !!(le16_to_cpu(resp->flags) &
13705 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
13706 hwrm_req_drop(bp, req);
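/* bnxt_hwrm_reset_permitted() above asks firmware (FUNC_QCFG with fid
 * 0xffff, i.e. the function itself) whether a hot reset is currently
 * allowed. When the capability is absent or the query fails, it defaults
 * to true and lets firmware enforce the decision, per the comment above.
 */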
13710 static void bnxt_reset_all(struct bnxt *bp)
13712 struct bnxt_fw_health *fw_health = bp->fw_health;
13715 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13716 bnxt_fw_reset_via_optee(bp);
13717 bp->fw_reset_timestamp = jiffies;
13721 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
13722 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
13723 bnxt_fw_reset_writel(bp, i);
13724 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
13725 struct hwrm_fw_reset_input *req;
13727 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
13729 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
13730 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
13731 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
13732 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
13733 rc = hwrm_req_send(bp, req);
13736 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
13738 bp->fw_reset_timestamp = jiffies;
13741 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
13743 return time_after(jiffies, bp->fw_reset_timestamp +
13744 (bp->fw_reset_max_dsecs * HZ / 10));
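/* fw_reset_max_dsecs is expressed in deciseconds (hence the "* HZ / 10"
 * conversion to jiffies); e.g. a value of 60 gives a 6 second timeout.
 */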
13747 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
13749 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13750 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
13751 bnxt_ulp_start(bp, rc);
13752 bnxt_dl_health_fw_status_update(bp, false);
13754 bp->fw_reset_state = 0;
13755 dev_close(bp->dev);
13758 static void bnxt_fw_reset_task(struct work_struct *work)
13760 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
13763 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
13764 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
13768 switch (bp->fw_reset_state) {
13769 case BNXT_FW_RESET_STATE_POLL_VF: {
13770 int n = bnxt_get_registered_vfs(bp);
13774 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
13775 n, jiffies_to_msecs(jiffies -
13776 bp->fw_reset_timestamp));
13777 goto fw_reset_abort;
13778 } else if (n > 0) {
13779 if (bnxt_fw_reset_timeout(bp)) {
13780 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13781 bp->fw_reset_state = 0;
13782 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
13786 bnxt_queue_fw_reset_work(bp, HZ / 10);
13789 bp->fw_reset_timestamp = jiffies;
13791 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
13792 bnxt_fw_reset_abort(bp, rc);
13796 bnxt_fw_reset_close(bp);
13797 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
13798 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
13801 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13802 tmo = bp->fw_reset_min_dsecs * HZ / 10;
13805 bnxt_queue_fw_reset_work(bp, tmo);
13808 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
13811 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
13812 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
13813 !bnxt_fw_reset_timeout(bp)) {
13814 bnxt_queue_fw_reset_work(bp, HZ / 5);
13818 if (!bp->fw_health->primary) {
13819 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
13821 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13822 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
13825 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
13828 case BNXT_FW_RESET_STATE_RESET_FW:
13829 bnxt_reset_all(bp);
13830 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
13831 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
13833 case BNXT_FW_RESET_STATE_ENABLE_DEV:
13834 bnxt_inv_fw_health_reg(bp);
13835 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
13836 !bp->fw_reset_min_dsecs) {
13839 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
13840 if (val == 0xffff) {
13841 if (bnxt_fw_reset_timeout(bp)) {
13842 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
13844 goto fw_reset_abort;
13846 bnxt_queue_fw_reset_work(bp, HZ / 1000);
13850 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
13851 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
13852 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
13853 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
13854 bnxt_dl_remote_reload(bp);
13855 if (pci_enable_device(bp->pdev)) {
13856 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
13858 goto fw_reset_abort;
13860 pci_set_master(bp->pdev);
13861 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
13863 case BNXT_FW_RESET_STATE_POLL_FW:
13864 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
13865 rc = bnxt_hwrm_poll(bp);
13867 if (bnxt_fw_reset_timeout(bp)) {
13868 netdev_err(bp->dev, "Firmware reset aborted\n");
13869 goto fw_reset_abort_status;
13871 bnxt_queue_fw_reset_work(bp, HZ / 5);
13874 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
13875 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
13877 case BNXT_FW_RESET_STATE_OPENING:
13878 while (!rtnl_trylock()) {
13879 bnxt_queue_fw_reset_work(bp, HZ / 10);
13882 rc = bnxt_open(bp->dev);
13884 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
13885 bnxt_fw_reset_abort(bp, rc);
13890 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
13891 bp->fw_health->enabled) {
13892 bp->fw_health->last_fw_reset_cnt =
13893 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
13895 bp->fw_reset_state = 0;
13896 /* Make sure fw_reset_state is 0 before clearing the flag */
13897 smp_mb__before_atomic();
13898 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13899 bnxt_ulp_start(bp, 0);
13900 bnxt_reenable_sriov(bp);
13901 bnxt_vf_reps_alloc(bp);
13902 bnxt_vf_reps_open(bp);
13903 bnxt_ptp_reapply_pps(bp);
13904 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
13905 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
13906 bnxt_dl_health_fw_recovery_done(bp);
13907 bnxt_dl_health_fw_status_update(bp, true);
13914 fw_reset_abort_status:
13915 if (bp->fw_health->status_reliable ||
13916 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
13917 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
13919 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
13923 bnxt_fw_reset_abort(bp, rc);
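/* Roughly, the firmware reset state machine above proceeds as:
 *   POLL_VF (wait for VFs to unregister) -> close the NIC ->
 *   POLL_FW_DOWN (and RESET_FW on the primary function) or straight to
 *   ENABLE_DEV -> ENABLE_DEV (re-enable the PCI device once config space
 *   is readable) -> POLL_FW (poll until HWRM responds) -> OPENING
 *   (reopen the NIC under rtnl and restart ULPs, SR-IOV and VF reps).
 * Timeouts and fatal errors fall through to bnxt_fw_reset_abort().
 */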
13927 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
13930 struct bnxt *bp = netdev_priv(dev);
13932 SET_NETDEV_DEV(dev, &pdev->dev);
13934 /* enable device (incl. PCI PM wakeup), and bus-mastering */
13935 rc = pci_enable_device(pdev);
13937 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
13941 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13942 dev_err(&pdev->dev,
13943 "Cannot find PCI device base address, aborting\n");
13945 goto init_err_disable;
13948 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13950 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
13951 goto init_err_disable;
13954 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
13955 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
13956 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
13958 goto init_err_release;
13961 pci_set_master(pdev);
13966 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
13967 * determines the BAR size.
13969 bp->bar0 = pci_ioremap_bar(pdev, 0);
13971 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
13973 goto init_err_release;
13976 bp->bar2 = pci_ioremap_bar(pdev, 4);
13978 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
13980 goto init_err_release;
13983 INIT_WORK(&bp->sp_task, bnxt_sp_task);
13984 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
13986 spin_lock_init(&bp->ntp_fltr_lock);
13987 #if BITS_PER_LONG == 32
13988 spin_lock_init(&bp->db_lock);
13991 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
13992 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
13994 timer_setup(&bp->timer, bnxt_timer, 0);
13995 bp->current_interval = BNXT_TIMER_INTERVAL;
13997 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
13998 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
14000 clear_bit(BNXT_STATE_OPEN, &bp->state);
14004 bnxt_unmap_bars(bp, pdev);
14005 pci_release_regions(pdev);
14008 pci_disable_device(pdev);
14014 /* rtnl_lock held */
14015 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
14017 struct sockaddr *addr = p;
14018 struct bnxt *bp = netdev_priv(dev);
14021 if (!is_valid_ether_addr(addr->sa_data))
14022 return -EADDRNOTAVAIL;
14024 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
14027 rc = bnxt_approve_mac(bp, addr->sa_data, true);
14031 eth_hw_addr_set(dev, addr->sa_data);
14032 bnxt_clear_usr_fltrs(bp, true);
14033 if (netif_running(dev)) {
14034 bnxt_close_nic(bp, false, false);
14035 rc = bnxt_open_nic(bp, false, false);
14041 /* rtnl_lock held */
14042 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
14044 struct bnxt *bp = netdev_priv(dev);
14046 if (netif_running(dev))
14047 bnxt_close_nic(bp, true, false);
14049 dev->mtu = new_mtu;
14050 bnxt_set_ring_params(bp);
14052 if (netif_running(dev))
14053 return bnxt_open_nic(bp, true, false);
14058 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
14060 struct bnxt *bp = netdev_priv(dev);
14064 if (tc > bp->max_tc) {
14065 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
14070 if (bp->num_tc == tc)
14073 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
14076 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
14077 sh, tc, bp->tx_nr_rings_xdp);
14081 /* Needs to close the device and do hw resource re-allocations */
14082 if (netif_running(bp->dev))
14083 bnxt_close_nic(bp, true, false);
14086 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
14087 netdev_set_num_tc(dev, tc);
14090 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
14091 netdev_reset_tc(dev);
14094 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
14095 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
14096 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
14097 tx_cp + bp->rx_nr_rings;
14099 if (netif_running(bp->dev))
14100 return bnxt_open_nic(bp, true, false);
14105 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
14108 struct bnxt *bp = cb_priv;
14110 if (!bnxt_tc_flower_enabled(bp) ||
14111 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
14112 return -EOPNOTSUPP;
14115 case TC_SETUP_CLSFLOWER:
14116 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
14118 return -EOPNOTSUPP;
14122 LIST_HEAD(bnxt_block_cb_list);
14124 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
14127 struct bnxt *bp = netdev_priv(dev);
14130 case TC_SETUP_BLOCK:
14131 return flow_block_cb_setup_simple(type_data,
14132 &bnxt_block_cb_list,
14133 bnxt_setup_tc_block_cb,
14135 case TC_SETUP_QDISC_MQPRIO: {
14136 struct tc_mqprio_qopt *mqprio = type_data;
14138 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
14140 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
14143 return -EOPNOTSUPP;
14147 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
14148 const struct sk_buff *skb)
14150 struct bnxt_vnic_info *vnic;
14153 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
14155 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
14156 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
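/* The ntuple filter table index is either the raw skb hash masked to the
 * table size or, on the other branch of the (elided) check above, a
 * Toeplitz hash computed over the flow keys with the default VNIC's RSS
 * hash key, presumably so the index matches what the hardware computes.
 */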
14159 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
14162 struct hlist_head *head;
14165 spin_lock_bh(&bp->ntp_fltr_lock);
14166 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
14168 spin_unlock_bh(&bp->ntp_fltr_lock);
14172 fltr->base.sw_id = (u16)bit_id;
14173 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
14174 fltr->base.flags |= BNXT_ACT_RING_DST;
14175 head = &bp->ntp_fltr_hash_tbl[idx];
14176 hlist_add_head_rcu(&fltr->base.hash, head);
14177 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
14178 bnxt_insert_usr_fltr(bp, &fltr->base);
14179 bp->ntp_fltr_count++;
14180 spin_unlock_bh(&bp->ntp_fltr_lock);
14184 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
14185 struct bnxt_ntuple_filter *f2)
14187 struct bnxt_flow_masks *masks1 = &f1->fmasks;
14188 struct bnxt_flow_masks *masks2 = &f2->fmasks;
14189 struct flow_keys *keys1 = &f1->fkeys;
14190 struct flow_keys *keys2 = &f2->fkeys;
14192 if (keys1->basic.n_proto != keys2->basic.n_proto ||
14193 keys1->basic.ip_proto != keys2->basic.ip_proto)
14196 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
14197 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
14198 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
14199 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
14200 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
14203 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
14204 &keys2->addrs.v6addrs.src) ||
14205 !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
14206 &masks2->addrs.v6addrs.src) ||
14207 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
14208 &keys2->addrs.v6addrs.dst) ||
14209 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
14210 &masks2->addrs.v6addrs.dst))
14214 return keys1->ports.src == keys2->ports.src &&
14215 masks1->ports.src == masks2->ports.src &&
14216 keys1->ports.dst == keys2->ports.dst &&
14217 masks1->ports.dst == masks2->ports.dst &&
14218 keys1->control.flags == keys2->control.flags &&
14219 f1->l2_fltr == f2->l2_fltr;
14222 struct bnxt_ntuple_filter *
14223 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
14224 struct bnxt_ntuple_filter *fltr, u32 idx)
14226 struct bnxt_ntuple_filter *f;
14227 struct hlist_head *head;
14229 head = &bp->ntp_fltr_hash_tbl[idx];
14230 hlist_for_each_entry_rcu(f, head, base.hash) {
14231 if (bnxt_fltr_match(f, fltr))
14237 #ifdef CONFIG_RFS_ACCEL
14238 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
14239 u16 rxq_index, u32 flow_id)
14241 struct bnxt *bp = netdev_priv(dev);
14242 struct bnxt_ntuple_filter *fltr, *new_fltr;
14243 struct flow_keys *fkeys;
14244 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
14245 struct bnxt_l2_filter *l2_fltr;
14249 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
14250 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
14251 atomic_inc(&l2_fltr->refcnt);
14253 struct bnxt_l2_key key;
14255 ether_addr_copy(key.dst_mac_addr, eth->h_dest);
14257 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
14260 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
14261 bnxt_del_l2_filter(bp, l2_fltr);
14265 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
14267 bnxt_del_l2_filter(bp, l2_fltr);
14271 fkeys = &new_fltr->fkeys;
14272 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
14273 rc = -EPROTONOSUPPORT;
14277 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
14278 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
14279 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
14280 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
14281 rc = -EPROTONOSUPPORT;
14284 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
14285 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
14286 if (bp->hwrm_spec_code < 0x10601) {
14287 rc = -EPROTONOSUPPORT;
14290 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
14292 flags = fkeys->control.flags;
14293 if (((flags & FLOW_DIS_ENCAPSULATION) &&
14294 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
14295 rc = -EPROTONOSUPPORT;
14298 new_fltr->l2_fltr = l2_fltr;
14300 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
14302 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
14304 rc = fltr->base.sw_id;
14310 new_fltr->flow_id = flow_id;
14311 new_fltr->base.rxq = rxq_index;
14312 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
14314 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
14315 return new_fltr->base.sw_id;
14319 bnxt_del_l2_filter(bp, l2_fltr);
14325 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
14327 spin_lock_bh(&bp->ntp_fltr_lock);
14328 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
14329 spin_unlock_bh(&bp->ntp_fltr_lock);
14332 hlist_del_rcu(&fltr->base.hash);
14333 bnxt_del_one_usr_fltr(bp, &fltr->base);
14334 bp->ntp_fltr_count--;
14335 spin_unlock_bh(&bp->ntp_fltr_lock);
14336 bnxt_del_l2_filter(bp, fltr->l2_fltr);
14337 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
14338 kfree_rcu(fltr, base.rcu);
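/* Deletion order matters here: the filter is unhashed under ntp_fltr_lock
 * with hlist_del_rcu(), its L2 filter reference is dropped, its bitmap id
 * is released, and the memory itself is only freed after a grace period
 * via kfree_rcu(), so that lockless readers walking the hash table (e.g.
 * in bnxt_lookup_ntp_filter_from_idx()) never see freed memory.
 */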
14341 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
14343 #ifdef CONFIG_RFS_ACCEL
14346 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
14347 struct hlist_head *head;
14348 struct hlist_node *tmp;
14349 struct bnxt_ntuple_filter *fltr;
14352 head = &bp->ntp_fltr_hash_tbl[i];
14353 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
14356 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
14357 if (fltr->base.flags & BNXT_ACT_NO_AGING)
14359 if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
14361 fltr->base.sw_id)) {
14362 bnxt_hwrm_cfa_ntuple_filter_free(bp,
14367 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
14372 set_bit(BNXT_FLTR_VALID, &fltr->base.state);
14376 bnxt_del_ntp_filter(bp, fltr);
14382 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
14383 unsigned int entry, struct udp_tunnel_info *ti)
14385 struct bnxt *bp = netdev_priv(netdev);
14388 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
14389 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
14390 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
14391 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
14393 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
14395 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
14398 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
14399 unsigned int entry, struct udp_tunnel_info *ti)
14401 struct bnxt *bp = netdev_priv(netdev);
14404 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
14405 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
14406 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
14407 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
14409 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
14411 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
14414 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
14415 .set_port = bnxt_udp_tunnel_set_port,
14416 .unset_port = bnxt_udp_tunnel_unset_port,
14417 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
14418 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
14420 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
14421 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
14423 }, bnxt_udp_tunnels_p7 = {
14424 .set_port = bnxt_udp_tunnel_set_port,
14425 .unset_port = bnxt_udp_tunnel_unset_port,
14426 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
14427 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
14429 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
14430 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
14431 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
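/* Two udp_tunnel tables are provided: the base one exposes one VXLAN and
 * one GENEVE dst-port slot, while the P7 variant adds a VXLAN-GPE slot.
 * Both are marked MAY_SLEEP and OPEN_ONLY, so ports are only programmed
 * into firmware while the device is open.
 */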
14435 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
14436 struct net_device *dev, u32 filter_mask,
14439 struct bnxt *bp = netdev_priv(dev);
14441 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
14442 nlflags, filter_mask, NULL);
14445 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
14446 u16 flags, struct netlink_ext_ack *extack)
14448 struct bnxt *bp = netdev_priv(dev);
14449 struct nlattr *attr, *br_spec;
14452 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
14453 return -EOPNOTSUPP;
14455 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
14459 nla_for_each_nested(attr, br_spec, rem) {
14462 if (nla_type(attr) != IFLA_BRIDGE_MODE)
14465 mode = nla_get_u16(attr);
14466 if (mode == bp->br_mode)
14469 rc = bnxt_hwrm_set_br_mode(bp, mode);
14471 bp->br_mode = mode;
14477 int bnxt_get_port_parent_id(struct net_device *dev,
14478 struct netdev_phys_item_id *ppid)
14480 struct bnxt *bp = netdev_priv(dev);
14482 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
14483 return -EOPNOTSUPP;
14485 /* The PF and its VF-reps only support the switchdev framework */
14486 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
14487 return -EOPNOTSUPP;
14489 ppid->id_len = sizeof(bp->dsn);
14490 memcpy(ppid->id, bp->dsn, ppid->id_len);
14495 static const struct net_device_ops bnxt_netdev_ops = {
14496 .ndo_open = bnxt_open,
14497 .ndo_start_xmit = bnxt_start_xmit,
14498 .ndo_stop = bnxt_close,
14499 .ndo_get_stats64 = bnxt_get_stats64,
14500 .ndo_set_rx_mode = bnxt_set_rx_mode,
14501 .ndo_eth_ioctl = bnxt_ioctl,
14502 .ndo_validate_addr = eth_validate_addr,
14503 .ndo_set_mac_address = bnxt_change_mac_addr,
14504 .ndo_change_mtu = bnxt_change_mtu,
14505 .ndo_fix_features = bnxt_fix_features,
14506 .ndo_set_features = bnxt_set_features,
14507 .ndo_features_check = bnxt_features_check,
14508 .ndo_tx_timeout = bnxt_tx_timeout,
14509 #ifdef CONFIG_BNXT_SRIOV
14510 .ndo_get_vf_config = bnxt_get_vf_config,
14511 .ndo_set_vf_mac = bnxt_set_vf_mac,
14512 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
14513 .ndo_set_vf_rate = bnxt_set_vf_bw,
14514 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
14515 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
14516 .ndo_set_vf_trust = bnxt_set_vf_trust,
14518 .ndo_setup_tc = bnxt_setup_tc,
14519 #ifdef CONFIG_RFS_ACCEL
14520 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
14522 .ndo_bpf = bnxt_xdp,
14523 .ndo_xdp_xmit = bnxt_xdp_xmit,
14524 .ndo_bridge_getlink = bnxt_bridge_getlink,
14525 .ndo_bridge_setlink = bnxt_bridge_setlink,
14528 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
14529 struct netdev_queue_stats_rx *stats)
14531 struct bnxt *bp = netdev_priv(dev);
14532 struct bnxt_cp_ring_info *cpr;
14535 cpr = &bp->bnapi[i]->cp_ring;
14536 sw = cpr->stats.sw_stats;
14538 stats->packets = 0;
14539 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
14540 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
14541 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
14544 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
14545 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
14546 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
14548 stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards;
14551 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
14552 struct netdev_queue_stats_tx *stats)
14554 struct bnxt *bp = netdev_priv(dev);
14555 struct bnxt_napi *bnapi;
14558 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
14559 sw = bnapi->cp_ring.stats.sw_stats;
14561 stats->packets = 0;
14562 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
14563 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
14564 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
14567 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
14568 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
14569 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
14572 static void bnxt_get_base_stats(struct net_device *dev,
14573 struct netdev_queue_stats_rx *rx,
14574 struct netdev_queue_stats_tx *tx)
14576 struct bnxt *bp = netdev_priv(dev);
14578 rx->packets = bp->net_stats_prev.rx_packets;
14579 rx->bytes = bp->net_stats_prev.rx_bytes;
14580 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
14582 tx->packets = bp->net_stats_prev.tx_packets;
14583 tx->bytes = bp->net_stats_prev.tx_bytes;
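/* The "base" stats above report the totals carried over in
 * bp->net_stats_prev / ring_err_stats_prev, presumably counters
 * accumulated before the most recent ring teardown, which the per-queue
 * helpers above do not include.
 */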
14586 static const struct netdev_stat_ops bnxt_stat_ops = {
14587 .get_queue_stats_rx = bnxt_get_queue_stats_rx,
14588 .get_queue_stats_tx = bnxt_get_queue_stats_tx,
14589 .get_base_stats = bnxt_get_base_stats,
14592 static void bnxt_remove_one(struct pci_dev *pdev)
14594 struct net_device *dev = pci_get_drvdata(pdev);
14595 struct bnxt *bp = netdev_priv(dev);
14598 bnxt_sriov_disable(bp);
14600 bnxt_rdma_aux_device_uninit(bp);
14602 bnxt_ptp_clear(bp);
14603 unregister_netdev(dev);
14604 bnxt_free_l2_filters(bp, true);
14605 bnxt_free_ntp_fltrs(bp, true);
14606 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
14607 /* Flush any pending tasks */
14608 cancel_work_sync(&bp->sp_task);
14609 cancel_delayed_work_sync(&bp->fw_reset_task);
14612 bnxt_dl_fw_reporters_destroy(bp);
14613 bnxt_dl_unregister(bp);
14614 bnxt_shutdown_tc(bp);
14616 bnxt_clear_int_mode(bp);
14617 bnxt_hwrm_func_drv_unrgtr(bp);
14618 bnxt_free_hwrm_resources(bp);
14619 bnxt_hwmon_uninit(bp);
14620 bnxt_ethtool_free(bp);
14622 kfree(bp->ptp_cfg);
14623 bp->ptp_cfg = NULL;
14624 kfree(bp->fw_health);
14625 bp->fw_health = NULL;
14626 bnxt_cleanup_pci(bp);
14627 bnxt_free_ctx_mem(bp);
14628 kfree(bp->rss_indir_tbl);
14629 bp->rss_indir_tbl = NULL;
14630 bnxt_free_port_stats(bp);
14634 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
14637 struct bnxt_link_info *link_info = &bp->link_info;
14640 rc = bnxt_hwrm_phy_qcaps(bp);
14642 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
14646 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
14647 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
14649 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
14653 mutex_lock(&bp->link_lock);
14654 rc = bnxt_update_link(bp, false);
14656 mutex_unlock(&bp->link_lock);
14657 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
14662 /* Older firmware does not have supported_auto_speeds, so assume
14663 * that all supported speeds can be autonegotiated.
14665 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
14666 link_info->support_auto_speeds = link_info->support_speeds;
14668 bnxt_init_ethtool_link_settings(bp);
14669 mutex_unlock(&bp->link_lock);
14673 static int bnxt_get_max_irq(struct pci_dev *pdev)
14677 if (!pdev->msix_cap)
14680 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
14681 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
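/* The QSIZE field of the MSI-X Message Control register encodes the table
 * size minus one, hence the "+ 1"; e.g. a QSIZE value of 0x7f means 128
 * MSI-X vectors are available.
 */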
14684 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14687 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
14688 int max_ring_grps = 0, max_irq;
14690 *max_tx = hw_resc->max_tx_rings;
14691 *max_rx = hw_resc->max_rx_rings;
14692 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
14693 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
14694 bnxt_get_ulp_msix_num(bp),
14695 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
14696 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
14697 *max_cp = min_t(int, *max_cp, max_irq);
14698 max_ring_grps = hw_resc->max_hw_ring_grps;
14699 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
14703 if (bp->flags & BNXT_FLAG_AGG_RINGS)
14705 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
14708 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
14713 /* On P5 chips, max_cp output param should be available NQs */
14716 *max_rx = min_t(int, *max_rx, max_ring_grps);
14719 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
14723 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
14726 if (!rx || !tx || !cp)
14729 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
14732 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
14737 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
14738 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
14739 /* Not enough rings, try disabling agg rings. */
14740 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
14741 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
14743 /* set BNXT_FLAG_AGG_RINGS back for consistency */
14744 bp->flags |= BNXT_FLAG_AGG_RINGS;
14747 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
14748 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
14749 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
14750 bnxt_set_ring_params(bp);
14753 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
14754 int max_cp, max_stat, max_irq;
14756 /* Reserve minimum resources for RoCE */
14757 max_cp = bnxt_get_max_func_cp_rings(bp);
14758 max_stat = bnxt_get_max_func_stat_ctxs(bp);
14759 max_irq = bnxt_get_max_func_irqs(bp);
14760 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
14761 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
14762 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
14765 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
14766 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
14767 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
14768 max_cp = min_t(int, max_cp, max_irq);
14769 max_cp = min_t(int, max_cp, max_stat);
14770 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
14777 /* In the initial default shared ring setting, each shared ring must have a companion RX ring and TX ring. */
14780 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
14782 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
14783 bp->rx_nr_rings = bp->cp_nr_rings;
14784 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
14785 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
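/* bnxt_trim_dflt_sh_rings() clamps the shared completion ring count to
 * min(TX-per-TC, RX) and then forces the RX and TX counts to match it, so
 * that every shared completion ring ends up paired with exactly one RX
 * and one TX ring.
 */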
14788 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
14790 int dflt_rings, max_rx_rings, max_tx_rings, rc;
14792 if (!bnxt_can_reserve_rings(bp))
14796 bp->flags |= BNXT_FLAG_SHARED_RINGS;
14797 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
14798 /* Reduce default rings on multi-port cards so that total default
14799 * rings do not exceed CPU count.
14801 if (bp->port_count > 1) {
14803 max_t(int, num_online_cpus() / bp->port_count, 1);
14805 dflt_rings = min_t(int, dflt_rings, max_rings);
14807 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
14810 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
14811 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
14813 bnxt_trim_dflt_sh_rings(bp);
14815 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
14816 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
14818 rc = __bnxt_reserve_rings(bp);
14819 if (rc && rc != -ENODEV)
14820 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
14821 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
14823 bnxt_trim_dflt_sh_rings(bp);
14825 /* Rings may have been trimmed, re-reserve the trimmed rings. */
14826 if (bnxt_need_reserve_rings(bp)) {
14827 rc = __bnxt_reserve_rings(bp);
14828 if (rc && rc != -ENODEV)
14829 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
14830 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
14832 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
14837 bp->tx_nr_rings = 0;
14838 bp->rx_nr_rings = 0;
14843 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
14847 if (bp->tx_nr_rings)
14850 bnxt_ulp_irq_stop(bp);
14851 bnxt_clear_int_mode(bp);
14852 rc = bnxt_set_dflt_rings(bp, true);
14854 if (BNXT_VF(bp) && rc == -ENODEV)
14855 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
14857 netdev_err(bp->dev, "Not enough rings available.\n");
14858 goto init_dflt_ring_err;
14860 rc = bnxt_init_int_mode(bp);
14862 goto init_dflt_ring_err;
14864 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
14866 bnxt_set_dflt_rfs(bp);
14868 init_dflt_ring_err:
14869 bnxt_ulp_irq_restart(bp, rc);
14873 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
14878 bnxt_hwrm_func_qcaps(bp);
14880 if (netif_running(bp->dev))
14881 __bnxt_close_nic(bp, true, false);
14883 bnxt_ulp_irq_stop(bp);
14884 bnxt_clear_int_mode(bp);
14885 rc = bnxt_init_int_mode(bp);
14886 bnxt_ulp_irq_restart(bp, rc);
14888 if (netif_running(bp->dev)) {
14890 dev_close(bp->dev);
14892 rc = bnxt_open_nic(bp, true, false);
14898 static int bnxt_init_mac_addr(struct bnxt *bp)
14903 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
14905 #ifdef CONFIG_BNXT_SRIOV
14906 struct bnxt_vf_info *vf = &bp->vf;
14907 bool strict_approval = true;
14909 if (is_valid_ether_addr(vf->mac_addr)) {
14910 /* overwrite netdev dev_addr with admin VF MAC */
14911 eth_hw_addr_set(bp->dev, vf->mac_addr);
14912 /* Older PF driver or firmware may not approve this
14915 strict_approval = false;
14917 eth_hw_addr_random(bp->dev);
14919 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
14925 static void bnxt_vpd_read_info(struct bnxt *bp)
14927 struct pci_dev *pdev = bp->pdev;
14928 unsigned int vpd_size, kw_len;
14932 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
14933 if (IS_ERR(vpd_data)) {
14934 pci_warn(pdev, "Unable to read VPD\n");
14938 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
14939 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
14943 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
14944 memcpy(bp->board_partno, &vpd_data[pos], size);
14947 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
14948 PCI_VPD_RO_KEYWORD_SERIALNO,
14953 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
14954 memcpy(bp->board_serialno, &vpd_data[pos], size);
14959 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
14961 struct pci_dev *pdev = bp->pdev;
14964 qword = pci_get_dsn(pdev);
14966 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
14967 return -EOPNOTSUPP;
14970 put_unaligned_le64(qword, dsn);
14972 bp->flags |= BNXT_FLAG_DSN_VALID;
14976 static int bnxt_map_db_bar(struct bnxt *bp)
14980 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
14986 void bnxt_print_device_info(struct bnxt *bp)
14988 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
14989 board_info[bp->board_idx].name,
14990 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
14992 pcie_print_link_status(bp->pdev);
14995 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
14997 struct bnxt_hw_resc *hw_resc;
14998 struct net_device *dev;
15002 if (pci_is_bridge(pdev))
15005 /* Clear any pending DMA transactions from the crash kernel
15006 * while loading the driver in the capture kernel.
15008 if (is_kdump_kernel()) {
15009 pci_clear_master(pdev);
15013 max_irqs = bnxt_get_max_irq(pdev);
15014 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
15019 bp = netdev_priv(dev);
15020 bp->board_idx = ent->driver_data;
15021 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
15022 bnxt_set_max_func_irqs(bp, max_irqs);
15024 if (bnxt_vf_pciid(bp->board_idx))
15025 bp->flags |= BNXT_FLAG_VF;
15027 /* No devlink port registration in case of a VF */
15029 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
15031 if (pdev->msix_cap)
15032 bp->flags |= BNXT_FLAG_MSIX_CAP;
15034 rc = bnxt_init_board(pdev, dev);
15036 goto init_err_free;
15038 dev->netdev_ops = &bnxt_netdev_ops;
15039 dev->stat_ops = &bnxt_stat_ops;
15040 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
15041 dev->ethtool_ops = &bnxt_ethtool_ops;
15042 pci_set_drvdata(pdev, dev);
15044 rc = bnxt_alloc_hwrm_resources(bp);
15046 goto init_err_pci_clean;
15048 mutex_init(&bp->hwrm_cmd_lock);
15049 mutex_init(&bp->link_lock);
15051 rc = bnxt_fw_init_one_p1(bp);
15053 goto init_err_pci_clean;
15056 bnxt_vpd_read_info(bp);
15058 if (BNXT_CHIP_P5_PLUS(bp)) {
15059 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
15060 if (BNXT_CHIP_P7(bp))
15061 bp->flags |= BNXT_FLAG_CHIP_P7;
15064 rc = bnxt_alloc_rss_indir_tbl(bp);
15066 goto init_err_pci_clean;
15068 rc = bnxt_fw_init_one_p2(bp);
15070 goto init_err_pci_clean;
15072 rc = bnxt_map_db_bar(bp);
15074 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
15076 goto init_err_pci_clean;
15079 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
15080 NETIF_F_TSO | NETIF_F_TSO6 |
15081 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
15082 NETIF_F_GSO_IPXIP4 |
15083 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
15084 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
15085 NETIF_F_RXCSUM | NETIF_F_GRO;
15086 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
15087 dev->hw_features |= NETIF_F_GSO_UDP_L4;
15089 if (BNXT_SUPPORTS_TPA(bp))
15090 dev->hw_features |= NETIF_F_LRO;
15092 dev->hw_enc_features =
15093 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
15094 NETIF_F_TSO | NETIF_F_TSO6 |
15095 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
15096 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
15097 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
15098 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
15099 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
15100 if (bp->flags & BNXT_FLAG_CHIP_P7)
15101 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
15103 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
15105 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
15106 NETIF_F_GSO_GRE_CSUM;
15107 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
15108 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
15109 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
15110 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
15111 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
15112 if (BNXT_SUPPORTS_TPA(bp))
15113 dev->hw_features |= NETIF_F_GRO_HW;
15114 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
15115 if (dev->features & NETIF_F_GRO_HW)
15116 dev->features &= ~NETIF_F_LRO;
15117 dev->priv_flags |= IFF_UNICAST_FLT;
15119 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
15121 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
15122 NETDEV_XDP_ACT_RX_SG;
15124 #ifdef CONFIG_BNXT_SRIOV
15125 init_waitqueue_head(&bp->sriov_cfg_wait);
15127 if (BNXT_SUPPORTS_TPA(bp)) {
15128 bp->gro_func = bnxt_gro_func_5730x;
15129 if (BNXT_CHIP_P4(bp))
15130 bp->gro_func = bnxt_gro_func_5731x;
15131 else if (BNXT_CHIP_P5_PLUS(bp))
15132 bp->gro_func = bnxt_gro_func_5750x;
15134 if (!BNXT_CHIP_P4_PLUS(bp))
15135 bp->flags |= BNXT_FLAG_DOUBLE_DB;
15137 rc = bnxt_init_mac_addr(bp);
15139 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
15140 rc = -EADDRNOTAVAIL;
15141 goto init_err_pci_clean;
15145 /* Read the adapter's DSN to use as the eswitch switch_id */
15146 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
15149 /* MTU range: 60 - FW defined max */
15150 dev->min_mtu = ETH_ZLEN;
15151 dev->max_mtu = bp->max_mtu;
15153 rc = bnxt_probe_phy(bp, true);
15155 goto init_err_pci_clean;
15157 hw_resc = &bp->hw_resc;
15158 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
15159 BNXT_L2_FLTR_MAX_FLTR;
15160 /* Older firmware may not report these filters properly */
15161 if (bp->max_fltr < BNXT_MAX_FLTR)
15162 bp->max_fltr = BNXT_MAX_FLTR;
15163 bnxt_init_l2_fltr_tbl(bp);
15164 bnxt_set_rx_skb_mode(bp, false);
15165 bnxt_set_tpa_flags(bp);
15166 bnxt_set_ring_params(bp);
15167 rc = bnxt_set_dflt_rings(bp, true);
15169 if (BNXT_VF(bp) && rc == -ENODEV) {
15170 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
15172 netdev_err(bp->dev, "Not enough rings available.\n");
15175 goto init_err_pci_clean;
15178 bnxt_fw_init_one_p3(bp);
15180 bnxt_init_dflt_coal(bp);
15182 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
15183 bp->flags |= BNXT_FLAG_STRIP_VLAN;
15185 rc = bnxt_init_int_mode(bp);
15187 goto init_err_pci_clean;
15189 /* No TC has been set yet and rings may have been trimmed due to
15190 * limited MSIX, so we re-initialize the TX rings per TC.
15192 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
15197 create_singlethread_workqueue("bnxt_pf_wq");
15199 dev_err(&pdev->dev, "Unable to create workqueue.\n");
15201 goto init_err_pci_clean;
15204 rc = bnxt_init_tc(bp);
15206 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
15210 bnxt_inv_fw_health_reg(bp);
15211 rc = bnxt_dl_register(bp);
15215 INIT_LIST_HEAD(&bp->usr_fltr_list);
15217 rc = register_netdev(dev);
15219 goto init_err_cleanup;
15221 bnxt_dl_fw_reporters_create(bp);
15223 bnxt_rdma_aux_device_init(bp);
15225 bnxt_print_device_info(bp);
15227 pci_save_state(pdev);
15231 bnxt_dl_unregister(bp);
15233 bnxt_shutdown_tc(bp);
15234 bnxt_clear_int_mode(bp);
15236 init_err_pci_clean:
15237 bnxt_hwrm_func_drv_unrgtr(bp);
15238 bnxt_free_hwrm_resources(bp);
15239 bnxt_hwmon_uninit(bp);
15240 bnxt_ethtool_free(bp);
15241 bnxt_ptp_clear(bp);
15242 kfree(bp->ptp_cfg);
15243 bp->ptp_cfg = NULL;
15244 kfree(bp->fw_health);
15245 bp->fw_health = NULL;
15246 bnxt_cleanup_pci(bp);
15247 bnxt_free_ctx_mem(bp);
15248 kfree(bp->rss_indir_tbl);
15249 bp->rss_indir_tbl = NULL;
15256 static void bnxt_shutdown(struct pci_dev *pdev)
15258 struct net_device *dev = pci_get_drvdata(pdev);
15265 bp = netdev_priv(dev);
15267 goto shutdown_exit;
15269 if (netif_running(dev))
15272 bnxt_clear_int_mode(bp);
15273 pci_disable_device(pdev);
15275 if (system_state == SYSTEM_POWER_OFF) {
15276 pci_wake_from_d3(pdev, bp->wol);
15277 pci_set_power_state(pdev, PCI_D3hot);
15284 #ifdef CONFIG_PM_SLEEP
15285 static int bnxt_suspend(struct device *device)
15287 struct net_device *dev = dev_get_drvdata(device);
15288 struct bnxt *bp = netdev_priv(dev);
15293 if (netif_running(dev)) {
15294 netif_device_detach(dev);
15295 rc = bnxt_close(dev);
15297 bnxt_hwrm_func_drv_unrgtr(bp);
15298 pci_disable_device(bp->pdev);
15299 bnxt_free_ctx_mem(bp);
15304 static int bnxt_resume(struct device *device)
15306 struct net_device *dev = dev_get_drvdata(device);
15307 struct bnxt *bp = netdev_priv(dev);
15311 rc = pci_enable_device(bp->pdev);
15313 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
15317 pci_set_master(bp->pdev);
15318 if (bnxt_hwrm_ver_get(bp)) {
15322 rc = bnxt_hwrm_func_reset(bp);
15328 rc = bnxt_hwrm_func_qcaps(bp);
15332 bnxt_clear_reservations(bp, true);
15334 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
15339 bnxt_get_wol_settings(bp);
15340 if (netif_running(dev)) {
15341 rc = bnxt_open(dev);
15343 netif_device_attach(dev);
15347 bnxt_ulp_start(bp, rc);
15349 bnxt_reenable_sriov(bp);
15354 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
15355 #define BNXT_PM_OPS (&bnxt_pm_ops)
15359 #define BNXT_PM_OPS NULL
15361 #endif /* CONFIG_PM_SLEEP */
15364 * bnxt_io_error_detected - called when PCI error is detected
15365 * @pdev: Pointer to PCI device
15366 * @state: The current pci connection state
15368 * This function is called after a PCI bus error affecting
15369 * this device has been detected.
15371 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
15372 pci_channel_state_t state)
15374 struct net_device *netdev = pci_get_drvdata(pdev);
15375 struct bnxt *bp = netdev_priv(netdev);
15377 netdev_info(netdev, "PCI I/O error detected\n");
15380 netif_device_detach(netdev);
15384 if (state == pci_channel_io_perm_failure) {
15386 return PCI_ERS_RESULT_DISCONNECT;
15389 if (state == pci_channel_io_frozen)
15390 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
15392 if (netif_running(netdev))
15393 bnxt_close(netdev);
15395 if (pci_is_enabled(pdev))
15396 pci_disable_device(pdev);
15397 bnxt_free_ctx_mem(bp);
15400 /* Request a slot reset. */
15401 return PCI_ERS_RESULT_NEED_RESET;
15405 * bnxt_io_slot_reset - called after the pci bus has been reset.
15406 * @pdev: Pointer to PCI device
15408 * Restart the card from scratch, as if from a cold-boot.
15409 * At this point, the card has experienced a hard reset,
15410 * followed by fixups by BIOS, and has its config space
15411 * set up identically to what it was at cold boot.
15413 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
15415 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
15416 struct net_device *netdev = pci_get_drvdata(pdev);
15417 struct bnxt *bp = netdev_priv(netdev);
15422 netdev_info(bp->dev, "PCI Slot Reset\n");
15426 if (pci_enable_device(pdev)) {
15427 dev_err(&pdev->dev,
15428 "Cannot re-enable PCI device after reset.\n");
15430 pci_set_master(pdev);
15431 /* Upon a fatal error, the device's internal logic that latches the
15432 * BAR values is reset and is restored only when the BARs are
15433 * rewritten.
15435 * Because pci_restore_state() does not rewrite a BAR whose value matches
15436 * the previously saved value, the driver needs to write the BARs to 0 to
15437 * force a restore in case of a fatal error.
15439 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
15441 for (off = PCI_BASE_ADDRESS_0;
15442 off <= PCI_BASE_ADDRESS_5; off += 4)
15443 pci_write_config_dword(bp->pdev, off, 0);
15445 pci_restore_state(pdev);
15446 pci_save_state(pdev);
15448 bnxt_inv_fw_health_reg(bp);
15449 bnxt_try_map_fw_health_reg(bp);
15451 /* In some PCIe AER scenarios, firmware may take up to
15452 * 10 seconds to become ready in the worst case.
15455 err = bnxt_try_recover_fw(bp);
15459 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
15462 dev_err(&pdev->dev, "Firmware not ready\n");
15466 err = bnxt_hwrm_func_reset(bp);
15468 result = PCI_ERS_RESULT_RECOVERED;
15470 bnxt_ulp_irq_stop(bp);
15471 bnxt_clear_int_mode(bp);
15472 err = bnxt_init_int_mode(bp);
15473 bnxt_ulp_irq_restart(bp, err);
15477 bnxt_clear_reservations(bp, true);
15484 * bnxt_io_resume - called when traffic can start flowing again.
15485 * @pdev: Pointer to PCI device
15487 * This callback is called when the error recovery driver tells
15488 * us that it's OK to resume normal operation.
15490 static void bnxt_io_resume(struct pci_dev *pdev)
15492 struct net_device *netdev = pci_get_drvdata(pdev);
15493 struct bnxt *bp = netdev_priv(netdev);
15496 netdev_info(bp->dev, "PCI Slot Resume\n");
15499 err = bnxt_hwrm_func_qcaps(bp);
15500 if (!err && netif_running(netdev))
15501 err = bnxt_open(netdev);
15503 bnxt_ulp_start(bp, err);
15505 bnxt_reenable_sriov(bp);
15506 netif_device_attach(netdev);
15512 static const struct pci_error_handlers bnxt_err_handler = {
15513 .error_detected = bnxt_io_error_detected,
15514 .slot_reset = bnxt_io_slot_reset,
15515 .resume = bnxt_io_resume
15518 static struct pci_driver bnxt_pci_driver = {
15519 .name = DRV_MODULE_NAME,
15520 .id_table = bnxt_pci_tbl,
15521 .probe = bnxt_init_one,
15522 .remove = bnxt_remove_one,
15523 .shutdown = bnxt_shutdown,
15524 .driver.pm = BNXT_PM_OPS,
15525 .err_handler = &bnxt_err_handler,
15526 #if defined(CONFIG_BNXT_SRIOV)
15527 .sriov_configure = bnxt_sriov_configure,
15531 static int __init bnxt_init(void)
15536 err = pci_register_driver(&bnxt_pci_driver);
15545 static void __exit bnxt_exit(void)
15547 pci_unregister_driver(&bnxt_pci_driver);
15549 destroy_workqueue(bnxt_pf_wq);
15553 module_init(bnxt_init);
15554 module_exit(bnxt_exit);