1 // SPDX-License-Identifier: GPL-2.0+
2 /* Microchip Sparx5 Switch driver
4 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
7 #include <linux/module.h>
8 #include <linux/phy/phy.h>
11 #include "sparx5_main_regs.h"
12 #include "sparx5_main.h"
13 #include "sparx5_port.h"
15 #define SPX5_ETYPE_TAG_C 0x8100
16 #define SPX5_ETYPE_TAG_S 0x88a8
18 #define SPX5_WAIT_US 1000
19 #define SPX5_WAIT_MAX_US 2000
26 #define PAUSE_DISCARD 0xC
27 #define ETH_MAXLEN (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)
/* Decode the SGMII (Cisco) aneg word received from the link partner and
 * fill in link, speed and duplex in @status.
 */
static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
	status->an_complete = true;
	/* Partner reports link down */
	if (!(lp_abil & LPA_SGMII_LINK)) {
	/* Speed bits of the partner ability word select 10/100/1000 */
	switch (lp_abil & LPA_SGMII_SPD_MASK) {
		status->speed = SPEED_10;
		status->speed = SPEED_100;
		status->speed = SPEED_1000;
	/* Duplex as reported by the partner */
	if (lp_abil & LPA_SGMII_FULL_DUPLEX)
		status->duplex = DUPLEX_FULL;
		status->duplex = DUPLEX_HALF;
/* Decode the clause-37 (1000BaseX) aneg word: resolve duplex and pause
 * from the local (@ld_abil) and link partner (@lp_abil) abilities.
 */
static void decode_cl37_word(u16 lp_abil, uint16_t ld_abil, struct sparx5_port_status *status)
	/* A remote fault from the partner takes the link down */
	status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
	status->an_complete = true;
	status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
			 DUPLEX_FULL : DUPLEX_UNKNOWN; // 1G HDX not supported

	/* Pause resolution: symmetric when both sides advertise pause,
	 * otherwise resolved through the asymmetric pause bits.
	 */
	if ((ld_abil & ADVERTISE_1000XPAUSE) &&
	    (lp_abil & ADVERTISE_1000XPAUSE)) {
		status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
	} else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
		   (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
		status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
		status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
		status->pause = MLO_PAUSE_NONE;
/* Read link status from the low speed (DEV2G5) device of @port:
 * sticky link-down, current link, speed and duplex. When in-band aneg
 * has completed, decode the partner-resolved parameters as well.
 */
static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_status *status)
	u32 portno = port->portno;

	/* Get PCS Link down sticky */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
	status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
	if (status->link_down)	/* Clear the sticky */
		spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));

	/* Get both current Link and Sync status */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
	status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
		       DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);

	/* Fixed-speed modes: speed is given by the portmode */
	if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
		status->speed = SPEED_1000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
		status->speed = SPEED_2500;

	status->duplex = DUPLEX_FULL;

	/* Get PCS ANEG status register */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));

	/* Aneg complete provides more information */
	if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
		lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
		if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
			decode_sgmii_word(lp_adv, status);
			/* Clause-37: also need the locally advertised word */
			value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
			ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
			decode_cl37_word(lp_adv, ld_adv, status);
/* Read link status from the high speed (5G/10G/25G BaseR) device of
 * @port, using the MAC TX monitor idle-state sticky as link indicator.
 */
static int sparx5_get_sfi_status(struct sparx5 *sparx5,
				 struct sparx5_port *port,
				 struct sparx5_port_status *status)
	bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
	u32 portno = port->portno;
	u32 value, dev, tinst;

	/* SFI status only makes sense on a BaseR high speed device */
	if (!high_speed_dev) {
		netdev_err(port->ndev, "error: low speed and SFI mode\n");

	dev = sparx5_to_high_dev(portno);
	tinst = sparx5_port_dev_index(portno);
	inst = spx5_inst_get(sparx5, dev, tinst);

	value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
		/* The link is or has been down. Clear the sticky bit */
		status->link_down = 1;
		spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
		value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));

	/* Link is up when only the idle-state sticky is set */
	status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
	status->duplex = DUPLEX_FULL;
	if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
		status->speed = SPEED_5000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
		status->speed = SPEED_10000;
		status->speed = SPEED_25000;
/* Get link status of 1000Base-X/in-band and SFI ports.
 * Dispatches to the low speed (DEV2G5) or high speed (SFI) status
 * reader based on the configured portmode. Powered-down ports report
 * link down.
 */
int sparx5_get_port_status(struct sparx5 *sparx5,
			   struct sparx5_port *port,
			   struct sparx5_port_status *status)
	memset(status, 0, sizeof(*status));
	status->speed = port->conf.speed;
	if (port->conf.power_down) {
		status->link = false;

	switch (port->conf.portmode) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_QSGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		return sparx5_get_dev2g5_status(sparx5, port, status);
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_25GBASER:
		return sparx5_get_sfi_status(sparx5, port, status);
	case PHY_INTERFACE_MODE_NA:
		netdev_err(port->ndev, "Status not supported");
/* Log a port configuration error of type @errtype and return an error
 * code to propagate to the caller.
 */
static int sparx5_port_error(struct sparx5_port *port,
			     struct sparx5_port_config *conf,
			     enum port_error errtype)
	case SPX5_PERR_SPEED:
		netdev_err(port->ndev,
			   "Interface does not support speed: %u: for %s\n",
			   conf->speed, phy_modes(conf->portmode));
	case SPX5_PERR_IFTYPE:
		netdev_err(port->ndev,
			   "Switch port does not support interface type: %s\n",
			   phy_modes(conf->portmode));
		netdev_err(port->ndev,
			   "Interface configuration error\n");
212 static int sparx5_port_verify_speed(struct sparx5 *sparx5,
213 struct sparx5_port *port,
214 struct sparx5_port_config *conf)
216 if ((sparx5_port_is_2g5(port->portno) &&
217 conf->speed > SPEED_2500) ||
218 (sparx5_port_is_5g(port->portno) &&
219 conf->speed > SPEED_5000) ||
220 (sparx5_port_is_10g(port->portno) &&
221 conf->speed > SPEED_10000))
222 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
224 switch (conf->portmode) {
225 case PHY_INTERFACE_MODE_NA:
227 case PHY_INTERFACE_MODE_1000BASEX:
228 if (conf->speed != SPEED_1000 ||
229 sparx5_port_is_2g5(port->portno))
230 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
231 if (sparx5_port_is_2g5(port->portno))
232 return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
234 case PHY_INTERFACE_MODE_2500BASEX:
235 if (conf->speed != SPEED_2500 ||
236 sparx5_port_is_2g5(port->portno))
237 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
239 case PHY_INTERFACE_MODE_QSGMII:
240 if (port->portno > 47)
241 return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
243 case PHY_INTERFACE_MODE_SGMII:
244 if (conf->speed != SPEED_1000 &&
245 conf->speed != SPEED_100 &&
246 conf->speed != SPEED_10 &&
247 conf->speed != SPEED_2500)
248 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
250 case PHY_INTERFACE_MODE_5GBASER:
251 case PHY_INTERFACE_MODE_10GBASER:
252 case PHY_INTERFACE_MODE_25GBASER:
253 if ((conf->speed != SPEED_5000 &&
254 conf->speed != SPEED_10000 &&
255 conf->speed != SPEED_25000))
256 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
259 return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
264 static bool sparx5_dev_change(struct sparx5 *sparx5,
265 struct sparx5_port *port,
266 struct sparx5_port_config *conf)
268 return sparx5_is_baser(port->conf.portmode) ^
269 sparx5_is_baser(conf->portmode);
/* Poll the QRES resource counters of @portno until all its queues are
 * empty, sleeping between iterations. Times out with an error after a
 * fixed number of polls.
 */
static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
	u32 value, resource, prio, delay_cnt = 0;
	bool poll_src = true;

	/* Resource == 0: Memory tracked per source (SRC-MEM)
	 * Resource == 1: Frame references tracked per source (SRC-REF)
	 * Resource == 2: Memory tracked per destination (DST-MEM)
	 * Resource == 3: Frame references tracked per destination. (DST-REF)
	for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
		base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
		for (prio = 0; prio < SPX5_PRIOS; prio++) {
			value = spx5_rd(sparx5,
					QRES_RES_STAT(base + prio));
			mem = resource == 0 ?
			      "DST-MEM" : "SRC-MEM";
		/* Give up after ~2000 polls of SPX5_WAIT_US..MAX_US each */
		if (delay_cnt++ == 2000) {
			   "Flush timeout port %u. %s queue not empty\n",
		usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
/* Disable and flush @port following the numbered hardware bring-down
 * sequence: stop reception, stop forwarding, flush the queues, reset
 * the MAC/PCS clock domains and finally disable the PCS.
 * @high_spd_dev selects whether the high speed (5G/10G/25G) or the
 * low speed (DEV2G5) device is being disabled.
 */
static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
	u32 tinst = high_spd_dev ?
		    sparx5_port_dev_index(port->portno) : port->portno;
	u32 dev = high_spd_dev ?
		  sparx5_to_high_dev(port->portno) : TARGET_DEV2G5;
	void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
	u32 spd = port->conf.speed;

	/* 1: Reset the PCS Rx clock domain */
	spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
		      DEV10G_DEV_RST_CTRL_PCS_RX_RST,
		      DEV10G_DEV_RST_CTRL(0));

	/* 2: Disable MAC frame reception */
		      DEV10G_MAC_ENA_CFG_RX_ENA,
		      DEV10G_MAC_ENA_CFG(0));

	/* 1: Reset the PCS Rx clock domain */
	spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
		      DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
		      DEV2G5_DEV_RST_CTRL(0));
	/* 2: Disable MAC frame reception */
		      DEV2G5_MAC_ENA_CFG_RX_ENA,
		      DEV2G5_MAC_ENA_CFG(0));

	/* 3: Disable traffic being sent to or from switch port->portno */
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* 4: Disable dequeuing from the egress queues */
	spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 HSCH_PORT_MODE(port->portno));

	/* 5: Disable Flowcontrol */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 QSYS_PAUSE_CFG(port->portno));

	/* Wait time scales with link speed (longer wait at lower speed) */
	spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
	/* 6: Wait while the last frame is exiting the queues */
	usleep_range(8 * spd_prm, 10 * spd_prm);

	/* 7: Flush the queues accociated with the port->portno */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_DST |
		 HSCH_FLUSH_CTRL_FLUSH_SRC |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,

	/* 8: Enable dequeuing from the egress queues */
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 HSCH_PORT_MODE(port->portno));

	/* 9: Wait until flushing is complete */
	err = sparx5_port_flush_poll(sparx5, port->portno);

	/* 10: Reset the MAC clock domain */
	spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST,
		      DEV10G_DEV_RST_CTRL(0));

	/* Low speed device: hold everything in reset, speed sel 3 */
	spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
		      DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
		      DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
		      DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
		      DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
		      DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		      DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
		      DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
		      DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
		      DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
		      DEV2G5_DEV_RST_CTRL(0));

	/* 11: Clear flushing */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,

		u32 pcs = sparx5_to_pcs_dev(port->portno);
		void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);

		/* 12: Disable 5G/10G/25 BaseR PCS */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      PCS10G_BR_PCS_CFG(0));

		if (sparx5_port_is_25g(port->portno))
			/* Disable 25G PCS */
			spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
				 DEV25G_PCS25G_CFG_PCS25G_ENA,
				 DEV25G_PCS25G_CFG(tinst));

		/* 12: Disable 1G PCS */
		spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
			 DEV2G5_PCS1G_CFG_PCS_ENA,
			 DEV2G5_PCS1G_CFG(port->portno));

	/* The port is now flushed and disabled */
/* Calculate the DSM TX stop watermark for @portno at @speed, based on
 * the core clock period and the per-port taxi bus distance.
 * NOTE(review): mac_width, fifo_width and addition are set by code not
 * visible here - presumably derived from @speed; confirm in full file.
 */
static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
			       u32 portno, u32 speed)
	u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
	/* Distance on the taxi bus for each port, indexed by port number */
	const u32 taxi_dist[SPX5_PORTS_ALL] = {
		6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		4, 6, 8, 4, 6, 8, 6, 8,
		2, 2, 2, 2, 2, 2, 2, 4, 2
	u32 mac_per = 6400, tmp1, tmp2, tmp3;

	/* Fixed-point (x1000) watermark calculation, rounded up */
	tmp1 = 1000 * mac_width / fifo_width;
	tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
		       * sys_clk / mac_per);
	tmp3 = tmp1 * tmp2 / 1000;
	return (tmp3 + 2000 + 999) / 1000 + addition;
/* Configure port muxing:
 * QSGMII: 4x2G5 devices
 * Only reconfigures when the portmode actually changes.
 */
static int sparx5_port_mux_set(struct sparx5 *sparx5,
			       struct sparx5_port *port,
			       struct sparx5_port_config *conf)
	u32 portno = port->portno;

	if (port->conf.portmode == conf->portmode)
		return 0; /* Nothing to do */

	switch (conf->portmode) {
	case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */
		/* One QSGMII block per group of 4 ports */
		inst = (portno - portno % 4) / 4;
			 PORT_CONF_QSGMII_ENA);

		if ((portno / 4 % 2) == 0) {
			/* Affects d0-d3,d8-d11..d40-d43 */
			spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
				 PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
				 PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
				 PORT_CONF_USGMII_CFG_QUAD_MODE,
				 PORT_CONF_USGMII_CFG((portno / 8)));
/* Configure VLAN awareness and the maximum number of accepted VLAN
 * tags for @port, in both the low speed (DEV2G5) MAC and - for non-2G5
 * ports - the high speed (DEV10G-style) MAC.
 */
static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
				    struct sparx5_port *port)
	enum sparx5_port_max_tags max_tags = port->max_vlan_tags;
	int tag_ct          = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
			      max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
	bool dtag           = max_tags == SPX5_PORT_MAX_TAGS_TWO;
	enum sparx5_vlan_port_type vlan_type = port->vlan_type;
	bool dotag          = max_tags != SPX5_PORT_MAX_TAGS_NONE;
	u32 dev             = sparx5_to_high_dev(port->portno);
	u32 tinst           = sparx5_port_dev_index(port->portno);
	void __iomem *inst  = spx5_inst_get(sparx5, dev, tinst);

	/* Select tag ethertype from the port VLAN type */
	etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
		 vlan_type == SPX5_VLAN_PORT_TYPE_C ?
		 SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);

	spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
		DEV2G5_MAC_TAGS_CFG(port->portno));

	/* 2G5-only ports have no high speed device to configure */
	if (sparx5_port_is_2g5(port->portno))

	spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
		      DEV10G_MAC_TAGS_CFG_TAG_ID |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA,
		      DEV10G_MAC_TAGS_CFG(0, 0));

	spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
		      DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
		      DEV10G_MAC_NUM_TAGS_CFG(0));

	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
		      DEV10G_MAC_MAXLEN_CFG(0));
/* Calculate the QFWD forwarding urgency for @speed in units of the
 * core clock period.
 * NOTE(review): urg is computed by code not visible here - presumably
 * a per-speed table; confirm in the full file.
 */
int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
	u32 clk_period_ps = 1600; /* 625Mhz for now */

	return urg / clk_period_ps - 1;
/* Encode a watermark value into the hardware multiplier format:
 * large values are expressed in units of 16 with an offset of 2048.
 */
static u16 sparx5_wm_enc(u16 value)
		return 2048 + value / 16;
/* Configure flow control for @port from @conf: half-duplex
 * backpressure, obeying received pause frames, forward pressure and
 * pause frame generation (via the pause-stop watermark).
 */
static int sparx5_port_fc_setup(struct sparx5 *sparx5,
				struct sparx5_port *port,
				struct sparx5_port_config *conf)
	bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
	u32 pause_stop = 0xFFF - 1; /* FC gen disabled */

	if (conf->pause & MLO_PAUSE_TX)
		pause_stop = sparx5_wm_enc(4  * (ETH_MAXLEN /
						 SPX5_BUFFER_CELL_SZ));

	/* Set HDX flowcontrol */
	spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
		 DSM_MAC_CFG_HDX_BACKPREASSURE,
		 DSM_MAC_CFG(port->portno));

	/* Obey flowcontrol  */
	spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
		 DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
		 DSM_RX_PAUSE_CFG(port->portno));

	/* Disable forward pressure */
	spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
		 QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
		 QSYS_FWD_PRESSURE(port->portno));

	/* Generate pause frames */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 QSYS_PAUSE_CFG(port->portno));
673 static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
675 if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
676 return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
678 return 1; /* Enable SGMII Aneg */
/* Configure the SerDes PHY of @port according to @conf: media, speed,
 * optional reset and mode. BaseR modes are mapped to 10GBASE-R toward
 * the serdes driver with the speed carrying the actual rate.
 */
int sparx5_serdes_set(struct sparx5 *sparx5,
		      struct sparx5_port *port,
		      struct sparx5_port_config *conf)
	int portmode, err, speed = conf->speed;

	/* Only the first port in a QSGMII quad owns the serdes */
	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
	    ((port->portno % 4) != 0)) {

	if (sparx5_is_baser(conf->portmode)) {
		if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
		else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)

	err = phy_set_media(port->serdes, conf->media);

	err = phy_set_speed(port->serdes, speed);

	if (conf->serdes_reset) {
		err = phy_reset(port->serdes);

	/* Configure SerDes with port parameters
	 * For BaseR, the serdes driver supports 10GGBASE-R and speed 5G/10G/25G
	 */
	portmode = conf->portmode;
	if (sparx5_is_baser(conf->portmode))
		portmode = PHY_INTERFACE_MODE_10GBASER;
	err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);

	conf->serdes_reset = false;
/* Configure the 1G PCS of @port: select SGMII or 1000BaseX mode,
 * enable the PCS, set up or clear in-band aneg, and take the PCS out
 * of reset.
 */
static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
				   struct sparx5_port *port,
				   struct sparx5_port_config *conf)
	bool sgmii = false, inband_aneg = false;

	if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
	    conf->portmode == PHY_INTERFACE_MODE_QSGMII)
		inband_aneg = true; /* Cisco-SGMII in-band-aneg */
	else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
		inband_aneg = true; /* Clause-37 in-band-aneg */

	err = sparx5_serdes_set(sparx5, port, conf);

	sgmii = true; /* Phy is connected to the MAC */

	/* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
	spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
		 DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
		 DEV2G5_PCS1G_MODE_CFG(port->portno));

	/* Enable the 1G PCS */
	spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
		DEV2G5_PCS1G_CFG(port->portno));

		u16 abil = sparx5_get_aneg_word(conf);

		/* Enable in-band aneg */
		spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
			DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
			DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
			DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
			DEV2G5_PCS1G_ANEG_CFG(port->portno));

		/* No in-band aneg: clear the aneg config */
		spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));

	/* Take PCS out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
		 DEV2G5_DEV_RST_CTRL(port->portno));
/* Configure the high speed (5G/10G/25G BaseR) PCS and MAC of @port:
 * set up the serdes, enable the matching PCS, enable the MAC and take
 * the device out of reset. No in-band aneg in these modes.
 */
static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_config *conf)
	u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
	u32 pix = sparx5_port_dev_index(port->portno);
	u32 dev = sparx5_to_high_dev(port->portno);
	u32 pcs = sparx5_to_pcs_dev(port->portno);
	void __iomem *devinst;
	void __iomem *pcsinst;

	devinst = spx5_inst_get(sparx5, dev, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/*  SFI : No in-band-aneg. Speeds 5G/10G/25G */
	err = sparx5_serdes_set(sparx5, port, conf);
	if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
		/* Enable PCS for 25G device, speed 25G */
		spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
			 DEV25G_PCS25G_CFG_PCS25G_ENA,
			 DEV25G_PCS25G_CFG(pix));
		/* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      PCS10G_BR_PCS_CFG(0));

	/* Enable 5G/10G/25G MAC module */
	spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
		     DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
		     DEV10G_MAC_ENA_CFG(0));

	/* Take the device out of reset */
	spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
		      DEV10G_DEV_RST_CTRL_PCS_RX_RST |
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST |
		      DEV10G_DEV_RST_CTRL_SPEED_SEL,
		      DEV10G_DEV_RST_CTRL(0));
/* Switch between 1G/2500 and 5G/10G/25G devices
 * Clears or sets the port's bit in the matching DEV*G mode register,
 * depending on whether the high speed device (@hsd) is selected.
 */
static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
	int bt_indx = BIT(sparx5_port_dev_index(port));

	if (sparx5_port_is_5g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 PORT_CONF_DEV5G_MODES);
	} else if (sparx5_port_is_10g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 PORT_CONF_DEV10G_MODES);
	} else if (sparx5_port_is_25g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 PORT_CONF_DEV25G_MODES);
/* Configure speed/duplex dependent registers
 * of the low speed (DEV2G5) device: gigabit mode, IFG gaps, frame
 * aging, MAC enable, and speed select with MAC out of reset.
 */
static int sparx5_port_config_low_set(struct sparx5 *sparx5,
				      struct sparx5_port *port,
				      struct sparx5_port_config *conf)
	u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
	bool fdx = conf->duplex == DUPLEX_FULL;
	int spd = conf->speed;

	/* Derive per-speed clock select, gigabit mode and IFG values */
	clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
	gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
	tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
	hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
	hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;

	spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
		 DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA,
		 DEV2G5_MAC_MODE_CFG(port->portno));

	/* Set MAC IFG Gaps */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
		DEV2G5_MAC_IFG_CFG(port->portno));

	/* Disabling frame aging when in HDX (due to HDX issue) */
	spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
		 HSCH_PORT_MODE_AGE_DIS,
		 HSCH_PORT_MODE(port->portno));

	/* Enable MAC module */
	spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
		DEV2G5_MAC_ENA_CFG_TX_ENA,
		DEV2G5_MAC_ENA_CFG(port->portno));

	/* Select speed and take MAC out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
		 DEV2G5_DEV_RST_CTRL(port->portno));
/* (Re)configure the PCS of @port for @conf: switch device if the
 * low/high speed device selection changed, disable the port before
 * re-configuring, then program the high or low speed PCS and adjust
 * where the 1G counters are collected (ASM/DSM).
 */
int sparx5_port_pcs_set(struct sparx5 *sparx5,
			struct sparx5_port *port,
			struct sparx5_port_config *conf)
	bool high_speed_dev = sparx5_is_baser(conf->portmode);

	if (sparx5_dev_change(sparx5, port, conf)) {
		sparx5_dev_switch(sparx5, port->portno, high_speed_dev);

		/* Disable the not-in-use device */
		err = sparx5_port_disable(sparx5, port, !high_speed_dev);

	/* Disable the port before re-configuring */
	err = sparx5_port_disable(sparx5, port, high_speed_dev);

		err = sparx5_port_pcs_high_set(sparx5, port, conf);
		err = sparx5_port_pcs_low_set(sparx5, port, conf);

	/* Enable/disable 1G counters in ASM */
	spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
		 ASM_PORT_CFG_CSC_STAT_DIS,
		 ASM_PORT_CFG(port->portno));

	/* Enable/disable 1G counters in DSM */
	spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
		 DSM_BUF_CFG_CSC_STAT_DIS,
		 DSM_BUF_CFG(port->portno));
/* Apply a full port configuration from @conf: verify speed, program
 * the low speed device (high speed already configured elsewhere), set
 * up flow control, the DSM stop watermark and enable the port in the
 * queue system with the computed forwarding urgency.
 */
int sparx5_port_config(struct sparx5 *sparx5,
		       struct sparx5_port *port,
		       struct sparx5_port_config *conf)
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	int err, urgency, stop_wm;

	err = sparx5_port_verify_speed(sparx5, port, conf);

	/* high speed device is already configured */
		sparx5_port_config_low_set(sparx5, port, conf);

	/* Configure flow control */
	err = sparx5_port_fc_setup(sparx5, port, conf);

	/* Set the DSM stop watermark */
	stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
	spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
		 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
		 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Enable port in queue system */
	urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* Save the new values */
1012 /* Initialize port config to default */
1013 int sparx5_port_init(struct sparx5 *sparx5,
1014 struct sparx5_port *port,
1015 struct sparx5_port_config *conf)
1017 u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
1018 u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
1019 u32 devhigh = sparx5_to_high_dev(port->portno);
1020 u32 pix = sparx5_port_dev_index(port->portno);
1021 u32 pcs = sparx5_to_pcs_dev(port->portno);
1022 bool sd_pol = port->signd_active_high;
1023 bool sd_sel = !port->signd_internal;
1024 bool sd_ena = port->signd_enable;
1025 u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
1026 void __iomem *devinst;
1027 void __iomem *pcsinst;
1030 devinst = spx5_inst_get(sparx5, devhigh, pix);
1031 pcsinst = spx5_inst_get(sparx5, pcs, pix);
1033 /* Set the mux port mode */
1034 err = sparx5_port_mux_set(sparx5, port, conf);
1038 /* Configure MAC vlan awareness */
1039 err = sparx5_port_max_tags_set(sparx5, port);
1043 /* Set Max Length */
1044 spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
1045 DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
1047 DEV2G5_MAC_MAXLEN_CFG(port->portno));
1049 /* 1G/2G5: Signal Detect configuration */
1050 spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
1051 DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
1052 DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
1054 DEV2G5_PCS1G_SD_CFG(port->portno));
1056 /* Set Pause WM hysteresis */
1057 spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
1058 QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
1059 QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
1060 QSYS_PAUSE_CFG_PAUSE_START |
1061 QSYS_PAUSE_CFG_PAUSE_STOP |
1062 QSYS_PAUSE_CFG_PAUSE_ENA,
1064 QSYS_PAUSE_CFG(port->portno));
1066 /* Port ATOP. Frames are tail dropped when this WM is hit */
1067 spx5_wr(QSYS_ATOP_ATOP_SET(atop),
1069 QSYS_ATOP(port->portno));
1071 /* Discard pause frame 01-80-C2-00-00-01 */
1072 spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));
1074 /* Discard SMAC multicast */
1075 spx5_rmw(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(0),
1076 ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
1077 sparx5, ANA_CL_FILTER_CTRL(port->portno));
1079 if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
1080 conf->portmode == PHY_INTERFACE_MODE_SGMII) {
1081 err = sparx5_serdes_set(sparx5, port, conf);
1085 if (!sparx5_port_is_2g5(port->portno))
1086 /* Enable shadow device */
1087 spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
1088 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
1090 DSM_DEV_TX_STOP_WM_CFG(port->portno));
1092 sparx5_dev_switch(sparx5, port->portno, false);
1094 if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
1095 // All ports must be PCS enabled in QSGMII mode
1096 spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
1097 DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
1099 DEV2G5_DEV_RST_CTRL(port->portno));
1101 /* Default IFGs for 1G */
1102 spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
1103 DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
1104 DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
1106 DEV2G5_MAC_IFG_CFG(port->portno));
1108 if (sparx5_port_is_2g5(port->portno))
1109 return 0; /* Low speed device only - return */
1111 /* Now setup the high speed device */
1112 if (conf->portmode == PHY_INTERFACE_MODE_NA)
1113 conf->portmode = PHY_INTERFACE_MODE_10GBASER;
1115 if (sparx5_is_baser(conf->portmode))
1116 sparx5_dev_switch(sparx5, port->portno, true);
1118 /* Set Max Length */
1119 spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
1120 DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
1122 DEV10G_MAC_ENA_CFG(0));
1124 /* Handle Signal Detect in 10G PCS */
1125 spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
1126 PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
1127 PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
1129 PCS10G_BR_PCS_SD_CFG(0));
1131 if (sparx5_port_is_25g(port->portno)) {
1132 /* Handle Signal Detect in 25G PCS */
1133 spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
1134 DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
1135 DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
1137 DEV25G_PCS25G_SD_CFG(pix));
/* Allow (@enable true) or block frame forwarding for @port in the
 * queue system.
 */
void sparx5_port_enable(struct sparx5_port *port, bool enable)
	struct sparx5 *sparx5 = port->sparx5;

	/* Enable port for frame transfer? */
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 QFWD_SWITCH_PORT_MODE(port->portno));
/* Apply the full QoS configuration for @port: DSCP and PCP
 * classification, PCP/DSCP rewrite maps and the port defaults.
 */
int sparx5_port_qos_set(struct sparx5_port *port,
			struct sparx5_port_qos *qos)
	sparx5_port_qos_dscp_set(port, &qos->dscp);
	sparx5_port_qos_pcp_set(port, &qos->pcp);
	sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
	sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
	sparx5_port_qos_default_set(port, qos);
/* Configure egress PCP/DEI rewrite for @port: select mapped or
 * classified tag control mode and program the per-QoS-class/DP rewrite
 * tables for both DE0 and DE1 levels.
 */
int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
				 struct sparx5_port_qos_pcp_rewr *qos)
	int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED;
	struct sparx5 *sparx5 = port->sparx5;

	/* Use mapping table, with classified QoS as index, to map QoS and DP
	 * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified
	 * PCP. Classified PCP equals frame PCP.
		mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;

	spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) |
		 REW_TAG_CTRL_TAG_DEI_CFG_SET(mode),
		 REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG,
		 port->sparx5, REW_TAG_CTRL(port->portno));

	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		/* Extract PCP and DEI */
		pcp = qos->map.map[i];
		if (pcp > SPARX5_PORT_QOS_PCP_COUNT)

		/* Rewrite PCP and DEI, for each classified QoS class and DP
		 * level. This table is only used if tag ctrl mode is set to
		 * 0:0nd   - prio=0 and dp:0 => pcp=0 and dei=0
		 * 0:0de - prio=0 and dp:1 => pcp=0 and dei=1
		spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
			 REW_PCP_MAP_DE1_PCP_DE1, sparx5,
			 REW_PCP_MAP_DE1(port->portno, i));

		spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei),
			 REW_DEI_MAP_DE1_DEI_DE1, port->sparx5,
			 REW_DEI_MAP_DE1(port->portno, i));

		spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp),
			 REW_PCP_MAP_DE0_PCP_DE0, sparx5,
			 REW_PCP_MAP_DE0(port->portno, i));

		spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei),
			 REW_DEI_MAP_DE0_DEI_DE0, port->sparx5,
			 REW_DEI_MAP_DE0(port->portno, i));
/* Configure ingress PCP/DEI classification for @port: enable/disable
 * PCP- and DP-based classification and program the PCP+DEI to
 * priority/DP mapping table.
 */
int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
			    struct sparx5_port_qos_pcp *qos)
	struct sparx5 *sparx5 = port->sparx5;
	u8 *pcp_itr = qos->map.map;

	/* Enable/disable pcp and dp for qos classification. */
	spx5_rmw(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(qos->qos_enable) |
		 ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(qos->dp_enable),
		 ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA | ANA_CL_QOS_CFG_PCP_DEI_DP_ENA,
		 sparx5, ANA_CL_QOS_CFG(port->portno));

	/* Map each pcp and dei value to priority and dp */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		pcp = *(pcp_itr + i);
		/* First half of the table is DEI=0, second half DEI=1 */
		dp = (i < SPARX5_PORT_QOS_PCP_COUNT) ? 0 : 1;
		spx5_rmw(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(pcp) |
			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(dp),
			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL |
			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, sparx5,
			 ANA_CL_PCP_DEI_MAP_CFG(port->portno, i));
/* Select the DSCP rewrite mode for @port in the classifier */
void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
	spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode),
		 ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5,
		 ANA_CL_QOS_CFG(port->portno));
/* Configure egress DSCP rewrite for @port and program the global
 * QoS-class/DP to DSCP mapping table.
 */
int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
				  struct sparx5_port_qos_dscp_rewr *qos)
	struct sparx5 *sparx5 = port->sparx5;

	/* On egress, rewrite DSCP value to either classified DSCP or frame
	 * DSCP. If enabled; classified DSCP, if disabled; frame DSCP.
	spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr),
		 REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5,
		 REW_DSCP_MAP(port->portno));

	/* On ingress, map each classified QoS class and DP to classified DSCP
	 * value. This mapping table is global for all ports.
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		dscp = qos->map.map[i];
		spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
			 ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
			 ANA_CL_QOS_MAP_CFG(i));
/* Configure ingress DSCP classification for @port: enable/disable
 * DSCP- and DP-based classification, program the global DSCP to
 * priority/DP table and set per-DSCP trust.
 */
int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
			     struct sparx5_port_qos_dscp *qos)
	struct sparx5 *sparx5 = port->sparx5;
	u8 *dscp = qos->map.map;

	/* Enable/disable dscp and dp for qos classification.
	 * Disable rewrite of dscp values for now.
	spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
		 ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
		 ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
		 ANA_CL_QOS_CFG(port->portno));

	/* Map each dscp value to priority and dp */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
			 ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
			 ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
			 ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
			 ANA_CL_DSCP_CFG(i));

	/* Set per-dscp trust */
	for (i = 0; i <  ARRAY_SIZE(qos->map.map); i++) {
		if (qos->qos_enable) {
			spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
				 ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
				 ANA_CL_DSCP_CFG(i));
/* Set the default QoS values for @port: default priority/DP for all
 * frames and default PCP/DEI for untagged frames.
 */
int sparx5_port_qos_default_set(const struct sparx5_port *port,
				const struct sparx5_port_qos *qos)
	struct sparx5 *sparx5 = port->sparx5;

	/* Set default prio and dp level */
	spx5_rmw(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(qos->default_prio) |
		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(0),
		 ANA_CL_QOS_CFG_DEFAULT_QOS_VAL |
		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL,
		 sparx5, ANA_CL_QOS_CFG(port->portno));

	/* Set default pcp and dei for untagged frames */
	spx5_rmw(ANA_CL_VLAN_CTRL_PORT_PCP_SET(0) |
		 ANA_CL_VLAN_CTRL_PORT_DEI_SET(0),
		 ANA_CL_VLAN_CTRL_PORT_PCP |
		 ANA_CL_VLAN_CTRL_PORT_DEI,
		 sparx5, ANA_CL_VLAN_CTRL(port->portno));