// SPDX-License-Identifier: GPL-2.0
/*
 * QMC driver
 *
 * Copyright 2022 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */

#include <soc/fsl/qe/qmc.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/fsl/cpm.h>
#include <sysdev/fsl_soc.h>

/* SCC general mode register low (32 bits) */
#define SCC_GSMRL	0x00
#define SCC_GSMRL_ENR		(1 << 5)
#define SCC_GSMRL_ENT		(1 << 4)
#define SCC_GSMRL_MODE_QMC	(0x0A << 0)

/* SCC general mode register high (32 bits) */
#define SCC_GSMRH	0x04
#define SCC_GSMRH_CTSS	(1 << 7)
#define SCC_GSMRH_CDS	(1 << 8)
#define SCC_GSMRH_CTSP	(1 << 9)
#define SCC_GSMRH_CDP	(1 << 10)

/* SCC event register (16 bits) */
#define SCC_SCCE	0x10
#define SCC_SCCE_IQOV	(1 << 3)
#define SCC_SCCE_GINT	(1 << 2)
#define SCC_SCCE_GUN	(1 << 1)
#define SCC_SCCE_GOV	(1 << 0)

/* SCC mask register (16 bits) */
#define SCC_SCCM	0x14

/* Multichannel base pointer (32 bits) */
#define QMC_GBL_MCBASE		0x00
/* Multichannel controller state (16 bits) */
#define QMC_GBL_QMCSTATE	0x04
/* Maximum receive buffer length (16 bits) */
#define QMC_GBL_MRBLR		0x06
/* Tx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_TX_S_PTR	0x08
/* Rx pointer (16 bits) */
#define QMC_GBL_RXPTR		0x0A
/* Global receive frame threshold (16 bits) */
#define QMC_GBL_GRFTHR		0x0C
/* Global receive frame count (16 bits) */
#define QMC_GBL_GRFCNT		0x0E
/* Multichannel interrupt base address (32 bits) */
#define QMC_GBL_INTBASE		0x10
/* Multichannel interrupt pointer (32 bits) */
#define QMC_GBL_INTPTR		0x14
/* Rx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_RX_S_PTR	0x18
/* Tx pointer (16 bits) */
#define QMC_GBL_TXPTR		0x1A
/* CRC constant (32 bits) */
#define QMC_GBL_C_MASK32	0x1C
/* Time slot assignment table Rx (32 x 16 bits) */
#define QMC_GBL_TSATRX		0x20
/* Time slot assignment table Tx (32 x 16 bits) */
#define QMC_GBL_TSATTX		0x60
/* CRC constant (16 bits) */
#define QMC_GBL_C_MASK16	0xA0

/* TSA entry (16-bit entry in TSATRX and TSATTX) */
#define QMC_TSA_VALID		(1 << 15)
#define QMC_TSA_WRAP		(1 << 14)
#define QMC_TSA_MASK		(0x303F)
#define QMC_TSA_CHANNEL(x)	((x) << 6)

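/*
 * A time slot entry routing data to (or from) a channel is built as below
 * (sketch; channel 3 is arbitrary here):
 *
 *	u16 entry = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(3);
 *
 * QMC_TSA_WRAP is set on the last entry of a table to make the controller
 * wrap back to the first one.
 */
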
/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_TBASE	0x00

/* Channel mode register (16 bits) */
#define QMC_SPE_CHAMR	0x02
#define QMC_SPE_CHAMR_MODE_HDLC		(1 << 15)
#define QMC_SPE_CHAMR_MODE_TRANSP	((0 << 15) | (1 << 13))
#define QMC_SPE_CHAMR_ENT		(1 << 12)
#define QMC_SPE_CHAMR_POL		(1 << 8)
#define QMC_SPE_CHAMR_HDLC_IDLM		(1 << 13)
#define QMC_SPE_CHAMR_HDLC_CRC		(1 << 7)
#define QMC_SPE_CHAMR_HDLC_NOF		(0x0f << 0)
#define QMC_SPE_CHAMR_TRANSP_RD		(1 << 14)
#define QMC_SPE_CHAMR_TRANSP_SYNC	(1 << 10)

/* Tx internal state (32 bits) */
#define QMC_SPE_TSTATE	0x04
/* Tx buffer descriptor pointer (16 bits) */
#define QMC_SPE_TBPTR	0x0C
/* Zero-insertion state (32 bits) */
#define QMC_SPE_ZISTATE	0x14
/* Channel's interrupt mask flags (16 bits) */
#define QMC_SPE_INTMSK	0x1C
/* Rx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_RBASE	0x20
/* HDLC: Maximum frame length register (16 bits) */
#define QMC_SPE_MFLR	0x22
/* TRANSPARENT: Transparent maximum receive length (16 bits) */
#define QMC_SPE_TMRBLR	0x22
/* Rx internal state (32 bits) */
#define QMC_SPE_RSTATE	0x24
/* Rx buffer descriptor pointer (16 bits) */
#define QMC_SPE_RBPTR	0x2C
/* Packs 4 bytes to 1 long word before writing to buffer (32 bits) */
#define QMC_SPE_RPACK	0x30
/* Zero deletion state (32 bits) */
#define QMC_SPE_ZDSTATE	0x34

/* Transparent synchronization (16 bits) */
#define QMC_SPE_TRNSYNC	0x3C
#define QMC_SPE_TRNSYNC_RX(x)	((x) << 8)
#define QMC_SPE_TRNSYNC_TX(x)	((x) << 0)

/* Interrupt related registers bits */
#define QMC_INT_V		(1 << 15)
#define QMC_INT_W		(1 << 14)
#define QMC_INT_NID		(1 << 13)
#define QMC_INT_IDL		(1 << 12)
#define QMC_INT_GET_CHANNEL(x)	(((x) & 0x0FC0) >> 6)
#define QMC_INT_MRF		(1 << 5)
#define QMC_INT_UN		(1 << 4)
#define QMC_INT_RXF		(1 << 3)
#define QMC_INT_BSY		(1 << 2)
#define QMC_INT_TXB		(1 << 1)
#define QMC_INT_RXB		(1 << 0)

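/*
 * A valid interrupt table entry is decoded roughly as follows (sketch;
 * see qmc_irq_gint() below for the real handling):
 *
 *	if (int_entry & QMC_INT_V) {
 *		chan_id = QMC_INT_GET_CHANNEL(int_entry);
 *		if (int_entry & QMC_INT_TXB)
 *			... (a Tx buffer completed)
 *		if (int_entry & QMC_INT_RXB)
 *			... (an Rx buffer completed)
 *	}
 */
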
/* BD related registers bits */
#define QMC_BD_RX_E	(1 << 15)
#define QMC_BD_RX_W	(1 << 13)
#define QMC_BD_RX_I	(1 << 12)
#define QMC_BD_RX_L	(1 << 11)
#define QMC_BD_RX_F	(1 << 10)
#define QMC_BD_RX_CM	(1 << 9)
#define QMC_BD_RX_UB	(1 << 7)
#define QMC_BD_RX_LG	(1 << 5)
#define QMC_BD_RX_NO	(1 << 4)
#define QMC_BD_RX_AB	(1 << 3)
#define QMC_BD_RX_CR	(1 << 2)

#define QMC_BD_TX_R	(1 << 15)
#define QMC_BD_TX_W	(1 << 13)
#define QMC_BD_TX_I	(1 << 12)
#define QMC_BD_TX_L	(1 << 11)
#define QMC_BD_TX_TC	(1 << 10)
#define QMC_BD_TX_CM	(1 << 9)
#define QMC_BD_TX_UB	(1 << 7)
#define QMC_BD_TX_PAD	(0x0f << 0)

/* Number of BDs and interrupt items */
#define QMC_NB_TXBDS	8
#define QMC_NB_RXBDS	8
#define QMC_NB_INTS	128

struct qmc_xfer_desc {
	union {
		void (*tx_complete)(void *context);
		void (*rx_complete)(void *context, size_t length, unsigned int flags);
	};
	void *context;
};

struct qmc_chan {
	struct list_head list;
	unsigned int id;
	struct qmc *qmc;
	void __iomem *s_param;
	enum qmc_mode mode;
	u64 tx_ts_mask_avail;
	u64 tx_ts_mask;
	u64 rx_ts_mask_avail;
	u64 rx_ts_mask;
	bool is_reverse_data;

	spinlock_t tx_lock;
	cbd_t __iomem *txbds;
	cbd_t __iomem *txbd_free;
	cbd_t __iomem *txbd_done;
	struct qmc_xfer_desc tx_desc[QMC_NB_TXBDS];
	u64 nb_tx_underrun;
	bool is_tx_stopped;

	spinlock_t rx_lock;
	cbd_t __iomem *rxbds;
	cbd_t __iomem *rxbd_free;
	cbd_t __iomem *rxbd_done;
	struct qmc_xfer_desc rx_desc[QMC_NB_RXBDS];
	u64 rx_pending;
	u64 nb_rx_busy;
	bool is_rx_halted;
	bool is_rx_stopped;
};

struct qmc {
	struct device *dev;
	struct tsa_serial *tsa_serial;
	void __iomem *scc_regs;
	void __iomem *scc_pram;
	void __iomem *dpram;
	u16 scc_pram_offset;
	cbd_t __iomem *bd_table;
	dma_addr_t bd_dma_addr;
	size_t bd_size;
	u16 __iomem *int_table;
	u16 __iomem *int_curr;
	dma_addr_t int_dma_addr;
	size_t int_size;
	struct list_head chan_head;
	struct qmc_chan *chans[64];
};

static void qmc_write16(void __iomem *addr, u16 val)
{
	iowrite16be(val, addr);
}

static u16 qmc_read16(void __iomem *addr)
{
	return ioread16be(addr);
}

static void qmc_setbits16(void __iomem *addr, u16 set)
{
	qmc_write16(addr, qmc_read16(addr) | set);
}

static void qmc_clrbits16(void __iomem *addr, u16 clr)
{
	qmc_write16(addr, qmc_read16(addr) & ~clr);
}

static void qmc_clrsetbits16(void __iomem *addr, u16 clr, u16 set)
{
	qmc_write16(addr, (qmc_read16(addr) & ~clr) | set);
}

static void qmc_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}

static u32 qmc_read32(void __iomem *addr)
{
	return ioread32be(addr);
}

static void qmc_setbits32(void __iomem *addr, u32 set)
{
	qmc_write32(addr, qmc_read32(addr) | set);
}

static void qmc_clrbits32(void __iomem *addr, u32 clr)
{
	qmc_write32(addr, qmc_read32(addr) & ~clr);
}

int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
	struct tsa_serial_info tsa_info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
	if (ret)
		return ret;

	info->mode = chan->mode;
	info->rx_fs_rate = tsa_info.rx_fs_rate;
	info->rx_bit_rate = tsa_info.rx_bit_rate;
	info->nb_tx_ts = hweight64(chan->tx_ts_mask);
	info->tx_fs_rate = tsa_info.tx_fs_rate;
	info->tx_bit_rate = tsa_info.tx_bit_rate;
	info->nb_rx_ts = hweight64(chan->rx_ts_mask);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);

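/*
 * Typical use from a consumer driver (sketch; my_dev and its members are
 * hypothetical):
 *
 *	struct qmc_chan_info info;
 *	int ret;
 *
 *	ret = qmc_chan_get_info(my_dev->chan, &info);
 *	if (ret)
 *		return ret;
 *	if (info.mode != QMC_HDLC)
 *		return -EINVAL;
 */
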
int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
{
	if (param->mode != chan->mode)
		return -EINVAL;

	switch (param->mode) {
	case QMC_HDLC:
		if ((param->hdlc.max_rx_buf_size % 4) ||
		    (param->hdlc.max_rx_buf_size < 8))
			return -EINVAL;

		qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
			    param->hdlc.max_rx_buf_size - 8);
		qmc_write16(chan->s_param + QMC_SPE_MFLR,
			    param->hdlc.max_rx_frame_size);
		if (param->hdlc.is_crc32) {
			qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		} else {
			qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		}
		break;

	case QMC_TRANSPARENT:
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
			    param->transp.max_rx_buf_size);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(qmc_chan_set_param);

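/*
 * Example: switching an HDLC channel to 32-bit CRC (sketch; the sizes are
 * arbitrary, max_rx_buf_size must be a multiple of 4 and at least 8):
 *
 *	struct qmc_chan_param param = {
 *		.mode = QMC_HDLC,
 *		.hdlc.max_rx_buf_size = 1520,
 *		.hdlc.max_rx_frame_size = 1518,
 *		.hdlc.is_crc32 = true,
 *	};
 *
 *	ret = qmc_chan_set_param(chan, &param);
 */
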
int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			  void (*complete)(void *context), void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * R bit  UB bit
	 *    0     0 : The BD is free
	 *    1     1 : The BD is in use, waiting for transfer
	 *    0     1 : The BD is in use, waiting for completion
	 *    1     0 : Should not happen
	 */
	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
		/* We are full ... */
		ret = -EBUSY;
		goto end_unlock;
	}

	qmc_write16(&bd->cbd_datlen, length);
	qmc_write32(&bd->cbd_bufaddr, addr);

	xfer_desc = &chan->tx_desc[bd - chan->txbds];
	xfer_desc->tx_complete = complete;
	xfer_desc->context = context;

	/* Activate the descriptor */
	ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
	wmb(); /* Be sure to flush the descriptor before control update */
	qmc_write16(&bd->cbd_sc, ctrl);

	if (!chan->is_tx_stopped)
		qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	if (ctrl & QMC_BD_TX_W)
		chan->txbd_free = chan->txbds;
	else
		chan->txbd_free++;

	ret = 0;

end_unlock:
	spin_unlock_irqrestore(&chan->tx_lock, flags);

	return ret;
}
EXPORT_SYMBOL(qmc_chan_write_submit);

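/*
 * Example: queueing a transmit buffer (sketch; my_dev, my_tx_done and the
 * already-mapped DMA buffer are hypothetical consumer-side items):
 *
 *	static void my_tx_done(void *context)
 *	{
 *		struct my_dev *dev = context;
 *		... (release or recycle the buffer)
 *	}
 *
 *	ret = qmc_chan_write_submit(dev->chan, dma_addr, len,
 *				    my_tx_done, dev);
 *
 * A -EBUSY return means that all Tx descriptors are currently in use.
 */
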
static void qmc_chan_write_done(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	void (*complete)(void *context);
	unsigned long flags;
	void *context;
	cbd_t __iomem *bd;
	u16 ctrl;

	/*
	 * R bit  UB bit
	 *    0     0 : The BD is free
	 *    1     1 : The BD is in use, waiting for transfer
	 *    0     1 : The BD is in use, waiting for completion
	 *    1     0 : Should not happen
	 */
	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	while (!(ctrl & QMC_BD_TX_R)) {
		if (!(ctrl & QMC_BD_TX_UB))
			goto end;

		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		complete = xfer_desc->tx_complete;
		context = xfer_desc->context;
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_TX_UB);

		if (ctrl & QMC_BD_TX_W)
			chan->txbd_done = chan->txbds;
		else
			chan->txbd_done++;

		if (complete) {
			spin_unlock_irqrestore(&chan->tx_lock, flags);
			complete(context);
			spin_lock_irqsave(&chan->tx_lock, flags);
		}

		bd = chan->txbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			 void (*complete)(void *context, size_t length, unsigned int flags),
			 void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * E bit  UB bit
	 *    0     0 : The BD is free
	 *    1     1 : The BD is in use, waiting for transfer
	 *    0     1 : The BD is in use, waiting for completion
	 *    1     0 : Should not happen
	 */
	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
		/* We are full ... */
		ret = -EBUSY;
		goto end_unlock;
	}

	qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
	qmc_write32(&bd->cbd_bufaddr, addr);

	xfer_desc = &chan->rx_desc[bd - chan->rxbds];
	xfer_desc->rx_complete = complete;
	xfer_desc->context = context;

	/* Clear previous status flags */
	ctrl &= ~(QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | QMC_BD_RX_NO |
		  QMC_BD_RX_AB | QMC_BD_RX_CR);

	/* Activate the descriptor */
	ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
	wmb(); /* Be sure to flush data before descriptor activation */
	qmc_write16(&bd->cbd_sc, ctrl);

	/* Restart receiver if needed */
	if (chan->is_rx_halted && !chan->is_rx_stopped) {
		/* Restart receiver */
		if (chan->mode == QMC_TRANSPARENT)
			qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
		else
			qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
		qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
		chan->is_rx_halted = false;
	}
	chan->rx_pending++;

	if (ctrl & QMC_BD_RX_W)
		chan->rxbd_free = chan->rxbds;
	else
		chan->rxbd_free++;

	ret = 0;
end_unlock:
	spin_unlock_irqrestore(&chan->rx_lock, flags);

	return ret;
}
EXPORT_SYMBOL(qmc_chan_read_submit);

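/*
 * Example: queueing a receive buffer and decoding the completion flags
 * (sketch; my_rx_done, my_dev and buf_size are hypothetical consumer-side
 * items):
 *
 *	static void my_rx_done(void *context, size_t length, unsigned int flags)
 *	{
 *		if (flags & QMC_RX_FLAG_HDLC_CRC)
 *			... (HDLC frame received with a CRC error)
 *		if (flags & QMC_RX_FLAG_HDLC_LAST)
 *			... (last fragment of an HDLC frame, length is valid)
 *	}
 *
 *	ret = qmc_chan_read_submit(dev->chan, dma_addr, buf_size,
 *				   my_rx_done, dev);
 */
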
static void qmc_chan_read_done(struct qmc_chan *chan)
{
	void (*complete)(void *context, size_t size, unsigned int flags);
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	void *context;
	cbd_t __iomem *bd;
	u16 datalen;
	u16 ctrl;

	/*
	 * E bit  UB bit
	 *    0     0 : The BD is free
	 *    1     1 : The BD is in use, waiting for transfer
	 *    0     1 : The BD is in use, waiting for completion
	 *    1     0 : Should not happen
	 */
	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	while (!(ctrl & QMC_BD_RX_E)) {
		if (!(ctrl & QMC_BD_RX_UB))
			goto end;

		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		complete = xfer_desc->rx_complete;
		context = xfer_desc->context;
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		datalen = qmc_read16(&bd->cbd_datlen);
		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_RX_UB);

		if (ctrl & QMC_BD_RX_W)
			chan->rxbd_done = chan->rxbds;
		else
			chan->rxbd_done++;

		chan->rx_pending--;

		if (complete) {
			spin_unlock_irqrestore(&chan->rx_lock, flags);

			/*
			 * Avoid conversion between internal hardware flags and
			 * the software API flags.
			 * -> Be sure that the software API flags are consistent
			 *    with the hardware flags
			 */
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_LAST != QMC_BD_RX_L);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_FIRST != QMC_BD_RX_F);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_OVF != QMC_BD_RX_LG);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_UNA != QMC_BD_RX_NO);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_ABORT != QMC_BD_RX_AB);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_CRC != QMC_BD_RX_CR);

			complete(context, datalen,
				 ctrl & (QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG |
					 QMC_BD_RX_NO | QMC_BD_RX_AB | QMC_BD_RX_CR));
			spin_lock_irqsave(&chan->rx_lock, flags);
		}

		bd = chan->rxbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info)
{
	unsigned int i;
	u16 curr;
	u16 val;

	/*
	 * Use a common Tx/Rx 64-entry table.
	 * Tx and Rx related settings must be identical.
	 */
	if (chan->tx_ts_mask != chan->rx_ts_mask) {
		dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
		return -EINVAL;
	}

	val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);

	/* Check entries based on Rx stuff */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
			dev_err(chan->qmc->dev, "chan %u TxRx entry %d already used\n",
				chan->id, i);
			return -EBUSY;
		}
	}

	/* Set entries based on Rx stuff */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
				 ~QMC_TSA_WRAP, val);
	}

	return 0;
}

static int qmc_chan_setup_tsa_32rx_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info)
{
	unsigned int i;
	u16 curr;
	u16 val;

	/* Use a Tx 32-entry table and an Rx 32-entry table */

	val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);

	/* Check entries based on Rx stuff */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
			dev_err(chan->qmc->dev, "chan %u Rx entry %d already used\n",
				chan->id, i);
			return -EBUSY;
		}
	}
	/* Check entries based on Tx stuff */
	for (i = 0; i < info->nb_tx_ts; i++) {
		if (!(chan->tx_ts_mask & (((u64)1) << i)))
			continue;

		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2));
		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
			dev_err(chan->qmc->dev, "chan %u Tx entry %d already used\n",
				chan->id, i);
			return -EBUSY;
		}
	}

	/* Set entries based on Rx stuff */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
				 ~QMC_TSA_WRAP, val);
	}
	/* Set entries based on Tx stuff */
	for (i = 0; i < info->nb_tx_ts; i++) {
		if (!(chan->tx_ts_mask & (((u64)1) << i)))
			continue;

		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
				 ~QMC_TSA_WRAP, val);
	}

	return 0;
}

static int qmc_chan_setup_tsa(struct qmc_chan *chan)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/*
	 * Setup one common 64-entry table or two 32-entry tables (one for Tx
	 * and one for Rx) according to the number of assigned TS.
	 */
	return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
		qmc_chan_setup_tsa_64rxtx(chan, &info) :
		qmc_chan_setup_tsa_32rx_32tx(chan, &info);
}

static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
{
	return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
}

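/*
 * Opcode values used below with qmc_chan_command(): 0x0 issues a STOP
 * RECEIVE command and 0x1 a STOP TRANSMIT command for the given channel,
 * e.g. (sketch):
 *
 *	ret = qmc_chan_command(chan, 0x0);	(STOP RECEIVE)
 */
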
static int qmc_chan_stop_rx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->rx_lock, flags);

	/* Send STOP RECEIVE command */
	ret = qmc_chan_command(chan, 0x0);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_rx_stopped = true;

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);

	return ret;
}

static int qmc_chan_stop_tx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/* Send STOP TRANSMIT command */
	ret = qmc_chan_command(chan, 0x1);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_tx_stopped = true;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);

	return ret;
}

int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
	int ret;

	if (direction & QMC_CHAN_READ) {
		ret = qmc_chan_stop_rx(chan);
		if (ret)
			return ret;
	}

	if (direction & QMC_CHAN_WRITE) {
		ret = qmc_chan_stop_tx(chan);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(qmc_chan_stop);

static void qmc_chan_start_rx(struct qmc_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->rx_lock, flags);

	/* Restart the receiver */
	if (chan->mode == QMC_TRANSPARENT)
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
	else
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
	chan->is_rx_halted = false;

	chan->is_rx_stopped = false;

	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static void qmc_chan_start_tx(struct qmc_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/*
	 * Enable the channel transmitter as it could be disabled if
	 * qmc_chan_reset() was called.
	 */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	/* Set the POL bit in the channel mode register */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	chan->is_tx_stopped = false;

	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_start(struct qmc_chan *chan, int direction)
{
	if (direction & QMC_CHAN_READ)
		qmc_chan_start_rx(chan);

	if (direction & QMC_CHAN_WRITE)
		qmc_chan_start_tx(chan);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_start);

static void qmc_chan_reset_rx(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;

	spin_lock_irqsave(&chan->rx_lock, flags);

	bd = chan->rxbds;
	do {
		ctrl = qmc_read16(&bd->cbd_sc);
		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_RX_UB | QMC_BD_RX_E));

		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		bd++;
	} while (!(ctrl & QMC_BD_RX_W));

	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;
	qmc_write16(chan->s_param + QMC_SPE_RBPTR,
		    qmc_read16(chan->s_param + QMC_SPE_RBASE));

	chan->rx_pending = 0;

	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static void qmc_chan_reset_tx(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/* Disable transmitter. It will be re-enabled in qmc_chan_start() */
	qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	bd = chan->txbds;
	do {
		ctrl = qmc_read16(&bd->cbd_sc);
		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_TX_UB | QMC_BD_TX_R));

		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		bd++;
	} while (!(ctrl & QMC_BD_TX_W));

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	qmc_write16(chan->s_param + QMC_SPE_TBPTR,
		    qmc_read16(chan->s_param + QMC_SPE_TBASE));

	/* Reset TSTATE and ZISTATE to their initial value */
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);

	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_reset(struct qmc_chan *chan, int direction)
{
	if (direction & QMC_CHAN_READ)
		qmc_chan_reset_rx(chan);

	if (direction & QMC_CHAN_WRITE)
		qmc_chan_reset_tx(chan);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_reset);

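/*
 * A typical reconfiguration sequence from the consumer point of view
 * (sketch):
 *
 *	qmc_chan_stop(chan, QMC_CHAN_ALL);
 *	qmc_chan_reset(chan, QMC_CHAN_ALL);
 *	... (change parameters, requeue buffers) ...
 *	qmc_chan_start(chan, QMC_CHAN_ALL);
 */
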
static int qmc_check_chans(struct qmc *qmc)
{
	struct tsa_serial_info info;
	bool is_one_table = false;
	struct qmc_chan *chan;
	u64 tx_ts_mask = 0;
	u64 rx_ts_mask = 0;
	u64 tx_ts_assigned_mask;
	u64 rx_ts_assigned_mask;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) {
		dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
		return -EINVAL;
	}

	/*
	 * If more than 32 TS are assigned to this serial, one common table is
	 * used for Tx and Rx and so masks must be equal for all channels.
	 */
	if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) {
		if (info.nb_tx_ts != info.nb_rx_ts) {
			dev_err(qmc->dev, "Numbers of TSA Tx/Rx TS assigned are not equal\n");
			return -EINVAL;
		}
		is_one_table = true;
	}

	tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
	rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		if (chan->tx_ts_mask > tx_ts_assigned_mask) {
			dev_err(qmc->dev, "chan %u uses TSA unassigned Tx TS\n", chan->id);
			return -EINVAL;
		}
		if (tx_ts_mask & chan->tx_ts_mask) {
			dev_err(qmc->dev, "chan %u uses an already used Tx TS\n", chan->id);
			return -EINVAL;
		}

		if (chan->rx_ts_mask > rx_ts_assigned_mask) {
			dev_err(qmc->dev, "chan %u uses TSA unassigned Rx TS\n", chan->id);
			return -EINVAL;
		}
		if (rx_ts_mask & chan->rx_ts_mask) {
			dev_err(qmc->dev, "chan %u uses an already used Rx TS\n", chan->id);
			return -EINVAL;
		}

		if (is_one_table && (chan->tx_ts_mask != chan->rx_ts_mask)) {
			dev_err(qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
			return -EINVAL;
		}

		tx_ts_mask |= chan->tx_ts_mask;
		rx_ts_mask |= chan->rx_ts_mask;
	}

	return 0;
}

static unsigned int qmc_nb_chans(struct qmc *qmc)
{
	unsigned int count = 0;
	struct qmc_chan *chan;

	list_for_each_entry(chan, &qmc->chan_head, list)
		count++;

	return count;
}

static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
{
	struct device_node *chan_np;
	struct qmc_chan *chan;
	const char *mode;
	u32 chan_id;
	u64 ts_mask;
	int ret;

	for_each_available_child_of_node(np, chan_np) {
		ret = of_property_read_u32(chan_np, "reg", &chan_id);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read reg\n", chan_np);
			of_node_put(chan_np);
			return ret;
		}
		if (chan_id > 63) {
			dev_err(qmc->dev, "%pOF: Invalid chan_id\n", chan_np);
			of_node_put(chan_np);
			return -EINVAL;
		}

		chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			of_node_put(chan_np);
			return -ENOMEM;
		}

		chan->id = chan_id;
		spin_lock_init(&chan->rx_lock);
		spin_lock_init(&chan->tx_lock);

		ret = of_property_read_u64(chan_np, "fsl,tx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,tx-ts-mask\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		chan->tx_ts_mask_avail = ts_mask;
		chan->tx_ts_mask = chan->tx_ts_mask_avail;

		ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,rx-ts-mask\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		chan->rx_ts_mask_avail = ts_mask;
		chan->rx_ts_mask = chan->rx_ts_mask_avail;

		mode = "transparent";
		ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
		if (ret && ret != -EINVAL) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,operational-mode\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		if (!strcmp(mode, "transparent")) {
			chan->mode = QMC_TRANSPARENT;
		} else if (!strcmp(mode, "hdlc")) {
			chan->mode = QMC_HDLC;
		} else {
			dev_err(qmc->dev, "%pOF: Invalid fsl,operational-mode (%s)\n",
				chan_np, mode);
			of_node_put(chan_np);
			return -EINVAL;
		}

		chan->is_reverse_data = of_property_read_bool(chan_np,
							      "fsl,reverse-data");

		list_add_tail(&chan->list, &qmc->chan_head);
		qmc->chans[chan->id] = chan;
	}

	return qmc_check_chans(qmc);
}

static int qmc_init_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
{
	unsigned int i;
	u16 val;

	/*
	 * Use a common Tx/Rx 64-entry table.
	 * Everything was previously checked: Tx and Rx related settings are
	 * identical -> use the Rx related ones to build the table.
	 */

	/* Invalidate all entries */
	for (i = 0; i < 64; i++)
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);

	/* Set Wrap bit on last entry */
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
		      QMC_TSA_WRAP);

	/* Init pointers to the table */
	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	return 0;
}

static int qmc_init_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
{
	unsigned int i;
	u16 val;

	/*
	 * Use a Tx 32-entry table and an Rx 32-entry table.
	 * Everything was previously checked.
	 */

	/* Invalidate all entries */
	for (i = 0; i < 32; i++) {
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
	}

	/* Set Wrap bit on last entries */
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
		      QMC_TSA_WRAP);
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATTX + ((info->nb_tx_ts - 1) * 2),
		      QMC_TSA_WRAP);

	/* Init Rx pointers ... */
	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);

	/* ... and Tx pointers */
	val = qmc->scc_pram_offset + QMC_GBL_TSATTX;
	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	return 0;
}

static int qmc_init_tsa(struct qmc *qmc)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/*
	 * Initialize one common 64-entry table or two 32-entry tables (one
	 * for Tx and one for Rx) according to the number of assigned TS.
	 */
	return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
		qmc_init_tsa_64rxtx(qmc, &info) :
		qmc_init_tsa_32rx_32tx(qmc, &info);
}

static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
{
	struct tsa_serial_info info;
	u16 first_rx, last_tx;
	u16 trnsync;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/* Find the first Rx TS allocated to the channel */
	first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;

	/* Find the last Tx TS allocated to the channel */
	last_tx = fls64(chan->tx_ts_mask);

	trnsync = 0;
	if (info.nb_rx_ts)
		trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
	if (info.nb_tx_ts)
		trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);

	qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);

	dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
		chan->id, trnsync,
		first_rx, info.nb_rx_ts, chan->rx_ts_mask,
		last_tx, info.nb_tx_ts, chan->tx_ts_mask);

	return 0;
}

static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
{
	unsigned int i;
	cbd_t __iomem *bd;
	int ret;
	u16 val;

	chan->qmc = qmc;

	ret = qmc_chan_setup_tsa(chan);
	if (ret)
		return ret;

	/* Set channel specific parameter base address */
	chan->s_param = qmc->dpram + (chan->id * 64);
	/* 16 BDs per channel (8 Rx and 8 Tx) */
	chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
	chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;

	/* TBASE and TBPTR */
	val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);

	/* RBASE and RBPTR */
	val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
	if (chan->mode == QMC_TRANSPARENT) {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
		val = QMC_SPE_CHAMR_MODE_TRANSP | QMC_SPE_CHAMR_TRANSP_SYNC;
		if (chan->is_reverse_data)
			val |= QMC_SPE_CHAMR_TRANSP_RD;
		qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
		ret = qmc_setup_chan_trnsync(qmc, chan);
		if (ret)
			return ret;
	} else {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
		qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
		qmc_write16(chan->s_param + QMC_SPE_CHAMR,
			    QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
	}

	/* Do not enable interrupts now. They will be enabled later */
	qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);

	/* Init Rx BDs and set Wrap bit on last descriptor */
	BUILD_BUG_ON(QMC_NB_RXBDS == 0);
	val = QMC_BD_RX_E | QMC_BD_RX_I;
	for (i = 0; i < QMC_NB_RXBDS; i++) {
		bd = chan->rxbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->rxbds + QMC_NB_RXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);

	/* Init Tx BDs and set Wrap bit on last descriptor */
	BUILD_BUG_ON(QMC_NB_TXBDS == 0);
	val = QMC_BD_TX_I;
	if (chan->mode == QMC_HDLC)
		val |= QMC_BD_TX_L | QMC_BD_TX_TC;
	for (i = 0; i < QMC_NB_TXBDS; i++) {
		bd = chan->txbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->txbds + QMC_NB_TXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_TX_W);

	return 0;
}

static int qmc_setup_chans(struct qmc *qmc)
{
	struct qmc_chan *chan;
	int ret;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		ret = qmc_setup_chan(qmc, chan);
		if (ret)
			return ret;
	}

	return 0;
}

static int qmc_finalize_chans(struct qmc *qmc)
{
	struct qmc_chan *chan;
	int ret;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		/* Unmask channel interrupts */
		if (chan->mode == QMC_HDLC) {
			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
				    QMC_INT_NID | QMC_INT_IDL | QMC_INT_MRF |
				    QMC_INT_UN | QMC_INT_RXF | QMC_INT_BSY |
				    QMC_INT_TXB | QMC_INT_RXB);
		} else {
			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
				    QMC_INT_UN | QMC_INT_BSY |
				    QMC_INT_TXB | QMC_INT_RXB);
		}

		/* Forcibly stop the channel */
		ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
		if (ret)
			return ret;
	}

	return 0;
}

static int qmc_setup_ints(struct qmc *qmc)
{
	unsigned int i;
	u16 __iomem *last;

	/* Zero all entries */
	for (i = 0; i < (qmc->int_size / sizeof(u16)); i++)
		qmc_write16(qmc->int_table + i, 0x0000);

	/* Set Wrap bit on last entry */
	if (qmc->int_size >= sizeof(u16)) {
		last = qmc->int_table + (qmc->int_size / sizeof(u16)) - 1;
		qmc_write16(last, QMC_INT_W);
	}

	return 0;
}

static void qmc_irq_gint(struct qmc *qmc)
{
	struct qmc_chan *chan;
	unsigned int chan_id;
	unsigned long flags;
	u16 int_entry;

	int_entry = qmc_read16(qmc->int_curr);
	while (int_entry & QMC_INT_V) {
		/* Clear all but the Wrap bit */
		qmc_write16(qmc->int_curr, int_entry & QMC_INT_W);

		chan_id = QMC_INT_GET_CHANNEL(int_entry);
		chan = qmc->chans[chan_id];
		if (!chan) {
			dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
			goto int_next;
		}

		if (int_entry & QMC_INT_TXB)
			qmc_chan_write_done(chan);

		if (int_entry & QMC_INT_UN) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
				 int_entry);
			chan->nb_tx_underrun++;
		}

		if (int_entry & QMC_INT_BSY) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
				 int_entry);
			chan->nb_rx_busy++;
			/* Restart the receiver if needed */
			spin_lock_irqsave(&chan->rx_lock, flags);
			if (chan->rx_pending && !chan->is_rx_stopped) {
				if (chan->mode == QMC_TRANSPARENT)
					qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
				else
					qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
				qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
				chan->is_rx_halted = false;
			} else {
				chan->is_rx_halted = true;
			}
			spin_unlock_irqrestore(&chan->rx_lock, flags);
		}

		if (int_entry & QMC_INT_RXB)
			qmc_chan_read_done(chan);

int_next:
		if (int_entry & QMC_INT_W)
			qmc->int_curr = qmc->int_table;
		else
			qmc->int_curr++;
		int_entry = qmc_read16(qmc->int_curr);
	}
}

static irqreturn_t qmc_irq_handler(int irq, void *priv)
{
	struct qmc *qmc = (struct qmc *)priv;
	u16 scce;

	scce = qmc_read16(qmc->scc_regs + SCC_SCCE);
	qmc_write16(qmc->scc_regs + SCC_SCCE, scce);

	if (unlikely(scce & SCC_SCCE_IQOV))
		dev_info(qmc->dev, "IRQ queue overflow\n");

	if (unlikely(scce & SCC_SCCE_GUN))
		dev_err(qmc->dev, "Global transmitter underrun\n");

	if (unlikely(scce & SCC_SCCE_GOV))
		dev_err(qmc->dev, "Global receiver overrun\n");

	/* Normal interrupt */
	if (likely(scce & SCC_SCCE_GINT))
		qmc_irq_gint(qmc);

	return IRQ_HANDLED;
}

static int qmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	unsigned int nb_chans;
	struct resource *res;
	struct qmc *qmc;
	int irq;
	int ret;

	qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
	if (!qmc)
		return -ENOMEM;

	qmc->dev = &pdev->dev;
	INIT_LIST_HEAD(&qmc->chan_head);

	qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
	if (IS_ERR(qmc->scc_regs))
		return PTR_ERR(qmc->scc_regs);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
	if (!res)
		return -EINVAL;
	qmc->scc_pram_offset = res->start - get_immrbase();
	qmc->scc_pram = devm_ioremap_resource(qmc->dev, res);
	if (IS_ERR(qmc->scc_pram))
		return PTR_ERR(qmc->scc_pram);

	qmc->dpram = devm_platform_ioremap_resource_byname(pdev, "dpram");
	if (IS_ERR(qmc->dpram))
		return PTR_ERR(qmc->dpram);

	qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
	if (IS_ERR(qmc->tsa_serial)) {
		return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
				     "Failed to get TSA serial\n");
	}

	/* Connect the serial (SCC) to TSA */
	ret = tsa_serial_connect(qmc->tsa_serial);
	if (ret) {
		dev_err(qmc->dev, "Failed to connect TSA serial\n");
		return ret;
	}

	/* Parse channel information */
	ret = qmc_of_parse_chans(qmc, np);
	if (ret)
		goto err_tsa_serial_disconnect;

	nb_chans = qmc_nb_chans(qmc);

	/* Init GSMR_H and GSMR_L registers */
	qmc_write32(qmc->scc_regs + SCC_GSMRH,
		    SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP);

	/* Enable QMC mode */
	qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC);

	/*
	 * Allocate the buffer descriptor table
	 * 8 Rx and 8 Tx descriptors per channel
	 */
	qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
	qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
					    &qmc->bd_dma_addr, GFP_KERNEL);
	if (!qmc->bd_table) {
		dev_err(qmc->dev, "Failed to allocate bd table\n");
		ret = -ENOMEM;
		goto err_tsa_serial_disconnect;
	}
	memset(qmc->bd_table, 0, qmc->bd_size);

	qmc_write32(qmc->scc_pram + QMC_GBL_MCBASE, qmc->bd_dma_addr);

	/* Allocate the interrupt table */
	qmc->int_size = QMC_NB_INTS * sizeof(u16);
	qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
					     &qmc->int_dma_addr, GFP_KERNEL);
	if (!qmc->int_table) {
		dev_err(qmc->dev, "Failed to allocate interrupt table\n");
		ret = -ENOMEM;
		goto err_tsa_serial_disconnect;
	}
	memset(qmc->int_table, 0, qmc->int_size);

	qmc->int_curr = qmc->int_table;
	qmc_write32(qmc->scc_pram + QMC_GBL_INTBASE, qmc->int_dma_addr);
	qmc_write32(qmc->scc_pram + QMC_GBL_INTPTR, qmc->int_dma_addr);

	/* Set MRBLR (valid for HDLC only) to max MRU + max CRC */
	qmc_write16(qmc->scc_pram + QMC_GBL_MRBLR, HDLC_MAX_MRU + 4);

	qmc_write16(qmc->scc_pram + QMC_GBL_GRFTHR, 1);
	qmc_write16(qmc->scc_pram + QMC_GBL_GRFCNT, 1);

	qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
	qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);

	ret = qmc_init_tsa(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);

	ret = qmc_setup_chans(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	/* Init interrupts table */
	ret = qmc_setup_ints(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	/* Disable and clear interrupts, set the irq handler */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
	qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_tsa_serial_disconnect;
	}
	ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
	if (ret < 0)
		goto err_tsa_serial_disconnect;

	/* Enable interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM,
		    SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);

	ret = qmc_finalize_chans(qmc);
	if (ret < 0)
		goto err_disable_intr;

	/* Enable transmitter and receiver */
	qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	platform_set_drvdata(pdev, qmc);

	/* Populate channel related devices */
	ret = devm_of_platform_populate(qmc->dev);
	if (ret)
		goto err_disable_txrx;

	return 0;

err_disable_txrx:
	qmc_clrbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

err_disable_intr:
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

err_tsa_serial_disconnect:
	tsa_serial_disconnect(qmc->tsa_serial);
	return ret;
}

static void qmc_remove(struct platform_device *pdev)
{
	struct qmc *qmc = platform_get_drvdata(pdev);

	/* Disable transmitter and receiver */
	qmc_clrbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* Disable interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

	/* Disconnect the serial from TSA */
	tsa_serial_disconnect(qmc->tsa_serial);
}

static const struct of_device_id qmc_id_table[] = {
	{ .compatible = "fsl,cpm1-scc-qmc" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_id_table);

static struct platform_driver qmc_driver = {
	.driver = {
		.name = "fsl-qmc",
		.of_match_table = of_match_ptr(qmc_id_table),
	},
	.probe = qmc_probe,
	.remove_new = qmc_remove,
};
module_platform_driver(qmc_driver);

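/*
 * Example device tree usage (sketch; the unit address, the TSA phandle
 * argument and the channel numbers are illustrative only):
 *
 *	qmc@a60 {
 *		compatible = "fsl,cpm1-scc-qmc";
 *		...
 *		fsl,tsa-serial = <&tsa 1>;
 *
 *		channel@16 {
 *			reg = <16>;
 *			fsl,operational-mode = "hdlc";
 *			fsl,tx-ts-mask = <0x00000000 0x000000f0>;
 *			fsl,rx-ts-mask = <0x00000000 0x000000f0>;
 *		};
 *	};
 */
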
static struct qmc_chan *qmc_chan_get_from_qmc(struct device_node *qmc_np, unsigned int chan_index)
{
	struct platform_device *pdev;
	struct qmc_chan *qmc_chan;
	struct qmc *qmc;

	if (!of_match_node(qmc_driver.driver.of_match_table, qmc_np))
		return ERR_PTR(-EINVAL);

	pdev = of_find_device_by_node(qmc_np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	qmc = platform_get_drvdata(pdev);
	if (!qmc) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (chan_index >= ARRAY_SIZE(qmc->chans)) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	qmc_chan = qmc->chans[chan_index];
	if (!qmc_chan) {
		platform_device_put(pdev);
		return ERR_PTR(-ENOENT);
	}

	return qmc_chan;
}

struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
{
	struct of_phandle_args out_args;
	struct qmc_chan *qmc_chan;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
					       &out_args);
	if (ret < 0)
		return ERR_PTR(ret);

	if (out_args.args_count != 1) {
		of_node_put(out_args.np);
		return ERR_PTR(-EINVAL);
	}

	qmc_chan = qmc_chan_get_from_qmc(out_args.np, out_args.args[0]);
	of_node_put(out_args.np);
	return qmc_chan;
}
EXPORT_SYMBOL(qmc_chan_get_byphandle);

struct qmc_chan *qmc_chan_get_bychild(struct device_node *np)
{
	struct device_node *qmc_np;
	u32 chan_index;
	int ret;

	qmc_np = np->parent;
	ret = of_property_read_u32(np, "reg", &chan_index);
	if (ret)
		return ERR_PTR(-EINVAL);

	return qmc_chan_get_from_qmc(qmc_np, chan_index);
}
EXPORT_SYMBOL(qmc_chan_get_bychild);

void qmc_chan_put(struct qmc_chan *chan)
{
	put_device(chan->qmc->dev);
}
EXPORT_SYMBOL(qmc_chan_put);

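/*
 * Consumer-side usage (sketch; the property name "fsl,qmc-chan" is the
 * consumer's choice, the phandle must carry one cell holding the channel
 * index):
 *
 *	chan = qmc_chan_get_byphandle(np, "fsl,qmc-chan");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	qmc_chan_put(chan);
 */
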
static void devm_qmc_chan_release(struct device *dev, void *res)
{
	struct qmc_chan **qmc_chan = res;

	qmc_chan_put(*qmc_chan);
}

struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
					     struct device_node *np,
					     const char *phandle_name)
{
	struct qmc_chan *qmc_chan;
	struct qmc_chan **dr;

	dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	qmc_chan = qmc_chan_get_byphandle(np, phandle_name);
	if (!IS_ERR(qmc_chan)) {
		*dr = qmc_chan;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return qmc_chan;
}
EXPORT_SYMBOL(devm_qmc_chan_get_byphandle);

struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
					   struct device_node *np)
{
	struct qmc_chan *qmc_chan;
	struct qmc_chan **dr;

	dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	qmc_chan = qmc_chan_get_bychild(np);
	if (!IS_ERR(qmc_chan)) {
		*dr = qmc_chan;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return qmc_chan;
}
EXPORT_SYMBOL(devm_qmc_chan_get_bychild);

MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM QMC driver");
MODULE_LICENSE("GPL");