soc: fsl: cpm1: qmc: Introduce qmc_chan_setup_tsa*
// SPDX-License-Identifier: GPL-2.0
/*
 * QMC driver
 *
 * Copyright 2022 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */

#include <soc/fsl/qe/qmc.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/fsl/cpm.h>
#include <sysdev/fsl_soc.h>
#include "tsa.h"

/* SCC general mode register low (32 bits) */
#define SCC_GSMRL       0x00
#define SCC_GSMRL_ENR           (1 << 5)
#define SCC_GSMRL_ENT           (1 << 4)
#define SCC_GSMRL_MODE_QMC      (0x0A << 0)

/* SCC general mode register high (32 bits) */
#define SCC_GSMRH       0x04
#define   SCC_GSMRH_CTSS        (1 << 7)
#define   SCC_GSMRH_CDS         (1 << 8)
#define   SCC_GSMRH_CTSP        (1 << 9)
#define   SCC_GSMRH_CDP         (1 << 10)

/* SCC event register (16 bits) */
#define SCC_SCCE        0x10
#define   SCC_SCCE_IQOV         (1 << 3)
#define   SCC_SCCE_GINT         (1 << 2)
#define   SCC_SCCE_GUN          (1 << 1)
#define   SCC_SCCE_GOV          (1 << 0)

/* SCC mask register (16 bits) */
#define SCC_SCCM        0x14
/* Multichannel base pointer (32 bits) */
#define QMC_GBL_MCBASE          0x00
/* Multichannel controller state (16 bits) */
#define QMC_GBL_QMCSTATE        0x04
/* Maximum receive buffer length (16 bits) */
#define QMC_GBL_MRBLR           0x06
/* Tx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_TX_S_PTR        0x08
/* Rx pointer (16 bits) */
#define QMC_GBL_RXPTR           0x0A
/* Global receive frame threshold (16 bits) */
#define QMC_GBL_GRFTHR          0x0C
/* Global receive frame count (16 bits) */
#define QMC_GBL_GRFCNT          0x0E
/* Multichannel interrupt base address (32 bits) */
#define QMC_GBL_INTBASE         0x10
/* Multichannel interrupt pointer (32 bits) */
#define QMC_GBL_INTPTR          0x14
/* Rx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_RX_S_PTR        0x18
/* Tx pointer (16 bits) */
#define QMC_GBL_TXPTR           0x1A
/* CRC constant (32 bits) */
#define QMC_GBL_C_MASK32        0x1C
/* Time slot assignment table Rx (32 x 16 bits) */
#define QMC_GBL_TSATRX          0x20
/* Time slot assignment table Tx (32 x 16 bits) */
#define QMC_GBL_TSATTX          0x60
/* CRC constant (16 bits) */
#define QMC_GBL_C_MASK16        0xA0

/* TSA entry (16-bit entry in TSATRX and TSATTX) */
#define QMC_TSA_VALID           (1 << 15)
#define QMC_TSA_WRAP            (1 << 14)
#define QMC_TSA_MASK            (0x303F)
#define QMC_TSA_CHANNEL(x)      ((x) << 6)
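
/*
 * Layout of a TSA entry, as encoded by the defines above:
 *   bit 15             : valid entry
 *   bit 14             : wrap (last entry of the table)
 *   bits 11-6          : channel number
 *   bits 13-12 and 5-0 : mask bits covered by QMC_TSA_MASK
 */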

/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_TBASE   0x00

/* Channel mode register (16 bits) */
#define QMC_SPE_CHAMR   0x02
#define   QMC_SPE_CHAMR_MODE_HDLC       (1 << 15)
#define   QMC_SPE_CHAMR_MODE_TRANSP     ((0 << 15) | (1 << 13))
#define   QMC_SPE_CHAMR_ENT             (1 << 12)
#define   QMC_SPE_CHAMR_POL             (1 << 8)
#define   QMC_SPE_CHAMR_HDLC_IDLM       (1 << 13)
#define   QMC_SPE_CHAMR_HDLC_CRC        (1 << 7)
#define   QMC_SPE_CHAMR_HDLC_NOF        (0x0f << 0)
#define   QMC_SPE_CHAMR_TRANSP_RD       (1 << 14)
#define   QMC_SPE_CHAMR_TRANSP_SYNC     (1 << 10)

/* Tx internal state (32 bits) */
#define QMC_SPE_TSTATE  0x04
/* Tx buffer descriptor pointer (16 bits) */
#define QMC_SPE_TBPTR   0x0C
/* Zero-insertion state (32 bits) */
#define QMC_SPE_ZISTATE 0x14
/* Channel's interrupt mask flags (16 bits) */
#define QMC_SPE_INTMSK  0x1C
/* Rx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_RBASE   0x20
/* HDLC: Maximum frame length register (16 bits) */
#define QMC_SPE_MFLR    0x22
/* TRANSPARENT: Transparent maximum receive length (16 bits) */
#define QMC_SPE_TMRBLR  0x22
/* Rx internal state (32 bits) */
#define QMC_SPE_RSTATE  0x24
/* Rx buffer descriptor pointer (16 bits) */
#define QMC_SPE_RBPTR   0x2C
/* Packs 4 bytes to 1 long word before writing to buffer (32 bits) */
#define QMC_SPE_RPACK   0x30
/* Zero deletion state (32 bits) */
#define QMC_SPE_ZDSTATE 0x34

/* Transparent synchronization (16 bits) */
#define QMC_SPE_TRNSYNC 0x3C
#define   QMC_SPE_TRNSYNC_RX(x) ((x) << 8)
#define   QMC_SPE_TRNSYNC_TX(x) ((x) << 0)

/* Interrupt related registers bits */
#define QMC_INT_V               (1 << 15)
#define QMC_INT_W               (1 << 14)
#define QMC_INT_NID             (1 << 13)
#define QMC_INT_IDL             (1 << 12)
#define QMC_INT_GET_CHANNEL(x)  (((x) & 0x0FC0) >> 6)
#define QMC_INT_MRF             (1 << 5)
#define QMC_INT_UN              (1 << 4)
#define QMC_INT_RXF             (1 << 3)
#define QMC_INT_BSY             (1 << 2)
#define QMC_INT_TXB             (1 << 1)
#define QMC_INT_RXB             (1 << 0)
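
/*
 * Each entry of the interrupt circular table is a 16-bit word: the V
 * (valid) and W (wrap) bits manage the ring itself, bits 11-6 identify
 * the source channel (QMC_INT_GET_CHANNEL) and the remaining bits are
 * per-event flags.
 */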

/* BD related registers bits */
#define QMC_BD_RX_E     (1 << 15)
#define QMC_BD_RX_W     (1 << 13)
#define QMC_BD_RX_I     (1 << 12)
#define QMC_BD_RX_L     (1 << 11)
#define QMC_BD_RX_F     (1 << 10)
#define QMC_BD_RX_CM    (1 << 9)
#define QMC_BD_RX_UB    (1 << 7)
#define QMC_BD_RX_LG    (1 << 5)
#define QMC_BD_RX_NO    (1 << 4)
#define QMC_BD_RX_AB    (1 << 3)
#define QMC_BD_RX_CR    (1 << 2)

#define QMC_BD_TX_R     (1 << 15)
#define QMC_BD_TX_W     (1 << 13)
#define QMC_BD_TX_I     (1 << 12)
#define QMC_BD_TX_L     (1 << 11)
#define QMC_BD_TX_TC    (1 << 10)
#define QMC_BD_TX_CM    (1 << 9)
#define QMC_BD_TX_UB    (1 << 7)
#define QMC_BD_TX_PAD   (0x0f << 0)

/* Number of BDs and interrupt table entries */
#define QMC_NB_TXBDS    8
#define QMC_NB_RXBDS    8
#define QMC_NB_INTS     128

struct qmc_xfer_desc {
        union {
                void (*tx_complete)(void *context);
                void (*rx_complete)(void *context, size_t length, unsigned int flags);
        };
        void *context;
};

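/*
 * Per-channel state.
 * Each channel owns a Tx and an Rx BD ring (QMC_NB_TXBDS and QMC_NB_RXBDS
 * descriptors). The *_free cursors point to the next descriptor available
 * for submission, the *_done cursors to the next descriptor expected to
 * complete; both wrap on the descriptor carrying the Wrap bit.
 */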
struct qmc_chan {
        struct list_head list;
        unsigned int id;
        struct qmc *qmc;
        void __iomem *s_param;
        enum qmc_mode mode;
        u64     tx_ts_mask_avail;
        u64     tx_ts_mask;
        u64     rx_ts_mask_avail;
        u64     rx_ts_mask;
        bool is_reverse_data;

        spinlock_t      tx_lock;
        cbd_t __iomem *txbds;
        cbd_t __iomem *txbd_free;
        cbd_t __iomem *txbd_done;
        struct qmc_xfer_desc tx_desc[QMC_NB_TXBDS];
        u64     nb_tx_underrun;
        bool    is_tx_stopped;

        spinlock_t      rx_lock;
        cbd_t __iomem *rxbds;
        cbd_t __iomem *rxbd_free;
        cbd_t __iomem *rxbd_done;
        struct qmc_xfer_desc rx_desc[QMC_NB_RXBDS];
        u64     nb_rx_busy;
        int     rx_pending;
        bool    is_rx_halted;
        bool    is_rx_stopped;
};

struct qmc {
        struct device *dev;
        struct tsa_serial *tsa_serial;
        void __iomem *scc_regs;
        void __iomem *scc_pram;
        void __iomem *dpram;
        u16 scc_pram_offset;
        cbd_t __iomem *bd_table;
        dma_addr_t bd_dma_addr;
        size_t bd_size;
        u16 __iomem *int_table;
        u16 __iomem *int_curr;
        dma_addr_t int_dma_addr;
        size_t int_size;
        struct list_head chan_head;
        struct qmc_chan *chans[64];
};

static void qmc_write16(void __iomem *addr, u16 val)
{
        iowrite16be(val, addr);
}

static u16 qmc_read16(void __iomem *addr)
{
        return ioread16be(addr);
}

static void qmc_setbits16(void __iomem *addr, u16 set)
{
        qmc_write16(addr, qmc_read16(addr) | set);
}

static void qmc_clrbits16(void __iomem *addr, u16 clr)
{
        qmc_write16(addr, qmc_read16(addr) & ~clr);
}

static void qmc_clrsetbits16(void __iomem *addr, u16 clr, u16 set)
{
        qmc_write16(addr, (qmc_read16(addr) & ~clr) | set);
}

static void qmc_write32(void __iomem *addr, u32 val)
{
        iowrite32be(val, addr);
}

static u32 qmc_read32(void __iomem *addr)
{
        return ioread32be(addr);
}

static void qmc_setbits32(void __iomem *addr, u32 set)
{
        qmc_write32(addr, qmc_read32(addr) | set);
}
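
/*
 * Added helper (mirrors qmc_clrbits16()): needed so that the error and
 * remove paths can actually clear the GSMRL enable bits, see qmc_probe()
 * and qmc_remove() below.
 */
static void qmc_clrbits32(void __iomem *addr, u32 clr)
{
        qmc_write32(addr, qmc_read32(addr) & ~clr);
}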

int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
        struct tsa_serial_info tsa_info;
        int ret;

        /* Retrieve info from the TSA related serial */
        ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
        if (ret)
                return ret;

        info->mode = chan->mode;
        info->rx_fs_rate = tsa_info.rx_fs_rate;
        info->rx_bit_rate = tsa_info.rx_bit_rate;
        info->nb_tx_ts = hweight64(chan->tx_ts_mask);
        info->tx_fs_rate = tsa_info.tx_fs_rate;
        info->tx_bit_rate = tsa_info.tx_bit_rate;
        info->nb_rx_ts = hweight64(chan->rx_ts_mask);

        return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);

int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
{
        if (param->mode != chan->mode)
                return -EINVAL;

        switch (param->mode) {
        case QMC_HDLC:
                if ((param->hdlc.max_rx_buf_size % 4) ||
                    (param->hdlc.max_rx_buf_size < 8))
                        return -EINVAL;

                qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
                            param->hdlc.max_rx_buf_size - 8);
                qmc_write16(chan->s_param + QMC_SPE_MFLR,
                            param->hdlc.max_rx_frame_size);
                if (param->hdlc.is_crc32) {
                        qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
                                      QMC_SPE_CHAMR_HDLC_CRC);
                } else {
                        qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
                                      QMC_SPE_CHAMR_HDLC_CRC);
                }
                break;

        case QMC_TRANSPARENT:
                qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
                            param->transp.max_rx_buf_size);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL(qmc_chan_set_param);

int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
                          void (*complete)(void *context), void *context)
{
        struct qmc_xfer_desc *xfer_desc;
        unsigned long flags;
        cbd_t __iomem *bd;
        u16 ctrl;
        int ret;

        /*
         * R bit  UB bit
         *   0       0  : The BD is free
         *   1       1  : The BD is in use, waiting for transfer
         *   0       1  : The BD is in use, waiting for completion
         *   1       0  : Should not happen
         */

        spin_lock_irqsave(&chan->tx_lock, flags);
        bd = chan->txbd_free;

        ctrl = qmc_read16(&bd->cbd_sc);
        if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
                /* We are full ... */
                ret = -EBUSY;
                goto end;
        }

        qmc_write16(&bd->cbd_datlen, length);
        qmc_write32(&bd->cbd_bufaddr, addr);

        xfer_desc = &chan->tx_desc[bd - chan->txbds];
        xfer_desc->tx_complete = complete;
        xfer_desc->context = context;

        /* Activate the descriptor */
        ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
        wmb(); /* Be sure to flush the descriptor before control update */
        qmc_write16(&bd->cbd_sc, ctrl);

        if (!chan->is_tx_stopped)
                qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

        if (ctrl & QMC_BD_TX_W)
                chan->txbd_free = chan->txbds;
        else
                chan->txbd_free++;

        ret = 0;

end:
        spin_unlock_irqrestore(&chan->tx_lock, flags);
        return ret;
}
EXPORT_SYMBOL(qmc_chan_write_submit);
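
/*
 * Typical producer-side usage (a sketch only; my_dma, my_len, my_ctx and
 * my_tx_done are hypothetical caller-side names):
 *
 *   static void my_tx_done(void *context)
 *   {
 *           // The DMA buffer may be unmapped or reused from here.
 *   }
 *
 *   ret = qmc_chan_write_submit(chan, my_dma, my_len, my_tx_done, my_ctx);
 *   if (ret == -EBUSY)
 *           ...all QMC_NB_TXBDS descriptors are in flight, retry later...
 */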

static void qmc_chan_write_done(struct qmc_chan *chan)
{
        struct qmc_xfer_desc *xfer_desc;
        void (*complete)(void *context);
        unsigned long flags;
        void *context;
        cbd_t __iomem *bd;
        u16 ctrl;

        /*
         * R bit  UB bit
         *   0       0  : The BD is free
         *   1       1  : The BD is in use, waiting for transfer
         *   0       1  : The BD is in use, waiting for completion
         *   1       0  : Should not happen
         */

        spin_lock_irqsave(&chan->tx_lock, flags);
        bd = chan->txbd_done;

        ctrl = qmc_read16(&bd->cbd_sc);
        while (!(ctrl & QMC_BD_TX_R)) {
                if (!(ctrl & QMC_BD_TX_UB))
                        goto end;

                xfer_desc = &chan->tx_desc[bd - chan->txbds];
                complete = xfer_desc->tx_complete;
                context = xfer_desc->context;
                xfer_desc->tx_complete = NULL;
                xfer_desc->context = NULL;

                qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_TX_UB);

                if (ctrl & QMC_BD_TX_W)
                        chan->txbd_done = chan->txbds;
                else
                        chan->txbd_done++;

                if (complete) {
                        spin_unlock_irqrestore(&chan->tx_lock, flags);
                        complete(context);
                        spin_lock_irqsave(&chan->tx_lock, flags);
                }

                bd = chan->txbd_done;
                ctrl = qmc_read16(&bd->cbd_sc);
        }

end:
        spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
                         void (*complete)(void *context, size_t length, unsigned int flags),
                         void *context)
{
        struct qmc_xfer_desc *xfer_desc;
        unsigned long flags;
        cbd_t __iomem *bd;
        u16 ctrl;
        int ret;

        /*
         * E bit  UB bit
         *   0       0  : The BD is free
         *   1       1  : The BD is in use, waiting for transfer
         *   0       1  : The BD is in use, waiting for completion
         *   1       0  : Should not happen
         */

        spin_lock_irqsave(&chan->rx_lock, flags);
        bd = chan->rxbd_free;

        ctrl = qmc_read16(&bd->cbd_sc);
        if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
                /* We are full ... */
                ret = -EBUSY;
                goto end;
        }

        qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
        qmc_write32(&bd->cbd_bufaddr, addr);

        xfer_desc = &chan->rx_desc[bd - chan->rxbds];
        xfer_desc->rx_complete = complete;
        xfer_desc->context = context;

        /* Clear previous status flags */
        ctrl &= ~(QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | QMC_BD_RX_NO |
                  QMC_BD_RX_AB | QMC_BD_RX_CR);

        /* Activate the descriptor */
        ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
        wmb(); /* Be sure to flush data before descriptor activation */
        qmc_write16(&bd->cbd_sc, ctrl);

        /* Restart receiver if needed */
        if (chan->is_rx_halted && !chan->is_rx_stopped) {
                /* Restart receiver */
                if (chan->mode == QMC_TRANSPARENT)
                        qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
                else
                        qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
                qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
                chan->is_rx_halted = false;
        }
        chan->rx_pending++;

        if (ctrl & QMC_BD_RX_W)
                chan->rxbd_free = chan->rxbds;
        else
                chan->rxbd_free++;

        ret = 0;
end:
        spin_unlock_irqrestore(&chan->rx_lock, flags);
        return ret;
}
EXPORT_SYMBOL(qmc_chan_read_submit);
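
/*
 * Rx completion reporting: the callback receives the byte count written by
 * the QMC and a mask of QMC_RX_FLAG_HDLC_* flags. These flags are defined
 * to match the hardware QMC_BD_RX_* bits exactly, as enforced by the
 * BUILD_BUG_ON() checks in qmc_chan_read_done() below.
 */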

static void qmc_chan_read_done(struct qmc_chan *chan)
{
        void (*complete)(void *context, size_t size, unsigned int flags);
        struct qmc_xfer_desc *xfer_desc;
        unsigned long flags;
        cbd_t __iomem *bd;
        void *context;
        u16 datalen;
        u16 ctrl;

        /*
         * E bit  UB bit
         *   0       0  : The BD is free
         *   1       1  : The BD is in use, waiting for transfer
         *   0       1  : The BD is in use, waiting for completion
         *   1       0  : Should not happen
         */

        spin_lock_irqsave(&chan->rx_lock, flags);
        bd = chan->rxbd_done;

        ctrl = qmc_read16(&bd->cbd_sc);
        while (!(ctrl & QMC_BD_RX_E)) {
                if (!(ctrl & QMC_BD_RX_UB))
                        goto end;

                xfer_desc = &chan->rx_desc[bd - chan->rxbds];
                complete = xfer_desc->rx_complete;
                context = xfer_desc->context;
                xfer_desc->rx_complete = NULL;
                xfer_desc->context = NULL;

                datalen = qmc_read16(&bd->cbd_datlen);
                qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_RX_UB);

                if (ctrl & QMC_BD_RX_W)
                        chan->rxbd_done = chan->rxbds;
                else
                        chan->rxbd_done++;

                chan->rx_pending--;

                if (complete) {
                        spin_unlock_irqrestore(&chan->rx_lock, flags);

                        /*
                         * Avoid conversion between internal hardware flags and
                         * the software API flags.
                         * -> Be sure that the software API flags are consistent
                         *    with the hardware flags
                         */
                        BUILD_BUG_ON(QMC_RX_FLAG_HDLC_LAST  != QMC_BD_RX_L);
                        BUILD_BUG_ON(QMC_RX_FLAG_HDLC_FIRST != QMC_BD_RX_F);
                        BUILD_BUG_ON(QMC_RX_FLAG_HDLC_OVF   != QMC_BD_RX_LG);
                        BUILD_BUG_ON(QMC_RX_FLAG_HDLC_UNA   != QMC_BD_RX_NO);
                        BUILD_BUG_ON(QMC_RX_FLAG_HDLC_ABORT != QMC_BD_RX_AB);
                        BUILD_BUG_ON(QMC_RX_FLAG_HDLC_CRC   != QMC_BD_RX_CR);

                        complete(context, datalen,
                                 ctrl & (QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG |
                                         QMC_BD_RX_NO | QMC_BD_RX_AB | QMC_BD_RX_CR));
                        spin_lock_irqsave(&chan->rx_lock, flags);
                }

                bd = chan->rxbd_done;
                ctrl = qmc_read16(&bd->cbd_sc);
        }

end:
        spin_unlock_irqrestore(&chan->rx_lock, flags);
}

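/*
 * The channel-to-time-slot mapping tables come in two layouts, selected by
 * qmc_chan_setup_tsa() from the number of TS the TSA assigned to the serial:
 *  - 32 Tx and 32 Rx TS at most: two independent 32-entry tables
 *    (qmc_chan_setup_tsa_32rx_32tx),
 *  - more than 32 TS in one direction: TSATRX and TSATTX form a single
 *    64-entry table common to both directions (qmc_chan_setup_tsa_64rxtx),
 *    which requires identical Tx and Rx time slot masks.
 */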
static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info)
{
        unsigned int i;
        u16 curr;
        u16 val;

        /*
         * Use a common Tx/Rx 64-entry table.
         * The Tx and Rx time slot masks must be identical.
         */
        if (chan->tx_ts_mask != chan->rx_ts_mask) {
                dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
                return -EINVAL;
        }

        val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);

        /* Check entries based on the Rx mask */
        for (i = 0; i < info->nb_rx_ts; i++) {
                if (!(chan->rx_ts_mask & (((u64)1) << i)))
                        continue;

                curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
                if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
                        dev_err(chan->qmc->dev, "chan %u TxRx entry %d already used\n",
                                chan->id, i);
                        return -EBUSY;
                }
        }

        /* Set entries based on the Rx mask */
        for (i = 0; i < info->nb_rx_ts; i++) {
                if (!(chan->rx_ts_mask & (((u64)1) << i)))
                        continue;

                qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
                                 ~QMC_TSA_WRAP, val);
        }

        return 0;
}

static int qmc_chan_setup_tsa_32rx_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info)
{
        unsigned int i;
        u16 curr;
        u16 val;

        /* Use a 32-entry Tx table and a 32-entry Rx table */

        val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);

        /* Check entries based on the Rx mask */
        for (i = 0; i < info->nb_rx_ts; i++) {
                if (!(chan->rx_ts_mask & (((u64)1) << i)))
                        continue;

                curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
                if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
                        dev_err(chan->qmc->dev, "chan %u Rx entry %d already used\n",
                                chan->id, i);
                        return -EBUSY;
                }
        }
        /* Check entries based on the Tx mask */
        for (i = 0; i < info->nb_tx_ts; i++) {
                if (!(chan->tx_ts_mask & (((u64)1) << i)))
                        continue;

                curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2));
                if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
                        dev_err(chan->qmc->dev, "chan %u Tx entry %d already used\n",
                                chan->id, i);
                        return -EBUSY;
                }
        }

        /* Set entries based on the Rx mask */
        for (i = 0; i < info->nb_rx_ts; i++) {
                if (!(chan->rx_ts_mask & (((u64)1) << i)))
                        continue;

                qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
                                 ~QMC_TSA_WRAP, val);
        }
        /* Set entries based on the Tx mask */
        for (i = 0; i < info->nb_tx_ts; i++) {
                if (!(chan->tx_ts_mask & (((u64)1) << i)))
                        continue;

                qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
                                 ~QMC_TSA_WRAP, val);
        }

        return 0;
}

static int qmc_chan_setup_tsa(struct qmc_chan *chan)
{
        struct tsa_serial_info info;
        int ret;

        /* Retrieve info from the TSA related serial */
        ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
        if (ret)
                return ret;

        /*
         * Set up one common 64-entry table or two 32-entry tables (one for
         * Tx and one for Rx) according to the number of assigned TS.
         */
        return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
                qmc_chan_setup_tsa_64rxtx(chan, &info) :
                qmc_chan_setup_tsa_32rx_32tx(chan, &info);
}

static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
{
        return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
}

static int qmc_chan_stop_rx(struct qmc_chan *chan)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chan->rx_lock, flags);

        /* Send STOP RECEIVE command */
        ret = qmc_chan_command(chan, 0x0);
        if (ret) {
                dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
                        chan->id, ret);
                goto end;
        }

        chan->is_rx_stopped = true;

end:
        spin_unlock_irqrestore(&chan->rx_lock, flags);
        return ret;
}

static int qmc_chan_stop_tx(struct qmc_chan *chan)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chan->tx_lock, flags);

        /* Send STOP TRANSMIT command */
        ret = qmc_chan_command(chan, 0x1);
        if (ret) {
                dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
                        chan->id, ret);
                goto end;
        }

        chan->is_tx_stopped = true;

end:
        spin_unlock_irqrestore(&chan->tx_lock, flags);
        return ret;
}

int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
        int ret;

        if (direction & QMC_CHAN_READ) {
                ret = qmc_chan_stop_rx(chan);
                if (ret)
                        return ret;
        }

        if (direction & QMC_CHAN_WRITE) {
                ret = qmc_chan_stop_tx(chan);
                if (ret)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL(qmc_chan_stop);

static void qmc_chan_start_rx(struct qmc_chan *chan)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->rx_lock, flags);

        /* Restart the receiver */
        if (chan->mode == QMC_TRANSPARENT)
                qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
        else
                qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
        qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
        chan->is_rx_halted = false;

        chan->is_rx_stopped = false;

        spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static void qmc_chan_start_tx(struct qmc_chan *chan)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->tx_lock, flags);

        /*
         * Enable channel transmitter as it could be disabled if
         * qmc_chan_reset() was called.
         */
        qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

        /* Set the POL bit in the channel mode register */
        qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

        chan->is_tx_stopped = false;

        spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_start(struct qmc_chan *chan, int direction)
{
        if (direction & QMC_CHAN_READ)
                qmc_chan_start_rx(chan);

        if (direction & QMC_CHAN_WRITE)
                qmc_chan_start_tx(chan);

        return 0;
}
EXPORT_SYMBOL(qmc_chan_start);

static void qmc_chan_reset_rx(struct qmc_chan *chan)
{
        struct qmc_xfer_desc *xfer_desc;
        unsigned long flags;
        cbd_t __iomem *bd;
        u16 ctrl;

        spin_lock_irqsave(&chan->rx_lock, flags);
        bd = chan->rxbds;
        do {
                ctrl = qmc_read16(&bd->cbd_sc);
                qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_RX_UB | QMC_BD_RX_E));

                xfer_desc = &chan->rx_desc[bd - chan->rxbds];
                xfer_desc->rx_complete = NULL;
                xfer_desc->context = NULL;

                bd++;
        } while (!(ctrl & QMC_BD_RX_W));

        chan->rxbd_free = chan->rxbds;
        chan->rxbd_done = chan->rxbds;
        qmc_write16(chan->s_param + QMC_SPE_RBPTR,
                    qmc_read16(chan->s_param + QMC_SPE_RBASE));

        chan->rx_pending = 0;

        spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static void qmc_chan_reset_tx(struct qmc_chan *chan)
{
        struct qmc_xfer_desc *xfer_desc;
        unsigned long flags;
        cbd_t __iomem *bd;
        u16 ctrl;

        spin_lock_irqsave(&chan->tx_lock, flags);

        /* Disable transmitter. It will be re-enabled in qmc_chan_start() */
        qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

        bd = chan->txbds;
        do {
                ctrl = qmc_read16(&bd->cbd_sc);
                qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_TX_UB | QMC_BD_TX_R));

                xfer_desc = &chan->tx_desc[bd - chan->txbds];
                xfer_desc->tx_complete = NULL;
                xfer_desc->context = NULL;

                bd++;
        } while (!(ctrl & QMC_BD_TX_W));

        chan->txbd_free = chan->txbds;
        chan->txbd_done = chan->txbds;
        qmc_write16(chan->s_param + QMC_SPE_TBPTR,
                    qmc_read16(chan->s_param + QMC_SPE_TBASE));

        /* Reset TSTATE and ZISTATE to their initial value */
        qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
        qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);

        spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_reset(struct qmc_chan *chan, int direction)
{
        if (direction & QMC_CHAN_READ)
                qmc_chan_reset_rx(chan);

        if (direction & QMC_CHAN_WRITE)
                qmc_chan_reset_tx(chan);

        return 0;
}
EXPORT_SYMBOL(qmc_chan_reset);

static int qmc_check_chans(struct qmc *qmc)
{
        struct tsa_serial_info info;
        bool is_one_table = false;
        struct qmc_chan *chan;
        u64 tx_ts_mask = 0;
        u64 rx_ts_mask = 0;
        u64 tx_ts_assigned_mask;
        u64 rx_ts_assigned_mask;
        int ret;

        /* Retrieve info from the TSA related serial */
        ret = tsa_serial_get_info(qmc->tsa_serial, &info);
        if (ret)
                return ret;

        if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) {
                dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
                return -EINVAL;
        }

        /*
         * If more than 32 TS are assigned to this serial, one common table is
         * used for Tx and Rx and so masks must be equal for all channels.
         */
        if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) {
                if (info.nb_tx_ts != info.nb_rx_ts) {
                        dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
                        return -EINVAL;
                }
                is_one_table = true;
        }

        tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
        rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;

        list_for_each_entry(chan, &qmc->chan_head, list) {
                if (chan->tx_ts_mask > tx_ts_assigned_mask) {
                        dev_err(qmc->dev, "chan %u uses TSA unassigned Tx TS\n", chan->id);
                        return -EINVAL;
                }
                if (tx_ts_mask & chan->tx_ts_mask) {
                        dev_err(qmc->dev, "chan %u uses an already used Tx TS\n", chan->id);
                        return -EINVAL;
                }

                if (chan->rx_ts_mask > rx_ts_assigned_mask) {
                        dev_err(qmc->dev, "chan %u uses TSA unassigned Rx TS\n", chan->id);
                        return -EINVAL;
                }
                if (rx_ts_mask & chan->rx_ts_mask) {
                        dev_err(qmc->dev, "chan %u uses an already used Rx TS\n", chan->id);
                        return -EINVAL;
                }

                if (is_one_table && (chan->tx_ts_mask != chan->rx_ts_mask)) {
                        dev_err(qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
                        return -EINVAL;
                }

                tx_ts_mask |= chan->tx_ts_mask;
                rx_ts_mask |= chan->rx_ts_mask;
        }

        return 0;
}

static unsigned int qmc_nb_chans(struct qmc *qmc)
{
        unsigned int count = 0;
        struct qmc_chan *chan;

        list_for_each_entry(chan, &qmc->chan_head, list)
                count++;

        return count;
}

static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
{
        struct device_node *chan_np;
        struct qmc_chan *chan;
        const char *mode;
        u32 chan_id;
        u64 ts_mask;
        int ret;

        for_each_available_child_of_node(np, chan_np) {
                ret = of_property_read_u32(chan_np, "reg", &chan_id);
                if (ret) {
                        dev_err(qmc->dev, "%pOF: failed to read reg\n", chan_np);
                        of_node_put(chan_np);
                        return ret;
                }
                if (chan_id > 63) {
                        dev_err(qmc->dev, "%pOF: Invalid chan_id\n", chan_np);
                        of_node_put(chan_np);
                        return -EINVAL;
                }

                chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
                if (!chan) {
                        of_node_put(chan_np);
                        return -ENOMEM;
                }

                chan->id = chan_id;
                spin_lock_init(&chan->rx_lock);
                spin_lock_init(&chan->tx_lock);

                ret = of_property_read_u64(chan_np, "fsl,tx-ts-mask", &ts_mask);
                if (ret) {
                        dev_err(qmc->dev, "%pOF: failed to read fsl,tx-ts-mask\n",
                                chan_np);
                        of_node_put(chan_np);
                        return ret;
                }
                chan->tx_ts_mask_avail = ts_mask;
                chan->tx_ts_mask = chan->tx_ts_mask_avail;

                ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
                if (ret) {
                        dev_err(qmc->dev, "%pOF: failed to read fsl,rx-ts-mask\n",
                                chan_np);
                        of_node_put(chan_np);
                        return ret;
                }
                chan->rx_ts_mask_avail = ts_mask;
                chan->rx_ts_mask = chan->rx_ts_mask_avail;

                mode = "transparent";
                ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
                if (ret && ret != -EINVAL) {
                        dev_err(qmc->dev, "%pOF: failed to read fsl,operational-mode\n",
                                chan_np);
                        of_node_put(chan_np);
                        return ret;
                }
                if (!strcmp(mode, "transparent")) {
                        chan->mode = QMC_TRANSPARENT;
                } else if (!strcmp(mode, "hdlc")) {
                        chan->mode = QMC_HDLC;
                } else {
                        dev_err(qmc->dev, "%pOF: Invalid fsl,operational-mode (%s)\n",
                                chan_np, mode);
                        of_node_put(chan_np);
                        return -EINVAL;
                }

                chan->is_reverse_data = of_property_read_bool(chan_np,
                                                              "fsl,reverse-data");

                list_add_tail(&chan->list, &qmc->chan_head);
                qmc->chans[chan->id] = chan;
        }

        return qmc_check_chans(qmc);
}

static int qmc_init_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
{
        unsigned int i;
        u16 val;

        /*
         * Use a common Tx/Rx 64-entry table.
         * Everything was previously checked: the Tx and Rx parameters are
         * identical, so use the Rx parameters to build the table.
         */

        /* Invalidate all entries */
        for (i = 0; i < 64; i++)
                qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);

        /* Set Wrap bit on last entry */
        qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
                      QMC_TSA_WRAP);

        /* Init pointers to the table */
        val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
        qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
        qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
        qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
        qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

        return 0;
}

static int qmc_init_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
{
        unsigned int i;
        u16 val;

        /*
         * Use a 32-entry Tx table and a 32-entry Rx table.
         * Everything was previously checked.
         */

        /* Invalidate all entries */
        for (i = 0; i < 32; i++) {
                qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
                qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
        }

        /* Set Wrap bit on last entries */
        qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
                      QMC_TSA_WRAP);
        qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATTX + ((info->nb_tx_ts - 1) * 2),
                      QMC_TSA_WRAP);

        /* Init Rx pointers ... */
        val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
        qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
        qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);

        /* ... and Tx pointers */
        val = qmc->scc_pram_offset + QMC_GBL_TSATTX;
        qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
        qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

        return 0;
}

static int qmc_init_tsa(struct qmc *qmc)
{
        struct tsa_serial_info info;
        int ret;

        /* Retrieve info from the TSA related serial */
        ret = tsa_serial_get_info(qmc->tsa_serial, &info);
        if (ret)
                return ret;

        /*
         * Initialize one common 64-entry table or two 32-entry tables (one
         * for Tx and one for Rx) according to the number of assigned TS.
         */
        return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
                qmc_init_tsa_64rxtx(qmc, &info) :
                qmc_init_tsa_32rx_32tx(qmc, &info);
}

static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
{
        struct tsa_serial_info info;
        u16 first_rx, last_tx;
        u16 trnsync;
        int ret;

        /* Retrieve info from the TSA related serial */
        ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
        if (ret)
                return ret;

        /* Find the first Rx TS allocated to the channel */
        first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;

        /* Find the last Tx TS allocated to the channel */
        last_tx = fls64(chan->tx_ts_mask);

        trnsync = 0;
        if (info.nb_rx_ts)
                trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
        if (info.nb_tx_ts)
                trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);

        qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);

        dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
                chan->id, trnsync,
                first_rx, info.nb_rx_ts, chan->rx_ts_mask,
                last_tx, info.nb_tx_ts, chan->tx_ts_mask);

        return 0;
}

static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
{
        unsigned int i;
        cbd_t __iomem *bd;
        int ret;
        u16 val;

        chan->qmc = qmc;

        ret = qmc_chan_setup_tsa(chan);
        if (ret)
                return ret;

        /* Set channel specific parameter base address */
        chan->s_param = qmc->dpram + (chan->id * 64);
        /* 16 bd per channel (8 rx and 8 tx) */
        chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
        chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;

        chan->txbd_free = chan->txbds;
        chan->txbd_done = chan->txbds;
        chan->rxbd_free = chan->rxbds;
        chan->rxbd_done = chan->rxbds;

        /* TBASE and TBPTR */
        val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
        qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
        qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);

        /* RBASE and RBPTR */
        val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
        qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
        qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
        qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
        qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
        qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
        if (chan->mode == QMC_TRANSPARENT) {
                qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
                qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
                val = QMC_SPE_CHAMR_MODE_TRANSP | QMC_SPE_CHAMR_TRANSP_SYNC;
                if (chan->is_reverse_data)
                        val |= QMC_SPE_CHAMR_TRANSP_RD;
                qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
                ret = qmc_setup_chan_trnsync(qmc, chan);
                if (ret)
                        return ret;
        } else {
                qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
                qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
                qmc_write16(chan->s_param + QMC_SPE_CHAMR,
                        QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
        }

        /* Do not enable interrupts now. They will be enabled later */
        qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);

        /* Init Rx BDs and set Wrap bit on last descriptor */
        BUILD_BUG_ON(QMC_NB_RXBDS == 0);
        val = QMC_BD_RX_I;
        for (i = 0; i < QMC_NB_RXBDS; i++) {
                bd = chan->rxbds + i;
                qmc_write16(&bd->cbd_sc, val);
        }
        bd = chan->rxbds + QMC_NB_RXBDS - 1;
        qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);

        /* Init Tx BDs and set Wrap bit on last descriptor */
        BUILD_BUG_ON(QMC_NB_TXBDS == 0);
        val = QMC_BD_TX_I;
        if (chan->mode == QMC_HDLC)
                val |= QMC_BD_TX_L | QMC_BD_TX_TC;
        for (i = 0; i < QMC_NB_TXBDS; i++) {
                bd = chan->txbds + i;
                qmc_write16(&bd->cbd_sc, val);
        }
        bd = chan->txbds + QMC_NB_TXBDS - 1;
        qmc_write16(&bd->cbd_sc, val | QMC_BD_TX_W);

        return 0;
}

static int qmc_setup_chans(struct qmc *qmc)
{
        struct qmc_chan *chan;
        int ret;

        list_for_each_entry(chan, &qmc->chan_head, list) {
                ret = qmc_setup_chan(qmc, chan);
                if (ret)
                        return ret;
        }

        return 0;
}

static int qmc_finalize_chans(struct qmc *qmc)
{
        struct qmc_chan *chan;
        int ret;

        list_for_each_entry(chan, &qmc->chan_head, list) {
                /* Unmask channel interrupts */
                if (chan->mode == QMC_HDLC) {
                        qmc_write16(chan->s_param + QMC_SPE_INTMSK,
                                    QMC_INT_NID | QMC_INT_IDL | QMC_INT_MRF |
                                    QMC_INT_UN | QMC_INT_RXF | QMC_INT_BSY |
                                    QMC_INT_TXB | QMC_INT_RXB);
                } else {
                        qmc_write16(chan->s_param + QMC_SPE_INTMSK,
                                    QMC_INT_UN | QMC_INT_BSY |
                                    QMC_INT_TXB | QMC_INT_RXB);
                }

                /* Force the channel to stop */
                ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
                if (ret)
                        return ret;
        }

        return 0;
}

static int qmc_setup_ints(struct qmc *qmc)
{
        unsigned int i;
        u16 __iomem *last;

        /* Zero all entries */
        for (i = 0; i < (qmc->int_size / sizeof(u16)); i++)
                qmc_write16(qmc->int_table + i, 0x0000);

        /* Set Wrap bit on last entry */
        if (qmc->int_size >= sizeof(u16)) {
                last = qmc->int_table + (qmc->int_size / sizeof(u16)) - 1;
                qmc_write16(last, QMC_INT_W);
        }

        return 0;
}

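/*
 * Process the interrupt circular table: handle every valid entry, clear it
 * (keeping only the Wrap bit) and stop on the first non-valid entry.
 */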
static void qmc_irq_gint(struct qmc *qmc)
{
        struct qmc_chan *chan;
        unsigned int chan_id;
        unsigned long flags;
        u16 int_entry;

        int_entry = qmc_read16(qmc->int_curr);
        while (int_entry & QMC_INT_V) {
                /* Clear all but the Wrap bit */
                qmc_write16(qmc->int_curr, int_entry & QMC_INT_W);

                chan_id = QMC_INT_GET_CHANNEL(int_entry);
                chan = qmc->chans[chan_id];
                if (!chan) {
                        dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
                        goto int_next;
                }

                if (int_entry & QMC_INT_TXB)
                        qmc_chan_write_done(chan);

                if (int_entry & QMC_INT_UN) {
                        dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
                                 int_entry);
                        chan->nb_tx_underrun++;
                }

                if (int_entry & QMC_INT_BSY) {
                        dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
                                 int_entry);
                        chan->nb_rx_busy++;
                        /* Restart the receiver if needed */
                        spin_lock_irqsave(&chan->rx_lock, flags);
                        if (chan->rx_pending && !chan->is_rx_stopped) {
                                if (chan->mode == QMC_TRANSPARENT)
                                        qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
                                else
                                        qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
                                qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
                                chan->is_rx_halted = false;
                        } else {
                                chan->is_rx_halted = true;
                        }
                        spin_unlock_irqrestore(&chan->rx_lock, flags);
                }

                if (int_entry & QMC_INT_RXB)
                        qmc_chan_read_done(chan);

int_next:
                if (int_entry & QMC_INT_W)
                        qmc->int_curr = qmc->int_table;
                else
                        qmc->int_curr++;
                int_entry = qmc_read16(qmc->int_curr);
        }
}

static irqreturn_t qmc_irq_handler(int irq, void *priv)
{
        struct qmc *qmc = (struct qmc *)priv;
        u16 scce;

        scce = qmc_read16(qmc->scc_regs + SCC_SCCE);
        qmc_write16(qmc->scc_regs + SCC_SCCE, scce);

        if (unlikely(scce & SCC_SCCE_IQOV))
                dev_info(qmc->dev, "IRQ queue overflow\n");

        if (unlikely(scce & SCC_SCCE_GUN))
                dev_err(qmc->dev, "Global transmitter underrun\n");

        if (unlikely(scce & SCC_SCCE_GOV))
                dev_err(qmc->dev, "Global receiver overrun\n");

        /* normal interrupt */
        if (likely(scce & SCC_SCCE_GINT))
                qmc_irq_gint(qmc);

        return IRQ_HANDLED;
}

static int qmc_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        unsigned int nb_chans;
        struct resource *res;
        struct qmc *qmc;
        int irq;
        int ret;

        qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
        if (!qmc)
                return -ENOMEM;

        qmc->dev = &pdev->dev;
        INIT_LIST_HEAD(&qmc->chan_head);

        qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
        if (IS_ERR(qmc->scc_regs))
                return PTR_ERR(qmc->scc_regs);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
        if (!res)
                return -EINVAL;
        qmc->scc_pram_offset = res->start - get_immrbase();
        qmc->scc_pram = devm_ioremap_resource(qmc->dev, res);
        if (IS_ERR(qmc->scc_pram))
                return PTR_ERR(qmc->scc_pram);

        qmc->dpram  = devm_platform_ioremap_resource_byname(pdev, "dpram");
        if (IS_ERR(qmc->dpram))
                return PTR_ERR(qmc->dpram);

        qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
        if (IS_ERR(qmc->tsa_serial)) {
                return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
                                     "Failed to get TSA serial\n");
        }

        /* Connect the serial (SCC) to TSA */
        ret = tsa_serial_connect(qmc->tsa_serial);
        if (ret) {
                dev_err(qmc->dev, "Failed to connect TSA serial\n");
                return ret;
        }

        /* Parse channel information */
        ret = qmc_of_parse_chans(qmc, np);
        if (ret)
                goto err_tsa_serial_disconnect;

        nb_chans = qmc_nb_chans(qmc);

        /* Init GSMRH and GSMRL registers */
        qmc_write32(qmc->scc_regs + SCC_GSMRH,
                    SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP);

        /* Enable QMC mode */
        qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC);

        /*
         * Allocate the buffer descriptor table
         * 8 rx and 8 tx descriptors per channel
         */
        qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
        qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
                &qmc->bd_dma_addr, GFP_KERNEL);
        if (!qmc->bd_table) {
                dev_err(qmc->dev, "Failed to allocate bd table\n");
                ret = -ENOMEM;
                goto err_tsa_serial_disconnect;
        }
        memset(qmc->bd_table, 0, qmc->bd_size);

        qmc_write32(qmc->scc_pram + QMC_GBL_MCBASE, qmc->bd_dma_addr);

        /* Allocate the interrupt table */
        qmc->int_size = QMC_NB_INTS * sizeof(u16);
        qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
                &qmc->int_dma_addr, GFP_KERNEL);
        if (!qmc->int_table) {
                dev_err(qmc->dev, "Failed to allocate interrupt table\n");
                ret = -ENOMEM;
                goto err_tsa_serial_disconnect;
        }
        memset(qmc->int_table, 0, qmc->int_size);

        qmc->int_curr = qmc->int_table;
        qmc_write32(qmc->scc_pram + QMC_GBL_INTBASE, qmc->int_dma_addr);
        qmc_write32(qmc->scc_pram + QMC_GBL_INTPTR, qmc->int_dma_addr);

        /* Set MRBLR (valid for HDLC only) max MRU + max CRC */
        qmc_write16(qmc->scc_pram + QMC_GBL_MRBLR, HDLC_MAX_MRU + 4);

        qmc_write16(qmc->scc_pram + QMC_GBL_GRFTHR, 1);
        qmc_write16(qmc->scc_pram + QMC_GBL_GRFCNT, 1);

        qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
        qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);

        ret = qmc_init_tsa(qmc);
        if (ret)
                goto err_tsa_serial_disconnect;

        qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);

        ret = qmc_setup_chans(qmc);
        if (ret)
                goto err_tsa_serial_disconnect;

        /* Init interrupts table */
        ret = qmc_setup_ints(qmc);
        if (ret)
                goto err_tsa_serial_disconnect;

        /* Disable and clear interrupts, set the IRQ handler */
        qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
        qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                goto err_tsa_serial_disconnect;
        }
        ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
        if (ret < 0)
                goto err_tsa_serial_disconnect;

        /* Enable interrupts */
        qmc_write16(qmc->scc_regs + SCC_SCCM,
                SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);

        ret = qmc_finalize_chans(qmc);
        if (ret < 0)
                goto err_disable_intr;

        /* Enable transmitter and receiver */
        qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

        platform_set_drvdata(pdev, qmc);

        /* Populate channel related devices */
        ret = devm_of_platform_populate(qmc->dev);
        if (ret)
                goto err_disable_txrx;

        return 0;

err_disable_txrx:
        qmc_clrbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

err_disable_intr:
        qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

err_tsa_serial_disconnect:
        tsa_serial_disconnect(qmc->tsa_serial);
        return ret;
}

static void qmc_remove(struct platform_device *pdev)
{
        struct qmc *qmc = platform_get_drvdata(pdev);

        /* Disable transmitter and receiver */
        qmc_clrbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

        /* Disable interrupts */
        qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

        /* Disconnect the serial from TSA */
        tsa_serial_disconnect(qmc->tsa_serial);
}

static const struct of_device_id qmc_id_table[] = {
        { .compatible = "fsl,cpm1-scc-qmc" },
        {} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_id_table);

static struct platform_driver qmc_driver = {
        .driver = {
                .name = "fsl-qmc",
                .of_match_table = of_match_ptr(qmc_id_table),
        },
        .probe = qmc_probe,
        .remove_new = qmc_remove,
};
module_platform_driver(qmc_driver);

static struct qmc_chan *qmc_chan_get_from_qmc(struct device_node *qmc_np, unsigned int chan_index)
{
        struct platform_device *pdev;
        struct qmc_chan *qmc_chan;
        struct qmc *qmc;

        if (!of_match_node(qmc_driver.driver.of_match_table, qmc_np))
                return ERR_PTR(-EINVAL);

        pdev = of_find_device_by_node(qmc_np);
        if (!pdev)
                return ERR_PTR(-ENODEV);

        qmc = platform_get_drvdata(pdev);
        if (!qmc) {
                platform_device_put(pdev);
                return ERR_PTR(-EPROBE_DEFER);
        }

        if (chan_index >= ARRAY_SIZE(qmc->chans)) {
                platform_device_put(pdev);
                return ERR_PTR(-EINVAL);
        }

        qmc_chan = qmc->chans[chan_index];
        if (!qmc_chan) {
                platform_device_put(pdev);
                return ERR_PTR(-ENOENT);
        }

        return qmc_chan;
}

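/*
 * Retrieve a channel through a phandle plus one argument cell giving the
 * channel index. Illustrative consumer node (the property and label names
 * below are examples, not mandated by this file):
 *
 *   consumer {
 *           fsl,qmc-chan = <&qmc 16>;
 *   };
 *
 *   chan = qmc_chan_get_byphandle(np, "fsl,qmc-chan");
 */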
struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
{
        struct of_phandle_args out_args;
        struct qmc_chan *qmc_chan;
        int ret;

        ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
                                               &out_args);
        if (ret < 0)
                return ERR_PTR(ret);

        if (out_args.args_count != 1) {
                of_node_put(out_args.np);
                return ERR_PTR(-EINVAL);
        }

        qmc_chan = qmc_chan_get_from_qmc(out_args.np, out_args.args[0]);
        of_node_put(out_args.np);
        return qmc_chan;
}
EXPORT_SYMBOL(qmc_chan_get_byphandle);

struct qmc_chan *qmc_chan_get_bychild(struct device_node *np)
{
        struct device_node *qmc_np;
        u32 chan_index;
        int ret;

        qmc_np = np->parent;
        ret = of_property_read_u32(np, "reg", &chan_index);
        if (ret)
                return ERR_PTR(-EINVAL);

        return qmc_chan_get_from_qmc(qmc_np, chan_index);
}
EXPORT_SYMBOL(qmc_chan_get_bychild);

void qmc_chan_put(struct qmc_chan *chan)
{
        put_device(chan->qmc->dev);
}
EXPORT_SYMBOL(qmc_chan_put);

static void devm_qmc_chan_release(struct device *dev, void *res)
{
        struct qmc_chan **qmc_chan = res;

        qmc_chan_put(*qmc_chan);
}

struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
                                             struct device_node *np,
                                             const char *phandle_name)
{
        struct qmc_chan *qmc_chan;
        struct qmc_chan **dr;

        dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return ERR_PTR(-ENOMEM);

        qmc_chan = qmc_chan_get_byphandle(np, phandle_name);
        if (!IS_ERR(qmc_chan)) {
                *dr = qmc_chan;
                devres_add(dev, dr);
        } else {
                devres_free(dr);
        }

        return qmc_chan;
}
EXPORT_SYMBOL(devm_qmc_chan_get_byphandle);

struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
                                           struct device_node *np)
{
        struct qmc_chan *qmc_chan;
        struct qmc_chan **dr;

        dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return ERR_PTR(-ENOMEM);

        qmc_chan = qmc_chan_get_bychild(np);
        if (!IS_ERR(qmc_chan)) {
                *dr = qmc_chan;
                devres_add(dev, dr);
        } else {
                devres_free(dr);
        }

        return qmc_chan;
}
EXPORT_SYMBOL(devm_qmc_chan_get_bychild);

MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM QMC driver");
MODULE_LICENSE("GPL");