// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

static DEFINE_IDA(mhi_controller_ida);

#define mhi_ee(a, b)		[MHI_EE_##a] = b,
#define mhi_ee_end(a, b)	[MHI_EE_##a] = b,

const char * const mhi_ee_str[MHI_EE_MAX] = {
	MHI_EE_LIST
};

#undef dev_st_trans_end

#define dev_st_trans(a, b)	[DEV_ST_TRANSITION_##a] = b,
#define dev_st_trans_end(a, b)	[DEV_ST_TRANSITION_##a] = b,

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	DEV_ST_TRANSITION_LIST
};

#undef ch_state_type_end

#define ch_state_type(a, b)	[MHI_CH_STATE_TYPE_##a] = b,
#define ch_state_type_end(a, b)	[MHI_CH_STATE_TYPE_##a] = b,

const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
	MHI_CH_STATE_TYPE_LIST
};

#undef mhi_pm_state_end

#define mhi_pm_state(a, b)	[MHI_PM_STATE_##a] = b,
#define mhi_pm_state_end(a, b)	[MHI_PM_STATE_##a] = b,

static const char * const mhi_pm_state_str[] = {
	MHI_PM_STATE_LIST
};

const char *to_mhi_pm_state_str(u32 state)
{
	/* The PM state is a bitmask; the highest set bit indexes the name table */
	int index = state ? __fls(state) : 0;

	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return sysfs_emit(buf, "Serial Number: %u\n",
			  mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	u32 hash_segment[MHI_MAX_OEM_PK_HASH_SEGMENTS];
	int i, cnt = 0, ret;

	for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), &hash_segment[i]);
		if (ret) {
			dev_err(dev, "Could not capture OEM PK HASH\n");
			return ret;
		}
	}

	for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++)
		cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n", i, hash_segment[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static ssize_t soc_reset_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_soc_reset(mhi_cntrl);
	return count;
}
static DEVICE_ATTR_WO(soc_reset);

static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	&dev_attr_soc_reset.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);

/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	int i;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
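
/*
 * Request the BHI interrupt (irq[0]) plus one IRQ per non-offload event ring.
 * All requested IRQs are left disabled here; they are only enabled once the
 * controller is powered up.
 */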
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
	int i, ret;

	/* If the controller driver has set irq_flags, use it */
	if (mhi_cntrl->irq_flags)
		irq_flags = mhi_cntrl->irq_flags;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   irq_flags, "bhi", mhi_cntrl);
	if (ret)
		return ret;
	/*
	 * IRQs should be enabled during mhi_async_power_up(), so disable them
	 * explicitly here. Because IRQF_SHARED is used by default when
	 * requesting IRQs, IRQ_NOAUTOEN cannot be relied upon instead.
	 */
	disable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler, irq_flags, "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}

		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	return 0;

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;
		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}
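
/*
 * Free the command, event and channel context memory allocated by
 * mhi_init_dev_ctxt() along with the rings that back them.
 */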
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;
	int i;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}
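
/*
 * Allocate the channel, event and command context structures that are shared
 * with the device over DMA, along with the rings they describe.
 */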
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = le32_to_cpu(chan_ctxt->chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
		chan_ctxt->chcfg = cpu_to_le32(tmp);

		chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
		chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = le32_to_cpu(er_ctxt->intmod);
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
		er_ctxt->intmod = cpu_to_le32(tmp);

		er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
		er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals to the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}
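
/*
 * Program the MMIO registers: context base addresses, the addressing window
 * (iova_start/iova_stop) and the channel/event ring doorbell offsets read
 * back from CHDBOFF and ERDBOFF.
 */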
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 val;
	} reg_info[] = {
		{ CCABAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), },
		{ CCABAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), },
		{ ECABAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), },
		{ ECABAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), },
		{ CRCBAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), },
		{ CRCBAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), },
		{ MHICTRLBASE_HIGHER, upper_32_bits(mhi_cntrl->iova_start), },
		{ MHICTRLBASE_LOWER, lower_32_bits(mhi_cntrl->iova_start), },
		{ MHIDATABASE_HIGHER, upper_32_bits(mhi_cntrl->iova_start), },
		{ MHIDATABASE_LOWER, lower_32_bits(mhi_cntrl->iova_start), },
		{ MHICTRLLIMIT_HIGHER, upper_32_bits(mhi_cntrl->iova_stop), },
		{ MHICTRLLIMIT_LOWER, lower_32_bits(mhi_cntrl->iova_stop), },
		{ MHIDATALIMIT_HIGHER, upper_32_bits(mhi_cntrl->iova_stop), },
		{ MHIDATALIMIT_LOWER, lower_32_bits(mhi_cntrl->iova_stop), },
		{ 0, 0 },
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
		dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
		return -ERANGE;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
		dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
		return -ERANGE;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
			      reg_info[i].val);

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
				  mhi_cntrl->total_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
				  mhi_cntrl->hw_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	return 0;
}
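
/* Release the transfer ring and buffer ring backing a single channel */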
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	if (!chan_ctxt->rbase) /* Already uninitialized */
		return;

	dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	tre_ring->ctxt_wp = NULL;
	chan_ctxt->rbase = 0;

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	/* Update to all cores */
	smp_mb();
}
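
/*
 * Allocate the transfer ring and buffer ring for a channel and mark the
 * channel context as enabled.
 */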
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_ring_element);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_mb();

	return 0;
}
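
/* Populate the per event ring bookkeeping from the controller configuration */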
static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:
	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}
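
/* Populate the per channel bookkeeping from the controller configuration */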
static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so vcalloc() is used here instead of kcalloc() to avoid possible
	 * allocation failures.
	 */
	mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan,
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length must be larger than
		 * the transfer ring length because the device uses internal
		 * logical channels, so the host can queue more buffers than
		 * the transfer ring holds. RSC channels, for example, need a
		 * larger local ring than the transfer ring.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction. So, if chtype is not defined, use the channel
		 * direction as the type.
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->wake_capable = ch_cfg->wake_capable;

		/*
		 * If the MHI host allocates buffers, then the channel direction
		 * should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and directionless channels must be offload
		 * channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid Door bell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);
	return -EINVAL;
}
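
/* Parse the controller configuration: channels, event rings and global limits */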
static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms;
	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);
	return ret;
}
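
/*
 * Validate the controller's callbacks and resources, parse its configuration,
 * set up IRQs and register the controller device with the MHI bus.
 */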
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	int ret, i;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
	    !mhi_cntrl->irq || !mhi_cntrl->reg_len)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_event;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* Used in setting bei field of TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto err_ida_free;

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_setup_irq;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
	mhi_dev->name = dev_name(&mhi_dev->dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_release_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

err_release_dev:
	put_device(&mhi_dev->dev);
error_setup_irq:
	mhi_deinit_free_irq(mhi_cntrl);
err_ida_free:
	ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_event:
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);
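
/* Tear down everything that was set up by mhi_register_controller() */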
void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_deinit_free_irq(mhi_cntrl);
	mhi_destroy_debugfs(mhi_cntrl);

	destroy_workqueue(mhi_cntrl->hiprio_wq);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);
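
/*
 * Allocate the device context and map the BHI/BHIe register regions before
 * power up. If the controller advertises an rddm_size, the RDDM table is
 * also allocated and prepared here.
 */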
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhi_off, bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
	if (ret) {
		dev_err(dev, "Error getting BHI offset\n");
		goto error_reg_offset;
	}

	if (bhi_off >= mhi_cntrl->reg_len) {
		dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
			bhi_off, mhi_cntrl->reg_len);
		ret = -ERANGE;
		goto error_reg_offset;
	}
	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;

	if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		if (bhie_off >= mhi_cntrl->reg_len) {
			dev_err(dev,
				"BHIe offset: 0x%x is out of range: 0x%zx\n",
				bhie_off, mhi_cntrl->reg_len);
			ret = -ERANGE;
			goto error_reg_offset;
		}
		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
	}

	if (mhi_cntrl->rddm_size) {
		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);

		/* Allocate RDDM table for debugging purposes, if specified */
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);
		if (mhi_cntrl->rddm_image) {
			ret = mhi_rddm_prepare(mhi_cntrl,
					       mhi_cntrl->rddm_image);
			if (ret) {
				mhi_free_bhie_table(mhi_cntrl,
						    mhi_cntrl->rddm_image);
				goto error_reg_offset;
			}
		}
	}

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

error_reg_offset:
	mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_cntrl->bhi = NULL;
	mhi_cntrl->bhie = NULL;

	mhi_deinit_dev_ctxt(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set mhi_chan->mhi_dev to NULL here since the MHI devices
	 * for the channels will only get created if the mhi_dev associated
	 * with them is NULL. This scenario occurs during controller suspend
	 * and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}
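
/* Allocate and initialize a bare mhi_device; the caller adds it to the bus */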
struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;

	if (mhi_cntrl->mhi_dev) {
		/* For MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* For the MHI controller device, parent is the bus device (e.g. PCI device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}
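
/*
 * Bus probe: validate the client driver's callbacks against the channels
 * backing this device, then call the client driver's probe().
 */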
static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If the channel supports LPM notifications then status_cb
		 * should be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}

	ret = 0;
	if (dl_chan) {
		/*
		 * If the channel supports LPM notifications then status_cb
		 * should be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by the client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);
	mhi_device_put(mhi_dev);

	return ret;
}
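
/*
 * Bus remove: reset both channels, call the client driver's remove() and
 * de-initialize any channel context that was enabled.
 */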
static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Save the channel state and mark it suspended while it is reset */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
		     ch_state[dir] == MHI_CH_STATE_STOP) &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}
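
/* Match a client device to a driver by comparing channel names from the id_table */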
static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Modem Host Interface");