// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen at any time asynchronously. This function
 * will transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From DISABLE state we can only transition to
 * POR state. Also, while in L2 state, the host cannot jump back to previous
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static const struct mhi_pm_transitions dev_state_transitions[] = {
	/* L0 States */
	{ MHI_PM_DISABLE, MHI_PM_POR },
	{ MHI_PM_POR,
	  MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
	  MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
	  MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR },
	{ MHI_PM_M0,
	  MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
	  MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
	  MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR },
	{ MHI_PM_M2,
	  MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
	  MHI_PM_LD_ERR_FATAL_DETECT },
	{ MHI_PM_M3_ENTER,
	  MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
	  MHI_PM_LD_ERR_FATAL_DETECT },
	{ MHI_PM_M3,
	  MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
	  MHI_PM_LD_ERR_FATAL_DETECT },
	{ MHI_PM_M3_EXIT,
	  MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
	  MHI_PM_LD_ERR_FATAL_DETECT },
	{ MHI_PM_FW_DL_ERR,
	  MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
	  MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT },
	/* L1 States */
	{ MHI_PM_SYS_ERR_DETECT,
	  MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
	  MHI_PM_LD_ERR_FATAL_DETECT },
	{ MHI_PM_SYS_ERR_PROCESS,
	  MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
	  MHI_PM_LD_ERR_FATAL_DETECT },
	/* L2 States */
	{ MHI_PM_SHUTDOWN_PROCESS,
	  MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT },
	/* L3 States */
	{ MHI_PM_LD_ERR_FATAL_DETECT,
	  MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE },
};

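/*
 * Note: each enum mhi_pm_state value is a distinct bit, so pm_state always
 * has exactly one bit set and find_last_bit() below maps it to an index into
 * the table above. The from_state check guards against the enum and the
 * table going out of sync.
 */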
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

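/*
 * Typical caller pattern (as used throughout this file): attempt the state
 * change under pm_lock held in write mode, then check whether it took effect:
 *
 *	write_lock_irq(&mhi_cntrl->pm_lock);
 *	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
 *	write_unlock_irq(&mhi_cntrl->pm_lock);
 *	if (cur_state != MHI_PM_M0)
 *		... the transition was rejected ...
 */
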
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	if (state == MHI_STATE_RESET) {
		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					  MHICTRL_RESET_MASK, 1);
	} else {
		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					  MHICTRL_MHISTATE_MASK, state);
	}

	if (ret)
		dev_err(dev, "Failed to set MHI state to: %s\n",
			mhi_state_str(state));
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

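/*
 * Note: mhi_toggle_dev_wake() takes and immediately releases a device wake
 * vote; the release passes override=true so the WAKE doorbell is not
 * de-asserted in the process. Controllers that advertise doorbell access in
 * M2 (db_access & MHI_PM_M2) get the NOP variant instead; see
 * mhi_async_power_up() where wake_toggle is chosen.
 */
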
/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	u32 timeout_ms;
	int ret, i;

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, 0, interval_us,
				 mhi_cntrl->timeout_ms);
	if (ret) {
		dev_err(dev, "Device failed to clear MHI Reset\n");
		return ret;
	}

	timeout_ms = mhi_cntrl->ready_timeout_ms ?
		mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				 MHISTATUS_READY_MASK, 1, interval_us,
				 timeout_ms);
	if (ret) {
		dev_err(dev, "Device failed to enter MHI Ready\n");
		return ret;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}
	mhi_cntrl->M0++;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
		    mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * to keep it in a low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);

		mhi_cntrl->M2++;
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);

			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	mhi_cntrl->M3++;
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		ee = mhi_get_exec_env(mhi_cntrl);

	if (!MHI_IN_MISSION_MODE(ee)) {
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);
		return -EIO;
	}
	mhi_cntrl->ee = ee;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	wake_up_all(&mhi_cntrl->state_event);

	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
			      mhi_destroy_device);
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
		/* Update to all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state));

	mutex_lock(&mhi_cntrl->pm_mutex);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		/* Skip MHI RESET if in RDDM state */
		if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
			goto skip_mhi_reset;

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					 MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms);
		if (ret)
			dev_err(dev, "Device failed to clear MHI Reset\n");

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);

		if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
			/* wait for ready to be set */
			ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
						 MHISTATUS, MHISTATUS_READY_MASK,
						 1, 25000, mhi_cntrl->timeout_ms);
			if (ret)
				dev_err(dev, "Device failed to enter READY state\n");
		}
	}

skip_mhi_reset:
	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Move to disable state */
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_DISABLE))
		dev_err(dev, "Error moving from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_DISABLE));

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state, prev_state;
	enum dev_st_transition next_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

	/* We must notify MHI control driver so it can clean up first */
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
		goto exit_sys_error_transition;
	}

	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			goto exit_sys_error_transition;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Transition to next state */
	if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (cur_state != MHI_PM_POR) {
			dev_err(dev, "Error moving to state %s from %s\n",
				to_mhi_pm_state_str(MHI_PM_POR),
				to_mhi_pm_state_str(cur_state));
			goto exit_sys_error_transition;
		}
		next_state = DEV_ST_TRANSITION_PBL;
	} else {
		next_state = DEV_ST_TRANSITION_READY;
	}

	mhi_queue_state_transition(mhi_cntrl, next_state);

exit_sys_error_transition:
	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

	return 0;
}

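/*
 * The queued item is consumed by mhi_pm_st_worker() below, which splices the
 * whole transition_list and handles entries in order, so transitions queued
 * from atomic context (hence GFP_ATOMIC above) are serialized in one place.
 */
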
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			if (mhi_cntrl->fbc_download)
				mhi_download_amss_image(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_FP:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_FP;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_sys_error_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Could not enter M0/M1 state");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_dbg(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_dbg(dev, "Waiting for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
		dev_warn(dev, "Resuming from non M3 state (%s)\n",
			 mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
		if (!force)
			return -EINVAL;
	}

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M2 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}

int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	return __mhi_pm_resume(mhi_cntrl, false);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
{
	return __mhi_pm_resume(mhi_cntrl, true);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume_force);

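/*
 * Illustrative sketch (not part of this file): a controller driver would
 * typically call the suspend/resume pair from its own PM callbacks. The
 * function names below are hypothetical:
 *
 *	static int my_ctrl_suspend(struct device *dev)
 *	{
 *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *		return mhi_pm_suspend(mhi_cntrl);
 *	}
 *
 *	static int my_ctrl_resume(struct device *dev)
 *	{
 *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *		return mhi_pm_resume(mhi_cntrl);
 *	}
 */
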
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

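/*
 * Together, the two helpers above implement a refcounted wake protocol:
 * dev_wake counts outstanding wake vote holders, and the wake doorbell is
 * only written on the 0 -> 1 (assert) and 1 -> 0 (de-assert) edges. The
 * atomic_add_unless() fast paths avoid taking wlock when no edge can occur.
 */
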
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	int ret, i;

	dev_info(dev, "Requested to power ON\n");

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	/* Setup BHI INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_POWER_UP_CAPABLE(current_ee)) {
		dev_err(dev, "%s is not a valid EE for power on\n",
			TO_MHI_EXEC_STR(current_ee));
		ret = -EIO;
		goto error_exit;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
		TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));

	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					 MHICTRL_RESET_MASK, 0, interval_us,
					 mhi_cntrl->timeout_ms);
		if (ret) {
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_exit;
		}

		/*
		 * Device clears INTVEC as part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* IRQs have been requested during probe, so we just need to enable them. */
	enable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		enable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_exit:
	mhi_cntrl->pm_state = MHI_PM_DISABLE;
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state, transition_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_cntrl->pm_state;
	if (cur_state == MHI_PM_DISABLE) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return; /* Already powered down */
	}

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
			   MHI_PM_LD_ERR_FATAL_DETECT;

	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state != transition_state) {
		dev_err(dev, "Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		/* Force link down or error fatal detected state */
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	}

	/* mark device inactive to avoid any further host processing */
	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irq(&mhi_cntrl->pm_lock);
	mutex_unlock(&mhi_cntrl->pm_mutex);

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	disable_irq(mhi_cntrl->irq[0]);
}
EXPORT_SYMBOL_GPL(mhi_power_down);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);
	u32 timeout_ms;

	if (ret)
		return ret;

	/* Some devices need more time to become ready during power up */
	timeout_ms = mhi_cntrl->ready_timeout_ms ?
		mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);

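/*
 * Illustrative power-up flow from a controller driver's probe path (a sketch
 * only; error handling beyond the shown calls is omitted):
 *
 *	ret = mhi_register_controller(mhi_cntrl, config);
 *	if (ret)
 *		return ret;
 *
 *	ret = mhi_sync_power_up(mhi_cntrl);	// blocks until mission mode
 *	if (ret)
 *		mhi_unregister_controller(mhi_cntrl);
 */
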
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
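
/*
 * Illustrative client-side wake voting (hypothetical client code): bracket
 * bursts of I/O with get/put so the host will not suspend the device in
 * between (mhi_pm_suspend() returns -EBUSY while votes are held):
 *
 *	ret = mhi_device_get_sync(mhi_dev);	// vote and wait for M0
 *	if (ret)
 *		return ret;
 *	... queue transfers ...
 *	mhi_device_put(mhi_dev);		// drop the vote
 */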