1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2015-2017 Google, Inc
5 * USB Power Delivery protocol stack.
8 #include <linux/completion.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/hrtimer.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/power_supply.h>
18 #include <linux/proc_fs.h>
19 #include <linux/property.h>
20 #include <linux/sched/clock.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/usb.h>
25 #include <linux/usb/pd.h>
26 #include <linux/usb/pd_ado.h>
27 #include <linux/usb/pd_bdo.h>
28 #include <linux/usb/pd_ext_sdb.h>
29 #include <linux/usb/pd_vdo.h>
30 #include <linux/usb/role.h>
31 #include <linux/usb/tcpm.h>
32 #include <linux/usb/typec_altmode.h>
34 #include <uapi/linux/sched/types.h>
36 #define FOREACH_STATE(S) \
39 S(CHECK_CONTAMINANT), \
44 S(SRC_SEND_CAPABILITIES), \
45 S(SRC_SEND_CAPABILITIES_TIMEOUT), \
46 S(SRC_NEGOTIATE_CAPABILITIES), \
47 S(SRC_TRANSITION_SUPPLY), \
49 S(SRC_WAIT_NEW_CAPABILITIES), \
57 S(SNK_DISCOVERY_DEBOUNCE), \
58 S(SNK_DISCOVERY_DEBOUNCE_DONE), \
59 S(SNK_WAIT_CAPABILITIES), \
60 S(SNK_NEGOTIATE_CAPABILITIES), \
61 S(SNK_NEGOTIATE_PPS_CAPABILITIES), \
62 S(SNK_TRANSITION_SINK), \
63 S(SNK_TRANSITION_SINK_VBUS), \
67 S(DEBUG_ACC_ATTACHED), \
68 S(AUDIO_ACC_ATTACHED), \
69 S(AUDIO_ACC_DEBOUNCE), \
72 S(HARD_RESET_START), \
73 S(SRC_HARD_RESET_VBUS_OFF), \
74 S(SRC_HARD_RESET_VBUS_ON), \
75 S(SNK_HARD_RESET_SINK_OFF), \
76 S(SNK_HARD_RESET_WAIT_VBUS), \
77 S(SNK_HARD_RESET_SINK_ON), \
80 S(SRC_SOFT_RESET_WAIT_SNK_TX), \
86 S(DR_SWAP_SEND_TIMEOUT), \
88 S(DR_SWAP_CHANGE_DR), \
92 S(PR_SWAP_SEND_TIMEOUT), \
95 S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
96 S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
97 S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
98 S(PR_SWAP_SRC_SNK_SINK_ON), \
99 S(PR_SWAP_SNK_SRC_SINK_OFF), \
100 S(PR_SWAP_SNK_SRC_SOURCE_ON), \
101 S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP), \
103 S(VCONN_SWAP_ACCEPT), \
104 S(VCONN_SWAP_SEND), \
105 S(VCONN_SWAP_SEND_TIMEOUT), \
106 S(VCONN_SWAP_CANCEL), \
107 S(VCONN_SWAP_START), \
108 S(VCONN_SWAP_WAIT_FOR_VCONN), \
109 S(VCONN_SWAP_TURN_ON_VCONN), \
110 S(VCONN_SWAP_TURN_OFF_VCONN), \
113 S(FR_SWAP_SEND_TIMEOUT), \
114 S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF), \
115 S(FR_SWAP_SNK_SRC_NEW_SINK_READY), \
116 S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED), \
121 S(SNK_TRY_WAIT_DEBOUNCE), \
122 S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS), \
124 S(SRC_TRYWAIT_DEBOUNCE), \
125 S(SRC_TRYWAIT_UNATTACHED), \
129 S(SRC_TRY_DEBOUNCE), \
131 S(SNK_TRYWAIT_DEBOUNCE), \
132 S(SNK_TRYWAIT_VBUS), \
135 S(GET_STATUS_SEND), \
136 S(GET_STATUS_SEND_TIMEOUT), \
137 S(GET_PPS_STATUS_SEND), \
138 S(GET_PPS_STATUS_SEND_TIMEOUT), \
141 S(GET_SINK_CAP_TIMEOUT), \
145 S(PORT_RESET_WAIT_OFF), \
150 #define FOREACH_AMS(S) \
152 S(POWER_NEGOTIATION), \
157 S(GET_SOURCE_CAPABILITIES), \
158 S(GET_SINK_CAPABILITIES), \
159 S(POWER_ROLE_SWAP), \
164 S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
165 S(GETTING_SOURCE_SINK_STATUS), \
166 S(GETTING_BATTERY_CAPABILITIES), \
167 S(GETTING_BATTERY_STATUS), \
168 S(GETTING_MANUFACTURER_INFORMATION), \
170 S(FIRMWARE_UPDATE), \
171 S(DISCOVER_IDENTITY), \
172 S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY), \
175 S(DFP_TO_UFP_ENTER_MODE), \
176 S(DFP_TO_UFP_EXIT_MODE), \
177 S(DFP_TO_CABLE_PLUG_ENTER_MODE), \
178 S(DFP_TO_CABLE_PLUG_EXIT_MODE), \
181 S(UNSTRUCTURED_VDMS), \
182 S(STRUCTURED_VDMS), \
186 #define GENERATE_ENUM(e) e
187 #define GENERATE_STRING(s) #s
190 FOREACH_STATE(GENERATE_ENUM)
193 static const char * const tcpm_states[] = {
194 FOREACH_STATE(GENERATE_STRING)
198 FOREACH_AMS(GENERATE_ENUM)
201 static const char * const tcpm_ams_str[] = {
202 FOREACH_AMS(GENERATE_STRING)
206 VDM_STATE_ERR_BUSY = -3,
207 VDM_STATE_ERR_SEND = -2,
208 VDM_STATE_ERR_TMOUT = -1,
210 /* Anything >0 represents an active state */
213 VDM_STATE_WAIT_RSP_BUSY = 3,
214 VDM_STATE_SEND_MESSAGE = 4,
217 enum pd_msg_request {
221 PD_MSG_CTRL_NOT_SUPP,
222 PD_MSG_DATA_SINK_CAP,
223 PD_MSG_DATA_SOURCE_CAP,
228 ADEV_NOTIFY_USB_AND_QUEUE_VDM,
230 ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
235 * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
236 * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
239 enum frs_typec_current {
246 /* Events from low level driver */
248 #define TCPM_CC_EVENT BIT(0)
249 #define TCPM_VBUS_EVENT BIT(1)
250 #define TCPM_RESET_EVENT BIT(2)
251 #define TCPM_FRS_EVENT BIT(3)
252 #define TCPM_SOURCING_VBUS BIT(4)
253 #define TCPM_PORT_CLEAN BIT(5)
254 #define TCPM_PORT_ERROR BIT(6)
256 #define LOG_BUFFER_ENTRIES 1024
257 #define LOG_BUFFER_ENTRY_SIZE 128
259 /* Alternate mode support */
261 #define SVID_DISCOVERY_MAX 16
262 #define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
264 #define GET_SINK_CAP_RETRY_MS 100
265 #define SEND_DISCOVER_RETRY_MS 100
267 struct pd_mode_data {
268 int svid_index; /* current SVID index */
270 u16 svids[SVID_DISCOVERY_MAX];
271 int altmodes; /* number of alternate modes */
272 struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
276 * @min_volt: Actual min voltage at the local port
277 * @req_min_volt: Requested min voltage to the port partner
278 * @max_volt: Actual max voltage at the local port
279 * @req_max_volt: Requested max voltage to the port partner
280 * @max_curr: Actual max current at the local port
281 * @req_max_curr: Requested max current of the port partner
282 * @req_out_volt: Requested output voltage to the port partner
283 * @req_op_curr: Requested operating current to the port partner
284 * @supported: Parter has at least one APDO hence supports PPS
285 * @active: PPS mode is active
301 struct usb_power_delivery *pd;
302 struct usb_power_delivery_capabilities *source_cap;
303 struct usb_power_delivery_capabilities_desc source_desc;
304 struct usb_power_delivery_capabilities *sink_cap;
305 struct usb_power_delivery_capabilities_desc sink_desc;
306 unsigned int operating_snk_mw;
312 struct mutex lock; /* tcpm state machine lock */
313 struct kthread_worker *wq;
315 struct typec_capability typec_caps;
316 struct typec_port *typec_port;
318 struct tcpc_dev *tcpc;
319 struct usb_role_switch *role_sw;
321 enum typec_role vconn_role;
322 enum typec_role pwr_role;
323 enum typec_data_role data_role;
324 enum typec_pwr_opmode pwr_opmode;
326 struct usb_pd_identity partner_ident;
327 struct typec_partner_desc partner_desc;
328 struct typec_partner *partner;
330 enum typec_cc_status cc_req;
331 enum typec_cc_status src_rp; /* work only if pd_supported == false */
333 enum typec_cc_status cc1;
334 enum typec_cc_status cc2;
335 enum typec_cc_polarity polarity;
341 enum typec_port_type port_type;
344 * Set to true when vbus is greater than VSAFE5V min.
345 * Set to false when vbus falls below vSinkDisconnect max threshold.
350 * Set to true when vbus is less than VSAFE0V max.
351 * Set to false when vbus is greater than VSAFE0V max.
359 /* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
367 enum pd_msg_request queued_message;
369 enum tcpm_state enter_state;
370 enum tcpm_state prev_state;
371 enum tcpm_state state;
372 enum tcpm_state delayed_state;
373 ktime_t delayed_runtime;
374 unsigned long delay_ms;
376 spinlock_t pd_event_lock;
379 struct kthread_work event_work;
380 struct hrtimer state_machine_timer;
381 struct kthread_work state_machine;
382 struct hrtimer vdm_state_machine_timer;
383 struct kthread_work vdm_state_machine;
384 struct hrtimer enable_frs_timer;
385 struct kthread_work enable_frs;
386 struct hrtimer send_discover_timer;
387 struct kthread_work send_discover_work;
388 bool state_machine_running;
389 /* Set to true when VDM State Machine has following actions. */
392 struct completion tx_complete;
393 enum tcpm_transmit_status tx_status;
395 struct mutex swap_lock; /* swap command lock */
397 bool non_pd_role_swap;
398 struct completion swap_complete;
401 unsigned int negotiated_rev;
402 unsigned int message_id;
403 unsigned int caps_count;
404 unsigned int hard_reset_count;
406 bool explicit_contract;
407 unsigned int rx_msgid;
410 struct usb_power_delivery **pds;
411 struct pd_data **pd_list;
412 struct usb_power_delivery_capabilities *port_source_caps;
413 struct usb_power_delivery_capabilities *port_sink_caps;
414 struct usb_power_delivery *partner_pd;
415 struct usb_power_delivery_capabilities *partner_source_caps;
416 struct usb_power_delivery_capabilities *partner_sink_caps;
417 struct usb_power_delivery *selected_pd;
419 /* Partner capabilities/requests */
421 u32 source_caps[PDO_MAX_OBJECTS];
422 unsigned int nr_source_caps;
423 u32 sink_caps[PDO_MAX_OBJECTS];
424 unsigned int nr_sink_caps;
426 /* Local capabilities */
427 unsigned int pd_count;
428 u32 src_pdo[PDO_MAX_OBJECTS];
429 unsigned int nr_src_pdo;
430 u32 snk_pdo[PDO_MAX_OBJECTS];
431 unsigned int nr_snk_pdo;
432 u32 snk_vdo_v1[VDO_MAX_OBJECTS];
433 unsigned int nr_snk_vdo_v1;
434 u32 snk_vdo[VDO_MAX_OBJECTS];
435 unsigned int nr_snk_vdo;
437 unsigned int operating_snk_mw;
438 bool update_sink_caps;
440 /* Requested current / voltage to the port partner */
441 u32 req_current_limit;
442 u32 req_supply_voltage;
443 /* Actual current / voltage limit of the local port */
447 /* Used to export TA voltage and current */
448 struct power_supply *psy;
449 struct power_supply_desc psy_desc;
450 enum power_supply_usb_type usb_type;
454 /* PD state for Vendor Defined Messages */
455 enum vdm_states vdm_state;
457 /* next Vendor Defined Message to send */
458 u32 vdo_data[VDO_MAX_SIZE];
460 /* VDO to retry if UFP responder replied busy */
464 struct pd_pps_data pps_data;
465 struct completion pps_complete;
469 /* Alternate mode data */
470 struct pd_mode_data mode_data;
471 struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
472 struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
474 /* Deadline in jiffies to exit src_try_wait state */
475 unsigned long max_wait;
477 /* port belongs to a self powered device */
481 enum frs_typec_current new_source_frs_current;
483 /* Sink caps have been queried */
486 /* Collision Avoidance and Atomic Message Sequence */
487 enum tcpm_state upcoming_state;
489 enum tcpm_ams next_ams;
492 /* Auto vbus discharge status */
493 bool auto_vbus_discharge_enabled;
496 * When set, port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and
497 * the actual current limit after RX of PD_CTRL_PSRDY for PD link,
498 * SNK_READY for non-pd link.
500 bool slow_charger_loop;
503 * When true indicates that the lower level drivers indicate potential presence
504 * of contaminant in the connector pins based on the tcpm state machine
507 bool potential_contaminant;
508 #ifdef CONFIG_DEBUG_FS
509 struct dentry *dentry;
510 struct mutex logbuffer_lock; /* log buffer access lock */
513 u8 *logbuffer[LOG_BUFFER_ENTRIES];
518 struct kthread_work work;
519 struct tcpm_port *port;
520 struct pd_message msg;
523 static const char * const pd_rev[] = {
/* CC line shows a source pull-up (Rp) — the remote end can supply power. */
#define tcpm_cc_is_sink(cc) \
	((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
	 (cc) == TYPEC_CC_RP_3_0)

/* As long as cc is pulled up, we can consider it as sink. */
#define tcpm_port_is_sink(port) \
	(tcpm_cc_is_sink((port)->cc1) || tcpm_cc_is_sink((port)->cc2))

#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)

/* Exactly one CC line presents Rd: a sink is attached, we act as source. */
#define tcpm_port_is_source(port) \
	((tcpm_cc_is_source((port)->cc1) && \
	  !tcpm_cc_is_source((port)->cc2)) || \
	 (tcpm_cc_is_source((port)->cc2) && \
	  !tcpm_cc_is_source((port)->cc1)))

/* Rd on both CC lines indicates a debug accessory. */
#define tcpm_port_is_debug(port) \
	(tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))

/* Ra on both CC lines indicates an audio adapter accessory. */
#define tcpm_port_is_audio(port) \
	(tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))

#define tcpm_port_is_audio_detached(port) \
	((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
	 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))

/* Try.SNK applies only once per attach, on DRP ports preferring sink. */
#define tcpm_try_snk(port) \
	((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
	(port)->port_type == TYPEC_PORT_DRP)

/* Try.SRC applies only once per attach, on DRP ports preferring source. */
#define tcpm_try_src(port) \
	((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
	(port)->port_type == TYPEC_PORT_DRP)

#define tcpm_data_role_for_source(port) \
	((port)->typec_caps.data == TYPEC_PORT_UFP ? \
	TYPEC_DEVICE : TYPEC_HOST)

#define tcpm_data_role_for_sink(port) \
	((port)->typec_caps.data == TYPEC_PORT_DFP ? \
	TYPEC_HOST : TYPEC_DEVICE)

/* PD3.0 collision avoidance: the source grants TX by advertising Rp 3.0A. */
#define tcpm_sink_tx_ok(port) \
	(tcpm_port_is_sink(port) && \
	((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))

#define tcpm_wait_for_discharge(port) \
	(((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
580 static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
582 if (port->port_type == TYPEC_PORT_DRP) {
583 if (port->try_role == TYPEC_SINK)
584 return SNK_UNATTACHED;
585 else if (port->try_role == TYPEC_SOURCE)
586 return SRC_UNATTACHED;
587 /* Fall through to return SRC_UNATTACHED */
588 } else if (port->port_type == TYPEC_PORT_SNK) {
589 return SNK_UNATTACHED;
591 return SRC_UNATTACHED;
594 static bool tcpm_port_is_disconnected(struct tcpm_port *port)
596 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
597 port->cc2 == TYPEC_CC_OPEN) ||
598 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
599 port->cc1 == TYPEC_CC_OPEN) ||
600 (port->polarity == TYPEC_POLARITY_CC2 &&
601 port->cc2 == TYPEC_CC_OPEN)));
608 #ifdef CONFIG_DEBUG_FS
610 static bool tcpm_log_full(struct tcpm_port *port)
612 return port->logbuffer_tail ==
613 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
617 static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
619 char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
620 u64 ts_nsec = local_clock();
621 unsigned long rem_nsec;
623 mutex_lock(&port->logbuffer_lock);
624 if (!port->logbuffer[port->logbuffer_head]) {
625 port->logbuffer[port->logbuffer_head] =
626 kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
627 if (!port->logbuffer[port->logbuffer_head]) {
628 mutex_unlock(&port->logbuffer_lock);
633 vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
635 if (tcpm_log_full(port)) {
636 port->logbuffer_head = max(port->logbuffer_head - 1, 0);
637 strcpy(tmpbuffer, "overflow");
640 if (port->logbuffer_head < 0 ||
641 port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
643 "Bad log buffer index %d\n", port->logbuffer_head);
647 if (!port->logbuffer[port->logbuffer_head]) {
649 "Log buffer index %d is NULL\n", port->logbuffer_head);
653 rem_nsec = do_div(ts_nsec, 1000000000);
654 scnprintf(port->logbuffer[port->logbuffer_head],
655 LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
656 (unsigned long)ts_nsec, rem_nsec / 1000,
658 port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
661 mutex_unlock(&port->logbuffer_lock);
665 static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
669 /* Do not log while disconnected and unattached */
670 if (tcpm_port_is_disconnected(port) &&
671 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
672 port->state == TOGGLING || port->state == CHECK_CONTAMINANT))
676 _tcpm_log(port, fmt, args);
681 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
686 _tcpm_log(port, fmt, args);
690 static void tcpm_log_source_caps(struct tcpm_port *port)
694 for (i = 0; i < port->nr_source_caps; i++) {
695 u32 pdo = port->source_caps[i];
696 enum pd_pdo_type type = pdo_type(pdo);
701 scnprintf(msg, sizeof(msg),
702 "%u mV, %u mA [%s%s%s%s%s%s]",
703 pdo_fixed_voltage(pdo),
704 pdo_max_current(pdo),
705 (pdo & PDO_FIXED_DUAL_ROLE) ?
707 (pdo & PDO_FIXED_SUSPEND) ?
709 (pdo & PDO_FIXED_HIGHER_CAP) ?
711 (pdo & PDO_FIXED_USB_COMM) ?
713 (pdo & PDO_FIXED_DATA_SWAP) ?
715 (pdo & PDO_FIXED_EXTPOWER) ?
719 scnprintf(msg, sizeof(msg),
721 pdo_min_voltage(pdo),
722 pdo_max_voltage(pdo),
723 pdo_max_current(pdo));
726 scnprintf(msg, sizeof(msg),
728 pdo_min_voltage(pdo),
729 pdo_max_voltage(pdo),
733 if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
734 scnprintf(msg, sizeof(msg),
736 pdo_pps_apdo_min_voltage(pdo),
737 pdo_pps_apdo_max_voltage(pdo),
738 pdo_pps_apdo_max_current(pdo));
740 strcpy(msg, "undefined APDO");
743 strcpy(msg, "undefined");
746 tcpm_log(port, " PDO %d: type %d, %s",
751 static int tcpm_debug_show(struct seq_file *s, void *v)
753 struct tcpm_port *port = s->private;
756 mutex_lock(&port->logbuffer_lock);
757 tail = port->logbuffer_tail;
758 while (tail != port->logbuffer_head) {
759 seq_printf(s, "%s\n", port->logbuffer[tail]);
760 tail = (tail + 1) % LOG_BUFFER_ENTRIES;
762 if (!seq_has_overflowed(s))
763 port->logbuffer_tail = tail;
764 mutex_unlock(&port->logbuffer_lock);
768 DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
770 static void tcpm_debugfs_init(struct tcpm_port *port)
774 mutex_init(&port->logbuffer_lock);
775 snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
776 port->dentry = debugfs_create_dir(name, usb_debug_root);
777 debugfs_create_file("log", S_IFREG | 0444, port->dentry, port,
781 static void tcpm_debugfs_exit(struct tcpm_port *port)
785 mutex_lock(&port->logbuffer_lock);
786 for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
787 kfree(port->logbuffer[i]);
788 port->logbuffer[i] = NULL;
790 mutex_unlock(&port->logbuffer_lock);
792 debugfs_remove(port->dentry);
/* No-op stubs used when CONFIG_DEBUG_FS is disabled. */
static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
static void tcpm_log_source_caps(struct tcpm_port *port) { }
static void tcpm_debugfs_init(const struct tcpm_port *port) { }
static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
807 static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
809 tcpm_log(port, "cc:=%d", cc);
811 port->tcpc->set_cc(port->tcpc, cc);
814 static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
818 if (port->tcpc->enable_auto_vbus_discharge) {
819 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
820 tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable",
823 port->auto_vbus_discharge_enabled = enable;
829 static void tcpm_apply_rc(struct tcpm_port *port)
832 * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
833 * when Vbus auto discharge on disconnect is enabled.
835 if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
836 tcpm_log(port, "Apply_RC");
837 port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
838 tcpm_enable_auto_vbus_discharge(port, false);
843 * Determine RP value to set based on maximum current supported
844 * by a port if configured as source.
845 * Returns CC value to report to link partner.
847 static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
849 const u32 *src_pdo = port->src_pdo;
850 int nr_pdo = port->nr_src_pdo;
853 if (!port->pd_supported)
857 * Search for first entry with matching voltage.
858 * It should report the maximum supported current.
860 for (i = 0; i < nr_pdo; i++) {
861 const u32 pdo = src_pdo[i];
863 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
864 pdo_fixed_voltage(pdo) == 5000) {
865 unsigned int curr = pdo_max_current(pdo);
868 return TYPEC_CC_RP_3_0;
869 else if (curr >= 1500)
870 return TYPEC_CC_RP_1_5;
871 return TYPEC_CC_RP_DEF;
875 return TYPEC_CC_RP_DEF;
878 static void tcpm_ams_finish(struct tcpm_port *port)
880 tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);
882 if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
883 if (port->negotiated_rev >= PD_REV30)
884 tcpm_set_cc(port, SINK_TX_OK);
886 tcpm_set_cc(port, SINK_TX_NG);
887 } else if (port->pwr_role == TYPEC_SOURCE) {
888 tcpm_set_cc(port, tcpm_rp_cc(port));
891 port->in_ams = false;
892 port->ams = NONE_AMS;
895 static int tcpm_pd_transmit(struct tcpm_port *port,
896 enum tcpm_transmit_type type,
897 const struct pd_message *msg)
899 unsigned long timeout;
903 tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
905 tcpm_log(port, "PD TX, type: %#x", type);
907 reinit_completion(&port->tx_complete);
908 ret = port->tcpc->pd_transmit(port->tcpc, type, msg, port->negotiated_rev);
912 mutex_unlock(&port->lock);
913 timeout = wait_for_completion_timeout(&port->tx_complete,
914 msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
915 mutex_lock(&port->lock);
919 switch (port->tx_status) {
920 case TCPC_TX_SUCCESS:
921 port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
923 * USB PD rev 2.0, 8.3.2.2.1:
924 * USB PD rev 3.0, 8.3.2.1.3:
925 * "... Note that every AMS is Interruptible until the first
926 * Message in the sequence has been successfully sent (GoodCRC
927 * Message received)."
929 if (port->ams != NONE_AMS)
932 case TCPC_TX_DISCARDED:
941 /* Some AMS don't expect responses. Finish them here. */
942 if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
943 tcpm_ams_finish(port);
948 void tcpm_pd_transmit_complete(struct tcpm_port *port,
949 enum tcpm_transmit_status status)
951 tcpm_log(port, "PD TX complete, status: %u", status);
952 port->tx_status = status;
953 complete(&port->tx_complete);
955 EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
957 static int tcpm_mux_set(struct tcpm_port *port, int state,
958 enum usb_role usb_role,
959 enum typec_orientation orientation)
963 tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
964 state, usb_role, orientation);
966 ret = typec_set_orientation(port->typec_port, orientation);
971 ret = usb_role_switch_set_role(port->role_sw, usb_role);
976 return typec_set_mode(port->typec_port, state);
979 static int tcpm_set_polarity(struct tcpm_port *port,
980 enum typec_cc_polarity polarity)
984 tcpm_log(port, "polarity %d", polarity);
986 ret = port->tcpc->set_polarity(port->tcpc, polarity);
990 port->polarity = polarity;
995 static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
999 tcpm_log(port, "vconn:=%d", enable);
1001 ret = port->tcpc->set_vconn(port->tcpc, enable);
1003 port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
1004 typec_set_vconn_role(port->typec_port, port->vconn_role);
1010 static u32 tcpm_get_current_limit(struct tcpm_port *port)
1012 enum typec_cc_status cc;
1015 cc = port->polarity ? port->cc2 : port->cc1;
1017 case TYPEC_CC_RP_1_5:
1020 case TYPEC_CC_RP_3_0:
1023 case TYPEC_CC_RP_DEF:
1025 if (port->tcpc->get_current_limit)
1026 limit = port->tcpc->get_current_limit(port->tcpc);
1035 static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
1037 int ret = -EOPNOTSUPP;
1039 tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
1041 port->supply_voltage = mv;
1042 port->current_limit = max_ma;
1043 power_supply_changed(port->psy);
1045 if (port->tcpc->set_current_limit)
1046 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
1051 static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
1053 return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
1057 static int tcpm_set_roles(struct tcpm_port *port, bool attached,
1058 enum typec_role role, enum typec_data_role data)
1060 enum typec_orientation orientation;
1061 enum usb_role usb_role;
1064 if (port->polarity == TYPEC_POLARITY_CC1)
1065 orientation = TYPEC_ORIENTATION_NORMAL;
1067 orientation = TYPEC_ORIENTATION_REVERSE;
1069 if (port->typec_caps.data == TYPEC_PORT_DRD) {
1070 if (data == TYPEC_HOST)
1071 usb_role = USB_ROLE_HOST;
1073 usb_role = USB_ROLE_DEVICE;
1074 } else if (port->typec_caps.data == TYPEC_PORT_DFP) {
1075 if (data == TYPEC_HOST) {
1076 if (role == TYPEC_SOURCE)
1077 usb_role = USB_ROLE_HOST;
1079 usb_role = USB_ROLE_NONE;
1084 if (data == TYPEC_DEVICE) {
1085 if (role == TYPEC_SINK)
1086 usb_role = USB_ROLE_DEVICE;
1088 usb_role = USB_ROLE_NONE;
1094 ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
1098 ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
1102 port->pwr_role = role;
1103 port->data_role = data;
1104 typec_set_data_role(port->typec_port, data);
1105 typec_set_pwr_role(port->typec_port, role);
1110 static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
1114 ret = port->tcpc->set_roles(port->tcpc, true, role,
1119 port->pwr_role = role;
1120 typec_set_pwr_role(port->typec_port, role);
1126 * Transform the PDO to be compliant to PD rev2.0.
1127 * Return 0 if the PDO type is not defined in PD rev2.0.
1128 * Otherwise, return the converted PDO.
1130 static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
1132 switch (pdo_type(pdo)) {
1133 case PDO_TYPE_FIXED:
1134 if (role == TYPEC_SINK)
1135 return pdo & ~PDO_FIXED_FRS_CURR_MASK;
1137 return pdo & ~PDO_FIXED_UNCHUNK_EXT;
1147 static int tcpm_pd_send_source_caps(struct tcpm_port *port)
1149 struct pd_message msg;
1151 unsigned int i, nr_pdo = 0;
1153 memset(&msg, 0, sizeof(msg));
1155 for (i = 0; i < port->nr_src_pdo; i++) {
1156 if (port->negotiated_rev >= PD_REV30) {
1157 msg.payload[nr_pdo++] = cpu_to_le32(port->src_pdo[i]);
1159 pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
1161 msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1166 /* No source capabilities defined, sink only */
1167 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1170 port->negotiated_rev,
1171 port->message_id, 0);
1173 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
1176 port->negotiated_rev,
1181 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1184 static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
1186 struct pd_message msg;
1188 unsigned int i, nr_pdo = 0;
1190 memset(&msg, 0, sizeof(msg));
1192 for (i = 0; i < port->nr_snk_pdo; i++) {
1193 if (port->negotiated_rev >= PD_REV30) {
1194 msg.payload[nr_pdo++] = cpu_to_le32(port->snk_pdo[i]);
1196 pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
1198 msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1203 /* No sink capabilities defined, source only */
1204 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1207 port->negotiated_rev,
1208 port->message_id, 0);
1210 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
1213 port->negotiated_rev,
1218 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1221 static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1224 hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1226 hrtimer_cancel(&port->state_machine_timer);
1227 kthread_queue_work(port->wq, &port->state_machine);
1231 static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1234 hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
1237 hrtimer_cancel(&port->vdm_state_machine_timer);
1238 kthread_queue_work(port->wq, &port->vdm_state_machine);
1242 static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1245 hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1247 hrtimer_cancel(&port->enable_frs_timer);
1248 kthread_queue_work(port->wq, &port->enable_frs);
1252 static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1255 hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1257 hrtimer_cancel(&port->send_discover_timer);
1258 kthread_queue_work(port->wq, &port->send_discover_work);
1262 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
1263 unsigned int delay_ms)
1266 tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
1267 tcpm_states[port->state], tcpm_states[state], delay_ms,
1268 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1269 port->delayed_state = state;
1270 mod_tcpm_delayed_work(port, delay_ms);
1271 port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
1272 port->delay_ms = delay_ms;
1274 tcpm_log(port, "state change %s -> %s [%s %s]",
1275 tcpm_states[port->state], tcpm_states[state],
1276 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1277 port->delayed_state = INVALID_STATE;
1278 port->prev_state = port->state;
1279 port->state = state;
1281 * Don't re-queue the state machine work item if we're currently
1282 * in the state machine and we're immediately changing states.
1283 * tcpm_state_machine_work() will continue running the state
1286 if (!port->state_machine_running)
1287 mod_tcpm_delayed_work(port, 0);
1291 static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
1292 unsigned int delay_ms)
1294 if (port->enter_state == port->state)
1295 tcpm_set_state(port, state, delay_ms);
1298 "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
1299 delay_ms ? "delayed " : "",
1300 tcpm_states[port->state], tcpm_states[state],
1301 delay_ms, tcpm_states[port->enter_state],
1302 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1305 static void tcpm_queue_message(struct tcpm_port *port,
1306 enum pd_msg_request message)
1308 port->queued_message = message;
1309 mod_tcpm_delayed_work(port, 0);
1312 static bool tcpm_vdm_ams(struct tcpm_port *port)
1314 switch (port->ams) {
1315 case DISCOVER_IDENTITY:
1316 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1317 case DISCOVER_SVIDS:
1318 case DISCOVER_MODES:
1319 case DFP_TO_UFP_ENTER_MODE:
1320 case DFP_TO_UFP_EXIT_MODE:
1321 case DFP_TO_CABLE_PLUG_ENTER_MODE:
1322 case DFP_TO_CABLE_PLUG_EXIT_MODE:
1324 case UNSTRUCTURED_VDMS:
1325 case STRUCTURED_VDMS:
1334 static bool tcpm_ams_interruptible(struct tcpm_port *port)
1336 switch (port->ams) {
1337 /* Interruptible AMS */
1340 case FIRMWARE_UPDATE:
1341 case DISCOVER_IDENTITY:
1342 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1343 case DISCOVER_SVIDS:
1344 case DISCOVER_MODES:
1345 case DFP_TO_UFP_ENTER_MODE:
1346 case DFP_TO_UFP_EXIT_MODE:
1347 case DFP_TO_CABLE_PLUG_ENTER_MODE:
1348 case DFP_TO_CABLE_PLUG_EXIT_MODE:
1349 case UNSTRUCTURED_VDMS:
1350 case STRUCTURED_VDMS:
1354 /* Non-Interruptible AMS */
/*
 * tcpm_ams_start() - begin a PD Atomic Message Sequence (AMS) on this port.
 *
 * Refuses to start when a non-interruptible AMS is already in progress,
 * unless the new AMS is HARD_RESET or SOFT_RESET_AMS.  For a PD rev 3.0+
 * source, the Rp value on CC (SINK_TX_NG vs SINK_TX_OK) is used to signal
 * the sink whether it may initiate an AMS; a rev 3.0+ sink must first check
 * tcpm_sink_tx_ok() before starting anything other than a reset.
 *
 * NOTE(review): several lines (returns, braces, some case labels) are elided
 * in this excerpt, so parts of the control flow are inferred from the visible
 * branches — confirm against the full file.
 */
1364 static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
1368 tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);
/* Only a reset AMS may interrupt an ongoing non-interruptible AMS. */
1370 if (!tcpm_ams_interruptible(port) &&
1371 !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
1372 port->upcoming_state = INVALID_STATE;
1373 tcpm_log(port, "AMS %s not interruptible, aborting",
1374 tcpm_ams_str[port->ams]);
/* Source-initiated AMS: manage Rp signalling before transmitting. */
1378 if (port->pwr_role == TYPEC_SOURCE) {
1379 enum typec_cc_status cc_req = port->cc_req;
1383 if (ams == HARD_RESET) {
1384 tcpm_set_cc(port, tcpm_rp_cc(port));
1385 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1386 tcpm_set_state(port, HARD_RESET_START, 0);
1388 } else if (ams == SOFT_RESET_AMS) {
1389 if (!port->explicit_contract)
1390 tcpm_set_cc(port, tcpm_rp_cc(port));
1391 tcpm_set_state(port, SOFT_RESET_SEND, 0);
1393 } else if (tcpm_vdm_ams(port)) {
1394 /* tSinkTx is enforced in vdm_run_state_machine */
1395 if (port->negotiated_rev >= PD_REV30)
1396 tcpm_set_cc(port, SINK_TX_NG);
/* Non-VDM AMS as source: advertise SINK_TX_NG to hold off the sink. */
1400 if (port->negotiated_rev >= PD_REV30)
1401 tcpm_set_cc(port, SINK_TX_NG);
1403 switch (port->state) {
1406 case SRC_SOFT_RESET_WAIT_SNK_TX:
1408 case SOFT_RESET_SEND:
/*
 * If Rp was previously SINK_TX_OK, allow tSinkTx for the sink to
 * notice the change before we transmit (delay elided in excerpt).
 */
1409 if (port->negotiated_rev >= PD_REV30)
1410 tcpm_set_state(port, AMS_START,
1411 cc_req == SINK_TX_OK ?
1414 tcpm_set_state(port, AMS_START, 0);
1417 if (port->negotiated_rev >= PD_REV30)
1418 tcpm_set_state(port, SRC_READY,
1419 cc_req == SINK_TX_OK ?
1422 tcpm_set_state(port, SRC_READY, 0);
/*
 * Sink-initiated AMS: for rev 3.0+ the sink may only start a
 * non-reset AMS while the source advertises SinkTxOk.
 */
1426 if (port->negotiated_rev >= PD_REV30 &&
1427 !tcpm_sink_tx_ok(port) &&
1428 ams != SOFT_RESET_AMS &&
1429 ams != HARD_RESET) {
1430 port->upcoming_state = INVALID_STATE;
1431 tcpm_log(port, "Sink TX No Go");
1437 if (ams == HARD_RESET) {
1438 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1439 tcpm_set_state(port, HARD_RESET_START, 0);
1441 } else if (tcpm_vdm_ams(port)) {
/* Leave READY/soft-reset states via AMS_START, otherwise stay in SNK_READY. */
1445 if (port->state == SNK_READY ||
1446 port->state == SNK_SOFT_RESET)
1447 tcpm_set_state(port, AMS_START, 0);
1449 tcpm_set_state(port, SNK_READY, 0);
1456 * VDM/VDO handling functions
/*
 * tcpm_queue_vdm() - queue a VDM (header + up to cnt VDOs) for transmission
 * by the VDM state machine.
 *
 * Caller must hold port->lock.  The message is staged in port->vdo_data[]
 * and the state machine work is kicked immediately; vdm_run_state_machine()
 * performs the actual transmit.
 */
1458 static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
1459 const u32 *data, int cnt)
1461 u32 vdo_hdr = port->vdo_data[0];
1463 WARN_ON(!mutex_is_locked(&port->lock));
/* If we were about to send Discover Identity, handle the received message first */
1465 /* If is sending discover_identity, handle received message first */
1466 if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
/* Re-arm the Discover Identity retry so it is not lost by this overwrite. */
1467 port->send_discover = true;
1468 mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
1470 /* Make sure we are not still processing a previous VDM packet */
1471 WARN_ON(port->vdm_state > VDM_STATE_DONE);
/* vdo_data[0] is the VDM header; payload VDOs follow, hence cnt + 1. */
1474 port->vdo_count = cnt + 1;
1475 port->vdo_data[0] = header;
1476 memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
1477 /* Set ready, vdm state machine will actually send */
1478 port->vdm_retries = 0;
1479 port->vdm_state = VDM_STATE_READY;
1480 port->vdm_sm_running = true;
1482 mod_vdm_delayed_work(port, 0);
/*
 * tcpm_queue_vdm_unlocked() - lock-taking wrapper around tcpm_queue_vdm()
 * for callers (e.g. altmode ops) that do not already hold port->lock.
 */
1485 static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
1486 const u32 *data, int cnt)
1488 mutex_lock(&port->lock);
1489 tcpm_queue_vdm(port, header, data, cnt);
1490 mutex_unlock(&port->lock);
/*
 * svdm_consume_identity() - record the partner's Discover Identity response.
 *
 * Stores the ID Header, Cert Stat and Product VDOs into port->partner_ident,
 * resets any previously discovered mode data, and publishes the identity via
 * the typec class.  @p points at the received VDOs (p[0] is the VDM header).
 */
1493 static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
1495 u32 vdo = p[VDO_INDEX_IDH];
1496 u32 product = p[VDO_INDEX_PRODUCT];
/* Discard stale SVID/mode discovery data from a previous partner. */
1498 memset(&port->mode_data, 0, sizeof(port->mode_data));
1500 port->partner_ident.id_header = vdo;
1501 port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
1502 port->partner_ident.product = product;
1504 typec_partner_set_identity(port->partner);
1506 tcpm_log(port, "Identity: %04x:%04x.%04x",
1508 PD_PRODUCT_PID(product), product & 0xffff);
/*
 * svdm_consume_svids() - accumulate SVIDs from a Discover SVIDs response.
 *
 * Each VDO packs two 16-bit SVIDs (high then low half).  Returns true when
 * discovery should continue with another Discover SVIDs request, false when
 * the list is complete or SVID_DISCOVERY_MAX has been reached (the elided
 * early-exit paths return false on a 0x0000 terminator SVID).
 */
1511 static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt)
1513 struct pd_mode_data *pmdata = &port->mode_data;
1516 for (i = 1; i < cnt; i++) {
/* Upper half of the VDO carries the first SVID of the pair. */
1519 svid = (p[i] >> 16) & 0xffff;
1523 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1526 pmdata->svids[pmdata->nsvids++] = svid;
1527 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
/* Lower half carries the second SVID of the pair. */
1529 svid = p[i] & 0xffff;
1533 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1536 pmdata->svids[pmdata->nsvids++] = svid;
1537 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1541 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
1542 * 6-43), and can be returned maximum 6 VDOs per response (see Figure
1543 * 6-19). If the Responder supports 12 or more SVIDs then the Discover
1544 * SVIDs Command Shall be executed multiple times until a Discover
1545 * SVIDs VDO is returned ending either with a SVID value of 0x0000 in
1546 * the last part of the last VDO or with a VDO containing two SVIDs
1547 * with values of 0x0000.
1549 * However, some odd dockers support SVIDs less than 12 but without
1550 * 0x0000 in the last VDO, so we need to break the Discover SVIDs
1551 * request and return false here.
/* Reached on overflow of the local SVID table. */
1555 tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
/*
 * svdm_consume_modes() - record alternate modes from a Discover Modes
 * response for the SVID currently being queried (mode_data.svid_index).
 *
 * Fills typec_altmode_desc entries for later registration by
 * tcpm_register_partner_altmodes().  Bails out once the partner altmode
 * table is full (limit check visible below; increment lines elided).
 */
1559 static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt)
1561 struct pd_mode_data *pmdata = &port->mode_data;
1562 struct typec_altmode_desc *paltmode;
1565 if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
1566 /* Already logged in svdm_consume_svids() */
/* p[0] is the VDM header; mode VDOs start at p[1]. */
1570 for (i = 1; i < cnt; i++) {
1571 paltmode = &pmdata->altmode_desc[pmdata->altmodes];
1572 memset(paltmode, 0, sizeof(*paltmode));
1574 paltmode->svid = pmdata->svids[pmdata->svid_index];
1576 paltmode->vdo = p[i];
1578 tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
1579 pmdata->altmodes, paltmode->svid,
1580 paltmode->mode, paltmode->vdo);
/*
 * tcpm_register_partner_altmodes() - register every alternate mode collected
 * during Discover Modes with the typec class, saving the handles in
 * port->partner_altmode[] for later matching/unregistration.
 * Registration failures are logged but do not abort the loop's purpose.
 */
1586 static void tcpm_register_partner_altmodes(struct tcpm_port *port)
1588 struct pd_mode_data *modep = &port->mode_data;
1589 struct typec_altmode *altmode;
1592 for (i = 0; i < modep->altmodes; i++) {
1593 altmode = typec_partner_register_altmode(port->partner,
1594 &modep->altmode_desc[i]);
1595 if (IS_ERR(altmode)) {
1596 tcpm_log(port, "Failed to register partner SVID 0x%04x",
1597 modep->altmode_desc[i].svid);
/* NOTE(review): the failure path presumably NULLs the slot — line elided. */
1600 port->partner_altmode[i] = altmode;
/* True if the partner's ID Header VDO advertises Modal Operation support. */
1604 #define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
/*
 * tcpm_pd_svdm() - process a received Structured VDM.
 *
 * @p/@cnt: received VDOs (p[0] is the VDM header).
 * @response: buffer for a reply VDM to queue; the returned length (rlen,
 *	declaration elided in this excerpt) is 0 when no reply is needed.
 * @adev_action: out-parameter telling the caller which altmode-driver
 *	notification to perform after dropping the port lock.
 *
 * Dispatches on the command type: CMDT_INIT (partner-initiated request we
 * must ACK/NAK/BUSY), CMDT_RSP_ACK (reply to our own request, driving the
 * discovery sequence Identity -> SVIDs -> Modes), and NAK/other (finish the
 * AMS, NAK unrecognized commands).
 *
 * NOTE(review): the outer switch on cmd_type and several break/return lines
 * are elided in this excerpt; the grouping below is inferred from the
 * visible case labels.
 */
1606 static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
1607 const u32 *p, int cnt, u32 *response,
1608 enum adev_actions *adev_action)
1610 struct typec_port *typec = port->typec_port;
1611 struct typec_altmode *pdev;
1612 struct pd_mode_data *modep;
1619 cmd_type = PD_VDO_CMDT(p[0]);
1620 cmd = PD_VDO_CMD(p[0]);
1622 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1623 p[0], cmd_type, cmd, cnt);
1625 modep = &port->mode_data;
/* Find the registered partner altmode this VDM addresses, if any. */
1627 pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX,
1628 PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
1630 svdm_version = typec_get_negotiated_svdm_version(typec);
1631 if (svdm_version < 0)
/* --- CMDT_INIT: partner-initiated commands --- */
1637 case CMD_DISCOVER_IDENT:
1638 if (PD_VDO_VID(p[0]) != USB_SID_PD)
1641 if (IS_ERR_OR_NULL(port->partner))
/* Negotiate down to the partner's SVDM version if it is lower. */
1644 if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
1645 typec_partner_set_svdm_version(port->partner,
1646 PD_VDO_SVDM_VER(p[0]));
1647 svdm_version = PD_VDO_SVDM_VER(p[0]);
1650 port->ams = DISCOVER_IDENTITY;
1652 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
1653 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
1654 * "wrong configuration" or "Unrecognized"
1656 if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
/* Reply with the sink VDOs matching the negotiated SVDM version. */
1658 if (svdm_version < SVDM_VER_2_0) {
1659 for (i = 0; i < port->nr_snk_vdo_v1; i++)
1660 response[i + 1] = port->snk_vdo_v1[i];
1661 rlen = port->nr_snk_vdo_v1 + 1;
1664 for (i = 0; i < port->nr_snk_vdo; i++)
1665 response[i + 1] = port->snk_vdo[i];
1666 rlen = port->nr_snk_vdo + 1;
1670 case CMD_DISCOVER_SVID:
1671 port->ams = DISCOVER_SVIDS;
1673 case CMD_DISCOVER_MODES:
1674 port->ams = DISCOVER_MODES;
1676 case CMD_ENTER_MODE:
1677 port->ams = DFP_TO_UFP_ENTER_MODE;
1680 port->ams = DFP_TO_UFP_EXIT_MODE;
1683 /* Attention command does not have response */
1684 *adev_action = ADEV_ATTENTION;
/* Build the reply header: ACK if rlen set, NAK if 0, else BUSY. */
1690 response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
1691 } else if (rlen == 0) {
1692 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1695 response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
1698 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
1699 (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
/* --- CMDT_RSP_ACK: replies to commands we initiated --- */
1702 /* silently drop message if we are not connected */
1703 if (IS_ERR_OR_NULL(port->partner))
1706 tcpm_ams_finish(port);
1709 case CMD_DISCOVER_IDENT:
1710 if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
1711 typec_partner_set_svdm_version(port->partner,
1712 PD_VDO_SVDM_VER(p[0]));
/* Identity received — continue discovery with Discover SVIDs. */
1714 svdm_consume_identity(port, p, cnt);
1715 response[0] = VDO(USB_SID_PD, 1, typec_get_negotiated_svdm_version(typec),
1719 case CMD_DISCOVER_SVID:
/* true: partner may have more SVIDs — issue another Discover SVIDs. */
1721 if (svdm_consume_svids(port, p, cnt)) {
1722 response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
1724 } else if (modep->nsvids && supports_modal(port)) {
1725 response[0] = VDO(modep->svids[0], 1, svdm_version,
1726 CMD_DISCOVER_MODES);
1730 case CMD_DISCOVER_MODES:
/* Walk the SVID list, one Discover Modes request per SVID. */
1732 svdm_consume_modes(port, p, cnt);
1733 modep->svid_index++;
1734 if (modep->svid_index < modep->nsvids) {
1735 u16 svid = modep->svids[modep->svid_index];
1736 response[0] = VDO(svid, 1, svdm_version, CMD_DISCOVER_MODES);
1739 tcpm_register_partner_altmodes(port);
1742 case CMD_ENTER_MODE:
1744 *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
1748 /* Back to USB Operation */
1749 *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
1753 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
1756 /* Unrecognized SVDM */
1757 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1759 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
1760 (VDO_SVDM_VERS(svdm_version));
/* --- NAK (and similar) replies: AMS is over; notify drivers as needed --- */
1765 tcpm_ams_finish(port);
1767 case CMD_DISCOVER_IDENT:
1768 case CMD_DISCOVER_SVID:
1769 case CMD_DISCOVER_MODES:
1770 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
1772 case CMD_ENTER_MODE:
1773 /* Back to USB Operation */
1774 *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
1777 /* Unrecognized SVDM */
1778 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1780 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
1781 (VDO_SVDM_VERS(svdm_version));
/* Unknown cmd_type: NAK with the negotiated SVDM version. */
1786 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1788 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
1789 (VDO_SVDM_VERS(svdm_version));
1793 /* Informing the alternate mode drivers about everything */
1794 *adev_action = ADEV_QUEUE_VDM;
1798 static void tcpm_pd_handle_msg(struct tcpm_port *port,
1799 enum pd_msg_request message,
/*
 * tcpm_handle_vdm_request() - top-level handler for a received Vendor
 * Defined Message.
 *
 * Converts the payload to CPU endianness, handles BUSY retry bookkeeping,
 * runs the SVDM state handling (tcpm_pd_svdm), then — with port->lock
 * DROPPED to avoid lock inversion with altmode drivers — performs the
 * requested altmode notification, and finally re-takes the lock to queue
 * any reply VDM.
 */
1802 static void tcpm_handle_vdm_request(struct tcpm_port *port,
1803 const __le32 *payload, int cnt)
1805 enum adev_actions adev_action = ADEV_NONE;
1806 struct typec_altmode *adev;
1807 u32 p[PD_MAX_PAYLOAD];
1808 u32 response[8] = { };
1811 for (i = 0; i < cnt; i++)
1812 p[i] = le32_to_cpu(payload[i]);
/* Match the local (port-side) altmode this VDM addresses, if any. */
1814 adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
1815 PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
1817 if (port->vdm_state == VDM_STATE_BUSY) {
1818 /* If UFP responded busy retry after timeout */
1819 if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
1820 port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
1821 port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
1823 mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
1826 port->vdm_state = VDM_STATE_DONE;
1829 if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
1831 * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
1832 * advance because we are dropping the lock but may send VDMs soon.
1833 * For the cases of INIT received:
1834 * - If no response to send, it will be cleared later in this function.
1835 * - If there are responses to send, it will be cleared in the state machine.
1836 * For the cases of RSP received:
1837 * - If no further INIT to send, it will be cleared later in this function.
1838 * - Otherwise, it will be cleared in the state machine if timeout or it will go
1839 * back here until no further INIT to send.
1840 * For the cases of unknown type received:
1841 * - We will send NAK and the flag will be cleared in the state machine.
1843 port->vdm_sm_running = true;
1844 rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
/* Unstructured VDMs are Not_Supported in PD rev 3.0+. */
1846 if (port->negotiated_rev >= PD_REV30)
1847 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
1851 * We are done with any state stored in the port struct now, except
1852 * for any port struct changes done by the tcpm_queue_vdm() call
1853 * below, which is a separate operation.
1855 * So we can safely release the lock here; and we MUST release the
1856 * lock here to avoid an AB BA lock inversion:
1858 * If we keep the lock here then the lock ordering in this path is:
1859 * 1. tcpm_pd_rx_handler takes the tcpm port lock
1860 * 2. One of the typec_altmode_* calls below takes the alt-mode's lock
1862 * And we also have this ordering:
1863 * 1. alt-mode driver takes the alt-mode's lock
1864 * 2. alt-mode driver calls tcpm_altmode_enter which takes the
1867 * Dropping our lock here avoids this.
1869 mutex_unlock(&port->lock);
1872 switch (adev_action) {
1875 case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
1876 WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
1877 typec_altmode_vdm(adev, p[0], &p[1], cnt);
1879 case ADEV_QUEUE_VDM:
1880 typec_altmode_vdm(adev, p[0], &p[1], cnt);
1882 case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
/* If the altmode driver rejects the VDM, send Exit Mode ourselves. */
1883 if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
1884 int svdm_version = typec_get_negotiated_svdm_version(
1886 if (svdm_version < 0)
1889 response[0] = VDO(adev->svid, 1, svdm_version,
1891 response[0] |= VDO_OPOS(adev->mode);
1895 case ADEV_ATTENTION:
1896 if (typec_altmode_attention(adev, p[1]))
1897 tcpm_log(port, "typec_altmode_attention no port partner altmode");
1903 * We must re-take the lock here to balance the unlock in
1904 * tcpm_pd_rx_handler, note that no changes, other than the
1905 * tcpm_queue_vdm call, are made while the lock is held again.
1906 * All that is done after the call is unwinding the call stack until
1907 * we return to tcpm_pd_rx_handler and do the unlock there.
1909 mutex_lock(&port->lock);
/* Queue the reply built above (rlen includes the header VDO). */
1912 tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
1914 port->vdm_sm_running = false;
/*
 * tcpm_send_vdm() - build an SVDM header for @vid/@cmd and queue it with up
 * to @count payload VDOs.  Caller must hold port->lock (via tcpm_queue_vdm).
 */
1917 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
1918 const u32 *data, int count)
1920 int svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
1923 if (svdm_version < 0)
/* Clamp payload to the header + VDO_MAX_SIZE-1 VDOs a VDM can carry. */
1926 if (WARN_ON(count > VDO_MAX_SIZE - 1))
1927 count = VDO_MAX_SIZE - 1;
1929 /* set VDM header with VID & CMD */
/* Structured bit is set for the PD SID or for standard commands. */
1930 header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1931 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
1933 tcpm_queue_vdm(port, header, data, count);
/*
 * vdm_ready_timeout() - response timeout to apply after transmitting the VDM
 * with header @vdm_hdr: unstructured VDM timeout, or the spec's sender/
 * receiver response (or mode entry/exit) timers for structured VDMs.
 */
1936 static unsigned int vdm_ready_timeout(u32 vdm_hdr)
1938 unsigned int timeout;
1939 int cmd = PD_VDO_CMD(vdm_hdr);
1941 /* its not a structured VDM command */
1942 if (!PD_VDO_SVDM(vdm_hdr))
1943 return PD_T_VDM_UNSTRUCTURED;
1945 switch (PD_VDO_CMDT(vdm_hdr)) {
/* Initiator: wait the sender-response (or mode entry/exit) time. */
1947 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1948 timeout = PD_T_VDM_WAIT_MODE_E;
1950 timeout = PD_T_VDM_SNDR_RSP;
/* Responder: wait the receiver-response (or mode entry/exit) time. */
1953 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1954 timeout = PD_T_VDM_E_MODE;
1956 timeout = PD_T_VDM_RCVR_RSP;
/*
 * vdm_run_state_machine() - advance the VDM transmit state machine one step.
 *
 * Called with port->lock held from vdm_state_machine_work().  READY starts
 * the appropriate AMS and schedules the send; SEND_MESSAGE transmits the
 * staged VDOs; BUSY/WAIT_RSP_BUSY/ERR_SEND handle timeout, busy-retry and
 * transmit-retry paths.
 *
 * NOTE(review): break/return/brace lines and some case labels are elided in
 * this excerpt; inline notes below mark where flow is inferred.
 */
1962 static void vdm_run_state_machine(struct tcpm_port *port)
1964 struct pd_message msg;
1966 u32 vdo_hdr = port->vdo_data[0];
1968 switch (port->vdm_state) {
1969 case VDM_STATE_READY:
1970 /* Only transmit VDM if attached */
1971 if (!port->attached) {
1972 port->vdm_state = VDM_STATE_ERR_BUSY;
1977 * if there's traffic or we're not in PDO ready state don't send
1980 if (port->state != SRC_READY && port->state != SNK_READY) {
1981 port->vdm_sm_running = false;
1985 /* TODO: AMS operation for Unstructured VDM */
1986 if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
1987 switch (PD_VDO_CMD(vdo_hdr)) {
1988 case CMD_DISCOVER_IDENT:
1989 res = tcpm_ams_start(port, DISCOVER_IDENTITY);
/* AMS accepted: the pending Discover Identity is now in flight. */
1991 port->send_discover = false;
1992 } else if (res == -EAGAIN) {
/* Sink TX not OK yet — drop this attempt and retry later. */
1993 port->vdo_data[0] = 0;
1994 mod_send_discover_delayed_work(port,
1995 SEND_DISCOVER_RETRY_MS);
1998 case CMD_DISCOVER_SVID:
1999 res = tcpm_ams_start(port, DISCOVER_SVIDS);
2001 case CMD_DISCOVER_MODES:
2002 res = tcpm_ams_start(port, DISCOVER_MODES);
2004 case CMD_ENTER_MODE:
2005 res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
2008 res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
2011 res = tcpm_ams_start(port, ATTENTION);
2013 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2014 res = tcpm_ams_start(port, STRUCTURED_VDMS);
/* NOTE(review): failure path — AMS could not start. */
2022 port->vdm_state = VDM_STATE_ERR_BUSY;
/* As a rev 3.0+ source initiating an AMS, honor tSinkTx before sending. */
2027 port->vdm_state = VDM_STATE_SEND_MESSAGE;
2028 mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
2029 port->pwr_role == TYPEC_SOURCE &&
2030 PD_VDO_SVDM(vdo_hdr) &&
2031 PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
2034 case VDM_STATE_WAIT_RSP_BUSY:
/* Partner replied BUSY earlier: re-stage the saved header and retry. */
2035 port->vdo_data[0] = port->vdo_retry;
2036 port->vdo_count = 1;
2037 port->vdm_state = VDM_STATE_READY;
2038 tcpm_ams_finish(port);
2040 case VDM_STATE_BUSY:
/* Response timer expired while waiting for the partner's reply. */
2041 port->vdm_state = VDM_STATE_ERR_TMOUT;
2042 if (port->ams != NONE_AMS)
2043 tcpm_ams_finish(port);
2045 case VDM_STATE_ERR_SEND:
2047 * A partner which does not support USB PD will not reply,
2048 * so this is not a fatal error. At the same time, some
2049 * devices may not return GoodCRC under some circumstances,
2050 * so we need to retry.
2052 if (port->vdm_retries < 3) {
2053 tcpm_log(port, "VDM Tx error, retry");
2054 port->vdm_retries++;
2055 port->vdm_state = VDM_STATE_READY;
2056 if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
2057 tcpm_ams_finish(port);
2059 tcpm_ams_finish(port);
2062 case VDM_STATE_SEND_MESSAGE:
2063 /* Prepare and send VDM */
2064 memset(&msg, 0, sizeof(msg));
2065 msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2068 port->negotiated_rev,
2069 port->message_id, port->vdo_count);
2070 for (i = 0; i < port->vdo_count; i++)
2071 msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
2072 res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
2074 port->vdm_state = VDM_STATE_ERR_SEND;
2076 unsigned long timeout;
/* Transmit succeeded: arm the response timeout and await the reply. */
2078 port->vdm_retries = 0;
2079 port->vdo_data[0] = 0;
2080 port->vdm_state = VDM_STATE_BUSY;
2081 timeout = vdm_ready_timeout(vdo_hdr);
2082 mod_vdm_delayed_work(port, timeout);
/*
 * vdm_state_machine_work() - kthread work wrapper that repeatedly steps the
 * VDM state machine under port->lock until it stops making progress or
 * parks in a state that waits for an external event (BUSY/SEND_MESSAGE).
 */
2090 static void vdm_state_machine_work(struct kthread_work *work)
2092 struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
2093 enum vdm_states prev_state;
2095 mutex_lock(&port->lock);
2098 * Continue running as long as the port is not busy and there was
2102 prev_state = port->vdm_state;
2103 vdm_run_state_machine(port);
2104 } while (port->vdm_state != prev_state &&
2105 port->vdm_state != VDM_STATE_BUSY &&
2106 port->vdm_state != VDM_STATE_SEND_MESSAGE);
/* States below READY are terminal/error — the state machine has stopped. */
2108 if (port->vdm_state < VDM_STATE_READY)
2109 port->vdm_sm_running = false;
2111 mutex_unlock(&port->lock);
/*
 * Remaining enumerators of enum pdo_err (the enum's opening and the
 * no-error/first values are elided in this excerpt).  Each value indexes a
 * human-readable message in pdo_err_msg[] below.
 */
2117 PDO_ERR_VSAFE5V_NOT_FIRST,
2118 PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
2119 PDO_ERR_FIXED_NOT_SORTED,
2120 PDO_ERR_VARIABLE_BATT_NOT_SORTED,
2122 PDO_ERR_PPS_APDO_NOT_SORTED,
2123 PDO_ERR_DUPE_PPS_APDO,
/* Diagnostic strings indexed by enum pdo_err, logged by tcpm_validate_caps(). */
2126 static const char * const pdo_err_msg[] = {
2127 [PDO_ERR_NO_VSAFE5V] =
2128 " err: source/sink caps should at least have vSafe5V",
2129 [PDO_ERR_VSAFE5V_NOT_FIRST] =
2130 " err: vSafe5V Fixed Supply Object Shall always be the first object",
2131 [PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
2132 " err: PDOs should be in the following order: Fixed; Battery; Variable",
2133 [PDO_ERR_FIXED_NOT_SORTED] =
2134 " err: Fixed supply pdos should be in increasing order of their fixed voltage",
2135 [PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
2136 " err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
2137 [PDO_ERR_DUPE_PDO] =
2138 " err: Variable/Batt supply pdos cannot have same min/max voltage",
2139 [PDO_ERR_PPS_APDO_NOT_SORTED] =
2140 " err: Programmable power supply apdos should be in increasing order of their maximum voltage",
2141 [PDO_ERR_DUPE_PPS_APDO] =
2142 " err: Programmable power supply apdos cannot have same min/max voltage and max current",
/*
 * tcpm_caps_err() - sanity-check a source/sink capabilities PDO list against
 * the USB PD spec's ordering rules.
 *
 * Checks: a vSafe5V fixed PDO must exist and come first; PDO types must be
 * grouped in spec order; fixed PDOs sorted by voltage; variable/battery PDOs
 * sorted by minimum voltage without duplicates; PPS APDOs sorted by maximum
 * voltage without duplicates.  Returns the first violation found (the
 * PDO_NO_ERR return for a clean list is elided in this excerpt).
 */
2145 static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
2146 unsigned int nr_pdo)
2150 /* Should at least contain vSafe5v */
2152 return PDO_ERR_NO_VSAFE5V;
2154 /* The vSafe5V Fixed Supply Object Shall always be the first object */
2155 if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
2156 pdo_fixed_voltage(pdo[0]) != VSAFE5V)
2157 return PDO_ERR_VSAFE5V_NOT_FIRST;
2159 for (i = 1; i < nr_pdo; i++) {
2160 if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
2161 return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
2162 } else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
2163 enum pd_pdo_type type = pdo_type(pdo[i]);
2167 * The remaining Fixed Supply Objects, if
2168 * present, shall be sent in voltage order;
2169 * lowest to highest.
2171 case PDO_TYPE_FIXED:
2172 if (pdo_fixed_voltage(pdo[i]) <=
2173 pdo_fixed_voltage(pdo[i - 1]))
2174 return PDO_ERR_FIXED_NOT_SORTED;
2177 * The Battery Supply Objects and Variable
2178 * supply, if present shall be sent in Minimum
2179 * Voltage order; lowest to highest.
2183 if (pdo_min_voltage(pdo[i]) <
2184 pdo_min_voltage(pdo[i - 1]))
2185 return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
2186 else if ((pdo_min_voltage(pdo[i]) ==
2187 pdo_min_voltage(pdo[i - 1])) &&
2188 (pdo_max_voltage(pdo[i]) ==
2189 pdo_max_voltage(pdo[i - 1])))
2190 return PDO_ERR_DUPE_PDO;
2193 * The Programmable Power Supply APDOs, if present,
2194 * shall be sent in Maximum Voltage order;
2195 * lowest to highest.
/* Only PPS APDOs are order-checked; other APDO types are skipped. */
2198 if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
2201 if (pdo_pps_apdo_max_voltage(pdo[i]) <
2202 pdo_pps_apdo_max_voltage(pdo[i - 1]))
2203 return PDO_ERR_PPS_APDO_NOT_SORTED;
2204 else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
2205 pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
2206 pdo_pps_apdo_max_voltage(pdo[i]) ==
2207 pdo_pps_apdo_max_voltage(pdo[i - 1]) &&
2208 pdo_pps_apdo_max_current(pdo[i]) ==
2209 pdo_pps_apdo_max_current(pdo[i - 1]))
2210 return PDO_ERR_DUPE_PPS_APDO;
2213 tcpm_log_force(port, " Unknown pdo type");
/*
 * tcpm_validate_caps() - validate a PDO list via tcpm_caps_err(), logging
 * the matching pdo_err_msg[] string on failure (error return elided in this
 * excerpt; returns 0 on success).
 */
2221 static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
2222 unsigned int nr_pdo)
2224 enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
2226 if (err_index != PDO_NO_ERR) {
2227 tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
/*
 * tcpm_altmode_enter() - typec_altmode_ops .enter hook: queue an Enter Mode
 * SVDM for @altmode, with an optional single configuration VDO.
 * Called without port->lock held (uses the _unlocked queue helper).
 */
2234 static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
2236 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2240 svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2241 if (svdm_version < 0)
2242 return svdm_version;
/* Object count is 2 (header + config VDO) when a VDO is supplied, else 1. */
2244 header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2245 header |= VDO_OPOS(altmode->mode);
2247 tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0);
/*
 * tcpm_altmode_exit() - typec_altmode_ops .exit hook: queue an Exit Mode
 * SVDM for @altmode.  Called without port->lock held.
 */
2251 static int tcpm_altmode_exit(struct typec_altmode *altmode)
2253 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2257 svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2258 if (svdm_version < 0)
2259 return svdm_version;
2261 header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2262 header |= VDO_OPOS(altmode->mode);
2264 tcpm_queue_vdm_unlocked(port, header, NULL, 0);
/*
 * tcpm_altmode_vdm() - typec_altmode_ops .vdm hook: queue an arbitrary VDM
 * from an altmode driver.  @count includes the header, hence count - 1
 * payload VDOs are queued.
 */
2268 static int tcpm_altmode_vdm(struct typec_altmode *altmode,
2269 u32 header, const u32 *data, int count)
2271 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2273 tcpm_queue_vdm_unlocked(port, header, data, count - 1);
/* Altmode operations exposed to altmode drivers via the typec class. */
2278 static const struct typec_altmode_ops tcpm_altmode_ops = {
2279 .enter = tcpm_altmode_enter,
2280 .exit = tcpm_altmode_exit,
2281 .vdm = tcpm_altmode_vdm,
2285 * PD (data, control) command handling functions
/*
 * ready_state() - the port's "ready" state for its current power role:
 * SRC_READY as source (SNK_READY return for the sink case elided here).
 */
2287 static inline enum tcpm_state ready_state(struct tcpm_port *port)
2289 if (port->pwr_role == TYPEC_SOURCE)
2295 static int tcpm_pd_send_control(struct tcpm_port *port,
2296 enum pd_ctrl_msg_type type);
/*
 * tcpm_handle_alert() - handle a received Alert data message.
 *
 * An alert with no type set is answered Not_Supported.  Non-battery alerts
 * trigger a Get_Status exchange (as source this starts a proper AMS; as sink
 * it proceeds without checking SinkTxOk — see comment below).  Battery
 * alerts fall through to Not_Supported for now.
 */
2298 static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
2301 u32 p0 = le32_to_cpu(payload[0]);
2302 unsigned int type = usb_pd_ado_type(p0);
2305 tcpm_log(port, "Alert message received with no type");
2306 tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2310 /* Just handling non-battery alerts for now */
2311 if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
2312 if (port->pwr_role == TYPEC_SOURCE) {
2313 port->upcoming_state = GET_STATUS_SEND;
2314 tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
2317 * Do not check SinkTxOk here in case the Source doesn't set its Rp to
2320 port->ams = GETTING_SOURCE_SINK_STATUS;
2321 tcpm_set_state(port, GET_STATUS_SEND, 0);
2324 tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
/*
 * tcpm_set_auto_vbus_discharge_threshold() - forward the auto-discharge
 * threshold to the TCPC driver, if it implements the optional callback.
 * Logs the parameters and result; returns the callback's status.
 */
2328 static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
2329 enum typec_pwr_opmode mode, bool pps_active,
2330 u32 requested_vbus_voltage)
/* Optional TCPC op — silently succeed (elided return) when not provided. */
2334 if (!port->tcpc->set_auto_vbus_discharge_threshold)
2337 ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
2338 requested_vbus_voltage);
2339 tcpm_log_force(port,
2340 "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
2341 mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);
/*
 * tcpm_pd_handle_state() - transition to @state as the handling of a
 * received message, respecting AMS rules (PD 3.0 8.3.3.4.1.1 / 6.8.1).
 *
 * In READY states the transition happens directly; during a power
 * transition a protocol error forces a hard reset; otherwise a soft reset
 * is issued if the current AMS is not interruptible, or the state is
 * deferred via upcoming_state/next_ams until the port returns to ready.
 */
2346 static void tcpm_pd_handle_state(struct tcpm_port *port,
2347 enum tcpm_state state,
2349 unsigned int delay_ms)
2351 switch (port->state) {
2355 tcpm_set_state(port, state, delay_ms);
2357 /* 8.3.3.4.1.1 and 6.8.1 power transitioning */
2358 case SNK_TRANSITION_SINK:
2359 case SNK_TRANSITION_SINK_VBUS:
2360 case SRC_TRANSITION_SUPPLY:
2361 tcpm_set_state(port, HARD_RESET_SEND, 0);
2364 if (!tcpm_ams_interruptible(port)) {
2365 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
2366 SRC_SOFT_RESET_WAIT_SNK_TX :
2370 /* process the Message 6.8.1 */
2371 port->upcoming_state = state;
2372 port->next_ams = ams;
2373 tcpm_set_state(port, ready_state(port), delay_ms);
/*
 * tcpm_pd_handle_msg() - queue response @message for a received PD message,
 * with the same AMS handling as tcpm_pd_handle_state(): direct in READY
 * states, hard reset during power transitions, soft reset if the current
 * AMS is not interruptible, otherwise defer via next_ams and queue.
 */
2379 static void tcpm_pd_handle_msg(struct tcpm_port *port,
2380 enum pd_msg_request message,
2383 switch (port->state) {
2387 tcpm_queue_message(port, message);
2389 /* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
2390 case SNK_TRANSITION_SINK:
2391 case SNK_TRANSITION_SINK_VBUS:
2392 case SRC_TRANSITION_SUPPLY:
2393 tcpm_set_state(port, HARD_RESET_SEND, 0);
2396 if (!tcpm_ams_interruptible(port)) {
2397 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
2398 SRC_SOFT_RESET_WAIT_SNK_TX :
2402 port->next_ams = ams;
2403 tcpm_set_state(port, ready_state(port), 0);
2404 /* 6.8.1 process the Message */
2405 tcpm_queue_message(port, message);
/*
 * tcpm_register_source_caps() - publish the partner's source capabilities
 * through the usb_power_delivery class, lazily creating the partner's
 * usb_power_delivery object on first use.  Returns 0 on success or a
 * negative errno from the registration calls.
 */
2411 static int tcpm_register_source_caps(struct tcpm_port *port)
2413 struct usb_power_delivery_desc desc = { port->negotiated_rev };
2414 struct usb_power_delivery_capabilities_desc caps = { };
2415 struct usb_power_delivery_capabilities *cap;
2417 if (!port->partner_pd)
2418 port->partner_pd = usb_power_delivery_register(NULL, &desc);
2419 if (IS_ERR(port->partner_pd))
2420 return PTR_ERR(port->partner_pd);
2422 memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
2423 caps.role = TYPEC_SOURCE;
2425 cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
2427 return PTR_ERR(cap);
2429 port->partner_source_caps = cap;
/*
 * tcpm_register_sink_caps() - publish the partner's sink capabilities
 * through the usb_power_delivery class; mirrors
 * tcpm_register_source_caps() with role TYPEC_SINK.
 */
2434 static int tcpm_register_sink_caps(struct tcpm_port *port)
2436 struct usb_power_delivery_desc desc = { port->negotiated_rev };
2437 struct usb_power_delivery_capabilities_desc caps = { };
2438 struct usb_power_delivery_capabilities *cap;
2440 if (!port->partner_pd)
2441 port->partner_pd = usb_power_delivery_register(NULL, &desc);
2442 if (IS_ERR(port->partner_pd))
2443 return PTR_ERR(port->partner_pd);
2445 memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
2446 caps.role = TYPEC_SINK;
2448 cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
2450 return PTR_ERR(cap);
2452 port->partner_sink_caps = cap;
/*
 * tcpm_pd_data_request() - dispatch a received PD data message by type.
 *
 * Handles Source_Capabilities, Request, Sink_Capabilities, Vendor_Defined,
 * BIST, Alert and the currently-unsupported battery/country messages.  A
 * pending VDM AMS is aborted first if interrupted by any non-VDM data
 * message.
 *
 * NOTE(review): switch braces, break statements and some case labels are
 * elided in this excerpt; the grouping below follows the visible labels.
 */
2457 static void tcpm_pd_data_request(struct tcpm_port *port,
2458 const struct pd_message *msg)
2460 enum pd_data_msg_type type = pd_header_type_le(msg->header);
2461 unsigned int cnt = pd_header_cnt_le(msg->header);
2462 unsigned int rev = pd_header_rev_le(msg->header);
2464 enum frs_typec_current partner_frs_current;
/* A non-VDM data message interrupts any VDM AMS in progress. */
2468 if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
2469 port->vdm_state = VDM_STATE_ERR_BUSY;
2470 tcpm_ams_finish(port);
2471 mod_vdm_delayed_work(port, 0);
2475 case PD_DATA_SOURCE_CAP:
2476 for (i = 0; i < cnt; i++)
2477 port->source_caps[i] = le32_to_cpu(msg->payload[i]);
2479 port->nr_source_caps = cnt;
2481 tcpm_log_source_caps(port);
2483 tcpm_validate_caps(port, port->source_caps,
2484 port->nr_source_caps);
2486 tcpm_register_source_caps(port);
2489 * Adjust revision in subsequent message headers, as required,
2490 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
2491 * support Rev 1.0 so just do nothing in that scenario.
2493 if (rev == PD_REV10) {
2494 if (port->ams == GET_SOURCE_CAPABILITIES)
2495 tcpm_ams_finish(port);
2499 if (rev < PD_MAX_REV)
2500 port->negotiated_rev = rev;
2502 if (port->pwr_role == TYPEC_SOURCE) {
2503 if (port->ams == GET_SOURCE_CAPABILITIES)
2504 tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
2505 /* Unexpected Source Capabilities */
2507 tcpm_pd_handle_msg(port,
2508 port->negotiated_rev < PD_REV30 ?
2509 PD_MSG_CTRL_REJECT :
2510 PD_MSG_CTRL_NOT_SUPP,
2512 } else if (port->state == SNK_WAIT_CAPABILITIES) {
2514 * This message may be received even if VBUS is not
2515 * present. This is quite unexpected; see USB PD
2516 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
2517 * However, at the same time, we must be ready to
2518 * receive this message and respond to it 15ms after
2519 * receiving PS_RDY during power swap operations, no matter
2520 * if VBUS is available or not (USB PD specification,
2522 * So we need to accept the message either way,
2523 * but be prepared to keep waiting for VBUS after it was
2526 port->ams = POWER_NEGOTIATION;
2527 port->in_ams = true;
2528 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
2530 if (port->ams == GET_SOURCE_CAPABILITIES)
2531 tcpm_ams_finish(port);
2532 tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
2533 POWER_NEGOTIATION, 0);
2536 case PD_DATA_REQUEST:
2538 * Adjust revision in subsequent message headers, as required,
2539 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
2540 * support Rev 1.0 so just reject in that scenario.
2542 if (rev == PD_REV10) {
2543 tcpm_pd_handle_msg(port,
2544 port->negotiated_rev < PD_REV30 ?
2545 PD_MSG_CTRL_REJECT :
2546 PD_MSG_CTRL_NOT_SUPP,
2551 if (rev < PD_MAX_REV)
2552 port->negotiated_rev = rev;
/* A Request is only valid to a source, and must carry exactly one RDO. */
2554 if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
2555 tcpm_pd_handle_msg(port,
2556 port->negotiated_rev < PD_REV30 ?
2557 PD_MSG_CTRL_REJECT :
2558 PD_MSG_CTRL_NOT_SUPP,
2563 port->sink_request = le32_to_cpu(msg->payload[0]);
/* Defer (Wait) while our own VDM state machine is mid-flight. */
2565 if (port->vdm_sm_running && port->explicit_contract) {
2566 tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
2570 if (port->state == SRC_SEND_CAPABILITIES)
2571 tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
2573 tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
2574 POWER_NEGOTIATION, 0);
2576 case PD_DATA_SINK_CAP:
2577 /* We don't do anything with this at the moment... */
2578 for (i = 0; i < cnt; i++)
2579 port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
/* Evaluate the partner's Fast Role Swap current requirement. */
2581 partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
2582 PDO_FIXED_FRS_CURR_SHIFT;
2583 frs_enable = partner_frs_current && (partner_frs_current <=
2584 port->new_source_frs_current);
2586 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
2587 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n');
2589 ret = port->tcpc->enable_frs(port->tcpc, true);
2590 tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
2593 port->nr_sink_caps = cnt;
2594 port->sink_cap_done = true;
2595 tcpm_register_sink_caps(port);
2597 if (port->ams == GET_SINK_CAPABILITIES)
2598 tcpm_set_state(port, ready_state(port), 0);
2599 /* Unexpected Sink Capabilities */
2601 tcpm_pd_handle_msg(port,
2602 port->negotiated_rev < PD_REV30 ?
2603 PD_MSG_CTRL_REJECT :
2604 PD_MSG_CTRL_NOT_SUPP,
2607 case PD_DATA_VENDOR_DEF:
2608 tcpm_handle_vdm_request(port, msg->payload, cnt);
/* BIST: record the requested test data object and enter BIST_RX. */
2611 port->bist_request = le32_to_cpu(msg->payload[0]);
2612 tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
/* Alert received outside READY: recover via soft reset first. */
2615 if (port->state != SRC_READY && port->state != SNK_READY)
2616 tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
2617 SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
2620 tcpm_handle_alert(port, msg->payload, cnt);
2622 case PD_DATA_BATT_STATUS:
2623 case PD_DATA_GET_COUNTRY_INFO:
2624 /* Currently unsupported */
2625 tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
2626 PD_MSG_CTRL_REJECT :
2627 PD_MSG_CTRL_NOT_SUPP,
/* Unknown data message type: Reject (rev < 3.0) or Not_Supported. */
2631 tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
2632 PD_MSG_CTRL_REJECT :
2633 PD_MSG_CTRL_NOT_SUPP,
2635 tcpm_log(port, "Unrecognized data message type %#x", type);
/*
 * tcpm_pps_complete() - finish a pending PPS request with @result and wake
 * the waiter blocked on port->pps_complete.
 */
2640 static void tcpm_pps_complete(struct tcpm_port *port, int result)
2642 if (port->pps_pending) {
2643 port->pps_status = result;
2644 port->pps_pending = false;
2645 complete(&port->pps_complete);
/*
 * Handle a received PD control message.  Dispatches on the control message
 * type and, for replies (PS_RDY, Accept, Reject/Wait/Not_Supported), on the
 * port's current state-machine state.  Runs with port->lock held, called
 * from the PD RX work handler.
 */
2649 static void tcpm_pd_ctrl_request(struct tcpm_port *port,
2650 const struct pd_message *msg)
2652 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
2653 enum tcpm_state next_state;
2656 * Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in
2657 * VDM AMS if waiting for VDM responses and will be handled later.
2659 if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
2660 port->vdm_state = VDM_STATE_ERR_BUSY;
2661 tcpm_ams_finish(port);
2662 mod_vdm_delayed_work(port, 0);
2666 case PD_CTRL_GOOD_CRC:
/* Capability requests: answer with our own source/sink capabilities. */
2669 case PD_CTRL_GET_SOURCE_CAP:
2670 tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
2672 case PD_CTRL_GET_SINK_CAP:
2673 tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
2675 case PD_CTRL_GOTO_MIN:
/*
 * PS_RDY: the partner's power supply has settled.  What that means
 * depends on which transition we are currently in.
 */
2677 case PD_CTRL_PS_RDY:
2678 switch (port->state) {
2679 case SNK_TRANSITION_SINK:
2680 if (port->vbus_present) {
2681 tcpm_set_current_limit(port,
2682 port->req_current_limit,
2683 port->req_supply_voltage);
2684 port->explicit_contract = true;
2685 tcpm_set_auto_vbus_discharge_threshold(port,
2687 port->pps_data.active,
2688 port->supply_voltage);
2689 tcpm_set_state(port, SNK_READY, 0);
2692 * Seen after power swap. Keep waiting for VBUS
2693 * in a transitional state.
2695 tcpm_set_state(port,
2696 SNK_TRANSITION_SINK_VBUS, 0);
2699 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
2700 tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
2702 case PR_SWAP_SNK_SRC_SINK_OFF:
2703 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
2705 case VCONN_SWAP_WAIT_FOR_VCONN:
2706 tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
2708 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
2709 tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
/* Unexpected PS_RDY: recover the protocol layer with a Soft Reset. */
2712 tcpm_pd_handle_state(port,
2713 port->pwr_role == TYPEC_SOURCE ?
2714 SRC_SOFT_RESET_WAIT_SNK_TX :
/*
 * Reject / Wait / Not_Supported replies to something we sent.
 * Wait maps to -EAGAIN (retryable), the others to -EOPNOTSUPP.
 */
2720 case PD_CTRL_REJECT:
2722 case PD_CTRL_NOT_SUPP:
2723 switch (port->state) {
2724 case SNK_NEGOTIATE_CAPABILITIES:
2725 /* USB PD specification, Figure 8-43 */
2726 if (port->explicit_contract)
2727 next_state = SNK_READY;
2729 next_state = SNK_WAIT_CAPABILITIES;
2731 /* Threshold was relaxed before sending Request. Restore it back. */
2732 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
2733 port->pps_data.active,
2734 port->supply_voltage);
2735 tcpm_set_state(port, next_state, 0);
2737 case SNK_NEGOTIATE_PPS_CAPABILITIES:
2738 /* Revert data back from any requested PPS updates */
2739 port->pps_data.req_out_volt = port->supply_voltage;
2740 port->pps_data.req_op_curr = port->current_limit;
2741 port->pps_status = (type == PD_CTRL_WAIT ?
2742 -EAGAIN : -EOPNOTSUPP);
2744 /* Threshold was relaxed before sending Request. Restore it back. */
2745 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
2746 port->pps_data.active,
2747 port->supply_voltage);
2749 tcpm_set_state(port, SNK_READY, 0);
/* Rejected swap requests: cancel and report the status to the waiter. */
2752 port->swap_status = (type == PD_CTRL_WAIT ?
2753 -EAGAIN : -EOPNOTSUPP);
2754 tcpm_set_state(port, DR_SWAP_CANCEL, 0);
2757 port->swap_status = (type == PD_CTRL_WAIT ?
2758 -EAGAIN : -EOPNOTSUPP);
2759 tcpm_set_state(port, PR_SWAP_CANCEL, 0);
2761 case VCONN_SWAP_SEND:
2762 port->swap_status = (type == PD_CTRL_WAIT ?
2763 -EAGAIN : -EOPNOTSUPP);
2764 tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
2767 tcpm_set_state(port, FR_SWAP_CANCEL, 0);
2770 port->sink_cap_done = true;
2771 tcpm_set_state(port, ready_state(port), 0);
2774 * Some port partners do not support GET_STATUS, avoid soft reset the link to
2775 * prevent redundant power re-negotiation
2777 case GET_STATUS_SEND:
2778 tcpm_set_state(port, ready_state(port), 0);
/* Not_Supported while a VDM exchange is in flight: finish the VDM AMS. */
2782 if (port->vdm_state > VDM_STATE_READY) {
2783 port->vdm_state = VDM_STATE_DONE;
2784 if (tcpm_vdm_ams(port))
2785 tcpm_ams_finish(port);
2786 mod_vdm_delayed_work(port, 0);
2791 tcpm_pd_handle_state(port,
2792 port->pwr_role == TYPEC_SOURCE ?
2793 SRC_SOFT_RESET_WAIT_SNK_TX :
/* Accept replies: commit the transition the Accept confirms. */
2799 case PD_CTRL_ACCEPT:
2800 switch (port->state) {
2801 case SNK_NEGOTIATE_CAPABILITIES:
2802 port->pps_data.active = false;
2803 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
2805 case SNK_NEGOTIATE_PPS_CAPABILITIES:
/* Latch the negotiated PPS envelope and requested operating point. */
2806 port->pps_data.active = true;
2807 port->pps_data.min_volt = port->pps_data.req_min_volt;
2808 port->pps_data.max_volt = port->pps_data.req_max_volt;
2809 port->pps_data.max_curr = port->pps_data.req_max_curr;
2810 port->req_supply_voltage = port->pps_data.req_out_volt;
2811 port->req_current_limit = port->pps_data.req_op_curr;
2812 power_supply_changed(port->psy);
2813 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
2815 case SOFT_RESET_SEND:
2816 if (port->ams == SOFT_RESET_AMS)
2817 tcpm_ams_finish(port);
2818 if (port->pwr_role == TYPEC_SOURCE) {
2819 port->upcoming_state = SRC_SEND_CAPABILITIES;
2820 tcpm_ams_start(port, POWER_NEGOTIATION);
2822 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2826 tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
2829 tcpm_set_state(port, PR_SWAP_START, 0);
2831 case VCONN_SWAP_SEND:
2832 tcpm_set_state(port, VCONN_SWAP_START, 0);
2835 tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
2838 tcpm_pd_handle_state(port,
2839 port->pwr_role == TYPEC_SOURCE ?
2840 SRC_SOFT_RESET_WAIT_SNK_TX :
2846 case PD_CTRL_SOFT_RESET:
2847 port->ams = SOFT_RESET_AMS;
2848 tcpm_set_state(port, SOFT_RESET, 0);
/* Partner-initiated swap requests: reject, defer, or accept. */
2850 case PD_CTRL_DR_SWAP:
2853 * 6.3.9: If an alternate mode is active, a request to swap
2854 * alternate modes shall trigger a port reset.
2856 if (port->typec_caps.data != TYPEC_PORT_DRD) {
2857 tcpm_pd_handle_msg(port,
2858 port->negotiated_rev < PD_REV30 ?
2859 PD_MSG_CTRL_REJECT :
2860 PD_MSG_CTRL_NOT_SUPP,
/* Discover Identity still pending on PD 2.0: ask partner to Wait. */
2863 if (port->send_discover && port->negotiated_rev < PD_REV30) {
2864 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
2868 tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
2871 case PD_CTRL_PR_SWAP:
2872 if (port->port_type != TYPEC_PORT_DRP) {
2873 tcpm_pd_handle_msg(port,
2874 port->negotiated_rev < PD_REV30 ?
2875 PD_MSG_CTRL_REJECT :
2876 PD_MSG_CTRL_NOT_SUPP,
2879 if (port->send_discover && port->negotiated_rev < PD_REV30) {
2880 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
2884 tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
2887 case PD_CTRL_VCONN_SWAP:
2888 if (port->send_discover && port->negotiated_rev < PD_REV30) {
2889 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
2893 tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
2895 case PD_CTRL_GET_SOURCE_CAP_EXT:
2896 case PD_CTRL_GET_STATUS:
2897 case PD_CTRL_FR_SWAP:
2898 case PD_CTRL_GET_PPS_STATUS:
2899 case PD_CTRL_GET_COUNTRY_CODES:
2900 /* Currently not supported */
/* PD 2.0 has no Not_Supported message; Reject is the closest reply. */
2901 tcpm_pd_handle_msg(port,
2902 port->negotiated_rev < PD_REV30 ?
2903 PD_MSG_CTRL_REJECT :
2904 PD_MSG_CTRL_NOT_SUPP,
2908 tcpm_pd_handle_msg(port,
2909 port->negotiated_rev < PD_REV30 ?
2910 PD_MSG_CTRL_REJECT :
2911 PD_MSG_CTRL_NOT_SUPP,
2913 tcpm_log(port, "Unrecognized ctrl message type %#x", type);
/*
 * Handle a received PD extended message.  Only chunked messages with a
 * single chunk's worth of data are handled; everything else is answered
 * with Not_Supported.  Runs with port->lock held.
 */
2918 static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
2919 const struct pd_message *msg)
2921 enum pd_ext_msg_type type = pd_header_type_le(msg->header);
2922 unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
2924 /* stopping VDM state machine if interrupted by other Messages */
2925 if (tcpm_vdm_ams(port)) {
2926 port->vdm_state = VDM_STATE_ERR_BUSY;
2927 tcpm_ams_finish(port);
2928 mod_vdm_delayed_work(port, 0);
2931 if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
2932 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2933 tcpm_log(port, "Unchunked extended messages unsupported");
/* Multi-chunk transfers are not implemented; reply Chunk_Not_Supported. */
2937 if (data_size > PD_EXT_MAX_CHUNK_DATA) {
2938 tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
2939 tcpm_log(port, "Chunk handling not yet supported");
2945 case PD_EXT_PPS_STATUS:
2946 if (port->ams == GETTING_SOURCE_SINK_STATUS) {
2947 tcpm_ams_finish(port);
2948 tcpm_set_state(port, ready_state(port), 0);
2950 /* unexpected Status or PPS_Status Message */
2951 tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
2952 SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
/* All remaining extended message types are currently unsupported. */
2956 case PD_EXT_SOURCE_CAP_EXT:
2957 case PD_EXT_GET_BATT_CAP:
2958 case PD_EXT_GET_BATT_STATUS:
2959 case PD_EXT_BATT_CAP:
2960 case PD_EXT_GET_MANUFACTURER_INFO:
2961 case PD_EXT_MANUFACTURER_INFO:
2962 case PD_EXT_SECURITY_REQUEST:
2963 case PD_EXT_SECURITY_RESPONSE:
2964 case PD_EXT_FW_UPDATE_REQUEST:
2965 case PD_EXT_FW_UPDATE_RESPONSE:
2966 case PD_EXT_COUNTRY_INFO:
2967 case PD_EXT_COUNTRY_CODES:
2968 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2971 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2972 tcpm_log(port, "Unrecognized extended message type %#x", type);
/*
 * Work handler for received PD messages (queued by tcpm_pd_receive()).
 * Takes port->lock, drops duplicate messages by MessageID, checks for a
 * data-role conflict, then dispatches to the extended/data/control message
 * handlers.
 */
2977 static void tcpm_pd_rx_handler(struct kthread_work *work)
2979 struct pd_rx_event *event = container_of(work,
2980 struct pd_rx_event, work);
2981 const struct pd_message *msg = &event->msg;
2982 unsigned int cnt = pd_header_cnt_le(msg->header);
2983 struct tcpm_port *port = event->port;
2985 mutex_lock(&port->lock);
2987 tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
2990 if (port->attached) {
2991 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
2992 unsigned int msgid = pd_header_msgid_le(msg->header);
2995 * USB PD standard, 6.6.1.2:
2996 * "... if MessageID value in a received Message is the
2997 * same as the stored value, the receiver shall return a
2998 * GoodCRC Message with that MessageID value and drop
2999 * the Message (this is a retry of an already received
3000 * Message). Note: this shall not apply to the Soft_Reset
3001 * Message which always has a MessageID value of zero."
3003 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
3005 port->rx_msgid = msgid;
3008 * If both ends believe to be DFP/host, we have a data role
3011 if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
3012 (port->data_role == TYPEC_HOST)) {
3014 "Data role mismatch, initiating error recovery");
3015 tcpm_set_state(port, ERROR_RECOVERY, 0);
/* cnt > 0 means a data message; cnt == 0 means a control message. */
3017 if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
3018 tcpm_pd_ext_msg_request(port, msg);
3020 tcpm_pd_data_request(port, msg);
3022 tcpm_pd_ctrl_request(port, msg);
3027 mutex_unlock(&port->lock);
/*
 * Entry point for low-level TCPC drivers to hand a received PD message to
 * the TCPM core.  Copies the message into a freshly allocated event and
 * queues it on the port's kthread worker; actual processing happens in
 * tcpm_pd_rx_handler().  GFP_ATOMIC because callers may be in atomic
 * (e.g. interrupt) context.
 */
3031 void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
3033 struct pd_rx_event *event;
3035 event = kzalloc(sizeof(*event), GFP_ATOMIC);
3039 kthread_init_work(&event->work, tcpm_pd_rx_handler);
3041 memcpy(&event->msg, msg, sizeof(*msg));
3042 kthread_queue_work(port->wq, &event->work);
3044 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
/*
 * Build and transmit a PD control message (no data objects) of the given
 * type on SOP, using the port's current roles, negotiated revision and
 * message ID.  Returns the result of tcpm_pd_transmit().
 */
3046 static int tcpm_pd_send_control(struct tcpm_port *port,
3047 enum pd_ctrl_msg_type type)
3049 struct pd_message msg;
3051 memset(&msg, 0, sizeof(msg));
3052 msg.header = PD_HEADER_LE(type, port->pwr_role,
3054 port->negotiated_rev,
3055 port->message_id, 0);
3057 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
3061 * Send queued message without affecting state.
3062 * Return true if state machine should go back to sleep,
 * false otherwise.
3065 static bool tcpm_send_queued_message(struct tcpm_port *port)
3067 enum pd_msg_request queued_message;
/* Consume the queued request before sending; loop if more get queued. */
3071 queued_message = port->queued_message;
3072 port->queued_message = PD_MSG_NONE;
3074 switch (queued_message) {
3075 case PD_MSG_CTRL_WAIT:
3076 tcpm_pd_send_control(port, PD_CTRL_WAIT);
3078 case PD_MSG_CTRL_REJECT:
3079 tcpm_pd_send_control(port, PD_CTRL_REJECT);
3081 case PD_MSG_CTRL_NOT_SUPP:
3082 tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
3084 case PD_MSG_DATA_SINK_CAP:
3085 ret = tcpm_pd_send_sink_caps(port);
3087 tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
3088 tcpm_set_state(port, SNK_SOFT_RESET, 0);
3090 tcpm_ams_finish(port);
3092 case PD_MSG_DATA_SOURCE_CAP:
3093 ret = tcpm_pd_send_source_caps(port);
3096 "Unable to send src caps, ret=%d",
3098 tcpm_set_state(port, SOFT_RESET_SEND, 0);
3099 } else if (port->pwr_role == TYPEC_SOURCE) {
/* As source, start the SenderResponse timeout for the Request. */
3100 tcpm_ams_finish(port);
3101 tcpm_set_state(port, HARD_RESET_SEND,
3102 PD_T_SENDER_RESPONSE);
3104 tcpm_ams_finish(port);
3110 } while (port->queued_message != PD_MSG_NONE);
/* Re-arm any pending delayed state transition that this send interrupted. */
3112 if (port->delayed_state != INVALID_STATE) {
3113 if (ktime_after(port->delayed_runtime, ktime_get())) {
3114 mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
3118 port->delayed_state = INVALID_STATE;
/*
 * Validate the sink's Request (RDO) in port->sink_request against our
 * advertised source PDOs: the referenced PDO index must exist, and the
 * requested maximum current/power may only exceed the PDO's limit when
 * the Capability Mismatch bit is set.  Also records whether the selected
 * PDO is the vSafe5V one (index 1) in port->op_vsafe5v.
 */
3123 static int tcpm_pd_check_request(struct tcpm_port *port)
3125 u32 pdo, rdo = port->sink_request;
3126 unsigned int max, op, pdo_max, index;
3127 enum pd_pdo_type type;
3129 index = rdo_index(rdo);
3130 if (!index || index > port->nr_src_pdo)
/* RDO object positions are 1-based; fetch the referenced source PDO. */
3133 pdo = port->src_pdo[index - 1];
3134 type = pdo_type(pdo);
3136 case PDO_TYPE_FIXED:
3138 max = rdo_max_current(rdo);
3139 op = rdo_op_current(rdo);
3140 pdo_max = pdo_max_current(pdo);
3144 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3147 if (type == PDO_TYPE_FIXED)
3149 "Requested %u mV, %u mA for %u / %u mA",
3150 pdo_fixed_voltage(pdo), pdo_max, op, max);
3153 "Requested %u -> %u mV, %u mA for %u / %u mA",
3154 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
/* Battery PDOs are validated in mW rather than mA. */
3158 max = rdo_max_power(rdo);
3159 op = rdo_op_power(rdo);
3160 pdo_max = pdo_max_power(pdo);
3164 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3167 "Requested %u -> %u mV, %u mW for %u / %u mW",
3168 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3175 port->op_vsafe5v = index == 1;
/* Lower of the source and sink PDO limits: power (mW) / current (mA). */
3180 #define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
3181 #define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
/*
 * Pick the best (source PDO, sink PDO) pair for a power Request: the
 * source PDO offering the most power whose voltage range fits inside a
 * sink PDO's range, preferring higher voltage on a power tie.  Also
 * records whether the source advertises PPS APDOs and updates the
 * power-supply class state.  The chosen indices are returned through
 * *sink_pdo and the second out-parameter (name elided in this listing).
 */
3183 static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
3186 unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
3187 max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
3191 port->pps_data.supported = false;
3192 port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
3193 power_supply_changed(port->psy);
3196 * Select the source PDO providing the most power which has a
3199 for (i = 0; i < port->nr_source_caps; i++) {
3200 u32 pdo = port->source_caps[i];
3201 enum pd_pdo_type type = pdo_type(pdo);
/* Determine this source PDO's voltage range. */
3204 case PDO_TYPE_FIXED:
3205 max_src_mv = pdo_fixed_voltage(pdo);
3206 min_src_mv = max_src_mv;
3210 max_src_mv = pdo_max_voltage(pdo);
3211 min_src_mv = pdo_min_voltage(pdo);
3214 if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
3215 port->pps_data.supported = true;
3217 POWER_SUPPLY_USB_TYPE_PD_PPS;
3218 power_supply_changed(port->psy);
3222 tcpm_log(port, "Invalid source PDO type, ignoring");
/* Determine this source PDO's available power in mW. */
3227 case PDO_TYPE_FIXED:
3229 src_ma = pdo_max_current(pdo);
3230 src_mw = src_ma * min_src_mv / 1000;
3233 src_mw = pdo_max_power(pdo);
3238 tcpm_log(port, "Invalid source PDO type, ignoring");
/* Find a sink PDO whose voltage range covers this source PDO. */
3242 for (j = 0; j < port->nr_snk_pdo; j++) {
3243 pdo = port->snk_pdo[j];
3245 switch (pdo_type(pdo)) {
3246 case PDO_TYPE_FIXED:
3247 max_snk_mv = pdo_fixed_voltage(pdo);
3248 min_snk_mv = max_snk_mv;
3252 max_snk_mv = pdo_max_voltage(pdo);
3253 min_snk_mv = pdo_min_voltage(pdo);
3258 tcpm_log(port, "Invalid sink PDO type, ignoring");
3262 if (max_src_mv <= max_snk_mv &&
3263 min_src_mv >= min_snk_mv) {
3264 /* Prefer higher voltages if available */
3265 if ((src_mw == max_mw && min_src_mv > max_mv) ||
3270 max_mv = min_src_mv;
/*
 * Select the PPS APDO from the source capabilities that can deliver the
 * requested output voltage and yields the highest operating power at the
 * requested current.  Scanning starts at index 1 because the first source
 * PDO is the mandatory fixed supply, never an APDO.  On success the
 * request envelope (min/max volt, max curr, clamped op curr) is stored in
 * port->pps_data; returns the selected source-cap index (0 if none fits).
 */
3280 static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
3282 unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
3283 unsigned int src_pdo = 0;
3286 for (i = 1; i < port->nr_source_caps; ++i) {
3287 pdo = port->source_caps[i];
3289 switch (pdo_type(pdo)) {
3291 if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
3292 tcpm_log(port, "Not PPS APDO (source), ignoring");
/* Skip APDOs whose voltage window excludes the requested output. */
3296 if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
3297 port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
3300 src_ma = pdo_pps_apdo_max_current(pdo);
3301 max_op_ma = min(src_ma, port->pps_data.req_op_curr);
3302 op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
3303 if (op_mw > max_temp_mw) {
3305 max_temp_mw = op_mw;
3309 tcpm_log(port, "Not APDO type (source), ignoring");
/* Latch the winning APDO's envelope into the pending PPS request. */
3315 src = port->source_caps[src_pdo];
3317 port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
3318 port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
3319 port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
3320 port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
3321 port->pps_data.req_op_curr);
/*
 * Build the Request Data Object (RDO) for a normal (non-PPS) power
 * request.  Selects the PDO pair via tcpm_pd_select_pdo(), computes the
 * operating current/power bounded by the matching sink PDO, sets the
 * Capability Mismatch flag when the offer is below our operating power
 * need, and stores the resulting limits in the port.  Returns 0 on
 * success with *rdo filled in, or a negative error.
 */
3327 static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
3329 unsigned int mv, ma, mw, flags;
3330 unsigned int max_ma, max_mw;
3331 enum pd_pdo_type type;
3332 u32 pdo, matching_snk_pdo;
3333 int src_pdo_index = 0;
3334 int snk_pdo_index = 0;
3337 ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index);
3341 pdo = port->source_caps[src_pdo_index];
3342 matching_snk_pdo = port->snk_pdo[snk_pdo_index];
3343 type = pdo_type(pdo);
/* Pick the request voltage from the selected source PDO. */
3346 case PDO_TYPE_FIXED:
3347 mv = pdo_fixed_voltage(pdo);
3351 mv = pdo_min_voltage(pdo);
3354 tcpm_log(port, "Invalid PDO selected!");
3358 /* Select maximum available current within the sink pdo's limit */
3359 if (type == PDO_TYPE_BATT) {
3360 mw = min_power(pdo, matching_snk_pdo);
3361 ma = 1000 * mw / mv;
3363 ma = min_current(pdo, matching_snk_pdo);
3364 mw = ma * mv / 1000;
3367 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
3369 /* Set mismatch bit if offered power is less than operating power */
3372 if (mw < port->operating_snk_mw) {
3373 flags |= RDO_CAP_MISMATCH;
3374 if (type == PDO_TYPE_BATT &&
3375 (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
3376 max_mw = pdo_max_power(matching_snk_pdo);
3377 else if (pdo_max_current(matching_snk_pdo) >
3378 pdo_max_current(pdo))
3379 max_ma = pdo_max_current(matching_snk_pdo);
3382 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
3383 port->cc_req, port->cc1, port->cc2, port->vbus_source,
3384 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
/* Encode the RDO; PDO object positions in the RDO are 1-based. */
3387 if (type == PDO_TYPE_BATT) {
3388 *rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);
3390 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
3391 src_pdo_index, mv, mw,
3392 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
3394 *rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);
3396 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
3397 src_pdo_index, mv, ma,
3398 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
3401 port->req_current_limit = ma;
3402 port->req_supply_voltage = mv;
/*
 * Build and transmit a Request data message (one RDO) to the source.
 * The auto-VBUS-discharge threshold is relaxed first because the supply
 * voltage will change after the source Accepts.
 */
3407 static int tcpm_pd_send_request(struct tcpm_port *port)
3409 struct pd_message msg;
3413 ret = tcpm_pd_build_request(port, &rdo);
3418 * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
3419 * It is safer to modify the threshold here.
3421 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
3423 memset(&msg, 0, sizeof(msg));
3424 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
3427 port->negotiated_rev,
3428 port->message_id, 1);
3429 msg.payload[0] = cpu_to_le32(rdo);
3431 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
/*
 * Build the programmable RDO for a PPS request.  Selects a PPS APDO,
 * then, if the resulting operating power is below operating_snk_mw,
 * raises the current (rounded up to RDO_PROG_CURR_MA_STEP) and, failing
 * that, the voltage (rounded up to RDO_PROG_VOLT_MV_STEP) within the
 * APDO's limits.  Fails if no combination can meet the power need.
 * On success *rdo is filled and the adjusted operating point is stored
 * back into port->pps_data.
 */
3434 static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
3436 unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
3437 unsigned int src_pdo_index;
3439 src_pdo_index = tcpm_pd_select_pps_apdo(port);
3443 max_mv = port->pps_data.req_max_volt;
3444 max_ma = port->pps_data.req_max_curr;
3445 out_mv = port->pps_data.req_out_volt;
3446 op_ma = port->pps_data.req_op_curr;
3448 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
3450 op_mw = (op_ma * out_mv) / 1000;
3451 if (op_mw < port->operating_snk_mw) {
3453 * Try raising current to meet power needs. If that's not enough
3454 * then try upping the voltage. If that's still not enough
3455 * then we've obviously chosen a PPS APDO which really isn't
3456 * suitable so abandon ship.
3458 op_ma = (port->operating_snk_mw * 1000) / out_mv;
/* Round the current up to the next programmable step on a remainder. */
3459 if ((port->operating_snk_mw * 1000) % out_mv)
3461 op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);
3463 if (op_ma > max_ma) {
3465 out_mv = (port->operating_snk_mw * 1000) / op_ma;
3466 if ((port->operating_snk_mw * 1000) % op_ma)
3468 out_mv += RDO_PROG_VOLT_MV_STEP -
3469 (out_mv % RDO_PROG_VOLT_MV_STEP);
3471 if (out_mv > max_mv) {
3472 tcpm_log(port, "Invalid PPS APDO selected!");
3478 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
3479 port->cc_req, port->cc1, port->cc2, port->vbus_source,
3480 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
3483 *rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);
3485 tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
3486 src_pdo_index, out_mv, op_ma);
3488 port->pps_data.req_op_curr = op_ma;
3489 port->pps_data.req_out_volt = out_mv;
/*
 * Build and transmit a PPS Request data message (one programmable RDO).
 * Mirrors tcpm_pd_send_request() but uses the PPS RDO builder.
 */
3494 static int tcpm_pd_send_pps_request(struct tcpm_port *port)
3496 struct pd_message msg;
3500 ret = tcpm_pd_build_pps_request(port, &rdo);
3504 /* Relax the threshold as voltage will be adjusted right after Accept Message. */
3505 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
3507 memset(&msg, 0, sizeof(msg));
3508 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
3511 port->negotiated_rev,
3512 port->message_id, 1);
3513 msg.payload[0] = cpu_to_le32(rdo);
3515 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
/*
 * Enable/disable VBUS sourcing through the TCPC.  Refuses to source VBUS
 * while the port is configured to sink charge from it, and caches the new
 * state in port->vbus_source on success.
 */
3518 static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
3522 if (enable && port->vbus_charge)
3525 tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
3527 ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
3531 port->vbus_source = enable;
/*
 * Enable/disable sinking charge from VBUS.  Refuses while we are sourcing
 * VBUS ourselves; on an actual change it programs the TCPC, caches the new
 * state, and notifies the power-supply class.
 */
3535 static int tcpm_set_charge(struct tcpm_port *port, bool charge)
3539 if (charge && port->vbus_source)
3542 if (charge != port->vbus_charge) {
3543 tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
3544 ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
3549 port->vbus_charge = charge;
3550 power_supply_changed(port->psy);
/*
 * Ask the TCPC to start DRP connection-detection toggling with the given
 * Rp/Rd presentation.  Returns false when the TCPC does not implement
 * start_toggling (caller then falls back to manual CC handling).
 */
3554 static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
3558 if (!port->tcpc->start_toggling)
3561 tcpm_log_force(port, "Start toggling");
3562 ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
/* Put VBUS into a known state: neither sourcing nor sinking charge. */
3566 static int tcpm_init_vbus(struct tcpm_port *port)
3570 ret = port->tcpc->set_vbus(port->tcpc, false, false);
3571 port->vbus_source = false;
3572 port->vbus_charge = false;
/* Turn VCONN off and record that we are not the VCONN source. */
3576 static int tcpm_init_vconn(struct tcpm_port *port)
3580 ret = port->tcpc->set_vconn(port->tcpc, false);
3581 port->vconn_role = TYPEC_SINK;
/*
 * Register the attached partner with the Type-C class (once per
 * connection): clear any stale identity, pick the accessory type from the
 * CC state, register the partner device and link it to the partner's PD
 * object.  Idempotent while port->connected is set.
 */
3585 static void tcpm_typec_connect(struct tcpm_port *port)
3587 if (!port->connected) {
3588 /* Make sure we don't report stale identity information */
3589 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
3590 port->partner_desc.usb_pd = port->pd_capable;
3591 if (tcpm_port_is_debug(port))
3592 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
3593 else if (tcpm_port_is_audio(port))
3594 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
3596 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
3597 port->partner = typec_register_partner(port->typec_port,
3598 &port->partner_desc);
3599 port->connected = true;
3600 typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
/*
 * Bring the port up as a source after attach: set CC polarity (CC2 if Rd
 * is seen there, else CC1), enable auto VBUS discharge, set roles, enable
 * PD RX when supported, source VCONN when the non-Rd pin shows Ra, and
 * finally drive VBUS.  Errors unwind in reverse order via the goto
 * cleanup labels (out_disable_vconn/out_disable_pd/out_disable_mux).
 */
3604 static int tcpm_src_attach(struct tcpm_port *port)
3606 enum typec_cc_polarity polarity =
3607 port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
3608 : TYPEC_POLARITY_CC1;
3614 ret = tcpm_set_polarity(port, polarity);
3618 tcpm_enable_auto_vbus_discharge(port, true);
3620 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
3624 if (port->pd_supported) {
3625 ret = port->tcpc->set_pd_rx(port->tcpc, true);
3627 goto out_disable_mux;
3631 * USB Type-C specification, version 1.2,
3632 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
3633 * Enable VCONN only if the non-RD port is set to RA.
3635 if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
3636 (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
3637 ret = tcpm_set_vconn(port, true);
3639 goto out_disable_pd;
3642 ret = tcpm_set_vbus(port, true);
3644 goto out_disable_vconn;
3646 port->pd_capable = false;
3648 port->partner = NULL;
3650 port->attached = true;
3651 port->send_discover = true;
/* Error unwind: undo VCONN, PD RX and mux configuration in order. */
3656 tcpm_set_vconn(port, false);
3658 if (port->pd_supported)
3659 port->tcpc->set_pd_rx(port->tcpc, false);
3661 tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
3662 TYPEC_ORIENTATION_NONE);
/*
 * Unregister the partner from the Type-C class on disconnect; the inverse
 * of tcpm_typec_connect().  Idempotent while port->connected is clear.
 */
3666 static void tcpm_typec_disconnect(struct tcpm_port *port)
3668 if (port->connected) {
3669 typec_partner_set_usb_power_delivery(port->partner, NULL);
3670 typec_unregister_partner(port->partner);
3671 port->partner = NULL;
3672 port->connected = false;
/*
 * Unregister every partner alternate mode discovered during this
 * connection and wipe the discovery bookkeeping in port->mode_data.
 */
3676 static void tcpm_unregister_altmodes(struct tcpm_port *port)
3678 struct pd_mode_data *modep = &port->mode_data;
3681 for (i = 0; i < modep->altmodes; i++) {
3682 typec_unregister_altmode(port->partner_altmode[i]);
3683 port->partner_altmode[i] = NULL;
3686 memset(modep, 0, sizeof(*modep));
/*
 * Forward the partner's USB-communications-capable flag to the TCPC,
 * if the driver implements the optional callback.
 */
3689 static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
3691 tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false");
3693 if (port->tcpc->set_partner_usb_comm_capable)
3694 port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
/*
 * Return the port to its detached baseline: tear down AMS/VDM state,
 * alternate modes and the Type-C partner, disable PD RX, VBUS, VCONN and
 * current limit, reset polarity and mux, clear counters and capability
 * caches, and drop the partner's registered PD capability objects.
 */
3697 static void tcpm_reset_port(struct tcpm_port *port)
3699 tcpm_enable_auto_vbus_discharge(port, false);
3700 port->in_ams = false;
3701 port->ams = NONE_AMS;
3702 port->vdm_sm_running = false;
3703 tcpm_unregister_altmodes(port);
3704 tcpm_typec_disconnect(port);
3705 port->attached = false;
3706 port->pd_capable = false;
3707 port->pps_data.supported = false;
3708 tcpm_set_partner_usb_comm_capable(port, false);
3711 * First Rx ID should be 0; set this to a sentinel of -1 so that
3712 * we can check tcpm_pd_rx_handler() if we had seen it before.
3714 port->rx_msgid = -1;
3716 port->tcpc->set_pd_rx(port->tcpc, false);
3717 tcpm_init_vbus(port); /* also disables charging */
3718 tcpm_init_vconn(port);
3719 tcpm_set_current_limit(port, 0, 0);
3720 tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
3721 tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
3722 TYPEC_ORIENTATION_NONE);
3723 tcpm_set_attached_state(port, false);
3724 port->try_src_count = 0;
3725 port->try_snk_count = 0;
3726 port->usb_type = POWER_SUPPLY_USB_TYPE_C;
3727 power_supply_changed(port->psy);
3728 port->nr_sink_caps = 0;
3729 port->sink_cap_done = false;
3730 if (port->tcpc->enable_frs)
3731 port->tcpc->enable_frs(port->tcpc, false);
/* Drop the partner's registered USB PD capability objects. */
3733 usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
3734 port->partner_sink_caps = NULL;
3735 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
3736 port->partner_source_caps = NULL;
3737 usb_power_delivery_unregister(port->partner_pd);
3738 port->partner_pd = NULL;
/*
 * Handle cable detach: reset the hard-reset counter on a real disconnect,
 * bail out if we were never attached, switch off BIST test-data mode if
 * the TCPC supports it, then fully reset the port.
 */
3741 static void tcpm_detach(struct tcpm_port *port)
3743 if (tcpm_port_is_disconnected(port))
3744 port->hard_reset_count = 0;
3746 if (!port->attached)
3749 if (port->tcpc->set_bist_data) {
3750 tcpm_log(port, "disable BIST MODE TESTDATA");
3751 port->tcpc->set_bist_data(port->tcpc, false);
3754 tcpm_reset_port(port);
/* Source-role detach handler (body elided in this listing — presumably
 * delegates to tcpm_detach(); verify against the full file). */
3757 static void tcpm_src_detach(struct tcpm_port *port)
/*
 * Bring the port up as a sink after attach: choose polarity from which CC
 * line is not open, enable auto VBUS discharge, and set the sink power /
 * data roles.  Marks the port attached and schedules Discover Identity.
 */
3762 static int tcpm_snk_attach(struct tcpm_port *port)
3769 ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
3770 TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
3774 tcpm_enable_auto_vbus_discharge(port, true);
3776 ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
3780 port->pd_capable = false;
3782 port->partner = NULL;
3784 port->attached = true;
3785 port->send_discover = true;
/* Sink-role detach handler (body elided in this listing — presumably
 * delegates to tcpm_detach(); verify against the full file). */
3790 static void tcpm_snk_detach(struct tcpm_port *port)
/*
 * Attach as an (audio/debug) accessory: take the source data/power roles,
 * register the Type-C partner and mark the port attached.  No PD contract
 * is negotiated for accessories.
 */
3795 static int tcpm_acc_attach(struct tcpm_port *port)
3802 ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
3803 tcpm_data_role_for_source(port));
3807 port->partner = NULL;
3809 tcpm_typec_connect(port);
3811 port->attached = true;
/* Accessory detach handler (body elided in this listing — presumably
 * delegates to tcpm_detach(); verify against the full file). */
3816 static void tcpm_acc_detach(struct tcpm_port *port)
/*
 * Decide the next state after a failed recovery attempt: retry Hard Reset
 * while attempts remain, escalate to error recovery for PD-capable
 * partners, otherwise fall back to the unattached state matching the
 * current power role.
 */
3821 static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
3823 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
3824 return HARD_RESET_SEND;
3825 if (port->pd_capable)
3826 return ERROR_RECOVERY;
3827 if (port->pwr_role == TYPEC_SOURCE)
3828 return SRC_UNATTACHED;
3829 if (port->state == SNK_WAIT_CAPABILITIES)
3831 return SNK_UNATTACHED;
/*
 * Map the port type (DRP / source-only / sink-only) and, for DRP, the
 * current power role to the matching unattached state.
 */
3834 static inline enum tcpm_state unattached_state(struct tcpm_port *port)
3836 if (port->port_type == TYPEC_PORT_DRP) {
3837 if (port->pwr_role == TYPEC_SOURCE)
3838 return SRC_UNATTACHED;
3840 return SNK_UNATTACHED;
3841 } else if (port->port_type == TYPEC_PORT_SRC) {
3842 return SRC_UNATTACHED;
3845 return SNK_UNATTACHED;
/*
 * Complete a pending role-swap request: record the result and wake the
 * thread blocked on swap_complete.  No-op when no swap is outstanding.
 */
3848 static void tcpm_swap_complete(struct tcpm_port *port, int result)
3850 if (port->swap_pending) {
3851 port->swap_status = result;
3852 port->swap_pending = false;
3853 port->non_pd_role_swap = false;
3854 complete(&port->swap_complete);
/*
 * Translate the advertised Rp level on CC into a Type-C power operation
 * mode; anything other than 1.5A/3.0A Rp maps to default USB power.
 */
3858 static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
3861 case TYPEC_CC_RP_1_5:
3862 return TYPEC_PWR_MODE_1_5A;
3863 case TYPEC_CC_RP_3_0:
3864 return TYPEC_PWR_MODE_3_0A;
3865 case TYPEC_CC_RP_DEF:
3867 return TYPEC_PWR_MODE_USB;
/*
 * Inverse mapping: pick the Rp presentation for a power operation mode.
 * PD mode advertises Rp 3.0A, the same as explicit 3.0A mode.
 */
3871 static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
3874 case TYPEC_PWR_MODE_USB:
3875 return TYPEC_CC_RP_DEF;
3876 case TYPEC_PWR_MODE_1_5A:
3877 return TYPEC_CC_RP_1_5;
3878 case TYPEC_PWR_MODE_3_0A:
3879 case TYPEC_PWR_MODE_PD:
3881 return TYPEC_CC_RP_3_0;
/*
 * Cap the partner's SVDM version to what the negotiated PD revision
 * permits: PD 2.0 only allows Structured VDM version 1.0.
 */
3885 static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
3887 switch (port->negotiated_rev) {
3891 * 6.4.4.2.3 Structured VDM Version
3892 * 2.0 states "At this time, there is only one version (1.0) defined.
3893 * This field Shall be set to zero to indicate Version 1.0."
3894 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
3895 * To ensure that we follow the Power Delivery revision we are currently
3896 * operating on, downgrade the SVDM version to the highest one supported
3897 * by the Power Delivery revision.
3900 typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
3903 typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
3908 static void run_state_machine(struct tcpm_port *port)
3911 enum typec_pwr_opmode opmode;
3913 enum tcpm_state upcoming_state;
3915 if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT)
3916 port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
3917 port->state == SRC_UNATTACHED) ||
3918 (port->enter_state == SNK_ATTACH_WAIT &&
3919 port->state == SNK_UNATTACHED) ||
3920 (port->enter_state == SNK_DEBOUNCED &&
3921 port->state == SNK_UNATTACHED));
3923 port->enter_state = port->state;
3924 switch (port->state) {
3927 case CHECK_CONTAMINANT:
3928 port->tcpc->check_contaminant(port->tcpc);
3931 case SRC_UNATTACHED:
3932 if (!port->non_pd_role_swap)
3933 tcpm_swap_complete(port, -ENOTCONN);
3934 tcpm_src_detach(port);
3935 if (port->potential_contaminant) {
3936 tcpm_set_state(port, CHECK_CONTAMINANT, 0);
3939 if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
3940 tcpm_set_state(port, TOGGLING, 0);
3943 tcpm_set_cc(port, tcpm_rp_cc(port));
3944 if (port->port_type == TYPEC_PORT_DRP)
3945 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
3947 case SRC_ATTACH_WAIT:
3948 if (tcpm_port_is_debug(port))
3949 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
3951 else if (tcpm_port_is_audio(port))
3952 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
3954 else if (tcpm_port_is_source(port) && port->vbus_vsafe0v)
3955 tcpm_set_state(port,
3956 tcpm_try_snk(port) ? SNK_TRY
3962 port->try_snk_count++;
3965 * - Do not drive vconn or vbus
3966 * - Terminate CC pins (both) to Rd
3968 * - Wait for tDRPTry (PD_T_DRP_TRY).
3969 * Until then, ignore any state changes.
3971 tcpm_set_cc(port, TYPEC_CC_RD);
3972 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
3975 if (tcpm_port_is_sink(port)) {
3976 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
3978 tcpm_set_state(port, SRC_TRYWAIT, 0);
3982 case SNK_TRY_WAIT_DEBOUNCE:
3983 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
3984 PD_T_TRY_CC_DEBOUNCE);
3986 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
3987 if (port->vbus_present && tcpm_port_is_sink(port))
3988 tcpm_set_state(port, SNK_ATTACHED, 0);
3993 tcpm_set_cc(port, tcpm_rp_cc(port));
3994 if (port->max_wait == 0) {
3995 port->max_wait = jiffies +
3996 msecs_to_jiffies(PD_T_DRP_TRY);
3997 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4000 if (time_is_after_jiffies(port->max_wait))
4001 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4002 jiffies_to_msecs(port->max_wait -
4005 tcpm_set_state(port, SNK_UNATTACHED, 0);
4008 case SRC_TRYWAIT_DEBOUNCE:
4009 tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
4011 case SRC_TRYWAIT_UNATTACHED:
4012 tcpm_set_state(port, SNK_UNATTACHED, 0);
4016 ret = tcpm_src_attach(port);
4017 tcpm_set_state(port, SRC_UNATTACHED,
4018 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
4021 opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
4022 typec_set_pwr_opmode(port->typec_port, opmode);
4023 port->pwr_opmode = TYPEC_PWR_MODE_USB;
4024 port->caps_count = 0;
4025 port->negotiated_rev = PD_MAX_REV;
4026 port->message_id = 0;
4027 port->rx_msgid = -1;
4028 port->explicit_contract = false;
4029 /* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
4030 if (port->ams == POWER_ROLE_SWAP ||
4031 port->ams == FAST_ROLE_SWAP)
4032 tcpm_ams_finish(port);
4033 if (!port->pd_supported) {
4034 tcpm_set_state(port, SRC_READY, 0);
4037 port->upcoming_state = SRC_SEND_CAPABILITIES;
4038 tcpm_ams_start(port, POWER_NEGOTIATION);
4040 case SRC_SEND_CAPABILITIES:
4042 if (port->caps_count > PD_N_CAPS_COUNT) {
4043 tcpm_set_state(port, SRC_READY, 0);
4046 ret = tcpm_pd_send_source_caps(port);
4048 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
4049 PD_T_SEND_SOURCE_CAP);
4052 * Per standard, we should clear the reset counter here.
4053 * However, that can result in state machine hang-ups.
4054 * Reset it only in READY state to improve stability.
4056 /* port->hard_reset_count = 0; */
4057 port->caps_count = 0;
4058 port->pd_capable = true;
4059 tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
4060 PD_T_SEND_SOURCE_CAP);
4063 case SRC_SEND_CAPABILITIES_TIMEOUT:
4065 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
4067 * PD 2.0 sinks are supposed to accept src-capabilities with a
4068 * 3.0 header and simply ignore any src PDOs which the sink does
4069 * not understand such as PPS but some 2.0 sinks instead ignore
4070 * the entire PD_DATA_SOURCE_CAP message, causing contract
4071 * negotiation to fail.
4073 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
4074 * sending src-capabilities with a lower PD revision to
4075 * make these broken sinks work.
4077 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
4078 tcpm_set_state(port, HARD_RESET_SEND, 0);
4079 } else if (port->negotiated_rev > PD_REV20) {
4080 port->negotiated_rev--;
4081 port->hard_reset_count = 0;
4082 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
4084 tcpm_set_state(port, hard_reset_state(port), 0);
4087 case SRC_NEGOTIATE_CAPABILITIES:
4088 ret = tcpm_pd_check_request(port);
4090 tcpm_pd_send_control(port, PD_CTRL_REJECT);
4091 if (!port->explicit_contract) {
4092 tcpm_set_state(port,
4093 SRC_WAIT_NEW_CAPABILITIES, 0);
4095 tcpm_set_state(port, SRC_READY, 0);
4098 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4099 tcpm_set_partner_usb_comm_capable(port,
4100 !!(port->sink_request & RDO_USB_COMM));
4101 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
4102 PD_T_SRC_TRANSITION);
4105 case SRC_TRANSITION_SUPPLY:
4106 /* XXX: regulator_set_voltage(vbus, ...) */
4107 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
4108 port->explicit_contract = true;
4109 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
4110 port->pwr_opmode = TYPEC_PWR_MODE_PD;
4111 tcpm_set_state_cond(port, SRC_READY, 0);
4115 port->hard_reset_count = 0;
4117 port->try_src_count = 0;
4119 tcpm_swap_complete(port, 0);
4120 tcpm_typec_connect(port);
4122 if (port->ams != NONE_AMS)
4123 tcpm_ams_finish(port);
4124 if (port->next_ams != NONE_AMS) {
4125 port->ams = port->next_ams;
4126 port->next_ams = NONE_AMS;
4130 * If previous AMS is interrupted, switch to the upcoming
4133 if (port->upcoming_state != INVALID_STATE) {
4134 upcoming_state = port->upcoming_state;
4135 port->upcoming_state = INVALID_STATE;
4136 tcpm_set_state(port, upcoming_state, 0);
4141 * 6.4.4.3.1 Discover Identity
4142 * "The Discover Identity Command Shall only be sent to SOP when there is an
4143 * Explicit Contract."
4144 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
4145 * port->explicit_contract to decide whether to send the command.
4147 if (port->explicit_contract) {
4148 tcpm_set_initial_svdm_version(port);
4149 mod_send_discover_delayed_work(port, 0);
4151 port->send_discover = false;
4156 * Sending ping messages is not necessary if
4157 * - the source operates at vSafe5V
4159 * - The system is not operating in PD mode
4161 * - Both partners are connected using a Type-C connector
4163	 * There is no actual need to send PD messages since the local
4164	 * port is type-c and the spec does not clearly say whether PD is
4165	 * possible when type-c is connected to Type-A/B
4168 case SRC_WAIT_NEW_CAPABILITIES:
4169 /* Nothing to do... */
4173 case SNK_UNATTACHED:
4174 if (!port->non_pd_role_swap)
4175 tcpm_swap_complete(port, -ENOTCONN);
4176 tcpm_pps_complete(port, -ENOTCONN);
4177 tcpm_snk_detach(port);
4178 if (port->potential_contaminant) {
4179 tcpm_set_state(port, CHECK_CONTAMINANT, 0);
4182 if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
4183 tcpm_set_state(port, TOGGLING, 0);
4186 tcpm_set_cc(port, TYPEC_CC_RD);
4187 if (port->port_type == TYPEC_PORT_DRP)
4188 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
4190 case SNK_ATTACH_WAIT:
4191 if ((port->cc1 == TYPEC_CC_OPEN &&
4192 port->cc2 != TYPEC_CC_OPEN) ||
4193 (port->cc1 != TYPEC_CC_OPEN &&
4194 port->cc2 == TYPEC_CC_OPEN))
4195 tcpm_set_state(port, SNK_DEBOUNCED,
4197 else if (tcpm_port_is_disconnected(port))
4198 tcpm_set_state(port, SNK_UNATTACHED,
4202 if (tcpm_port_is_disconnected(port))
4203 tcpm_set_state(port, SNK_UNATTACHED,
4205 else if (port->vbus_present)
4206 tcpm_set_state(port,
4207 tcpm_try_src(port) ? SRC_TRY
4212 port->try_src_count++;
4213 tcpm_set_cc(port, tcpm_rp_cc(port));
4215 tcpm_set_state(port, SRC_TRY_WAIT, 0);
4218 if (port->max_wait == 0) {
4219 port->max_wait = jiffies +
4220 msecs_to_jiffies(PD_T_DRP_TRY);
4221 msecs = PD_T_DRP_TRY;
4223 if (time_is_after_jiffies(port->max_wait))
4224 msecs = jiffies_to_msecs(port->max_wait -
4229 tcpm_set_state(port, SNK_TRYWAIT, msecs);
4231 case SRC_TRY_DEBOUNCE:
4232 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
4235 tcpm_set_cc(port, TYPEC_CC_RD);
4236 tcpm_set_state(port, SNK_TRYWAIT_VBUS, PD_T_CC_DEBOUNCE);
4238 case SNK_TRYWAIT_VBUS:
4240 * TCPM stays in this state indefinitely until VBUS
4241 * is detected as long as Rp is not detected for
4242 * more than a time period of tPDDebounce.
4244 if (port->vbus_present && tcpm_port_is_sink(port)) {
4245 tcpm_set_state(port, SNK_ATTACHED, 0);
4248 if (!tcpm_port_is_sink(port))
4249 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
4251 case SNK_TRYWAIT_DEBOUNCE:
4252 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
4255 ret = tcpm_snk_attach(port);
4257 tcpm_set_state(port, SNK_UNATTACHED, 0);
4259 tcpm_set_state(port, SNK_STARTUP, 0);
4262 opmode = tcpm_get_pwr_opmode(port->polarity ?
4263 port->cc2 : port->cc1);
4264 typec_set_pwr_opmode(port->typec_port, opmode);
4265 port->pwr_opmode = TYPEC_PWR_MODE_USB;
4266 port->negotiated_rev = PD_MAX_REV;
4267 port->message_id = 0;
4268 port->rx_msgid = -1;
4269 port->explicit_contract = false;
4271 if (port->ams == POWER_ROLE_SWAP ||
4272 port->ams == FAST_ROLE_SWAP)
4273 /* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
4274 tcpm_ams_finish(port);
4276 tcpm_set_state(port, SNK_DISCOVERY, 0);
4279 if (port->vbus_present) {
4280 u32 current_lim = tcpm_get_current_limit(port);
4282 if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
4283 current_lim = PD_P_SNK_STDBY_MW / 5;
4284 tcpm_set_current_limit(port, current_lim, 5000);
4285 /* Not sink vbus if operational current is 0mA */
4286 tcpm_set_charge(port, !port->pd_supported ||
4287 pdo_max_current(port->snk_pdo[0]));
4289 if (!port->pd_supported)
4290 tcpm_set_state(port, SNK_READY, 0);
4292 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
4296 * For DRP, timeouts differ. Also, handling is supposed to be
4297 * different and much more complex (dead battery detection;
4298 * see USB power delivery specification, section 8.3.3.6.1.5.1).
4300 tcpm_set_state(port, hard_reset_state(port),
4301 port->port_type == TYPEC_PORT_DRP ?
4302 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
4304 case SNK_DISCOVERY_DEBOUNCE:
4305 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
4308 case SNK_DISCOVERY_DEBOUNCE_DONE:
4309 if (!tcpm_port_is_disconnected(port) &&
4310 tcpm_port_is_sink(port) &&
4311 ktime_after(port->delayed_runtime, ktime_get())) {
4312 tcpm_set_state(port, SNK_DISCOVERY,
4313 ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
4316 tcpm_set_state(port, unattached_state(port), 0);
4318 case SNK_WAIT_CAPABILITIES:
4319 ret = port->tcpc->set_pd_rx(port->tcpc, true);
4321 tcpm_set_state(port, SNK_READY, 0);
4325 * If VBUS has never been low, and we time out waiting
4326 * for source cap, try a soft reset first, in case we
4327 * were already in a stable contract before this boot.
4328 * Do this only once.
4330 if (port->vbus_never_low) {
4331 port->vbus_never_low = false;
4332 tcpm_set_state(port, SNK_SOFT_RESET,
4333 PD_T_SINK_WAIT_CAP);
4335 tcpm_set_state(port, hard_reset_state(port),
4336 PD_T_SINK_WAIT_CAP);
4339 case SNK_NEGOTIATE_CAPABILITIES:
4340 port->pd_capable = true;
4341 tcpm_set_partner_usb_comm_capable(port,
4342 !!(port->source_caps[0] & PDO_FIXED_USB_COMM));
4343 port->hard_reset_count = 0;
4344 ret = tcpm_pd_send_request(port);
4346 /* Restore back to the original state */
4347 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
4348 port->pps_data.active,
4349 port->supply_voltage);
4350 /* Let the Source send capabilities again. */
4351 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
4353 tcpm_set_state_cond(port, hard_reset_state(port),
4354 PD_T_SENDER_RESPONSE);
4357 case SNK_NEGOTIATE_PPS_CAPABILITIES:
4358 ret = tcpm_pd_send_pps_request(port);
4360 /* Restore back to the original state */
4361 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
4362 port->pps_data.active,
4363 port->supply_voltage);
4364 port->pps_status = ret;
4366 * If this was called due to updates to sink
4367 * capabilities, and pps is no longer valid, we should
4368 * safely fall back to a standard PDO.
4370 if (port->update_sink_caps)
4371 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
4373 tcpm_set_state(port, SNK_READY, 0);
4375 tcpm_set_state_cond(port, hard_reset_state(port),
4376 PD_T_SENDER_RESPONSE);
4379 case SNK_TRANSITION_SINK:
4380 /* From the USB PD spec:
4381 * "The Sink Shall transition to Sink Standby before a positive or
4382 * negative voltage transition of VBUS. During Sink Standby
4383 * the Sink Shall reduce its power draw to pSnkStdby."
4385 * This is not applicable to PPS though as the port can continue
4386 * to draw negotiated power without switching to standby.
4388 if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
4389 port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
4390 u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
4392 tcpm_log(port, "Setting standby current %u mV @ %u mA",
4393 port->supply_voltage, stdby_ma);
4394 tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
4397 case SNK_TRANSITION_SINK_VBUS:
4398 tcpm_set_state(port, hard_reset_state(port),
4399 PD_T_PS_TRANSITION);
4402 port->try_snk_count = 0;
4403 port->update_sink_caps = false;
4404 if (port->explicit_contract) {
4405 typec_set_pwr_opmode(port->typec_port,
4407 port->pwr_opmode = TYPEC_PWR_MODE_PD;
4410 if (!port->pd_capable && port->slow_charger_loop)
4411 tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
4412 tcpm_swap_complete(port, 0);
4413 tcpm_typec_connect(port);
4414 if (port->pd_capable && port->source_caps[0] & PDO_FIXED_DUAL_ROLE)
4415 mod_enable_frs_delayed_work(port, 0);
4416 tcpm_pps_complete(port, port->pps_status);
4418 if (port->ams != NONE_AMS)
4419 tcpm_ams_finish(port);
4420 if (port->next_ams != NONE_AMS) {
4421 port->ams = port->next_ams;
4422 port->next_ams = NONE_AMS;
4426 * If previous AMS is interrupted, switch to the upcoming
4429 if (port->upcoming_state != INVALID_STATE) {
4430 upcoming_state = port->upcoming_state;
4431 port->upcoming_state = INVALID_STATE;
4432 tcpm_set_state(port, upcoming_state, 0);
4437 * 6.4.4.3.1 Discover Identity
4438 * "The Discover Identity Command Shall only be sent to SOP when there is an
4439 * Explicit Contract."
4440 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
4441 * port->explicit_contract.
4443 if (port->explicit_contract) {
4444 tcpm_set_initial_svdm_version(port);
4445 mod_send_discover_delayed_work(port, 0);
4447 port->send_discover = false;
4450 power_supply_changed(port->psy);
4453 /* Accessory states */
4454 case ACC_UNATTACHED:
4455 tcpm_acc_detach(port);
4456 tcpm_set_state(port, SRC_UNATTACHED, 0);
4458 case DEBUG_ACC_ATTACHED:
4459 case AUDIO_ACC_ATTACHED:
4460 ret = tcpm_acc_attach(port);
4462 tcpm_set_state(port, ACC_UNATTACHED, 0);
4464 case AUDIO_ACC_DEBOUNCE:
4465 tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
4468 /* Hard_Reset states */
4469 case HARD_RESET_SEND:
4470 if (port->ams != NONE_AMS)
4471 tcpm_ams_finish(port);
4473 * State machine will be directed to HARD_RESET_START,
4474 * thus set upcoming_state to INVALID_STATE.
4476 port->upcoming_state = INVALID_STATE;
4477 tcpm_ams_start(port, HARD_RESET);
4479 case HARD_RESET_START:
4480 port->sink_cap_done = false;
4481 if (port->tcpc->enable_frs)
4482 port->tcpc->enable_frs(port->tcpc, false);
4483 port->hard_reset_count++;
4484 port->tcpc->set_pd_rx(port->tcpc, false);
4485 tcpm_unregister_altmodes(port);
4486 port->nr_sink_caps = 0;
4487 port->send_discover = true;
4488 if (port->pwr_role == TYPEC_SOURCE)
4489 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
4490 PD_T_PS_HARD_RESET);
4492 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
4494 case SRC_HARD_RESET_VBUS_OFF:
4496 * 7.1.5 Response to Hard Resets
4497 * Hard Reset Signaling indicates a communication failure has occurred and the
4498 * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
4499 * drive VBUS to vSafe0V as shown in Figure 7-9.
4501 tcpm_set_vconn(port, false);
4502 tcpm_set_vbus(port, false);
4503 tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
4504 tcpm_data_role_for_source(port));
4506 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
4507 * PD_T_SRC_RECOVER before turning vbus back on.
4508 * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
4509 * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
4510 * tells the Device Policy Manager to instruct the power supply to perform a
4511 * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
4512 * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
4513 * re-establish communication with the Sink and resume USB Default Operation.
4514 * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
4516 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
4518 case SRC_HARD_RESET_VBUS_ON:
4519 tcpm_set_vconn(port, true);
4520 tcpm_set_vbus(port, true);
4521 if (port->ams == HARD_RESET)
4522 tcpm_ams_finish(port);
4523 if (port->pd_supported)
4524 port->tcpc->set_pd_rx(port->tcpc, true);
4525 tcpm_set_attached_state(port, true);
4526 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
4528 case SNK_HARD_RESET_SINK_OFF:
4529		/* Do not discharge/disconnect during hard reset */
4530 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4531 memset(&port->pps_data, 0, sizeof(port->pps_data));
4532 tcpm_set_vconn(port, false);
4533 if (port->pd_capable)
4534 tcpm_set_charge(port, false);
4535 tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
4536 tcpm_data_role_for_sink(port));
4538 * VBUS may or may not toggle, depending on the adapter.
4539 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
4540 * directly after timeout.
4542 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
4544 case SNK_HARD_RESET_WAIT_VBUS:
4545 if (port->ams == HARD_RESET)
4546 tcpm_ams_finish(port);
4547 /* Assume we're disconnected if VBUS doesn't come back. */
4548 tcpm_set_state(port, SNK_UNATTACHED,
4549 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
4551 case SNK_HARD_RESET_SINK_ON:
4552 /* Note: There is no guarantee that VBUS is on in this state */
4555 * The specification suggests that dual mode ports in sink
4556 * mode should transition to state PE_SRC_Transition_to_default.
4557 * See USB power delivery specification chapter 8.3.3.6.1.3.
4558 * This would mean to
4559 * - turn off VCONN, reset power supply
4560 * - request hardware reset
4562 * - Transition to state PE_Src_Startup
4563 * SNK only ports shall transition to state Snk_Startup
4564 * (see chapter 8.3.3.3.8).
4565 * Similar, dual-mode ports in source mode should transition
4566 * to PE_SNK_Transition_to_default.
4568 if (port->pd_capable) {
4569 tcpm_set_current_limit(port,
4570 tcpm_get_current_limit(port),
4572 /* Not sink vbus if operational current is 0mA */
4573 tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
4575 if (port->ams == HARD_RESET)
4576 tcpm_ams_finish(port);
4577 tcpm_set_attached_state(port, true);
4578 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
4579 tcpm_set_state(port, SNK_STARTUP, 0);
4582 /* Soft_Reset states */
4584 port->message_id = 0;
4585 port->rx_msgid = -1;
4586 /* remove existing capabilities */
4587 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4588 port->partner_source_caps = NULL;
4589 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4590 tcpm_ams_finish(port);
4591 if (port->pwr_role == TYPEC_SOURCE) {
4592 port->upcoming_state = SRC_SEND_CAPABILITIES;
4593 tcpm_ams_start(port, POWER_NEGOTIATION);
4595 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
4598 case SRC_SOFT_RESET_WAIT_SNK_TX:
4599 case SNK_SOFT_RESET:
4600 if (port->ams != NONE_AMS)
4601 tcpm_ams_finish(port);
4602 port->upcoming_state = SOFT_RESET_SEND;
4603 tcpm_ams_start(port, SOFT_RESET_AMS);
4605 case SOFT_RESET_SEND:
4606 port->message_id = 0;
4607 port->rx_msgid = -1;
4608 /* remove existing capabilities */
4609 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4610 port->partner_source_caps = NULL;
4611 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
4612 tcpm_set_state_cond(port, hard_reset_state(port), 0);
4614 tcpm_set_state_cond(port, hard_reset_state(port),
4615 PD_T_SENDER_RESPONSE);
4618 /* DR_Swap states */
4620 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
4621 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
4622 port->send_discover = true;
4623 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
4624 PD_T_SENDER_RESPONSE);
4626 case DR_SWAP_ACCEPT:
4627 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4628 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
4629 port->send_discover = true;
4630 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
4632 case DR_SWAP_SEND_TIMEOUT:
4633 tcpm_swap_complete(port, -ETIMEDOUT);
4634 port->send_discover = false;
4635 tcpm_ams_finish(port);
4636 tcpm_set_state(port, ready_state(port), 0);
4638 case DR_SWAP_CHANGE_DR:
4639 tcpm_unregister_altmodes(port);
4640 if (port->data_role == TYPEC_HOST)
4641 tcpm_set_roles(port, true, port->pwr_role,
4644 tcpm_set_roles(port, true, port->pwr_role,
4646 tcpm_ams_finish(port);
4647 tcpm_set_state(port, ready_state(port), 0);
4651 if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP)) {
4652 tcpm_set_state(port, ERROR_RECOVERY, 0);
4655 tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
4657 case FR_SWAP_SEND_TIMEOUT:
4658 tcpm_set_state(port, ERROR_RECOVERY, 0);
4660 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
4661 tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_OFF);
4663 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
4664 if (port->vbus_source)
4665 tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
4667 tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
4669 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
4670 tcpm_set_pwr_role(port, TYPEC_SOURCE);
4671 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
4672 tcpm_set_state(port, ERROR_RECOVERY, 0);
4675 tcpm_set_cc(port, tcpm_rp_cc(port));
4676 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
4679 /* PR_Swap states */
4680 case PR_SWAP_ACCEPT:
4681 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4682 tcpm_set_state(port, PR_SWAP_START, 0);
4685 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
4686 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
4687 PD_T_SENDER_RESPONSE);
4689 case PR_SWAP_SEND_TIMEOUT:
4690 tcpm_swap_complete(port, -ETIMEDOUT);
4691 tcpm_set_state(port, ready_state(port), 0);
4694 tcpm_apply_rc(port);
4695 if (port->pwr_role == TYPEC_SOURCE)
4696 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
4697 PD_T_SRC_TRANSITION);
4699 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
4701 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
4703 * Prevent vbus discharge circuit from turning on during PR_SWAP
4704 * as this is not a disconnect.
4706 tcpm_set_vbus(port, false);
4707 port->explicit_contract = false;
4708 /* allow time for Vbus discharge, must be < tSrcSwapStdby */
4709 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
4712 case PR_SWAP_SRC_SNK_SOURCE_OFF:
4713 tcpm_set_cc(port, TYPEC_CC_RD);
4714 /* allow CC debounce */
4715 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
4718 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
4720 * USB-PD standard, 6.2.1.4, Port Power Role:
4721 * "During the Power Role Swap Sequence, for the initial Source
4722 * Port, the Port Power Role field shall be set to Sink in the
4723 * PS_RDY Message indicating that the initial Source’s power
4724 * supply is turned off"
4726 tcpm_set_pwr_role(port, TYPEC_SINK);
4727 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
4728 tcpm_set_state(port, ERROR_RECOVERY, 0);
4731 tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
4733 case PR_SWAP_SRC_SNK_SINK_ON:
4734 tcpm_enable_auto_vbus_discharge(port, true);
4735 /* Set the vbus disconnect threshold for implicit contract */
4736 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
4737 tcpm_set_state(port, SNK_STARTUP, 0);
4739 case PR_SWAP_SNK_SRC_SINK_OFF:
4740 /* will be source, remove existing capabilities */
4741 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4742 port->partner_source_caps = NULL;
4744 * Prevent vbus discharge circuit from turning on during PR_SWAP
4745 * as this is not a disconnect.
4747 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
4748 port->pps_data.active, 0);
4749 tcpm_set_charge(port, false);
4750 tcpm_set_state(port, hard_reset_state(port),
4751 PD_T_PS_SOURCE_OFF);
4753 case PR_SWAP_SNK_SRC_SOURCE_ON:
4754 tcpm_enable_auto_vbus_discharge(port, true);
4755 tcpm_set_cc(port, tcpm_rp_cc(port));
4756 tcpm_set_vbus(port, true);
4758		 * allow time for VBUS ramp-up, must be < tNewSrc
4759 * Also, this window overlaps with CC debounce as well.
4760 * So, Wait for the max of two which is PD_T_NEWSRC
4762 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
4765 case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
4767 * USB PD standard, 6.2.1.4:
4768 * "Subsequent Messages initiated by the Policy Engine,
4769 * such as the PS_RDY Message sent to indicate that Vbus
4770 * is ready, will have the Port Power Role field set to
4773 tcpm_set_pwr_role(port, TYPEC_SOURCE);
4774 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
4775 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
4778 case VCONN_SWAP_ACCEPT:
4779 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4780 tcpm_ams_finish(port);
4781 tcpm_set_state(port, VCONN_SWAP_START, 0);
4783 case VCONN_SWAP_SEND:
4784 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
4785 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
4786 PD_T_SENDER_RESPONSE);
4788 case VCONN_SWAP_SEND_TIMEOUT:
4789 tcpm_swap_complete(port, -ETIMEDOUT);
4790 tcpm_set_state(port, ready_state(port), 0);
4792 case VCONN_SWAP_START:
4793 if (port->vconn_role == TYPEC_SOURCE)
4794 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
4796 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
4798 case VCONN_SWAP_WAIT_FOR_VCONN:
4799 tcpm_set_state(port, hard_reset_state(port),
4800 PD_T_VCONN_SOURCE_ON);
4802 case VCONN_SWAP_TURN_ON_VCONN:
4803 tcpm_set_vconn(port, true);
4804 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
4805 tcpm_set_state(port, ready_state(port), 0);
4807 case VCONN_SWAP_TURN_OFF_VCONN:
4808 tcpm_set_vconn(port, false);
4809 tcpm_set_state(port, ready_state(port), 0);
4812 case DR_SWAP_CANCEL:
4813 case PR_SWAP_CANCEL:
4814 case VCONN_SWAP_CANCEL:
4815 tcpm_swap_complete(port, port->swap_status);
4816 if (port->pwr_role == TYPEC_SOURCE)
4817 tcpm_set_state(port, SRC_READY, 0);
4819 tcpm_set_state(port, SNK_READY, 0);
4821 case FR_SWAP_CANCEL:
4822 if (port->pwr_role == TYPEC_SOURCE)
4823 tcpm_set_state(port, SRC_READY, 0);
4825 tcpm_set_state(port, SNK_READY, 0);
4829 switch (BDO_MODE_MASK(port->bist_request)) {
4830 case BDO_MODE_CARRIER2:
4831 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
4832 tcpm_set_state(port, unattached_state(port),
4833 PD_T_BIST_CONT_MODE);
4835 case BDO_MODE_TESTDATA:
4836 if (port->tcpc->set_bist_data) {
4837 tcpm_log(port, "Enable BIST MODE TESTDATA");
4838 port->tcpc->set_bist_data(port->tcpc, true);
4845 case GET_STATUS_SEND:
4846 tcpm_pd_send_control(port, PD_CTRL_GET_STATUS);
4847 tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
4848 PD_T_SENDER_RESPONSE);
4850 case GET_STATUS_SEND_TIMEOUT:
4851 tcpm_set_state(port, ready_state(port), 0);
4853 case GET_PPS_STATUS_SEND:
4854 tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS);
4855 tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
4856 PD_T_SENDER_RESPONSE);
4858 case GET_PPS_STATUS_SEND_TIMEOUT:
4859 tcpm_set_state(port, ready_state(port), 0);
4862 tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP);
4863 tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
4865 case GET_SINK_CAP_TIMEOUT:
4866 port->sink_cap_done = true;
4867 tcpm_set_state(port, ready_state(port), 0);
4869 case ERROR_RECOVERY:
4870 tcpm_swap_complete(port, -EPROTO);
4871 tcpm_pps_complete(port, -EPROTO);
4872 tcpm_set_state(port, PORT_RESET, 0);
4875 tcpm_reset_port(port);
4876 if (port->self_powered)
4877 tcpm_set_cc(port, TYPEC_CC_OPEN);
4879 tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
4880 TYPEC_CC_RD : tcpm_rp_cc(port));
4881 tcpm_set_state(port, PORT_RESET_WAIT_OFF,
4882 PD_T_ERROR_RECOVERY);
4884 case PORT_RESET_WAIT_OFF:
4885 tcpm_set_state(port,
4886 tcpm_default_state(port),
4887 port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
4890 /* AMS intermediate state */
4892 if (port->upcoming_state == INVALID_STATE) {
4893 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
4894 SRC_READY : SNK_READY, 0);
4898 upcoming_state = port->upcoming_state;
4899 port->upcoming_state = INVALID_STATE;
4900 tcpm_set_state(port, upcoming_state, 0);
4904 case CHUNK_NOT_SUPP:
4905 tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
4906 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
4909 WARN(1, "Unexpected port state %d\n", port->state);
/*
 * Kthread work handler that advances the TCPM port state machine.
 *
 * Runs with port->lock held for the entire pass: flushes any queued PD
 * message, applies a pending delayed state change, then repeatedly calls
 * run_state_machine() until the state stops changing or a new delayed
 * transition has been scheduled.
 */
4914 static void tcpm_state_machine_work(struct kthread_work *work)
4916 	struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
4917 	enum tcpm_state prev_state;
4919 	mutex_lock(&port->lock);
	/* Mark the machine as running so concurrent paths can detect re-entry. */
4920 	port->state_machine_running = true;
	/*
	 * Flush a queued PD message before stepping the machine.
	 * NOTE(review): the branch taken when tcpm_send_queued_message()
	 * returns true is not visible in this excerpt — presumably it bails
	 * out early; confirm against the full source.
	 */
4922 	if (port->queued_message && tcpm_send_queued_message(port))
4925 	/* If we were queued due to a delayed state change, update it now */
4926 	if (port->delayed_state) {
4927 		tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
4928 			 tcpm_states[port->state],
4929 			 tcpm_states[port->delayed_state], port->delay_ms);
4930 		port->prev_state = port->state;
4931 		port->state = port->delayed_state;
4932 		port->delayed_state = INVALID_STATE;
4936 	 * Continue running as long as we have (non-delayed) state changes
	/*
	 * Step until the state is stable: stop when run_state_machine() left
	 * port->state unchanged, or when it scheduled a delayed transition
	 * (which will re-queue this work later).
	 */
4940 		prev_state = port->state;
4941 		run_state_machine(port);
4942 		if (port->queued_message)
4943 			tcpm_send_queued_message(port);
4944 	} while (port->state != prev_state && !port->delayed_state);
4947 	port->state_machine_running = false;
4948 	mutex_unlock(&port->lock);
/*
 * Handle a CC1/CC2 line status change reported by the TCPC.
 *
 * @cc1, @cc2: newly reported status of the two CC pins.
 *
 * Dispatches on the current port state to decide the next transition
 * (attach/detach detection, debounce, Try.SRC/Try.SNK handling, and
 * no-ops while a PR_Swap/FR_Swap or reset is in flight).
 * NOTE(review): port->cc1/cc2 appear to be updated to the new values
 * before the switch (elided from this excerpt; SNK_ATTACH_WAIT already
 * reads the new values from port->cc1/cc2) — confirm against the full
 * source.
 */
4951 static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
4952 			    enum typec_cc_status cc2)
4954 	enum typec_cc_status old_cc1, old_cc2;
4955 	enum tcpm_state new_state;
	/* Snapshot the previous CC status so changes can be detected below. */
4957 	old_cc1 = port->cc1;
4958 	old_cc2 = port->cc2;
4962 	tcpm_log_force(port,
4963 		       "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
4964 		       old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
4966 		       tcpm_port_is_disconnected(port) ? "disconnected"
4969 	switch (port->state) {
	/* Debug/audio accessory or source termination seen -> attach as source,
	 * otherwise sink termination -> attach as sink. */
4971 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
4972 		    tcpm_port_is_source(port))
4973 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
4974 		else if (tcpm_port_is_sink(port))
4975 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
4977 	case CHECK_CONTAMINANT:
4978 		/* Wait for Toggling to be resumed */
4980 	case SRC_UNATTACHED:
4981 	case ACC_UNATTACHED:
4982 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
4983 		    tcpm_port_is_source(port))
4984 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
4986 	case SRC_ATTACH_WAIT:
4987 		if (tcpm_port_is_disconnected(port) ||
4988 		    tcpm_port_is_audio_detached(port))
4989 			tcpm_set_state(port, SRC_UNATTACHED, 0);
		/* Any CC change restarts the attach-wait debounce. */
4990 		else if (cc1 != old_cc1 || cc2 != old_cc2)
4991 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
4995 	case SRC_SEND_CAPABILITIES:
4997 		if (tcpm_port_is_disconnected(port) ||
4998 		    !tcpm_port_is_source(port)) {
4999 			if (port->port_type == TYPEC_PORT_SRC)
5000 				tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
5002 				tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
5005 	case SNK_UNATTACHED:
5006 		if (tcpm_port_is_sink(port))
5007 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5009 	case SNK_ATTACH_WAIT:
		/* Exactly one CC pin open -> single connection candidate, debounce it. */
5010 		if ((port->cc1 == TYPEC_CC_OPEN &&
5011 		     port->cc2 != TYPEC_CC_OPEN) ||
5012 		    (port->cc1 != TYPEC_CC_OPEN &&
5013 		     port->cc2 == TYPEC_CC_OPEN))
5014 			new_state = SNK_DEBOUNCED;
5015 		else if (tcpm_port_is_disconnected(port))
5016 			new_state = SNK_UNATTACHED;
		/* Target changed while debouncing -> restart the debounce timer. */
5019 		if (new_state != port->delayed_state)
5020 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5023 		if (tcpm_port_is_disconnected(port))
5024 			new_state = SNK_UNATTACHED;
5025 		else if (port->vbus_present)
5026 			new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
5028 			new_state = SNK_UNATTACHED;
5029 		if (new_state != port->delayed_state)
5030 			tcpm_set_state(port, SNK_DEBOUNCED, 0);
5034 		 * EXIT condition is based primarily on vbus disconnect and CC is secondary.
5035 		 * "A port that has entered into USB PD communications with the Source and
5036 		 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
5037 		 * cable disconnect in addition to monitoring VBUS.
5039 		 * A port that is monitoring the CC voltage for disconnect (but is not in
5040 		 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
5041 		 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
5042 		 * vRd-USB for tPDDebounce."
5044 		 * When set_auto_vbus_discharge_threshold is enabled, CC pins go
5045 		 * away before vbus decays to disconnect threshold. Allow
5046 		 * disconnect to be driven by vbus disconnect when auto vbus
5047 		 * discharge is enabled.
5049 		if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
5050 			tcpm_set_state(port, unattached_state(port), 0);
		/* Non-PD contract: Rp advertisement changed, re-read the current limit. */
5051 		else if (!port->pd_capable &&
5052 			 (cc1 != old_cc1 || cc2 != old_cc2))
5053 			tcpm_set_current_limit(port,
5054 					       tcpm_get_current_limit(port),
5058 	case AUDIO_ACC_ATTACHED:
5059 		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
5060 			tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
5062 	case AUDIO_ACC_DEBOUNCE:
5063 		if (tcpm_port_is_audio(port))
5064 			tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
5067 	case DEBUG_ACC_ATTACHED:
5068 		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
5069 			tcpm_set_state(port, ACC_UNATTACHED, 0);
5073 		/* Do nothing, waiting for timeout */
5077 		/* CC line is unstable, wait for debounce */
5078 		if (tcpm_port_is_disconnected(port))
5079 			tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
5081 	case SNK_DISCOVERY_DEBOUNCE:
5085 		/* Hand over to state machine if needed */
5086 		if (!port->vbus_present && tcpm_port_is_source(port))
5087 			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
5089 	case SRC_TRYWAIT_DEBOUNCE:
5090 		if (port->vbus_present || !tcpm_port_is_source(port))
5091 			tcpm_set_state(port, SRC_TRYWAIT, 0);
5093 	case SNK_TRY_WAIT_DEBOUNCE:
5094 		if (!tcpm_port_is_sink(port)) {
5096 			tcpm_set_state(port, SRC_TRYWAIT, 0);
5100 		if (tcpm_port_is_source(port))
5101 			tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
5103 	case SRC_TRY_DEBOUNCE:
5104 		tcpm_set_state(port, SRC_TRY_WAIT, 0);
5106 	case SNK_TRYWAIT_DEBOUNCE:
5107 		if (tcpm_port_is_sink(port))
5108 			tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
5110 	case SNK_TRYWAIT_VBUS:
5111 		if (!tcpm_port_is_sink(port))
5112 			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
5114 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
5115 		if (!tcpm_port_is_sink(port))
5116 			tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE);
5118 			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0);
5121 		/* Do nothing, waiting for tCCDebounce */
5123 	case PR_SWAP_SNK_SRC_SINK_OFF:
5124 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5125 	case PR_SWAP_SRC_SNK_SOURCE_OFF:
5126 	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
5127 	case PR_SWAP_SNK_SRC_SOURCE_ON:
5129 		 * CC state change is expected in PR_SWAP
5134 	case FR_SWAP_SEND_TIMEOUT:
5135 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5136 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5137 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5138 		/* Do nothing, CC change expected */
5142 	case PORT_RESET_WAIT_OFF:
5144 		 * State set back to default mode once the timer completes.
5145 		 * Ignore CC changes here.
5150 		 * While acting as sink and auto vbus discharge is enabled, allow disconnect
5151 		 * to be driven by vbus disconnect.
5153 		if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
5154 							 port->auto_vbus_discharge_enabled))
5155 			tcpm_set_state(port, unattached_state(port), 0);
/*
 * VBUS-on event handler. Called from tcpm_pd_event_handler() with
 * port->lock held; dispatches on the current state-machine state.
 */
static void _tcpm_pd_vbus_on(struct tcpm_port *port)
	tcpm_log_force(port, "VBUS on");
	port->vbus_present = true;
	/*
	 * When vbus_present is true i.e. Voltage at VBUS is greater than VSAFE5V implicitly
	 * states that vbus is not at VSAFE0V, hence clear the vbus_vsafe0v flag here.
	 */
	port->vbus_vsafe0v = false;
	switch (port->state) {
	case SNK_TRANSITION_SINK_VBUS:
		/* New power level reached: the contract is now explicit. */
		port->explicit_contract = true;
		tcpm_set_state(port, SNK_READY, 0);
		tcpm_set_state(port, SNK_DISCOVERY, 0);
		tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
	case SNK_HARD_RESET_WAIT_VBUS:
		/* VBUS is back after a hard reset: resume as sink. */
		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
		tcpm_set_state(port, SRC_STARTUP, 0);
	case SRC_HARD_RESET_VBUS_ON:
		tcpm_set_state(port, SRC_STARTUP, 0);
		/* Do nothing, waiting for timeout */
		/* Do nothing, waiting for Rd to be detected */
	case SRC_TRYWAIT_DEBOUNCE:
		tcpm_set_state(port, SRC_TRYWAIT, 0);
	case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to be done */
		/* Do nothing, waiting for tCCDebounce */
	case SNK_TRYWAIT_VBUS:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACHED, 0);
	case SNK_TRYWAIT_DEBOUNCE:
		/* Do nothing, waiting for Rp */
	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
		if (port->vbus_present && tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACHED, 0);
	case SRC_TRY_DEBOUNCE:
		/* Do nothing, waiting for sink detection */
	case FR_SWAP_SEND_TIMEOUT:
	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
		/* TCPC sources VBUS autonomously during FRS; notify it. */
		if (port->tcpc->frs_sourcing_vbus)
			port->tcpc->frs_sourcing_vbus(port->tcpc);
	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
		if (port->tcpc->frs_sourcing_vbus)
			port->tcpc->frs_sourcing_vbus(port->tcpc);
		tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
	case PORT_RESET_WAIT_OFF:
		/*
		 * State set back to default mode once the timer completes.
		 * Ignore vbus changes here.
		 */
/*
 * VBUS-off event handler. Called from tcpm_pd_event_handler() with
 * port->lock held; routes the VBUS drop to the state machine.
 */
static void _tcpm_pd_vbus_off(struct tcpm_port *port)
	tcpm_log_force(port, "VBUS off");
	port->vbus_present = false;
	port->vbus_never_low = false;
	switch (port->state) {
	case SNK_HARD_RESET_SINK_OFF:
		tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
	case HARD_RESET_SEND:
		/* Do nothing, waiting for timeout */
		/* Hand over to state machine if needed */
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
	case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to be done */
	case SNK_TRYWAIT_VBUS:
	case SNK_TRYWAIT_DEBOUNCE:
	case SNK_ATTACH_WAIT:
		/* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
	case SNK_NEGOTIATE_CAPABILITIES:
	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
		/* VBUS decay is the expected next step of the swap. */
		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
	case PR_SWAP_SNK_SRC_SINK_OFF:
		/* Do nothing, expected */
	case PR_SWAP_SNK_SRC_SOURCE_ON:
		/*
		 * Do nothing when vbus off notification is received.
		 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON
		 * for the vbus source to ramp up.
		 */
	case PORT_RESET_WAIT_OFF:
		tcpm_set_state(port, tcpm_default_state(port), 0);
	case SRC_TRY_DEBOUNCE:
		/* Do nothing, waiting for sink detection */
	case SRC_SEND_CAPABILITIES:
	case SRC_SEND_CAPABILITIES_TIMEOUT:
	case SRC_NEGOTIATE_CAPABILITIES:
	case SRC_TRANSITION_SUPPLY:
	case SRC_WAIT_NEW_CAPABILITIES:
		/*
		 * Force to unattached state to re-initiate connection.
		 * DRP port should move to Unattached.SNK instead of Unattached.SRC if
		 * sink removed. Although sink removal here is due to source's vbus collapse,
		 * treat it the same way for consistency.
		 */
		if (port->port_type == TYPEC_PORT_SRC)
			tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
		/*
		 * State set back to default mode once the timer completes.
		 * Ignore vbus changes here.
		 */
	case FR_SWAP_SEND_TIMEOUT:
	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
		/* Do nothing, vbus drop expected */
	case SNK_HARD_RESET_WAIT_VBUS:
		/* Do nothing, it's OK to receive vbus off events */
	/* Default: an attached sink losing VBUS means disconnect. */
	if (port->pwr_role == TYPEC_SINK && port->attached)
		tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
/*
 * vSafe0V event handler: VBUS has fully discharged. Called from
 * tcpm_pd_event_handler() with port->lock held.
 */
static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
	tcpm_log_force(port, "VBUS VSAFE0V");
	port->vbus_vsafe0v = true;
	switch (port->state) {
	case SRC_HARD_RESET_VBUS_OFF:
		/*
		 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
		 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
		 */
		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
	case SRC_ATTACH_WAIT:
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
	case SRC_SEND_CAPABILITIES:
	case SRC_SEND_CAPABILITIES_TIMEOUT:
	case SRC_NEGOTIATE_CAPABILITIES:
	case SRC_TRANSITION_SUPPLY:
	case SRC_WAIT_NEW_CAPABILITIES:
		/* With auto-discharge, vSafe0V (not VBUS-off) drives disconnect. */
		if (port->auto_vbus_discharge_enabled) {
			if (port->port_type == TYPEC_PORT_SRC)
				tcpm_set_state(port, SRC_UNATTACHED, 0);
				tcpm_set_state(port, SNK_UNATTACHED, 0);
	case PR_SWAP_SNK_SRC_SINK_OFF:
	case PR_SWAP_SNK_SRC_SOURCE_ON:
		/* Do nothing, vsafe0v is expected during transition */
	case SNK_ATTACH_WAIT:
		/* Do nothing, still waiting for VSAFE5V for connect */
	case SNK_HARD_RESET_WAIT_VBUS:
		/* Do nothing, it's OK to receive vbus off events */
	if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
		tcpm_set_state(port, SNK_UNATTACHED, 0);
/*
 * Handle a Hard Reset signal received from the port partner.
 * Called from tcpm_pd_event_handler() with port->lock held.
 */
static void _tcpm_pd_hard_reset(struct tcpm_port *port)
	tcpm_log_force(port, "Received hard reset");
	/* A hard reset aborts any in-flight BIST test-data mode. */
	if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
		port->tcpc->set_bist_data(port->tcpc, false);
	switch (port->state) {
	case ERROR_RECOVERY:
	case PORT_RESET_WAIT_OFF:
	/* A partner-initiated hard reset ends whatever AMS was running. */
	if (port->ams != NONE_AMS)
		port->ams = NONE_AMS;
	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
		port->ams = HARD_RESET;
	/*
	 * If we keep receiving hard reset requests, executing the hard reset
	 * must have failed. Revert to error recovery if that happens.
	 */
	tcpm_set_state(port,
		port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
			HARD_RESET_START : ERROR_RECOVERY,
/*
 * Deferred handler for events queued by the tcpm_*_change()/tcpm_*()
 * notification helpers below. Runs on the port's kthread worker.
 *
 * Events are latched into port->pd_events under pd_event_lock by the
 * (possibly atomic) notifiers; here the bits are consumed in a loop,
 * dropping the spinlock while the handlers run under port->lock.
 */
static void tcpm_pd_event_handler(struct kthread_work *work)
	struct tcpm_port *port = container_of(work, struct tcpm_port,
	mutex_lock(&port->lock);
	spin_lock(&port->pd_event_lock);
	while (port->pd_events) {
		/* Snapshot and clear, then release the lock for the handlers. */
		events = port->pd_events;
		port->pd_events = 0;
		spin_unlock(&port->pd_event_lock);
		if (events & TCPM_RESET_EVENT)
			_tcpm_pd_hard_reset(port);
		if (events & TCPM_VBUS_EVENT) {
			vbus = port->tcpc->get_vbus(port->tcpc);
				_tcpm_pd_vbus_on(port);
				_tcpm_pd_vbus_off(port);
			/*
			 * When TCPC does not support detecting vsafe0v voltage level,
			 * treat vbus absent as vsafe0v. Else invoke is_vbus_vsafe0v
			 * to see if vbus has discharged to VSAFE0V.
			 */
			if (!port->tcpc->is_vbus_vsafe0v ||
			    port->tcpc->is_vbus_vsafe0v(port->tcpc))
				_tcpm_pd_vbus_vsafe0v(port);
		if (events & TCPM_CC_EVENT) {
			enum typec_cc_status cc1, cc2;
			/* Re-read the live CC state rather than trusting the event. */
			if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
				_tcpm_cc_change(port, cc1, cc2);
		if (events & TCPM_FRS_EVENT) {
			if (port->state == SNK_READY) {
				port->upcoming_state = FR_SWAP_SEND;
				ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
					port->upcoming_state = INVALID_STATE;
				tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
		if (events & TCPM_SOURCING_VBUS) {
			tcpm_log(port, "sourcing vbus");
			/*
			 * In fast role swap case TCPC autonomously sources vbus. Set vbus_source
			 * true as TCPM wouldn't have called tcpm_set_vbus.
			 *
			 * When vbus is sourced on the command on TCPM i.e. TCPM called
			 * tcpm_set_vbus to source vbus, vbus_source would already be true.
			 */
			port->vbus_source = true;
			_tcpm_pd_vbus_on(port);
		if (events & TCPM_PORT_CLEAN) {
			tcpm_log(port, "port clean");
			if (port->state == CHECK_CONTAMINANT) {
				if (tcpm_start_toggling(port, tcpm_rp_cc(port)))
					tcpm_set_state(port, TOGGLING, 0);
					tcpm_set_state(port, tcpm_default_state(port), 0);
		if (events & TCPM_PORT_ERROR) {
			tcpm_log(port, "port triggering error recovery");
			tcpm_set_state(port, ERROR_RECOVERY, 0);
		/* Re-take the lock before re-testing pd_events. */
		spin_lock(&port->pd_event_lock);
	spin_unlock(&port->pd_event_lock);
	mutex_unlock(&port->lock);
/*
 * Notify TCPM of a CC line change. Safe to call from the TCPC's
 * interrupt context: it only latches an event bit under a spinlock and
 * queues the deferred event handler.
 */
void tcpm_cc_change(struct tcpm_port *port)
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_CC_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
EXPORT_SYMBOL_GPL(tcpm_cc_change);
/*
 * Notify TCPM that the VBUS level changed; the handler re-reads the
 * actual level via tcpc->get_vbus(). Interrupt-context safe.
 */
void tcpm_vbus_change(struct tcpm_port *port)
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_VBUS_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
EXPORT_SYMBOL_GPL(tcpm_vbus_change);
/*
 * Notify TCPM that a Hard Reset was received. Note the deliberate
 * assignment (not OR): a hard reset supersedes and discards any other
 * pending events.
 */
void tcpm_pd_hard_reset(struct tcpm_port *port)
	spin_lock(&port->pd_event_lock);
	port->pd_events = TCPM_RESET_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
/*
 * Notify TCPM that an FRS (fast role swap) signal was detected while
 * acting as sink. Interrupt-context safe.
 */
void tcpm_sink_frs(struct tcpm_port *port)
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_FRS_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
EXPORT_SYMBOL_GPL(tcpm_sink_frs);
/*
 * Notify TCPM that the TCPC started sourcing VBUS autonomously
 * (e.g. during fast role swap). Interrupt-context safe.
 */
void tcpm_sourcing_vbus(struct tcpm_port *port)
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_SOURCING_VBUS;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
/*
 * Notify TCPM that contaminant detection finished and the port is
 * clean; the handler resumes normal operation from CHECK_CONTAMINANT.
 */
void tcpm_port_clean(struct tcpm_port *port)
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_PORT_CLEAN;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
EXPORT_SYMBOL_GPL(tcpm_port_clean);
5569 bool tcpm_port_is_toggling(struct tcpm_port *port)
5571 return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING;
5573 EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
/*
 * Request that the port enter error recovery (full reset of the
 * connection per the Type-C spec). Interrupt-context safe.
 */
void tcpm_port_error_recovery(struct tcpm_port *port)
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_PORT_ERROR;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
EXPORT_SYMBOL_GPL(tcpm_port_error_recovery);
/*
 * Delayed work that queries the partner's sink capabilities (needed to
 * decide whether FRS can be enabled). Retries with a delay while the
 * state machine is busy; gives up once sink_cap_done is set.
 */
static void tcpm_enable_frs_work(struct kthread_work *work)
	struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
	mutex_lock(&port->lock);
	/* Not FRS capable */
	if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
	    port->pwr_opmode != TYPEC_PWR_MODE_PD ||
	    !port->tcpc->enable_frs ||
	    /* Sink caps queried */
	    port->sink_cap_done || port->negotiated_rev < PD_REV30)
	/* Send when the state machine is idle */
	if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover)
	port->upcoming_state = GET_SINK_CAP;
	ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		/* AMS could not start: stop trying. */
		port->sink_cap_done = true;
	/* Reschedule this work to poll again later. */
	mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
	mutex_unlock(&port->lock);
/*
 * Delayed work that issues a Discover Identity VDM once the port is in
 * a ready state; re-arms itself while the state machine is busy.
 */
static void tcpm_send_discover_work(struct kthread_work *work)
	struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
	mutex_lock(&port->lock);
	/* No need to send DISCOVER_IDENTITY anymore */
	if (!port->send_discover)
	/* Pre-PD3.0, a UFP/device must not initiate Discover Identity. */
	if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
		port->send_discover = false;
	/* Retry if the port is not idle */
	if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
	tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
	mutex_unlock(&port->lock);
/*
 * typec_operations.dr_set: request a data role swap to @data.
 * Blocks (up to PD_ROLE_SWAP_TIMEOUT) until the swap completes;
 * swap_lock serializes concurrent swap requests.
 */
static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
	struct tcpm_port *port = typec_get_drvdata(p);
	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);
	if (port->typec_caps.data != TYPEC_PORT_DRD) {
	if (port->state != SRC_READY && port->state != SNK_READY) {
	/* Already in the requested role: nothing to do. */
	if (port->data_role == data) {
	/*
	 * 6.3.9: If an alternate mode is active, a request to swap
	 * alternate modes shall trigger a port reset.
	 * Reject data role swap request in this case.
	 */
	if (!port->pd_capable) {
		/*
		 * If the partner is not PD capable, reset the port to
		 * trigger a role change. This can only work if a preferred
		 * role is configured, and if it matches the requested role.
		 */
		if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
		    port->try_role == port->pwr_role) {
		port->non_pd_role_swap = true;
		tcpm_set_state(port, PORT_RESET, 0);
		port->upcoming_state = DR_SWAP_SEND;
		ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
		if (ret == -EAGAIN) {
			port->upcoming_state = INVALID_STATE;
	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	/* Drop port->lock so the state machine can run the swap. */
	mutex_unlock(&port->lock);
	if (!wait_for_completion_timeout(&port->swap_complete,
					 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
	ret = port->swap_status;
	port->non_pd_role_swap = false;
	mutex_unlock(&port->lock);
	mutex_unlock(&port->swap_lock);
/*
 * typec_operations.pr_set: request a power role swap to @role.
 * Blocks until the swap completes or PD_ROLE_SWAP_TIMEOUT elapses.
 */
static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
	struct tcpm_port *port = typec_get_drvdata(p);
	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);
	/* Power role swap is only meaningful on a dual-role-power port. */
	if (port->port_type != TYPEC_PORT_DRP) {
	if (port->state != SRC_READY && port->state != SNK_READY) {
	/* Already in the requested role: nothing to do. */
	if (role == port->pwr_role) {
	port->upcoming_state = PR_SWAP_SEND;
	ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	/* Drop port->lock so the state machine can run the swap. */
	mutex_unlock(&port->lock);
	if (!wait_for_completion_timeout(&port->swap_complete,
					 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
	ret = port->swap_status;
	mutex_unlock(&port->lock);
	mutex_unlock(&port->swap_lock);
/*
 * typec_operations.vconn_set: request a VCONN swap so that this port
 * does (TYPEC_SOURCE) or does not (TYPEC_SINK) supply VCONN.
 */
static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
	struct tcpm_port *port = typec_get_drvdata(p);
	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);
	if (port->state != SRC_READY && port->state != SNK_READY) {
	/* Already in the requested VCONN role: nothing to do. */
	if (role == port->vconn_role) {
	port->upcoming_state = VCONN_SWAP_SEND;
	ret = tcpm_ams_start(port, VCONN_SWAP);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	/* Drop port->lock so the state machine can run the swap. */
	mutex_unlock(&port->lock);
	if (!wait_for_completion_timeout(&port->swap_complete,
					 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
	ret = port->swap_status;
	mutex_unlock(&port->lock);
	mutex_unlock(&port->swap_lock);
/*
 * typec_operations.try_role: set the preferred role used when resolving
 * DRP toggling, and reset the Try.SRC/Try.SNK attempt counters.
 */
static int tcpm_try_role(struct typec_port *p, int role)
	struct tcpm_port *port = typec_get_drvdata(p);
	struct tcpc_dev *tcpc = port->tcpc;
	mutex_lock(&port->lock);
	/* Let the TCPC implement the preference in hardware if it can. */
	ret = tcpc->try_role(tcpc, role);
	port->try_role = role;
	port->try_src_count = 0;
	port->try_snk_count = 0;
	mutex_unlock(&port->lock);
/*
 * Request a new PPS operating current (@req_op_curr, mA) within the
 * active PPS contract. Blocks until the renegotiation completes or
 * PD_PPS_CTRL_TIMEOUT elapses.
 */
static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
	unsigned int target_mw;
	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);
	/* Only valid while a PPS contract is active. */
	if (!port->pps_data.active) {
	if (port->state != SNK_READY) {
	/* Reject requests above the source's advertised maximum. */
	if (req_op_curr > port->pps_data.max_curr) {
	/* The new operating point must still cover our own power need. */
	target_mw = (req_op_curr * port->supply_voltage) / 1000;
	if (target_mw < port->operating_snk_mw) {
	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
	/* Round down operating current to align with PPS valid steps */
	req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
	reinit_completion(&port->pps_complete);
	port->pps_data.req_op_curr = req_op_curr;
	port->pps_status = 0;
	port->pps_pending = true;
	mutex_unlock(&port->lock);
	if (!wait_for_completion_timeout(&port->pps_complete,
					 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
	ret = port->pps_status;
	mutex_unlock(&port->lock);
	mutex_unlock(&port->swap_lock);
/*
 * Request a new PPS output voltage (@req_out_volt, mV) within the
 * active PPS contract. Blocks until the renegotiation completes or
 * PD_PPS_CTRL_TIMEOUT elapses.
 */
static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
	unsigned int target_mw;
	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);
	/* Only valid while a PPS contract is active. */
	if (!port->pps_data.active) {
	if (port->state != SNK_READY) {
	/* The new operating point must still cover our own power need. */
	target_mw = (port->current_limit * req_out_volt) / 1000;
	if (target_mw < port->operating_snk_mw) {
	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
	/* Round down output voltage to align with PPS valid steps */
	req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
	reinit_completion(&port->pps_complete);
	port->pps_data.req_out_volt = req_out_volt;
	port->pps_status = 0;
	port->pps_pending = true;
	mutex_unlock(&port->lock);
	if (!wait_for_completion_timeout(&port->pps_complete,
					 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
	ret = port->pps_status;
	mutex_unlock(&port->lock);
	mutex_unlock(&port->swap_lock);
/*
 * Enter (@activate == true) or leave a PPS contract by triggering the
 * corresponding capability renegotiation. Blocks until the negotiation
 * completes or PD_PPS_CTRL_TIMEOUT elapses.
 */
static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);
	if (!port->pps_data.supported) {
	/* Trying to deactivate PPS when already deactivated so just bail */
	if (!port->pps_data.active && !activate)
	if (port->state != SNK_READY) {
	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
	reinit_completion(&port->pps_complete);
	port->pps_status = 0;
	port->pps_pending = true;
	/* Trigger PPS request or move back to standard PDO contract */
	/* Start from the currently contracted operating point. */
	port->pps_data.req_out_volt = port->supply_voltage;
	port->pps_data.req_op_curr = port->current_limit;
	mutex_unlock(&port->lock);
	if (!wait_for_completion_timeout(&port->pps_complete,
					 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
	ret = port->pps_status;
	mutex_unlock(&port->lock);
	mutex_unlock(&port->swap_lock);
/*
 * (Re)initialize the port: reset the TCPC and TCPM state, derive the
 * initial vbus_present/vbus_vsafe0v flags, then force a PORT_RESET to
 * start from a clean disconnect.
 */
static void tcpm_init(struct tcpm_port *port)
	enum typec_cc_status cc1, cc2;
	port->tcpc->init(port->tcpc);
	tcpm_reset_port(port);
	/*
	 * Should possibly wait for VBUS to settle if it was enabled locally
	 * since tcpm_reset_port() will disable VBUS.
	 */
	port->vbus_present = port->tcpc->get_vbus(port->tcpc);
	if (port->vbus_present)
		port->vbus_never_low = true;
	/*
	 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V.
	 * So implicitly vbus_vsafe0v = false.
	 *
	 * 2. When vbus_present is false and TCPC does NOT support querying
	 * vsafe0v status, then, it's best to assume vbus is at VSAFE0V i.e.
	 * vbus_vsafe0v is true.
	 *
	 * 3. When vbus_present is false and TCPC does support querying vsafe0v,
	 * then, query tcpc for vsafe0v status.
	 */
	if (port->vbus_present)
		port->vbus_vsafe0v = false;
	else if (!port->tcpc->is_vbus_vsafe0v)
		port->vbus_vsafe0v = true;
		port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);
	tcpm_set_state(port, tcpm_default_state(port), 0);
	/* Seed the state machine with the current CC status. */
	if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
		_tcpm_cc_change(port, cc1, cc2);
	/*
	 * Some adapters need a clean slate at startup, and won't recover
	 * otherwise. So do not try to be fancy and force a clean disconnect.
	 */
	tcpm_set_state(port, PORT_RESET, 0);
/*
 * typec_operations.port_type_set: change the port type (SRC/SNK/DRP).
 * Resets the port only when the current roles are incompatible with
 * the newly requested type.
 */
static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
	struct tcpm_port *port = typec_get_drvdata(p);
	mutex_lock(&port->lock);
	if (type == port->port_type)
	port->port_type = type;
	if (!port->connected) {
		/* Nothing attached: just restart in the new type. */
		tcpm_set_state(port, PORT_RESET, 0);
	} else if (type == TYPEC_PORT_SNK) {
		if (!(port->pwr_role == TYPEC_SINK &&
		      port->data_role == TYPEC_DEVICE))
			tcpm_set_state(port, PORT_RESET, 0);
	} else if (type == TYPEC_PORT_SRC) {
		if (!(port->pwr_role == TYPEC_SOURCE &&
		      port->data_role == TYPEC_HOST))
			tcpm_set_state(port, PORT_RESET, 0);
	mutex_unlock(&port->lock);
6075 static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd)
6079 for (i = 0; port->pd_list[i]; i++) {
6080 if (port->pd_list[i]->pd == pd)
6081 return port->pd_list[i];
6084 return ERR_PTR(-ENODATA);
/*
 * typec_operations.pd_get callback: expose the port's registered USB
 * Power Delivery objects to the typec class.
 */
static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p)
	struct tcpm_port *port = typec_get_drvdata(p);
6094 static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
6096 struct tcpm_port *port = typec_get_drvdata(p);
6097 struct pd_data *data;
6100 mutex_lock(&port->lock);
6102 if (port->selected_pd == pd)
6105 data = tcpm_find_pd_data(port, pd);
6107 ret = PTR_ERR(data);
6111 if (data->sink_desc.pdo[0]) {
6112 for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
6113 port->snk_pdo[i] = data->sink_desc.pdo[i];
6114 port->nr_snk_pdo = i + 1;
6115 port->operating_snk_mw = data->operating_snk_mw;
6118 if (data->source_desc.pdo[0]) {
6119 for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
6120 port->snk_pdo[i] = data->source_desc.pdo[i];
6121 port->nr_src_pdo = i + 1;
6124 switch (port->state) {
6125 case SRC_UNATTACHED:
6126 case SRC_ATTACH_WAIT:
6128 tcpm_set_cc(port, tcpm_rp_cc(port));
6130 case SRC_SEND_CAPABILITIES:
6131 case SRC_SEND_CAPABILITIES_TIMEOUT:
6132 case SRC_NEGOTIATE_CAPABILITIES:
6134 case SRC_WAIT_NEW_CAPABILITIES:
6135 port->caps_count = 0;
6136 port->upcoming_state = SRC_SEND_CAPABILITIES;
6137 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6138 if (ret == -EAGAIN) {
6139 port->upcoming_state = INVALID_STATE;
6143 case SNK_NEGOTIATE_CAPABILITIES:
6144 case SNK_NEGOTIATE_PPS_CAPABILITIES:
6146 case SNK_TRANSITION_SINK:
6147 case SNK_TRANSITION_SINK_VBUS:
6148 if (port->pps_data.active)
6149 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6150 else if (port->pd_capable)
6151 port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
6155 port->update_sink_caps = true;
6157 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6158 if (ret == -EAGAIN) {
6159 port->upcoming_state = INVALID_STATE;
6167 port->port_source_caps = data->source_cap;
6168 port->port_sink_caps = data->sink_cap;
6169 port->selected_pd = pd;
6171 mutex_unlock(&port->lock);
/* Callbacks exposed to the typec connector class for this port. */
static const struct typec_operations tcpm_ops = {
	.try_role = tcpm_try_role,
	.dr_set = tcpm_dr_set,
	.pr_set = tcpm_pr_set,
	.vconn_set = tcpm_vconn_set,
	.port_type_set = tcpm_port_type_set,
	.pd_get = tcpm_pd_get,
	.pd_set = tcpm_pd_set
/*
 * Re-initialize the port after the low-level TCPC was reset (e.g. by
 * firmware update or error recovery in the TCPC driver).
 */
void tcpm_tcpc_reset(struct tcpm_port *port)
	mutex_lock(&port->lock);
	/* XXX: Maintain PD connection if possible? */
	mutex_unlock(&port->lock);
EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
/*
 * Tear down every registered usb_power_delivery object and its
 * capability descriptors; used on unregister and on registration
 * failure cleanup.
 */
static void tcpm_port_unregister_pd(struct tcpm_port *port)
	port->port_sink_caps = NULL;
	port->port_source_caps = NULL;
	for (i = 0; i < port->pd_count; i++) {
		usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
		kfree(port->pd_list[i]->sink_cap);
		usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
		kfree(port->pd_list[i]->source_cap);
		devm_kfree(port->dev, port->pd_list[i]);
		/* NULL the slots so a later lookup cannot see stale pointers. */
		port->pd_list[i] = NULL;
		usb_power_delivery_unregister(port->pds[i]);
		port->pds[i] = NULL;
/*
 * Register one usb_power_delivery object (plus source/sink capability
 * descriptors) per pd_list entry, and make entry 0 the initially
 * selected set. Returns 0 or a negative errno; on failure everything
 * registered so far is unwound via tcpm_port_unregister_pd().
 */
static int tcpm_port_register_pd(struct tcpm_port *port)
	struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
	struct usb_power_delivery_capabilities *cap;
	/* Nothing to register when no PDOs were parsed from firmware. */
	if (!port->nr_src_pdo && !port->nr_snk_pdo)
	for (i = 0; i < port->pd_count; i++) {
		port->pds[i] = usb_power_delivery_register(port->dev, &desc);
		if (IS_ERR(port->pds[i])) {
			ret = PTR_ERR(port->pds[i]);
			goto err_unregister;
		port->pd_list[i]->pd = port->pds[i];
		if (port->pd_list[i]->source_desc.pdo[0]) {
			cap = usb_power_delivery_register_capabilities(port->pds[i],
								       &port->pd_list[i]->source_desc);
				goto err_unregister;
			port->pd_list[i]->source_cap = cap;
		if (port->pd_list[i]->sink_desc.pdo[0]) {
			cap = usb_power_delivery_register_capabilities(port->pds[i],
								       &port->pd_list[i]->sink_desc);
				goto err_unregister;
			port->pd_list[i]->sink_cap = cap;
	/* Entry 0 is the default capability set. */
	port->port_source_caps = port->pd_list[0]->source_cap;
	port->port_sink_caps = port->pd_list[0]->sink_cap;
	port->selected_pd = port->pds[0];
	tcpm_port_unregister_pd(port);
/*
 * Parse the port's capabilities from firmware (DT/ACPI @fwnode):
 * port type, accessory support, PD support, FRS current, and one or
 * more source/sink PDO sets (optionally under a "capabilities" child
 * node). Returns 0 or a negative errno.
 */
static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
	struct fwnode_handle *capabilities, *child, *caps = NULL;
	unsigned int nr_src_pdo, nr_snk_pdo;
	const char *opmode_str;
	u32 *src_pdo, *snk_pdo;
	u32 uw, frs_current;
	/*
	 * This fwnode has a "compatible" property, but is never populated as a
	 * struct device. Instead we simply parse it to read the properties.
	 * This breaks fw_devlink=on. To maintain backward compatibility
	 * with existing DT files, we work around this by deleting any
	 * fwnode_links to/from this fwnode.
	 */
	fw_devlink_purge_absent_suppliers(fwnode);
	ret = typec_get_fw_cap(&port->typec_caps, fwnode);
	if (fwnode_property_read_bool(fwnode, "accessory-mode-audio"))
		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;
	if (fwnode_property_read_bool(fwnode, "accessory-mode-debug"))
		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;
	port->port_type = port->typec_caps.type;
	port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
	port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
	port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
	if (!port->pd_supported) {
		/* Non-PD port: only the Type-C current advertisement matters. */
		ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
		ret = typec_find_pwr_opmode(opmode_str);
		port->src_rp = tcpm_pwr_opmode_to_rp(ret);
	/* The following code is applicable to pd-capable ports, i.e. pd_supported is true. */
	/* FRS can only be supported by DRP ports */
	if (port->port_type == TYPEC_PORT_DRP) {
		ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
		if (!ret && frs_current <= FRS_5V_3A)
			port->new_source_frs_current = frs_current;
	/* For the backward compatibility, "capabilities" node is optional. */
	capabilities = fwnode_get_named_child_node(fwnode, "capabilities");
	if (!capabilities) {
	fwnode_for_each_child_node(capabilities, child)
	if (!port->pd_count) {
		goto put_capabilities;
	port->pds = devm_kcalloc(port->dev, port->pd_count, sizeof(struct usb_power_delivery *),
		goto put_capabilities;
	port->pd_list = devm_kcalloc(port->dev, port->pd_count, sizeof(struct pd_data *),
	if (!port->pd_list) {
		goto put_capabilities;
	for (i = 0; i < port->pd_count; i++) {
		port->pd_list[i] = devm_kzalloc(port->dev, sizeof(struct pd_data), GFP_KERNEL);
		if (!port->pd_list[i]) {
			goto put_capabilities;
		src_pdo = port->pd_list[i]->source_desc.pdo;
		port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
		snk_pdo = port->pd_list[i]->sink_desc.pdo;
		port->pd_list[i]->sink_desc.role = TYPEC_SINK;
		/* If "capabilities" is NULL, fall back to single pd cap population. */
			caps = fwnode_get_next_child_node(capabilities, caps);
		if (port->port_type != TYPEC_PORT_SNK) {
			ret = fwnode_property_count_u32(caps, "source-pdos");
			nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
			ret = fwnode_property_read_u32_array(caps, "source-pdos", src_pdo,
			ret = tcpm_validate_caps(port, src_pdo, nr_src_pdo);
			/* Legacy flat arrays mirror capability set 0. */
			port->nr_src_pdo = nr_src_pdo;
			memcpy_and_pad(port->src_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
				       port->pd_list[0]->source_desc.pdo,
				       sizeof(u32) * nr_src_pdo,
		if (port->port_type != TYPEC_PORT_SRC) {
			ret = fwnode_property_count_u32(caps, "sink-pdos");
			nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
			ret = fwnode_property_read_u32_array(caps, "sink-pdos", snk_pdo,
			ret = tcpm_validate_caps(port, snk_pdo, nr_snk_pdo);
			/* "op-sink-microwatt" is mandatory for a PD sink. */
			if (fwnode_property_read_u32(caps, "op-sink-microwatt", &uw) < 0) {
			port->pd_list[i]->operating_snk_mw = uw / 1000;
			port->nr_snk_pdo = nr_snk_pdo;
			memcpy_and_pad(port->snk_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
				       port->pd_list[0]->sink_desc.pdo,
				       sizeof(u32) * nr_snk_pdo,
			port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
	fwnode_handle_put(caps);
	fwnode_handle_put(capabilities);
/*
 * Parse the optional "sink-vdos" (and, if present, the matching
 * "sink-vdos-v1") arrays used to answer Discover Identity requests.
 * Returns 0 or a negative errno.
 */
static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode)
	/* sink-vdos is optional */
	ret = fwnode_property_count_u32(fwnode, "sink-vdos");
	/* Clamp to what the VDO array can hold. */
	port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
	if (port->nr_snk_vdo) {
		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
	/* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
	if (port->nr_snk_vdo) {
		ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
		port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
						     port->nr_snk_vdo_v1);
/* Power Supply access to expose source power information */
enum tcpm_psy_online_states {
	TCPM_PSY_OFFLINE = 0,		/* partner is not supplying power */
	TCPM_PSY_FIXED_ONLINE,		/* powered via a fixed PDO contract */
	TCPM_PSY_PROG_ONLINE,		/* powered via a programmable (PPS) contract */
/* Properties published through the power_supply class device. */
static enum power_supply_property tcpm_psy_props[] = {
	POWER_SUPPLY_PROP_USB_TYPE,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_MIN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_MAX,
	POWER_SUPPLY_PROP_CURRENT_NOW,
6498 static int tcpm_psy_get_online(struct tcpm_port *port,
6499 union power_supply_propval *val)
6501 if (port->vbus_charge) {
6502 if (port->pps_data.active)
6503 val->intval = TCPM_PSY_PROG_ONLINE;
6505 val->intval = TCPM_PSY_FIXED_ONLINE;
6507 val->intval = TCPM_PSY_OFFLINE;
6513 static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
6514 union power_supply_propval *val)
6516 if (port->pps_data.active)
6517 val->intval = port->pps_data.min_volt * 1000;
6519 val->intval = port->supply_voltage * 1000;
6524 static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
6525 union power_supply_propval *val)
6527 if (port->pps_data.active)
6528 val->intval = port->pps_data.max_volt * 1000;
6530 val->intval = port->supply_voltage * 1000;
6535 static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
6536 union power_supply_propval *val)
6538 val->intval = port->supply_voltage * 1000;
6543 static int tcpm_psy_get_current_max(struct tcpm_port *port,
6544 union power_supply_propval *val)
6546 if (port->pps_data.active)
6547 val->intval = port->pps_data.max_curr * 1000;
6549 val->intval = port->current_limit * 1000;
6554 static int tcpm_psy_get_current_now(struct tcpm_port *port,
6555 union power_supply_propval *val)
6557 val->intval = port->current_limit * 1000;
6562 static int tcpm_psy_get_input_power_limit(struct tcpm_port *port,
6563 union power_supply_propval *val)
6565 unsigned int src_mv, src_ma, max_src_uw = 0;
6566 unsigned int i, tmp;
6568 for (i = 0; i < port->nr_source_caps; i++) {
6569 u32 pdo = port->source_caps[i];
6571 if (pdo_type(pdo) == PDO_TYPE_FIXED) {
6572 src_mv = pdo_fixed_voltage(pdo);
6573 src_ma = pdo_max_current(pdo);
6574 tmp = src_mv * src_ma;
6575 max_src_uw = tmp > max_src_uw ? tmp : max_src_uw;
6579 val->intval = max_src_uw;
6583 static int tcpm_psy_get_prop(struct power_supply *psy,
6584 enum power_supply_property psp,
6585 union power_supply_propval *val)
6587 struct tcpm_port *port = power_supply_get_drvdata(psy);
6591 case POWER_SUPPLY_PROP_USB_TYPE:
6592 val->intval = port->usb_type;
6594 case POWER_SUPPLY_PROP_ONLINE:
6595 ret = tcpm_psy_get_online(port, val);
6597 case POWER_SUPPLY_PROP_VOLTAGE_MIN:
6598 ret = tcpm_psy_get_voltage_min(port, val);
6600 case POWER_SUPPLY_PROP_VOLTAGE_MAX:
6601 ret = tcpm_psy_get_voltage_max(port, val);
6603 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
6604 ret = tcpm_psy_get_voltage_now(port, val);
6606 case POWER_SUPPLY_PROP_CURRENT_MAX:
6607 ret = tcpm_psy_get_current_max(port, val);
6609 case POWER_SUPPLY_PROP_CURRENT_NOW:
6610 ret = tcpm_psy_get_current_now(port, val);
6612 case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
6613 tcpm_psy_get_input_power_limit(port, val);
6623 static int tcpm_psy_set_online(struct tcpm_port *port,
6624 const union power_supply_propval *val)
6628 switch (val->intval) {
6629 case TCPM_PSY_FIXED_ONLINE:
6630 ret = tcpm_pps_activate(port, false);
6632 case TCPM_PSY_PROG_ONLINE:
6633 ret = tcpm_pps_activate(port, true);
6643 static int tcpm_psy_set_prop(struct power_supply *psy,
6644 enum power_supply_property psp,
6645 const union power_supply_propval *val)
6647 struct tcpm_port *port = power_supply_get_drvdata(psy);
6651 * All the properties below are related to USB PD. The check needs to be
6652 * property specific when a non-pd related property is added.
6654 if (!port->pd_supported)
6658 case POWER_SUPPLY_PROP_ONLINE:
6659 ret = tcpm_psy_set_online(port, val);
6661 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
6662 ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
6664 case POWER_SUPPLY_PROP_CURRENT_NOW:
6665 if (val->intval > port->pps_data.max_curr * 1000)
6668 ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
6674 power_supply_changed(port->psy);
6678 static int tcpm_psy_prop_writeable(struct power_supply *psy,
6679 enum power_supply_property psp)
6682 case POWER_SUPPLY_PROP_ONLINE:
6683 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
6684 case POWER_SUPPLY_PROP_CURRENT_NOW:
6691 static enum power_supply_usb_type tcpm_psy_usb_types[] = {
6692 POWER_SUPPLY_USB_TYPE_C,
6693 POWER_SUPPLY_USB_TYPE_PD,
6694 POWER_SUPPLY_USB_TYPE_PD_PPS,
6697 static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";
6699 static int devm_tcpm_psy_register(struct tcpm_port *port)
6701 struct power_supply_config psy_cfg = {};
6702 const char *port_dev_name = dev_name(port->dev);
6703 size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
6704 strlen(port_dev_name) + 1;
6707 psy_cfg.drv_data = port;
6708 psy_cfg.fwnode = dev_fwnode(port->dev);
6709 psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
6713 snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
6715 port->psy_desc.name = psy_name;
6716 port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
6717 port->psy_desc.usb_types = tcpm_psy_usb_types;
6718 port->psy_desc.num_usb_types = ARRAY_SIZE(tcpm_psy_usb_types);
6719 port->psy_desc.properties = tcpm_psy_props;
6720 port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
6721 port->psy_desc.get_property = tcpm_psy_get_prop;
6722 port->psy_desc.set_property = tcpm_psy_set_prop;
6723 port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable;
6725 port->usb_type = POWER_SUPPLY_USB_TYPE_C;
6727 port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
6730 return PTR_ERR_OR_ZERO(port->psy);
6733 static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
6735 struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
6737 if (port->registered)
6738 kthread_queue_work(port->wq, &port->state_machine);
6739 return HRTIMER_NORESTART;
6742 static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
6744 struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
6746 if (port->registered)
6747 kthread_queue_work(port->wq, &port->vdm_state_machine);
6748 return HRTIMER_NORESTART;
6751 static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
6753 struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
6755 if (port->registered)
6756 kthread_queue_work(port->wq, &port->enable_frs);
6757 return HRTIMER_NORESTART;
6760 static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
6762 struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
6764 if (port->registered)
6765 kthread_queue_work(port->wq, &port->send_discover_work);
6766 return HRTIMER_NORESTART;
6769 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
6771 struct tcpm_port *port;
6774 if (!dev || !tcpc ||
6775 !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
6776 !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
6777 !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
6778 return ERR_PTR(-EINVAL);
6780 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
6782 return ERR_PTR(-ENOMEM);
6787 mutex_init(&port->lock);
6788 mutex_init(&port->swap_lock);
6790 port->wq = kthread_create_worker(0, dev_name(dev));
6791 if (IS_ERR(port->wq))
6792 return ERR_CAST(port->wq);
6793 sched_set_fifo(port->wq->task);
6795 kthread_init_work(&port->state_machine, tcpm_state_machine_work);
6796 kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
6797 kthread_init_work(&port->event_work, tcpm_pd_event_handler);
6798 kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
6799 kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
6800 hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6801 port->state_machine_timer.function = state_machine_timer_handler;
6802 hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6803 port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
6804 hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6805 port->enable_frs_timer.function = enable_frs_timer_handler;
6806 hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6807 port->send_discover_timer.function = send_discover_timer_handler;
6809 spin_lock_init(&port->pd_event_lock);
6811 init_completion(&port->tx_complete);
6812 init_completion(&port->swap_complete);
6813 init_completion(&port->pps_complete);
6814 tcpm_debugfs_init(port);
6816 err = tcpm_fw_get_caps(port, tcpc->fwnode);
6818 goto out_destroy_wq;
6819 err = tcpm_fw_get_snk_vdos(port, tcpc->fwnode);
6821 goto out_destroy_wq;
6823 port->try_role = port->typec_caps.prefer_role;
6825 port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
6826 port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
6827 port->typec_caps.svdm_version = SVDM_VER_2_0;
6828 port->typec_caps.driver_data = port;
6829 port->typec_caps.ops = &tcpm_ops;
6830 port->typec_caps.orientation_aware = 1;
6832 port->partner_desc.identity = &port->partner_ident;
6834 port->role_sw = usb_role_switch_get(port->dev);
6836 port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
6837 if (IS_ERR(port->role_sw)) {
6838 err = PTR_ERR(port->role_sw);
6839 goto out_destroy_wq;
6842 err = devm_tcpm_psy_register(port);
6844 goto out_role_sw_put;
6845 power_supply_changed(port->psy);
6847 err = tcpm_port_register_pd(port);
6849 goto out_role_sw_put;
6852 port->typec_caps.pd = port->pds[0];
6854 port->typec_port = typec_register_port(port->dev, &port->typec_caps);
6855 if (IS_ERR(port->typec_port)) {
6856 err = PTR_ERR(port->typec_port);
6857 goto out_unregister_pd;
6860 typec_port_register_altmodes(port->typec_port,
6861 &tcpm_altmode_ops, port,
6862 port->port_altmode, ALTMODE_DISCOVERY_MAX);
6863 port->registered = true;
6865 mutex_lock(&port->lock);
6867 mutex_unlock(&port->lock);
6869 tcpm_log(port, "%s: registered", dev_name(dev));
6873 tcpm_port_unregister_pd(port);
6875 usb_role_switch_put(port->role_sw);
6877 tcpm_debugfs_exit(port);
6878 kthread_destroy_worker(port->wq);
6879 return ERR_PTR(err);
6881 EXPORT_SYMBOL_GPL(tcpm_register_port);
6883 void tcpm_unregister_port(struct tcpm_port *port)
6887 port->registered = false;
6888 kthread_destroy_worker(port->wq);
6890 hrtimer_cancel(&port->send_discover_timer);
6891 hrtimer_cancel(&port->enable_frs_timer);
6892 hrtimer_cancel(&port->vdm_state_machine_timer);
6893 hrtimer_cancel(&port->state_machine_timer);
6895 tcpm_reset_port(port);
6897 tcpm_port_unregister_pd(port);
6899 for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
6900 typec_unregister_altmode(port->port_altmode[i]);
6901 typec_unregister_port(port->typec_port);
6902 usb_role_switch_put(port->role_sw);
6903 tcpm_debugfs_exit(port);
6905 EXPORT_SYMBOL_GPL(tcpm_unregister_port);
6907 MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
6908 MODULE_DESCRIPTION("USB Type-C Port Manager");
6909 MODULE_LICENSE("GPL");