1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2015-2017 Google, Inc
5 * USB Power Delivery protocol stack.
8 #include <linux/completion.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/hrtimer.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/power_supply.h>
18 #include <linux/proc_fs.h>
19 #include <linux/property.h>
20 #include <linux/sched/clock.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/usb.h>
25 #include <linux/usb/pd.h>
26 #include <linux/usb/pd_ado.h>
27 #include <linux/usb/pd_bdo.h>
28 #include <linux/usb/pd_ext_sdb.h>
29 #include <linux/usb/pd_vdo.h>
30 #include <linux/usb/role.h>
31 #include <linux/usb/tcpm.h>
32 #include <linux/usb/typec_altmode.h>
34 #include <uapi/linux/sched/types.h>
/*
 * NOTE(review): this listing is a lossy extraction — interior entries of the
 * X-macros below are missing and every surviving line still carries its
 * original file line number.  Recover the complete upstream tcpm.c before
 * making functional edits.
 */
/*
 * X-macro: expands S() once per TCPM state-machine state.  Used below to
 * generate both the state enum and the parallel tcpm_states[] name table.
 * (Comments must stay outside the macro; '\' continuations follow.)
 */
36 #define FOREACH_STATE(S) \
39 S(CHECK_CONTAMINANT), \
44 S(SRC_SEND_CAPABILITIES), \
45 S(SRC_SEND_CAPABILITIES_TIMEOUT), \
46 S(SRC_NEGOTIATE_CAPABILITIES), \
47 S(SRC_TRANSITION_SUPPLY), \
49 S(SRC_WAIT_NEW_CAPABILITIES), \
57 S(SNK_DISCOVERY_DEBOUNCE), \
58 S(SNK_DISCOVERY_DEBOUNCE_DONE), \
59 S(SNK_WAIT_CAPABILITIES), \
60 S(SNK_NEGOTIATE_CAPABILITIES), \
61 S(SNK_NEGOTIATE_PPS_CAPABILITIES), \
62 S(SNK_TRANSITION_SINK), \
63 S(SNK_TRANSITION_SINK_VBUS), \
67 S(DEBUG_ACC_ATTACHED), \
68 S(AUDIO_ACC_ATTACHED), \
69 S(AUDIO_ACC_DEBOUNCE), \
72 S(HARD_RESET_START), \
73 S(SRC_HARD_RESET_VBUS_OFF), \
74 S(SRC_HARD_RESET_VBUS_ON), \
75 S(SNK_HARD_RESET_SINK_OFF), \
76 S(SNK_HARD_RESET_WAIT_VBUS), \
77 S(SNK_HARD_RESET_SINK_ON), \
80 S(SRC_SOFT_RESET_WAIT_SNK_TX), \
86 S(DR_SWAP_SEND_TIMEOUT), \
88 S(DR_SWAP_CHANGE_DR), \
92 S(PR_SWAP_SEND_TIMEOUT), \
95 S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
96 S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
97 S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
98 S(PR_SWAP_SRC_SNK_SINK_ON), \
99 S(PR_SWAP_SNK_SRC_SINK_OFF), \
100 S(PR_SWAP_SNK_SRC_SOURCE_ON), \
101 S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP), \
103 S(VCONN_SWAP_ACCEPT), \
104 S(VCONN_SWAP_SEND), \
105 S(VCONN_SWAP_SEND_TIMEOUT), \
106 S(VCONN_SWAP_CANCEL), \
107 S(VCONN_SWAP_START), \
108 S(VCONN_SWAP_WAIT_FOR_VCONN), \
109 S(VCONN_SWAP_TURN_ON_VCONN), \
110 S(VCONN_SWAP_TURN_OFF_VCONN), \
113 S(FR_SWAP_SEND_TIMEOUT), \
114 S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF), \
115 S(FR_SWAP_SNK_SRC_NEW_SINK_READY), \
116 S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED), \
121 S(SNK_TRY_WAIT_DEBOUNCE), \
122 S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS), \
124 S(SRC_TRYWAIT_DEBOUNCE), \
125 S(SRC_TRYWAIT_UNATTACHED), \
129 S(SRC_TRY_DEBOUNCE), \
131 S(SNK_TRYWAIT_DEBOUNCE), \
132 S(SNK_TRYWAIT_VBUS), \
135 S(GET_STATUS_SEND), \
136 S(GET_STATUS_SEND_TIMEOUT), \
137 S(GET_PPS_STATUS_SEND), \
138 S(GET_PPS_STATUS_SEND_TIMEOUT), \
141 S(GET_SINK_CAP_TIMEOUT), \
145 S(PORT_RESET_WAIT_OFF), \
/*
 * X-macro: expands S() once per PD Atomic Message Sequence (AMS) name.
 * Generates the tcpm_ams enum and tcpm_ams_str[] table below.
 * NOTE(review): several entries are missing in this extraction.
 */
150 #define FOREACH_AMS(S) \
152 S(POWER_NEGOTIATION), \
157 S(GET_SOURCE_CAPABILITIES), \
158 S(GET_SINK_CAPABILITIES), \
159 S(POWER_ROLE_SWAP), \
164 S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
165 S(GETTING_SOURCE_SINK_STATUS), \
166 S(GETTING_BATTERY_CAPABILITIES), \
167 S(GETTING_BATTERY_STATUS), \
168 S(GETTING_MANUFACTURER_INFORMATION), \
170 S(FIRMWARE_UPDATE), \
171 S(DISCOVER_IDENTITY), \
172 S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY), \
175 S(DFP_TO_UFP_ENTER_MODE), \
176 S(DFP_TO_UFP_EXIT_MODE), \
177 S(DFP_TO_CABLE_PLUG_ENTER_MODE), \
178 S(DFP_TO_CABLE_PLUG_EXIT_MODE), \
181 S(UNSTRUCTURED_VDMS), \
182 S(STRUCTURED_VDMS), \
/* Helpers to expand the X-macros into enum constants and string literals. */
186 #define GENERATE_ENUM(e) e
187 #define GENERATE_STRING(s) #s
/* enum tcpm_state body (enum opener/closer lines missing in this extraction). */
190 FOREACH_STATE(GENERATE_ENUM)
/* Name table parallel to enum tcpm_state, indexed by state value. */
193 static const char * const tcpm_states[] = {
194 FOREACH_STATE(GENERATE_STRING)
/* enum tcpm_ams body (opener/closer missing in this extraction). */
198 FOREACH_AMS(GENERATE_ENUM)
/* Name table parallel to enum tcpm_ams, indexed by AMS value. */
201 static const char * const tcpm_ams_str[] = {
202 FOREACH_AMS(GENERATE_STRING)
/*
 * VDM state machine states: negative values are error terminals, zero-area
 * values idle/done (lines missing here), and positive values are active
 * (see comment at original line 210).
 */
206 VDM_STATE_ERR_BUSY = -3,
207 VDM_STATE_ERR_SEND = -2,
208 VDM_STATE_ERR_TMOUT = -1,
210 /* Anything >0 represents an active state */
213 VDM_STATE_WAIT_RSP_BUSY = 3,
214 VDM_STATE_SEND_MESSAGE = 4,
/* Control/data messages the state machine can queue for transmission. */
217 enum pd_msg_request {
221 PD_MSG_CTRL_NOT_SUPP,
222 PD_MSG_DATA_SINK_CAP,
223 PD_MSG_DATA_SOURCE_CAP,
/* Altmode-device VDM delivery actions (enum opener missing in extraction). */
228 ADEV_NOTIFY_USB_AND_QUEUE_VDM,
230 ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
/* Kernel-doc fragment for the FRS sink-current capability enum below. */
235 * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
236 * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
239 enum frs_typec_current {
246 /* Events from low level driver */
/* Bit flags stored in the pd_event word, consumed by the event worker. */
248 #define TCPM_CC_EVENT BIT(0)
249 #define TCPM_VBUS_EVENT BIT(1)
250 #define TCPM_RESET_EVENT BIT(2)
251 #define TCPM_FRS_EVENT BIT(3)
252 #define TCPM_SOURCING_VBUS BIT(4)
253 #define TCPM_PORT_CLEAN BIT(5)
254 #define TCPM_PORT_ERROR BIT(6)
/* Circular debug-log geometry: entry count and per-entry byte size. */
256 #define LOG_BUFFER_ENTRIES 1024
257 #define LOG_BUFFER_ENTRY_SIZE 128
259 /* Alternate mode support */
261 #define SVID_DISCOVERY_MAX 16
262 #define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
/* Retry intervals (ms) for Get_Sink_Cap and Discover Identity. */
264 #define GET_SINK_CAP_RETRY_MS 100
265 #define SEND_DISCOVER_RETRY_MS 100
/* Scratch state for SVID/alternate-mode discovery over SVDM. */
267 struct pd_mode_data {
268 int svid_index; /* current SVID index */
270 u16 svids[SVID_DISCOVERY_MAX];
271 int altmodes; /* number of alternate modes */
272 struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
/* Kernel-doc fragment for struct pd_pps_data (struct body missing here). */
276 * @min_volt: Actual min voltage at the local port
277 * @req_min_volt: Requested min voltage to the port partner
278 * @max_volt: Actual max voltage at the local port
279 * @req_max_volt: Requested max voltage to the port partner
280 * @max_curr: Actual max current at the local port
281 * @req_max_curr: Requested max current of the port partner
282 * @req_out_volt: Requested output voltage to the port partner
283 * @req_op_curr: Requested operating current to the port partner
284 * @supported: Parter has at least one APDO hence supports PPS
285 * @active: PPS mode is active
/*
 * NOTE(review): fields below appear to belong to a per-capability-set
 * struct (pd object + source/sink caps) whose opening line is missing in
 * this extraction — confirm against upstream tcpm.c.
 */
301 struct usb_power_delivery *pd;
302 struct usb_power_delivery_capabilities *source_cap;
303 struct usb_power_delivery_capabilities_desc source_desc;
304 struct usb_power_delivery_capabilities *sink_cap;
305 struct usb_power_delivery_capabilities_desc sink_desc;
306 unsigned int operating_snk_mw;
/*
 * struct tcpm_port — per-port TCPM state (the struct's opening line and many
 * fields are missing in this extraction; this is a partial field listing).
 * Guarded overall by 'lock'; the debug log has its own 'logbuffer_lock'.
 */
312 struct mutex lock; /* tcpm state machine lock */
313 struct kthread_worker *wq;
315 struct typec_capability typec_caps;
316 struct typec_port *typec_port;
318 struct tcpc_dev *tcpc;
319 struct usb_role_switch *role_sw;
321 enum typec_role vconn_role;
322 enum typec_role pwr_role;
323 enum typec_data_role data_role;
324 enum typec_pwr_opmode pwr_opmode;
326 struct usb_pd_identity partner_ident;
327 struct typec_partner_desc partner_desc;
328 struct typec_partner *partner;
330 enum typec_cc_status cc_req;
331 enum typec_cc_status src_rp; /* work only if pd_supported == false */
333 enum typec_cc_status cc1;
334 enum typec_cc_status cc2;
335 enum typec_cc_polarity polarity;
341 enum typec_port_type port_type;
344 * Set to true when vbus is greater than VSAFE5V min.
345 * Set to false when vbus falls below vSinkDisconnect max threshold.
350 * Set to true when vbus is less than VSAFE0V max.
351 * Set to false when vbus is greater than VSAFE0V max.
359 /* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
367 enum pd_msg_request queued_message;
369 enum tcpm_state enter_state;
370 enum tcpm_state prev_state;
371 enum tcpm_state state;
372 enum tcpm_state delayed_state;
373 ktime_t delayed_runtime;
374 unsigned long delay_ms;
376 spinlock_t pd_event_lock;
379 struct kthread_work event_work;
380 struct hrtimer state_machine_timer;
381 struct kthread_work state_machine;
382 struct hrtimer vdm_state_machine_timer;
383 struct kthread_work vdm_state_machine;
384 struct hrtimer enable_frs_timer;
385 struct kthread_work enable_frs;
386 struct hrtimer send_discover_timer;
387 struct kthread_work send_discover_work;
388 bool state_machine_running;
389 /* Set to true when VDM State Machine has following actions. */
392 struct completion tx_complete;
393 enum tcpm_transmit_status tx_status;
395 struct mutex swap_lock; /* swap command lock */
397 bool non_pd_role_swap;
398 struct completion swap_complete;
401 unsigned int negotiated_rev;
402 unsigned int message_id;
403 unsigned int caps_count;
404 unsigned int hard_reset_count;
406 bool explicit_contract;
407 unsigned int rx_msgid;
410 struct usb_power_delivery **pds;
411 struct pd_data **pd_list;
412 struct usb_power_delivery_capabilities *port_source_caps;
413 struct usb_power_delivery_capabilities *port_sink_caps;
414 struct usb_power_delivery *partner_pd;
415 struct usb_power_delivery_capabilities *partner_source_caps;
416 struct usb_power_delivery_capabilities *partner_sink_caps;
417 struct usb_power_delivery *selected_pd;
419 /* Partner capabilities/requests */
421 u32 source_caps[PDO_MAX_OBJECTS];
422 unsigned int nr_source_caps;
423 u32 sink_caps[PDO_MAX_OBJECTS];
424 unsigned int nr_sink_caps;
426 /* Local capabilities */
427 unsigned int pd_count;
428 u32 src_pdo[PDO_MAX_OBJECTS];
429 unsigned int nr_src_pdo;
430 u32 snk_pdo[PDO_MAX_OBJECTS];
431 unsigned int nr_snk_pdo;
432 u32 snk_vdo_v1[VDO_MAX_OBJECTS];
433 unsigned int nr_snk_vdo_v1;
434 u32 snk_vdo[VDO_MAX_OBJECTS];
435 unsigned int nr_snk_vdo;
437 unsigned int operating_snk_mw;
438 bool update_sink_caps;
440 /* Requested current / voltage to the port partner */
441 u32 req_current_limit;
442 u32 req_supply_voltage;
443 /* Actual current / voltage limit of the local port */
447 /* Used to export TA voltage and current */
448 struct power_supply *psy;
449 struct power_supply_desc psy_desc;
450 enum power_supply_usb_type usb_type;
454 /* PD state for Vendor Defined Messages */
455 enum vdm_states vdm_state;
457 /* next Vendor Defined Message to send */
458 u32 vdo_data[VDO_MAX_SIZE];
460 /* VDO to retry if UFP responder replied busy */
464 struct pd_pps_data pps_data;
465 struct completion pps_complete;
469 /* Alternate mode data */
470 struct pd_mode_data mode_data;
471 struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
472 struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
474 /* Deadline in jiffies to exit src_try_wait state */
475 unsigned long max_wait;
477 /* port belongs to a self powered device */
481 enum frs_typec_current new_source_frs_current;
483 /* Sink caps have been queried */
486 /* Collision Avoidance and Atomic Message Sequence */
487 enum tcpm_state upcoming_state;
489 enum tcpm_ams next_ams;
492 /* Auto vbus discharge status */
493 bool auto_vbus_discharge_enabled;
496 * When set, port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and
497 * the actual current limit after RX of PD_CTRL_PSRDY for PD link,
498 * SNK_READY for non-pd link.
500 bool slow_charger_loop;
503 * When true indicates that the lower level drivers indicate potential presence
504 * of contaminant in the connector pins based on the tcpm state machine
507 bool potential_contaminant;
508 #ifdef CONFIG_DEBUG_FS
509 struct dentry *dentry;
510 struct mutex logbuffer_lock; /* log buffer access lock */
513 u8 *logbuffer[LOG_BUFFER_ENTRIES];
/*
 * NOTE(review): these fields appear to belong to a separate RX-event struct
 * (work item + owning port + received PD message) whose opening line is
 * missing in this extraction — confirm against upstream tcpm.c.
 */
518 struct kthread_work work;
519 struct tcpm_port *port;
520 struct pd_message msg;
/* PD spec revision name table, indexed by negotiated_rev (entries missing here). */
523 static const char * const pd_rev[] = {
/* A CC line presenting any Rp level means the far end is a source, so we can sink. */
529 #define tcpm_cc_is_sink(cc) \
530 ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
531 (cc) == TYPEC_CC_RP_3_0)
533 /* As long as cc is pulled up, we can consider it as sink. */
534 #define tcpm_port_is_sink(port) \
535 (tcpm_cc_is_sink((port)->cc1) || tcpm_cc_is_sink((port)->cc2))
/* Rd on a CC line marks an attached sink; Ra marks an audio accessory. */
537 #define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
538 #define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
539 #define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
/* Source attach requires Rd on exactly one CC line (Rd/Rd is debug accessory). */
541 #define tcpm_port_is_source(port) \
542 ((tcpm_cc_is_source((port)->cc1) && \
543 !tcpm_cc_is_source((port)->cc2)) || \
544 (tcpm_cc_is_source((port)->cc2) && \
545 !tcpm_cc_is_source((port)->cc1)))
547 #define tcpm_port_is_debug(port) \
548 (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
/* Audio accessory: Ra/Ra attached; detached variant tolerates one line open. */
550 #define tcpm_port_is_audio(port) \
551 (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
553 #define tcpm_port_is_audio_detached(port) \
554 ((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
555 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
/* Try.SNK / Try.SRC apply only to DRP ports, once, when preferring that role. */
557 #define tcpm_try_snk(port) \
558 ((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
559 (port)->port_type == TYPEC_PORT_DRP)
561 #define tcpm_try_src(port) \
562 ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
563 (port)->port_type == TYPEC_PORT_DRP)
/* Default data role per power role, honoring a fixed UFP/DFP port config. */
565 #define tcpm_data_role_for_source(port) \
566 ((port)->typec_caps.data == TYPEC_PORT_UFP ? \
567 TYPEC_DEVICE : TYPEC_HOST)
569 #define tcpm_data_role_for_sink(port) \
570 ((port)->typec_caps.data == TYPEC_PORT_DFP ? \
571 TYPEC_HOST : TYPEC_DEVICE)
/* PD3.0 collision avoidance: sink may transmit only when source shows SinkTxOK (Rp 3.0 A). */
573 #define tcpm_sink_tx_ok(port) \
574 (tcpm_port_is_sink(port) && \
575 ((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
/* Extra wait (tSafe0V) needed when auto-discharge is on but VSAFE0V not yet reached. */
577 #define tcpm_wait_for_discharge(port) \
578 (((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
/*
 * Pick the initial state-machine state from the configured port type and
 * preferred role; defaults to SRC_UNATTACHED.
 */
580 static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
582 if (port->port_type == TYPEC_PORT_DRP) {
583 if (port->try_role == TYPEC_SINK)
584 return SNK_UNATTACHED;
585 else if (port->try_role == TYPEC_SOURCE)
586 return SRC_UNATTACHED;
587 /* Fall through to return SRC_UNATTACHED */
588 } else if (port->port_type == TYPEC_PORT_SNK) {
589 return SNK_UNATTACHED;
591 return SRC_UNATTACHED;
/*
 * Disconnected means: unattached with both CC lines open, or attached with
 * the CC line of the active polarity gone open.
 */
594 static bool tcpm_port_is_disconnected(struct tcpm_port *port)
596 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
597 port->cc2 == TYPEC_CC_OPEN) ||
598 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
599 port->cc1 == TYPEC_CC_OPEN) ||
600 (port->polarity == TYPEC_POLARITY_CC2 &&
601 port->cc2 == TYPEC_CC_OPEN)));
608 #ifdef CONFIG_DEBUG_FS
/* True when the circular log buffer is full (head has wrapped to meet tail). */
610 static bool tcpm_log_full(struct tcpm_port *port)
612 return port->logbuffer_tail ==
613 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
/*
 * Core logger: formats the message, timestamps it with local_clock(), and
 * stores it in the circular buffer under logbuffer_lock.  Entry slots are
 * kzalloc'd lazily on first use; when the buffer is full the newest slot is
 * reused and the text replaced with "overflow".  Sanity-checks on head index
 * and slot pointer bail out rather than corrupt memory.
 */
617 static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
619 char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
620 u64 ts_nsec = local_clock();
621 unsigned long rem_nsec;
623 mutex_lock(&port->logbuffer_lock);
624 if (!port->logbuffer[port->logbuffer_head]) {
625 port->logbuffer[port->logbuffer_head] =
626 kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
627 if (!port->logbuffer[port->logbuffer_head]) {
628 mutex_unlock(&port->logbuffer_lock);
633 vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
635 if (tcpm_log_full(port)) {
636 port->logbuffer_head = max(port->logbuffer_head - 1, 0);
637 strcpy(tmpbuffer, "overflow");
640 if (port->logbuffer_head < 0 ||
641 port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
643 "Bad log buffer index %d\n", port->logbuffer_head);
647 if (!port->logbuffer[port->logbuffer_head]) {
649 "Log buffer index %d is NULL\n", port->logbuffer_head);
653 rem_nsec = do_div(ts_nsec, 1000000000);
654 scnprintf(port->logbuffer[port->logbuffer_head],
655 LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
656 (unsigned long)ts_nsec, rem_nsec / 1000,
658 port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
661 mutex_unlock(&port->logbuffer_lock);
/*
 * Filtered logger: suppresses messages while the port is disconnected and
 * sitting in an unattached/toggling/contaminant-check state, to avoid noise.
 */
665 static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
669 /* Do not log while disconnected and unattached */
670 if (tcpm_port_is_disconnected(port) &&
671 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
672 port->state == TOGGLING || port->state == CHECK_CONTAMINANT))
676 _tcpm_log(port, fmt, args);
/* Unconditional logger: no disconnected-state filtering. */
681 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
686 _tcpm_log(port, fmt, args);
/*
 * Pretty-print each partner source PDO into the log: fixed supplies with
 * their capability flags, battery/variable ranges, and PPS APDOs; unknown
 * types are logged as "undefined".
 */
690 static void tcpm_log_source_caps(struct tcpm_port *port)
694 for (i = 0; i < port->nr_source_caps; i++) {
695 u32 pdo = port->source_caps[i];
696 enum pd_pdo_type type = pdo_type(pdo);
701 scnprintf(msg, sizeof(msg),
702 "%u mV, %u mA [%s%s%s%s%s%s]",
703 pdo_fixed_voltage(pdo),
704 pdo_max_current(pdo),
705 (pdo & PDO_FIXED_DUAL_ROLE) ?
707 (pdo & PDO_FIXED_SUSPEND) ?
709 (pdo & PDO_FIXED_HIGHER_CAP) ?
711 (pdo & PDO_FIXED_USB_COMM) ?
713 (pdo & PDO_FIXED_DATA_SWAP) ?
715 (pdo & PDO_FIXED_EXTPOWER) ?
719 scnprintf(msg, sizeof(msg),
721 pdo_min_voltage(pdo),
722 pdo_max_voltage(pdo),
723 pdo_max_current(pdo))
726 scnprintf(msg, sizeof(msg),
728 pdo_min_voltage(pdo),
729 pdo_max_voltage(pdo),
733 if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
734 scnprintf(msg, sizeof(msg),
736 pdo_pps_apdo_min_voltage(pdo),
737 pdo_pps_apdo_max_voltage(pdo),
738 pdo_pps_apdo_max_current(pdo));
740 strcpy(msg, "undefined APDO");
743 strcpy(msg, "undefined");
746 tcpm_log(port, " PDO %d: type %d, %s",
/*
 * debugfs "log" read: dump entries from tail to head.  The tail pointer is
 * advanced only if the seq buffer did not overflow, so a truncated read can
 * be retried from the same position.
 */
751 static int tcpm_debug_show(struct seq_file *s, void *v)
753 struct tcpm_port *port = s->private;
756 mutex_lock(&port->logbuffer_lock);
757 tail = port->logbuffer_tail;
758 while (tail != port->logbuffer_head) {
759 seq_printf(s, "%s\n", port->logbuffer[tail]);
760 tail = (tail + 1) % LOG_BUFFER_ENTRIES;
762 if (!seq_has_overflowed(s))
763 port->logbuffer_tail = tail;
764 mutex_unlock(&port->logbuffer_lock);
768 DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
/* Create "tcpm-<dev>" under usb_debug_root with a read-only "log" file. */
770 static void tcpm_debugfs_init(struct tcpm_port *port)
774 mutex_init(&port->logbuffer_lock);
775 snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
776 port->dentry = debugfs_create_dir(name, usb_debug_root);
777 debugfs_create_file("log", S_IFREG | 0444, port->dentry, port,
/* Free every log entry under logbuffer_lock, then remove the debugfs dir. */
781 static void tcpm_debugfs_exit(struct tcpm_port *port)
785 mutex_lock(&port->logbuffer_lock);
786 for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
787 kfree(port->logbuffer[i]);
788 port->logbuffer[i] = NULL;
790 mutex_unlock(&port->logbuffer_lock);
792 debugfs_remove(port->dentry);
/* !CONFIG_DEBUG_FS: logging and debugfs hooks compile to empty stubs. */
798 static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
800 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
801 static void tcpm_log_source_caps(struct tcpm_port *port) { }
802 static void tcpm_debugfs_init(const struct tcpm_port *port) { }
803 static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
/* Program the CC pull (Rp level / Rd / open) through the low-level TCPC driver. */
807 static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
809 tcpm_log(port, "cc:=%d", cc);
811 port->tcpc->set_cc(port->tcpc, cc);
/*
 * Enable/disable the TCPC's automatic VBUS discharge, if the op exists, and
 * cache the state in auto_vbus_discharge_enabled.
 * NOTE(review): a success check likely guards the cached flag — the line is
 * missing in this extraction; confirm against upstream.
 */
814 static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
818 if (port->tcpc->enable_auto_vbus_discharge) {
819 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
820 tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable",
823 port->auto_vbus_discharge_enabled = enable;
/*
 * Keep the current Rp/Rd applied (TCPCI APPLY_RC) and pause auto-discharge,
 * so a PR_SWAP is not misread as a disconnect.  Needs both the
 * enable_auto_vbus_discharge and apply_rc TCPC ops.
 */
829 static void tcpm_apply_rc(struct tcpm_port *port)
832 * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
833 * when Vbus auto discharge on disconnect is enabled.
835 if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
836 tcpm_log(port, "Apply_RC");
837 port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
838 tcpm_enable_auto_vbus_discharge(port, false);
843 * Determine RP value to set based on maximum current supported
844 * by a port if configured as source.
845 * Returns CC value to report to link partner.
/*
 * Scans src_pdo for the first fixed 5 V PDO and maps its max current to an
 * Rp advertisement (>=3 A -> Rp3.0, >=1.5 A -> Rp1.5, else default Rp).
 * Non-PD ports and ports without a 5 V fixed PDO advertise default Rp.
 * NOTE(review): the comparison for the 3 A branch is on a line missing from
 * this extraction.
 */
847 static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
849 const u32 *src_pdo = port->src_pdo;
850 int nr_pdo = port->nr_src_pdo;
853 if (!port->pd_supported)
857 * Search for first entry with matching voltage.
858 * It should report the maximum supported current.
860 for (i = 0; i < nr_pdo; i++) {
861 const u32 pdo = src_pdo[i];
863 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
864 pdo_fixed_voltage(pdo) == 5000) {
865 unsigned int curr = pdo_max_current(pdo);
868 return TYPEC_CC_RP_3_0;
869 else if (curr >= 1500)
870 return TYPEC_CC_RP_1_5;
871 return TYPEC_CC_RP_DEF;
875 return TYPEC_CC_RP_DEF;
/*
 * End the current AMS: a PD-capable source restores SinkTxOK (PD3.0) or
 * SinkTxNG on CC, a non-PD source restores its computed Rp; then clears the
 * in_ams flag and resets ams to NONE_AMS.
 */
878 static void tcpm_ams_finish(struct tcpm_port *port)
880 tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);
882 if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
883 if (port->negotiated_rev >= PD_REV30)
884 tcpm_set_cc(port, SINK_TX_OK);
886 tcpm_set_cc(port, SINK_TX_NG);
887 } else if (port->pwr_role == TYPEC_SOURCE) {
888 tcpm_set_cc(port, tcpm_rp_cc(port));
891 port->in_ams = false;
892 port->ams = NONE_AMS;
/*
 * Transmit a PD message (or a hard reset when msg is NULL) via the TCPC and
 * wait for the driver's completion callback.  port->lock is dropped around
 * the wait so tcpm_pd_transmit_complete() can run.  On TX success the
 * message_id counter advances; AMSes that expect no response
 * (ATTENTION/SOURCE_ALERT) are finished here.
 */
895 static int tcpm_pd_transmit(struct tcpm_port *port,
896 enum tcpm_transmit_type type,
897 const struct pd_message *msg)
899 unsigned long timeout;
903 tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
905 tcpm_log(port, "PD TX, type: %#x", type);
907 reinit_completion(&port->tx_complete);
908 ret = port->tcpc->pd_transmit(port->tcpc, type, msg, port->negotiated_rev);
912 mutex_unlock(&port->lock);
913 timeout = wait_for_completion_timeout(&port->tx_complete,
914 msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
915 mutex_lock(&port->lock);
919 switch (port->tx_status) {
920 case TCPC_TX_SUCCESS:
921 port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
923 * USB PD rev 2.0, 8.3.2.2.1:
924 * USB PD rev 3.0, 8.3.2.1.3:
925 * "... Note that every AMS is Interruptible until the first
926 * Message in the sequence has been successfully sent (GoodCRC
927 * Message received)."
929 if (port->ams != NONE_AMS)
932 case TCPC_TX_DISCARDED:
941 /* Some AMS don't expect responses. Finish them here. */
942 if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
943 tcpm_ams_finish(port);
/*
 * Called by the low-level TCPC driver when transmission finishes; records
 * the status and wakes the waiter in tcpm_pd_transmit().
 */
948 void tcpm_pd_transmit_complete(struct tcpm_port *port,
949 enum tcpm_transmit_status status)
951 tcpm_log(port, "PD TX complete, status: %u", status);
952 port->tx_status = status;
953 complete(&port->tx_complete);
955 EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
/*
 * Update connector orientation, USB role switch, and Type-C mux mode, in
 * that order; bails out on the first failure.
 */
957 static int tcpm_mux_set(struct tcpm_port *port, int state,
958 enum usb_role usb_role,
959 enum typec_orientation orientation)
963 tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
964 state, usb_role, orientation);
966 ret = typec_set_orientation(port->typec_port, orientation);
971 ret = usb_role_switch_set_role(port->role_sw, usb_role);
976 return typec_set_mode(port->typec_port, state);
/* Program CC polarity in the TCPC and cache it on success. */
979 static int tcpm_set_polarity(struct tcpm_port *port,
980 enum typec_cc_polarity polarity)
984 tcpm_log(port, "polarity %d", polarity);
986 ret = port->tcpc->set_polarity(port->tcpc, polarity);
990 port->polarity = polarity;
/* Switch VCONN sourcing on/off and mirror the resulting role to the typec class. */
995 static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
999 tcpm_log(port, "vconn:=%d", enable);
1001 ret = port->tcpc->set_vconn(port->tcpc, enable);
1003 port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
1004 typec_set_vconn_role(port->typec_port, port->vconn_role);
/*
 * Derive the sink current limit (mA) from the Rp level seen on the active
 * CC line; for default Rp the optional tcpc->get_current_limit op may refine
 * the value (e.g. from legacy BC1.2 detection).
 */
1010 static u32 tcpm_get_current_limit(struct tcpm_port *port)
1012 enum typec_cc_status cc;
1015 cc = port->polarity ? port->cc2 : port->cc1;
1017 case TYPEC_CC_RP_1_5:
1020 case TYPEC_CC_RP_3_0:
1023 case TYPEC_CC_RP_DEF:
1025 if (port->tcpc->get_current_limit)
1026 limit = port->tcpc->get_current_limit(port->tcpc);
/*
 * Record the negotiated voltage/current, notify the power-supply class, and
 * forward the limit to the TCPC if it implements set_current_limit
 * (otherwise returns -EOPNOTSUPP).
 */
1035 static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
1037 int ret = -EOPNOTSUPP;
1039 tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
1041 port->supply_voltage = mv;
1042 port->current_limit = max_ma;
1043 power_supply_changed(port->psy);
1045 if (port->tcpc->set_current_limit)
1046 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
/* Push the current power/data roles to the TCPC with the given attached flag. */
1051 static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
1053 return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
/*
 * Apply power and data roles: derive mux orientation from CC polarity and
 * the USB role from the port's data capability (DRD follows the data role;
 * fixed DFP/UFP ports only enable USB when power and data roles line up),
 * then program the mux, the TCPC, and finally the typec class.
 */
1057 static int tcpm_set_roles(struct tcpm_port *port, bool attached,
1058 enum typec_role role, enum typec_data_role data)
1060 enum typec_orientation orientation;
1061 enum usb_role usb_role;
1064 if (port->polarity == TYPEC_POLARITY_CC1)
1065 orientation = TYPEC_ORIENTATION_NORMAL;
1067 orientation = TYPEC_ORIENTATION_REVERSE;
1069 if (port->typec_caps.data == TYPEC_PORT_DRD) {
1070 if (data == TYPEC_HOST)
1071 usb_role = USB_ROLE_HOST;
1073 usb_role = USB_ROLE_DEVICE;
1074 } else if (port->typec_caps.data == TYPEC_PORT_DFP) {
1075 if (data == TYPEC_HOST) {
1076 if (role == TYPEC_SOURCE)
1077 usb_role = USB_ROLE_HOST;
1079 usb_role = USB_ROLE_NONE;
1084 if (data == TYPEC_DEVICE) {
1085 if (role == TYPEC_SINK)
1086 usb_role = USB_ROLE_DEVICE;
1088 usb_role = USB_ROLE_NONE;
1094 ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
1098 ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
1102 port->pwr_role = role;
1103 port->data_role = data;
1104 typec_set_data_role(port->typec_port, data);
1105 typec_set_pwr_role(port->typec_port, role);
/* Change only the power role (data role untouched) and mirror it to the typec class. */
1110 static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
1114 ret = port->tcpc->set_roles(port->tcpc, true, role,
1119 port->pwr_role = role;
1120 typec_set_pwr_role(port->typec_port, role);
1126 * Transform the PDO to be compliant to PD rev2.0.
1127 * Return 0 if the PDO type is not defined in PD rev2.0.
1128 * Otherwise, return the converted PDO.
/*
 * For fixed PDOs this strips PD3.0-only bits: the FRS current field on sink
 * PDOs and the unchunked-extended-messages bit on source PDOs.
 */
1130 static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
1132 switch (pdo_type(pdo)) {
1133 case PDO_TYPE_FIXED:
1134 if (role == TYPEC_SINK)
1135 return pdo & ~PDO_FIXED_FRS_CURR_MASK;
1137 return pdo & ~PDO_FIXED_UNCHUNK_EXT;
/*
 * Build and transmit Source_Capabilities.  For a PD2.0 link each PDO is
 * down-converted via tcpm_forge_legacy_pdo(); when no PDOs remain (sink-only
 * port) a Reject control message is sent instead.
 */
1147 static int tcpm_pd_send_source_caps(struct tcpm_port *port)
1149 struct pd_message msg;
1151 unsigned int i, nr_pdo = 0;
1153 memset(&msg, 0, sizeof(msg));
1155 for (i = 0; i < port->nr_src_pdo; i++) {
1156 if (port->negotiated_rev >= PD_REV30) {
1157 msg.payload[nr_pdo++] = cpu_to_le32(port->src_pdo[i]);
1159 pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
1161 msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1166 /* No source capabilities defined, sink only */
1167 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1170 port->negotiated_rev,
1171 port->message_id, 0);
1173 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
1176 port->negotiated_rev,
1181 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
/*
 * Build and transmit Sink_Capabilities; mirror of
 * tcpm_pd_send_source_caps() for the sink PDO list, with Reject sent by a
 * source-only port.
 */
1184 static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
1186 struct pd_message msg;
1188 unsigned int i, nr_pdo = 0;
1190 memset(&msg, 0, sizeof(msg));
1192 for (i = 0; i < port->nr_snk_pdo; i++) {
1193 if (port->negotiated_rev >= PD_REV30) {
1194 msg.payload[nr_pdo++] = cpu_to_le32(port->snk_pdo[i]);
1196 pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
1198 msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1203 /* No sink capabilities defined, source only */
1204 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1207 port->negotiated_rev,
1208 port->message_id, 0);
1210 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
1213 port->negotiated_rev,
1218 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
/*
 * Schedule the main state-machine work: arm the hrtimer for a non-zero
 * delay, otherwise cancel it and queue the work immediately (the guarding
 * if/else lines are missing in this extraction).
 */
1221 static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1224 hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1226 hrtimer_cancel(&port->state_machine_timer);
1227 kthread_queue_work(port->wq, &port->state_machine);
/* Same scheduling pattern as mod_tcpm_delayed_work(), for the VDM state machine. */
1231 static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1234 hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
1237 hrtimer_cancel(&port->vdm_state_machine_timer);
1238 kthread_queue_work(port->wq, &port->vdm_state_machine);
/* Same scheduling pattern, for the FRS-enable work item. */
1242 static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1245 hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1247 hrtimer_cancel(&port->enable_frs_timer);
1248 kthread_queue_work(port->wq, &port->enable_frs);
/* Same scheduling pattern, for the Discover Identity retry work item. */
1252 static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1255 hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1257 hrtimer_cancel(&port->send_discover_timer);
1258 kthread_queue_work(port->wq, &port->send_discover_work);
/*
 * Request a state transition.  With a delay the target goes into
 * delayed_state and the hrtimer path is armed (delayed_runtime records the
 * absolute deadline).  Immediate transitions update prev_state/state and
 * kick the worker — unless the state machine itself is currently running,
 * in which case it will pick up the new state without re-queueing.
 */
1262 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
1263 unsigned int delay_ms)
1266 tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
1267 tcpm_states[port->state], tcpm_states[state], delay_ms,
1268 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1269 port->delayed_state = state;
1270 mod_tcpm_delayed_work(port, delay_ms);
1271 port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
1272 port->delay_ms = delay_ms;
1274 tcpm_log(port, "state change %s -> %s [%s %s]",
1275 tcpm_states[port->state], tcpm_states[state],
1276 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1277 port->delayed_state = INVALID_STATE;
1278 port->prev_state = port->state;
1279 port->state = state;
1281 * Don't re-queue the state machine work item if we're currently
1282 * in the state machine and we're immediately changing states.
1283 * tcpm_state_machine_work() will continue running the state
1286 if (!port->state_machine_running)
1287 mod_tcpm_delayed_work(port, 0);
/*
 * Conditional transition: only applied while still in the state we entered
 * the current handler with (enter_state); otherwise the request is logged
 * and dropped, since a concurrent transition superseded it.
 */
1291 static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
1292 unsigned int delay_ms)
1294 if (port->enter_state == port->state)
1295 tcpm_set_state(port, state, delay_ms);
1298 "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
1299 delay_ms ? "delayed " : "",
1300 tcpm_states[port->state], tcpm_states[state],
1301 delay_ms, tcpm_states[port->enter_state],
1302 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
/* Queue a control/data message for the state machine and kick it immediately. */
1305 static void tcpm_queue_message(struct tcpm_port *port,
1306 enum pd_msg_request message)
1308 port->queued_message = message;
1309 mod_tcpm_delayed_work(port, 0);
/*
 * True when the current AMS is driven by VDMs (identity/SVID/mode discovery,
 * enter/exit mode, and [un]structured VDM exchanges).
 */
1312 static bool tcpm_vdm_ams(struct tcpm_port *port)
1314 switch (port->ams) {
1315 case DISCOVER_IDENTITY:
1316 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1317 case DISCOVER_SVIDS:
1318 case DISCOVER_MODES:
1319 case DFP_TO_UFP_ENTER_MODE:
1320 case DFP_TO_UFP_EXIT_MODE:
1321 case DFP_TO_CABLE_PLUG_ENTER_MODE:
1322 case DFP_TO_CABLE_PLUG_EXIT_MODE:
1324 case UNSTRUCTURED_VDMS:
1325 case STRUCTURED_VDMS:
/*
 * Per the PD spec only certain AMSes may be interrupted once started; the
 * listed cases are the interruptible set, everything else is not.
 */
1334 static bool tcpm_ams_interruptible(struct tcpm_port *port)
1336 switch (port->ams) {
1337 /* Interruptible AMS */
1340 case FIRMWARE_UPDATE:
1341 case DISCOVER_IDENTITY:
1342 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1343 case DISCOVER_SVIDS:
1344 case DISCOVER_MODES:
1345 case DFP_TO_UFP_ENTER_MODE:
1346 case DFP_TO_UFP_EXIT_MODE:
1347 case DFP_TO_CABLE_PLUG_ENTER_MODE:
1348 case DFP_TO_CABLE_PLUG_EXIT_MODE:
1349 case UNSTRUCTURED_VDMS:
1350 case STRUCTURED_VDMS:
1354 /* Non-Interruptible AMS */
/*
 * Begin a new AMS, honoring PD3.0 collision avoidance.  A non-interruptible
 * AMS in progress aborts the request (except for HARD_RESET/SOFT_RESET_AMS).
 * As source: hard/soft reset are dispatched directly; otherwise the CC is
 * driven to SinkTxNG on PD3.0 before transmitting, with tSinkTx delays
 * applied when the partner was previously shown SinkTxOK.  As sink: a PD3.0
 * port may only start when the source advertises SinkTxOK, else "Sink TX No
 * Go" is logged and the AMS is refused.
 */
1364 static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
1368 tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);
1370 if (!tcpm_ams_interruptible(port) &&
1371 !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
1372 port->upcoming_state = INVALID_STATE;
1373 tcpm_log(port, "AMS %s not interruptible, aborting",
1374 tcpm_ams_str[port->ams]);
1378 if (port->pwr_role == TYPEC_SOURCE) {
1379 enum typec_cc_status cc_req = port->cc_req;
1383 if (ams == HARD_RESET) {
1384 tcpm_set_cc(port, tcpm_rp_cc(port));
1385 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1386 tcpm_set_state(port, HARD_RESET_START, 0);
1388 } else if (ams == SOFT_RESET_AMS) {
1389 if (!port->explicit_contract)
1390 tcpm_set_cc(port, tcpm_rp_cc(port));
1391 tcpm_set_state(port, SOFT_RESET_SEND, 0);
1393 } else if (tcpm_vdm_ams(port)) {
1394 /* tSinkTx is enforced in vdm_run_state_machine */
1395 if (port->negotiated_rev >= PD_REV30)
1396 tcpm_set_cc(port, SINK_TX_NG);
1400 if (port->negotiated_rev >= PD_REV30)
1401 tcpm_set_cc(port, SINK_TX_NG);
1403 switch (port->state) {
1406 case SRC_SOFT_RESET_WAIT_SNK_TX:
1408 case SOFT_RESET_SEND:
1409 if (port->negotiated_rev >= PD_REV30)
1410 tcpm_set_state(port, AMS_START,
1411 cc_req == SINK_TX_OK ?
1414 tcpm_set_state(port, AMS_START, 0);
1417 if (port->negotiated_rev >= PD_REV30)
1418 tcpm_set_state(port, SRC_READY,
1419 cc_req == SINK_TX_OK ?
1422 tcpm_set_state(port, SRC_READY, 0);
1426 if (port->negotiated_rev >= PD_REV30 &&
1427 !tcpm_sink_tx_ok(port) &&
1428 ams != SOFT_RESET_AMS &&
1429 ams != HARD_RESET) {
1430 port->upcoming_state = INVALID_STATE;
1431 tcpm_log(port, "Sink TX No Go");
1437 if (ams == HARD_RESET) {
1438 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1439 tcpm_set_state(port, HARD_RESET_START, 0);
1441 } else if (tcpm_vdm_ams(port)) {
1445 if (port->state == SNK_READY ||
1446 port->state == SNK_SOFT_RESET)
1447 tcpm_set_state(port, AMS_START, 0);
1449 tcpm_set_state(port, SNK_READY, 0);
1456 * VDM/VDO handling functions
/*
 * tcpm_queue_vdm - stage a VDM for transmission by the VDM state machine.
 * @header: VDM header word (vdo_data[0])
 * @data:   trailing VDOs, @cnt words
 *
 * Caller must hold port->lock.  The message is only staged here; the actual
 * send happens asynchronously in the VDM state machine work.
 */
1458 static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
1459 			   const u32 *data, int cnt)
1461 	u32 vdo_hdr = port->vdo_data[0];
1463 	WARN_ON(!mutex_is_locked(&port->lock));
1465 	/* If a Discover Identity is pending, defer it and handle the received message first */
1466 	if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
1467 		port->send_discover = true;
1468 		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
1470 	/* Make sure we are not still processing a previous VDM packet */
1471 	WARN_ON(port->vdm_state > VDM_STATE_DONE);
/* Stage header + payload; vdo_count includes the header word */
1474 	port->vdo_count = cnt + 1;
1475 	port->vdo_data[0] = header;
1476 	memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
1477 	/* Set ready, vdm state machine will actually send */
1478 	port->vdm_retries = 0;
1479 	port->vdm_state = VDM_STATE_READY;
1480 	port->vdm_sm_running = true;
1482 	mod_vdm_delayed_work(port, 0);
/* Locked wrapper around tcpm_queue_vdm() for callers not holding port->lock */
1485 static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
1486 				    const u32 *data, int cnt)
1488 	mutex_lock(&port->lock);
1489 	tcpm_queue_vdm(port, header, data, cnt);
1490 	mutex_unlock(&port->lock);
/*
 * svdm_consume_identity - record the partner's Discover Identity response.
 *
 * Copies ID Header / Cert Stat / Product VDOs into port->partner_ident,
 * resets cached mode-discovery data, and publishes the identity to the
 * typec class.  @p points at the VDM including its header at index 0.
 */
1493 static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
1495 	u32 vdo = p[VDO_INDEX_IDH];
1496 	u32 product = p[VDO_INDEX_PRODUCT];
/* A new identity invalidates any previously discovered SVIDs/modes */
1498 	memset(&port->mode_data, 0, sizeof(port->mode_data));
1500 	port->partner_ident.id_header = vdo;
1501 	port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
1502 	port->partner_ident.product = product;
1504 	typec_partner_set_identity(port->partner);
1506 	tcpm_log(port, "Identity: %04x:%04x.%04x",
1508 		 PD_PRODUCT_PID(product), product & 0xffff);
/*
 * svdm_consume_svids - parse a Discover SVIDs response (two SVIDs per VDO).
 *
 * Appends each non-zero SVID to port->mode_data.svids, up to
 * SVID_DISCOVERY_MAX.  Returns true when discovery should continue with
 * another Discover SVIDs request, false when it should stop (visible
 * return statements are elided from this listing).
 */
1511 static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt)
1513 	struct pd_mode_data *pmdata = &port->mode_data;
1516 	for (i = 1; i < cnt; i++) {
/* High halfword carries the first SVID of the pair */
1519 		svid = (p[i] >> 16) & 0xffff;
1523 		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1526 		pmdata->svids[pmdata->nsvids++] = svid;
1527 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
/* Low halfword carries the second SVID of the pair */
1529 		svid = p[i] & 0xffff;
1533 		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1536 		pmdata->svids[pmdata->nsvids++] = svid;
1537 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1541 	 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
1542 	 * 6-43), and can be returned maximum 6 VDOs per response (see Figure
1543 	 * 6-19). If the Responder supports 12 or more SVIDs then the Discover
1544 	 * SVIDs Command Shall be executed multiple times until a Discover
1545 	 * SVIDs VDO is returned ending either with a SVID value of 0x0000 in
1546 	 * the last part of the last VDO or with a VDO containing two SVIDs
1547 	 * with values of 0x0000.
1549 	 * However, some odd dockers support SVIDs less than 12 but without
1550 	 * 0x0000 in the last VDO, so we need to break the Discover SVIDs
1551 	 * request and return false here.
1555 	tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
/*
 * svdm_consume_modes - parse a Discover Modes response for the SVID
 * currently indexed by port->mode_data.svid_index, building one altmode
 * descriptor per mode VDO.  Silently stops when the partner-altmode
 * descriptor table is full (limit already reported during SVID discovery).
 */
1559 static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt)
1561 	struct pd_mode_data *pmdata = &port->mode_data;
1562 	struct typec_altmode_desc *paltmode;
1565 	if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
1566 		/* Already logged in svdm_consume_svids() */
/* p[0] is the VDM header; mode VDOs start at index 1 */
1570 	for (i = 1; i < cnt; i++) {
1571 		paltmode = &pmdata->altmode_desc[pmdata->altmodes];
1572 		memset(paltmode, 0, sizeof(*paltmode));
1574 		paltmode->svid = pmdata->svids[pmdata->svid_index];
1576 		paltmode->vdo = p[i];
1578 		tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
1579 			 pmdata->altmodes, paltmode->svid,
1580 			 paltmode->mode, paltmode->vdo);
/*
 * tcpm_register_partner_altmodes - register every discovered partner
 * alternate mode with the typec class; registration failures are logged
 * and the altmode is skipped (handling after the log is elided here).
 */
1586 static void tcpm_register_partner_altmodes(struct tcpm_port *port)
1588 	struct pd_mode_data *modep = &port->mode_data;
1589 	struct typec_altmode *altmode;
1592 	for (i = 0; i < modep->altmodes; i++) {
1593 		altmode = typec_partner_register_altmode(port->partner,
1594 							 &modep->altmode_desc[i]);
1595 		if (IS_ERR(altmode)) {
1596 			tcpm_log(port, "Failed to register partner SVID 0x%04x",
1597 				 modep->altmode_desc[i].svid);
/* Keep the handle so it can be unregistered on detach */
1600 		port->partner_altmode[i] = altmode;
/* True if the partner's ID Header VDO advertises Modal Operation support */
1604 #define supports_modal(port)	PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
/*
 * tcpm_pd_svdm - process a received Structured VDM and build the reply.
 * @adev:        matching local port altmode, if any
 * @p:           received VDOs (p[0] is the VDM header), @cnt words
 * @response:    output buffer for the reply VDM
 * @adev_action: output, what the caller should do with the altmode device
 *
 * Returns the number of u32 words to send in @response (0 = no reply;
 * several return paths are elided from this listing).  Dispatches on the
 * command type: CMDT_INIT (partner-initiated request), CMDT_RSP_ACK /
 * NAK / BUSY (replies to our own requests).
 */
1606 static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
1607 			const u32 *p, int cnt, u32 *response,
1608 			enum adev_actions *adev_action)
1610 	struct typec_port *typec = port->typec_port;
1611 	struct typec_altmode *pdev;
1612 	struct pd_mode_data *modep;
1619 	cmd_type = PD_VDO_CMDT(p[0]);
1620 	cmd = PD_VDO_CMD(p[0]);
1622 	tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1623 		 p[0], cmd_type, cmd, cnt);
1625 	modep = &port->mode_data;
/* Find the partner altmode this VDM is addressed to (VID + object pos) */
1627 	pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX,
1628 				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
1630 	svdm_version = typec_get_negotiated_svdm_version(typec);
1631 	if (svdm_version < 0)
/* --- CMDT_INIT: partner is asking us --- */
1637 	case CMD_DISCOVER_IDENT:
1638 		if (PD_VDO_VID(p[0]) != USB_SID_PD)
1641 		if (IS_ERR_OR_NULL(port->partner))
/* Negotiate down to the lower of the two SVDM versions */
1644 		if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
1645 			typec_partner_set_svdm_version(port->partner,
1646 						       PD_VDO_SVDM_VER(p[0]));
1647 			svdm_version = PD_VDO_SVDM_VER(p[0]);
1650 		port->ams = DISCOVER_IDENTITY;
1652 		 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
1653 		 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
1654 		 * "wrong configuration" or "Unrecognized"
/* ACK with the identity VDO set matching the negotiated SVDM version */
1656 		if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
1658 			if (svdm_version < SVDM_VER_2_0) {
1659 				for (i = 0; i < port->nr_snk_vdo_v1; i++)
1660 					response[i + 1] = port->snk_vdo_v1[i];
1661 				rlen = port->nr_snk_vdo_v1 + 1;
1664 				for (i = 0; i < port->nr_snk_vdo; i++)
1665 					response[i + 1] = port->snk_vdo[i];
1666 				rlen = port->nr_snk_vdo + 1;
1670 	case CMD_DISCOVER_SVID:
1671 		port->ams = DISCOVER_SVIDS;
1673 	case CMD_DISCOVER_MODES:
1674 		port->ams = DISCOVER_MODES;
1676 	case CMD_ENTER_MODE:
1677 		port->ams = DFP_TO_UFP_ENTER_MODE;
1680 		port->ams = DFP_TO_UFP_EXIT_MODE;
1683 		/* Attention command does not have response */
1684 		*adev_action = ADEV_ATTENTION;
/* Build the reply header: ACK if we filled a response, NAK/BUSY otherwise */
1690 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
1691 		} else if (rlen == 0) {
1692 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1695 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
/* Stamp the reply with the negotiated SVDM version */
1698 		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
1699 			      (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
/* --- CMDT_RSP_ACK: partner answered our request --- */
1702 		/* silently drop message if we are not connected */
1703 		if (IS_ERR_OR_NULL(port->partner))
1706 		tcpm_ams_finish(port);
1709 		case CMD_DISCOVER_IDENT:
1710 			if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
1711 				typec_partner_set_svdm_version(port->partner,
1712 							       PD_VDO_SVDM_VER(p[0]));
/* Identity consumed; next step is Discover SVIDs */
1714 			svdm_consume_identity(port, p, cnt);
1715 			response[0] = VDO(USB_SID_PD, 1, typec_get_negotiated_svdm_version(typec),
1719 		case CMD_DISCOVER_SVID:
/* Either keep querying SVIDs or start Discover Modes on the first SVID */
1721 			if (svdm_consume_svids(port, p, cnt)) {
1722 				response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
1724 			} else if (modep->nsvids && supports_modal(port)) {
1725 				response[0] = VDO(modep->svids[0], 1, svdm_version,
1726 						  CMD_DISCOVER_MODES);
1730 		case CMD_DISCOVER_MODES:
1732 			svdm_consume_modes(port, p, cnt);
1733 			modep->svid_index++;
1734 			if (modep->svid_index < modep->nsvids) {
1735 				u16 svid = modep->svids[modep->svid_index];
1736 				response[0] = VDO(svid, 1, svdm_version, CMD_DISCOVER_MODES);
/* All SVIDs scanned: publish the discovered alternate modes */
1739 				tcpm_register_partner_altmodes(port);
1742 		case CMD_ENTER_MODE:
1744 				*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
1748 			/* Back to USB Operation */
1749 			*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
1753 		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
1756 			/* Unrecognized SVDM */
1757 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1759 			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
1760 				      (VDO_SVDM_VERS(svdm_version));
/* --- CMDT_RSP_NAK (partner refused): finish the AMS --- */
1765 		tcpm_ams_finish(port);
1767 		case CMD_DISCOVER_IDENT:
1768 		case CMD_DISCOVER_SVID:
1769 		case CMD_DISCOVER_MODES:
1770 		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
1772 		case CMD_ENTER_MODE:
1773 			/* Back to USB Operation */
1774 			*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
1777 			/* Unrecognized SVDM */
1778 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1780 			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
1781 				      (VDO_SVDM_VERS(svdm_version));
/* Unknown command type: NAK it */
1786 		response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1788 		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
1789 			      (VDO_SVDM_VERS(svdm_version));
1793 	/* Informing the alternate mode drivers about everything */
1794 	*adev_action = ADEV_QUEUE_VDM;
1798 static void tcpm_pd_handle_msg(struct tcpm_port *port,
1799 enum pd_msg_request message,
/*
 * tcpm_handle_vdm_request - top-level handler for a received Vendor
 * Defined Message.
 *
 * Converts the payload to CPU endianness, resolves the local altmode
 * device, runs SVDM processing (tcpm_pd_svdm), then — with port->lock
 * dropped, see the lock-inversion comment below — notifies the altmode
 * driver, and finally queues any reply VDM.
 */
1802 static void tcpm_handle_vdm_request(struct tcpm_port *port,
1803 				    const __le32 *payload, int cnt)
1805 	enum adev_actions adev_action = ADEV_NONE;
1806 	struct typec_altmode *adev;
1807 	u32 p[PD_MAX_PAYLOAD];
1808 	u32 response[8] = { };
1811 	for (i = 0; i < cnt; i++)
1812 		p[i] = le32_to_cpu(payload[i]);
1814 	adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
1815 				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
/* Resolve an in-flight VDM of our own against this incoming message */
1817 	if (port->vdm_state == VDM_STATE_BUSY) {
1818 		/* If UFP responded busy retry after timeout */
1819 		if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
1820 			port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
1821 			port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
1823 			mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
1826 		port->vdm_state = VDM_STATE_DONE;
1829 	if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
1831 		 * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
1832 		 * advance because we are dropping the lock but may send VDMs soon.
1833 		 * For the cases of INIT received:
1834 		 *  - If no response to send, it will be cleared later in this function.
1835 		 *  - If there are responses to send, it will be cleared in the state machine.
1836 		 * For the cases of RSP received:
1837 		 *  - If no further INIT to send, it will be cleared later in this function.
1838 		 *  - Otherwise, it will be cleared in the state machine if timeout or it will go
1839 		 *    back here until no further INIT to send.
1840 		 * For the cases of unknown type received:
1841 		 *  - We will send NAK and the flag will be cleared in the state machine.
1843 		port->vdm_sm_running = true;
1844 		rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
/* Unstructured VDM (or unhandled SVDM) under PD 3.0: reply Not_Supported */
1846 		if (port->negotiated_rev >= PD_REV30)
1847 			tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
1851 	 * We are done with any state stored in the port struct now, except
1852 	 * for any port struct changes done by the tcpm_queue_vdm() call
1853 	 * below, which is a separate operation.
1855 	 * So we can safely release the lock here; and we MUST release the
1856 	 * lock here to avoid an AB BA lock inversion:
1858 	 * If we keep the lock here then the lock ordering in this path is:
1859 	 *  1. tcpm_pd_rx_handler take the tcpm port lock
1860 	 *  2. One of the typec_altmode_* calls below takes the alt-mode's lock
1862 	 * And we also have this ordering:
1863 	 *  1. alt-mode driver takes the alt-mode's lock
1864 	 *  2. alt-mode driver calls tcpm_altmode_enter which takes the
1867 	 * Dropping our lock here avoids this.
1869 	mutex_unlock(&port->lock);
/* Lock is dropped: forward the VDM to the altmode driver as directed */
1872 	switch (adev_action) {
1875 	case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
1876 		WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
1877 		typec_altmode_vdm(adev, p[0], &p[1], cnt);
1879 	case ADEV_QUEUE_VDM:
1880 		typec_altmode_vdm(adev, p[0], &p[1], cnt);
1882 	case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
1883 		if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
1884 			int svdm_version = typec_get_negotiated_svdm_version(
/* Driver rejected Enter Mode: queue an Exit Mode for the same object position */
1886 			if (svdm_version < 0)
1889 			response[0] = VDO(adev->svid, 1, svdm_version,
1891 			response[0] |= VDO_OPOS(adev->mode);
1895 	case ADEV_ATTENTION:
1896 		if (typec_altmode_attention(adev, p[1]))
1897 			tcpm_log(port, "typec_altmode_attention no port partner altmode");
1903 	 * We must re-take the lock here to balance the unlock in
1904 	 * tcpm_pd_rx_handler, note that no changes, other then the
1905 	 * tcpm_queue_vdm call, are made while the lock is held again.
1906 	 * All that is done after the call is unwinding the call stack until
1907 	 * we return to tcpm_pd_rx_handler and do the unlock there.
1909 	mutex_lock(&port->lock);
/* rlen - 1 trailing VDOs follow the reply header in response[] */
1912 		tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
1914 		port->vdm_sm_running = false;
/*
 * tcpm_send_vdm - build a VDM header for @vid/@cmd and queue it with up to
 * VDO_MAX_SIZE - 1 data words.  Caller must hold port->lock (queues via
 * tcpm_queue_vdm).
 */
1917 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
1918 			  const u32 *data, int count)
1920 	int svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
1923 	if (svdm_version < 0)
/* Clamp payload to the VDM maximum (header occupies one slot) */
1926 	if (WARN_ON(count > VDO_MAX_SIZE - 1))
1927 		count = VDO_MAX_SIZE - 1;
1929 	/* set VDM header with VID & CMD */
/* Structured bit: always set for the PD SID, else only for standard cmds */
1930 	header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1931 			1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
1933 	tcpm_queue_vdm(port, header, data, count);
/*
 * vdm_ready_timeout - response timeout to arm after transmitting the VDM
 * in @vdm_hdr, per the PD timing parameters: mode entry/exit gets the
 * longer tVDMWaitModeEntry/tVDMEnterMode budgets, other structured VDMs
 * get sender/receiver response time, unstructured VDMs a fixed budget.
 */
1936 static unsigned int vdm_ready_timeout(u32 vdm_hdr)
1938 	unsigned int timeout;
1939 	int cmd = PD_VDO_CMD(vdm_hdr);
1941 	/* its not a structured VDM command */
1942 	if (!PD_VDO_SVDM(vdm_hdr))
1943 		return PD_T_VDM_UNSTRUCTURED;
1945 	switch (PD_VDO_CMDT(vdm_hdr)) {
/* We sent an initiation: wait for the responder */
1947 		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1948 			timeout = PD_T_VDM_WAIT_MODE_E;
1950 			timeout = PD_T_VDM_SNDR_RSP;
/* We sent a response: receiver-side budget */
1953 		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1954 			timeout = PD_T_VDM_E_MODE;
1956 			timeout = PD_T_VDM_RCVR_RSP;
/*
 * vdm_run_state_machine - advance the VDM send state machine one step.
 *
 * READY: start the matching AMS for the queued command and schedule the
 * actual transmit (with SinkTxNG settle delay for a PD 3.0 source INIT).
 * WAIT_RSP_BUSY: re-queue the retried VDM after the partner's BUSY.
 * BUSY: response timeout expired -> ERR_TMOUT.
 * ERR_SEND: retry transmission up to 3 times.
 * SEND_MESSAGE: build the pd_message from vdo_data[] and transmit.
 * Caller holds port->lock (invoked from vdm_state_machine_work).
 */
1962 static void vdm_run_state_machine(struct tcpm_port *port)
1964 	struct pd_message msg;
1966 	u32 vdo_hdr = port->vdo_data[0];
1968 	switch (port->vdm_state) {
1969 	case VDM_STATE_READY:
1970 		/* Only transmit VDM if attached */
1971 		if (!port->attached) {
1972 			port->vdm_state = VDM_STATE_ERR_BUSY;
1977 		 * if there's traffic or we're not in PDO ready state don't send
1980 		if (port->state != SRC_READY && port->state != SNK_READY) {
1981 			port->vdm_sm_running = false;
1985 		/* TODO: AMS operation for Unstructured VDM */
1986 		if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
1987 			switch (PD_VDO_CMD(vdo_hdr)) {
1988 			case CMD_DISCOVER_IDENT:
1989 				res = tcpm_ams_start(port, DISCOVER_IDENTITY);
1991 					port->send_discover = false;
1992 				} else if (res == -EAGAIN) {
/* AMS blocked: drop the staged VDM and retry discovery later */
1993 					port->vdo_data[0] = 0;
1994 					mod_send_discover_delayed_work(port,
1995 								       SEND_DISCOVER_RETRY_MS);
1998 			case CMD_DISCOVER_SVID:
1999 				res = tcpm_ams_start(port, DISCOVER_SVIDS);
2001 			case CMD_DISCOVER_MODES:
2002 				res = tcpm_ams_start(port, DISCOVER_MODES);
2004 			case CMD_ENTER_MODE:
2005 				res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
2008 				res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
2011 				res = tcpm_ams_start(port, ATTENTION);
2013 			case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2014 				res = tcpm_ams_start(port, STRUCTURED_VDMS);
2022 				port->vdm_state = VDM_STATE_ERR_BUSY;
/*
 * PD 3.0 source INIT: delay so the sink observes SinkTxNG before we send
 * (delay operand elided in this listing); otherwise send immediately.
 */
2027 		port->vdm_state = VDM_STATE_SEND_MESSAGE;
2028 		mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
2029 					    port->pwr_role == TYPEC_SOURCE &&
2030 					    PD_VDO_SVDM(vdo_hdr) &&
2031 					    PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
2034 	case VDM_STATE_WAIT_RSP_BUSY:
2035 		port->vdo_data[0] = port->vdo_retry;
2036 		port->vdo_count = 1;
2037 		port->vdm_state = VDM_STATE_READY;
2038 		tcpm_ams_finish(port);
2040 	case VDM_STATE_BUSY:
2041 		port->vdm_state = VDM_STATE_ERR_TMOUT;
2042 		if (port->ams != NONE_AMS)
2043 			tcpm_ams_finish(port);
2045 	case VDM_STATE_ERR_SEND:
2047 		 * A partner which does not support USB PD will not reply,
2048 		 * so this is not a fatal error. At the same time, some
2049 		 * devices may not return GoodCRC under some circumstances,
2050 		 * so we need to retry.
2052 		if (port->vdm_retries < 3) {
2053 			tcpm_log(port, "VDM Tx error, retry");
2054 			port->vdm_retries++;
2055 			port->vdm_state = VDM_STATE_READY;
2056 			if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
2057 				tcpm_ams_finish(port);
2059 			tcpm_ams_finish(port);
2062 	case VDM_STATE_SEND_MESSAGE:
2063 		/* Prepare and send VDM */
2064 		memset(&msg, 0, sizeof(msg));
2065 		msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2068 					  port->negotiated_rev,
2069 					  port->message_id, port->vdo_count);
2070 		for (i = 0; i < port->vdo_count; i++)
2071 			msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
2072 		res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
2074 			port->vdm_state = VDM_STATE_ERR_SEND;
2076 			unsigned long timeout;
/* Sent OK: clear staging and arm the response timeout */
2078 			port->vdm_retries = 0;
2079 			port->vdo_data[0] = 0;
2080 			port->vdm_state = VDM_STATE_BUSY;
2081 			timeout = vdm_ready_timeout(vdo_hdr);
2082 			mod_vdm_delayed_work(port, timeout);
/*
 * vdm_state_machine_work - kthread work wrapper that repeatedly steps
 * vdm_run_state_machine() under port->lock until the state stops changing
 * or the machine parks in BUSY/SEND_MESSAGE (which resume via timer).
 */
2090 static void vdm_state_machine_work(struct kthread_work *work)
2092 	struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
2093 	enum vdm_states prev_state;
2095 	mutex_lock(&port->lock);
2098 	 * Continue running as long as the port is not busy and there was
2102 		prev_state = port->vdm_state;
2103 		vdm_run_state_machine(port);
2104 	} while (port->vdm_state != prev_state &&
2105 		 port->vdm_state != VDM_STATE_BUSY &&
2106 		 port->vdm_state != VDM_STATE_SEND_MESSAGE);
/* Any state below READY means the machine is idle or errored out */
2108 	if (port->vdm_state < VDM_STATE_READY)
2109 		port->vdm_sm_running = false;
2111 	mutex_unlock(&port->lock);
2117 PDO_ERR_VSAFE5V_NOT_FIRST,
2118 PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
2119 PDO_ERR_FIXED_NOT_SORTED,
2120 PDO_ERR_VARIABLE_BATT_NOT_SORTED,
2122 PDO_ERR_PPS_APDO_NOT_SORTED,
2123 PDO_ERR_DUPE_PPS_APDO,
/* Human-readable messages for each enum pdo_err, logged by tcpm_validate_caps() */
2126 static const char * const pdo_err_msg[] = {
2127 	[PDO_ERR_NO_VSAFE5V] =
2128 	" err: source/sink caps should at least have vSafe5V",
2129 	[PDO_ERR_VSAFE5V_NOT_FIRST] =
2130 	" err: vSafe5V Fixed Supply Object Shall always be the first object",
2131 	[PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
2132 	" err: PDOs should be in the following order: Fixed; Battery; Variable",
2133 	[PDO_ERR_FIXED_NOT_SORTED] =
2134 	" err: Fixed supply pdos should be in increasing order of their fixed voltage",
2135 	[PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
2136 	" err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
2137 	[PDO_ERR_DUPE_PDO] =
2138 	" err: Variable/Batt supply pdos cannot have same min/max voltage",
2139 	[PDO_ERR_PPS_APDO_NOT_SORTED] =
2140 	" err: Programmable power supply apdos should be in increasing order of their maximum voltage",
2141 	[PDO_ERR_DUPE_PPS_APDO] =
2142 	" err: Programmable power supply apdos cannot have same min/max voltage and max current",
/*
 * tcpm_caps_err - validate a PDO list against the PD spec ordering rules:
 * vSafe5V fixed PDO first, types grouped Fixed < Battery < Variable < APDO,
 * and each group sorted by voltage.  Returns the first violation found
 * (PDO_NO_ERR on success; that return path is elided in this listing).
 */
2145 static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
2146 				  unsigned int nr_pdo)
2150 	/* Should at least contain vSafe5v */
2152 		return PDO_ERR_NO_VSAFE5V;
2154 	/* The vSafe5V Fixed Supply Object Shall always be the first object */
2155 	if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
2156 	    pdo_fixed_voltage(pdo[0]) != VSAFE5V)
2157 		return PDO_ERR_VSAFE5V_NOT_FIRST;
2159 	for (i = 1; i < nr_pdo; i++) {
/* Types must be non-decreasing; equal types must be sorted within group */
2160 		if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
2161 			return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
2162 		} else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
2163 			enum pd_pdo_type type = pdo_type(pdo[i]);
2167 			 * The remaining Fixed Supply Objects, if
2168 			 * present, shall be sent in voltage order;
2169 			 * lowest to highest.
2171 			case PDO_TYPE_FIXED:
2172 				if (pdo_fixed_voltage(pdo[i]) <=
2173 				    pdo_fixed_voltage(pdo[i - 1]))
2174 					return PDO_ERR_FIXED_NOT_SORTED;
2177 			 * The Battery Supply Objects and Variable
2178 			 * supply, if present shall be sent in Minimum
2179 			 * Voltage order; lowest to highest.
2183 				if (pdo_min_voltage(pdo[i]) <
2184 				    pdo_min_voltage(pdo[i - 1]))
2185 					return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
2186 				else if ((pdo_min_voltage(pdo[i]) ==
2187 					  pdo_min_voltage(pdo[i - 1])) &&
2188 					 (pdo_max_voltage(pdo[i]) ==
2189 					  pdo_max_voltage(pdo[i - 1])))
2190 					return PDO_ERR_DUPE_PDO;
2193 			 * The Programmable Power Supply APDOs, if present,
2194 			 * shall be sent in Maximum Voltage order;
2195 			 * lowest to highest.
/* Only PPS APDOs are checked; other APDO subtypes are skipped */
2198 				if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
2201 				if (pdo_pps_apdo_max_voltage(pdo[i]) <
2202 				    pdo_pps_apdo_max_voltage(pdo[i - 1]))
2203 					return PDO_ERR_PPS_APDO_NOT_SORTED;
2204 				else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
2205 					 pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
2206 					 pdo_pps_apdo_max_voltage(pdo[i]) ==
2207 					 pdo_pps_apdo_max_voltage(pdo[i - 1]) &&
2208 					 pdo_pps_apdo_max_current(pdo[i]) ==
2209 					 pdo_pps_apdo_max_current(pdo[i - 1]))
2210 					return PDO_ERR_DUPE_PPS_APDO;
2213 				tcpm_log_force(port, " Unknown pdo type");
/*
 * tcpm_validate_caps - run tcpm_caps_err() on the PDO list and log the
 * matching pdo_err_msg[] string on failure (return values elided here).
 */
2221 static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
2222 			      unsigned int nr_pdo)
2224 	enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
2226 	if (err_index != PDO_NO_ERR) {
2227 		tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
/*
 * tcpm_altmode_enter - typec_altmode_ops .enter hook: queue an Enter Mode
 * SVDM for @altmode, with one optional mode-specific VDO.  Called from
 * altmode-driver context, hence the unlocked queueing helper.
 */
2234 static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
2236 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2240 	svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2241 	if (svdm_version < 0)
2242 		return svdm_version;
/* Object count is 2 when a mode VDO accompanies the header, else 1 */
2244 	header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2245 	header |= VDO_OPOS(altmode->mode);
2247 	tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0);
/*
 * tcpm_altmode_exit - typec_altmode_ops .exit hook: queue an Exit Mode
 * SVDM for @altmode's object position.
 */
2251 static int tcpm_altmode_exit(struct typec_altmode *altmode)
2253 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2257 	svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2258 	if (svdm_version < 0)
2259 		return svdm_version;
2261 	header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2262 	header |= VDO_OPOS(altmode->mode);
2264 	tcpm_queue_vdm_unlocked(port, header, NULL, 0);
/*
 * tcpm_altmode_vdm - typec_altmode_ops .vdm hook: queue a raw VDM built by
 * the altmode driver.  @count includes the header word, so count - 1 data
 * words are queued.
 */
2268 static int tcpm_altmode_vdm(struct typec_altmode *altmode,
2269 			    u32 header, const u32 *data, int count)
2271 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2273 	tcpm_queue_vdm_unlocked(port, header, data, count - 1);
/* Altmode operations exposed to the typec class for port-registered altmodes */
2278 static const struct typec_altmode_ops tcpm_altmode_ops = {
2279 	.enter = tcpm_altmode_enter,
2280 	.exit = tcpm_altmode_exit,
2281 	.vdm = tcpm_altmode_vdm,
2285 * PD (data, control) command handling functions
/* Ready state matching the current power role (SRC_READY as source; the
 * sink return is elided from this listing). */
2287 static inline enum tcpm_state ready_state(struct tcpm_port *port)
2289 	if (port->pwr_role == TYPEC_SOURCE)
2295 static int tcpm_pd_send_control(struct tcpm_port *port,
2296 enum pd_ctrl_msg_type type);
/*
 * tcpm_handle_alert - handle a received PD Alert message: for non-battery
 * alerts, start a Get_Status exchange (sources go through a full AMS start,
 * sinks set the AMS directly); everything else is answered Not_Supported.
 */
2298 static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
2301 	u32 p0 = le32_to_cpu(payload[0]);
2302 	unsigned int type = usb_pd_ado_type(p0);
2305 		tcpm_log(port, "Alert message received with no type");
2306 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2310 	/* Just handling non-battery alerts for now */
2311 	if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
2312 		if (port->pwr_role == TYPEC_SOURCE) {
2313 			port->upcoming_state = GET_STATUS_SEND;
2314 			tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
2317 			 * Do not check SinkTxOk here in case the Source doesn't set its Rp to
2320 			port->ams = GETTING_SOURCE_SINK_STATUS;
2321 			tcpm_set_state(port, GET_STATUS_SEND, 0);
2324 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
/*
 * tcpm_set_auto_vbus_discharge_threshold - forward the auto-discharge
 * threshold to the TCPC driver, if the optional callback is implemented,
 * and log the outcome.  (The return for the missing-callback case is
 * elided from this listing.)
 */
2328 static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
2329 						  enum typec_pwr_opmode mode, bool pps_active,
2330 						  u32 requested_vbus_voltage)
2334 	if (!port->tcpc->set_auto_vbus_discharge_threshold)
2337 	ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
2338 							    requested_vbus_voltage);
2339 	tcpm_log_force(port,
2340 		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
2341 		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);
/*
 * tcpm_pd_handle_state - transition to @state as part of @ams, respecting
 * AMS rules: immediate when in a ready state; hard reset if interrupted
 * mid power-transition (PD 3.0 8.3.3.4.1.1 / 6.8.1); soft reset when the
 * current AMS is non-interruptible; otherwise defer @state via
 * upcoming_state/next_ams until the port returns to ready.
 */
2346 static void tcpm_pd_handle_state(struct tcpm_port *port,
2347 				 enum tcpm_state state,
2349 				 unsigned int delay_ms)
2351 	switch (port->state) {
2355 		tcpm_set_state(port, state, delay_ms);
2357 	/* 8.3.3.4.1.1 and 6.8.1 power transitioning */
2358 	case SNK_TRANSITION_SINK:
2359 	case SNK_TRANSITION_SINK_VBUS:
2360 	case SRC_TRANSITION_SUPPLY:
2361 		tcpm_set_state(port, HARD_RESET_SEND, 0);
2364 		if (!tcpm_ams_interruptible(port)) {
2365 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
2366 				       SRC_SOFT_RESET_WAIT_SNK_TX :
2370 			/* process the Message 6.8.1 */
2371 			port->upcoming_state = state;
2372 			port->next_ams = ams;
2373 			tcpm_set_state(port, ready_state(port), delay_ms);
/*
 * tcpm_pd_handle_msg - queue an outgoing @message as part of @ams, with
 * the same AMS gating as tcpm_pd_handle_state(): immediate when ready,
 * hard reset if mid power-transition, soft reset when the current AMS is
 * non-interruptible, else queue after returning to the ready state.
 */
2379 static void tcpm_pd_handle_msg(struct tcpm_port *port,
2380 			       enum pd_msg_request message,
2383 	switch (port->state) {
2387 		tcpm_queue_message(port, message);
2389 	/* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
2390 	case SNK_TRANSITION_SINK:
2391 	case SNK_TRANSITION_SINK_VBUS:
2392 	case SRC_TRANSITION_SUPPLY:
2393 		tcpm_set_state(port, HARD_RESET_SEND, 0);
2396 		if (!tcpm_ams_interruptible(port)) {
2397 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
2398 				       SRC_SOFT_RESET_WAIT_SNK_TX :
2402 			port->next_ams = ams;
2403 			tcpm_set_state(port, ready_state(port), 0);
2404 			/* 6.8.1 process the Message */
2405 			tcpm_queue_message(port, message);
/*
 * tcpm_register_source_caps - publish the partner's source PDOs through
 * the usb_power_delivery class, creating the partner PD object on first
 * use.  Returns 0 on success or a PTR_ERR-derived error.
 */
2411 static int tcpm_register_source_caps(struct tcpm_port *port)
2413 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
2414 	struct usb_power_delivery_capabilities_desc caps = { };
2415 	struct usb_power_delivery_capabilities *cap;
2417 	if (!port->partner_pd)
2418 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
2419 	if (IS_ERR(port->partner_pd))
2420 		return PTR_ERR(port->partner_pd);
2422 	memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
2423 	caps.role = TYPEC_SOURCE;
2425 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
2427 		return PTR_ERR(cap);
2429 	port->partner_source_caps = cap;
/*
 * tcpm_register_sink_caps - publish the partner's sink PDOs through the
 * usb_power_delivery class; mirrors tcpm_register_source_caps() with
 * role TYPEC_SINK.
 */
2434 static int tcpm_register_sink_caps(struct tcpm_port *port)
2436 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
2437 	struct usb_power_delivery_capabilities_desc caps = { };
2438 	struct usb_power_delivery_capabilities *cap;
2440 	if (!port->partner_pd)
2441 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
2442 	if (IS_ERR(port->partner_pd))
2443 		return PTR_ERR(port->partner_pd);
2445 	memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
2446 	caps.role = TYPEC_SINK;
2448 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
2450 		return PTR_ERR(cap);
2452 	port->partner_sink_caps = cap;
/*
 * tcpm_pd_data_request - dispatch a received PD data message by type:
 * Source_Capabilities, Request, Sink_Capabilities, Vendor_Defined, BIST,
 * Alert, and the currently-unsupported Battery_Status/Get_Country_Info.
 * A data message other than Vendor_Defined aborts any in-flight VDM AMS.
 */
2457 static void tcpm_pd_data_request(struct tcpm_port *port,
2458 				 const struct pd_message *msg)
2460 	enum pd_data_msg_type type = pd_header_type_le(msg->header);
2461 	unsigned int cnt = pd_header_cnt_le(msg->header);
2462 	unsigned int rev = pd_header_rev_le(msg->header);
2464 	enum frs_typec_current partner_frs_current;
/* Any non-VDM data message interrupts a VDM AMS */
2468 	if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
2469 		port->vdm_state = VDM_STATE_ERR_BUSY;
2470 		tcpm_ams_finish(port);
2471 		mod_vdm_delayed_work(port, 0);
2475 	case PD_DATA_SOURCE_CAP:
2476 		for (i = 0; i < cnt; i++)
2477 			port->source_caps[i] = le32_to_cpu(msg->payload[i]);
2479 		port->nr_source_caps = cnt;
2481 		tcpm_log_source_caps(port);
2483 		tcpm_validate_caps(port, port->source_caps,
2484 				   port->nr_source_caps);
2486 		tcpm_register_source_caps(port);
2489 		 * Adjust revision in subsequent message headers, as required,
2490 		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
2491 		 * support Rev 1.0 so just do nothing in that scenario.
2493 		if (rev == PD_REV10) {
2494 			if (port->ams == GET_SOURCE_CAPABILITIES)
2495 				tcpm_ams_finish(port);
2499 		if (rev < PD_MAX_REV)
2500 			port->negotiated_rev = rev;
2502 		if (port->pwr_role == TYPEC_SOURCE) {
2503 			if (port->ams == GET_SOURCE_CAPABILITIES)
2504 				tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
2505 			/* Unexpected Source Capabilities */
2507 				tcpm_pd_handle_msg(port,
2508 						   port->negotiated_rev < PD_REV30 ?
2509 						   PD_MSG_CTRL_REJECT :
2510 						   PD_MSG_CTRL_NOT_SUPP,
2512 		} else if (port->state == SNK_WAIT_CAPABILITIES) {
2514 		 * This message may be received even if VBUS is not
2515 		 * present. This is quite unexpected; see USB PD
2516 		 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
2517 		 * However, at the same time, we must be ready to
2518 		 * receive this message and respond to it 15ms after
2519 		 * receiving PS_RDY during power swap operations, no matter
2520 		 * if VBUS is available or not (USB PD specification,
2522 		 * So we need to accept the message either way,
2523 		 * but be prepared to keep waiting for VBUS after it was
2526 			port->ams = POWER_NEGOTIATION;
2527 			port->in_ams = true;
2528 			tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
2530 			if (port->ams == GET_SOURCE_CAPABILITIES)
2531 				tcpm_ams_finish(port);
2532 			tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
2533 					     POWER_NEGOTIATION, 0);
2536 	case PD_DATA_REQUEST:
2538 		 * Adjust revision in subsequent message headers, as required,
2539 		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
2540 		 * support Rev 1.0 so just reject in that scenario.
2542 		if (rev == PD_REV10) {
2543 			tcpm_pd_handle_msg(port,
2544 					   port->negotiated_rev < PD_REV30 ?
2545 					   PD_MSG_CTRL_REJECT :
2546 					   PD_MSG_CTRL_NOT_SUPP,
2551 		if (rev < PD_MAX_REV)
2552 			port->negotiated_rev = rev;
/* Requests are only valid toward a source, and carry exactly one RDO */
2554 		if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
2555 			tcpm_pd_handle_msg(port,
2556 					   port->negotiated_rev < PD_REV30 ?
2557 					   PD_MSG_CTRL_REJECT :
2558 					   PD_MSG_CTRL_NOT_SUPP,
2563 		port->sink_request = le32_to_cpu(msg->payload[0]);
/* VDM AMS in flight under explicit contract: ask the sink to Wait */
2565 		if (port->vdm_sm_running && port->explicit_contract) {
2566 			tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
2570 		if (port->state == SRC_SEND_CAPABILITIES)
2571 			tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
2573 			tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
2574 					     POWER_NEGOTIATION, 0);
2576 	case PD_DATA_SINK_CAP:
2577 		/* We don't do anything with this at the moment... */
2578 		for (i = 0; i < cnt; i++)
2579 			port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
/* Evaluate partner Fast Role Swap current and enable FRS if compatible */
2581 		partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
2582 			PDO_FIXED_FRS_CURR_SHIFT;
2583 		frs_enable = partner_frs_current && (partner_frs_current <=
2584 						     port->new_source_frs_current);
2586 			 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
2587 			 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n');
2589 			ret = port->tcpc->enable_frs(port->tcpc, true);
2590 			tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
2593 		port->nr_sink_caps = cnt;
2594 		port->sink_cap_done = true;
2595 		tcpm_register_sink_caps(port);
2597 		if (port->ams == GET_SINK_CAPABILITIES)
2598 			tcpm_set_state(port, ready_state(port), 0);
2599 		/* Unexpected Sink Capabilities */
2601 			tcpm_pd_handle_msg(port,
2602 					   port->negotiated_rev < PD_REV30 ?
2603 					   PD_MSG_CTRL_REJECT :
2604 					   PD_MSG_CTRL_NOT_SUPP,
2607 	case PD_DATA_VENDOR_DEF:
2608 		tcpm_handle_vdm_request(port, msg->payload, cnt);
2611 		port->bist_request = le32_to_cpu(msg->payload[0]);
2612 		tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
/* Alert outside a ready state forces a soft reset first */
2615 		if (port->state != SRC_READY && port->state != SNK_READY)
2616 			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
2617 					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
2620 			tcpm_handle_alert(port, msg->payload, cnt);
2622 	case PD_DATA_BATT_STATUS:
2623 	case PD_DATA_GET_COUNTRY_INFO:
2624 		/* Currently unsupported */
2625 		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
2626 				   PD_MSG_CTRL_REJECT :
2627 				   PD_MSG_CTRL_NOT_SUPP,
2631 		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
2632 				   PD_MSG_CTRL_REJECT :
2633 				   PD_MSG_CTRL_NOT_SUPP,
2635 		tcpm_log(port, "Unrecognized data message type %#x", type);
/*
 * tcpm_pps_complete - finish a pending PPS request: record @result in
 * pps_status and wake whoever is waiting on pps_complete.
 */
2640 static void tcpm_pps_complete(struct tcpm_port *port, int result)
2642 	if (port->pps_pending) {
2643 		port->pps_status = result;
2644 		port->pps_pending = false;
2645 		complete(&port->pps_complete);
/*
 * Handle a received PD Control Message (GoodCRC, Accept, Reject/Wait/
 * Not_Supported, PS_RDY, Soft_Reset, the swap requests, ...).  Dispatch
 * is first on the message type, then often on the current state-machine
 * state, since the same control message means different things in
 * different transitional states.
 * NOTE(review): this file's extraction has dropped interior lines
 * (braces, break statements, some case labels); the comments below
 * describe only the logic visible here.
 */
2649 static void tcpm_pd_ctrl_request(struct tcpm_port *port,
2650 const struct pd_message *msg)
2652 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
2653 enum tcpm_state next_state;
2656 * Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in
2657 * VDM AMS if waiting for VDM responses and will be handled later.
2659 if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
2660 port->vdm_state = VDM_STATE_ERR_BUSY;
2661 tcpm_ams_finish(port);
2662 mod_vdm_delayed_work(port, 0);
2666 case PD_CTRL_GOOD_CRC:
/* Capability queries: queue the matching data message as a reply. */
2669 case PD_CTRL_GET_SOURCE_CAP:
2670 tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
2672 case PD_CTRL_GET_SINK_CAP:
2673 tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
2675 case PD_CTRL_GOTO_MIN:
/* PS_RDY: the partner's power supply finished its transition; the
 * action depends on which transitional state we are currently in. */
2677 case PD_CTRL_PS_RDY:
2678 switch (port->state) {
2679 case SNK_TRANSITION_SINK:
2680 if (port->vbus_present) {
2681 tcpm_set_current_limit(port,
2682 port->req_current_limit,
2683 port->req_supply_voltage);
2684 port->explicit_contract = true;
2685 tcpm_set_auto_vbus_discharge_threshold(port,
2687 port->pps_data.active,
2688 port->supply_voltage);
2689 tcpm_set_state(port, SNK_READY, 0);
2692 * Seen after power swap. Keep waiting for VBUS
2693 * in a transitional state.
2695 tcpm_set_state(port,
2696 SNK_TRANSITION_SINK_VBUS, 0);
2699 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
2700 tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
2702 case PR_SWAP_SNK_SRC_SINK_OFF:
2703 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
2705 case VCONN_SWAP_WAIT_FOR_VCONN:
2706 tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
2708 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
2709 tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
/* Unexpected PS_RDY: recover the link with a Soft Reset. */
2712 tcpm_pd_handle_state(port,
2713 port->pwr_role == TYPEC_SOURCE ?
2714 SRC_SOFT_RESET_WAIT_SNK_TX :
/* Reject / Wait / Not_Supported: the partner declined (or deferred)
 * whatever we last sent; unwind the matching pending operation. */
2720 case PD_CTRL_REJECT:
2722 case PD_CTRL_NOT_SUPP:
2723 switch (port->state) {
2724 case SNK_NEGOTIATE_CAPABILITIES:
2725 /* USB PD specification, Figure 8-43 */
2726 if (port->explicit_contract)
2727 next_state = SNK_READY;
2729 next_state = SNK_WAIT_CAPABILITIES;
2731 /* Threshold was relaxed before sending Request. Restore it back. */
2732 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
2733 port->pps_data.active,
2734 port->supply_voltage);
2735 tcpm_set_state(port, next_state, 0);
2737 case SNK_NEGOTIATE_PPS_CAPABILITIES:
2738 /* Revert data back from any requested PPS updates */
2739 port->pps_data.req_out_volt = port->supply_voltage;
2740 port->pps_data.req_op_curr = port->current_limit;
/* Wait means "retry later" (-EAGAIN); Reject/Not_Supported is final. */
2741 port->pps_status = (type == PD_CTRL_WAIT ?
2742 -EAGAIN : -EOPNOTSUPP);
2744 /* Threshold was relaxed before sending Request. Restore it back. */
2745 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
2746 port->pps_data.active,
2747 port->supply_voltage);
2749 tcpm_set_state(port, SNK_READY, 0);
2752 port->swap_status = (type == PD_CTRL_WAIT ?
2753 -EAGAIN : -EOPNOTSUPP);
2754 tcpm_set_state(port, DR_SWAP_CANCEL, 0);
2757 port->swap_status = (type == PD_CTRL_WAIT ?
2758 -EAGAIN : -EOPNOTSUPP);
2759 tcpm_set_state(port, PR_SWAP_CANCEL, 0);
2761 case VCONN_SWAP_SEND:
2762 port->swap_status = (type == PD_CTRL_WAIT ?
2763 -EAGAIN : -EOPNOTSUPP);
2764 tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
2767 tcpm_set_state(port, FR_SWAP_CANCEL, 0);
2770 port->sink_cap_done = true;
2771 tcpm_set_state(port, ready_state(port), 0);
2774 * Some port partners do not support GET_STATUS, avoid soft reset the link to
2775 * prevent redundant power re-negotiation
2777 case GET_STATUS_SEND:
2778 tcpm_set_state(port, ready_state(port), 0);
2782 if (port->vdm_state > VDM_STATE_READY) {
2783 port->vdm_state = VDM_STATE_DONE;
2784 if (tcpm_vdm_ams(port))
2785 tcpm_ams_finish(port);
2786 mod_vdm_delayed_work(port, 0);
2791 tcpm_pd_handle_state(port,
2792 port->pwr_role == TYPEC_SOURCE ?
2793 SRC_SOFT_RESET_WAIT_SNK_TX :
/* Accept: the partner agreed to our pending request or swap. */
2799 case PD_CTRL_ACCEPT:
2800 switch (port->state) {
2801 case SNK_NEGOTIATE_CAPABILITIES:
2802 port->pps_data.active = false;
2803 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
2805 case SNK_NEGOTIATE_PPS_CAPABILITIES:
/* Latch the negotiated PPS envelope from the requested values. */
2806 port->pps_data.active = true;
2807 port->pps_data.min_volt = port->pps_data.req_min_volt;
2808 port->pps_data.max_volt = port->pps_data.req_max_volt;
2809 port->pps_data.max_curr = port->pps_data.req_max_curr;
2810 port->req_supply_voltage = port->pps_data.req_out_volt;
2811 port->req_current_limit = port->pps_data.req_op_curr;
2812 power_supply_changed(port->psy);
2813 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
2815 case SOFT_RESET_SEND:
2816 if (port->ams == SOFT_RESET_AMS)
2817 tcpm_ams_finish(port);
2818 if (port->pwr_role == TYPEC_SOURCE) {
2819 port->upcoming_state = SRC_SEND_CAPABILITIES;
2820 tcpm_ams_start(port, POWER_NEGOTIATION);
2822 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2826 tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
2829 tcpm_set_state(port, PR_SWAP_START, 0);
2831 case VCONN_SWAP_SEND:
2832 tcpm_set_state(port, VCONN_SWAP_START, 0);
2835 tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
2838 tcpm_pd_handle_state(port,
2839 port->pwr_role == TYPEC_SOURCE ?
2840 SRC_SOFT_RESET_WAIT_SNK_TX :
2846 case PD_CTRL_SOFT_RESET:
2847 port->ams = SOFT_RESET_AMS;
2848 tcpm_set_state(port, SOFT_RESET, 0);
2850 case PD_CTRL_DR_SWAP:
2853 * 6.3.9: If an alternate mode is active, a request to swap
2854 * alternate modes shall trigger a port reset.
2856 if (port->typec_caps.data != TYPEC_PORT_DRD) {
2857 tcpm_pd_handle_msg(port,
2858 port->negotiated_rev < PD_REV30 ?
2859 PD_MSG_CTRL_REJECT :
2860 PD_MSG_CTRL_NOT_SUPP,
/* On PD2.0, defer the swap while Discover Identity is outstanding. */
2863 if (port->send_discover && port->negotiated_rev < PD_REV30) {
2864 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
2868 tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
2871 case PD_CTRL_PR_SWAP:
2872 if (port->port_type != TYPEC_PORT_DRP) {
2873 tcpm_pd_handle_msg(port,
2874 port->negotiated_rev < PD_REV30 ?
2875 PD_MSG_CTRL_REJECT :
2876 PD_MSG_CTRL_NOT_SUPP,
2879 if (port->send_discover && port->negotiated_rev < PD_REV30) {
2880 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
2884 tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
2887 case PD_CTRL_VCONN_SWAP:
2888 if (port->send_discover && port->negotiated_rev < PD_REV30) {
2889 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
2893 tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
2895 case PD_CTRL_GET_SOURCE_CAP_EXT:
2896 case PD_CTRL_GET_STATUS:
2897 case PD_CTRL_FR_SWAP:
2898 case PD_CTRL_GET_PPS_STATUS:
2899 case PD_CTRL_GET_COUNTRY_CODES:
2900 /* Currently not supported */
/* Reject on PD2.0 links, Not_Supported on PD3.0 and newer. */
2901 tcpm_pd_handle_msg(port,
2902 port->negotiated_rev < PD_REV30 ?
2903 PD_MSG_CTRL_REJECT :
2904 PD_MSG_CTRL_NOT_SUPP,
2908 tcpm_pd_handle_msg(port,
2909 port->negotiated_rev < PD_REV30 ?
2910 PD_MSG_CTRL_REJECT :
2911 PD_MSG_CTRL_NOT_SUPP,
2913 tcpm_log(port, "Unrecognized ctrl message type %#x", type);
/*
 * Handle a received PD Extended Message.  Only (chunked) Status /
 * PPS_Status are acted upon here; unchunked extended messages, oversized
 * chunks and all other extended types are answered with Not_Supported.
 */
2918 static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
2919 const struct pd_message *msg)
2921 enum pd_ext_msg_type type = pd_header_type_le(msg->header);
2922 unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
2924 /* stopping VDM state machine if interrupted by other Messages */
2925 if (tcpm_vdm_ams(port)) {
2926 port->vdm_state = VDM_STATE_ERR_BUSY;
2927 tcpm_ams_finish(port);
2928 mod_vdm_delayed_work(port, 0);
2931 if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
2932 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2933 tcpm_log(port, "Unchunked extended messages unsupported");
/* Messages larger than one chunk would require chunking support. */
2937 if (data_size > PD_EXT_MAX_CHUNK_DATA) {
2938 tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
2939 tcpm_log(port, "Chunk handling not yet supported");
2945 case PD_EXT_PPS_STATUS:
2946 if (port->ams == GETTING_SOURCE_SINK_STATUS) {
2947 tcpm_ams_finish(port);
2948 tcpm_set_state(port, ready_state(port), 0);
2950 /* unexpected Status or PPS_Status Message */
2951 tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
2952 SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
2956 case PD_EXT_SOURCE_CAP_EXT:
2957 case PD_EXT_GET_BATT_CAP:
2958 case PD_EXT_GET_BATT_STATUS:
2959 case PD_EXT_BATT_CAP:
2960 case PD_EXT_GET_MANUFACTURER_INFO:
2961 case PD_EXT_MANUFACTURER_INFO:
2962 case PD_EXT_SECURITY_REQUEST:
2963 case PD_EXT_SECURITY_RESPONSE:
2964 case PD_EXT_FW_UPDATE_REQUEST:
2965 case PD_EXT_FW_UPDATE_RESPONSE:
2966 case PD_EXT_COUNTRY_INFO:
2967 case PD_EXT_COUNTRY_CODES:
2968 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2971 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2972 tcpm_log(port, "Unrecognized extended message type %#x", type);
/*
 * Deferred-work handler for a received PD message (queued by
 * tcpm_pd_receive()).  Runs under port->lock: filters duplicate
 * MessageIDs, detects data-role mismatches, then dispatches to the
 * extended / data / control message handlers.
 */
2977 static void tcpm_pd_rx_handler(struct kthread_work *work)
2979 struct pd_rx_event *event = container_of(work,
2980 struct pd_rx_event, work);
2981 const struct pd_message *msg = &event->msg;
2982 unsigned int cnt = pd_header_cnt_le(msg->header);
2983 struct tcpm_port *port = event->port;
2985 mutex_lock(&port->lock);
2987 tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
2990 if (port->attached) {
2991 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
2992 unsigned int msgid = pd_header_msgid_le(msg->header);
2995 * USB PD standard, 6.6.1.2:
2996 * "... if MessageID value in a received Message is the
2997 * same as the stored value, the receiver shall return a
2998 * GoodCRC Message with that MessageID value and drop
2999 * the Message (this is a retry of an already received
3000 * Message). Note: this shall not apply to the Soft_Reset
3001 * Message which always has a MessageID value of zero."
3003 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
3005 port->rx_msgid = msgid;
3008 * If both ends believe to be DFP/host, we have a data role
3011 if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
3012 (port->data_role == TYPEC_HOST)) {
3014 "Data role mismatch, initiating error recovery");
3015 tcpm_set_state(port, ERROR_RECOVERY, 0);
/* Dispatch: extended header first, then data (cnt > 0) vs control. */
3017 if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
3018 tcpm_pd_ext_msg_request(port, msg);
3020 tcpm_pd_data_request(port, msg);
3022 tcpm_pd_ctrl_request(port, msg);
3027 mutex_unlock(&port->lock);
/*
 * Entry point for low-level TCPC drivers to hand a received PD message
 * to TCPM.  Copies the message into a heap-allocated event (GFP_ATOMIC,
 * so callable from atomic context) and defers processing to the port's
 * kthread worker.  NOTE(review): the allocation-failure check sits in a
 * line elided from this extraction — confirm against the full source.
 */
3031 void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
3033 struct pd_rx_event *event;
3035 event = kzalloc(sizeof(*event), GFP_ATOMIC);
3039 kthread_init_work(&event->work, tcpm_pd_rx_handler);
3041 memcpy(&event->msg, msg, sizeof(*msg));
3042 kthread_queue_work(port->wq, &event->work);
3044 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
/*
 * Build and transmit a zero-payload control message of @type on SOP,
 * using the port's current roles, negotiated revision and message ID.
 * Returns the result of tcpm_pd_transmit().
 */
3046 static int tcpm_pd_send_control(struct tcpm_port *port,
3047 enum pd_ctrl_msg_type type)
3049 struct pd_message msg;
3051 memset(&msg, 0, sizeof(msg));
3052 msg.header = PD_HEADER_LE(type, port->pwr_role,
3054 port->negotiated_rev,
3055 port->message_id, 0);
3057 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
3061 * Send queued message without affecting state.
3062 * Return true if state machine should go back to sleep,
3065 static bool tcpm_send_queued_message(struct tcpm_port *port)
3067 enum pd_msg_request queued_message;
/* Drain port->queued_message; new messages may be queued while a
 * previous one is being sent, hence the do/while at the bottom. */
3071 queued_message = port->queued_message;
3072 port->queued_message = PD_MSG_NONE;
3074 switch (queued_message) {
3075 case PD_MSG_CTRL_WAIT:
3076 tcpm_pd_send_control(port, PD_CTRL_WAIT);
3078 case PD_MSG_CTRL_REJECT:
3079 tcpm_pd_send_control(port, PD_CTRL_REJECT);
3081 case PD_MSG_CTRL_NOT_SUPP:
3082 tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
3084 case PD_MSG_DATA_SINK_CAP:
3085 ret = tcpm_pd_send_sink_caps(port);
3087 tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
3088 tcpm_set_state(port, SNK_SOFT_RESET, 0);
3090 tcpm_ams_finish(port);
3092 case PD_MSG_DATA_SOURCE_CAP:
3093 ret = tcpm_pd_send_source_caps(port);
3096 "Unable to send src caps, ret=%d",
3098 tcpm_set_state(port, SOFT_RESET_SEND, 0);
3099 } else if (port->pwr_role == TYPEC_SOURCE) {
3100 tcpm_ams_finish(port);
/* As source, wait up to tSenderResponse for the sink's Request. */
3101 tcpm_set_state(port, HARD_RESET_SEND,
3102 PD_T_SENDER_RESPONSE);
3104 tcpm_ams_finish(port);
3110 } while (port->queued_message != PD_MSG_NONE);
/* Re-arm any delayed state transition that was pending before we were
 * woken up to send the queued message. */
3112 if (port->delayed_state != INVALID_STATE) {
3113 if (ktime_after(port->delayed_runtime, ktime_get())) {
3114 mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
3118 port->delayed_state = INVALID_STATE;
/*
 * Validate the sink's Request (RDO in port->sink_request) against our
 * advertised source PDOs: the requested object position must exist and
 * the requested max current/power must not exceed the PDO's limit
 * unless the Capability Mismatch bit is set.  Records whether the
 * request is for the vSafe5V object (index 1).
 */
3123 static int tcpm_pd_check_request(struct tcpm_port *port)
3125 u32 pdo, rdo = port->sink_request;
3126 unsigned int max, op, pdo_max, index;
3127 enum pd_pdo_type type;
/* Object position in an RDO is 1-based. */
3129 index = rdo_index(rdo);
3130 if (!index || index > port->nr_src_pdo)
3133 pdo = port->src_pdo[index - 1];
3134 type = pdo_type(pdo);
3136 case PDO_TYPE_FIXED:
/* Fixed/variable PDOs are validated in terms of current (mA). */
3138 max = rdo_max_current(rdo);
3139 op = rdo_op_current(rdo);
3140 pdo_max = pdo_max_current(pdo);
3144 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3147 if (type == PDO_TYPE_FIXED)
3149 "Requested %u mV, %u mA for %u / %u mA",
3150 pdo_fixed_voltage(pdo), pdo_max, op, max);
3153 "Requested %u -> %u mV, %u mA for %u / %u mA",
3154 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
/* Battery PDOs are validated in terms of power (mW). */
3158 max = rdo_max_power(rdo);
3159 op = rdo_op_power(rdo);
3160 pdo_max = pdo_max_power(pdo);
3164 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3167 "Requested %u -> %u mV, %u mW for %u / %u mW",
3168 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3175 port->op_vsafe5v = index == 1;
/* Lesser of two PDOs' max power / max current — used to honour both the
 * source's offer and our own sink capability when building a Request. */
3180 #define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
3181 #define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
/*
 * Pick the best (source PDO, sink PDO) pair: iterate all advertised
 * source PDOs, compute each one's deliverable power, and keep the
 * highest-power offer whose voltage range fits inside one of our sink
 * PDOs (preferring higher voltage on a power tie).  Also records
 * whether the source advertises PPS.  Outputs go through the
 * *sink_pdo / *src_pdo index pointers (second one elided from view).
 */
3183 static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
3186 unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
3187 max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
3191 port->pps_data.supported = false;
3192 port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
3193 power_supply_changed(port->psy);
3196 * Select the source PDO providing the most power which has a
3199 for (i = 0; i < port->nr_source_caps; i++) {
3200 u32 pdo = port->source_caps[i];
3201 enum pd_pdo_type type = pdo_type(pdo);
/* First pass per PDO: extract its voltage range. */
3204 case PDO_TYPE_FIXED:
3205 max_src_mv = pdo_fixed_voltage(pdo);
3206 min_src_mv = max_src_mv;
3210 max_src_mv = pdo_max_voltage(pdo);
3211 min_src_mv = pdo_min_voltage(pdo);
3214 if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
3215 port->pps_data.supported = true;
3217 POWER_SUPPLY_USB_TYPE_PD_PPS;
3218 power_supply_changed(port->psy);
3222 tcpm_log(port, "Invalid source PDO type, ignoring");
/* Second pass: compute the PDO's deliverable power in mW. */
3227 case PDO_TYPE_FIXED:
3229 src_ma = pdo_max_current(pdo);
3230 src_mw = src_ma * min_src_mv / 1000;
3233 src_mw = pdo_max_power(pdo);
3238 tcpm_log(port, "Invalid source PDO type, ignoring");
/* Match against each of our sink PDOs' voltage windows. */
3242 for (j = 0; j < port->nr_snk_pdo; j++) {
3243 pdo = port->snk_pdo[j];
3245 switch (pdo_type(pdo)) {
3246 case PDO_TYPE_FIXED:
3247 max_snk_mv = pdo_fixed_voltage(pdo);
3248 min_snk_mv = max_snk_mv;
3252 max_snk_mv = pdo_max_voltage(pdo);
3253 min_snk_mv = pdo_min_voltage(pdo);
3258 tcpm_log(port, "Invalid sink PDO type, ignoring");
3262 if (max_src_mv <= max_snk_mv &&
3263 min_src_mv >= min_snk_mv) {
3264 /* Prefer higher voltages if available */
3265 if ((src_mw == max_mw && min_src_mv > max_mv) ||
3270 max_mv = min_src_mv;
/*
 * Select the PPS APDO that can deliver the most operating power at the
 * requested output voltage (req_out_volt) and current (req_op_curr).
 * Skips index 0 (always the vSafe5V fixed PDO) and APDOs whose voltage
 * range does not cover the requested output.  On success, latches the
 * chosen APDO's limits into port->pps_data.req_* and clamps the
 * requested current to the APDO maximum; returns the APDO index.
 */
3280 static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
3282 unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
3283 unsigned int src_pdo = 0;
3286 for (i = 1; i < port->nr_source_caps; ++i) {
3287 pdo = port->source_caps[i];
3289 switch (pdo_type(pdo)) {
3291 if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
3292 tcpm_log(port, "Not PPS APDO (source), ignoring");
3296 if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
3297 port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
/* Operating power achievable with this APDO at the requested volt. */
3300 src_ma = pdo_pps_apdo_max_current(pdo);
3301 max_op_ma = min(src_ma, port->pps_data.req_op_curr);
3302 op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
3303 if (op_mw > max_temp_mw) {
3305 max_temp_mw = op_mw;
3309 tcpm_log(port, "Not APDO type (source), ignoring");
3315 src = port->source_caps[src_pdo];
3317 port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
3318 port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
3319 port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
3320 port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
3321 port->pps_data.req_op_curr);
/*
 * Build the Request Data Object (*rdo) for the source PDO chosen by
 * tcpm_pd_select_pdo(): derive voltage and max current/power from the
 * matched (source, sink) PDO pair, set the Capability Mismatch bit if
 * the offer cannot cover operating_snk_mw, and record the resulting
 * voltage/current as the port's requested operating point.
 */
3327 static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
3329 unsigned int mv, ma, mw, flags;
3330 unsigned int max_ma, max_mw;
3331 enum pd_pdo_type type;
3332 u32 pdo, matching_snk_pdo;
3333 int src_pdo_index = 0;
3334 int snk_pdo_index = 0;
3337 ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index);
3341 pdo = port->source_caps[src_pdo_index];
3342 matching_snk_pdo = port->snk_pdo[snk_pdo_index];
3343 type = pdo_type(pdo);
3346 case PDO_TYPE_FIXED:
3347 mv = pdo_fixed_voltage(pdo);
3351 mv = pdo_min_voltage(pdo);
3354 tcpm_log(port, "Invalid PDO selected!");
3358 /* Select maximum available current within the sink pdo's limit */
3359 if (type == PDO_TYPE_BATT) {
3360 mw = min_power(pdo, matching_snk_pdo);
3361 ma = 1000 * mw / mv;
3363 ma = min_current(pdo, matching_snk_pdo);
3364 mw = ma * mv / 1000;
3367 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
3369 /* Set mismatch bit if offered power is less than operating power */
3372 if (mw < port->operating_snk_mw) {
3373 flags |= RDO_CAP_MISMATCH;
/* With a mismatch, advertise our own (higher) sink limit as max. */
3374 if (type == PDO_TYPE_BATT &&
3375 (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
3376 max_mw = pdo_max_power(matching_snk_pdo);
3377 else if (pdo_max_current(matching_snk_pdo) >
3378 pdo_max_current(pdo))
3379 max_ma = pdo_max_current(matching_snk_pdo);
3382 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
3383 port->cc_req, port->cc1, port->cc2, port->vbus_source,
3384 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
/* RDO object position is 1-based, hence the +1. */
3387 if (type == PDO_TYPE_BATT) {
3388 *rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);
3390 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
3391 src_pdo_index, mv, mw,
3392 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
3394 *rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);
3396 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
3397 src_pdo_index, mv, ma,
3398 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
3401 port->req_current_limit = ma;
3402 port->req_supply_voltage = mv;
/*
 * Build and transmit a Request data message for the selected source
 * PDO.  The auto-discharge threshold is relaxed first, since the supply
 * voltage will change after the source's Accept plus tSrcTransition.
 */
3407 static int tcpm_pd_send_request(struct tcpm_port *port)
3409 struct pd_message msg;
3413 ret = tcpm_pd_build_request(port, &rdo);
3418 * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
3419 * It is safer to modify the threshold here.
3421 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
3423 memset(&msg, 0, sizeof(msg));
3424 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
3427 port->negotiated_rev,
3428 port->message_id, 1);
3429 msg.payload[0] = cpu_to_le32(rdo);
3431 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
/*
 * Build the programmable RDO (*rdo) for the PPS APDO chosen by
 * tcpm_pd_select_pps_apdo().  If the requested operating point does not
 * meet operating_snk_mw, first raise the current (rounded up to the
 * RDO current step), then the voltage (rounded up to the voltage step);
 * fail if the APDO still cannot cover the need.  The final operating
 * point is stored back into port->pps_data.req_*.
 */
3434 static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
3436 unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
3437 unsigned int src_pdo_index;
3439 src_pdo_index = tcpm_pd_select_pps_apdo(port);
3443 max_mv = port->pps_data.req_max_volt;
3444 max_ma = port->pps_data.req_max_curr;
3445 out_mv = port->pps_data.req_out_volt;
3446 op_ma = port->pps_data.req_op_curr;
3448 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
3450 op_mw = (op_ma * out_mv) / 1000;
3451 if (op_mw < port->operating_snk_mw) {
3453 * Try raising current to meet power needs. If that's not enough
3454 * then try upping the voltage. If that's still not enough
3455 * then we've obviously chosen a PPS APDO which really isn't
3456 * suitable so abandon ship.
3458 op_ma = (port->operating_snk_mw * 1000) / out_mv;
/* Round up to the next RDO_PROG_CURR_MA_STEP on any remainder. */
3459 if ((port->operating_snk_mw * 1000) % out_mv)
3461 op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);
3463 if (op_ma > max_ma) {
3465 out_mv = (port->operating_snk_mw * 1000) / op_ma;
3466 if ((port->operating_snk_mw * 1000) % op_ma)
3468 out_mv += RDO_PROG_VOLT_MV_STEP -
3469 (out_mv % RDO_PROG_VOLT_MV_STEP);
3471 if (out_mv > max_mv) {
3472 tcpm_log(port, "Invalid PPS APDO selected!");
3478 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
3479 port->cc_req, port->cc1, port->cc2, port->vbus_source,
3480 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
/* RDO object position is 1-based, hence the +1. */
3483 *rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);
3485 tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
3486 src_pdo_index, out_mv, op_ma);
3488 port->pps_data.req_op_curr = op_ma;
3489 port->pps_data.req_out_volt = out_mv;
/*
 * Build and transmit a (PPS) Request data message for the selected
 * APDO.  Mirrors tcpm_pd_send_request() but for programmable supplies;
 * the discharge threshold is relaxed since voltage changes right after
 * Accept.
 */
3494 static int tcpm_pd_send_pps_request(struct tcpm_port *port)
3496 struct pd_message msg;
3500 ret = tcpm_pd_build_pps_request(port, &rdo);
3504 /* Relax the threshold as voltage will be adjusted right after Accept Message. */
3505 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
3507 memset(&msg, 0, sizeof(msg));
3508 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
3511 port->negotiated_rev,
3512 port->message_id, 1);
3513 msg.payload[0] = cpu_to_le32(rdo);
3515 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
/* Drive VBUS sourcing on/off via the TCPC; refuses to source while
 * sink-side charging is enabled, and caches the result in vbus_source. */
3518 static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
3522 if (enable && port->vbus_charge)
3525 tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
3527 ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
3531 port->vbus_source = enable;
/* Enable/disable sinking (charging) from VBUS; mutually exclusive with
 * sourcing VBUS.  Notifies the power-supply framework on change. */
3535 static int tcpm_set_charge(struct tcpm_port *port, bool charge)
3539 if (charge && port->vbus_source)
3542 if (charge != port->vbus_charge) {
3543 tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
3544 ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
3549 port->vbus_charge = charge;
3550 power_supply_changed(port->psy);
/* Ask the TCPC to start DRP toggling with the given Rp/Rd @cc value.
 * Returns false if the TCPC does not implement start_toggling. */
3554 static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
3558 if (!port->tcpc->start_toggling)
3561 tcpm_log_force(port, "Start toggling");
3562 ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
/* Reset VBUS to a known state: neither sourcing nor sinking. */
3566 static int tcpm_init_vbus(struct tcpm_port *port)
3570 ret = port->tcpc->set_vbus(port->tcpc, false, false);
3571 port->vbus_source = false;
3572 port->vbus_charge = false;
/* Reset VCONN to a known state: off, with our role set to sink. */
3576 static int tcpm_init_vconn(struct tcpm_port *port)
3580 ret = port->tcpc->set_vconn(port->tcpc, false);
3581 port->vconn_role = TYPEC_SINK;
/*
 * Register the partner with the Type-C class on first connect: reset
 * any stale identity, pick the accessory type from the CC state, and
 * associate the partner's USB-PD object.  Idempotent via
 * port->connected.
 */
3585 static void tcpm_typec_connect(struct tcpm_port *port)
3587 if (!port->connected) {
3588 /* Make sure we don't report stale identity information */
3589 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
3590 port->partner_desc.usb_pd = port->pd_capable;
3591 if (tcpm_port_is_debug(port))
3592 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
3593 else if (tcpm_port_is_audio(port))
3594 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
3596 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
3597 port->partner = typec_register_partner(port->typec_port,
3598 &port->partner_desc);
3599 port->connected = true;
3600 typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
/*
 * Bring the port up as a source after attach: set CC polarity (CC2 if
 * Rd was seen there), enable auto VBUS discharge, configure roles,
 * enable PD RX if supported, turn VCONN on when the non-Rd pin shows
 * Ra, and finally drive VBUS.  Unwinds via the goto-cleanup labels on
 * any failure.
 */
3604 static int tcpm_src_attach(struct tcpm_port *port)
3606 enum typec_cc_polarity polarity =
3607 port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
3608 : TYPEC_POLARITY_CC1;
3614 ret = tcpm_set_polarity(port, polarity);
3618 tcpm_enable_auto_vbus_discharge(port, true);
3620 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
3624 if (port->pd_supported) {
3625 ret = port->tcpc->set_pd_rx(port->tcpc, true);
3627 goto out_disable_mux;
3631 * USB Type-C specification, version 1.2,
3632 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
3633 * Enable VCONN only if the non-RD port is set to RA.
3635 if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
3636 (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
3637 ret = tcpm_set_vconn(port, true);
3639 goto out_disable_pd;
3642 ret = tcpm_set_vbus(port, true);
3644 goto out_disable_vconn;
3646 port->pd_capable = false;
3648 port->partner = NULL;
3650 port->attached = true;
3651 port->send_discover = true;
/* Error unwind: undo in reverse order of setup. */
3656 tcpm_set_vconn(port, false);
3658 if (port->pd_supported)
3659 port->tcpc->set_pd_rx(port->tcpc, false);
3661 tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
3662 TYPEC_ORIENTATION_NONE);
/* Counterpart of tcpm_typec_connect(): detach the PD object and
 * unregister the partner from the Type-C class.  Idempotent. */
3666 static void tcpm_typec_disconnect(struct tcpm_port *port)
3668 if (port->connected) {
3669 typec_partner_set_usb_power_delivery(port->partner, NULL);
3670 typec_unregister_partner(port->partner);
3671 port->partner = NULL;
3672 port->connected = false;
/* Unregister every partner alternate mode discovered so far and clear
 * the discovery bookkeeping in port->mode_data. */
3676 static void tcpm_unregister_altmodes(struct tcpm_port *port)
3678 struct pd_mode_data *modep = &port->mode_data;
3681 for (i = 0; i < modep->altmodes; i++) {
3682 typec_unregister_altmode(port->partner_altmode[i]);
3683 port->partner_altmode[i] = NULL;
3686 memset(modep, 0, sizeof(*modep));
/* Propagate the partner's USB-communications-capable flag to the TCPC,
 * if the driver implements the optional callback. */
3689 static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
3691 tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false");
3693 if (port->tcpc->set_partner_usb_comm_capable)
3694 port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
/*
 * Return the port to its unattached baseline: tear down AMS/VDM state,
 * alternate modes and the registered partner, disable PD RX, VBUS and
 * VCONN, reset polarity/mux/limits, and release the partner's
 * capability objects.  Called on detach and on error recovery.
 */
3697 static void tcpm_reset_port(struct tcpm_port *port)
3699 tcpm_enable_auto_vbus_discharge(port, false);
3700 port->in_ams = false;
3701 port->ams = NONE_AMS;
3702 port->vdm_sm_running = false;
3703 tcpm_unregister_altmodes(port);
3704 tcpm_typec_disconnect(port);
3705 port->attached = false;
3706 port->pd_capable = false;
3707 port->pps_data.supported = false;
3708 tcpm_set_partner_usb_comm_capable(port, false);
3711 * First Rx ID should be 0; set this to a sentinel of -1 so that
3712 * we can check tcpm_pd_rx_handler() if we had seen it before.
3714 port->rx_msgid = -1;
3716 port->tcpc->set_pd_rx(port->tcpc, false);
3717 tcpm_init_vbus(port); /* also disables charging */
3718 tcpm_init_vconn(port);
3719 tcpm_set_current_limit(port, 0, 0);
3720 tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
3721 tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
3722 TYPEC_ORIENTATION_NONE);
3723 tcpm_set_attached_state(port, false);
3724 port->try_src_count = 0;
3725 port->try_snk_count = 0;
3726 port->usb_type = POWER_SUPPLY_USB_TYPE_C;
3727 power_supply_changed(port->psy);
3728 port->nr_sink_caps = 0;
3729 port->sink_cap_done = false;
3730 if (port->tcpc->enable_frs)
3731 port->tcpc->enable_frs(port->tcpc, false);
/* Drop the partner's registered PD capability objects. */
3733 usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
3734 port->partner_sink_caps = NULL;
3735 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
3736 port->partner_source_caps = NULL;
3737 usb_power_delivery_unregister(port->partner_pd);
3738 port->partner_pd = NULL;
/* Common detach path: reset retry counters, turn off BIST test data if
 * the TCPC supports it, then fully reset the port.  No-op if already
 * unattached. */
3741 static void tcpm_detach(struct tcpm_port *port)
3743 if (tcpm_port_is_disconnected(port))
3744 port->hard_reset_count = 0;
3746 port->try_src_count = 0;
3747 port->try_snk_count = 0;
3749 if (!port->attached)
3752 if (port->tcpc->set_bist_data) {
3753 tcpm_log(port, "disable BIST MODE TESTDATA")
/* Source-side detach hook (body elided in this extraction; presumably
 * delegates to tcpm_detach() — confirm against the full source). */
3760 static void tcpm_src_detach(struct tcpm_port *port)
/*
 * Bring the port up as a sink after attach: pick polarity from which CC
 * line is terminated, enable auto VBUS discharge and set the roles.
 * The partner is registered later, once capabilities are known.
 */
3765 static int tcpm_snk_attach(struct tcpm_port *port)
3772 ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
3773 TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
3777 tcpm_enable_auto_vbus_discharge(port, true);
3779 ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
3783 port->pd_capable = false;
3785 port->partner = NULL;
3787 port->attached = true;
3788 port->send_discover = true;
/* Sink-side detach hook (body elided in this extraction; presumably
 * delegates to tcpm_detach() — confirm against the full source). */
3793 static void tcpm_snk_detach(struct tcpm_port *port)
/* Attach as a (debug/audio) accessory host: set source roles, register
 * the partner immediately, and mark the port attached. */
3798 static int tcpm_acc_attach(struct tcpm_port *port)
3805 ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
3806 tcpm_data_role_for_source(port));
3810 port->partner = NULL;
3812 tcpm_typec_connect(port);
3814 port->attached = true;
/* Accessory detach hook (body elided in this extraction; presumably
 * delegates to tcpm_detach() — confirm against the full source). */
3819 static void tcpm_acc_detach(struct tcpm_port *port)
/*
 * Pick the escalation state after a failure: retry with Hard Reset
 * while attempts remain, then error-recovery for PD-capable partners,
 * otherwise fall back to the unattached state for the current role.
 */
3824 static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
3826 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
3827 return HARD_RESET_SEND;
3828 if (port->pd_capable)
3829 return ERROR_RECOVERY;
3830 if (port->pwr_role == TYPEC_SOURCE)
3831 return SRC_UNATTACHED;
3832 if (port->state == SNK_WAIT_CAPABILITIES)
3834 return SNK_UNATTACHED;
/* Map the port type / current power role to the matching unattached
 * state (DRP keeps its present role; SRC/SNK-only are fixed). */
3837 static inline enum tcpm_state unattached_state(struct tcpm_port *port)
3839 if (port->port_type == TYPEC_PORT_DRP) {
3840 if (port->pwr_role == TYPEC_SOURCE)
3841 return SRC_UNATTACHED;
3843 return SNK_UNATTACHED;
3844 } else if (port->port_type == TYPEC_PORT_SRC) {
3845 return SRC_UNATTACHED;
3848 return SNK_UNATTACHED;
/* Complete a pending role swap: record @result, clear the pending
 * flags and wake the waiter blocked on port->swap_complete. */
3851 static void tcpm_swap_complete(struct tcpm_port *port, int result)
3853 if (port->swap_pending) {
3854 port->swap_status = result;
3855 port->swap_pending = false;
3856 port->non_pd_role_swap = false;
3857 complete(&port->swap_complete);
/* Translate the sensed CC pull-up (Rp value) into the Type-C power
 * operation mode; anything other than 1.5A/3.0A maps to default USB. */
3861 static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
3864 case TYPEC_CC_RP_1_5:
3865 return TYPEC_PWR_MODE_1_5A;
3866 case TYPEC_CC_RP_3_0:
3867 return TYPEC_PWR_MODE_3_0A;
3868 case TYPEC_CC_RP_DEF:
3870 return TYPEC_PWR_MODE_USB;
/* Inverse of tcpm_get_pwr_opmode(): pick the Rp advertisement for a
 * power operation mode; PD mode advertises Rp 3.0A as well. */
3874 static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
3877 case TYPEC_PWR_MODE_USB:
3878 return TYPEC_CC_RP_DEF;
3879 case TYPEC_PWR_MODE_1_5A:
3880 return TYPEC_CC_RP_1_5;
3881 case TYPEC_PWR_MODE_3_0A:
3882 case TYPEC_PWR_MODE_PD:
3884 return TYPEC_CC_RP_3_0;
/* Cap the partner's SVDM version according to the negotiated PD
 * revision: pre-PD3.0 links only support Structured VDM version 1.0. */
3888 static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
3890 switch (port->negotiated_rev) {
3894 * 6.4.4.2.3 Structured VDM Version
3895 * 2.0 states "At this time, there is only one version (1.0) defined.
3896 * This field Shall be set to zero to indicate Version 1.0."
3897 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
3898 * To ensure that we follow the Power Delivery revision we are currently
3899 * operating on, downgrade the SVDM version to the highest one supported
3900 * by the Power Delivery revision.
3903 typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
3906 typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
3911 static void run_state_machine(struct tcpm_port *port)
3914 enum typec_pwr_opmode opmode;
3916 enum tcpm_state upcoming_state;
3918 if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT)
3919 port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
3920 port->state == SRC_UNATTACHED) ||
3921 (port->enter_state == SNK_ATTACH_WAIT &&
3922 port->state == SNK_UNATTACHED) ||
3923 (port->enter_state == SNK_DEBOUNCED &&
3924 port->state == SNK_UNATTACHED));
3926 port->enter_state = port->state;
3927 switch (port->state) {
3930 case CHECK_CONTAMINANT:
3931 port->tcpc->check_contaminant(port->tcpc);
3934 case SRC_UNATTACHED:
3935 if (!port->non_pd_role_swap)
3936 tcpm_swap_complete(port, -ENOTCONN);
3937 tcpm_src_detach(port);
3938 if (port->potential_contaminant) {
3939 tcpm_set_state(port, CHECK_CONTAMINANT, 0);
3942 if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
3943 tcpm_set_state(port, TOGGLING, 0);
3946 tcpm_set_cc(port, tcpm_rp_cc(port));
3947 if (port->port_type == TYPEC_PORT_DRP)
3948 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
3950 case SRC_ATTACH_WAIT:
3951 if (tcpm_port_is_debug(port))
3952 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
3954 else if (tcpm_port_is_audio(port))
3955 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
3957 else if (tcpm_port_is_source(port) && port->vbus_vsafe0v)
3958 tcpm_set_state(port,
3959 tcpm_try_snk(port) ? SNK_TRY
3965 port->try_snk_count++;
3968 * - Do not drive vconn or vbus
3969 * - Terminate CC pins (both) to Rd
3971 * - Wait for tDRPTry (PD_T_DRP_TRY).
3972 * Until then, ignore any state changes.
3974 tcpm_set_cc(port, TYPEC_CC_RD);
3975 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
3978 if (tcpm_port_is_sink(port)) {
3979 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
3981 tcpm_set_state(port, SRC_TRYWAIT, 0);
3985 case SNK_TRY_WAIT_DEBOUNCE:
3986 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
3987 PD_T_TRY_CC_DEBOUNCE);
3989 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
3990 if (port->vbus_present && tcpm_port_is_sink(port))
3991 tcpm_set_state(port, SNK_ATTACHED, 0);
3996 tcpm_set_cc(port, tcpm_rp_cc(port));
3997 if (port->max_wait == 0) {
3998 port->max_wait = jiffies +
3999 msecs_to_jiffies(PD_T_DRP_TRY);
4000 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4003 if (time_is_after_jiffies(port->max_wait))
4004 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4005 jiffies_to_msecs(port->max_wait -
4008 tcpm_set_state(port, SNK_UNATTACHED, 0);
4011 case SRC_TRYWAIT_DEBOUNCE:
4012 tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
4014 case SRC_TRYWAIT_UNATTACHED:
4015 tcpm_set_state(port, SNK_UNATTACHED, 0);
4019 ret = tcpm_src_attach(port);
4020 tcpm_set_state(port, SRC_UNATTACHED,
4021 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
4024 opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
4025 typec_set_pwr_opmode(port->typec_port, opmode);
4026 port->pwr_opmode = TYPEC_PWR_MODE_USB;
4027 port->caps_count = 0;
4028 port->negotiated_rev = PD_MAX_REV;
4029 port->message_id = 0;
4030 port->rx_msgid = -1;
4031 port->explicit_contract = false;
4032 /* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
4033 if (port->ams == POWER_ROLE_SWAP ||
4034 port->ams == FAST_ROLE_SWAP)
4035 tcpm_ams_finish(port);
4036 if (!port->pd_supported) {
4037 tcpm_set_state(port, SRC_READY, 0);
4040 port->upcoming_state = SRC_SEND_CAPABILITIES;
4041 tcpm_ams_start(port, POWER_NEGOTIATION);
4043 case SRC_SEND_CAPABILITIES:
4045 if (port->caps_count > PD_N_CAPS_COUNT) {
4046 tcpm_set_state(port, SRC_READY, 0);
4049 ret = tcpm_pd_send_source_caps(port);
4051 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
4052 PD_T_SEND_SOURCE_CAP);
4055 * Per standard, we should clear the reset counter here.
4056 * However, that can result in state machine hang-ups.
4057 * Reset it only in READY state to improve stability.
4059 /* port->hard_reset_count = 0; */
4060 port->caps_count = 0;
4061 port->pd_capable = true;
4062 tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
4063 PD_T_SEND_SOURCE_CAP);
4066 case SRC_SEND_CAPABILITIES_TIMEOUT:
4068 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
4070 * PD 2.0 sinks are supposed to accept src-capabilities with a
4071 * 3.0 header and simply ignore any src PDOs which the sink does
4072 * not understand such as PPS but some 2.0 sinks instead ignore
4073 * the entire PD_DATA_SOURCE_CAP message, causing contract
4074 * negotiation to fail.
4076 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
4077 * sending src-capabilities with a lower PD revision to
4078 * make these broken sinks work.
4080 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
4081 tcpm_set_state(port, HARD_RESET_SEND, 0);
4082 } else if (port->negotiated_rev > PD_REV20) {
4083 port->negotiated_rev--;
4084 port->hard_reset_count = 0;
4085 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
4087 tcpm_set_state(port, hard_reset_state(port), 0);
4090 case SRC_NEGOTIATE_CAPABILITIES:
4091 ret = tcpm_pd_check_request(port);
4093 tcpm_pd_send_control(port, PD_CTRL_REJECT);
4094 if (!port->explicit_contract) {
4095 tcpm_set_state(port,
4096 SRC_WAIT_NEW_CAPABILITIES, 0);
4098 tcpm_set_state(port, SRC_READY, 0);
4101 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4102 tcpm_set_partner_usb_comm_capable(port,
4103 !!(port->sink_request & RDO_USB_COMM));
4104 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
4105 PD_T_SRC_TRANSITION);
4108 case SRC_TRANSITION_SUPPLY:
4109 /* XXX: regulator_set_voltage(vbus, ...) */
4110 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
4111 port->explicit_contract = true;
4112 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
4113 port->pwr_opmode = TYPEC_PWR_MODE_PD;
4114 tcpm_set_state_cond(port, SRC_READY, 0);
4118 port->hard_reset_count = 0;
4120 port->try_src_count = 0;
4122 tcpm_swap_complete(port, 0);
4123 tcpm_typec_connect(port);
4125 if (port->ams != NONE_AMS)
4126 tcpm_ams_finish(port);
4127 if (port->next_ams != NONE_AMS) {
4128 port->ams = port->next_ams;
4129 port->next_ams = NONE_AMS;
4133 * If previous AMS is interrupted, switch to the upcoming
4136 if (port->upcoming_state != INVALID_STATE) {
4137 upcoming_state = port->upcoming_state;
4138 port->upcoming_state = INVALID_STATE;
4139 tcpm_set_state(port, upcoming_state, 0);
4144 * 6.4.4.3.1 Discover Identity
4145 * "The Discover Identity Command Shall only be sent to SOP when there is an
4146 * Explicit Contract."
4147 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
4148 * port->explicit_contract to decide whether to send the command.
4150 if (port->explicit_contract) {
4151 tcpm_set_initial_svdm_version(port);
4152 mod_send_discover_delayed_work(port, 0);
4154 port->send_discover = false;
4159 * Sending ping messages is not necessary if
4160 * - the source operates at vSafe5V
4162 * - The system is not operating in PD mode
4164 * - Both partners are connected using a Type-C connector
4166 * There is no actual need to send PD messages since the local
4167 * port type-c and the spec does not clearly say whether PD is
4168 * possible when type-c is connected to Type-A/B
4171 case SRC_WAIT_NEW_CAPABILITIES:
4172 /* Nothing to do... */
4176 case SNK_UNATTACHED:
4177 if (!port->non_pd_role_swap)
4178 tcpm_swap_complete(port, -ENOTCONN);
4179 tcpm_pps_complete(port, -ENOTCONN);
4180 tcpm_snk_detach(port);
4181 if (port->potential_contaminant) {
4182 tcpm_set_state(port, CHECK_CONTAMINANT, 0);
4185 if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
4186 tcpm_set_state(port, TOGGLING, 0);
4189 tcpm_set_cc(port, TYPEC_CC_RD);
4190 if (port->port_type == TYPEC_PORT_DRP)
4191 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
4193 case SNK_ATTACH_WAIT:
4194 if ((port->cc1 == TYPEC_CC_OPEN &&
4195 port->cc2 != TYPEC_CC_OPEN) ||
4196 (port->cc1 != TYPEC_CC_OPEN &&
4197 port->cc2 == TYPEC_CC_OPEN))
4198 tcpm_set_state(port, SNK_DEBOUNCED,
4200 else if (tcpm_port_is_disconnected(port))
4201 tcpm_set_state(port, SNK_UNATTACHED,
4205 if (tcpm_port_is_disconnected(port))
4206 tcpm_set_state(port, SNK_UNATTACHED,
4208 else if (port->vbus_present)
4209 tcpm_set_state(port,
4210 tcpm_try_src(port) ? SRC_TRY
4215 port->try_src_count++;
4216 tcpm_set_cc(port, tcpm_rp_cc(port));
4218 tcpm_set_state(port, SRC_TRY_WAIT, 0);
4221 if (port->max_wait == 0) {
4222 port->max_wait = jiffies +
4223 msecs_to_jiffies(PD_T_DRP_TRY);
4224 msecs = PD_T_DRP_TRY;
4226 if (time_is_after_jiffies(port->max_wait))
4227 msecs = jiffies_to_msecs(port->max_wait -
4232 tcpm_set_state(port, SNK_TRYWAIT, msecs);
4234 case SRC_TRY_DEBOUNCE:
4235 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
4238 tcpm_set_cc(port, TYPEC_CC_RD);
4239 tcpm_set_state(port, SNK_TRYWAIT_VBUS, PD_T_CC_DEBOUNCE);
4241 case SNK_TRYWAIT_VBUS:
4243 * TCPM stays in this state indefinitely until VBUS
4244 * is detected as long as Rp is not detected for
4245 * more than a time period of tPDDebounce.
4247 if (port->vbus_present && tcpm_port_is_sink(port)) {
4248 tcpm_set_state(port, SNK_ATTACHED, 0);
4251 if (!tcpm_port_is_sink(port))
4252 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
4254 case SNK_TRYWAIT_DEBOUNCE:
4255 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
4258 ret = tcpm_snk_attach(port);
4260 tcpm_set_state(port, SNK_UNATTACHED, 0);
4262 tcpm_set_state(port, SNK_STARTUP, 0);
4265 opmode = tcpm_get_pwr_opmode(port->polarity ?
4266 port->cc2 : port->cc1);
4267 typec_set_pwr_opmode(port->typec_port, opmode);
4268 port->pwr_opmode = TYPEC_PWR_MODE_USB;
4269 port->negotiated_rev = PD_MAX_REV;
4270 port->message_id = 0;
4271 port->rx_msgid = -1;
4272 port->explicit_contract = false;
4274 if (port->ams == POWER_ROLE_SWAP ||
4275 port->ams == FAST_ROLE_SWAP)
4276 /* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
4277 tcpm_ams_finish(port);
4279 tcpm_set_state(port, SNK_DISCOVERY, 0);
4282 if (port->vbus_present) {
4283 u32 current_lim = tcpm_get_current_limit(port);
4285 if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
4286 current_lim = PD_P_SNK_STDBY_MW / 5;
4287 tcpm_set_current_limit(port, current_lim, 5000);
4288 /* Not sink vbus if operational current is 0mA */
4289 tcpm_set_charge(port, !port->pd_supported ||
4290 pdo_max_current(port->snk_pdo[0]));
4292 if (!port->pd_supported)
4293 tcpm_set_state(port, SNK_READY, 0);
4295 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
4299 * For DRP, timeouts differ. Also, handling is supposed to be
4300 * different and much more complex (dead battery detection;
4301 * see USB power delivery specification, section 8.3.3.6.1.5.1).
4303 tcpm_set_state(port, hard_reset_state(port),
4304 port->port_type == TYPEC_PORT_DRP ?
4305 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
4307 case SNK_DISCOVERY_DEBOUNCE:
4308 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
4311 case SNK_DISCOVERY_DEBOUNCE_DONE:
4312 if (!tcpm_port_is_disconnected(port) &&
4313 tcpm_port_is_sink(port) &&
4314 ktime_after(port->delayed_runtime, ktime_get())) {
4315 tcpm_set_state(port, SNK_DISCOVERY,
4316 ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
4319 tcpm_set_state(port, unattached_state(port), 0);
4321 case SNK_WAIT_CAPABILITIES:
4322 ret = port->tcpc->set_pd_rx(port->tcpc, true);
4324 tcpm_set_state(port, SNK_READY, 0);
4328 * If VBUS has never been low, and we time out waiting
4329 * for source cap, try a soft reset first, in case we
4330 * were already in a stable contract before this boot.
4331 * Do this only once.
4333 if (port->vbus_never_low) {
4334 port->vbus_never_low = false;
4335 tcpm_set_state(port, SNK_SOFT_RESET,
4336 PD_T_SINK_WAIT_CAP);
4338 tcpm_set_state(port, hard_reset_state(port),
4339 PD_T_SINK_WAIT_CAP);
4342 case SNK_NEGOTIATE_CAPABILITIES:
4343 port->pd_capable = true;
4344 tcpm_set_partner_usb_comm_capable(port,
4345 !!(port->source_caps[0] & PDO_FIXED_USB_COMM));
4346 port->hard_reset_count = 0;
4347 ret = tcpm_pd_send_request(port);
4349 /* Restore back to the original state */
4350 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
4351 port->pps_data.active,
4352 port->supply_voltage);
4353 /* Let the Source send capabilities again. */
4354 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
4356 tcpm_set_state_cond(port, hard_reset_state(port),
4357 PD_T_SENDER_RESPONSE);
4360 case SNK_NEGOTIATE_PPS_CAPABILITIES:
4361 ret = tcpm_pd_send_pps_request(port);
4363 /* Restore back to the original state */
4364 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
4365 port->pps_data.active,
4366 port->supply_voltage);
4367 port->pps_status = ret;
4369 * If this was called due to updates to sink
4370 * capabilities, and pps is no longer valid, we should
4371 * safely fall back to a standard PDO.
4373 if (port->update_sink_caps)
4374 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
4376 tcpm_set_state(port, SNK_READY, 0);
4378 tcpm_set_state_cond(port, hard_reset_state(port),
4379 PD_T_SENDER_RESPONSE);
4382 case SNK_TRANSITION_SINK:
4383 /* From the USB PD spec:
4384 * "The Sink Shall transition to Sink Standby before a positive or
4385 * negative voltage transition of VBUS. During Sink Standby
4386 * the Sink Shall reduce its power draw to pSnkStdby."
4388 * This is not applicable to PPS though as the port can continue
4389 * to draw negotiated power without switching to standby.
4391 if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
4392 port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
4393 u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
4395 tcpm_log(port, "Setting standby current %u mV @ %u mA",
4396 port->supply_voltage, stdby_ma);
4397 tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
4400 case SNK_TRANSITION_SINK_VBUS:
4401 tcpm_set_state(port, hard_reset_state(port),
4402 PD_T_PS_TRANSITION);
4405 port->try_snk_count = 0;
4406 port->update_sink_caps = false;
4407 if (port->explicit_contract) {
4408 typec_set_pwr_opmode(port->typec_port,
4410 port->pwr_opmode = TYPEC_PWR_MODE_PD;
4413 if (!port->pd_capable && port->slow_charger_loop)
4414 tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
4415 tcpm_swap_complete(port, 0);
4416 tcpm_typec_connect(port);
4417 if (port->pd_capable && port->source_caps[0] & PDO_FIXED_DUAL_ROLE)
4418 mod_enable_frs_delayed_work(port, 0);
4419 tcpm_pps_complete(port, port->pps_status);
4421 if (port->ams != NONE_AMS)
4422 tcpm_ams_finish(port);
4423 if (port->next_ams != NONE_AMS) {
4424 port->ams = port->next_ams;
4425 port->next_ams = NONE_AMS;
4429 * If previous AMS is interrupted, switch to the upcoming
4432 if (port->upcoming_state != INVALID_STATE) {
4433 upcoming_state = port->upcoming_state;
4434 port->upcoming_state = INVALID_STATE;
4435 tcpm_set_state(port, upcoming_state, 0);
4440 * 6.4.4.3.1 Discover Identity
4441 * "The Discover Identity Command Shall only be sent to SOP when there is an
4442 * Explicit Contract."
4443 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
4444 * port->explicit_contract.
4446 if (port->explicit_contract) {
4447 tcpm_set_initial_svdm_version(port);
4448 mod_send_discover_delayed_work(port, 0);
4450 port->send_discover = false;
4453 power_supply_changed(port->psy);
4456 /* Accessory states */
4457 case ACC_UNATTACHED:
4458 tcpm_acc_detach(port);
4459 tcpm_set_state(port, SRC_UNATTACHED, 0);
4461 case DEBUG_ACC_ATTACHED:
4462 case AUDIO_ACC_ATTACHED:
4463 ret = tcpm_acc_attach(port);
4465 tcpm_set_state(port, ACC_UNATTACHED, 0);
4467 case AUDIO_ACC_DEBOUNCE:
4468 tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
4471 /* Hard_Reset states */
4472 case HARD_RESET_SEND:
4473 if (port->ams != NONE_AMS)
4474 tcpm_ams_finish(port);
4476 * State machine will be directed to HARD_RESET_START,
4477 * thus set upcoming_state to INVALID_STATE.
4479 port->upcoming_state = INVALID_STATE;
4480 tcpm_ams_start(port, HARD_RESET);
4482 case HARD_RESET_START:
4483 port->sink_cap_done = false;
4484 if (port->tcpc->enable_frs)
4485 port->tcpc->enable_frs(port->tcpc, false);
4486 port->hard_reset_count++;
4487 port->tcpc->set_pd_rx(port->tcpc, false);
4488 tcpm_unregister_altmodes(port);
4489 port->nr_sink_caps = 0;
4490 port->send_discover = true;
4491 if (port->pwr_role == TYPEC_SOURCE)
4492 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
4493 PD_T_PS_HARD_RESET);
4495 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
4497 case SRC_HARD_RESET_VBUS_OFF:
4499 * 7.1.5 Response to Hard Resets
4500 * Hard Reset Signaling indicates a communication failure has occurred and the
4501 * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
4502 * drive VBUS to vSafe0V as shown in Figure 7-9.
4504 tcpm_set_vconn(port, false);
4505 tcpm_set_vbus(port, false);
4506 tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
4507 tcpm_data_role_for_source(port));
4509 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
4510 * PD_T_SRC_RECOVER before turning vbus back on.
4511 * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
4512 * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
4513 * tells the Device Policy Manager to instruct the power supply to perform a
4514 * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
4515 * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
4516 * re-establish communication with the Sink and resume USB Default Operation.
4517 * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
4519 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
4521 case SRC_HARD_RESET_VBUS_ON:
4522 tcpm_set_vconn(port, true);
4523 tcpm_set_vbus(port, true);
4524 if (port->ams == HARD_RESET)
4525 tcpm_ams_finish(port);
4526 if (port->pd_supported)
4527 port->tcpc->set_pd_rx(port->tcpc, true);
4528 tcpm_set_attached_state(port, true);
4529 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
4531 case SNK_HARD_RESET_SINK_OFF:
4532 /* Do not discharge/disconnect during hard reset */
4533 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4534 memset(&port->pps_data, 0, sizeof(port->pps_data));
4535 tcpm_set_vconn(port, false);
4536 if (port->pd_capable)
4537 tcpm_set_charge(port, false);
4538 tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
4539 tcpm_data_role_for_sink(port));
4541 * VBUS may or may not toggle, depending on the adapter.
4542 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
4543 * directly after timeout.
4545 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
4547 case SNK_HARD_RESET_WAIT_VBUS:
4548 if (port->ams == HARD_RESET)
4549 tcpm_ams_finish(port);
4550 /* Assume we're disconnected if VBUS doesn't come back. */
4551 tcpm_set_state(port, SNK_UNATTACHED,
4552 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
4554 case SNK_HARD_RESET_SINK_ON:
4555 /* Note: There is no guarantee that VBUS is on in this state */
4558 * The specification suggests that dual mode ports in sink
4559 * mode should transition to state PE_SRC_Transition_to_default.
4560 * See USB power delivery specification chapter 8.3.3.6.1.3.
4561 * This would mean to
4562 * - turn off VCONN, reset power supply
4563 * - request hardware reset
4565 * - Transition to state PE_Src_Startup
4566 * SNK only ports shall transition to state Snk_Startup
4567 * (see chapter 8.3.3.3.8).
4568 * Similar, dual-mode ports in source mode should transition
4569 * to PE_SNK_Transition_to_default.
4571 if (port->pd_capable) {
4572 tcpm_set_current_limit(port,
4573 tcpm_get_current_limit(port),
4575 /* Not sink vbus if operational current is 0mA */
4576 tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
4578 if (port->ams == HARD_RESET)
4579 tcpm_ams_finish(port);
4580 tcpm_set_attached_state(port, true);
4581 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
4582 tcpm_set_state(port, SNK_STARTUP, 0);
4585 /* Soft_Reset states */
4587 port->message_id = 0;
4588 port->rx_msgid = -1;
4589 /* remove existing capabilities */
4590 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4591 port->partner_source_caps = NULL;
4592 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4593 tcpm_ams_finish(port);
4594 if (port->pwr_role == TYPEC_SOURCE) {
4595 port->upcoming_state = SRC_SEND_CAPABILITIES;
4596 tcpm_ams_start(port, POWER_NEGOTIATION);
4598 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
4601 case SRC_SOFT_RESET_WAIT_SNK_TX:
4602 case SNK_SOFT_RESET:
4603 if (port->ams != NONE_AMS)
4604 tcpm_ams_finish(port);
4605 port->upcoming_state = SOFT_RESET_SEND;
4606 tcpm_ams_start(port, SOFT_RESET_AMS);
4608 case SOFT_RESET_SEND:
4609 port->message_id = 0;
4610 port->rx_msgid = -1;
4611 /* remove existing capabilities */
4612 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4613 port->partner_source_caps = NULL;
4614 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
4615 tcpm_set_state_cond(port, hard_reset_state(port), 0);
4617 tcpm_set_state_cond(port, hard_reset_state(port),
4618 PD_T_SENDER_RESPONSE);
4621 /* DR_Swap states */
4623 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
4624 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
4625 port->send_discover = true;
4626 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
4627 PD_T_SENDER_RESPONSE);
4629 case DR_SWAP_ACCEPT:
4630 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4631 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
4632 port->send_discover = true;
4633 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
4635 case DR_SWAP_SEND_TIMEOUT:
4636 tcpm_swap_complete(port, -ETIMEDOUT);
4637 port->send_discover = false;
4638 tcpm_ams_finish(port);
4639 tcpm_set_state(port, ready_state(port), 0);
4641 case DR_SWAP_CHANGE_DR:
4642 tcpm_unregister_altmodes(port);
4643 if (port->data_role == TYPEC_HOST)
4644 tcpm_set_roles(port, true, port->pwr_role,
4647 tcpm_set_roles(port, true, port->pwr_role,
4649 tcpm_ams_finish(port);
4650 tcpm_set_state(port, ready_state(port), 0);
4654 if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP)) {
4655 tcpm_set_state(port, ERROR_RECOVERY, 0);
4658 tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
4660 case FR_SWAP_SEND_TIMEOUT:
4661 tcpm_set_state(port, ERROR_RECOVERY, 0);
4663 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
4664 tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_OFF);
4666 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
4667 if (port->vbus_source)
4668 tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
4670 tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
4672 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
4673 tcpm_set_pwr_role(port, TYPEC_SOURCE);
4674 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
4675 tcpm_set_state(port, ERROR_RECOVERY, 0);
4678 tcpm_set_cc(port, tcpm_rp_cc(port));
4679 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
4682 /* PR_Swap states */
4683 case PR_SWAP_ACCEPT:
4684 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4685 tcpm_set_state(port, PR_SWAP_START, 0);
4688 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
4689 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
4690 PD_T_SENDER_RESPONSE);
4692 case PR_SWAP_SEND_TIMEOUT:
4693 tcpm_swap_complete(port, -ETIMEDOUT);
4694 tcpm_set_state(port, ready_state(port), 0);
4697 tcpm_apply_rc(port);
4698 if (port->pwr_role == TYPEC_SOURCE)
4699 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
4700 PD_T_SRC_TRANSITION);
4702 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
4704 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
4706 * Prevent vbus discharge circuit from turning on during PR_SWAP
4707 * as this is not a disconnect.
4709 tcpm_set_vbus(port, false);
4710 port->explicit_contract = false;
4711 /* allow time for Vbus discharge, must be < tSrcSwapStdby */
4712 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
4715 case PR_SWAP_SRC_SNK_SOURCE_OFF:
4716 tcpm_set_cc(port, TYPEC_CC_RD);
4717 /* allow CC debounce */
4718 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
4721 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
4723 * USB-PD standard, 6.2.1.4, Port Power Role:
4724 * "During the Power Role Swap Sequence, for the initial Source
4725 * Port, the Port Power Role field shall be set to Sink in the
4726 * PS_RDY Message indicating that the initial Source’s power
4727 * supply is turned off"
4729 tcpm_set_pwr_role(port, TYPEC_SINK);
4730 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
4731 tcpm_set_state(port, ERROR_RECOVERY, 0);
4734 tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
4736 case PR_SWAP_SRC_SNK_SINK_ON:
4737 tcpm_enable_auto_vbus_discharge(port, true);
4738 /* Set the vbus disconnect threshold for implicit contract */
4739 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
4740 tcpm_set_state(port, SNK_STARTUP, 0);
4742 case PR_SWAP_SNK_SRC_SINK_OFF:
4743 /* will be source, remove existing capabilities */
4744 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4745 port->partner_source_caps = NULL;
4747 * Prevent vbus discharge circuit from turning on during PR_SWAP
4748 * as this is not a disconnect.
4750 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
4751 port->pps_data.active, 0);
4752 tcpm_set_charge(port, false);
4753 tcpm_set_state(port, hard_reset_state(port),
4754 PD_T_PS_SOURCE_OFF);
4756 case PR_SWAP_SNK_SRC_SOURCE_ON:
4757 tcpm_enable_auto_vbus_discharge(port, true);
4758 tcpm_set_cc(port, tcpm_rp_cc(port));
4759 tcpm_set_vbus(port, true);
4761 * allow time VBUS ramp-up, must be < tNewSrc
4762 * Also, this window overlaps with CC debounce as well.
4763 * So, Wait for the max of two which is PD_T_NEWSRC
4765 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
4768 case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
4770 * USB PD standard, 6.2.1.4:
4771 * "Subsequent Messages initiated by the Policy Engine,
4772 * such as the PS_RDY Message sent to indicate that Vbus
4773 * is ready, will have the Port Power Role field set to
4776 tcpm_set_pwr_role(port, TYPEC_SOURCE);
4777 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
4778 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
4781 case VCONN_SWAP_ACCEPT:
4782 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4783 tcpm_ams_finish(port);
4784 tcpm_set_state(port, VCONN_SWAP_START, 0);
4786 case VCONN_SWAP_SEND:
4787 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
4788 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
4789 PD_T_SENDER_RESPONSE);
4791 case VCONN_SWAP_SEND_TIMEOUT:
4792 tcpm_swap_complete(port, -ETIMEDOUT);
4793 tcpm_set_state(port, ready_state(port), 0);
4795 case VCONN_SWAP_START:
4796 if (port->vconn_role == TYPEC_SOURCE)
4797 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
4799 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
4801 case VCONN_SWAP_WAIT_FOR_VCONN:
4802 tcpm_set_state(port, hard_reset_state(port),
4803 PD_T_VCONN_SOURCE_ON);
4805 case VCONN_SWAP_TURN_ON_VCONN:
4806 tcpm_set_vconn(port, true);
4807 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
4808 tcpm_set_state(port, ready_state(port), 0);
4810 case VCONN_SWAP_TURN_OFF_VCONN:
4811 tcpm_set_vconn(port, false);
4812 tcpm_set_state(port, ready_state(port), 0);
4815 case DR_SWAP_CANCEL:
4816 case PR_SWAP_CANCEL:
4817 case VCONN_SWAP_CANCEL:
4818 tcpm_swap_complete(port, port->swap_status);
4819 if (port->pwr_role == TYPEC_SOURCE)
4820 tcpm_set_state(port, SRC_READY, 0);
4822 tcpm_set_state(port, SNK_READY, 0);
4824 case FR_SWAP_CANCEL:
4825 if (port->pwr_role == TYPEC_SOURCE)
4826 tcpm_set_state(port, SRC_READY, 0);
4828 tcpm_set_state(port, SNK_READY, 0);
4832 switch (BDO_MODE_MASK(port->bist_request)) {
4833 case BDO_MODE_CARRIER2:
4834 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
4835 tcpm_set_state(port, unattached_state(port),
4836 PD_T_BIST_CONT_MODE);
4838 case BDO_MODE_TESTDATA:
4839 if (port->tcpc->set_bist_data) {
4840 tcpm_log(port, "Enable BIST MODE TESTDATA");
4841 port->tcpc->set_bist_data(port->tcpc, true);
4848 case GET_STATUS_SEND:
4849 tcpm_pd_send_control(port, PD_CTRL_GET_STATUS);
4850 tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
4851 PD_T_SENDER_RESPONSE);
4853 case GET_STATUS_SEND_TIMEOUT:
4854 tcpm_set_state(port, ready_state(port), 0);
4856 case GET_PPS_STATUS_SEND:
4857 tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS);
4858 tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
4859 PD_T_SENDER_RESPONSE);
4861 case GET_PPS_STATUS_SEND_TIMEOUT:
4862 tcpm_set_state(port, ready_state(port), 0);
4865 tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP);
4866 tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
4868 case GET_SINK_CAP_TIMEOUT:
4869 port->sink_cap_done = true;
4870 tcpm_set_state(port, ready_state(port), 0);
4872 case ERROR_RECOVERY:
4873 tcpm_swap_complete(port, -EPROTO);
4874 tcpm_pps_complete(port, -EPROTO);
4875 tcpm_set_state(port, PORT_RESET, 0);
4878 tcpm_reset_port(port);
4879 tcpm_set_cc(port, TYPEC_CC_OPEN);
4880 tcpm_set_state(port, PORT_RESET_WAIT_OFF,
4881 PD_T_ERROR_RECOVERY);
4883 case PORT_RESET_WAIT_OFF:
4884 tcpm_set_state(port,
4885 tcpm_default_state(port),
4886 port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
4889 /* AMS intermediate state */
4891 if (port->upcoming_state == INVALID_STATE) {
4892 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
4893 SRC_READY : SNK_READY, 0);
4897 upcoming_state = port->upcoming_state;
4898 port->upcoming_state = INVALID_STATE;
4899 tcpm_set_state(port, upcoming_state, 0);
4903 case CHUNK_NOT_SUPP:
4904 tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
4905 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
4908 WARN(1, "Unexpected port state %d\n", port->state);
/*
 * tcpm_state_machine_work - kthread worker that drives the TCPM port state
 * machine.
 *
 * Runs with port->lock held for the entire invocation.  Order of operations:
 *  1. Flush any PD message queued for transmission; if one was sent, the
 *     early exit below skips state processing for this pass.
 *  2. If this work item was scheduled for a delayed (timed) state change,
 *     commit it now: log the transition, record prev_state, and move
 *     delayed_state into state.
 *  3. Repeatedly call run_state_machine(), transmitting any message each
 *     iteration queues, until the state stops changing or a new delayed
 *     transition is scheduled.
 *
 * NOTE(review): state_machine_running presumably lets tcpm_set_state() and
 * friends detect that they are being called from within this worker —
 * confirm against the rest of the file.
 */
4913 static void tcpm_state_machine_work(struct kthread_work *work)
4915 struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
4916 enum tcpm_state prev_state;
4918 mutex_lock(&port->lock);
4919 port->state_machine_running = true;
4921 if (port->queued_message && tcpm_send_queued_message(port))
4924 /* If we were queued due to a delayed state change, update it now */
4925 if (port->delayed_state) {
4926 tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
4927 tcpm_states[port->state],
4928 tcpm_states[port->delayed_state], port->delay_ms);
4929 port->prev_state = port->state;
4930 port->state = port->delayed_state;
4931 port->delayed_state = INVALID_STATE;
4935 * Continue running as long as we have (non-delayed) state changes
/* Immediate (zero-delay) transitions are handled inline in this loop. */
4939 prev_state = port->state;
4940 run_state_machine(port);
4941 if (port->queued_message)
4942 tcpm_send_queued_message(port);
4943 } while (port->state != prev_state && !port->delayed_state);
4946 port->state_machine_running = false;
4947 mutex_unlock(&port->lock);
/*
 * _tcpm_cc_change - react to a change on the CC1/CC2 pins.
 * @port: port whose CC status changed (caller holds the port lock)
 * @cc1:  new status of the CC1 pin
 * @cc2:  new status of the CC2 pin
 *
 * Records the previous CC values, logs the transition, then dispatches on
 * the current state machine state: attach/detach detection while
 * unattached, debounce handling for the Try.SRC/Try.SNK sub-states,
 * accessory (audio/debug) attach and detach, and explicit no-op cases for
 * PR_Swap/FR_Swap and port-reset states where CC changes are expected.
 * The default arm handles disconnect detection for all remaining states.
 */
4950 static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
4951 enum typec_cc_status cc2)
4953 enum typec_cc_status old_cc1, old_cc2;
4954 enum tcpm_state new_state;
/* Snapshot previous CC values before the port structure is updated. */
4956 old_cc1 = port->cc1;
4957 old_cc2 = port->cc2;
4961 tcpm_log_force(port,
4962 "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
4963 old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
4965 tcpm_port_is_disconnected(port) ? "disconnected"
4968 switch (port->state) {
/* Toggling/unattached: decide which attach-wait state to enter. */
4970 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
4971 tcpm_port_is_source(port))
4972 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
4973 else if (tcpm_port_is_sink(port))
4974 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
4976 case CHECK_CONTAMINANT:
4977 /* Wait for Toggling to be resumed */
4979 case SRC_UNATTACHED:
4980 case ACC_UNATTACHED:
4981 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
4982 tcpm_port_is_source(port))
4983 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
4985 case SRC_ATTACH_WAIT:
4986 if (tcpm_port_is_disconnected(port) ||
4987 tcpm_port_is_audio_detached(port))
4988 tcpm_set_state(port, SRC_UNATTACHED, 0);
/* Any CC flip restarts the attach-wait debounce period. */
4989 else if (cc1 != old_cc1 || cc2 != old_cc2)
4990 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
4994 case SRC_SEND_CAPABILITIES:
4996 if (tcpm_port_is_disconnected(port) ||
4997 !tcpm_port_is_source(port)) {
4998 if (port->port_type == TYPEC_PORT_SRC)
4999 tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
5001 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
5004 case SNK_UNATTACHED:
5005 if (tcpm_port_is_sink(port))
5006 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5008 case SNK_ATTACH_WAIT:
/* Exactly one CC pin open => partner present on the other pin. */
5009 if ((port->cc1 == TYPEC_CC_OPEN &&
5010 port->cc2 != TYPEC_CC_OPEN) ||
5011 (port->cc1 != TYPEC_CC_OPEN &&
5012 port->cc2 == TYPEC_CC_OPEN))
5013 new_state = SNK_DEBOUNCED;
5014 else if (tcpm_port_is_disconnected(port))
5015 new_state = SNK_UNATTACHED;
/* Only restart debounce if the target state actually changed. */
5018 if (new_state != port->delayed_state)
5019 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5022 if (tcpm_port_is_disconnected(port))
5023 new_state = SNK_UNATTACHED;
5024 else if (port->vbus_present)
5025 new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
5027 new_state = SNK_UNATTACHED;
5028 if (new_state != port->delayed_state)
5029 tcpm_set_state(port, SNK_DEBOUNCED, 0);
5033 * EXIT condition is based primarily on vbus disconnect and CC is secondary.
5034 * "A port that has entered into USB PD communications with the Source and
5035 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
5036 * cable disconnect in addition to monitoring VBUS.
5038 * A port that is monitoring the CC voltage for disconnect (but is not in
5039 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
5040 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
5041 * vRd-USB for tPDDebounce."
5043 * When set_auto_vbus_discharge_threshold is enabled, CC pins go
5044 * away before vbus decays to disconnect threshold. Allow
5045 * disconnect to be driven by vbus disconnect when auto vbus
5046 * discharge is enabled.
5048 if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
5049 tcpm_set_state(port, unattached_state(port), 0);
/* Non-PD contract: re-evaluate the Rp-advertised current limit. */
5050 else if (!port->pd_capable &&
5051 (cc1 != old_cc1 || cc2 != old_cc2))
5052 tcpm_set_current_limit(port,
5053 tcpm_get_current_limit(port),
5057 case AUDIO_ACC_ATTACHED:
5058 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
5059 tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
5061 case AUDIO_ACC_DEBOUNCE:
5062 if (tcpm_port_is_audio(port))
5063 tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
5066 case DEBUG_ACC_ATTACHED:
5067 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
5068 tcpm_set_state(port, ACC_UNATTACHED, 0);
5072 /* Do nothing, waiting for timeout */
5076 /* CC line is unstable, wait for debounce */
5077 if (tcpm_port_is_disconnected(port))
5078 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
5080 case SNK_DISCOVERY_DEBOUNCE:
5084 /* Hand over to state machine if needed */
5085 if (!port->vbus_present && tcpm_port_is_source(port))
5086 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
5088 case SRC_TRYWAIT_DEBOUNCE:
5089 if (port->vbus_present || !tcpm_port_is_source(port))
5090 tcpm_set_state(port, SRC_TRYWAIT, 0);
5092 case SNK_TRY_WAIT_DEBOUNCE:
5093 if (!tcpm_port_is_sink(port)) {
5095 tcpm_set_state(port, SRC_TRYWAIT, 0);
5099 if (tcpm_port_is_source(port))
5100 tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
5102 case SRC_TRY_DEBOUNCE:
5103 tcpm_set_state(port, SRC_TRY_WAIT, 0);
5105 case SNK_TRYWAIT_DEBOUNCE:
5106 if (tcpm_port_is_sink(port))
5107 tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
5109 case SNK_TRYWAIT_VBUS:
5110 if (!tcpm_port_is_sink(port))
5111 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
5113 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
5114 if (!tcpm_port_is_sink(port))
5115 tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE);
5117 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0);
5120 /* Do nothing, waiting for tCCDebounce */
5122 case PR_SWAP_SNK_SRC_SINK_OFF:
5123 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5124 case PR_SWAP_SRC_SNK_SOURCE_OFF:
5125 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
5126 case PR_SWAP_SNK_SRC_SOURCE_ON:
5128 * CC state change is expected in PR_SWAP
5133 case FR_SWAP_SEND_TIMEOUT:
5134 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5135 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5136 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5137 /* Do nothing, CC change expected */
5141 case PORT_RESET_WAIT_OFF:
5143 * State set back to default mode once the timer completes.
5144 * Ignore CC changes here.
5149 * While acting as sink and auto vbus discharge is enabled, Allow disconnect
5150 * to be driven by vbus disconnect.
5152 if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
5153 port->auto_vbus_discharge_enabled))
5154 tcpm_set_state(port, unattached_state(port), 0);
/*
 * VBUS-on notification, run from the PD event handler with port->lock held:
 * record that VBUS is present (which implies it is no longer at vSafe0V) and
 * advance the attach / hard-reset / Try-role / FRS state machines.
 * NOTE(review): this extract is subsampled -- brace/break lines and some case
 * labels are missing relative to the embedded numbering; code left untouched.
 */
5159 static void _tcpm_pd_vbus_on(struct tcpm_port *port)
5161 tcpm_log_force(port, "VBUS on");
5162 port->vbus_present = true;
5164 * When vbus_present is true i.e. Voltage at VBUS is greater than VSAFE5V implicitly
5165 * states that vbus is not at VSAFE0V, hence clear the vbus_vsafe0v flag here.
5167 port->vbus_vsafe0v = false;
5169 switch (port->state) {
5170 case SNK_TRANSITION_SINK_VBUS:
/* New power level has arrived; the contract is now explicit. */
5171 port->explicit_contract = true;
5172 tcpm_set_state(port, SNK_READY, 0);
5175 tcpm_set_state(port, SNK_DISCOVERY, 0);
5179 tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
5183 case SNK_HARD_RESET_WAIT_VBUS:
5184 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
5187 tcpm_set_state(port, SRC_STARTUP, 0);
5189 case SRC_HARD_RESET_VBUS_ON:
5190 tcpm_set_state(port, SRC_STARTUP, 0);
5194 /* Do nothing, waiting for timeout */
5197 /* Do nothing, Waiting for Rd to be detected */
5199 case SRC_TRYWAIT_DEBOUNCE:
5200 tcpm_set_state(port, SRC_TRYWAIT, 0);
5202 case SNK_TRY_WAIT_DEBOUNCE:
5203 /* Do nothing, waiting for PD_DEBOUNCE to do be done */
5206 /* Do nothing, waiting for tCCDebounce */
5208 case SNK_TRYWAIT_VBUS:
5209 if (tcpm_port_is_sink(port))
5210 tcpm_set_state(port, SNK_ATTACHED, 0);
5212 case SNK_TRYWAIT_DEBOUNCE:
5213 /* Do nothing, waiting for Rp */
5215 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
5216 if (port->vbus_present && tcpm_port_is_sink(port))
5217 tcpm_set_state(port, SNK_ATTACHED, 0);
5220 case SRC_TRY_DEBOUNCE:
5221 /* Do nothing, waiting for sink detection */
5224 case FR_SWAP_SEND_TIMEOUT:
5225 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5226 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
/* During FRS the TCPC sources VBUS autonomously; notify it if supported. */
5227 if (port->tcpc->frs_sourcing_vbus)
5228 port->tcpc->frs_sourcing_vbus(port->tcpc);
5230 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5231 if (port->tcpc->frs_sourcing_vbus)
5232 port->tcpc->frs_sourcing_vbus(port->tcpc);
5233 tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
5237 case PORT_RESET_WAIT_OFF:
5239 * State set back to default mode once the timer completes.
5240 * Ignore vbus changes here.
/*
 * VBUS-off notification: clear vbus_present/vbus_never_low and drive the
 * state machine -- hard-reset recovery, PR_Swap/FR_Swap expectations, and
 * detach back to the unattached state where appropriate.
 */
5249 static void _tcpm_pd_vbus_off(struct tcpm_port *port)
5251 tcpm_log_force(port, "VBUS off");
5252 port->vbus_present = false;
5253 port->vbus_never_low = false;
5254 switch (port->state) {
5255 case SNK_HARD_RESET_SINK_OFF:
5256 tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
5258 case HARD_RESET_SEND:
5261 /* Do nothing, waiting for timeout */
5264 /* Hand over to state machine if needed */
5265 if (tcpm_port_is_source(port))
5266 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
5268 case SNK_TRY_WAIT_DEBOUNCE:
5269 /* Do nothing, waiting for PD_DEBOUNCE to do be done */
5272 case SNK_TRYWAIT_VBUS:
5273 case SNK_TRYWAIT_DEBOUNCE:
5275 case SNK_ATTACH_WAIT:
5277 /* Do nothing, as TCPM is still waiting for vbus to reaach VSAFE5V to connect */
5280 case SNK_NEGOTIATE_CAPABILITIES:
5283 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
/* We turned our own VBUS off as part of the swap; proceed. */
5284 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
5287 case PR_SWAP_SNK_SRC_SINK_OFF:
5288 /* Do nothing, expected */
5291 case PR_SWAP_SNK_SRC_SOURCE_ON:
5293 * Do nothing when vbus off notification is received.
5294 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON
5295 * for the vbus source to ramp up.
5299 case PORT_RESET_WAIT_OFF:
5300 tcpm_set_state(port, tcpm_default_state(port), 0);
5304 case SRC_TRY_DEBOUNCE:
5305 /* Do nothing, waiting for sink detection */
5309 case SRC_SEND_CAPABILITIES:
5310 case SRC_SEND_CAPABILITIES_TIMEOUT:
5311 case SRC_NEGOTIATE_CAPABILITIES:
5312 case SRC_TRANSITION_SUPPLY:
5314 case SRC_WAIT_NEW_CAPABILITIES:
5316 * Force to unattached state to re-initiate connection.
5317 * DRP port should move to Unattached.SNK instead of Unattached.SRC if
5318 * sink removed. Although sink removal here is due to source's vbus collapse,
5319 * treat it the same way for consistency.
5321 if (port->port_type == TYPEC_PORT_SRC)
5322 tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
5324 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
5329 * State set back to default mode once the timer completes.
5330 * Ignore vbus changes here.
5335 case FR_SWAP_SEND_TIMEOUT:
5336 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5337 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5338 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5339 /* Do nothing, vbus drop expected */
5342 case SNK_HARD_RESET_WAIT_VBUS:
5343 /* Do nothing, its OK to receive vbus off events */
/* Default: an attached sink losing VBUS means the partner is gone. */
5347 if (port->pwr_role == TYPEC_SINK && port->attached)
5348 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
/*
 * vSafe0V notification: VBUS has fully discharged.  Used to time hard-reset
 * recovery (tSrcRecover), to finish source attach, and -- when auto VBUS
 * discharge is enabled -- to drive disconnection.
 */
5353 static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
5355 tcpm_log_force(port, "VBUS VSAFE0V");
5356 port->vbus_vsafe0v = true;
5357 switch (port->state) {
5358 case SRC_HARD_RESET_VBUS_OFF:
5360 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
5361 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
5363 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
5365 case SRC_ATTACH_WAIT:
5366 if (tcpm_port_is_source(port))
5367 tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
5371 case SRC_SEND_CAPABILITIES:
5372 case SRC_SEND_CAPABILITIES_TIMEOUT:
5373 case SRC_NEGOTIATE_CAPABILITIES:
5374 case SRC_TRANSITION_SUPPLY:
5376 case SRC_WAIT_NEW_CAPABILITIES:
/* With auto-discharge, vSafe0V (not CC) signals the disconnect. */
5377 if (port->auto_vbus_discharge_enabled) {
5378 if (port->port_type == TYPEC_PORT_SRC)
5379 tcpm_set_state(port, SRC_UNATTACHED, 0);
5381 tcpm_set_state(port, SNK_UNATTACHED, 0);
5384 case PR_SWAP_SNK_SRC_SINK_OFF:
5385 case PR_SWAP_SNK_SRC_SOURCE_ON:
5386 /* Do nothing, vsafe0v is expected during transition */
5388 case SNK_ATTACH_WAIT:
5390 /*Do nothing, still waiting for VSAFE5V for connect */
5392 case SNK_HARD_RESET_WAIT_VBUS:
5393 /* Do nothing, its OK to receive vbus off events */
/* Default: sink with auto-discharge enabled treats vSafe0V as detach. */
5396 if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
5397 tcpm_set_state(port, SNK_UNATTACHED, 0);
/*
 * Handle a Hard Reset signal received from the port partner: stop any BIST
 * test-data mode, cancel the current AMS and execute the hard reset, falling
 * back to error recovery if resets keep arriving (execution keeps failing).
 */
5402 static void _tcpm_pd_hard_reset(struct tcpm_port *port)
5404 tcpm_log_force(port, "Received hard reset");
5405 if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
5406 port->tcpc->set_bist_data(port->tcpc, false);
5408 switch (port->state) {
5409 case ERROR_RECOVERY:
5411 case PORT_RESET_WAIT_OFF:
/* Hard reset aborts whatever AMS was in flight. */
5417 if (port->ams != NONE_AMS)
5418 port->ams = NONE_AMS;
5419 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
5420 port->ams = HARD_RESET;
5422 * If we keep receiving hard reset requests, executing the hard reset
5423 * must have failed. Revert to error recovery if that happens.
5425 tcpm_set_state(port,
5426 port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
5427 HARD_RESET_START : ERROR_RECOVERY,
/*
 * Kthread worker that drains port->pd_events under port->lock and dispatches
 * each event class (reset, VBUS, CC, FRS, sourcing-VBUS, port-clean, error).
 * pd_event_lock only protects the event bitmap; it is dropped while the
 * individual handlers run so new events can continue to be queued.
 */
5431 static void tcpm_pd_event_handler(struct kthread_work *work)
5433 struct tcpm_port *port = container_of(work, struct tcpm_port,
5437 mutex_lock(&port->lock);
5439 spin_lock(&port->pd_event_lock);
5440 while (port->pd_events) {
/* Snapshot and clear the bitmap, then handle outside the spinlock. */
5441 events = port->pd_events;
5442 port->pd_events = 0;
5443 spin_unlock(&port->pd_event_lock);
5444 if (events & TCPM_RESET_EVENT)
5445 _tcpm_pd_hard_reset(port);
5446 if (events & TCPM_VBUS_EVENT) {
5449 vbus = port->tcpc->get_vbus(port->tcpc);
5451 _tcpm_pd_vbus_on(port);
5453 _tcpm_pd_vbus_off(port);
5455 * When TCPC does not support detecting vsafe0v voltage level,
5456 * treat vbus absent as vsafe0v. Else invoke is_vbus_vsafe0v
5457 * to see if vbus has discharge to VSAFE0V.
5459 if (!port->tcpc->is_vbus_vsafe0v ||
5460 port->tcpc->is_vbus_vsafe0v(port->tcpc))
5461 _tcpm_pd_vbus_vsafe0v(port);
5464 if (events & TCPM_CC_EVENT) {
5465 enum typec_cc_status cc1, cc2;
5467 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
5468 _tcpm_cc_change(port, cc1, cc2);
5470 if (events & TCPM_FRS_EVENT) {
/* FRS signal is only actionable while in SNK_READY. */
5471 if (port->state == SNK_READY) {
5474 port->upcoming_state = FR_SWAP_SEND;
5475 ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
5477 port->upcoming_state = INVALID_STATE;
5479 tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
5482 if (events & TCPM_SOURCING_VBUS) {
5483 tcpm_log(port, "sourcing vbus");
5485 * In fast role swap case TCPC autonomously sources vbus. Set vbus_source
5486 * true as TCPM wouldn't have called tcpm_set_vbus.
5488 * When vbus is sourced on the command on TCPM i.e. TCPM called
5489 * tcpm_set_vbus to source vbus, vbus_source would already be true.
5491 port->vbus_source = true;
5492 _tcpm_pd_vbus_on(port);
5494 if (events & TCPM_PORT_CLEAN) {
5495 tcpm_log(port, "port clean");
5496 if (port->state == CHECK_CONTAMINANT) {
5497 if (tcpm_start_toggling(port, tcpm_rp_cc(port)))
5498 tcpm_set_state(port, TOGGLING, 0);
5500 tcpm_set_state(port, tcpm_default_state(port), 0);
5503 if (events & TCPM_PORT_ERROR) {
5504 tcpm_log(port, "port triggering error recovery");
5505 tcpm_set_state(port, ERROR_RECOVERY, 0);
/* Re-take the spinlock before re-checking the loop condition. */
5508 spin_lock(&port->pd_event_lock);
5510 spin_unlock(&port->pd_event_lock);
5511 mutex_unlock(&port->lock);
/*
 * Low-level driver entry point: queue a CC-status-change event for the
 * PD event worker.  Safe to call from the TCPC driver's IRQ thread.
 */
5514 void tcpm_cc_change(struct tcpm_port *port)
5516 spin_lock(&port->pd_event_lock);
5517 port->pd_events |= TCPM_CC_EVENT;
5518 spin_unlock(&port->pd_event_lock);
5519 kthread_queue_work(port->wq, &port->event_work);
5521 EXPORT_SYMBOL_GPL(tcpm_cc_change);
/*
 * Low-level driver entry point: queue a VBUS-change event; the worker will
 * poll get_vbus()/is_vbus_vsafe0v() to determine the new level.
 */
5523 void tcpm_vbus_change(struct tcpm_port *port)
5525 spin_lock(&port->pd_event_lock);
5526 port->pd_events |= TCPM_VBUS_EVENT;
5527 spin_unlock(&port->pd_event_lock);
5528 kthread_queue_work(port->wq, &port->event_work);
5530 EXPORT_SYMBOL_GPL(tcpm_vbus_change);
/*
 * Low-level driver entry point: report a received Hard Reset.  Note the
 * plain assignment (not |=): a hard reset supersedes any events already
 * pending, so the bitmap is deliberately overwritten.
 */
5532 void tcpm_pd_hard_reset(struct tcpm_port *port)
5534 spin_lock(&port->pd_event_lock);
5535 port->pd_events = TCPM_RESET_EVENT;
5536 spin_unlock(&port->pd_event_lock);
5537 kthread_queue_work(port->wq, &port->event_work);
5539 EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
/*
 * Low-level driver entry point: report detection of a Fast Role Swap
 * signal while acting as sink.
 */
5541 void tcpm_sink_frs(struct tcpm_port *port)
5543 spin_lock(&port->pd_event_lock);
5544 port->pd_events |= TCPM_FRS_EVENT;
5545 spin_unlock(&port->pd_event_lock);
5546 kthread_queue_work(port->wq, &port->event_work);
5548 EXPORT_SYMBOL_GPL(tcpm_sink_frs);
/*
 * Low-level driver entry point: report that the TCPC has autonomously
 * started sourcing VBUS (e.g. during FRS).
 */
5550 void tcpm_sourcing_vbus(struct tcpm_port *port)
5552 spin_lock(&port->pd_event_lock);
5553 port->pd_events |= TCPM_SOURCING_VBUS;
5554 spin_unlock(&port->pd_event_lock);
5555 kthread_queue_work(port->wq, &port->event_work);
5557 EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
/*
 * Low-level driver entry point: report that contaminant detection has
 * finished and the port is clean, so toggling can resume.
 */
5559 void tcpm_port_clean(struct tcpm_port *port)
5561 spin_lock(&port->pd_event_lock);
5562 port->pd_events |= TCPM_PORT_CLEAN;
5563 spin_unlock(&port->pd_event_lock);
5564 kthread_queue_work(port->wq, &port->event_work);
5566 EXPORT_SYMBOL_GPL(tcpm_port_clean);
/* Return true if this DRP port is currently in the TOGGLING state. */
5568 bool tcpm_port_is_toggling(struct tcpm_port *port)
5570 return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING;
5572 EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
/*
 * Low-level driver entry point: request that the port enter the
 * ERROR_RECOVERY state.
 */
5574 void tcpm_port_error_recovery(struct tcpm_port *port)
5576 spin_lock(&port->pd_event_lock);
5577 port->pd_events |= TCPM_PORT_ERROR;
5578 spin_unlock(&port->pd_event_lock);
5579 kthread_queue_work(port->wq, &port->event_work);
5581 EXPORT_SYMBOL_GPL(tcpm_port_error_recovery);
/*
 * Delayed worker that probes the partner's sink capabilities (GET_SINK_CAP)
 * on FRS-capable DRP/PD3.0 connections; retries while the state machine is
 * busy and gives up (sink_cap_done) once the query has been issued.
 */
5583 static void tcpm_enable_frs_work(struct kthread_work *work)
5585 struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
5588 mutex_lock(&port->lock);
5589 /* Not FRS capable */
5590 if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
5591 port->pwr_opmode != TYPEC_PWR_MODE_PD ||
5592 !port->tcpc->enable_frs ||
5593 /* Sink caps queried */
5594 port->sink_cap_done || port->negotiated_rev < PD_REV30)
5597 /* Send when the state machine is idle */
5598 if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover)
5601 port->upcoming_state = GET_SINK_CAP;
5602 ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
5603 if (ret == -EAGAIN) {
5604 port->upcoming_state = INVALID_STATE;
/* AMS could not start; mark done so we stop retrying forever. */
5606 port->sink_cap_done = true;
5610 mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
5612 mutex_unlock(&port->lock);
/*
 * Delayed worker that sends Discover Identity once the port is in
 * SRC_READY/SNK_READY and no VDM state machine is running.  Before PD3.0 a
 * UFP/device may not initiate the discovery, so the request is dropped.
 */
5615 static void tcpm_send_discover_work(struct kthread_work *work)
5617 struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
5619 mutex_lock(&port->lock);
5620 /* No need to send DISCOVER_IDENTITY anymore */
5621 if (!port->send_discover)
5624 if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
5625 port->send_discover = false;
5629 /* Retry if the port is not idle */
5630 if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
5631 mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
5635 tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
5638 mutex_unlock(&port->lock);
/*
 * typec_operations.dr_set: request a data-role swap.  For non-PD partners a
 * port reset is used instead of DR_Swap (only works if the configured
 * preferred role matches).  Blocks up to PD_ROLE_SWAP_TIMEOUT for the swap
 * to complete; swap_lock serializes concurrent swap requests.
 */
5641 static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
5643 struct tcpm_port *port = typec_get_drvdata(p);
5646 mutex_lock(&port->swap_lock);
5647 mutex_lock(&port->lock);
5649 if (port->typec_caps.data != TYPEC_PORT_DRD) {
5653 if (port->state != SRC_READY && port->state != SNK_READY) {
5658 if (port->data_role == data) {
5665 * 6.3.9: If an alternate mode is active, a request to swap
5666 * alternate modes shall trigger a port reset.
5667 * Reject data role swap request in this case.
5670 if (!port->pd_capable) {
5672 * If the partner is not PD capable, reset the port to
5673 * trigger a role change. This can only work if a preferred
5674 * role is configured, and if it matches the requested role.
5676 if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
5677 port->try_role == port->pwr_role) {
5681 port->non_pd_role_swap = true;
5682 tcpm_set_state(port, PORT_RESET, 0);
5684 port->upcoming_state = DR_SWAP_SEND;
5685 ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
5686 if (ret == -EAGAIN) {
5687 port->upcoming_state = INVALID_STATE;
5692 port->swap_status = 0;
5693 port->swap_pending = true;
5694 reinit_completion(&port->swap_complete);
/* Drop port->lock while sleeping so the state machine can progress. */
5695 mutex_unlock(&port->lock);
5697 if (!wait_for_completion_timeout(&port->swap_complete,
5698 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
5701 ret = port->swap_status;
5703 port->non_pd_role_swap = false;
5707 mutex_unlock(&port->lock);
5709 mutex_unlock(&port->swap_lock);
/*
 * typec_operations.pr_set: request a power-role swap (DRP ports only, from
 * SRC_READY/SNK_READY).  Starts the PR_SWAP AMS and waits up to
 * PD_ROLE_SWAP_TIMEOUT for completion.
 */
5713 static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
5715 struct tcpm_port *port = typec_get_drvdata(p);
5718 mutex_lock(&port->swap_lock);
5719 mutex_lock(&port->lock);
5721 if (port->port_type != TYPEC_PORT_DRP) {
5725 if (port->state != SRC_READY && port->state != SNK_READY) {
5730 if (role == port->pwr_role) {
5735 port->upcoming_state = PR_SWAP_SEND;
5736 ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
5737 if (ret == -EAGAIN) {
5738 port->upcoming_state = INVALID_STATE;
5742 port->swap_status = 0;
5743 port->swap_pending = true;
5744 reinit_completion(&port->swap_complete);
5745 mutex_unlock(&port->lock);
5747 if (!wait_for_completion_timeout(&port->swap_complete,
5748 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
5751 ret = port->swap_status;
5756 mutex_unlock(&port->lock);
5758 mutex_unlock(&port->swap_lock);
/*
 * typec_operations.vconn_set: request a VCONN swap via the VCONN_SWAP AMS
 * and wait up to PD_ROLE_SWAP_TIMEOUT for the result.
 */
5762 static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
5764 struct tcpm_port *port = typec_get_drvdata(p);
5767 mutex_lock(&port->swap_lock);
5768 mutex_lock(&port->lock);
5770 if (port->state != SRC_READY && port->state != SNK_READY) {
5775 if (role == port->vconn_role) {
5780 port->upcoming_state = VCONN_SWAP_SEND;
5781 ret = tcpm_ams_start(port, VCONN_SWAP);
5782 if (ret == -EAGAIN) {
5783 port->upcoming_state = INVALID_STATE;
5787 port->swap_status = 0;
5788 port->swap_pending = true;
5789 reinit_completion(&port->swap_complete);
5790 mutex_unlock(&port->lock);
5792 if (!wait_for_completion_timeout(&port->swap_complete,
5793 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
5796 ret = port->swap_status;
5801 mutex_unlock(&port->lock);
5803 mutex_unlock(&port->swap_lock);
/*
 * typec_operations.try_role: forward the preferred role to the TCPC and
 * cache it, resetting the Try.SRC/Try.SNK attempt counters.
 */
5807 static int tcpm_try_role(struct typec_port *p, int role)
5809 struct tcpm_port *port = typec_get_drvdata(p);
5810 struct tcpc_dev *tcpc = port->tcpc;
5813 mutex_lock(&port->lock);
5815 ret = tcpc->try_role(tcpc, role);
5817 port->try_role = role;
5818 port->try_src_count = 0;
5819 port->try_snk_count = 0;
5820 mutex_unlock(&port->lock);
/*
 * Request a new PPS operating current (mA).  Validates against the active
 * PPS APDO and the configured minimum sink power, rounds down to the PPS
 * current step, then renegotiates and waits for the result.
 */
5827 static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
5830 unsigned int target_mw;
5833 mutex_lock(&port->swap_lock);
5834 mutex_lock(&port->lock);
/*
 * Request a new PPS output voltage (mV).  Checks the resulting power
 * against operating_snk_mw, rounds down to the PPS voltage step, then
 * renegotiates and waits up to PD_PPS_CTRL_TIMEOUT.
 */
5886 static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
5888 unsigned int target_mw;
5891 mutex_lock(&port->swap_lock);
5892 mutex_lock(&port->lock);
5894 if (!port->pps_data.active) {
5899 if (port->state != SNK_READY) {
5904 target_mw = (port->current_limit * req_out_volt) / 1000;
5905 if (target_mw < port->operating_snk_mw) {
5910 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
5911 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
5912 if (ret == -EAGAIN) {
5913 port->upcoming_state = INVALID_STATE;
5917 /* Round down output voltage to align with PPS valid steps */
5918 req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
5920 reinit_completion(&port->pps_complete);
5921 port->pps_data.req_out_volt = req_out_volt;
5922 port->pps_status = 0;
5923 port->pps_pending = true;
5924 mutex_unlock(&port->lock);
5926 if (!wait_for_completion_timeout(&port->pps_complete,
5927 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
5930 ret = port->pps_status;
5935 mutex_unlock(&port->lock);
5937 mutex_unlock(&port->swap_lock);
/*
 * Activate or deactivate PPS: renegotiate either a PPS contract (seeded
 * from the current supply voltage/current) or a standard PDO contract,
 * waiting up to PD_PPS_CTRL_TIMEOUT for the outcome.
 */
5942 static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
5946 mutex_lock(&port->swap_lock);
5947 mutex_lock(&port->lock);
5949 if (!port->pps_data.supported) {
5954 /* Trying to deactivate PPS when already deactivated so just bail */
5955 if (!port->pps_data.active && !activate)
5958 if (port->state != SNK_READY) {
5964 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
5966 port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
5967 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
5968 if (ret == -EAGAIN) {
5969 port->upcoming_state = INVALID_STATE;
5973 reinit_completion(&port->pps_complete);
5974 port->pps_status = 0;
5975 port->pps_pending = true;
5977 /* Trigger PPS request or move back to standard PDO contract */
5979 port->pps_data.req_out_volt = port->supply_voltage;
5980 port->pps_data.req_op_curr = port->current_limit;
5982 mutex_unlock(&port->lock);
5984 if (!wait_for_completion_timeout(&port->pps_complete,
5985 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
5988 ret = port->pps_status;
5993 mutex_unlock(&port->lock);
5995 mutex_unlock(&port->swap_lock);
/*
 * One-time port bring-up: initialize the TCPC, reset the port, derive the
 * initial vbus_present/vbus_vsafe0v flags, seed the state machine from the
 * current CC status, and finally force a PORT_RESET for a clean slate.
 */
6000 static void tcpm_init(struct tcpm_port *port)
6002 enum typec_cc_status cc1, cc2;
6004 port->tcpc->init(port->tcpc);
6006 tcpm_reset_port(port);
6010 * Should possibly wait for VBUS to settle if it was enabled locally
6011 * since tcpm_reset_port() will disable VBUS.
6013 port->vbus_present = port->tcpc->get_vbus(port->tcpc);
6014 if (port->vbus_present)
6015 port->vbus_never_low = true;
6018 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V.
6019 * So implicitly vbus_vsafe0v = false.
6021 * 2. When vbus_present is false and TCPC does NOT support querying
6022 * vsafe0v status, then, it's best to assume vbus is at VSAFE0V i.e.
6023 * vbus_vsafe0v is true.
6025 * 3. When vbus_present is false and TCPC does support querying vsafe0v,
6026 * then, query tcpc for vsafe0v status.
6028 if (port->vbus_present)
6029 port->vbus_vsafe0v = false;
6030 else if (!port->tcpc->is_vbus_vsafe0v)
6031 port->vbus_vsafe0v = true;
6033 port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);
6035 tcpm_set_state(port, tcpm_default_state(port), 0);
6037 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
6038 _tcpm_cc_change(port, cc1, cc2);
6041 * Some adapters need a clean slate at startup, and won't recover
6042 * otherwise. So do not try to be fancy and force a clean disconnect.
6044 tcpm_set_state(port, PORT_RESET, 0);
/*
 * typec_operations.port_type_set: change the port type (SRC/SNK/DRP).
 * A PORT_RESET is only issued when the current roles do not already match
 * the requested fixed type, or when nothing is connected.
 */
6047 static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
6049 struct tcpm_port *port = typec_get_drvdata(p);
6051 mutex_lock(&port->lock);
6052 if (type == port->port_type)
6055 port->port_type = type;
6057 if (!port->connected) {
6058 tcpm_set_state(port, PORT_RESET, 0);
6059 } else if (type == TYPEC_PORT_SNK) {
6060 if (!(port->pwr_role == TYPEC_SINK &&
6061 port->data_role == TYPEC_DEVICE))
6062 tcpm_set_state(port, PORT_RESET, 0);
6063 } else if (type == TYPEC_PORT_SRC) {
6064 if (!(port->pwr_role == TYPEC_SOURCE &&
6065 port->data_role == TYPEC_HOST))
6066 tcpm_set_state(port, PORT_RESET, 0);
6070 mutex_unlock(&port->lock);
/*
 * Find the pd_data entry backing the given usb_power_delivery object.
 * Returns ERR_PTR(-ENODATA) if not found.
 * NOTE(review): the loop condition treats pd_list[] as NULL-terminated
 * rather than bounding by pd_count -- confirm pd_list always has a NULL
 * sentinel after the last registered entry.
 */
6074 static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd)
6078 for (i = 0; port->pd_list[i]; i++) {
6079 if (port->pd_list[i]->pd == pd)
6080 return port->pd_list[i];
6083 return ERR_PTR(-ENODATA);
/* typec_operations.pd_get: expose the port's registered PD objects. */
6086 static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p)
6088 struct tcpm_port *port = typec_get_drvdata(p);
/*
 * typec_operations.pd_set: switch the port to a different registered PD
 * object, copying its sink/source PDOs into the port and, if attached,
 * kicking off a renegotiation appropriate to the current role/state.
 */
6093 static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
6095 struct tcpm_port *port = typec_get_drvdata(p);
6096 struct pd_data *data;
6099 mutex_lock(&port->lock);
6101 if (port->selected_pd == pd)
6104 data = tcpm_find_pd_data(port, pd);
6106 ret = PTR_ERR(data);
6110 if (data->sink_desc.pdo[0]) {
6111 for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
6112 port->snk_pdo[i] = data->sink_desc.pdo[i];
/*
 * NOTE(review): after the loop, i already equals the number of PDOs
 * copied, so "i + 1" looks like an off-by-one count -- confirm.
 */
6113 port->nr_snk_pdo = i + 1;
6114 port->operating_snk_mw = data->operating_snk_mw;
6117 if (data->source_desc.pdo[0]) {
6118 for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
/*
 * NOTE(review): source PDOs are written into snk_pdo[] while
 * nr_src_pdo is updated below -- this looks like it should be
 * src_pdo[]; verify against the upstream driver before relying
 * on source-capability updates through this path.
 */
6119 port->snk_pdo[i] = data->source_desc.pdo[i];
6120 port->nr_src_pdo = i + 1;
6123 switch (port->state) {
6124 case SRC_UNATTACHED:
6125 case SRC_ATTACH_WAIT:
6127 tcpm_set_cc(port, tcpm_rp_cc(port));
6129 case SRC_SEND_CAPABILITIES:
6130 case SRC_SEND_CAPABILITIES_TIMEOUT:
6131 case SRC_NEGOTIATE_CAPABILITIES:
6133 case SRC_WAIT_NEW_CAPABILITIES:
/* Already attached as source: re-advertise the new capabilities. */
6134 port->caps_count = 0;
6135 port->upcoming_state = SRC_SEND_CAPABILITIES;
6136 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6137 if (ret == -EAGAIN) {
6138 port->upcoming_state = INVALID_STATE;
6142 case SNK_NEGOTIATE_CAPABILITIES:
6143 case SNK_NEGOTIATE_PPS_CAPABILITIES:
6145 case SNK_TRANSITION_SINK:
6146 case SNK_TRANSITION_SINK_VBUS:
6147 if (port->pps_data.active)
6148 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6149 else if (port->pd_capable)
6150 port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
6154 port->update_sink_caps = true;
6156 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6157 if (ret == -EAGAIN) {
6158 port->upcoming_state = INVALID_STATE;
6166 port->port_source_caps = data->source_cap;
6167 port->port_sink_caps = data->sink_cap;
6168 port->selected_pd = pd;
6170 mutex_unlock(&port->lock);
/* typec class callbacks implemented by this port manager. */
6174 static const struct typec_operations tcpm_ops = {
6175 .try_role = tcpm_try_role,
6176 .dr_set = tcpm_dr_set,
6177 .pr_set = tcpm_pr_set,
6178 .vconn_set = tcpm_vconn_set,
6179 .port_type_set = tcpm_port_type_set,
6180 .pd_get = tcpm_pd_get,
6181 .pd_set = tcpm_pd_set
/*
 * Low-level driver entry point: the TCPC was reset; re-initialize the port
 * under port->lock.
 */
6184 void tcpm_tcpc_reset(struct tcpm_port *port)
6186 mutex_lock(&port->lock);
6187 /* XXX: Maintain PD connection if possible? */
6189 mutex_unlock(&port->lock);
6191 EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
/*
 * Tear down everything tcpm_port_register_pd() created: capability objects,
 * pd_data entries and the usb_power_delivery registrations, clearing each
 * slot as it goes.
 */
6193 static void tcpm_port_unregister_pd(struct tcpm_port *port)
6197 port->port_sink_caps = NULL;
6198 port->port_source_caps = NULL;
6199 for (i = 0; i < port->pd_count; i++) {
6200 usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
6201 kfree(port->pd_list[i]->sink_cap);
6202 usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
6203 kfree(port->pd_list[i]->source_cap);
6204 devm_kfree(port->dev, port->pd_list[i]);
6205 port->pd_list[i] = NULL;
6206 usb_power_delivery_unregister(port->pds[i]);
6207 port->pds[i] = NULL;
/*
 * Register a usb_power_delivery object (plus source/sink capability
 * objects) for every pd_data entry parsed from firmware, and select the
 * first one as the port's active PD.  On any failure all registrations
 * made so far are undone.
 */
6211 static int tcpm_port_register_pd(struct tcpm_port *port)
6213 struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
6214 struct usb_power_delivery_capabilities *cap;
6217 if (!port->nr_src_pdo && !port->nr_snk_pdo)
6220 for (i = 0; i < port->pd_count; i++) {
6221 port->pds[i] = usb_power_delivery_register(port->dev, &desc);
6222 if (IS_ERR(port->pds[i])) {
6223 ret = PTR_ERR(port->pds[i]);
6224 goto err_unregister;
6226 port->pd_list[i]->pd = port->pds[i];
6228 if (port->pd_list[i]->source_desc.pdo[0]) {
6229 cap = usb_power_delivery_register_capabilities(port->pds[i],
6230 &port->pd_list[i]->source_desc);
6233 goto err_unregister;
6235 port->pd_list[i]->source_cap = cap;
6238 if (port->pd_list[i]->sink_desc.pdo[0]) {
6239 cap = usb_power_delivery_register_capabilities(port->pds[i],
6240 &port->pd_list[i]->sink_desc);
6243 goto err_unregister;
6245 port->pd_list[i]->sink_cap = cap;
6249 port->port_source_caps = port->pd_list[0]->source_cap;
6250 port->port_sink_caps = port->pd_list[0]->sink_cap;
6251 port->selected_pd = port->pds[0];
6255 tcpm_port_unregister_pd(port);
/*
 * Parse the port's firmware node (DT/ACPI): typec capabilities, accessory
 * modes, PD support, FRS current, and the per-capability source/sink PDO
 * sets (either the multi-entry "capabilities" child node or the legacy
 * single set of source-pdos/sink-pdos properties).
 */
6260 static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
6262 struct fwnode_handle *capabilities, *child, *caps = NULL;
6263 unsigned int nr_src_pdo, nr_snk_pdo;
6264 const char *opmode_str;
6265 u32 *src_pdo, *snk_pdo;
6266 u32 uw, frs_current;
6274 * This fwnode has a "compatible" property, but is never populated as a
6275 * struct device. Instead we simply parse it to read the properties.
6276 * This it breaks fw_devlink=on. To maintain backward compatibility
6277 * with existing DT files, we work around this by deleting any
6278 * fwnode_links to/from this fwnode.
6280 fw_devlink_purge_absent_suppliers(fwnode);
6282 ret = typec_get_fw_cap(&port->typec_caps, fwnode);
6288 if (fwnode_property_read_bool(fwnode, "accessory-mode-audio"))
6289 port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;
6291 if (fwnode_property_read_bool(fwnode, "accessory-mode-debug"))
6292 port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;
6294 port->port_type = port->typec_caps.type;
6295 port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
6296 port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
6297 port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
/* Non-PD ports only need the Type-C current advertisement (Rp level). */
6299 if (!port->pd_supported) {
6300 ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
6303 ret = typec_find_pwr_opmode(opmode_str);
6306 port->src_rp = tcpm_pwr_opmode_to_rp(ret);
6310 /* The following code are applicable to pd-capable ports, i.e. pd_supported is true. */
6312 /* FRS can only be supported by DRP ports */
6313 if (port->port_type == TYPEC_PORT_DRP) {
6314 ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
6316 if (!ret && frs_current <= FRS_5V_3A)
6317 port->new_source_frs_current = frs_current;
6323 /* For the backward compatibility, "capabilities" node is optional. */
6324 capabilities = fwnode_get_named_child_node(fwnode, "capabilities");
6325 if (!capabilities) {
6328 fwnode_for_each_child_node(capabilities, child)
6331 if (!port->pd_count) {
6333 goto put_capabilities;
6337 port->pds = devm_kcalloc(port->dev, port->pd_count, sizeof(struct usb_power_delivery *),
6341 goto put_capabilities;
6344 port->pd_list = devm_kcalloc(port->dev, port->pd_count, sizeof(struct pd_data *),
6346 if (!port->pd_list) {
6348 goto put_capabilities;
6351 for (i = 0; i < port->pd_count; i++) {
6352 port->pd_list[i] = devm_kzalloc(port->dev, sizeof(struct pd_data), GFP_KERNEL);
6353 if (!port->pd_list[i]) {
6355 goto put_capabilities;
6358 src_pdo = port->pd_list[i]->source_desc.pdo;
6359 port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
6360 snk_pdo = port->pd_list[i]->sink_desc.pdo;
6361 port->pd_list[i]->sink_desc.role = TYPEC_SINK;
6363 /* If "capabilities" is NULL, fall back to single pd cap population. */
6367 caps = fwnode_get_next_child_node(capabilities, caps);
6369 if (port->port_type != TYPEC_PORT_SNK) {
6370 ret = fwnode_property_count_u32(caps, "source-pdos");
6378 nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
6379 ret = fwnode_property_read_u32_array(caps, "source-pdos", src_pdo,
6384 ret = tcpm_validate_caps(port, src_pdo, nr_src_pdo);
/* The port-level PDO copy is taken from the first capability set. */
6389 port->nr_src_pdo = nr_src_pdo;
6390 memcpy_and_pad(port->src_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
6391 port->pd_list[0]->source_desc.pdo,
6392 sizeof(u32) * nr_src_pdo,
6397 if (port->port_type != TYPEC_PORT_SRC) {
6398 ret = fwnode_property_count_u32(caps, "sink-pdos");
6407 nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
6408 ret = fwnode_property_read_u32_array(caps, "sink-pdos", snk_pdo,
6413 ret = tcpm_validate_caps(port, snk_pdo, nr_snk_pdo);
6417 if (fwnode_property_read_u32(caps, "op-sink-microwatt", &uw) < 0) {
6422 port->pd_list[i]->operating_snk_mw = uw / 1000;
6425 port->nr_snk_pdo = nr_snk_pdo;
6426 memcpy_and_pad(port->snk_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
6427 port->pd_list[0]->sink_desc.pdo,
6428 sizeof(u32) * nr_snk_pdo,
6430 port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
6437 fwnode_handle_put(caps);
6439 fwnode_handle_put(capabilities);
/*
 * Parse the optional "sink-vdos" (and, if present, the mandatory companion
 * "sink-vdos-v1" for PD2.0 partners) identity VDO arrays from firmware.
 */
6443 static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode)
6447 /* sink-vdos is optional */
6448 ret = fwnode_property_count_u32(fwnode, "sink-vdos");
6452 port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
6453 if (port->nr_snk_vdo) {
6454 ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
6461 /* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
6462 if (port->nr_snk_vdo) {
6463 ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
6469 port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
6470 ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
6472 port->nr_snk_vdo_v1);
6480 /* Power Supply access to expose source power information */
/* Values reported through POWER_SUPPLY_PROP_ONLINE for the port psy. */
6481 enum tcpm_psy_online_states {
6482 TCPM_PSY_OFFLINE = 0,
6483 TCPM_PSY_FIXED_ONLINE,
6484 TCPM_PSY_PROG_ONLINE,
/* Properties exposed by the port's power_supply device. */
6487 static enum power_supply_property tcpm_psy_props[] = {
6488 POWER_SUPPLY_PROP_USB_TYPE,
6489 POWER_SUPPLY_PROP_ONLINE,
6490 POWER_SUPPLY_PROP_VOLTAGE_MIN,
6491 POWER_SUPPLY_PROP_VOLTAGE_MAX,
6492 POWER_SUPPLY_PROP_VOLTAGE_NOW,
6493 POWER_SUPPLY_PROP_CURRENT_MAX,
6494 POWER_SUPPLY_PROP_CURRENT_NOW,
/*
 * ONLINE property: offline unless charging from VBUS; programmable
 * (PPS) vs fixed online is distinguished via pps_data.active.
 */
6497 static int tcpm_psy_get_online(struct tcpm_port *port,
6498 union power_supply_propval *val)
6500 if (port->vbus_charge) {
6501 if (port->pps_data.active)
6502 val->intval = TCPM_PSY_PROG_ONLINE;
6504 val->intval = TCPM_PSY_FIXED_ONLINE;
6506 val->intval = TCPM_PSY_OFFLINE;
6512 static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
6513 union power_supply_propval *val)
6515 if (port->pps_data.active)
6516 val->intval = port->pps_data.min_volt * 1000;
6518 val->intval = port->supply_voltage * 1000;
6523 static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
6524 union power_supply_propval *val)
6526 if (port->pps_data.active)
6527 val->intval = port->pps_data.max_volt * 1000;
6529 val->intval = port->supply_voltage * 1000;
6534 static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
6535 union power_supply_propval *val)
6537 val->intval = port->supply_voltage * 1000;
6542 static int tcpm_psy_get_current_max(struct tcpm_port *port,
6543 union power_supply_propval *val)
6545 if (port->pps_data.active)
6546 val->intval = port->pps_data.max_curr * 1000;
6548 val->intval = port->current_limit * 1000;
6553 static int tcpm_psy_get_current_now(struct tcpm_port *port,
6554 union power_supply_propval *val)
6556 val->intval = port->current_limit * 1000;
6561 static int tcpm_psy_get_input_power_limit(struct tcpm_port *port,
6562 union power_supply_propval *val)
6564 unsigned int src_mv, src_ma, max_src_uw = 0;
6565 unsigned int i, tmp;
6567 for (i = 0; i < port->nr_source_caps; i++) {
6568 u32 pdo = port->source_caps[i];
6570 if (pdo_type(pdo) == PDO_TYPE_FIXED) {
6571 src_mv = pdo_fixed_voltage(pdo);
6572 src_ma = pdo_max_current(pdo);
6573 tmp = src_mv * src_ma;
6574 max_src_uw = tmp > max_src_uw ? tmp : max_src_uw;
6578 val->intval = max_src_uw;
6582 static int tcpm_psy_get_prop(struct power_supply *psy,
6583 enum power_supply_property psp,
6584 union power_supply_propval *val)
6586 struct tcpm_port *port = power_supply_get_drvdata(psy);
6590 case POWER_SUPPLY_PROP_USB_TYPE:
6591 val->intval = port->usb_type;
6593 case POWER_SUPPLY_PROP_ONLINE:
6594 ret = tcpm_psy_get_online(port, val);
6596 case POWER_SUPPLY_PROP_VOLTAGE_MIN:
6597 ret = tcpm_psy_get_voltage_min(port, val);
6599 case POWER_SUPPLY_PROP_VOLTAGE_MAX:
6600 ret = tcpm_psy_get_voltage_max(port, val);
6602 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
6603 ret = tcpm_psy_get_voltage_now(port, val);
6605 case POWER_SUPPLY_PROP_CURRENT_MAX:
6606 ret = tcpm_psy_get_current_max(port, val);
6608 case POWER_SUPPLY_PROP_CURRENT_NOW:
6609 ret = tcpm_psy_get_current_now(port, val);
6611 case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
6612 tcpm_psy_get_input_power_limit(port, val);
6622 static int tcpm_psy_set_online(struct tcpm_port *port,
6623 const union power_supply_propval *val)
6627 switch (val->intval) {
6628 case TCPM_PSY_FIXED_ONLINE:
6629 ret = tcpm_pps_activate(port, false);
6631 case TCPM_PSY_PROG_ONLINE:
6632 ret = tcpm_pps_activate(port, true);
/*
 * power_supply set_property callback: writes control the PPS contract
 * (activation, output voltage, operating current).
 * NOTE(review): this view is elided — the switch scaffolding, -EOPNOTSUPP
 * path and final return are not visible here.
 */
static int tcpm_psy_set_prop(struct power_supply *psy,
			     enum power_supply_property psp,
			     const union power_supply_propval *val)
	struct tcpm_port *port = power_supply_get_drvdata(psy);
	/*
	 * All the properties below are related to USB PD. The check needs to be
	 * property specific when a non-pd related property is added.
	 */
	if (!port->pd_supported)
	case POWER_SUPPLY_PROP_ONLINE:
		ret = tcpm_psy_set_online(port, val);
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		/* psy class values are in uV; tcpm works in mV — hence /1000 */
		ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		/* Reject requests above the negotiated PPS maximum (max_curr is mA, intval uA) */
		if (val->intval > port->pps_data.max_curr * 1000)
		ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
	/* Notify userspace of the (possibly changed) supply state */
	power_supply_changed(port->psy);
6677 static int tcpm_psy_prop_writeable(struct power_supply *psy,
6678 enum power_supply_property psp)
6681 case POWER_SUPPLY_PROP_ONLINE:
6682 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
6683 case POWER_SUPPLY_PROP_CURRENT_NOW:
/* Charger types this supply can report through POWER_SUPPLY_PROP_USB_TYPE. */
static enum power_supply_usb_type tcpm_psy_usb_types[] = {
	POWER_SUPPLY_USB_TYPE_C,
	POWER_SUPPLY_USB_TYPE_PD,
	POWER_SUPPLY_USB_TYPE_PD_PPS,
/* Prefix for the per-port supply name: "tcpm-source-psy-<device name>". */
static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";
/*
 * Register a device-managed power supply exposing the port's source power
 * information. Returns 0 on success or a negative errno.
 * NOTE(review): this view is elided — local declarations (psy_name) and the
 * devm_kzalloc failure path are not visible here.
 */
static int devm_tcpm_psy_register(struct tcpm_port *port)
	struct power_supply_config psy_cfg = {};
	const char *port_dev_name = dev_name(port->dev);
	/* prefix + device name + trailing NUL */
	size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
			      strlen(port_dev_name) + 1;
	psy_cfg.drv_data = port;
	psy_cfg.fwnode = dev_fwnode(port->dev);
	/* devm allocation: freed automatically when the device goes away */
	psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
	snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
	port->psy_desc.name = psy_name;
	port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
	port->psy_desc.usb_types = tcpm_psy_usb_types;
	port->psy_desc.num_usb_types = ARRAY_SIZE(tcpm_psy_usb_types);
	port->psy_desc.properties = tcpm_psy_props;
	port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
	port->psy_desc.get_property = tcpm_psy_get_prop;
	port->psy_desc.set_property = tcpm_psy_set_prop;
	port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable;
	/* Until a PD contract exists, report plain Type-C charging */
	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
	port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
	return PTR_ERR_OR_ZERO(port->psy);
6732 static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
6734 struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
6736 if (port->registered)
6737 kthread_queue_work(port->wq, &port->state_machine);
6738 return HRTIMER_NORESTART;
6741 static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
6743 struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
6745 if (port->registered)
6746 kthread_queue_work(port->wq, &port->vdm_state_machine);
6747 return HRTIMER_NORESTART;
6750 static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
6752 struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
6754 if (port->registered)
6755 kthread_queue_work(port->wq, &port->enable_frs);
6756 return HRTIMER_NORESTART;
6759 static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
6761 struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
6763 if (port->registered)
6764 kthread_queue_work(port->wq, &port->send_discover_work);
6765 return HRTIMER_NORESTART;
/*
 * tcpm_register_port - allocate, initialize and register a TCPM port on
 * behalf of a low-level TCPC driver.
 * @dev:  parent device owning the port
 * @tcpc: low-level Type-C port controller callbacks (all checked below are
 *        mandatory)
 *
 * Returns the new port, or an ERR_PTR on failure.
 * NOTE(review): this view is elided — several statements, the error-path
 * labels (out_destroy_wq/out_role_sw_put/out_unregister_pd) and local
 * declarations are not visible here.
 */
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
	struct tcpm_port *port;
	/* Every one of these low-level callbacks is required */
	if (!dev || !tcpc ||
	    !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
	    !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
	    !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
		return ERR_PTR(-EINVAL);
	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	mutex_init(&port->lock);
	mutex_init(&port->swap_lock);
	/* Dedicated kworker; state machines must not run from random context */
	port->wq = kthread_create_worker(0, dev_name(dev));
	if (IS_ERR(port->wq))
		return ERR_CAST(port->wq);
	/* Run the port worker at real-time priority */
	sched_set_fifo(port->wq->task);
	kthread_init_work(&port->state_machine, tcpm_state_machine_work);
	kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
	kthread_init_work(&port->event_work, tcpm_pd_event_handler);
	kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
	kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
	/* Each timer just queues the matching work item (see handlers above) */
	hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->state_machine_timer.function = state_machine_timer_handler;
	hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
	hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->enable_frs_timer.function = enable_frs_timer_handler;
	hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->send_discover_timer.function = send_discover_timer_handler;
	spin_lock_init(&port->pd_event_lock);
	init_completion(&port->tx_complete);
	init_completion(&port->swap_complete);
	init_completion(&port->pps_complete);
	tcpm_debugfs_init(port);
	/* Capabilities come from the TCPC's firmware node */
	err = tcpm_fw_get_caps(port, tcpc->fwnode);
		goto out_destroy_wq;
	err = tcpm_fw_get_snk_vdos(port, tcpc->fwnode);
		goto out_destroy_wq;
	port->try_role = port->typec_caps.prefer_role;
	port->typec_caps.revision = 0x0120;	/* Type-C spec release 1.2 */
	port->typec_caps.pd_revision = 0x0300;	/* USB-PD spec release 3.0 */
	port->typec_caps.svdm_version = SVDM_VER_2_0;
	port->typec_caps.driver_data = port;
	port->typec_caps.ops = &tcpm_ops;
	port->typec_caps.orientation_aware = 1;
	port->partner_desc.identity = &port->partner_ident;
	/* Prefer a role switch tied to the device, fall back to the fwnode */
	port->role_sw = usb_role_switch_get(port->dev);
	port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
	if (IS_ERR(port->role_sw)) {
		err = PTR_ERR(port->role_sw);
		goto out_destroy_wq;
	err = devm_tcpm_psy_register(port);
		goto out_role_sw_put;
	power_supply_changed(port->psy);
	err = tcpm_port_register_pd(port);
		goto out_role_sw_put;
	port->typec_caps.pd = port->pds[0];
	port->typec_port = typec_register_port(port->dev, &port->typec_caps);
	if (IS_ERR(port->typec_port)) {
		err = PTR_ERR(port->typec_port);
		goto out_unregister_pd;
	typec_port_register_altmodes(port->typec_port,
				     &tcpm_altmode_ops, port,
				     port->port_altmode, ALTMODE_DISCOVERY_MAX);
	/* From here on the timer handlers may queue work */
	port->registered = true;
	mutex_lock(&port->lock);
	mutex_unlock(&port->lock);
	tcpm_log(port, "%s: registered", dev_name(dev));
	/* Error unwinding (labels elided in this view) */
	tcpm_port_unregister_pd(port);
	usb_role_switch_put(port->role_sw);
	tcpm_debugfs_exit(port);
	kthread_destroy_worker(port->wq);
	return ERR_PTR(err);
EXPORT_SYMBOL_GPL(tcpm_register_port);
/*
 * tcpm_unregister_port - tear down a port created by tcpm_register_port.
 * @port: port to unregister
 *
 * Ordering matters: clear ->registered and destroy the worker first so the
 * timer handlers stop queueing work, then cancel the timers, then release
 * the remaining resources in reverse registration order.
 */
void tcpm_unregister_port(struct tcpm_port *port)
	port->registered = false;
	kthread_destroy_worker(port->wq);
	hrtimer_cancel(&port->send_discover_timer);
	hrtimer_cancel(&port->enable_frs_timer);
	hrtimer_cancel(&port->vdm_state_machine_timer);
	hrtimer_cancel(&port->state_machine_timer);
	tcpm_reset_port(port);
	tcpm_port_unregister_pd(port);
	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
		typec_unregister_altmode(port->port_altmode[i]);
	typec_unregister_port(port->typec_port);
	usb_role_switch_put(port->role_sw);
	tcpm_debugfs_exit(port);
EXPORT_SYMBOL_GPL(tcpm_unregister_port);
/* Module metadata */
MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
MODULE_DESCRIPTION("USB Type-C Port Manager");
MODULE_LICENSE("GPL");