From 81816f5048bac5f4b202ed1443e9788dfc31a18c Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 7 Oct 2020 17:08:47 +0300 Subject: [PATCH 01/22] thunderbolt: Do not clear USB4 router protocol adapter IFC and ISE bits These fields are marked as vendor defined in the USB4 spec and should not be modified by the software, so only clear them when we are dealing with pre-USB4 hardware. Signed-off-by: Mika Westerberg Acked-by: Yehezkel Bernat Reviewed-by: Greg Kroah-Hartman --- drivers/thunderbolt/path.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c index 03e7b714deab..7c2c45d9ba4a 100644 --- a/drivers/thunderbolt/path.c +++ b/drivers/thunderbolt/path.c @@ -406,10 +406,17 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index, if (!hop.pending) { if (clear_fc) { - /* Clear flow control */ - hop.ingress_fc = 0; + /* + * Clear flow control. Protocol adapters + * IFC and ISE bits are vendor defined + * in the USB4 spec so we clear them + * only for pre-USB4 adapters. + */ + if (!tb_switch_is_usb4(port->sw)) { + hop.ingress_fc = 0; + hop.ingress_shared_buffer = 0; + } hop.egress_fc = 0; - hop.ingress_shared_buffer = 0; hop.egress_shared_buffer = 0; return tb_port_write(port, &hop, TB_CFG_HOPS, From d67274bacb8a2bc2a6cfd2a0cef4963379e3eb26 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 7 Oct 2020 17:01:43 +0300 Subject: [PATCH 02/22] thunderbolt: Find XDomain by route instead of UUID We are also going to represent loops back to the host as XDomains, and they all have the same (host) UUID, so finding them needs to use the route string instead. This also requires that we check whether the XDomain device has been added to the bus before its properties can be updated. Otherwise the remote UUID might not be populated yet. Signed-off-by: Mika Westerberg Acked-by: Yehezkel Bernat Reviewed-by: Greg Kroah-Hartman --- drivers/thunderbolt/xdomain.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index 48907853732a..e436e9efa7e7 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -587,8 +587,6 @@ static void tb_xdp_handle_request(struct work_struct *work) break; case PROPERTIES_CHANGED_REQUEST: { - const struct tb_xdp_properties_changed *xchg = - (const struct tb_xdp_properties_changed *)pkg; struct tb_xdomain *xd; ret = tb_xdp_properties_changed_response(ctl, route, sequence); @@ -598,10 +596,12 @@ static void tb_xdp_handle_request(struct work_struct *work) * the xdomain related to this connection as well in * case there is a change in services it offers. */ - xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid); + xd = tb_xdomain_find_by_route_locked(tb, route); if (xd) { + if (device_is_registered(&xd->dev)) { queue_delayed_work(tb->wq, &xd->get_properties_work, msecs_to_jiffies(50)); + } tb_xdomain_put(xd); } From 47844ecb8cec2916abaad9c1d1b801f49ed6265d Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 7 Oct 2020 18:19:28 +0300 Subject: [PATCH 03/22] thunderbolt: Create XDomain devices for loops back to the host It is perfectly possible to have loops back from the routers to the host, or even from one host port to another. Instead of ignoring these, we create XDomain devices for each. This allows creating services such as the DMA traffic test that is used in manufacturing, for example.
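As a rough, hedged illustration of the loop case (a sketch of the behavior after this change, not an excerpt to apply): the remote UUID read from the other end equals our own host UUID, and instead of bailing out we only log it and keep enumerating the XDomain:

    /* Sketch: a remote UUID equal to the local one means the link
     * loops back to this host. Keep going so an XDomain device (and
     * its services, e.g. the DMA traffic test) gets created for it. */
    if (uuid_equal(&uuid, xd->local_uuid))
            dev_dbg(&xd->dev, "intra-domain loop detected\n");
    /* no early return: continue with normal XDomain enumeration */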
Signed-off-by: Mika Westerberg Acked-by: Yehezkel Bernat Reviewed-by: Greg Kroah-Hartman --- drivers/thunderbolt/xdomain.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index e436e9efa7e7..da229ac4e471 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -961,10 +961,8 @@ static void tb_xdomain_get_uuid(struct work_struct *work) return; } - if (uuid_equal(&uuid, xd->local_uuid)) { + if (uuid_equal(&uuid, xd->local_uuid)) dev_dbg(&xd->dev, "intra-domain loop detected\n"); - return; - } /* * If the UUID is different, there is another domain connected From 4210d50f0b3e423e10a7a254b2a67f5c5318868e Mon Sep 17 00:00:00 2001 From: Isaac Hazan Date: Thu, 24 Sep 2020 11:43:58 +0300 Subject: [PATCH 04/22] thunderbolt: Add link_speed and link_width to XDomain Link speed and link width are needed for checking expected values in case of using a loopback service. Signed-off-by: Isaac Hazan Signed-off-by: Mika Westerberg Acked-by: Yehezkel Bernat Reviewed-by: Greg Kroah-Hartman --- .../ABI/testing/sysfs-bus-thunderbolt | 28 ++++++++ drivers/thunderbolt/switch.c | 9 ++- drivers/thunderbolt/tb.h | 1 + drivers/thunderbolt/xdomain.c | 65 +++++++++++++++++++ include/linux/thunderbolt.h | 4 ++ 5 files changed, 106 insertions(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt index 0b4ab9e4b8f4..a91b4b24496e 100644 --- a/Documentation/ABI/testing/sysfs-bus-thunderbolt +++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt @@ -1,3 +1,31 @@ +What: /sys/bus/thunderbolt/devices//rx_speed +Date: Feb 2021 +KernelVersion: 5.11 +Contact: Isaac Hazan +Description: This attribute reports the XDomain RX speed per lane. + All RX lanes run at the same speed. + +What: /sys/bus/thunderbolt/devices//rx_lanes +Date: Feb 2021 +KernelVersion: 5.11 +Contact: Isaac Hazan +Description: This attribute reports the number of RX lanes the XDomain + is using simultaneously through its upstream port. + +What: /sys/bus/thunderbolt/devices//tx_speed +Date: Feb 2021 +KernelVersion: 5.11 +Contact: Isaac Hazan +Description: This attribute reports the XDomain TX speed per lane. + All TX lanes run at the same speed. + +What: /sys/bus/thunderbolt/devices//tx_lanes +Date: Feb 2021 +KernelVersion: 5.11 +Contact: Isaac Hazan +Description: This attribute reports number of TX lanes the XDomain + is using simultaneously through its upstream port. + What: /sys/bus/thunderbolt/devices/.../domainX/boot_acl Date: Jun 2018 KernelVersion: 4.17 diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index c73bbfe69ba1..05a360901790 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -932,7 +932,14 @@ int tb_port_get_link_speed(struct tb_port *port) return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10; } -static int tb_port_get_link_width(struct tb_port *port) +/** + * tb_port_get_link_width() - Get current link width + * @port: Port to check (USB4 or CIO) + * + * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane) + * or negative errno in case of failure. 
+ */ +int tb_port_get_link_width(struct tb_port *port) { u32 val; int ret; diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index a9995e21b916..3a826315049e 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -862,6 +862,7 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, (p) = tb_next_port_on_path((src), (dst), (p))) int tb_port_get_link_speed(struct tb_port *port); +int tb_port_get_link_width(struct tb_port *port); int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap); diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index da229ac4e471..83a315f96934 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -942,6 +942,43 @@ static void tb_xdomain_restore_paths(struct tb_xdomain *xd) } } +static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd) +{ + return tb_to_switch(xd->dev.parent); +} + +static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd) +{ + bool change = false; + struct tb_port *port; + int ret; + + port = tb_port_at(xd->route, tb_xdomain_parent(xd)); + + ret = tb_port_get_link_speed(port); + if (ret < 0) + return ret; + + if (xd->link_speed != ret) + change = true; + + xd->link_speed = ret; + + ret = tb_port_get_link_width(port); + if (ret < 0) + return ret; + + if (xd->link_width != ret) + change = true; + + xd->link_width = ret; + + if (change) + kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE); + + return 0; +} + static void tb_xdomain_get_uuid(struct work_struct *work) { struct tb_xdomain *xd = container_of(work, typeof(*xd), @@ -1053,6 +1090,8 @@ static void tb_xdomain_get_properties(struct work_struct *work) xd->properties = dir; xd->property_block_gen = gen; + tb_xdomain_update_link_attributes(xd); + tb_xdomain_restore_paths(xd); mutex_unlock(&xd->lock); @@ -1159,9 +1198,35 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(unique_id); +static ssize_t speed_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); + + return sprintf(buf, "%u.0 Gb/s\n", xd->link_speed); +} + +static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); +static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); + +static ssize_t lanes_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); + + return sprintf(buf, "%u\n", xd->link_width); +} + +static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL); +static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL); + static struct attribute *xdomain_attrs[] = { &dev_attr_device.attr, &dev_attr_device_name.attr, + &dev_attr_rx_lanes.attr, + &dev_attr_rx_speed.attr, + &dev_attr_tx_lanes.attr, + &dev_attr_tx_speed.attr, &dev_attr_unique_id.attr, &dev_attr_vendor.attr, &dev_attr_vendor_name.attr, diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 5db2b11ab085..e441af88ed77 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -179,6 +179,8 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir); * @lock: Lock to serialize access to the following fields of this structure * @vendor_name: Name of the vendor (or %NULL if not known) * @device_name: Name of the device (or %NULL if not known) + * @link_speed: Speed of the link in Gb/s + * 
@link_width: Width of the link (1 or 2) * @is_unplugged: The XDomain is unplugged * @resume: The XDomain is being resumed * @needs_uuid: If the XDomain does not have @remote_uuid it will be @@ -223,6 +225,8 @@ struct tb_xdomain { struct mutex lock; const char *vendor_name; const char *device_name; + unsigned int link_speed; + unsigned int link_width; bool is_unplugged; bool resume; bool needs_uuid; From 5cc0df9ce10a860aaeac53f8df1cc8754c5c7b03 Mon Sep 17 00:00:00 2001 From: Isaac Hazan Date: Thu, 24 Sep 2020 11:44:01 +0300 Subject: [PATCH 05/22] thunderbolt: Add functions for enabling and disabling lane bonding on XDomain These can be used by service drivers to enable and disable lane bonding as needed. Signed-off-by: Isaac Hazan Signed-off-by: Mika Westerberg Acked-by: Yehezkel Bernat Reviewed-by: Greg Kroah-Hartman --- drivers/thunderbolt/switch.c | 24 +++++++++++-- drivers/thunderbolt/tb.h | 3 ++ drivers/thunderbolt/xdomain.c | 66 +++++++++++++++++++++++++++++++++++ include/linux/thunderbolt.h | 2 ++ 4 files changed, 92 insertions(+), 3 deletions(-) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 05a360901790..cdfd8cccfe19 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -503,12 +503,13 @@ static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port) /** * tb_port_state() - get connectedness state of a port + * @port: the port to check * * The port must have a TB_CAP_PHY (i.e. it should be a real port). * * Return: Returns an enum tb_port_state on success or an error code on failure. */ -static int tb_port_state(struct tb_port *port) +int tb_port_state(struct tb_port *port) { struct tb_cap_phy phy; int res; @@ -1008,7 +1009,16 @@ static int tb_port_set_link_width(struct tb_port *port, unsigned int width) port->cap_phy + LANE_ADP_CS_1, 1); } -static int tb_port_lane_bonding_enable(struct tb_port *port) +/** + * tb_port_lane_bonding_enable() - Enable bonding on port + * @port: port to enable + * + * Enable bonding by setting the link width of the port and the + * other port in case of dual link port. + * + * Return: %0 in case of success and negative errno in case of error + */ +int tb_port_lane_bonding_enable(struct tb_port *port) { int ret; @@ -1038,7 +1048,15 @@ static int tb_port_lane_bonding_enable(struct tb_port *port) return 0; } -static void tb_port_lane_bonding_disable(struct tb_port *port) +/** + * tb_port_lane_bonding_disable() - Disable bonding on port + * @port: port to disable + * + * Disable bonding by setting the link width of the port and the + * other port in case of dual link port. 
+ * + */ +void tb_port_lane_bonding_disable(struct tb_port *port) { port->dual_link_port->bonded = false; port->bonded = false; diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 3a826315049e..e98d3561648d 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -863,6 +863,9 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, int tb_port_get_link_speed(struct tb_port *port); int tb_port_get_link_width(struct tb_port *port); +int tb_port_state(struct tb_port *port); +int tb_port_lane_bonding_enable(struct tb_port *port); +void tb_port_lane_bonding_disable(struct tb_port *port); int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap); diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index 83a315f96934..65108216bfe3 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -21,6 +22,7 @@ #define XDOMAIN_UUID_RETRIES 10 #define XDOMAIN_PROPERTIES_RETRIES 60 #define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10 +#define XDOMAIN_BONDING_WAIT 100 /* ms */ struct xdomain_request_work { struct work_struct work; @@ -1443,6 +1445,70 @@ void tb_xdomain_remove(struct tb_xdomain *xd) device_unregister(&xd->dev); } +/** + * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain + * @xd: XDomain connection + * + * Lane bonding is disabled by default for XDomains. This function tries + * to enable bonding by first enabling the port and waiting for the CL0 + * state. + * + * Return: %0 in case of success and negative errno in case of error. + */ +int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd) +{ + struct tb_port *port; + int ret; + + port = tb_port_at(xd->route, tb_xdomain_parent(xd)); + if (!port->dual_link_port) + return -ENODEV; + + ret = tb_port_enable(port->dual_link_port); + if (ret) + return ret; + + ret = tb_wait_for_port(port->dual_link_port, true); + if (ret < 0) + return ret; + if (!ret) + return -ENOTCONN; + + ret = tb_port_lane_bonding_enable(port); + if (ret) { + tb_port_warn(port, "failed to enable lane bonding\n"); + return ret; + } + + tb_xdomain_update_link_attributes(xd); + + dev_dbg(&xd->dev, "lane bonding enabled\n"); + return 0; +} +EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable); + +/** + * tb_xdomain_lane_bonding_disable() - Disable lane bonding + * @xd: XDomain connection + * + * Lane bonding is disabled by default for XDomains. If bonding has been + * enabled, this function can be used to disable it. 
+ */ +void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd) +{ + struct tb_port *port; + + port = tb_port_at(xd->route, tb_xdomain_parent(xd)); + if (port->dual_link_port) { + tb_port_lane_bonding_disable(port); + tb_port_disable(port->dual_link_port); + tb_xdomain_update_link_attributes(xd); + + dev_dbg(&xd->dev, "lane bonding disabled\n"); + } +} +EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable); + /** * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection * @xd: XDomain connection diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index e441af88ed77..0a747f92847e 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -247,6 +247,8 @@ struct tb_xdomain { u8 depth; }; +int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd); +void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd); int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path, u16 transmit_ring, u16 receive_path, u16 receive_ring); From 407ac931aefda91ac90498c6b6e6893982173613 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 7 Oct 2020 17:53:44 +0300 Subject: [PATCH 06/22] thunderbolt: Create debugfs directory automatically for services This allows service drivers to use it as parent directory if they need to add their own debugfs entries. Signed-off-by: Mika Westerberg Acked-by: Yehezkel Bernat Reviewed-by: Greg Kroah-Hartman --- drivers/thunderbolt/debugfs.c | 24 ++++++++++++++++++++++++ drivers/thunderbolt/tb.h | 4 ++++ drivers/thunderbolt/xdomain.c | 3 +++ include/linux/thunderbolt.h | 4 ++++ 4 files changed, 35 insertions(+) diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c index 3680b2784ea1..e53ca8270acd 100644 --- a/drivers/thunderbolt/debugfs.c +++ b/drivers/thunderbolt/debugfs.c @@ -690,6 +690,30 @@ void tb_switch_debugfs_remove(struct tb_switch *sw) debugfs_remove_recursive(sw->debugfs_dir); } +/** + * tb_service_debugfs_init() - Add debugfs directory for service + * @svc: Thunderbolt service pointer + * + * Adds debugfs directory for service. + */ +void tb_service_debugfs_init(struct tb_service *svc) +{ + svc->debugfs_dir = debugfs_create_dir(dev_name(&svc->dev), + tb_debugfs_root); +} + +/** + * tb_service_debugfs_remove() - Remove service debugfs directory + * @svc: Thunderbolt service pointer + * + * Removes the previously created debugfs directory for @svc. 
+ */ +void tb_service_debugfs_remove(struct tb_service *svc) +{ + debugfs_remove_recursive(svc->debugfs_dir); + svc->debugfs_dir = NULL; +} + void tb_debugfs_init(void) { tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL); diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index e98d3561648d..a21000649009 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -1027,11 +1027,15 @@ void tb_debugfs_init(void); void tb_debugfs_exit(void); void tb_switch_debugfs_init(struct tb_switch *sw); void tb_switch_debugfs_remove(struct tb_switch *sw); +void tb_service_debugfs_init(struct tb_service *svc); +void tb_service_debugfs_remove(struct tb_service *svc); #else static inline void tb_debugfs_init(void) { } static inline void tb_debugfs_exit(void) { } static inline void tb_switch_debugfs_init(struct tb_switch *sw) { } static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { } +static inline void tb_service_debugfs_init(struct tb_service *svc) { } +static inline void tb_service_debugfs_remove(struct tb_service *svc) { } #endif #ifdef CONFIG_USB4_KUNIT_TEST diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index 65108216bfe3..1a0491b461fd 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -779,6 +779,7 @@ static void tb_service_release(struct device *dev) struct tb_service *svc = container_of(dev, struct tb_service, dev); struct tb_xdomain *xd = tb_service_parent(svc); + tb_service_debugfs_remove(svc); ida_simple_remove(&xd->service_ids, svc->id); kfree(svc->key); kfree(svc); @@ -892,6 +893,8 @@ static void enumerate_services(struct tb_xdomain *xd) svc->dev.parent = &xd->dev; dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id); + tb_service_debugfs_init(svc); + if (device_register(&svc->dev)) { put_device(&svc->dev); break; diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 0a747f92847e..a844fd5d96ab 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -350,6 +350,9 @@ void tb_unregister_protocol_handler(struct tb_protocol_handler *handler); * @prtcvers: Protocol version from the properties directory * @prtcrevs: Protocol software revision from the properties directory * @prtcstns: Protocol settings mask from the properties directory + * @debugfs_dir: Pointer to the service debugfs directory. Always created + * when debugfs is enabled. Can be used by service drivers to + * add their own entries under the service. * * Each domain exposes set of services it supports as collection of * properties. For each service there will be one corresponding @@ -363,6 +366,7 @@ struct tb_service { u32 prtcvers; u32 prtcrevs; u32 prtcstns; + struct dentry *debugfs_dir; }; static inline struct tb_service *tb_service_get(struct tb_service *svc) From 5bf722df5d37e82fd252b1d3e37cde4eab355c1c Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 7 Oct 2020 18:17:12 +0300 Subject: [PATCH 07/22] thunderbolt: Make it possible to allocate one directional DMA tunnel With DMA tunnels it is possible that the service using it does not require bi-directional paths so make RX and TX optional (but of course one of them needs to be set). 
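As a hedged usage sketch (argument order follows the kernel-doc in the diff below; the caller context and the ring/HopID variables are made up for illustration), a transmit-only tunnel is requested by passing zero for the receive side:

    /* Sketch only: allocate a TX-only DMA tunnel; no RX path. */
    struct tb_tunnel *tunnel;

    tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, transmit_ring,
                                 transmit_path, 0 /* receive_ring */,
                                 0 /* receive_path */);
    if (!tunnel)
            return NULL; /* tunnel or path allocation failed */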
Signed-off-by: Mika Westerberg Acked-by: Yehezkel Bernat Reviewed-by: Greg Kroah-Hartman --- drivers/thunderbolt/tunnel.c | 50 ++++++++++++++++++++++-------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 829b6ccdd5d4..dcdf9c7a9cae 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -34,9 +34,6 @@ #define TB_DP_AUX_PATH_OUT 1 #define TB_DP_AUX_PATH_IN 2 -#define TB_DMA_PATH_OUT 0 -#define TB_DMA_PATH_IN 1 - static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" }; #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \ @@ -829,10 +826,10 @@ static void tb_dma_init_path(struct tb_path *path, unsigned int isb, * @nhi: Host controller port * @dst: Destination null port which the other domain is connected to * @transmit_ring: NHI ring number used to send packets towards the - * other domain + * other domain. Set to %0 if TX path is not needed. * @transmit_path: HopID used for transmitting packets * @receive_ring: NHI ring number used to receive packets from the - * other domain + * other domain. Set to %0 if RX path is not needed. * @receive_path: HopID used for receiving packets * * Return: Returns a tb_tunnel on success or NULL on failure. @@ -843,10 +840,19 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, int receive_path) { struct tb_tunnel *tunnel; + size_t npaths = 0, i = 0; struct tb_path *path; u32 credits; - tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA); + if (receive_ring) + npaths++; + if (transmit_ring) + npaths++; + + if (WARN_ON(!npaths)) + return NULL; + + tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA); if (!tunnel) return NULL; @@ -856,22 +862,28 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, credits = tb_dma_credits(nhi); - path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX"); - if (!path) { - tb_tunnel_free(tunnel); - return NULL; + if (receive_ring) { + path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, + "DMA RX"); + if (!path) { + tb_tunnel_free(tunnel); + return NULL; + } + tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL, + credits); + tunnel->paths[i++] = path; } - tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL, - credits); - tunnel->paths[TB_DMA_PATH_IN] = path; - path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX"); - if (!path) { - tb_tunnel_free(tunnel); - return NULL; + if (transmit_ring) { + path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, + "DMA TX"); + if (!path) { + tb_tunnel_free(tunnel); + return NULL; + } + tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits); + tunnel->paths[i++] = path; } - tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits); - tunnel->paths[TB_DMA_PATH_OUT] = path; return tunnel; } From afe704a2d0618ebdb559b5ddb059f6cdbfc78783 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Mon, 19 Oct 2020 19:15:20 +0300 Subject: [PATCH 08/22] thunderbolt: Add support for end-to-end flow control The USB4 spec defines end-to-end (E2E) flow control that can be used between hosts to prevent overflow of an RX ring. We previously had this partially implemented, but that code was removed with commit 53f13319d131 ("thunderbolt: Get rid of E2E workaround") with the idea that we would add it back properly if there is ever a need. Now that we are going to add a DMA traffic test driver (in subsequent patches) this can be useful.
For this reason we modify tb_ring_alloc_rx/tx() so that they accept RING_FLAG_E2E and configure the hardware ring accordingly. The RX side also requires passing TX HopID (e2e_tx_hop) used in the credit grant packets. Signed-off-by: Mika Westerberg Acked-by: Yehezkel Bernat Reviewed-by: Greg Kroah-Hartman --- drivers/net/thunderbolt.c | 2 +- drivers/thunderbolt/ctl.c | 4 ++-- drivers/thunderbolt/nhi.c | 36 ++++++++++++++++++++++++++++++++---- include/linux/thunderbolt.h | 8 +++++++- 4 files changed, 42 insertions(+), 8 deletions(-) diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index 3160443ef3b9..d7b5f87eaa15 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -866,7 +866,7 @@ static int tbnet_open(struct net_device *dev) eof_mask = BIT(TBIP_PDF_FRAME_END); ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, - RING_FLAG_FRAME, sof_mask, eof_mask, + RING_FLAG_FRAME, 0, sof_mask, eof_mask, tbnet_start_poll, net); if (!ring) { netdev_err(dev, "failed to allocate Rx ring\n"); diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index 9894b8f63064..1d86e27a0ef3 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c @@ -628,8 +628,8 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data) if (!ctl->tx) goto err; - ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff, - 0xffff, NULL, NULL); + ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff, + 0xffff, NULL, NULL); if (!ctl->rx) goto err; diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 3f79baa54829..a69bc6b49405 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -483,7 +483,7 @@ err_unlock: static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size, bool transmit, unsigned int flags, - u16 sof_mask, u16 eof_mask, + int e2e_tx_hop, u16 sof_mask, u16 eof_mask, void (*start_poll)(void *), void *poll_data) { @@ -506,6 +506,7 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size, ring->is_tx = transmit; ring->size = size; ring->flags = flags; + ring->e2e_tx_hop = e2e_tx_hop; ring->sof_mask = sof_mask; ring->eof_mask = eof_mask; ring->head = 0; @@ -550,7 +551,7 @@ err_free_ring: struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, unsigned int flags) { - return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL); + return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, 0, NULL, NULL); } EXPORT_SYMBOL_GPL(tb_ring_alloc_tx); @@ -560,6 +561,7 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx); * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation. 
* @size: Number of entries in the ring * @flags: Flags for the ring + * @e2e_tx_hop: Transmit HopID when E2E is enabled in @flags * @sof_mask: Mask of PDF values that start a frame * @eof_mask: Mask of PDF values that end a frame * @start_poll: If not %NULL the ring will call this function when an @@ -568,10 +570,11 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx); * @poll_data: Optional data passed to @start_poll */ struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, - unsigned int flags, u16 sof_mask, u16 eof_mask, + unsigned int flags, int e2e_tx_hop, + u16 sof_mask, u16 eof_mask, void (*start_poll)(void *), void *poll_data) { - return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask, + return tb_ring_alloc(nhi, hop, size, false, flags, e2e_tx_hop, sof_mask, eof_mask, start_poll, poll_data); } EXPORT_SYMBOL_GPL(tb_ring_alloc_rx); @@ -618,6 +621,31 @@ void tb_ring_start(struct tb_ring *ring) ring_iowrite32options(ring, sof_eof_mask, 4); ring_iowrite32options(ring, flags, 0); } + + /* + * Now that the ring valid bit is set we can configure E2E if + * enabled for the ring. + */ + if (ring->flags & RING_FLAG_E2E) { + if (!ring->is_tx) { + u32 hop; + + hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT; + hop &= REG_RX_OPTIONS_E2E_HOP_MASK; + flags |= hop; + + dev_dbg(&ring->nhi->pdev->dev, + "enabling E2E for %s %d with TX HopID %d\n", + RING_TYPE(ring), ring->hop, ring->e2e_tx_hop); + } else { + dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n", + RING_TYPE(ring), ring->hop); + } + + flags |= RING_FLAG_E2E_FLOW_CONTROL; + ring_iowrite32options(ring, flags, 0); + } + ring_interrupt_active(ring, true); ring->running = true; err: diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index a844fd5d96ab..034dccf93955 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -481,6 +481,8 @@ struct tb_nhi { * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise. * @vector: MSI-X vector number the ring uses (only set if @irq is > 0) * @flags: Ring specific flags + * @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to + * RX ring. For TX ring this should be set to %0. * @sof_mask: Bit mask used to detect start of frame PDF * @eof_mask: Bit mask used to detect end of frame PDF * @start_poll: Called when ring interrupt is triggered to start @@ -504,6 +506,7 @@ struct tb_ring { int irq; u8 vector; unsigned int flags; + int e2e_tx_hop; u16 sof_mask; u16 eof_mask; void (*start_poll)(void *data); @@ -514,6 +517,8 @@ struct tb_ring { #define RING_FLAG_NO_SUSPEND BIT(0) /* Configure the ring to be in frame mode */ #define RING_FLAG_FRAME BIT(1) +/* Enable end-to-end flow control */ +#define RING_FLAG_E2E BIT(2) struct ring_frame; typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled); @@ -562,7 +567,8 @@ struct ring_frame { struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, unsigned int flags); struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, - unsigned int flags, u16 sof_mask, u16 eof_mask, + unsigned int flags, int e2e_tx_hop, + u16 sof_mask, u16 eof_mask, void (*start_poll)(void *), void *poll_data); void tb_ring_start(struct tb_ring *ring); void tb_ring_stop(struct tb_ring *ring); From edc0f494ed966e39e5619be7cdaeb9873e1f4fe1 Mon Sep 17 00:00:00 2001 From: Isaac Hazan Date: Thu, 24 Sep 2020 11:44:02 +0300 Subject: [PATCH 09/22] thunderbolt: Add DMA traffic test driver This driver allows sending DMA traffic over XDomain connection. 
Specifically over a loopback connection using either a Thunderbolt/USB4 cable that is connected back to the host router port, or a special loopback dongle that has RX and TX lines crossed. This can be useful on the manufacturing floor to check whether Thunderbolt/USB4 ports are functional. The driver exposes a debugfs directory under the XDomain service that can be used to configure the driver, start the test, and check the results. If a loopback dongle is used, the steps to send and receive 1000 packets are: # modprobe thunderbolt_dma_test # echo 1000 > /sys/kernel/debug/thunderbolt//dma_test/packets_to_receive # echo 1000 > /sys/kernel/debug/thunderbolt//dma_test/packets_to_send # echo 1 > /sys/kernel/debug/thunderbolt//dma_test/test # cat /sys/kernel/debug/thunderbolt//dma_test/status When a cable is connected back to the host, there are two Thunderbolt services; one is configured for receiving (it does not matter which one): # modprobe thunderbolt_dma_test # echo 1000 > /sys/kernel/debug/thunderbolt//dma_test/packets_to_receive # echo 1 > /sys/kernel/debug/thunderbolt//dma_test/test The other one for sending: # echo 1000 > /sys/kernel/debug/thunderbolt//dma_test/packets_to_send # echo 1 > /sys/kernel/debug/thunderbolt//dma_test/test Results can be read from both services' status attributes. Signed-off-by: Isaac Hazan Signed-off-by: Mika Westerberg Acked-by: Yehezkel Bernat Reviewed-by: Greg Kroah-Hartman --- drivers/thunderbolt/Kconfig | 13 + drivers/thunderbolt/Makefile | 3 + drivers/thunderbolt/dma_test.c | 736 +++++++++++++++++++++++++++++++++ 3 files changed, 752 insertions(+) create mode 100644 drivers/thunderbolt/dma_test.c diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig index 7fc058f81d00..4bfec8a28064 100644 --- a/drivers/thunderbolt/Kconfig +++ b/drivers/thunderbolt/Kconfig @@ -31,4 +31,17 @@ config USB4_KUNIT_TEST bool "KUnit tests" depends on KUNIT=y +config USB4_DMA_TEST + tristate "DMA traffic test driver" + depends on DEBUG_FS + help + This allows sending and receiving DMA traffic through a loopback + connection. The loopback connection can be done by either a special + dongle that has TX/RX lines crossed, or by simply connecting a + cable back to the host. Only enable this if you know what you + are doing. Normal users and distro kernels should say N here. + + To compile this driver as a module, choose M here. The module will be called thunderbolt_dma_test.
+ endif # USB4 diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile index 571537371072..7aa48f6c41d9 100644 --- a/drivers/thunderbolt/Makefile +++ b/drivers/thunderbolt/Makefile @@ -7,3 +7,6 @@ thunderbolt-objs += nvm.o retimer.o quirks.o thunderbolt-${CONFIG_ACPI} += acpi.o thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o + +thunderbolt_dma_test-${CONFIG_USB4_DMA_TEST} += dma_test.o +obj-$(CONFIG_USB4_DMA_TEST) += thunderbolt_dma_test.o diff --git a/drivers/thunderbolt/dma_test.c b/drivers/thunderbolt/dma_test.c new file mode 100644 index 000000000000..f924423fa180 --- /dev/null +++ b/drivers/thunderbolt/dma_test.c @@ -0,0 +1,736 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMA traffic test driver + * + * Copyright (C) 2020, Intel Corporation + * Authors: Isaac Hazan + * Mika Westerberg + */ + +#include +#include +#include +#include +#include +#include + +#define DMA_TEST_HOPID 8 +#define DMA_TEST_TX_RING_SIZE 64 +#define DMA_TEST_RX_RING_SIZE 256 +#define DMA_TEST_FRAME_SIZE SZ_4K +#define DMA_TEST_DATA_PATTERN 0x0123456789abcdefLL +#define DMA_TEST_MAX_PACKETS 1000 + +enum dma_test_frame_pdf { + DMA_TEST_PDF_FRAME_START = 1, + DMA_TEST_PDF_FRAME_END, +}; + +struct dma_test_frame { + struct dma_test *dma_test; + void *data; + struct ring_frame frame; +}; + +enum dma_test_test_error { + DMA_TEST_NO_ERROR, + DMA_TEST_INTERRUPTED, + DMA_TEST_BUFFER_ERROR, + DMA_TEST_DMA_ERROR, + DMA_TEST_CONFIG_ERROR, + DMA_TEST_SPEED_ERROR, + DMA_TEST_WIDTH_ERROR, + DMA_TEST_BONDING_ERROR, + DMA_TEST_PACKET_ERROR, +}; + +static const char * const dma_test_error_names[] = { + [DMA_TEST_NO_ERROR] = "no errors", + [DMA_TEST_INTERRUPTED] = "interrupted by signal", + [DMA_TEST_BUFFER_ERROR] = "no memory for packet buffers", + [DMA_TEST_DMA_ERROR] = "DMA ring setup failed", + [DMA_TEST_CONFIG_ERROR] = "configuration is not valid", + [DMA_TEST_SPEED_ERROR] = "unexpected link speed", + [DMA_TEST_WIDTH_ERROR] = "unexpected link width", + [DMA_TEST_BONDING_ERROR] = "lane bonding configuration error", + [DMA_TEST_PACKET_ERROR] = "packet check failed", +}; + +enum dma_test_result { + DMA_TEST_NOT_RUN, + DMA_TEST_SUCCESS, + DMA_TEST_FAIL, +}; + +static const char * const dma_test_result_names[] = { + [DMA_TEST_NOT_RUN] = "not run", + [DMA_TEST_SUCCESS] = "success", + [DMA_TEST_FAIL] = "failed", +}; + +/** + * struct dma_test - DMA test device driver private data + * @svc: XDomain service the driver is bound to + * @xd: XDomain the service belongs to + * @rx_ring: Software ring holding RX frames + * @tx_ring: Software ring holding TX frames + * @packets_to_send: Number of packets to send + * @packets_to_receive: Number of packets to receive + * @packets_sent: Actual number of packets sent + * @packets_received: Actual number of packets received + * @link_speed: Expected link speed (Gb/s), %0 to use whatever is negotiated + * @link_width: Expected link width (Gb/s), %0 to use whatever is negotiated + * @crc_errors: Number of CRC errors during the test run + * @buffer_overflow_errors: Number of buffer overflow errors during the test + * run + * @result: Result of the last run + * @error_code: Error code of the last run + * @complete: Used to wait for the Rx to complete + * @lock: Lock serializing access to this structure + * @debugfs_dir: dentry of this dma_test + */ +struct dma_test { + const struct tb_service *svc; + struct tb_xdomain *xd; + struct tb_ring *rx_ring; + struct tb_ring *tx_ring; + unsigned int packets_to_send; + unsigned int 
packets_to_receive; + unsigned int packets_sent; + unsigned int packets_received; + unsigned int link_speed; + unsigned int link_width; + unsigned int crc_errors; + unsigned int buffer_overflow_errors; + enum dma_test_result result; + enum dma_test_test_error error_code; + struct completion complete; + struct mutex lock; + struct dentry *debugfs_dir; +}; + +/* DMA test property directory UUID: 3188cd10-6523-4a5a-a682-fdca07a248d8 */ +static const uuid_t dma_test_dir_uuid = + UUID_INIT(0x3188cd10, 0x6523, 0x4a5a, + 0xa6, 0x82, 0xfd, 0xca, 0x07, 0xa2, 0x48, 0xd8); + +static struct tb_property_dir *dma_test_dir; +static void *dma_test_pattern; + +static void dma_test_free_rings(struct dma_test *dt) +{ + if (dt->rx_ring) { + tb_ring_free(dt->rx_ring); + dt->rx_ring = NULL; + } + if (dt->tx_ring) { + tb_ring_free(dt->tx_ring); + dt->tx_ring = NULL; + } +} + +static int dma_test_start_rings(struct dma_test *dt) +{ + unsigned int flags = RING_FLAG_FRAME; + struct tb_xdomain *xd = dt->xd; + int ret, e2e_tx_hop = 0; + struct tb_ring *ring; + + /* + * If we are both sender and receiver (traffic goes over a + * special loopback dongle) enable E2E flow control. This avoids + * losing packets. + */ + if (dt->packets_to_send && dt->packets_to_receive) + flags |= RING_FLAG_E2E; + + if (dt->packets_to_send) { + ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE, + flags); + if (!ring) + return -ENOMEM; + + dt->tx_ring = ring; + e2e_tx_hop = ring->hop; + } + + if (dt->packets_to_receive) { + u16 sof_mask, eof_mask; + + sof_mask = BIT(DMA_TEST_PDF_FRAME_START); + eof_mask = BIT(DMA_TEST_PDF_FRAME_END); + + ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE, + flags, e2e_tx_hop, sof_mask, eof_mask, + NULL, NULL); + if (!ring) { + dma_test_free_rings(dt); + return -ENOMEM; + } + + dt->rx_ring = ring; + } + + ret = tb_xdomain_enable_paths(dt->xd, DMA_TEST_HOPID, + dt->tx_ring ? dt->tx_ring->hop : 0, + DMA_TEST_HOPID, + dt->rx_ring ? 
dt->rx_ring->hop : 0); + if (ret) { + dma_test_free_rings(dt); + return ret; + } + + if (dt->tx_ring) + tb_ring_start(dt->tx_ring); + if (dt->rx_ring) + tb_ring_start(dt->rx_ring); + + return 0; +} + +static void dma_test_stop_rings(struct dma_test *dt) +{ + if (dt->rx_ring) + tb_ring_stop(dt->rx_ring); + if (dt->tx_ring) + tb_ring_stop(dt->tx_ring); + + if (tb_xdomain_disable_paths(dt->xd)) + dev_warn(&dt->svc->dev, "failed to disable DMA paths\n"); + + dma_test_free_rings(dt); +} + +static void dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame, + bool canceled) +{ + struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame); + struct dma_test *dt = tf->dma_test; + struct device *dma_dev = tb_ring_dma_device(dt->rx_ring); + + dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE, + DMA_FROM_DEVICE); + kfree(tf->data); + + if (canceled) { + kfree(tf); + return; + } + + dt->packets_received++; + dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received, + dt->packets_to_receive); + + if (tf->frame.flags & RING_DESC_CRC_ERROR) + dt->crc_errors++; + if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) + dt->buffer_overflow_errors++; + + kfree(tf); + + if (dt->packets_received == dt->packets_to_receive) + complete(&dt->complete); +} + +static int dma_test_submit_rx(struct dma_test *dt, size_t npackets) +{ + struct device *dma_dev = tb_ring_dma_device(dt->rx_ring); + int i; + + for (i = 0; i < npackets; i++) { + struct dma_test_frame *tf; + dma_addr_t dma_addr; + + tf = kzalloc(sizeof(*tf), GFP_KERNEL); + if (!tf) + return -ENOMEM; + + tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL); + if (!tf->data) { + kfree(tf); + return -ENOMEM; + } + + dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(dma_dev, dma_addr)) { + kfree(tf->data); + kfree(tf); + return -ENOMEM; + } + + tf->frame.buffer_phy = dma_addr; + tf->frame.callback = dma_test_rx_callback; + tf->dma_test = dt; + INIT_LIST_HEAD(&tf->frame.list); + + tb_ring_rx(dt->rx_ring, &tf->frame); + } + + return 0; +} + +static void dma_test_tx_callback(struct tb_ring *ring, struct ring_frame *frame, + bool canceled) +{ + struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame); + struct dma_test *dt = tf->dma_test; + struct device *dma_dev = tb_ring_dma_device(dt->tx_ring); + + dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE, + DMA_TO_DEVICE); + kfree(tf->data); + kfree(tf); +} + +static int dma_test_submit_tx(struct dma_test *dt, size_t npackets) +{ + struct device *dma_dev = tb_ring_dma_device(dt->tx_ring); + int i; + + for (i = 0; i < npackets; i++) { + struct dma_test_frame *tf; + dma_addr_t dma_addr; + + tf = kzalloc(sizeof(*tf), GFP_KERNEL); + if (!tf) + return -ENOMEM; + + tf->frame.size = 0; /* means 4096 */ + tf->dma_test = dt; + + tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL); + if (!tf->data) { + kfree(tf); + return -ENOMEM; + } + + memcpy(tf->data, dma_test_pattern, DMA_TEST_FRAME_SIZE); + + dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, dma_addr)) { + kfree(tf->data); + kfree(tf); + return -ENOMEM; + } + + tf->frame.buffer_phy = dma_addr; + tf->frame.callback = dma_test_tx_callback; + tf->frame.sof = DMA_TEST_PDF_FRAME_START; + tf->frame.eof = DMA_TEST_PDF_FRAME_END; + INIT_LIST_HEAD(&tf->frame.list); + + dt->packets_sent++; + dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent, + dt->packets_to_send); + + 
tb_ring_tx(dt->tx_ring, &tf->frame); + } + + return 0; +} + +#define DMA_TEST_DEBUGFS_ATTR(__fops, __get, __validate, __set) \ +static int __fops ## _show(void *data, u64 *val) \ +{ \ + struct tb_service *svc = data; \ + struct dma_test *dt = tb_service_get_drvdata(svc); \ + int ret; \ + \ + ret = mutex_lock_interruptible(&dt->lock); \ + if (ret) \ + return ret; \ + __get(dt, val); \ + mutex_unlock(&dt->lock); \ + return 0; \ +} \ +static int __fops ## _store(void *data, u64 val) \ +{ \ + struct tb_service *svc = data; \ + struct dma_test *dt = tb_service_get_drvdata(svc); \ + int ret; \ + \ + ret = __validate(val); \ + if (ret) \ + return ret; \ + ret = mutex_lock_interruptible(&dt->lock); \ + if (ret) \ + return ret; \ + __set(dt, val); \ + mutex_unlock(&dt->lock); \ + return 0; \ +} \ +DEFINE_DEBUGFS_ATTRIBUTE(__fops ## _fops, __fops ## _show, \ + __fops ## _store, "%llu\n") + +static void lanes_get(const struct dma_test *dt, u64 *val) +{ + *val = dt->link_width; +} + +static int lanes_validate(u64 val) +{ + return val > 2 ? -EINVAL : 0; +} + +static void lanes_set(struct dma_test *dt, u64 val) +{ + dt->link_width = val; +} +DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set); + +static void speed_get(const struct dma_test *dt, u64 *val) +{ + *val = dt->link_speed; +} + +static int speed_validate(u64 val) +{ + switch (val) { + case 20: + case 10: + case 0: + return 0; + default: + return -EINVAL; + } +} + +static void speed_set(struct dma_test *dt, u64 val) +{ + dt->link_speed = val; +} +DMA_TEST_DEBUGFS_ATTR(speed, speed_get, speed_validate, speed_set); + +static void packets_to_receive_get(const struct dma_test *dt, u64 *val) +{ + *val = dt->packets_to_receive; +} + +static int packets_to_receive_validate(u64 val) +{ + return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0; +} + +static void packets_to_receive_set(struct dma_test *dt, u64 val) +{ + dt->packets_to_receive = val; +} +DMA_TEST_DEBUGFS_ATTR(packets_to_receive, packets_to_receive_get, + packets_to_receive_validate, packets_to_receive_set); + +static void packets_to_send_get(const struct dma_test *dt, u64 *val) +{ + *val = dt->packets_to_send; +} + +static int packets_to_send_validate(u64 val) +{ + return val > DMA_TEST_MAX_PACKETS ? 
-EINVAL : 0; +} + +static void packets_to_send_set(struct dma_test *dt, u64 val) +{ + dt->packets_to_send = val; +} +DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get, + packets_to_send_validate, packets_to_send_set); + +static int dma_test_set_bonding(struct dma_test *dt) +{ + switch (dt->link_width) { + case 2: + return tb_xdomain_lane_bonding_enable(dt->xd); + case 1: + tb_xdomain_lane_bonding_disable(dt->xd); + fallthrough; + default: + return 0; + } +} + +static bool dma_test_validate_config(struct dma_test *dt) +{ + if (!dt->packets_to_send && !dt->packets_to_receive) + return false; + if (dt->packets_to_send && dt->packets_to_receive && + dt->packets_to_send != dt->packets_to_receive) + return false; + return true; +} + +static void dma_test_check_errors(struct dma_test *dt, int ret) +{ + if (!dt->error_code) { + if (dt->link_speed && dt->xd->link_speed != dt->link_speed) { + dt->error_code = DMA_TEST_SPEED_ERROR; + } else if (dt->link_width && + dt->xd->link_width != dt->link_width) { + dt->error_code = DMA_TEST_WIDTH_ERROR; + } else if (dt->packets_to_send != dt->packets_sent || + dt->packets_to_receive != dt->packets_received || + dt->crc_errors || dt->buffer_overflow_errors) { + dt->error_code = DMA_TEST_PACKET_ERROR; + } else { + return; + } + } + + dt->result = DMA_TEST_FAIL; +} + +static int test_store(void *data, u64 val) +{ + struct tb_service *svc = data; + struct dma_test *dt = tb_service_get_drvdata(svc); + int ret; + + if (val != 1) + return -EINVAL; + + ret = mutex_lock_interruptible(&dt->lock); + if (ret) + return ret; + + dt->packets_sent = 0; + dt->packets_received = 0; + dt->crc_errors = 0; + dt->buffer_overflow_errors = 0; + dt->result = DMA_TEST_SUCCESS; + dt->error_code = DMA_TEST_NO_ERROR; + + dev_dbg(&svc->dev, "DMA test starting\n"); + if (dt->link_speed) + dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed); + if (dt->link_width) + dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width); + dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send); + dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive); + + if (!dma_test_validate_config(dt)) { + dev_err(&svc->dev, "invalid test configuration\n"); + dt->error_code = DMA_TEST_CONFIG_ERROR; + goto out_unlock; + } + + ret = dma_test_set_bonding(dt); + if (ret) { + dev_err(&svc->dev, "failed to set lanes\n"); + dt->error_code = DMA_TEST_BONDING_ERROR; + goto out_unlock; + } + + ret = dma_test_start_rings(dt); + if (ret) { + dev_err(&svc->dev, "failed to enable DMA rings\n"); + dt->error_code = DMA_TEST_DMA_ERROR; + goto out_unlock; + } + + if (dt->packets_to_receive) { + reinit_completion(&dt->complete); + ret = dma_test_submit_rx(dt, dt->packets_to_receive); + if (ret) { + dev_err(&svc->dev, "failed to submit receive buffers\n"); + dt->error_code = DMA_TEST_BUFFER_ERROR; + goto out_stop; + } + } + + if (dt->packets_to_send) { + ret = dma_test_submit_tx(dt, dt->packets_to_send); + if (ret) { + dev_err(&svc->dev, "failed to submit transmit buffers\n"); + dt->error_code = DMA_TEST_BUFFER_ERROR; + goto out_stop; + } + } + + if (dt->packets_to_receive) { + ret = wait_for_completion_interruptible(&dt->complete); + if (ret) { + dt->error_code = DMA_TEST_INTERRUPTED; + goto out_stop; + } + } + +out_stop: + dma_test_stop_rings(dt); +out_unlock: + dma_test_check_errors(dt, ret); + mutex_unlock(&dt->lock); + + dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]); + return ret; +} +DEFINE_DEBUGFS_ATTRIBUTE(test_fops, NULL, test_store, "%llu\n"); + +static int 
status_show(struct seq_file *s, void *not_used) +{ + struct tb_service *svc = s->private; + struct dma_test *dt = tb_service_get_drvdata(svc); + int ret; + + ret = mutex_lock_interruptible(&dt->lock); + if (ret) + return ret; + + seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]); + if (dt->result == DMA_TEST_NOT_RUN) + goto out_unlock; + + seq_printf(s, "packets received: %u\n", dt->packets_received); + seq_printf(s, "packets sent: %u\n", dt->packets_sent); + seq_printf(s, "CRC errors: %u\n", dt->crc_errors); + seq_printf(s, "buffer overflow errors: %u\n", + dt->buffer_overflow_errors); + seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]); + +out_unlock: + mutex_unlock(&dt->lock); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(status); + +static void dma_test_debugfs_init(struct tb_service *svc) +{ + struct dma_test *dt = tb_service_get_drvdata(svc); + + dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir); + + debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops); + debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops); + debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc, + &packets_to_receive_fops); + debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc, + &packets_to_send_fops); + debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops); + debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops); +} + +static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id) +{ + struct tb_xdomain *xd = tb_service_parent(svc); + struct dma_test *dt; + + dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL); + if (!dt) + return -ENOMEM; + + dt->svc = svc; + dt->xd = xd; + mutex_init(&dt->lock); + init_completion(&dt->complete); + + tb_service_set_drvdata(svc, dt); + dma_test_debugfs_init(svc); + + return 0; +} + +static void dma_test_remove(struct tb_service *svc) +{ + struct dma_test *dt = tb_service_get_drvdata(svc); + + mutex_lock(&dt->lock); + debugfs_remove_recursive(dt->debugfs_dir); + mutex_unlock(&dt->lock); +} + +static int __maybe_unused dma_test_suspend(struct device *dev) +{ + /* + * No need to do anything special here. If userspace is writing + * to the test attribute when suspend started, it comes out from + * wait_for_completion_interruptible() with -ERESTARTSYS and the + * DMA test fails tearing down the rings. Once userspace is + * thawed the kernel restarts the write syscall effectively + * re-running the test. 
+ */ + return 0; +} + +static int __maybe_unused dma_test_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops dma_test_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(dma_test_suspend, dma_test_resume) +}; + +static const struct tb_service_id dma_test_ids[] = { + { TB_SERVICE("dma_test", 1) }, + { }, +}; +MODULE_DEVICE_TABLE(tbsvc, dma_test_ids); + +static struct tb_service_driver dma_test_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "thunderbolt_dma_test", + .pm = &dma_test_pm_ops, + }, + .probe = dma_test_probe, + .remove = dma_test_remove, + .id_table = dma_test_ids, +}; + +static int __init dma_test_init(void) +{ + u64 data_value = DMA_TEST_DATA_PATTERN; + int i, ret; + + dma_test_pattern = kmalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL); + if (!dma_test_pattern) + return -ENOMEM; + + for (i = 0; i < DMA_TEST_FRAME_SIZE / sizeof(data_value); i++) + ((u32 *)dma_test_pattern)[i] = data_value++; + + dma_test_dir = tb_property_create_dir(&dma_test_dir_uuid); + if (!dma_test_dir) { + ret = -ENOMEM; + goto err_free_pattern; + } + + tb_property_add_immediate(dma_test_dir, "prtcid", 1); + tb_property_add_immediate(dma_test_dir, "prtcvers", 1); + tb_property_add_immediate(dma_test_dir, "prtcrevs", 0); + tb_property_add_immediate(dma_test_dir, "prtcstns", 0); + + ret = tb_register_property_dir("dma_test", dma_test_dir); + if (ret) + goto err_free_dir; + + ret = tb_register_service_driver(&dma_test_driver); + if (ret) + goto err_unregister_dir; + + return 0; + +err_unregister_dir: + tb_unregister_property_dir("dma_test", dma_test_dir); +err_free_dir: + tb_property_free_dir(dma_test_dir); +err_free_pattern: + kfree(dma_test_pattern); + + return ret; +} +module_init(dma_test_init); + +static void __exit dma_test_exit(void) +{ + tb_unregister_service_driver(&dma_test_driver); + tb_unregister_property_dir("dma_test", dma_test_dir); + tb_property_free_dir(dma_test_dir); + kfree(dma_test_pattern); +} +module_exit(dma_test_exit); + +MODULE_AUTHOR("Isaac Hazan "); +MODULE_AUTHOR("Mika Westerberg "); +MODULE_DESCRIPTION("DMA traffic test driver"); +MODULE_LICENSE("GPL v2"); From 4e58171aa93fe8caf4f6e5e9972b7abe117c0014 Mon Sep 17 00:00:00 2001 From: Isaac Hazan Date: Tue, 3 Nov 2020 15:57:32 +0300 Subject: [PATCH 10/22] MAINTAINERS: Add Isaac as maintainer of Thunderbolt DMA traffic test driver I will be maintaining the Thunderbolt DMA traffic test driver. Signed-off-by: Isaac Hazan Signed-off-by: Mika Westerberg Acked-by: Yehezkel Bernat Reviewed-by: Greg Kroah-Hartman --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 3da6d8c154e4..83c4c66f8188 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17378,6 +17378,12 @@ W: http://thinkwiki.org/wiki/Ibm-acpi T: git git://repo.or.cz/linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git F: drivers/platform/x86/thinkpad_acpi.c +THUNDERBOLT DMA TRAFFIC TEST DRIVER +M: Isaac Hazan +L: linux-usb@vger.kernel.org +S: Maintained +F: drivers/thunderbolt/dma_test.c + THUNDERBOLT DRIVER M: Andreas Noever M: Michael Jamet From 45ef561abcdd6cbaba0ab391b60d1831d2ac47af Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 3 Nov 2020 13:59:31 +0200 Subject: [PATCH 11/22] thunderbolt: Move max_boot_acl field to correct place in struct icm This makes the kernel-doc to match the ordering and also this is better place for it, not between upstream_port and vnd_cap that are used together. 
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/icm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index b51fc3f62b1f..03e86817afc7 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -79,9 +79,9 @@ struct icm { struct mutex request_lock; struct delayed_work rescan_work; struct pci_dev *upstream_port; - size_t max_boot_acl; int vnd_cap; bool safe_mode; + size_t max_boot_acl; bool rpm; bool can_upgrade_nvm; bool veto; From e0258805d71b9e9febeae9d9ae39ae7997384b16 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 10 Nov 2020 11:02:31 +0300 Subject: [PATCH 12/22] thunderbolt: Log which connection manager implementation is used This makes it easier to figure out whether the driver is using firmware or software based connection manager implementation. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/icm.c | 2 ++ drivers/thunderbolt/tb.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 03e86817afc7..beee6e6b8b6e 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -2302,5 +2302,7 @@ struct tb *icm_probe(struct tb_nhi *nhi) return NULL; } + tb_dbg(tb, "using firmware connection manager\n"); + return tb; } diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 214fbc92c1b7..51d5b031cada 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -1534,5 +1534,7 @@ struct tb *tb_probe(struct tb_nhi *nhi) INIT_LIST_HEAD(&tcm->dp_resources); INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work); + tb_dbg(tb, "using software connection manager\n"); + return tb; } From a3595258970bf2dfd21ba8fa3fb3d07000ae989c Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 17 Nov 2020 13:41:21 +0300 Subject: [PATCH 13/22] thunderbolt: Log adapter numbers in decimal in path activation/deactivation This makes it consistent with other debug logs that already are using decimal number for adapters (ports). Signed-off-by: Mika Westerberg --- drivers/thunderbolt/path.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c index 7c2c45d9ba4a..ca7d738d66de 100644 --- a/drivers/thunderbolt/path.c +++ b/drivers/thunderbolt/path.c @@ -454,7 +454,7 @@ void tb_path_deactivate(struct tb_path *path) return; } tb_dbg(path->tb, - "deactivating %s path from %llx:%x to %llx:%x\n", + "deactivating %s path from %llx:%u to %llx:%u\n", path->name, tb_route(path->hops[0].in_port->sw), path->hops[0].in_port->port, tb_route(path->hops[path->path_length - 1].out_port->sw), @@ -482,7 +482,7 @@ int tb_path_activate(struct tb_path *path) } tb_dbg(path->tb, - "activating %s path from %llx:%x to %llx:%x\n", + "activating %s path from %llx:%u to %llx:%u\n", path->name, tb_route(path->hops[0].in_port->sw), path->hops[0].in_port->port, tb_route(path->hops[path->path_length - 1].out_port->sw), From b658eb9d9075aa2b44834962a1efc4bc78e9bed8 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 10 Nov 2020 11:04:31 +0300 Subject: [PATCH 14/22] thunderbolt: Keep the parent runtime resumed for a while on device disconnect When doing device firmware upgrade the device will disconnect for a while and then reconnect back. Keep the parent device (and the whole domain) powered for a while so we don't need to runtime resume immediately when the device is connected back after the device upgrade completes. 
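The runtime PM idiom used for this is the usual get/put-autosuspend pairing; here is an annotated sketch of the pattern the hunks below add around remove_switch():

    /* Hold a runtime PM reference on the parent so it stays resumed
     * while the switch is removed ... */
    pm_runtime_get_sync(sw->dev.parent);
    remove_switch(sw);
    /* ... then drop it with autosuspend so the parent (and the whole
     * domain) stays powered for the autosuspend delay, giving the
     * device time to come back after its firmware upgrade completes. */
    pm_runtime_mark_last_busy(sw->dev.parent);
    pm_runtime_put_autosuspend(sw->dev.parent);
    tb_switch_put(sw);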
Signed-off-by: Mika Westerberg
---
 drivers/thunderbolt/icm.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index beee6e6b8b6e..635b949fb1d6 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -870,7 +870,13 @@ icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
 		return;
 	}
 
+	pm_runtime_get_sync(sw->dev.parent);
+
 	remove_switch(sw);
+
+	pm_runtime_mark_last_busy(sw->dev.parent);
+	pm_runtime_put_autosuspend(sw->dev.parent);
+
 	tb_switch_put(sw);
 }
 
@@ -1280,8 +1286,13 @@ icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
 		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
 		return;
 	}
+	pm_runtime_get_sync(sw->dev.parent);
 
 	remove_switch(sw);
+
+	pm_runtime_mark_last_busy(sw->dev.parent);
+	pm_runtime_put_autosuspend(sw->dev.parent);
+
 	tb_switch_put(sw);
 }

From 463e48fa544826898791085508459de246fc4c09 Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Tue, 17 Nov 2020 13:19:41 +0300
Subject: [PATCH 15/22] thunderbolt: Return -ENOTCONN when ERR_CONN is received

This allows the calling code to distinguish whether the error was due
to ERR_CONN (the adapter is disconnected or disabled) or something
else. This will be needed in the USB4 router NVM update in the
following patch.

Signed-off-by: Mika Westerberg
---
 drivers/thunderbolt/ctl.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 1d86e27a0ef3..bac08b820015 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -962,6 +962,9 @@ static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
 	if (res->tb_error == TB_CFG_ERROR_LOCK)
 		return -EACCES;
+	else if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
+		return -ENOTCONN;
+
 	return -EIO;
 }

From 661b19473bf3ac0924560f0cbf84c15458b3c8de Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Tue, 10 Nov 2020 11:34:07 +0300
Subject: [PATCH 16/22] thunderbolt: Perform USB4 router NVM upgrade in two
 phases

The current code expects the router to return the status of the NVM
authentication immediately. When tested against a real USB4 device,
what actually happens is that the router is reset, and only after that
is the result updated in the ROUTER_CS_26 register status field. This
also seems to align better with what the spec suggests.

For this reason, do the same as we already do with Thunderbolt 3
devices and perform the NVM upgrade in two phases. First start the
NVM_AUTH router operation, and once the router is added back after the
reset, read the status in ROUTER_CS_26 and expose it to userspace
accordingly.
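As a sketch of the resulting caller-side flow (based on the helpers
this patch introduces; error handling trimmed):

	/* Phase 1: start authentication; the router power cycles, so
	 * "connection lost" style errors are expected and not fatal. */
	ret = usb4_switch_nvm_authenticate(sw);

	/* Phase 2: after the router re-enumerates, read the result from
	 * ROUTER_CS_26 before any other router operation is issued. */
	ret = usb4_switch_nvm_authenticate_status(sw, &status);
	if (!ret && status)
		nvm_set_auth_status(sw, status);	/* expose to userspace */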
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/switch.c | 20 ++++++++-- drivers/thunderbolt/tb.h | 1 + drivers/thunderbolt/tb_regs.h | 1 + drivers/thunderbolt/usb4.c | 75 +++++++++++++++++++++++++++-------- 4 files changed, 77 insertions(+), 20 deletions(-) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index cdfd8cccfe19..a8572f49d3ad 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -2160,6 +2160,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) fallthrough; case 3: + case 4: ret = tb_switch_set_uuid(sw); if (ret) return ret; @@ -2175,6 +2176,22 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) break; } + if (sw->no_nvm_upgrade) + return 0; + + if (tb_switch_is_usb4(sw)) { + ret = usb4_switch_nvm_authenticate_status(sw, &status); + if (ret) + return ret; + + if (status) { + tb_sw_info(sw, "switch flash authentication failed\n"); + nvm_set_auth_status(sw, status); + } + + return 0; + } + /* Root switch DMA port requires running firmware */ if (!tb_route(sw) && !tb_switch_is_icm(sw)) return 0; @@ -2183,9 +2200,6 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) if (!sw->dma_port) return 0; - if (sw->no_nvm_upgrade) - return 0; - /* * If there is status already set then authentication failed * when the dma_port_flash_update_auth() returned. Power cycling diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index a21000649009..3885f2515aae 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -972,6 +972,7 @@ int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, const void *buf, size_t size); int usb4_switch_nvm_authenticate(struct tb_switch *sw); +int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status); bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index e7d9529822fa..67cb173a2f8e 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -211,6 +211,7 @@ struct tb_regs_switch_header { #define ROUTER_CS_9 0x09 #define ROUTER_CS_25 0x19 #define ROUTER_CS_26 0x1a +#define ROUTER_CS_26_OPCODE_MASK GENMASK(15, 0) #define ROUTER_CS_26_STATUS_MASK GENMASK(29, 24) #define ROUTER_CS_26_STATUS_SHIFT 24 #define ROUTER_CS_26_ONS BIT(30) diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 40f13579a3fe..d88e28eee975 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -192,7 +192,9 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status) if (val & ROUTER_CS_26_ONS) return -EOPNOTSUPP; - *status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT; + if (status) + *status = (val & ROUTER_CS_26_STATUS_MASK) >> + ROUTER_CS_26_STATUS_SHIFT; return 0; } @@ -634,32 +636,71 @@ int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, * @sw: USB4 router * * After the new NVM has been written via usb4_switch_nvm_write(), this - * function triggers NVM authentication process. If the authentication - * is successful the router is power cycled and the new NVM starts + * function triggers NVM authentication process. The router gets power + * cycled and if the authentication is successful the new NVM starts * running. 
In case of failure returns negative errno.
+ *
+ * The caller should call usb4_switch_nvm_authenticate_status() to read
+ * the status of the authentication after power cycle. It should be the
+ * first router operation to avoid the status being lost.
  */
 int usb4_switch_nvm_authenticate(struct tb_switch *sw)
 {
-	u8 status = 0;
 	int ret;
 
-	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL);
+	switch (ret) {
+	/*
+	 * The router is power cycled once NVM_AUTH is started so it is
+	 * expected to get any of the following errors back.
+	 */
+	case -EACCES:
+	case -ENOTCONN:
+	case -ETIMEDOUT:
+		return 0;
+
+	default:
+		return ret;
+	}
+}
+
+/**
+ * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
+ * @sw: USB4 router
+ * @status: Status code of the operation
+ *
+ * The function checks if there is status available from the last NVM
+ * authenticate router operation. If there is status then %0 is returned
+ * and the status code is placed in @status. Returns negative errno in case
+ * of failure.
+ *
+ * Must be called before any other router operation.
+ */
+int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
+{
+	u16 opcode;
+	u32 val;
+	int ret;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
 	if (ret)
 		return ret;
 
-	switch (status) {
-	case 0x0:
-		tb_sw_dbg(sw, "NVM authentication successful\n");
-		return 0;
-	case 0x1:
-		return -EINVAL;
-	case 0x2:
-		return -EAGAIN;
-	case 0x3:
-		return -EOPNOTSUPP;
-	default:
-		return -EIO;
+	/* Check that the opcode is correct */
+	opcode = val & ROUTER_CS_26_OPCODE_MASK;
+	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
+		if (val & ROUTER_CS_26_OV)
+			return -EBUSY;
+		if (val & ROUTER_CS_26_ONS)
+			return -EOPNOTSUPP;
+
+		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
+			ROUTER_CS_26_STATUS_SHIFT;
+	} else {
+		*status = 0;
 	}
+
+	return 0;
 }
 
 /**

From fe265a06319bfa27cfbccd3305d93b33b78f48f2 Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Mon, 2 Nov 2020 14:54:23 +0200
Subject: [PATCH 17/22] thunderbolt: Pass metadata directly to usb4_switch_op()

We are going to make usb4_switch_op() better match the corresponding
firmware (ICM) USB4 router operation proxy interface, so that we can
use either one based on the connection manager implementation. For this
reason pass the metadata directly to usb4_switch_op().
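The change collapses the separate metadata register accesses into the
operation call itself; roughly (a sketch based on the hunks below):

	/* Before: metadata written and read back with dedicated helpers */
	ret = usb4_switch_op_write_metadata(sw, metadata);
	if (!ret)
		ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);

	/* After: one call; ROUTER_CS_25 is written before the operation
	 * starts and read back into @metadata once it completes. */
	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &metadata, &status);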
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/usb4.c | 77 ++++++++++++++------------------------ 1 file changed, 28 insertions(+), 49 deletions(-) diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index d88e28eee975..5f3237d66987 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -92,16 +92,6 @@ static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data, return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords); } -static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata) -{ - return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); -} - -static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata) -{ - return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); -} - static int usb4_do_read_data(u16 address, void *buf, size_t size, read_block_fn read_block, void *read_block_data) { @@ -171,11 +161,18 @@ static int usb4_do_write_data(unsigned int address, const void *buf, size_t size return 0; } -static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status) +static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, + u8 *status) { u32 val; int ret; + if (metadata) { + ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); + if (ret) + return ret; + } + val = opcode | ROUTER_CS_26_OV; ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); if (ret) @@ -195,7 +192,9 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status) if (status) *status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT; - return 0; + + return metadata ? + tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1) : 0; } static void usb4_switch_check_wakes(struct tb_switch *sw) @@ -350,11 +349,7 @@ static int usb4_switch_drom_read_block(void *data, metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) & USB4_DROM_ADDRESS_MASK; - ret = usb4_switch_op_write_metadata(sw, metadata); - if (ret) - return ret; - - ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status); + ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &metadata, &status); if (ret) return ret; @@ -510,17 +505,14 @@ int usb4_switch_nvm_sector_size(struct tb_switch *sw) u8 status; int ret; - ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status); + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata, + &status); if (ret) return ret; if (status) return status == 0x2 ? 
-EOPNOTSUPP : -EIO;
 
-	ret = usb4_switch_op_read_metadata(sw, &metadata);
-	if (ret)
-		return ret;
-
 	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
 }
 
@@ -537,11 +529,7 @@ static int usb4_switch_nvm_read_block(void *data,
 	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
 		USB4_NVM_READ_OFFSET_MASK;
 
-	ret = usb4_switch_op_write_metadata(sw, metadata);
-	if (ret)
-		return ret;
-
-	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &metadata, &status);
 	if (ret)
 		return ret;
 
@@ -579,11 +567,8 @@ static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
 	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
 		USB4_NVM_SET_OFFSET_MASK;
 
-	ret = usb4_switch_op_write_metadata(sw, metadata);
-	if (ret)
-		return ret;
-
-	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
+			     &status);
 	if (ret)
 		return ret;
 
@@ -601,7 +586,7 @@ static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
 	if (ret)
 		return ret;
 
-	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status);
 	if (ret)
 		return ret;
 
@@ -648,7 +633,7 @@ int usb4_switch_nvm_authenticate(struct tb_switch *sw)
 {
 	int ret;
 
-	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL);
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
 	switch (ret) {
 	/*
 	 * The router is power cycled once NVM_AUTH is started so it is
@@ -714,14 +699,12 @@ int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
  */
 bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
 {
+	u32 metadata = in->port;
 	u8 status;
 	int ret;
 
-	ret = usb4_switch_op_write_metadata(sw, in->port);
-	if (ret)
-		return false;
-
-	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
+			     &status);
 	/*
 	 * If DP resource allocation is not supported assume it is
 	 * always available.
@@ -746,14 +729,12 @@ bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
  */
 int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
 {
+	u32 metadata = in->port;
 	u8 status;
 	int ret;
 
-	ret = usb4_switch_op_write_metadata(sw, in->port);
-	if (ret)
-		return ret;
-
-	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
+			     &status);
 	if (ret == -EOPNOTSUPP)
 		return 0;
 	else if (ret)
@@ -771,14 +752,12 @@ int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
  */
 int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
 {
+	u32 metadata = in->port;
 	u8 status;
 	int ret;
 
-	ret = usb4_switch_op_write_metadata(sw, in->port);
-	if (ret)
-		return ret;
-
-	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
+	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
+			     &status);
 	if (ret == -EOPNOTSUPP)
 		return 0;
 	else if (ret)

From 83bab44ada0512b054844e661279d68d0c8f3d03 Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Mon, 2 Nov 2020 15:09:44 +0200
Subject: [PATCH 18/22] thunderbolt: Pass TX and RX data directly to
 usb4_switch_op()

We are going to make usb4_switch_op() better match the corresponding
firmware (ICM) USB4 router operation proxy interface, so that we can
use either one based on the connection manager implementation.
For this reason rename usb4_switch_op() to __usb4_switch_op(), which
provides the most complete interface. Then make usb4_switch_op() and
usb4_switch_op_data() call it with the correct set of parameters, and
update the callers accordingly.

Signed-off-by: Mika Westerberg
---
 drivers/thunderbolt/usb4.c | 85 +++++++++++++++++++++----------------
 1 file changed, 48 insertions(+), 37 deletions(-)

diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 5f3237d66987..c1bb5ec6e1db 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -74,24 +74,6 @@ static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
 	return -ETIMEDOUT;
 }
 
-static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
-				    size_t dwords)
-{
-	if (dwords > USB4_DATA_DWORDS)
-		return -EINVAL;
-
-	return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
-}
-
-static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
-				     size_t dwords)
-{
-	if (dwords > USB4_DATA_DWORDS)
-		return -EINVAL;
-
-	return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
-}
-
 static int usb4_do_read_data(u16 address, void *buf, size_t size,
 			     read_block_fn read_block, void *read_block_data)
 {
@@ -161,17 +143,27 @@ static int usb4_do_write_data(unsigned int address, const void *buf, size_t size
 	return 0;
 }
 
-static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
-			  u8 *status)
+static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
+			    u8 *status, const void *tx_data, size_t tx_dwords,
+			    void *rx_data, size_t rx_dwords)
 {
 	u32 val;
 	int ret;
 
+	if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
+		return -EINVAL;
+
 	if (metadata) {
 		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
 		if (ret)
 			return ret;
 	}
+	if (tx_dwords) {
+		ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
+				  tx_dwords);
+		if (ret)
+			return ret;
+	}
 
 	val = opcode | ROUTER_CS_26_OV;
 	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
@@ -193,8 +185,34 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
 		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
 			ROUTER_CS_26_STATUS_SHIFT;
 
-	return metadata ?
-		tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1) : 0;
+	if (metadata) {
+		ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+		if (ret)
+			return ret;
+	}
+	if (rx_dwords) {
+		ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
+				 rx_dwords);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
+				 u32 *metadata, u8 *status)
+{
+	return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
+}
+
+static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
+				      u32 *metadata, u8 *status,
+				      const void *tx_data, size_t tx_dwords,
+				      void *rx_data, size_t rx_dwords)
+{
+	return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
+				tx_dwords, rx_data, rx_dwords);
 }
 
 static void usb4_switch_check_wakes(struct tb_switch *sw)
@@ -349,14 +367,12 @@ static int usb4_switch_drom_read_block(void *data,
 	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
 		USB4_DROM_ADDRESS_MASK;
 
-	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &metadata, &status);
+	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
+				  &status, NULL, 0, buf, dwords);
 	if (ret)
 		return ret;
 
-	if (status)
-		return -EIO;
-
-	return usb4_switch_op_read_data(sw, buf, dwords);
+	return status ? -EIO : 0;
 }
 
 /**
@@ -529,14 +545,12 @@ static int usb4_switch_nvm_read_block(void *data,
 	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
 		USB4_NVM_READ_OFFSET_MASK;
 
-	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &metadata, &status);
+	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
+				  &status, NULL, 0, buf, dwords);
 	if (ret)
 		return ret;
 
-	if (status)
-		return -EIO;
-
-	return usb4_switch_op_read_data(sw, buf, dwords);
+	return status ? -EIO : 0;
 }
 
 /**
@@ -582,11 +596,8 @@ static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
 	u8 status;
 	int ret;
 
-	ret = usb4_switch_op_write_data(sw, buf, dwords);
-	if (ret)
-		return ret;
-
-	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status);
+	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
+				  buf, dwords, NULL, 0);
 	if (ret)
 		return ret;
 

From 9490f71167feba55349e33854f5e51a1a3af9e8c Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Tue, 3 Nov 2020 13:58:00 +0200
Subject: [PATCH 19/22] thunderbolt: Add connection manager specific hooks for
 USB4 router operations

Intel USB4 host routers that run the firmware based connection manager
(ICM) may implement a proxy for USB4 router operations. This is to
avoid the firmware racing with the OS driver, as both may need to run
these operations. This adds two new connection manager specific
callbacks which, if provided, get called instead of the native USB4
router operation.

Signed-off-by: Mika Westerberg
---
 drivers/thunderbolt/tb.h   | 13 ++++++++++
 drivers/thunderbolt/usb4.c | 50 +++++++++++++++++++++++++++++++++-----
 2 files changed, 57 insertions(+), 6 deletions(-)

diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 3885f2515aae..d19dbc8e9457 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -367,6 +367,14 @@ struct tb_path {
  * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
  * @approve_xdomain_paths: Approve (establish) XDomain DMA paths
  * @disconnect_xdomain_paths: Disconnect XDomain DMA paths
+ * @usb4_switch_op: Optional proxy for USB4 router operations. If set
+ *		    this will be called whenever USB4 router operation is
+ *		    performed. If this returns %-EOPNOTSUPP then the
+ *		    native USB4 router operation is called.
+ * @usb4_switch_nvm_authenticate_status: Optional callback that the CM
+ *					 implementation can be used to
+ *					 return status of USB4 NVM_AUTH
+ *					 router operation.
  */
 struct tb_cm_ops {
 	int (*driver_ready)(struct tb *tb);
@@ -393,6 +401,11 @@ struct tb_cm_ops {
 	int (*disconnect_pcie_paths)(struct tb *tb);
 	int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
 	int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
+	int (*usb4_switch_op)(struct tb_switch *sw, u16 opcode, u32 *metadata,
+			      u8 *status, const void *tx_data, size_t tx_data_len,
+			      void *rx_data, size_t rx_data_len);
+	int (*usb4_switch_nvm_authenticate_status)(struct tb_switch *sw,
+						   u32 *status);
 };
 
 static inline void *tb_priv(struct tb *tb)
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index c1bb5ec6e1db..cbf1c0536360 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -143,16 +143,14 @@ static int usb4_do_write_data(unsigned int address, const void *buf, size_t size
 	return 0;
 }
 
-static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
-			    u8 *status, const void *tx_data, size_t tx_dwords,
-			    void *rx_data, size_t rx_dwords)
+static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
+				 u32 *metadata, u8 *status,
+				 const void *tx_data, size_t tx_dwords,
+				 void *rx_data, size_t rx_dwords)
 {
 	u32 val;
 	int ret;
 
-	if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
-		return -EINVAL;
-
 	if (metadata) {
 		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
 		if (ret)
@@ -200,6 +198,39 @@ static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
 	return 0;
 }
 
+static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
+			    u8 *status, const void *tx_data, size_t tx_dwords,
+			    void *rx_data, size_t rx_dwords)
+{
+	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
+
+	if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
+		return -EINVAL;
+
+	/*
+	 * If the connection manager implementation provides USB4 router
+	 * operation proxy callback, call it here instead of running the
+	 * operation natively.
+	 */
+	if (cm_ops->usb4_switch_op) {
+		int ret;
+
+		ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
+					     tx_data, tx_dwords, rx_data,
+					     rx_dwords);
+		if (ret != -EOPNOTSUPP)
+			return ret;
+
+		/*
+		 * If the proxy was not supported then run the native
+		 * router operation instead.
+		 */
+	}
+
+	return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
+				     tx_dwords, rx_data, rx_dwords);
+}
+
 static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
 				 u32 *metadata, u8 *status)
 {
@@ -674,10 +705,17 @@ int usb4_switch_nvm_authenticate(struct tb_switch *sw)
  */
 int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
 {
+	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
 	u16 opcode;
 	u32 val;
 	int ret;
 
+	if (cm_ops->usb4_switch_nvm_authenticate_status) {
+		ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
+		if (ret != -EOPNOTSUPP)
+			return ret;
+	}
+
 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
 	if (ret)
 		return ret;

From 579f14217c952975e7d11e300c669af0c47bfe04 Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Thu, 12 Nov 2020 15:45:18 +0200
Subject: [PATCH 20/22] thunderbolt: Move constants for USB4 router operations
 to tb_regs.h

We are going to use these in a subsequent patch, so make them available
outside of usb4.c.
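With the enum visible from tb_regs.h, the ICM code can name the same
opcodes that usb4.c runs natively. As a sketch (mirroring how the next
patch in the series uses them):

	/* drivers/thunderbolt/icm.c can now refer to the shared opcode
	 * when building its proxy request: */
	auth->request.opcode = USB4_SWITCH_OP_NVM_AUTH;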
Signed-off-by: Mika Westerberg
---
 drivers/thunderbolt/tb_regs.h | 13 +++++++++++++
 drivers/thunderbolt/usb4.c    | 12 ------------
 2 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 67cb173a2f8e..ae427a953489 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -217,6 +217,19 @@ struct tb_regs_switch_header {
 #define ROUTER_CS_26_ONS		BIT(30)
 #define ROUTER_CS_26_OV			BIT(31)
 
+/* USB4 router operations opcodes */
+enum usb4_switch_op {
+	USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
+	USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
+	USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
+	USB4_SWITCH_OP_NVM_WRITE = 0x20,
+	USB4_SWITCH_OP_NVM_AUTH = 0x21,
+	USB4_SWITCH_OP_NVM_READ = 0x22,
+	USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
+	USB4_SWITCH_OP_DROM_READ = 0x24,
+	USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
+};
+
 /* Router TMU configuration */
 #define TMU_RTR_CS_0			0x00
 #define TMU_RTR_CS_0_TD			BIT(27)
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index cbf1c0536360..6a0aa83a1ac8 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -16,18 +16,6 @@
 #define USB4_DATA_DWORDS		16
 #define USB4_DATA_RETRIES		3
 
-enum usb4_switch_op {
-	USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
-	USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
-	USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
-	USB4_SWITCH_OP_NVM_WRITE = 0x20,
-	USB4_SWITCH_OP_NVM_AUTH = 0x21,
-	USB4_SWITCH_OP_NVM_READ = 0x22,
-	USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
-	USB4_SWITCH_OP_DROM_READ = 0x24,
-	USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
-};
-
 enum usb4_sb_target {
 	USB4_SB_TARGET_ROUTER,
 	USB4_SB_TARGET_PARTNER,

From 9039387e166edab35c89ddcc057529e332cc4089 Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Tue, 3 Nov 2020 13:55:32 +0200
Subject: [PATCH 21/22] thunderbolt: Add USB4 router operation proxy for
 firmware connection manager

Intel Maple Ridge and Tiger Lake connection manager firmware implements
a USB4 router operation proxy that should be used instead of direct
register access to avoid races with the firmware. This is supported in
all firmware versions where the protocol version field returned in the
driver ready response is 3 (or higher).

This adds USB4 router operation proxy support to the driver so that we
first check the protocol version, and if it is 3 (or higher) the USB4
router operation is run through the firmware-provided proxy. Otherwise
the native implementation is used.

Most USB4 router proxy operations are straightforward, except NVM_AUTH,
where the firmware only responds once the router has been restarted but
before it sends the device connected notification. To support this we
split the operation so that the reply is received asynchronously and
stored in struct icm. This last reply is then returned in
icm_usb4_switch_nvm_authenticate_status() if available.
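Putting the pieces together, the proxy-side dispatch ends up looking
roughly like this (a sketch; names follow the series except for the
hypothetical icm_usb4_switch_op_sync() helper, which stands in for the
synchronous request/reply tail shown in full in the diff below):

	static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode,
				      u32 *metadata, u8 *status,
				      const void *tx_data, size_t tx_data_len,
				      void *rx_data, size_t rx_data_len)
	{
		struct icm *icm = tb_priv(sw->tb);

		/* Older firmware has no proxy; returning -EOPNOTSUPP makes
		 * __usb4_switch_op() fall back to the native register path. */
		if (icm->proto_version < 3)
			return -EOPNOTSUPP;

		/* NVM_AUTH only completes after the router restarts, so it is
		 * fired off asynchronously; the reply is stashed in
		 * icm->last_nvm_auth for the status call to pick up later. */
		if (opcode == USB4_SWITCH_OP_NVM_AUTH)
			return icm_usb4_switch_nvm_authenticate(sw->tb,
								tb_route(sw));

		/* Everything else is an ordinary synchronous ICM request */
		return icm_usb4_switch_op_sync(sw, opcode, metadata, status,
					       tx_data, tx_data_len,
					       rx_data, rx_data_len);
	}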
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/icm.c | 214 ++++++++++++++++++++++++++++++++-- drivers/thunderbolt/tb_msgs.h | 28 +++++ 2 files changed, 232 insertions(+), 10 deletions(-) diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 635b949fb1d6..35935c106e3d 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -48,6 +48,18 @@ static bool start_icm; module_param(start_icm, bool, 0444); MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)"); +/** + * struct usb4_switch_nvm_auth - Holds USB4 NVM_AUTH status + * @reply: Reply from ICM firmware is placed here + * @request: Request that is sent to ICM firmware + * @icm: Pointer to ICM private data + */ +struct usb4_switch_nvm_auth { + struct icm_usb4_switch_op_response reply; + struct icm_usb4_switch_op request; + struct icm *icm; +}; + /** * struct icm - Internal connection manager private data * @request_lock: Makes sure only one message is send to ICM at time @@ -61,6 +73,8 @@ MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: f * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported) * @rpm: Does the controller support runtime PM (RTD3) * @can_upgrade_nvm: Can the NVM firmware be upgrade on this controller + * @proto_version: Firmware protocol version + * @last_nvm_auth: Last USB4 router NVM_AUTH result (or %NULL if not set) * @veto: Is RTD3 veto in effect * @is_supported: Checks if we can support ICM on this controller * @cio_reset: Trigger CIO reset @@ -84,6 +98,8 @@ struct icm { size_t max_boot_acl; bool rpm; bool can_upgrade_nvm; + u8 proto_version; + struct usb4_switch_nvm_auth *last_nvm_auth; bool veto; bool (*is_supported)(struct tb *tb); int (*cio_reset)(struct tb *tb); @@ -92,7 +108,7 @@ struct icm { void (*save_devices)(struct tb *tb); int (*driver_ready)(struct tb *tb, enum tb_security_level *security_level, - size_t *nboot_acl, bool *rpm); + u8 *proto_version, size_t *nboot_acl, bool *rpm); void (*set_uuid)(struct tb *tb); void (*device_connected)(struct tb *tb, const struct icm_pkg_header *hdr); @@ -437,7 +453,7 @@ static void icm_fr_save_devices(struct tb *tb) static int icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level, - size_t *nboot_acl, bool *rpm) + u8 *proto_version, size_t *nboot_acl, bool *rpm) { struct icm_fr_pkg_driver_ready_response reply; struct icm_pkg_driver_ready request = { @@ -992,7 +1008,7 @@ static int icm_tr_cio_reset(struct tb *tb) static int icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, - size_t *nboot_acl, bool *rpm) + u8 *proto_version, size_t *nboot_acl, bool *rpm) { struct icm_tr_pkg_driver_ready_response reply; struct icm_pkg_driver_ready request = { @@ -1008,6 +1024,9 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, if (security_level) *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK; + if (proto_version) + *proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >> + ICM_TR_INFO_PROTO_VERSION_SHIFT; if (nboot_acl) *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >> ICM_TR_INFO_BOOT_ACL_SHIFT; @@ -1461,7 +1480,7 @@ static int icm_ar_get_mode(struct tb *tb) static int icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level, - size_t *nboot_acl, bool *rpm) + u8 *proto_version, size_t *nboot_acl, bool *rpm) { struct icm_ar_pkg_driver_ready_response reply; struct icm_pkg_driver_ready request = { @@ -1591,7 +1610,7 @@ static int icm_ar_set_boot_acl(struct 
tb *tb, const uuid_t *uuids, static int icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, - size_t *nboot_acl, bool *rpm) + u8 *proto_version, size_t *nboot_acl, bool *rpm) { struct icm_tr_pkg_driver_ready_response reply; struct icm_pkg_driver_ready request = { @@ -1605,6 +1624,10 @@ icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, if (ret) return ret; + if (proto_version) + *proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >> + ICM_TR_INFO_PROTO_VERSION_SHIFT; + /* Ice Lake always supports RTD3 */ if (rpm) *rpm = true; @@ -1713,13 +1736,14 @@ static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, static int __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level, - size_t *nboot_acl, bool *rpm) + u8 *proto_version, size_t *nboot_acl, bool *rpm) { struct icm *icm = tb_priv(tb); unsigned int retries = 50; int ret; - ret = icm->driver_ready(tb, security_level, nboot_acl, rpm); + ret = icm->driver_ready(tb, security_level, proto_version, nboot_acl, + rpm); if (ret) { tb_err(tb, "failed to send driver ready to ICM\n"); return ret; @@ -1929,8 +1953,8 @@ static int icm_driver_ready(struct tb *tb) return 0; } - ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl, - &icm->rpm); + ret = __icm_driver_ready(tb, &tb->security_level, &icm->proto_version, + &tb->nboot_acl, &icm->rpm); if (ret) return ret; @@ -1941,6 +1965,9 @@ static int icm_driver_ready(struct tb *tb) if (tb->nboot_acl > icm->max_boot_acl) tb->nboot_acl = 0; + if (icm->proto_version >= 3) + tb_dbg(tb, "USB4 proxy operations supported\n"); + return 0; } @@ -2052,7 +2079,7 @@ static void icm_complete(struct tb *tb) * Now all existing children should be resumed, start events * from ICM to get updated status. 
*/ - __icm_driver_ready(tb, NULL, NULL, NULL); + __icm_driver_ready(tb, NULL, NULL, NULL, NULL); /* * We do not get notifications of devices that have been @@ -2131,6 +2158,8 @@ static void icm_stop(struct tb *tb) tb_switch_remove(tb->root_switch); tb->root_switch = NULL; nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); + kfree(icm->last_nvm_auth); + icm->last_nvm_auth = NULL; } static int icm_disconnect_pcie_paths(struct tb *tb) @@ -2138,6 +2167,165 @@ static int icm_disconnect_pcie_paths(struct tb *tb) return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0); } +static void icm_usb4_switch_nvm_auth_complete(void *data) +{ + struct usb4_switch_nvm_auth *auth = data; + struct icm *icm = auth->icm; + struct tb *tb = icm_to_tb(icm); + + tb_dbg(tb, "NVM_AUTH response for %llx flags %#x status %#x\n", + get_route(auth->reply.route_hi, auth->reply.route_lo), + auth->reply.hdr.flags, auth->reply.status); + + mutex_lock(&tb->lock); + if (WARN_ON(icm->last_nvm_auth)) + kfree(icm->last_nvm_auth); + icm->last_nvm_auth = auth; + mutex_unlock(&tb->lock); +} + +static int icm_usb4_switch_nvm_authenticate(struct tb *tb, u64 route) +{ + struct usb4_switch_nvm_auth *auth; + struct icm *icm = tb_priv(tb); + struct tb_cfg_request *req; + int ret; + + auth = kzalloc(sizeof(*auth), GFP_KERNEL); + if (!auth) + return -ENOMEM; + + auth->icm = icm; + auth->request.hdr.code = ICM_USB4_SWITCH_OP; + auth->request.route_hi = upper_32_bits(route); + auth->request.route_lo = lower_32_bits(route); + auth->request.opcode = USB4_SWITCH_OP_NVM_AUTH; + + req = tb_cfg_request_alloc(); + if (!req) { + ret = -ENOMEM; + goto err_free_auth; + } + + req->match = icm_match; + req->copy = icm_copy; + req->request = &auth->request; + req->request_size = sizeof(auth->request); + req->request_type = TB_CFG_PKG_ICM_CMD; + req->response = &auth->reply; + req->npackets = 1; + req->response_size = sizeof(auth->reply); + req->response_type = TB_CFG_PKG_ICM_RESP; + + tb_dbg(tb, "NVM_AUTH request for %llx\n", route); + + mutex_lock(&icm->request_lock); + ret = tb_cfg_request(tb->ctl, req, icm_usb4_switch_nvm_auth_complete, + auth); + mutex_unlock(&icm->request_lock); + + tb_cfg_request_put(req); + if (ret) + goto err_free_auth; + return 0; + +err_free_auth: + kfree(auth); + return ret; +} + +static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, + u8 *status, const void *tx_data, size_t tx_data_len, + void *rx_data, size_t rx_data_len) +{ + struct icm_usb4_switch_op_response reply; + struct icm_usb4_switch_op request; + struct tb *tb = sw->tb; + struct icm *icm = tb_priv(tb); + u64 route = tb_route(sw); + int ret; + + /* + * USB4 router operation proxy is supported in firmware if the + * protocol version is 3 or higher. + */ + if (icm->proto_version < 3) + return -EOPNOTSUPP; + + /* + * NVM_AUTH is a special USB4 proxy operation that does not + * return immediately so handle it separately. 
+ */ + if (opcode == USB4_SWITCH_OP_NVM_AUTH) + return icm_usb4_switch_nvm_authenticate(tb, route); + + memset(&request, 0, sizeof(request)); + request.hdr.code = ICM_USB4_SWITCH_OP; + request.route_hi = upper_32_bits(route); + request.route_lo = lower_32_bits(route); + request.opcode = opcode; + if (metadata) + request.metadata = *metadata; + + if (tx_data_len) { + request.data_len_valid |= ICM_USB4_SWITCH_DATA_VALID; + if (tx_data_len < ARRAY_SIZE(request.data)) + request.data_len_valid = + tx_data_len & ICM_USB4_SWITCH_DATA_LEN_MASK; + memcpy(request.data, tx_data, tx_data_len * sizeof(u32)); + } + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) + return -EIO; + + if (status) + *status = reply.status; + + if (metadata) + *metadata = reply.metadata; + + if (rx_data_len) + memcpy(rx_data, reply.data, rx_data_len * sizeof(u32)); + + return 0; +} + +static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw, + u32 *status) +{ + struct usb4_switch_nvm_auth *auth; + struct tb *tb = sw->tb; + struct icm *icm = tb_priv(tb); + int ret = 0; + + if (icm->proto_version < 3) + return -EOPNOTSUPP; + + auth = icm->last_nvm_auth; + icm->last_nvm_auth = NULL; + + if (auth && auth->reply.route_hi == sw->config.route_hi && + auth->reply.route_lo == sw->config.route_lo) { + tb_dbg(tb, "NVM_AUTH found for %llx flags 0x%#x status %#x\n", + tb_route(sw), auth->reply.hdr.flags, auth->reply.status); + if (auth->reply.hdr.flags & ICM_FLAGS_ERROR) + ret = -EIO; + else + *status = auth->reply.status; + } else { + *status = 0; + } + + kfree(auth); + return ret; +} + /* Falcon Ridge */ static const struct tb_cm_ops icm_fr_ops = { .driver_ready = icm_driver_ready, @@ -2196,6 +2384,9 @@ static const struct tb_cm_ops icm_tr_ops = { .disconnect_pcie_paths = icm_disconnect_pcie_paths, .approve_xdomain_paths = icm_tr_approve_xdomain_paths, .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, + .usb4_switch_op = icm_usb4_switch_op, + .usb4_switch_nvm_authenticate_status = + icm_usb4_switch_nvm_authenticate_status, }; /* Ice Lake */ @@ -2209,6 +2400,9 @@ static const struct tb_cm_ops icm_icl_ops = { .handle_event = icm_handle_event, .approve_xdomain_paths = icm_tr_approve_xdomain_paths, .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, + .usb4_switch_op = icm_usb4_switch_op, + .usb4_switch_nvm_authenticate_status = + icm_usb4_switch_nvm_authenticate_status, }; struct tb *icm_probe(struct tb_nhi *nhi) diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h index 0e01dbc63e72..bcabfcb2fd03 100644 --- a/drivers/thunderbolt/tb_msgs.h +++ b/drivers/thunderbolt/tb_msgs.h @@ -106,6 +106,7 @@ enum icm_pkg_code { ICM_APPROVE_XDOMAIN = 0x10, ICM_DISCONNECT_XDOMAIN = 0x11, ICM_PREBOOT_ACL = 0x18, + ICM_USB4_SWITCH_OP = 0x20, }; enum icm_event_code { @@ -343,6 +344,8 @@ struct icm_tr_pkg_driver_ready_response { #define ICM_TR_FLAGS_RTD3 BIT(6) #define ICM_TR_INFO_SLEVEL_MASK GENMASK(2, 0) +#define ICM_TR_INFO_PROTO_VERSION_MASK GENMASK(6, 4) +#define ICM_TR_INFO_PROTO_VERSION_SHIFT 4 #define ICM_TR_INFO_BOOT_ACL_SHIFT 7 #define ICM_TR_INFO_BOOT_ACL_MASK GENMASK(12, 7) @@ -478,6 +481,31 @@ struct icm_icl_event_rtd3_veto { u32 veto_reason; }; +/* USB4 ICM messages */ + +struct icm_usb4_switch_op { + struct icm_pkg_header hdr; + u32 route_hi; + u32 route_lo; + u32 metadata; + u16 opcode; + u16 data_len_valid; + u32 data[16]; +}; + +#define 
ICM_USB4_SWITCH_DATA_LEN_MASK	GENMASK(3, 0)
+#define ICM_USB4_SWITCH_DATA_VALID	BIT(4)
+
+struct icm_usb4_switch_op_response {
+	struct icm_pkg_header hdr;
+	u32 route_hi;
+	u32 route_lo;
+	u32 metadata;
+	u16 opcode;
+	u16 status;
+	u32 data[16];
+};
+
 /* XDomain messages */
 
 struct tb_xdomain_header {

From db0746e3399ee87ee5f957880811da16faa89fb8 Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Fri, 31 Jan 2020 19:24:30 +0300
Subject: [PATCH 22/22] thunderbolt: Add support for Intel Maple Ridge

Maple Ridge is the first discrete USB4 host controller from Intel. It
comes with a firmware-based connection manager, and the flows are
similar to those used in Intel Titan Ridge.

Signed-off-by: Mika Westerberg
---
 drivers/thunderbolt/icm.c | 11 +++++++++++
 drivers/thunderbolt/nhi.h |  1 +
 2 files changed, 12 insertions(+)

diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 35935c106e3d..adc7b61937a1 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2499,6 +2499,17 @@ struct tb *icm_probe(struct tb_nhi *nhi)
 		icm->rtd3_veto = icm_icl_rtd3_veto;
 		tb->cm_ops = &icm_icl_ops;
 		break;
+
+	case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
+		icm->is_supported = icm_tgl_is_supported;
+		icm->get_mode = icm_ar_get_mode;
+		icm->driver_ready = icm_tr_driver_ready;
+		icm->device_connected = icm_tr_device_connected;
+		icm->device_disconnected = icm_tr_device_disconnected;
+		icm->xdomain_connected = icm_tr_xdomain_connected;
+		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+		tb->cm_ops = &icm_tr_ops;
+		break;
 	}
 
 	if (!icm->is_supported || !icm->is_supported(tb)) {
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 80162e4b013f..ffe0531c0fd0 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
 * need for the PCI quirk anymore as we will use ICM also on Apple
 * hardware.
 */
+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI	0x1137
 #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI		0x157d
 #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE		0x157e
 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI		0x15bf