1
0
Fork 0

thunderbolt: Changes for v5.11 merge window

This includes following Thunderbolt/USB4 changes for v5.11 merge window:
 
   * DMA traffic test driver
 
   * USB4 router NVM upgrade improvements
 
   * USB4 router operations proxy implementation available in the recent
     Intel Connection Manager firmwares
 
   * Support for Intel Maple Ridge discrete Thunderbolt 4 controller
 
   * A couple of cleanups and minor improvements.
 -----BEGIN PGP SIGNATURE-----
 
 iQJUBAABCgA+FiEEVTdhRGBbNzLrSUBaAP2fSd+ZWKAFAl/PagggHG1pa2Eud2Vz
 dGVyYmVyZ0BsaW51eC5pbnRlbC5jb20ACgkQAP2fSd+ZWKD5DQ/+NFsABFQaf1P+
 sU4HVOo9cwfvQCFCapfOsOjBsrkuLsjZPQdUqVdTzhrDzRM6uVQXHqWkcInNYxEs
 D0o9f8yheYSPuZolHIIydkNZ7VjhXwhVp7FuF+6M3bIhtD9siuBUisCu7QtOjzpF
 EAzBZcGHvKXkPmVAQKZS/P4WsgcZDv0/pODjeQawosgJAPOo5begZVBEYcpbOeCT
 qvl1vEs+Fr5fcoFcY/58sJX932CcbbO5bZMSc01IIF94FQMsQJg3ATLdxkgc++2M
 FnzcHEVQi7h8zwCmMT4deGwLJqvbyVy5SNo6GY4/Adhsf0HQzrvtWdESegoIooJK
 dNWhSCuAFbXrFKGH4iBEUldigNXiCGiTwalmJ1+IIDccJQwkKC4GGU+9KEWBtYCn
 OIvKkHUWPeeqNBzSeiMbFDXiK6QFe2VpNBg/iRUZwZwxibqgjgJE1rHbY098sPrL
 yHRcrz6vih3wgpqZJTGdanIMk6F0MzaoHtj2egXbXaqyGf8dIdvnZJZN9gb5WDyu
 ZT/ffh3XiNfBvFtsu9gosnn3m76TQ4jIb4lUesTOVZjHX2yNz3MabYet312lP4PO
 qhxb1l2HGWuxnLLSxas6gzEv5arpx88ldSj6PaA86pBL/eezy59Bvn5hYrmCQ269
 lVZQ19nC8y13VyCgbqcyTSpGxS+NXV0=
 =+RXF
 -----END PGP SIGNATURE-----

Merge tag 'thunderbolt-for-v5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v5.11 merge window

This includes following Thunderbolt/USB4 changes for v5.11 merge window:

  * DMA traffic test driver

  * USB4 router NVM upgrade improvements

  * USB4 router operations proxy implementation available in the recent
    Intel Connection Manager firmwares

  * Support for Intel Maple Ridge discrete Thunderbolt 4 controller

  * A couple of cleanups and minor improvements.

* tag 'thunderbolt-for-v5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (22 commits)
  thunderbolt: Add support for Intel Maple Ridge
  thunderbolt: Add USB4 router operation proxy for firmware connection manager
  thunderbolt: Move constants for USB4 router operations to tb_regs.h
  thunderbolt: Add connection manager specific hooks for USB4 router operations
  thunderbolt: Pass TX and RX data directly to usb4_switch_op()
  thunderbolt: Pass metadata directly to usb4_switch_op()
  thunderbolt: Perform USB4 router NVM upgrade in two phases
  thunderbolt: Return -ENOTCONN when ERR_CONN is received
  thunderbolt: Keep the parent runtime resumed for a while on device disconnect
  thunderbolt: Log adapter numbers in decimal in path activation/deactivation
  thunderbolt: Log which connection manager implementation is used
  thunderbolt: Move max_boot_acl field to correct place in struct icm
  MAINTAINERS: Add Isaac as maintainer of Thunderbolt DMA traffic test driver
  thunderbolt: Add DMA traffic test driver
  thunderbolt: Add support for end-to-end flow control
  thunderbolt: Make it possible to allocate one directional DMA tunnel
  thunderbolt: Create debugfs directory automatically for services
  thunderbolt: Add functions for enabling and disabling lane bonding on XDomain
  thunderbolt: Add link_speed and link_width to XDomain
  thunderbolt: Create XDomain devices for loops back to the host
  ...
zero-sugar-mainline-defconfig
Greg Kroah-Hartman 2020-12-08 13:41:45 +01:00
commit 3d41137801
21 changed files with 1553 additions and 164 deletions

View File

@ -1,3 +1,31 @@
What: /sys/bus/thunderbolt/devices/<xdomain>/rx_speed
Date: Feb 2021
KernelVersion: 5.11
Contact: Isaac Hazan <isaac.hazan@intel.com>
Description: This attribute reports the XDomain RX speed per lane.
All RX lanes run at the same speed.
What: /sys/bus/thunderbolt/devices/<xdomain>/rx_lanes
Date: Feb 2021
KernelVersion: 5.11
Contact: Isaac Hazan <isaac.hazan@intel.com>
Description: This attribute reports the number of RX lanes the XDomain
is using simultaneously through its upstream port.
What: /sys/bus/thunderbolt/devices/<xdomain>/tx_speed
Date: Feb 2021
KernelVersion: 5.11
Contact: Isaac Hazan <isaac.hazan@intel.com>
Description: This attribute reports the XDomain TX speed per lane.
All TX lanes run at the same speed.
What: /sys/bus/thunderbolt/devices/<xdomain>/tx_lanes
Date: Feb 2021
KernelVersion: 5.11
Contact: Isaac Hazan <isaac.hazan@intel.com>
Description: This attribute reports the number of TX lanes the XDomain
is using simultaneously through its upstream port.
What: /sys/bus/thunderbolt/devices/.../domainX/boot_acl
Date: Jun 2018
KernelVersion: 4.17

View File

@ -17372,6 +17372,12 @@ W: http://thinkwiki.org/wiki/Ibm-acpi
T: git git://repo.or.cz/linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git
F: drivers/platform/x86/thinkpad_acpi.c
THUNDERBOLT DMA TRAFFIC TEST DRIVER
M: Isaac Hazan <isaac.hazan@intel.com>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/thunderbolt/dma_test.c
THUNDERBOLT DRIVER
M: Andreas Noever <andreas.noever@gmail.com>
M: Michael Jamet <michael.jamet@intel.com>

View File

@ -866,7 +866,7 @@ static int tbnet_open(struct net_device *dev)
eof_mask = BIT(TBIP_PDF_FRAME_END);
ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
RING_FLAG_FRAME, sof_mask, eof_mask,
RING_FLAG_FRAME, 0, sof_mask, eof_mask,
tbnet_start_poll, net);
if (!ring) {
netdev_err(dev, "failed to allocate Rx ring\n");

View File

@ -31,4 +31,17 @@ config USB4_KUNIT_TEST
bool "KUnit tests"
depends on KUNIT=y
config USB4_DMA_TEST
tristate "DMA traffic test driver"
depends on DEBUG_FS
help
This allows sending and receiving DMA traffic through loopback
connection. Loopback connection can be done by either special
dongle that has TX/RX lines crossed, or by simply connecting a
cable back to the host. Only enable this if you know what you
are doing. Normal users and distro kernels should say N here.
To compile this driver as a module, choose M here. The module will be
called thunderbolt_dma_test.
endif # USB4

View File

@ -7,3 +7,6 @@ thunderbolt-objs += nvm.o retimer.o quirks.o
thunderbolt-${CONFIG_ACPI} += acpi.o
thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o
thunderbolt_dma_test-${CONFIG_USB4_DMA_TEST} += dma_test.o
obj-$(CONFIG_USB4_DMA_TEST) += thunderbolt_dma_test.o

View File

@ -628,8 +628,8 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
if (!ctl->tx)
goto err;
ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
0xffff, NULL, NULL);
ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
0xffff, NULL, NULL);
if (!ctl->rx)
goto err;
@ -962,6 +962,9 @@ static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
if (res->tb_error == TB_CFG_ERROR_LOCK)
return -EACCES;
else if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
return -ENOTCONN;
return -EIO;
}

View File

@ -691,6 +691,30 @@ void tb_switch_debugfs_remove(struct tb_switch *sw)
debugfs_remove_recursive(sw->debugfs_dir);
}
/**
* tb_service_debugfs_init() - Add debugfs directory for service
* @svc: Thunderbolt service pointer
*
* Adds debugfs directory for service.
*/
void tb_service_debugfs_init(struct tb_service *svc)
{
svc->debugfs_dir = debugfs_create_dir(dev_name(&svc->dev),
tb_debugfs_root);
}
/**
* tb_service_debugfs_remove() - Remove service debugfs directory
* @svc: Thunderbolt service pointer
*
* Removes the previously created debugfs directory for @svc.
*/
void tb_service_debugfs_remove(struct tb_service *svc)
{
debugfs_remove_recursive(svc->debugfs_dir);
svc->debugfs_dir = NULL;
}
void tb_debugfs_init(void)
{
tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL);

View File

@ -0,0 +1,736 @@
// SPDX-License-Identifier: GPL-2.0
/*
* DMA traffic test driver
*
* Copyright (C) 2020, Intel Corporation
* Authors: Isaac Hazan <isaac.hazan@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
/* HopID used for both ends of the loopback DMA paths */
#define DMA_TEST_HOPID 8
/* Number of descriptors in the TX and RX software rings */
#define DMA_TEST_TX_RING_SIZE 64
#define DMA_TEST_RX_RING_SIZE 256
/* Each test packet is one 4 KiB frame */
#define DMA_TEST_FRAME_SIZE SZ_4K
/* Seed for the incrementing 64-bit payload pattern */
#define DMA_TEST_DATA_PATTERN 0x0123456789abcdefLL
/* Upper bound accepted for packets_to_send/packets_to_receive */
#define DMA_TEST_MAX_PACKETS 1000

/* PDF values marking the first and last descriptor of a frame */
enum dma_test_frame_pdf {
	DMA_TEST_PDF_FRAME_START = 1,
	DMA_TEST_PDF_FRAME_END,
};

/* Per-packet bookkeeping: payload buffer plus the ring descriptor */
struct dma_test_frame {
	struct dma_test *dma_test;	/* owning test instance */
	void *data;			/* DMA_TEST_FRAME_SIZE payload buffer */
	struct ring_frame frame;	/* descriptor handed to the ring */
};
enum dma_test_test_error {
DMA_TEST_NO_ERROR,
DMA_TEST_INTERRUPTED,
DMA_TEST_BUFFER_ERROR,
DMA_TEST_DMA_ERROR,
DMA_TEST_CONFIG_ERROR,
DMA_TEST_SPEED_ERROR,
DMA_TEST_WIDTH_ERROR,
DMA_TEST_BONDING_ERROR,
DMA_TEST_PACKET_ERROR,
};
static const char * const dma_test_error_names[] = {
[DMA_TEST_NO_ERROR] = "no errors",
[DMA_TEST_INTERRUPTED] = "interrupted by signal",
[DMA_TEST_BUFFER_ERROR] = "no memory for packet buffers",
[DMA_TEST_DMA_ERROR] = "DMA ring setup failed",
[DMA_TEST_CONFIG_ERROR] = "configuration is not valid",
[DMA_TEST_SPEED_ERROR] = "unexpected link speed",
[DMA_TEST_WIDTH_ERROR] = "unexpected link width",
[DMA_TEST_BONDING_ERROR] = "lane bonding configuration error",
[DMA_TEST_PACKET_ERROR] = "packet check failed",
};
enum dma_test_result {
DMA_TEST_NOT_RUN,
DMA_TEST_SUCCESS,
DMA_TEST_FAIL,
};
static const char * const dma_test_result_names[] = {
[DMA_TEST_NOT_RUN] = "not run",
[DMA_TEST_SUCCESS] = "success",
[DMA_TEST_FAIL] = "failed",
};
/**
 * struct dma_test - DMA test device driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @rx_ring: Software ring holding RX frames
 * @tx_ring: Software ring holding TX frames
 * @packets_to_send: Number of packets to send
 * @packets_to_receive: Number of packets to receive
 * @packets_sent: Actual number of packets sent
 * @packets_received: Actual number of packets received
 * @link_speed: Expected link speed (Gb/s), %0 to use whatever is negotiated
 * @link_width: Expected link width (number of lanes), %0 to use whatever is
 *		negotiated
 * @crc_errors: Number of CRC errors during the test run
 * @buffer_overflow_errors: Number of buffer overflow errors during the test
 *			    run
 * @result: Result of the last run
 * @error_code: Error code of the last run
 * @complete: Used to wait for the Rx to complete
 * @lock: Lock serializing access to this structure
 * @debugfs_dir: dentry of this dma_test
 */
struct dma_test {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_ring *rx_ring;
	struct tb_ring *tx_ring;
	unsigned int packets_to_send;
	unsigned int packets_to_receive;
	unsigned int packets_sent;
	unsigned int packets_received;
	unsigned int link_speed;
	unsigned int link_width;
	unsigned int crc_errors;
	unsigned int buffer_overflow_errors;
	enum dma_test_result result;
	enum dma_test_test_error error_code;
	struct completion complete;
	struct mutex lock;
	struct dentry *debugfs_dir;
};

/* DMA test property directory UUID: 3188cd10-6523-4a5a-a682-fdca07a248d8 */
static const uuid_t dma_test_dir_uuid =
	UUID_INIT(0x3188cd10, 0x6523, 0x4a5a,
		  0xa6, 0x82, 0xfd, 0xca, 0x07, 0xa2, 0x48, 0xd8);

/* Property directory advertised while the module is loaded */
static struct tb_property_dir *dma_test_dir;
/* Shared, pre-computed payload pattern copied into every TX frame */
static void *dma_test_pattern;
/* Release one ring (if allocated) and clear the caller's pointer. */
static void dma_test_free_one_ring(struct tb_ring **ring)
{
	if (*ring) {
		tb_ring_free(*ring);
		*ring = NULL;
	}
}

/* Release both software rings; safe to call when neither was allocated. */
static void dma_test_free_rings(struct dma_test *dt)
{
	/* RX first, then TX — same order as the original teardown */
	dma_test_free_one_ring(&dt->rx_ring);
	dma_test_free_one_ring(&dt->tx_ring);
}
/*
 * Allocate the TX/RX rings requested by the test configuration, program
 * the XDomain DMA paths and start the rings. On any failure everything
 * allocated here is torn down again before returning the error.
 */
static int dma_test_start_rings(struct dma_test *dt)
{
	unsigned int flags = RING_FLAG_FRAME;
	struct tb_xdomain *xd = dt->xd;
	int ret, e2e_tx_hop = 0;
	struct tb_ring *ring;

	/*
	 * If we are both sender and receiver (traffic goes over a
	 * special loopback dongle) enable E2E flow control. This avoids
	 * losing packets.
	 */
	if (dt->packets_to_send && dt->packets_to_receive)
		flags |= RING_FLAG_E2E;

	if (dt->packets_to_send) {
		ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE,
					flags);
		if (!ring)
			return -ENOMEM;

		dt->tx_ring = ring;
		/* The RX ring uses the TX hop as its E2E flow control target */
		e2e_tx_hop = ring->hop;
	}

	if (dt->packets_to_receive) {
		u16 sof_mask, eof_mask;

		/* Accept only frames delimited with the test PDF values */
		sof_mask = BIT(DMA_TEST_PDF_FRAME_START);
		eof_mask = BIT(DMA_TEST_PDF_FRAME_END);

		ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE,
					flags, e2e_tx_hop, sof_mask, eof_mask,
					NULL, NULL);
		if (!ring) {
			dma_test_free_rings(dt);
			return -ENOMEM;
		}

		dt->rx_ring = ring;
	}

	/* Program the DMA paths between the two hosts */
	ret = tb_xdomain_enable_paths(dt->xd, DMA_TEST_HOPID,
				      dt->tx_ring ? dt->tx_ring->hop : 0,
				      DMA_TEST_HOPID,
				      dt->rx_ring ? dt->rx_ring->hop : 0);
	if (ret) {
		dma_test_free_rings(dt);
		return ret;
	}

	if (dt->tx_ring)
		tb_ring_start(dt->tx_ring);
	if (dt->rx_ring)
		tb_ring_start(dt->rx_ring);

	return 0;
}
/*
 * Stop the rings, tear down the XDomain DMA paths and release the ring
 * resources. Counterpart of dma_test_start_rings().
 */
static void dma_test_stop_rings(struct dma_test *dt)
{
	if (dt->rx_ring)
		tb_ring_stop(dt->rx_ring);
	if (dt->tx_ring)
		tb_ring_stop(dt->tx_ring);

	/* Best effort: warn but continue so the rings still get freed */
	if (tb_xdomain_disable_paths(dt->xd))
		dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");

	dma_test_free_rings(dt);
}
/*
 * Invoked by the ring for each received frame (or when the frame is
 * canceled on ring stop). Unmaps and frees the packet, updates the CRC
 * and overflow counters and completes @dt->complete once the expected
 * number of packets has arrived.
 */
static void dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
				 bool canceled)
{
	struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
	struct dma_test *dt = tf->dma_test;
	struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);

	dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
			 DMA_FROM_DEVICE);
	kfree(tf->data);

	/* Canceled frames do not count towards the test statistics */
	if (canceled) {
		kfree(tf);
		return;
	}

	dt->packets_received++;
	dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received,
		dt->packets_to_receive);

	if (tf->frame.flags & RING_DESC_CRC_ERROR)
		dt->crc_errors++;
	if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN)
		dt->buffer_overflow_errors++;

	kfree(tf);

	if (dt->packets_received == dt->packets_to_receive)
		complete(&dt->complete);
}
/*
 * Allocate, DMA-map and queue @npackets empty RX frames on the RX ring.
 * Returns 0 on success or -ENOMEM on allocation/mapping failure; frames
 * already queued are reclaimed later through the canceled path of
 * dma_test_rx_callback() when the ring is stopped.
 */
static int dma_test_submit_rx(struct dma_test *dt, size_t npackets)
{
	struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
	int i;

	for (i = 0; i < npackets; i++) {
		struct dma_test_frame *tf;
		dma_addr_t dma_addr;

		tf = kzalloc(sizeof(*tf), GFP_KERNEL);
		if (!tf)
			return -ENOMEM;

		tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
		if (!tf->data) {
			kfree(tf);
			return -ENOMEM;
		}

		dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			kfree(tf->data);
			kfree(tf);
			return -ENOMEM;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = dma_test_rx_callback;
		tf->dma_test = dt;
		INIT_LIST_HEAD(&tf->frame.list);

		tb_ring_rx(dt->rx_ring, &tf->frame);
	}

	return 0;
}
/*
 * Invoked by the ring once a TX descriptor has been consumed (or the
 * frame was canceled). The buffer and bookkeeping frame are released
 * unconditionally — there is nothing to count on the TX side.
 */
static void dma_test_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
				 bool canceled)
{
	struct dma_test_frame *pkt = container_of(frame, typeof(*pkt), frame);
	struct device *dma_dev;

	dma_dev = tb_ring_dma_device(pkt->dma_test->tx_ring);
	dma_unmap_single(dma_dev, pkt->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
			 DMA_TO_DEVICE);

	kfree(pkt->data);
	kfree(pkt);
}
/*
 * Allocate @npackets TX frames filled with the shared test pattern,
 * DMA-map them and queue them on the TX ring. Returns 0 on success or
 * -ENOMEM on allocation/mapping failure.
 */
static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
{
	struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
	int i;

	for (i = 0; i < npackets; i++) {
		struct dma_test_frame *tf;
		dma_addr_t dma_addr;

		tf = kzalloc(sizeof(*tf), GFP_KERNEL);
		if (!tf)
			return -ENOMEM;

		tf->frame.size = 0; /* means 4096 */
		tf->dma_test = dt;

		tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
		if (!tf->data) {
			kfree(tf);
			return -ENOMEM;
		}

		memcpy(tf->data, dma_test_pattern, DMA_TEST_FRAME_SIZE);

		dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			kfree(tf->data);
			kfree(tf);
			return -ENOMEM;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = dma_test_tx_callback;
		/* Delimit the frame so the receiver's SOF/EOF masks accept it */
		tf->frame.sof = DMA_TEST_PDF_FRAME_START;
		tf->frame.eof = DMA_TEST_PDF_FRAME_END;
		INIT_LIST_HEAD(&tf->frame.list);

		dt->packets_sent++;
		dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent,
			dt->packets_to_send);

		tb_ring_tx(dt->tx_ring, &tf->frame);
	}

	return 0;
}
/*
 * Generates <name>_show/<name>_store plus a debugfs attribute
 * <name>_fops for a simple u64 value. Both accessors take dt->lock
 * interruptibly; @__validate runs in the store path before the lock is
 * taken, so invalid values never reach @__set.
 */
#define DMA_TEST_DEBUGFS_ATTR(__fops, __get, __validate, __set)	\
static int __fops ## _show(void *data, u64 *val)		\
{								\
	struct tb_service *svc = data;				\
	struct dma_test *dt = tb_service_get_drvdata(svc);	\
	int ret;						\
								\
	ret = mutex_lock_interruptible(&dt->lock);		\
	if (ret)						\
		return ret;					\
	__get(dt, val);						\
	mutex_unlock(&dt->lock);				\
	return 0;						\
}								\
static int __fops ## _store(void *data, u64 val)		\
{								\
	struct tb_service *svc = data;				\
	struct dma_test *dt = tb_service_get_drvdata(svc);	\
	int ret;						\
								\
	ret = __validate(val);					\
	if (ret)						\
		return ret;					\
	ret = mutex_lock_interruptible(&dt->lock);		\
	if (ret)						\
		return ret;					\
	__set(dt, val);						\
	mutex_unlock(&dt->lock);				\
	return 0;						\
}								\
DEFINE_DEBUGFS_ATTRIBUTE(__fops ## _fops, __fops ## _show,	\
			 __fops ## _store, "%llu\n")
/* "lanes" attribute: expected link width, 0 = use whatever is negotiated */
static void lanes_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->link_width;
}

static int lanes_validate(u64 val)
{
	/* Only 0 (don't care), 1 or 2 lanes are meaningful */
	return val > 2 ? -EINVAL : 0;
}

static void lanes_set(struct dma_test *dt, u64 val)
{
	dt->link_width = val;
}
DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set);
/* "speed" attribute: expected link speed in Gb/s, 0 = don't care */
static void speed_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->link_speed;
}

static int speed_validate(u64 val)
{
	/* Only the speeds the hardware can negotiate are accepted */
	switch (val) {
	case 20:
	case 10:
	case 0:
		return 0;
	default:
		return -EINVAL;
	}
}

static void speed_set(struct dma_test *dt, u64 val)
{
	dt->link_speed = val;
}
DMA_TEST_DEBUGFS_ATTR(speed, speed_get, speed_validate, speed_set);
/* "packets_to_receive" attribute: 0..DMA_TEST_MAX_PACKETS */
static void packets_to_receive_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->packets_to_receive;
}

static int packets_to_receive_validate(u64 val)
{
	return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_receive_set(struct dma_test *dt, u64 val)
{
	dt->packets_to_receive = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_receive, packets_to_receive_get,
		      packets_to_receive_validate, packets_to_receive_set);

/* "packets_to_send" attribute: 0..DMA_TEST_MAX_PACKETS */
static void packets_to_send_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->packets_to_send;
}

static int packets_to_send_validate(u64 val)
{
	return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_send_set(struct dma_test *dt, u64 val)
{
	dt->packets_to_send = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get,
		      packets_to_send_validate, packets_to_send_set);
/*
 * Apply the requested lane configuration: width 2 asks the XDomain to
 * bond the lanes, width 1 explicitly disables bonding, and 0 ("use
 * whatever is negotiated") leaves the link untouched.
 */
static int dma_test_set_bonding(struct dma_test *dt)
{
	if (dt->link_width == 2)
		return tb_xdomain_lane_bonding_enable(dt->xd);

	if (dt->link_width == 1)
		tb_xdomain_lane_bonding_disable(dt->xd);

	return 0;
}
/*
 * A configuration is valid when at least one direction is exercised
 * and, if both are, the TX and RX packet counts agree (loopback mode).
 */
static bool dma_test_validate_config(struct dma_test *dt)
{
	unsigned int tx = dt->packets_to_send;
	unsigned int rx = dt->packets_to_receive;

	if (!tx && !rx)
		return false;

	return !(tx && rx && tx != rx);
}
/*
 * Translate the outcome of a finished run into @dt->error_code and
 * @dt->result. Any mismatch between the configured expectations (link
 * speed, link width, packet counts) or a nonzero CRC/overflow counter
 * marks the run as failed; an error code set earlier is kept.
 * NOTE(review): @ret is currently unused here — failures from the
 * setup/submit paths reach this function via dt->error_code instead.
 */
static void dma_test_check_errors(struct dma_test *dt, int ret)
{
	if (!dt->error_code) {
		if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
			dt->error_code = DMA_TEST_SPEED_ERROR;
		} else if (dt->link_width &&
			   dt->xd->link_width != dt->link_width) {
			dt->error_code = DMA_TEST_WIDTH_ERROR;
		} else if (dt->packets_to_send != dt->packets_sent ||
			   dt->packets_to_receive != dt->packets_received ||
			   dt->crc_errors || dt->buffer_overflow_errors) {
			dt->error_code = DMA_TEST_PACKET_ERROR;
		} else {
			/* Everything matched — leave result untouched */
			return;
		}
	}

	dt->result = DMA_TEST_FAIL;
}
/*
 * "test" debugfs file: writing 1 runs the DMA test synchronously with
 * the currently configured parameters. Returns 0 when the run finished
 * (consult the "status" file for the verdict) or a negative error code.
 * Holds dt->lock for the whole run.
 */
static int test_store(void *data, u64 val)
{
	struct tb_service *svc = data;
	struct dma_test *dt = tb_service_get_drvdata(svc);
	int ret;

	/* Only "1" triggers a run */
	if (val != 1)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dt->lock);
	if (ret)
		return ret;

	/* Reset the statistics of any previous run */
	dt->packets_sent = 0;
	dt->packets_received = 0;
	dt->crc_errors = 0;
	dt->buffer_overflow_errors = 0;
	dt->result = DMA_TEST_SUCCESS;
	dt->error_code = DMA_TEST_NO_ERROR;

	dev_dbg(&svc->dev, "DMA test starting\n");
	if (dt->link_speed)
		dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed);
	if (dt->link_width)
		dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width);
	dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send);
	dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive);

	if (!dma_test_validate_config(dt)) {
		dev_err(&svc->dev, "invalid test configuration\n");
		dt->error_code = DMA_TEST_CONFIG_ERROR;
		goto out_unlock;
	}

	ret = dma_test_set_bonding(dt);
	if (ret) {
		dev_err(&svc->dev, "failed to set lanes\n");
		dt->error_code = DMA_TEST_BONDING_ERROR;
		goto out_unlock;
	}

	ret = dma_test_start_rings(dt);
	if (ret) {
		dev_err(&svc->dev, "failed to enable DMA rings\n");
		dt->error_code = DMA_TEST_DMA_ERROR;
		goto out_unlock;
	}

	if (dt->packets_to_receive) {
		reinit_completion(&dt->complete);
		ret = dma_test_submit_rx(dt, dt->packets_to_receive);
		if (ret) {
			dev_err(&svc->dev, "failed to submit receive buffers\n");
			dt->error_code = DMA_TEST_BUFFER_ERROR;
			goto out_stop;
		}
	}

	if (dt->packets_to_send) {
		ret = dma_test_submit_tx(dt, dt->packets_to_send);
		if (ret) {
			dev_err(&svc->dev, "failed to submit transmit buffers\n");
			dt->error_code = DMA_TEST_BUFFER_ERROR;
			goto out_stop;
		}
	}

	if (dt->packets_to_receive) {
		/* Wait for the RX callback to see the last expected packet */
		ret = wait_for_completion_interruptible(&dt->complete);
		if (ret) {
			dt->error_code = DMA_TEST_INTERRUPTED;
			goto out_stop;
		}
	}

out_stop:
	dma_test_stop_rings(dt);
out_unlock:
	/* Fold counters and expectations into result/error_code */
	dma_test_check_errors(dt, ret);
	mutex_unlock(&dt->lock);

	dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]);
	return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(test_fops, NULL, test_store, "%llu\n");
/*
 * "status" debugfs file: prints the result and statistics of the last
 * test run (just "not run" if no test has been executed yet).
 */
static int status_show(struct seq_file *s, void *not_used)
{
	struct tb_service *svc = s->private;
	struct dma_test *dt = tb_service_get_drvdata(svc);
	int ret;

	ret = mutex_lock_interruptible(&dt->lock);
	if (ret)
		return ret;

	seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]);
	if (dt->result == DMA_TEST_NOT_RUN)
		goto out_unlock;

	seq_printf(s, "packets received: %u\n", dt->packets_received);
	seq_printf(s, "packets sent: %u\n", dt->packets_sent);
	seq_printf(s, "CRC errors: %u\n", dt->crc_errors);
	seq_printf(s, "buffer overflow errors: %u\n",
		   dt->buffer_overflow_errors);
	seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]);

out_unlock:
	mutex_unlock(&dt->lock);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(status);
/*
 * Create the per-service "dma_test" debugfs directory and populate it
 * with the configuration, status and trigger files.
 */
static void dma_test_debugfs_init(struct tb_service *svc)
{
	struct dma_test *dt = tb_service_get_drvdata(svc);

	dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir);

	debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops);
	debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops);
	debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc,
			    &packets_to_receive_fops);
	debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc,
			    &packets_to_send_fops);
	/* Status is read-only, the trigger is write-only */
	debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops);
	debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops);
}
/*
 * Bind the driver to a DMA test service: allocate the per-service state
 * (device managed, so it is freed automatically on unbind) and expose
 * the debugfs control files.
 */
static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct dma_test *dt;

	dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL);
	if (!dt)
		return -ENOMEM;

	dt->svc = svc;
	dt->xd = xd;
	init_completion(&dt->complete);
	mutex_init(&dt->lock);

	tb_service_set_drvdata(svc, dt);
	dma_test_debugfs_init(svc);

	return 0;
}
/*
 * Unbind: tear down the debugfs directory under dt->lock so a writer in
 * test_store() cannot race with the removal. The dma_test state itself
 * is devm-managed and freed by the driver core.
 */
static void dma_test_remove(struct tb_service *svc)
{
	struct dma_test *dt = tb_service_get_drvdata(svc);

	mutex_lock(&dt->lock);
	debugfs_remove_recursive(dt->debugfs_dir);
	mutex_unlock(&dt->lock);
}
static int __maybe_unused dma_test_suspend(struct device *dev)
{
	/*
	 * No need to do anything special here. If userspace is writing
	 * to the test attribute when suspend started, it comes out from
	 * wait_for_completion_interruptible() with -ERESTARTSYS and the
	 * DMA test fails tearing down the rings. Once userspace is
	 * thawed the kernel restarts the write syscall effectively
	 * re-running the test.
	 */
	return 0;
}

static int __maybe_unused dma_test_resume(struct device *dev)
{
	/* Nothing to restore; see dma_test_suspend() */
	return 0;
}

static const struct dev_pm_ops dma_test_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dma_test_suspend, dma_test_resume)
};
/* Matches the "dma_test" service, protocol version 1 */
static const struct tb_service_id dma_test_ids[] = {
	{ TB_SERVICE("dma_test", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, dma_test_ids);

static struct tb_service_driver dma_test_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt_dma_test",
		.pm = &dma_test_pm_ops,
	},
	.probe = dma_test_probe,
	.remove = dma_test_remove,
	.id_table = dma_test_ids,
};
/*
 * Module init: build the 4 KiB test pattern, publish the DMA test
 * XDomain property directory and register the service driver. Undoes
 * everything in reverse order on failure.
 */
static int __init dma_test_init(void)
{
	u64 data_value = DMA_TEST_DATA_PATTERN;
	int i, ret;

	dma_test_pattern = kmalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
	if (!dma_test_pattern)
		return -ENOMEM;

	/*
	 * Fill the whole frame with incrementing 64-bit values. The loop
	 * bound is in u64 units, so the stores must be u64-wide as well:
	 * storing through a u32 pointer here (as the code previously did)
	 * truncated every value and left the second half of the buffer
	 * zeroed.
	 */
	for (i = 0; i < DMA_TEST_FRAME_SIZE / sizeof(data_value); i++)
		((u64 *)dma_test_pattern)[i] = data_value++;

	dma_test_dir = tb_property_create_dir(&dma_test_dir_uuid);
	if (!dma_test_dir) {
		ret = -ENOMEM;
		goto err_free_pattern;
	}

	tb_property_add_immediate(dma_test_dir, "prtcid", 1);
	tb_property_add_immediate(dma_test_dir, "prtcvers", 1);
	tb_property_add_immediate(dma_test_dir, "prtcrevs", 0);
	tb_property_add_immediate(dma_test_dir, "prtcstns", 0);

	ret = tb_register_property_dir("dma_test", dma_test_dir);
	if (ret)
		goto err_free_dir;

	ret = tb_register_service_driver(&dma_test_driver);
	if (ret)
		goto err_unregister_dir;

	return 0;

err_unregister_dir:
	tb_unregister_property_dir("dma_test", dma_test_dir);
err_free_dir:
	tb_property_free_dir(dma_test_dir);
err_free_pattern:
	kfree(dma_test_pattern);

	return ret;
}
module_init(dma_test_init);
/* Module exit: unregister the driver and release the shared resources */
static void __exit dma_test_exit(void)
{
	tb_unregister_service_driver(&dma_test_driver);
	tb_unregister_property_dir("dma_test", dma_test_dir);
	tb_property_free_dir(dma_test_dir);
	kfree(dma_test_pattern);
}
module_exit(dma_test_exit);

MODULE_AUTHOR("Isaac Hazan <isaac.hazan@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("DMA traffic test driver");
MODULE_LICENSE("GPL v2");

View File

@ -48,6 +48,18 @@ static bool start_icm;
module_param(start_icm, bool, 0444);
MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");
/**
* struct usb4_switch_nvm_auth - Holds USB4 NVM_AUTH status
* @reply: Reply from ICM firmware is placed here
* @request: Request that is sent to ICM firmware
* @icm: Pointer to ICM private data
*/
struct usb4_switch_nvm_auth {
struct icm_usb4_switch_op_response reply;
struct icm_usb4_switch_op request;
struct icm *icm;
};
/**
* struct icm - Internal connection manager private data
* @request_lock: Makes sure only one message is send to ICM at time
@ -61,6 +73,8 @@ MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: f
* @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
* @rpm: Does the controller support runtime PM (RTD3)
* @can_upgrade_nvm: Can the NVM firmware be upgrade on this controller
* @proto_version: Firmware protocol version
* @last_nvm_auth: Last USB4 router NVM_AUTH result (or %NULL if not set)
* @veto: Is RTD3 veto in effect
* @is_supported: Checks if we can support ICM on this controller
* @cio_reset: Trigger CIO reset
@ -79,11 +93,13 @@ struct icm {
struct mutex request_lock;
struct delayed_work rescan_work;
struct pci_dev *upstream_port;
size_t max_boot_acl;
int vnd_cap;
bool safe_mode;
size_t max_boot_acl;
bool rpm;
bool can_upgrade_nvm;
u8 proto_version;
struct usb4_switch_nvm_auth *last_nvm_auth;
bool veto;
bool (*is_supported)(struct tb *tb);
int (*cio_reset)(struct tb *tb);
@ -92,7 +108,7 @@ struct icm {
void (*save_devices)(struct tb *tb);
int (*driver_ready)(struct tb *tb,
enum tb_security_level *security_level,
size_t *nboot_acl, bool *rpm);
u8 *proto_version, size_t *nboot_acl, bool *rpm);
void (*set_uuid)(struct tb *tb);
void (*device_connected)(struct tb *tb,
const struct icm_pkg_header *hdr);
@ -437,7 +453,7 @@ static void icm_fr_save_devices(struct tb *tb)
static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
size_t *nboot_acl, bool *rpm)
u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
struct icm_fr_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
@ -870,7 +886,13 @@ icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
pm_runtime_get_sync(sw->dev.parent);
remove_switch(sw);
pm_runtime_mark_last_busy(sw->dev.parent);
pm_runtime_put_autosuspend(sw->dev.parent);
tb_switch_put(sw);
}
@ -986,7 +1008,7 @@ static int icm_tr_cio_reset(struct tb *tb)
static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
size_t *nboot_acl, bool *rpm)
u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
struct icm_tr_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
@ -1002,6 +1024,9 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
if (security_level)
*security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
if (proto_version)
*proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >>
ICM_TR_INFO_PROTO_VERSION_SHIFT;
if (nboot_acl)
*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
ICM_TR_INFO_BOOT_ACL_SHIFT;
@ -1280,8 +1305,13 @@ icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
return;
}
pm_runtime_get_sync(sw->dev.parent);
remove_switch(sw);
pm_runtime_mark_last_busy(sw->dev.parent);
pm_runtime_put_autosuspend(sw->dev.parent);
tb_switch_put(sw);
}
@ -1450,7 +1480,7 @@ static int icm_ar_get_mode(struct tb *tb)
static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
size_t *nboot_acl, bool *rpm)
u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
struct icm_ar_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
@ -1580,7 +1610,7 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
static int
icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
size_t *nboot_acl, bool *rpm)
u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
struct icm_tr_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
@ -1594,6 +1624,10 @@ icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
if (ret)
return ret;
if (proto_version)
*proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >>
ICM_TR_INFO_PROTO_VERSION_SHIFT;
/* Ice Lake always supports RTD3 */
if (rpm)
*rpm = true;
@ -1702,13 +1736,14 @@ static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
size_t *nboot_acl, bool *rpm)
u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
struct icm *icm = tb_priv(tb);
unsigned int retries = 50;
int ret;
ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
ret = icm->driver_ready(tb, security_level, proto_version, nboot_acl,
rpm);
if (ret) {
tb_err(tb, "failed to send driver ready to ICM\n");
return ret;
@ -1918,8 +1953,8 @@ static int icm_driver_ready(struct tb *tb)
return 0;
}
ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
&icm->rpm);
ret = __icm_driver_ready(tb, &tb->security_level, &icm->proto_version,
&tb->nboot_acl, &icm->rpm);
if (ret)
return ret;
@ -1930,6 +1965,9 @@ static int icm_driver_ready(struct tb *tb)
if (tb->nboot_acl > icm->max_boot_acl)
tb->nboot_acl = 0;
if (icm->proto_version >= 3)
tb_dbg(tb, "USB4 proxy operations supported\n");
return 0;
}
@ -2045,7 +2083,7 @@ static void icm_complete(struct tb *tb)
* Now all existing children should be resumed, start events
* from ICM to get updated status.
*/
__icm_driver_ready(tb, NULL, NULL, NULL);
__icm_driver_ready(tb, NULL, NULL, NULL, NULL);
/*
* We do not get notifications of devices that have been
@ -2124,6 +2162,8 @@ static void icm_stop(struct tb *tb)
tb_switch_remove(tb->root_switch);
tb->root_switch = NULL;
nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
kfree(icm->last_nvm_auth);
icm->last_nvm_auth = NULL;
}
static int icm_disconnect_pcie_paths(struct tb *tb)
@ -2131,6 +2171,165 @@ static int icm_disconnect_pcie_paths(struct tb *tb)
return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
}
static void icm_usb4_switch_nvm_auth_complete(void *data)
{
struct usb4_switch_nvm_auth *auth = data;
struct icm *icm = auth->icm;
struct tb *tb = icm_to_tb(icm);
tb_dbg(tb, "NVM_AUTH response for %llx flags %#x status %#x\n",
get_route(auth->reply.route_hi, auth->reply.route_lo),
auth->reply.hdr.flags, auth->reply.status);
mutex_lock(&tb->lock);
if (WARN_ON(icm->last_nvm_auth))
kfree(icm->last_nvm_auth);
icm->last_nvm_auth = auth;
mutex_unlock(&tb->lock);
}
/*
 * Send the NVM_AUTH USB4 router operation through the ICM firmware
 * proxy. Unlike other proxied operations this one does not complete
 * immediately (the router is expected to power cycle), so the reply is
 * collected asynchronously by icm_usb4_switch_nvm_auth_complete().
 *
 * Returns %0 if the request was successfully posted, negative errno
 * otherwise. @auth ownership transfers to the completion callback on
 * success.
 */
static int icm_usb4_switch_nvm_authenticate(struct tb *tb, u64 route)
{
	struct usb4_switch_nvm_auth *auth;
	struct icm *icm = tb_priv(tb);
	struct tb_cfg_request *req;
	int ret;

	auth = kzalloc(sizeof(*auth), GFP_KERNEL);
	if (!auth)
		return -ENOMEM;

	auth->icm = icm;
	auth->request.hdr.code = ICM_USB4_SWITCH_OP;
	auth->request.route_hi = upper_32_bits(route);
	auth->request.route_lo = lower_32_bits(route);
	auth->request.opcode = USB4_SWITCH_OP_NVM_AUTH;

	req = tb_cfg_request_alloc();
	if (!req) {
		ret = -ENOMEM;
		goto err_free_auth;
	}

	req->match = icm_match;
	req->copy = icm_copy;
	req->request = &auth->request;
	req->request_size = sizeof(auth->request);
	req->request_type = TB_CFG_PKG_ICM_CMD;
	req->response = &auth->reply;
	req->npackets = 1;
	req->response_size = sizeof(auth->reply);
	req->response_type = TB_CFG_PKG_ICM_RESP;

	tb_dbg(tb, "NVM_AUTH request for %llx\n", route);

	/* request_lock serializes outstanding ICM requests */
	mutex_lock(&icm->request_lock);
	ret = tb_cfg_request(tb->ctl, req, icm_usb4_switch_nvm_auth_complete,
			     auth);
	mutex_unlock(&icm->request_lock);

	tb_cfg_request_put(req);

	if (ret)
		goto err_free_auth;
	return 0;

err_free_auth:
	kfree(auth);
	return ret;
}
/*
 * icm_usb4_switch_op() - Proxy a USB4 router operation through ICM firmware
 * @sw: USB4 router
 * @opcode: Router operation to run
 * @metadata: Optional in/out operation metadata
 * @status: Optional pointer filled with the operation status
 * @tx_data: Optional data to send with the operation
 * @tx_data_len: Number of dwords in @tx_data
 * @rx_data: Optional buffer for data returned by the operation
 * @rx_data_len: Number of dwords expected in @rx_data
 *
 * Returns %0 in case of success, %-EOPNOTSUPP if the firmware does not
 * support the proxy (so the caller falls back to the native operation),
 * or other negative errno in case of failure.
 */
static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
			      u8 *status, const void *tx_data, size_t tx_data_len,
			      void *rx_data, size_t rx_data_len)
{
	struct icm_usb4_switch_op_response reply;
	struct icm_usb4_switch_op request;
	struct tb *tb = sw->tb;
	struct icm *icm = tb_priv(tb);
	u64 route = tb_route(sw);
	int ret;

	/*
	 * USB4 router operation proxy is supported in firmware if the
	 * protocol version is 3 or higher.
	 */
	if (icm->proto_version < 3)
		return -EOPNOTSUPP;

	/*
	 * NVM_AUTH is a special USB4 proxy operation that does not
	 * return immediately so handle it separately.
	 */
	if (opcode == USB4_SWITCH_OP_NVM_AUTH)
		return icm_usb4_switch_nvm_authenticate(tb, route);

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_USB4_SWITCH_OP;
	request.route_hi = upper_32_bits(route);
	request.route_lo = lower_32_bits(route);
	request.opcode = opcode;
	if (metadata)
		request.metadata = *metadata;

	if (tx_data_len) {
		request.data_len_valid |= ICM_USB4_SWITCH_DATA_VALID;
		/*
		 * OR the encoded length in; a plain assignment here would
		 * clobber the just-set valid bit.
		 */
		if (tx_data_len < ARRAY_SIZE(request.data))
			request.data_len_valid |=
				tx_data_len & ICM_USB4_SWITCH_DATA_LEN_MASK;
		memcpy(request.data, tx_data, tx_data_len * sizeof(u32));
	}

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	if (status)
		*status = reply.status;

	if (metadata)
		*metadata = reply.metadata;

	if (rx_data_len)
		memcpy(rx_data, reply.data, rx_data_len * sizeof(u32));

	return 0;
}
/*
 * icm_usb4_switch_nvm_authenticate_status() - Report stashed NVM_AUTH result
 * @sw: USB4 router
 * @status: Filled with the NVM_AUTH status code (%0 if none is pending)
 *
 * Consumes the reply stored by icm_usb4_switch_nvm_auth_complete() if it
 * belongs to @sw. Returns %-EOPNOTSUPP when the firmware does not support
 * the proxy, %-EIO if the stored reply carries an error flag, %0 otherwise.
 */
static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw,
						   u32 *status)
{
	struct usb4_switch_nvm_auth *auth;
	struct tb *tb = sw->tb;
	struct icm *icm = tb_priv(tb);
	int ret = 0;

	if (icm->proto_version < 3)
		return -EOPNOTSUPP;

	/* Take ownership of the stashed reply (if any) */
	auth = icm->last_nvm_auth;
	icm->last_nvm_auth = NULL;

	if (auth && auth->reply.route_hi == sw->config.route_hi &&
	    auth->reply.route_lo == sw->config.route_lo) {
		/* %#x already prints the 0x prefix, do not add another */
		tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
		       tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
		if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
			ret = -EIO;
		else
			*status = auth->reply.status;
	} else {
		*status = 0;
	}

	/* Stored reply (or NULL) is consumed either way */
	kfree(auth);
	return ret;
}
/* Falcon Ridge */
static const struct tb_cm_ops icm_fr_ops = {
.driver_ready = icm_driver_ready,
@ -2189,6 +2388,9 @@ static const struct tb_cm_ops icm_tr_ops = {
.disconnect_pcie_paths = icm_disconnect_pcie_paths,
.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
.usb4_switch_op = icm_usb4_switch_op,
.usb4_switch_nvm_authenticate_status =
icm_usb4_switch_nvm_authenticate_status,
};
/* Ice Lake */
@ -2202,6 +2404,9 @@ static const struct tb_cm_ops icm_icl_ops = {
.handle_event = icm_handle_event,
.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
.usb4_switch_op = icm_usb4_switch_op,
.usb4_switch_nvm_authenticate_status =
icm_usb4_switch_nvm_authenticate_status,
};
struct tb *icm_probe(struct tb_nhi *nhi)
@ -2300,6 +2505,17 @@ struct tb *icm_probe(struct tb_nhi *nhi)
icm->rtd3_veto = icm_icl_rtd3_veto;
tb->cm_ops = &icm_icl_ops;
break;
case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
icm->is_supported = icm_tgl_is_supported;
icm->get_mode = icm_ar_get_mode;
icm->driver_ready = icm_tr_driver_ready;
icm->device_connected = icm_tr_device_connected;
icm->device_disconnected = icm_tr_device_disconnected;
icm->xdomain_connected = icm_tr_xdomain_connected;
icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
tb->cm_ops = &icm_tr_ops;
break;
}
if (!icm->is_supported || !icm->is_supported(tb)) {
@ -2308,5 +2524,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
return NULL;
}
tb_dbg(tb, "using firmware connection manager\n");
return tb;
}

View File

@ -494,7 +494,7 @@ err_unlock:
static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
bool transmit, unsigned int flags,
u16 sof_mask, u16 eof_mask,
int e2e_tx_hop, u16 sof_mask, u16 eof_mask,
void (*start_poll)(void *),
void *poll_data)
{
@ -517,6 +517,7 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
ring->is_tx = transmit;
ring->size = size;
ring->flags = flags;
ring->e2e_tx_hop = e2e_tx_hop;
ring->sof_mask = sof_mask;
ring->eof_mask = eof_mask;
ring->head = 0;
@ -561,7 +562,7 @@ err_free_ring:
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags)
{
return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
@ -571,6 +572,7 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
* @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
* @size: Number of entries in the ring
* @flags: Flags for the ring
* @e2e_tx_hop: Transmit HopID when E2E is enabled in @flags
* @sof_mask: Mask of PDF values that start a frame
* @eof_mask: Mask of PDF values that end a frame
* @start_poll: If not %NULL the ring will call this function when an
@ -579,10 +581,11 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
* @poll_data: Optional data passed to @start_poll
*/
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags, u16 sof_mask, u16 eof_mask,
unsigned int flags, int e2e_tx_hop,
u16 sof_mask, u16 eof_mask,
void (*start_poll)(void *), void *poll_data)
{
return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
return tb_ring_alloc(nhi, hop, size, false, flags, e2e_tx_hop, sof_mask, eof_mask,
start_poll, poll_data);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
@ -629,6 +632,31 @@ void tb_ring_start(struct tb_ring *ring)
ring_iowrite32options(ring, sof_eof_mask, 4);
ring_iowrite32options(ring, flags, 0);
}
/*
* Now that the ring valid bit is set we can configure E2E if
* enabled for the ring.
*/
if (ring->flags & RING_FLAG_E2E) {
if (!ring->is_tx) {
u32 hop;
hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT;
hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
flags |= hop;
dev_dbg(&ring->nhi->pdev->dev,
"enabling E2E for %s %d with TX HopID %d\n",
RING_TYPE(ring), ring->hop, ring->e2e_tx_hop);
} else {
dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n",
RING_TYPE(ring), ring->hop);
}
flags |= RING_FLAG_E2E_FLOW_CONTROL;
ring_iowrite32options(ring, flags, 0);
}
ring_interrupt_active(ring, true);
ring->running = true;
err:

View File

@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
* need for the PCI quirk anymore as we will use ICM also on Apple
* hardware.
*/
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI 0x15bf

View File

@ -406,10 +406,17 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
if (!hop.pending) {
if (clear_fc) {
/* Clear flow control */
hop.ingress_fc = 0;
/*
* Clear flow control. Protocol adapters
* IFC and ISE bits are vendor defined
* in the USB4 spec so we clear them
* only for pre-USB4 adapters.
*/
if (!tb_switch_is_usb4(port->sw)) {
hop.ingress_fc = 0;
hop.ingress_shared_buffer = 0;
}
hop.egress_fc = 0;
hop.ingress_shared_buffer = 0;
hop.egress_shared_buffer = 0;
return tb_port_write(port, &hop, TB_CFG_HOPS,
@ -447,7 +454,7 @@ void tb_path_deactivate(struct tb_path *path)
return;
}
tb_dbg(path->tb,
"deactivating %s path from %llx:%x to %llx:%x\n",
"deactivating %s path from %llx:%u to %llx:%u\n",
path->name, tb_route(path->hops[0].in_port->sw),
path->hops[0].in_port->port,
tb_route(path->hops[path->path_length - 1].out_port->sw),
@ -475,7 +482,7 @@ int tb_path_activate(struct tb_path *path)
}
tb_dbg(path->tb,
"activating %s path from %llx:%x to %llx:%x\n",
"activating %s path from %llx:%u to %llx:%u\n",
path->name, tb_route(path->hops[0].in_port->sw),
path->hops[0].in_port->port,
tb_route(path->hops[path->path_length - 1].out_port->sw),

View File

@ -503,12 +503,13 @@ static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
/**
* tb_port_state() - get connectedness state of a port
* @port: the port to check
*
* The port must have a TB_CAP_PHY (i.e. it should be a real port).
*
* Return: Returns an enum tb_port_state on success or an error code on failure.
*/
static int tb_port_state(struct tb_port *port)
int tb_port_state(struct tb_port *port)
{
struct tb_cap_phy phy;
int res;
@ -932,7 +933,14 @@ int tb_port_get_link_speed(struct tb_port *port)
return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
}
static int tb_port_get_link_width(struct tb_port *port)
/**
* tb_port_get_link_width() - Get current link width
* @port: Port to check (USB4 or CIO)
*
* Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
* or negative errno in case of failure.
*/
int tb_port_get_link_width(struct tb_port *port)
{
u32 val;
int ret;
@ -1001,7 +1009,16 @@ static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_lane_bonding_enable(struct tb_port *port)
/**
* tb_port_lane_bonding_enable() - Enable bonding on port
* @port: port to enable
*
* Enable bonding by setting the link width of the port and the
* other port in case of dual link port.
*
* Return: %0 in case of success and negative errno in case of error
*/
int tb_port_lane_bonding_enable(struct tb_port *port)
{
int ret;
@ -1031,7 +1048,15 @@ static int tb_port_lane_bonding_enable(struct tb_port *port)
return 0;
}
static void tb_port_lane_bonding_disable(struct tb_port *port)
/**
* tb_port_lane_bonding_disable() - Disable bonding on port
* @port: port to disable
*
* Disable bonding by setting the link width of the port and the
* other port in case of dual link port.
*
*/
void tb_port_lane_bonding_disable(struct tb_port *port)
{
port->dual_link_port->bonded = false;
port->bonded = false;
@ -2135,6 +2160,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
fallthrough;
case 3:
case 4:
ret = tb_switch_set_uuid(sw);
if (ret)
return ret;
@ -2150,6 +2176,22 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
break;
}
if (sw->no_nvm_upgrade)
return 0;
if (tb_switch_is_usb4(sw)) {
ret = usb4_switch_nvm_authenticate_status(sw, &status);
if (ret)
return ret;
if (status) {
tb_sw_info(sw, "switch flash authentication failed\n");
nvm_set_auth_status(sw, status);
}
return 0;
}
/* Root switch DMA port requires running firmware */
if (!tb_route(sw) && !tb_switch_is_icm(sw))
return 0;
@ -2158,9 +2200,6 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
if (!sw->dma_port)
return 0;
if (sw->no_nvm_upgrade)
return 0;
/*
* If there is status already set then authentication failed
* when the dma_port_flash_update_auth() returned. Power cycling

View File

@ -1534,5 +1534,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
INIT_LIST_HEAD(&tcm->dp_resources);
INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
tb_dbg(tb, "using software connection manager\n");
return tb;
}

View File

@ -367,6 +367,14 @@ struct tb_path {
* @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
* @approve_xdomain_paths: Approve (establish) XDomain DMA paths
* @disconnect_xdomain_paths: Disconnect XDomain DMA paths
* @usb4_switch_op: Optional proxy for USB4 router operations. If set
* this will be called whenever USB4 router operation is
* performed. If this returns %-EOPNOTSUPP then the
* native USB4 router operation is called.
* @usb4_switch_nvm_authenticate_status: Optional callback that the CM
* implementation can be used to
* return status of USB4 NVM_AUTH
* router operation.
*/
struct tb_cm_ops {
int (*driver_ready)(struct tb *tb);
@ -393,6 +401,11 @@ struct tb_cm_ops {
int (*disconnect_pcie_paths)(struct tb *tb);
int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
int (*usb4_switch_op)(struct tb_switch *sw, u16 opcode, u32 *metadata,
u8 *status, const void *tx_data, size_t tx_data_len,
void *rx_data, size_t rx_data_len);
int (*usb4_switch_nvm_authenticate_status)(struct tb_switch *sw,
u32 *status);
};
static inline void *tb_priv(struct tb *tb)
@ -864,6 +877,10 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
(p) = tb_next_port_on_path((src), (dst), (p)))
int tb_port_get_link_speed(struct tb_port *port);
int tb_port_get_link_width(struct tb_port *port);
int tb_port_state(struct tb_port *port);
int tb_port_lane_bonding_enable(struct tb_port *port);
void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
@ -970,6 +987,7 @@ int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
const void *buf, size_t size);
int usb4_switch_nvm_authenticate(struct tb_switch *sw);
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status);
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
@ -1025,11 +1043,15 @@ void tb_debugfs_init(void);
void tb_debugfs_exit(void);
void tb_switch_debugfs_init(struct tb_switch *sw);
void tb_switch_debugfs_remove(struct tb_switch *sw);
void tb_service_debugfs_init(struct tb_service *svc);
void tb_service_debugfs_remove(struct tb_service *svc);
#else
static inline void tb_debugfs_init(void) { }
static inline void tb_debugfs_exit(void) { }
static inline void tb_switch_debugfs_init(struct tb_switch *sw) { }
static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { }
static inline void tb_service_debugfs_init(struct tb_service *svc) { }
static inline void tb_service_debugfs_remove(struct tb_service *svc) { }
#endif
#ifdef CONFIG_USB4_KUNIT_TEST

View File

@ -106,6 +106,7 @@ enum icm_pkg_code {
ICM_APPROVE_XDOMAIN = 0x10,
ICM_DISCONNECT_XDOMAIN = 0x11,
ICM_PREBOOT_ACL = 0x18,
ICM_USB4_SWITCH_OP = 0x20,
};
enum icm_event_code {
@ -343,6 +344,8 @@ struct icm_tr_pkg_driver_ready_response {
#define ICM_TR_FLAGS_RTD3 BIT(6)
#define ICM_TR_INFO_SLEVEL_MASK GENMASK(2, 0)
#define ICM_TR_INFO_PROTO_VERSION_MASK GENMASK(6, 4)
#define ICM_TR_INFO_PROTO_VERSION_SHIFT 4
#define ICM_TR_INFO_BOOT_ACL_SHIFT 7
#define ICM_TR_INFO_BOOT_ACL_MASK GENMASK(12, 7)
@ -478,6 +481,31 @@ struct icm_icl_event_rtd3_veto {
u32 veto_reason;
};
/* USB4 ICM messages */
/*
 * ICM_USB4_SWITCH_OP request packet: asks the firmware to run a USB4
 * router operation on behalf of the software connection manager.
 */
struct icm_usb4_switch_op {
	struct icm_pkg_header hdr;
	u32 route_hi;		/* Upper 32 bits of the router route string */
	u32 route_lo;		/* Lower 32 bits of the router route string */
	u32 metadata;		/* Operation metadata */
	u16 opcode;		/* USB4 router operation opcode */
	u16 data_len_valid;	/* Encoded data length + valid bit, see masks below */
	u32 data[16];		/* Operation input data dwords */
};

#define ICM_USB4_SWITCH_DATA_LEN_MASK	GENMASK(3, 0)
#define ICM_USB4_SWITCH_DATA_VALID	BIT(4)
/* Firmware reply to an ICM_USB4_SWITCH_OP request */
struct icm_usb4_switch_op_response {
	struct icm_pkg_header hdr;
	u32 route_hi;		/* Upper 32 bits of the router route string */
	u32 route_lo;		/* Lower 32 bits of the router route string */
	u32 metadata;		/* Operation metadata returned by firmware */
	u16 opcode;		/* Echoed router operation opcode */
	u16 status;		/* Router operation status */
	u32 data[16];		/* Operation output data dwords */
};
/* XDomain messages */
struct tb_xdomain_header {

View File

@ -211,11 +211,25 @@ struct tb_regs_switch_header {
#define ROUTER_CS_9 0x09
#define ROUTER_CS_25 0x19
#define ROUTER_CS_26 0x1a
#define ROUTER_CS_26_OPCODE_MASK GENMASK(15, 0)
#define ROUTER_CS_26_STATUS_MASK GENMASK(29, 24)
#define ROUTER_CS_26_STATUS_SHIFT 24
#define ROUTER_CS_26_ONS BIT(30)
#define ROUTER_CS_26_OV BIT(31)
/*
 * USB4 router operations opcodes, written to the ROUTER_CS_26 opcode
 * field (shared with the ICM firmware proxy in icm.c).
 */
enum usb4_switch_op {
	USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
	USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
	USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
	USB4_SWITCH_OP_NVM_WRITE = 0x20,
	USB4_SWITCH_OP_NVM_AUTH = 0x21,
	USB4_SWITCH_OP_NVM_READ = 0x22,
	USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
	USB4_SWITCH_OP_DROM_READ = 0x24,
	USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
};
/* Router TMU configuration */
#define TMU_RTR_CS_0 0x00
#define TMU_RTR_CS_0_TD BIT(27)

View File

@ -34,9 +34,6 @@
#define TB_DP_AUX_PATH_OUT 1
#define TB_DP_AUX_PATH_IN 2
#define TB_DMA_PATH_OUT 0
#define TB_DMA_PATH_IN 1
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
@ -829,10 +826,10 @@ static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
* @nhi: Host controller port
* @dst: Destination null port which the other domain is connected to
* @transmit_ring: NHI ring number used to send packets towards the
* other domain
* other domain. Set to %0 if TX path is not needed.
* @transmit_path: HopID used for transmitting packets
* @receive_ring: NHI ring number used to receive packets from the
* other domain
* other domain. Set to %0 if RX path is not needed.
* @reveive_path: HopID used for receiving packets
*
* Return: Returns a tb_tunnel on success or NULL on failure.
@ -843,10 +840,19 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
int receive_path)
{
struct tb_tunnel *tunnel;
size_t npaths = 0, i = 0;
struct tb_path *path;
u32 credits;
tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
if (receive_ring)
npaths++;
if (transmit_ring)
npaths++;
if (WARN_ON(!npaths))
return NULL;
tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
if (!tunnel)
return NULL;
@ -856,22 +862,28 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
credits = tb_dma_credits(nhi);
path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
if (!path) {
tb_tunnel_free(tunnel);
return NULL;
if (receive_ring) {
path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
"DMA RX");
if (!path) {
tb_tunnel_free(tunnel);
return NULL;
}
tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
credits);
tunnel->paths[i++] = path;
}
tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
credits);
tunnel->paths[TB_DMA_PATH_IN] = path;
path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
if (!path) {
tb_tunnel_free(tunnel);
return NULL;
if (transmit_ring) {
path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
"DMA TX");
if (!path) {
tb_tunnel_free(tunnel);
return NULL;
}
tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
tunnel->paths[i++] = path;
}
tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
tunnel->paths[TB_DMA_PATH_OUT] = path;
return tunnel;
}

View File

@ -16,18 +16,6 @@
#define USB4_DATA_DWORDS 16
#define USB4_DATA_RETRIES 3
enum usb4_switch_op {
USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
USB4_SWITCH_OP_NVM_WRITE = 0x20,
USB4_SWITCH_OP_NVM_AUTH = 0x21,
USB4_SWITCH_OP_NVM_READ = 0x22,
USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
USB4_SWITCH_OP_DROM_READ = 0x24,
USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
};
enum usb4_sb_target {
USB4_SB_TARGET_ROUTER,
USB4_SB_TARGET_PARTNER,
@ -74,34 +62,6 @@ static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
return -ETIMEDOUT;
}
static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
size_t dwords)
{
if (dwords > USB4_DATA_DWORDS)
return -EINVAL;
return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
}
static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
size_t dwords)
{
if (dwords > USB4_DATA_DWORDS)
return -EINVAL;
return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
}
static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
{
return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
}
static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
{
return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
}
static int usb4_do_read_data(u16 address, void *buf, size_t size,
read_block_fn read_block, void *read_block_data)
{
@ -171,11 +131,26 @@ static int usb4_do_write_data(unsigned int address, const void *buf, size_t size
return 0;
}
static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
u32 *metadata, u8 *status,
const void *tx_data, size_t tx_dwords,
void *rx_data, size_t rx_dwords)
{
u32 val;
int ret;
if (metadata) {
ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
if (ret)
return ret;
}
if (tx_dwords) {
ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
tx_dwords);
if (ret)
return ret;
}
val = opcode | ROUTER_CS_26_OV;
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
if (ret)
@ -192,10 +167,73 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
if (val & ROUTER_CS_26_ONS)
return -EOPNOTSUPP;
*status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;
if (status)
*status = (val & ROUTER_CS_26_STATUS_MASK) >>
ROUTER_CS_26_STATUS_SHIFT;
if (metadata) {
ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
if (ret)
return ret;
}
if (rx_dwords) {
ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
rx_dwords);
if (ret)
return ret;
}
return 0;
}
/*
 * Run a USB4 router operation, preferring the connection manager proxy
 * (e.g. ICM firmware) when one is registered and falling back to the
 * native register-level implementation otherwise.
 */
static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
			    u8 *status, const void *tx_data, size_t tx_dwords,
			    void *rx_data, size_t rx_dwords)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	/* Data buffers are limited to USB4_DATA_DWORDS (16) dwords */
	if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	/*
	 * If the connection manager implementation provides USB4 router
	 * operation proxy callback, call it here instead of running the
	 * operation natively.
	 */
	if (cm_ops->usb4_switch_op) {
		int ret;

		ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
					     tx_data, tx_dwords, rx_data,
					     rx_dwords);
		if (ret != -EOPNOTSUPP)
			return ret;

		/*
		 * If the proxy was not supported then run the native
		 * router operation instead.
		 */
	}

	return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
				     tx_dwords, rx_data, rx_dwords);
}
/* Convenience wrapper for router operations that carry no data payload */
static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status)
{
	return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
}
/* Convenience wrapper for router operations with tx/rx data payloads */
static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
				      u32 *metadata, u8 *status,
				      const void *tx_data, size_t tx_dwords,
				      void *rx_data, size_t rx_dwords)
{
	return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
				tx_dwords, rx_data, rx_dwords);
}
static void usb4_switch_check_wakes(struct tb_switch *sw)
{
struct tb_port *port;
@ -348,18 +386,12 @@ static int usb4_switch_drom_read_block(void *data,
metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
USB4_DROM_ADDRESS_MASK;
ret = usb4_switch_op_write_metadata(sw, metadata);
ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
&status, NULL, 0, buf, dwords);
if (ret)
return ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
if (ret)
return ret;
if (status)
return -EIO;
return usb4_switch_op_read_data(sw, buf, dwords);
return status ? -EIO : 0;
}
/**
@ -512,17 +544,14 @@ int usb4_switch_nvm_sector_size(struct tb_switch *sw)
u8 status;
int ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
&status);
if (ret)
return ret;
if (status)
return status == 0x2 ? -EOPNOTSUPP : -EIO;
ret = usb4_switch_op_read_metadata(sw, &metadata);
if (ret)
return ret;
return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
@ -539,18 +568,12 @@ static int usb4_switch_nvm_read_block(void *data,
metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
USB4_NVM_READ_OFFSET_MASK;
ret = usb4_switch_op_write_metadata(sw, metadata);
ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
&status, NULL, 0, buf, dwords);
if (ret)
return ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
if (ret)
return ret;
if (status)
return -EIO;
return usb4_switch_op_read_data(sw, buf, dwords);
return status ? -EIO : 0;
}
/**
@ -581,11 +604,8 @@ static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
USB4_NVM_SET_OFFSET_MASK;
ret = usb4_switch_op_write_metadata(sw, metadata);
if (ret)
return ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
&status);
if (ret)
return ret;
@ -599,11 +619,8 @@ static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
u8 status;
int ret;
ret = usb4_switch_op_write_data(sw, buf, dwords);
if (ret)
return ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
buf, dwords, NULL, 0);
if (ret)
return ret;
@ -638,32 +655,78 @@ int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
* @sw: USB4 router
*
* After the new NVM has been written via usb4_switch_nvm_write(), this
* function triggers NVM authentication process. If the authentication
* is successful the router is power cycled and the new NVM starts
* function triggers NVM authentication process. The router gets power
* cycled and if the authentication is successful the new NVM starts
* running. In case of failure returns negative errno.
*
* The caller should call usb4_switch_nvm_authenticate_status() to read
* the status of the authentication after power cycle. It should be the
* first router operation to avoid the status being lost.
*/
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
u8 status = 0;
int ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
switch (ret) {
/*
* The router is power cycled once NVM_AUTH is started so it is
* expected to get any of the following errors back.
*/
case -EACCES:
case -ENOTCONN:
case -ETIMEDOUT:
return 0;
default:
return ret;
}
}
/**
* usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
* @sw: USB4 router
* @status: Status code of the operation
*
* The function checks if there is status available from the last NVM
* authenticate router operation. If there is status then %0 is returned
* and the status code is placed in @status. Returns negative errno in case
* of failure.
*
* Must be called before any other router operation.
*/
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
u16 opcode;
u32 val;
int ret;
if (cm_ops->usb4_switch_nvm_authenticate_status) {
ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
if (ret != -EOPNOTSUPP)
return ret;
}
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
if (ret)
return ret;
switch (status) {
case 0x0:
tb_sw_dbg(sw, "NVM authentication successful\n");
return 0;
case 0x1:
return -EINVAL;
case 0x2:
return -EAGAIN;
case 0x3:
return -EOPNOTSUPP;
default:
return -EIO;
/* Check that the opcode is correct */
opcode = val & ROUTER_CS_26_OPCODE_MASK;
if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
if (val & ROUTER_CS_26_OV)
return -EBUSY;
if (val & ROUTER_CS_26_ONS)
return -EOPNOTSUPP;
*status = (val & ROUTER_CS_26_STATUS_MASK) >>
ROUTER_CS_26_STATUS_SHIFT;
} else {
*status = 0;
}
return 0;
}
/**
@ -677,14 +740,12 @@ int usb4_switch_nvm_authenticate(struct tb_switch *sw)
*/
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
u32 metadata = in->port;
u8 status;
int ret;
ret = usb4_switch_op_write_metadata(sw, in->port);
if (ret)
return false;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
&status);
/*
* If DP resource allocation is not supported assume it is
* always available.
@ -709,14 +770,12 @@ bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
*/
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
u32 metadata = in->port;
u8 status;
int ret;
ret = usb4_switch_op_write_metadata(sw, in->port);
if (ret)
return ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
&status);
if (ret == -EOPNOTSUPP)
return 0;
else if (ret)
@ -734,14 +793,12 @@ int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
*/
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
u32 metadata = in->port;
u8 status;
int ret;
ret = usb4_switch_op_write_metadata(sw, in->port);
if (ret)
return ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
&status);
if (ret == -EOPNOTSUPP)
return 0;
else if (ret)

View File

@ -8,6 +8,7 @@
*/
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@ -21,6 +22,7 @@
#define XDOMAIN_UUID_RETRIES 10
#define XDOMAIN_PROPERTIES_RETRIES 60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10
#define XDOMAIN_BONDING_WAIT 100 /* ms */
struct xdomain_request_work {
struct work_struct work;
@ -587,8 +589,6 @@ static void tb_xdp_handle_request(struct work_struct *work)
break;
case PROPERTIES_CHANGED_REQUEST: {
const struct tb_xdp_properties_changed *xchg =
(const struct tb_xdp_properties_changed *)pkg;
struct tb_xdomain *xd;
ret = tb_xdp_properties_changed_response(ctl, route, sequence);
@ -598,10 +598,12 @@ static void tb_xdp_handle_request(struct work_struct *work)
* the xdomain related to this connection as well in
* case there is a change in services it offers.
*/
xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
xd = tb_xdomain_find_by_route_locked(tb, route);
if (xd) {
queue_delayed_work(tb->wq, &xd->get_properties_work,
msecs_to_jiffies(50));
if (device_is_registered(&xd->dev)) {
queue_delayed_work(tb->wq, &xd->get_properties_work,
msecs_to_jiffies(50));
}
tb_xdomain_put(xd);
}
@ -777,6 +779,7 @@ static void tb_service_release(struct device *dev)
struct tb_service *svc = container_of(dev, struct tb_service, dev);
struct tb_xdomain *xd = tb_service_parent(svc);
tb_service_debugfs_remove(svc);
ida_simple_remove(&xd->service_ids, svc->id);
kfree(svc->key);
kfree(svc);
@ -891,6 +894,8 @@ static void enumerate_services(struct tb_xdomain *xd)
svc->dev.parent = &xd->dev;
dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);
tb_service_debugfs_init(svc);
if (device_register(&svc->dev)) {
put_device(&svc->dev);
break;
@ -943,6 +948,43 @@ static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
}
}
/* Return the router (switch) the XDomain connection hangs off */
static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
	return tb_to_switch(xd->dev.parent);
}
/*
 * Refresh the cached link speed and width of the XDomain connection
 * from the parent router port. If either value changed a KOBJ_CHANGE
 * uevent is emitted so userspace can re-read the sysfs attributes.
 *
 * Returns %0 on success, negative errno if reading the port fails.
 */
static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
	struct tb_port *port = tb_port_at(xd->route, tb_xdomain_parent(xd));
	bool changed = false;
	int val;

	val = tb_port_get_link_speed(port);
	if (val < 0)
		return val;
	changed |= xd->link_speed != val;
	xd->link_speed = val;

	val = tb_port_get_link_width(port);
	if (val < 0)
		return val;
	changed |= xd->link_width != val;
	xd->link_width = val;

	if (changed)
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);

	return 0;
}
static void tb_xdomain_get_uuid(struct work_struct *work)
{
struct tb_xdomain *xd = container_of(work, typeof(*xd),
@@ -962,10 +1004,8 @@ static void tb_xdomain_get_uuid(struct work_struct *work)
return;
}
if (uuid_equal(&uuid, xd->local_uuid)) {
if (uuid_equal(&uuid, xd->local_uuid))
dev_dbg(&xd->dev, "intra-domain loop detected\n");
return;
}
/*
* If the UUID is different, there is another domain connected
@@ -1056,6 +1096,8 @@ static void tb_xdomain_get_properties(struct work_struct *work)
xd->properties = dir;
xd->property_block_gen = gen;
tb_xdomain_update_link_attributes(xd);
tb_xdomain_restore_paths(xd);
mutex_unlock(&xd->lock);
@@ -1162,9 +1204,35 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(unique_id);
/*
 * Sysfs show callback reporting the cached XDomain link speed in Gb/s.
 * The link is symmetric here, so the same callback backs both the
 * rx_speed and tx_speed attributes.
 */
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	unsigned int speed = xd->link_speed;

	return sprintf(buf, "%u.0 Gb/s\n", speed);
}

static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
/*
 * Sysfs show callback reporting the cached XDomain link width (number of
 * lanes). One callback serves both rx_lanes and tx_lanes attributes.
 */
static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	unsigned int lanes = xd->link_width;

	return sprintf(buf, "%u\n", lanes);
}

static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
static struct attribute *xdomain_attrs[] = {
&dev_attr_device.attr,
&dev_attr_device_name.attr,
&dev_attr_rx_lanes.attr,
&dev_attr_rx_speed.attr,
&dev_attr_tx_lanes.attr,
&dev_attr_tx_speed.attr,
&dev_attr_unique_id.attr,
&dev_attr_vendor.attr,
&dev_attr_vendor_name.attr,
@@ -1381,6 +1449,70 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
device_unregister(&xd->dev);
}
/**
 * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. This function tries
 * to enable bonding by first enabling the port and waiting for the CL0
 * state.
 *
 * Return: %0 in case of success and negative errno in case of error.
 */
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
	struct tb_port *port = tb_port_at(xd->route, tb_xdomain_parent(xd));
	int ret;

	/* Without a second (dual link) port there is nothing to bond. */
	if (!port->dual_link_port)
		return -ENODEV;

	ret = tb_port_enable(port->dual_link_port);
	if (ret)
		return ret;

	/*
	 * Wait for the secondary port to come up; a zero result means it
	 * never became connected, which we report as -ENOTCONN.
	 */
	ret = tb_wait_for_port(port->dual_link_port, true);
	if (ret <= 0)
		return ret < 0 ? ret : -ENOTCONN;

	ret = tb_port_lane_bonding_enable(port);
	if (ret) {
		tb_port_warn(port, "failed to enable lane bonding\n");
		return ret;
	}

	/* Refresh the cached speed/width now that the link changed. */
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding enabled\n");
	return 0;
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
/**
 * tb_xdomain_lane_bonding_disable() - Disable lane bonding
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. If bonding has been
 * enabled, this function can be used to disable it.
 */
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
	struct tb_port *port = tb_port_at(xd->route, tb_xdomain_parent(xd));

	/* Nothing to undo if the port has no dual link companion. */
	if (!port->dual_link_port)
		return;

	tb_port_lane_bonding_disable(port);
	tb_port_disable(port->dual_link_port);

	/* Cached speed/width no longer match the link; refresh them. */
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding disabled\n");
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
/**
* tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
* @xd: XDomain connection

View File

@@ -179,6 +179,8 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @lock: Lock to serialize access to the following fields of this structure
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
* @link_speed: Speed of the link in Gb/s
* @link_width: Width of the link (1 or 2)
* @is_unplugged: The XDomain is unplugged
* @resume: The XDomain is being resumed
* @needs_uuid: If the XDomain does not have @remote_uuid it will be
@@ -223,6 +225,8 @@ struct tb_xdomain {
struct mutex lock;
const char *vendor_name;
const char *device_name;
unsigned int link_speed;
unsigned int link_width;
bool is_unplugged;
bool resume;
bool needs_uuid;
@@ -243,6 +247,8 @@ struct tb_xdomain {
u8 depth;
};
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
u16 transmit_ring, u16 receive_path,
u16 receive_ring);
@@ -344,6 +350,9 @@ void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
* @prtcvers: Protocol version from the properties directory
* @prtcrevs: Protocol software revision from the properties directory
* @prtcstns: Protocol settings mask from the properties directory
* @debugfs_dir: Pointer to the service debugfs directory. Always created
* when debugfs is enabled. Can be used by service drivers to
* add their own entries under the service.
*
* Each domain exposes set of services it supports as collection of
* properties. For each service there will be one corresponding
@@ -357,6 +366,7 @@ struct tb_service {
u32 prtcvers;
u32 prtcrevs;
u32 prtcstns;
struct dentry *debugfs_dir;
};
static inline struct tb_service *tb_service_get(struct tb_service *svc)
@@ -471,6 +481,8 @@ struct tb_nhi {
* @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
* @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
* @flags: Ring specific flags
* @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to
* RX ring. For TX ring this should be set to %0.
* @sof_mask: Bit mask used to detect start of frame PDF
* @eof_mask: Bit mask used to detect end of frame PDF
* @start_poll: Called when ring interrupt is triggered to start
@@ -494,6 +506,7 @@ struct tb_ring {
int irq;
u8 vector;
unsigned int flags;
int e2e_tx_hop;
u16 sof_mask;
u16 eof_mask;
void (*start_poll)(void *data);
@@ -504,6 +517,8 @@ struct tb_ring {
#define RING_FLAG_NO_SUSPEND BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E BIT(2)
struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);
@@ -552,7 +567,8 @@ struct ring_frame {
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags, u16 sof_mask, u16 eof_mask,
unsigned int flags, int e2e_tx_hop,
u16 sof_mask, u16 eof_mask,
void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);